# Generated by Django 3.2.7 on 2021-09-09 18:17

import datetime

from django.db import migrations, models
from django.utils.timezone import utc


class Migration(migrations.Migration):

    dependencies = [
        ('auctions', '0008_add_expiry_alter_category_on_listing'),
    ]

    operations = [
        migrations.AlterField(
            model_name='listing',
            name='expiry_date',
            field=models.DateTimeField(default=datetime.datetime(2021, 10, 7, 18, 17, 0, 930064, tzinfo=utc), verbose_name='expiry date'),
        ),
    ]
python
import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal

from lightkurve import search_lightcurve
from lightkurve.io.qlp import read_qlp_lightcurve
from lightkurve.io.detect import detect_filetype


@pytest.mark.remote_data
def test_qlp():
    """Can we read in QLP light curves?"""
    url = "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/qlp/s0011/0000/0002/7755/4109/hlsp_qlp_tess_ffi_s0011-0000000277554109_tess_v01_llc.fits"
    with fits.open(url, mode="readonly") as hdulist:
        # Can we auto-detect a QLP file?
        assert detect_filetype(hdulist) == "QLP"
        # Are the correct fluxes read in?
        lc = read_qlp_lightcurve(url, quality_bitmask=0)
        assert lc.meta["FLUX_ORIGIN"] == "sap_flux"
        assert_array_equal(lc.flux.value, hdulist[1].data["SAP_FLUX"])


@pytest.mark.remote_data
def test_search_qlp():
    """Can we search and download QLP light curves from MAST?"""
    search = search_lightcurve("TIC 277554109", author="QLP", sector=11)
    assert len(search) == 1
    assert search.table["author"][0] == "QLP"
    lc = search.download()
    assert type(lc).__name__ == "TessLightCurve"
    assert lc.sector == 11
    assert lc.author == "QLP"
python
__version__ = 0.1

import os
import logging
import configparser

import daiquiri
import daiquiri.formatter

_ROOT = os.path.dirname(os.path.abspath(__file__))
_CONFIG = os.path.join(_ROOT, 'config.ini')

FORMAT = (
    "%(asctime)s :: %(color)s%(levelname)s :: %(name)s :: %(funcName)s :"
    "%(message)s%(color_stop)s"
)

daiquiri.setup(level=logging.INFO, outputs=(
    daiquiri.output.Stream(formatter=daiquiri.formatter.ColorFormatter(
        fmt=FORMAT)),
))
logger = daiquiri.getLogger("root")

if not os.path.isfile(_CONFIG):
    logger.error("Configuration file '%s' not found", _CONFIG)
    config = None
else:
    config = configparser.ConfigParser(allow_no_value=True)
    with open(_CONFIG) as fobj:
        config.read_file(fobj)
python
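A minimal sketch of how downstream code might consume the `config` object exported by the module above, guarding against the `config = None` fallback when `config.ini` is missing. The package name `mypkg` and the `api`/`token` section and option names are hypothetical placeholders, not part of the original code.

# Hypothetical consumer of the config module above; `mypkg` and the
# 'api'/'token' names are placeholders introduced for illustration only.
from mypkg import config, logger

def get_api_token(default=None):
    # config is None when config.ini was not found at import time.
    if config is None or not config.has_section('api'):
        logger.warning("No 'api' section available; using default token")
        return default
    # allow_no_value=True means the option may exist with a value of None.
    return config.get('api', 'token', fallback=default)

print(get_api_token("dummy-token"))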
import numpy as np
from ._CFunctions import _Cgcpm
import DateTimeTools as TT


def GCPM(x, y, z, Date, ut, Kp=1.0, Verbose=False):
    '''
    Calculates the Global Core Plasma Model at some given position(s)
    and time(s).

    Inputs
    ======
    x : float
        Scalar or array of x_SM (Solar Magnetic coordinates) component
        of the position, where units are in R_E.
    y : float
        Scalar or array of y_SM.
    z : float
        Scalar or array of z_SM.
    Date : int
        Date(s) in format yyyymmdd.
    ut : float
        Time(s) in hours from beginning of day, where
        ut = hh + mm/60.0 + ss/3600.0.
    Kp : float
        Kp index (or indices).
    Verbose : bool
        If True, model calculation progress will be displayed.

    Returns
    =======
    ne : float32
        Array of electron densities in 1/cm^3.
    nH : float32
        Array of proton densities in 1/cm^3.
    nHe : float32
        Array of helium ion densities in 1/cm^3.
    nO : float32
        Array of oxygen ion densities in 1/cm^3.
    '''
    # reformat the positions
    _x = np.array([x]).flatten().astype('float32')
    _y = np.array([y]).flatten().astype('float32')
    _z = np.array([z]).flatten().astype('float32')
    _n = np.int32(_x.size)

    # sort out the dates
    dates = np.zeros(_n, dtype='int32') + Date
    _years = np.int32(dates // 10000)
    _dayno = np.int32(TT.DayNo(dates))

    # times
    _ut = np.zeros(_n, dtype='float32') + ut

    # Kp indices
    _kp = np.zeros(_n, dtype='float32') + Kp

    # Verbose flag
    _verb = np.int32(Verbose)

    # output arrays
    ne = np.zeros(_n, dtype='float32')
    nH = np.zeros(_n, dtype='float32')
    nHe = np.zeros(_n, dtype='float32')
    nO = np.zeros(_n, dtype='float32')

    # call the C wrapper
    _Cgcpm(_x, _y, _z, _years, _dayno, _ut, _kp, _n, ne, nH, nHe, nO, _verb)

    return ne, nH, nHe, nO
python
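A short usage sketch for the GCPM wrapper above. It assumes the surrounding package (with its compiled _Cgcpm library and the DateTimeTools dependency) is installed and importable as `gcpm`; that import path is an assumption, not confirmed by the source.

# Hedged usage sketch; the package name `gcpm` is an assumption.
import numpy as np
from gcpm import GCPM

# Three positions along the x_SM axis (units of R_E) at the same time.
x = np.array([2.0, 4.0, 6.0])
y = np.zeros(3)
z = np.zeros(3)

ne, nH, nHe, nO = GCPM(x, y, z, Date=20150101, ut=12.0, Kp=2.0)
print(ne)  # electron densities in 1/cm^3, one value per input position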
# Generated by Django 3.1.5 on 2021-01-23 02:13

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Cliente',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='created at')),
                ('modified', models.DateTimeField(auto_now=True, help_text='Date time on which the object was last modified.', verbose_name='modified at')),
                ('nombre', models.CharField(max_length=50, verbose_name='nombre cliente')),
                ('apellido', models.CharField(max_length=50, verbose_name='apellido cliente')),
                ('dpi', models.CharField(max_length=13, unique=True)),
                ('telefono', models.CharField(max_length=12, verbose_name='telefono cliente')),
                ('direccion', models.CharField(max_length=100, verbose_name='direccion cliente')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
python
# Natural Language Processing

# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Importing the dataset
dataset = pd.read_csv('googleplaystoreuserreviews.csv')
dataset.dropna(inplace=True)
X = dataset.iloc[:, 0].values

# Cleaning the texts
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer

corpus = []
for i in range(0, 37427):
    review = re.sub('[^a-zA-Z]', ' ', str(X[i]))
    review = review.lower()
    review = review.split()
    ps = PorterStemmer()
    review = [ps.stem(word) for word in review if not word in set(stopwords.words('english'))]
    review = ' '.join(review)
    corpus.append(review)

# Creating the Bag of Words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
x = cv.fit_transform(corpus).toarray()
y = dataset.iloc[:, 1].values

from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
y = labelencoder_X.fit_transform(y)

# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.20, random_state=0)

from sklearn.metrics import r2_score

# Fitting Logistic regression to the Training set
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
r2_score(y_test, y_pred)

# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)
y_pred1 = classifier.predict(X_test)
r2_score(y_test, y_pred1)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix, accuracy_score
cm = confusion_matrix(y_test, y_pred)
accuracy_score(y_test, y_pred)

from xgboost import XGBClassifier
classifier = XGBClassifier()
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Applying k-Fold Cross Validation
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator=classifier, X=X_train, y=y_train, cv=10)
accuracies.mean()
accuracies.std()
python
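The script above builds the bag-of-words features and fits several classifiers step by step, vectorising the whole corpus before splitting. As a point of comparison, roughly the same flow can be expressed with a scikit-learn Pipeline so the vectoriser is fitted only on the training split. This is an illustrative sketch that reuses the CSV path and column layout assumed by the script above; it is not a drop-in replacement for it.

# Illustrative refactor of the same bag-of-words + classifier flow.
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline

dataset = pd.read_csv('googleplaystoreuserreviews.csv').dropna()
X = dataset.iloc[:, 0].astype(str)   # review text
y = dataset.iloc[:, 1]               # sentiment label

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.20, random_state=0)

model = Pipeline([
    ('bow', CountVectorizer(stop_words='english', lowercase=True)),
    ('clf', LogisticRegression(random_state=0, max_iter=1000)),
])
model.fit(X_train, y_train)
print(accuracy_score(y_test, model.predict(X_test)))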
from linebot.models import TextSendMessage, FlexSendMessage from app.config import CELEBRATING_TARGET from app.crud.leaderboard import update_amount, get_list_of_amount from . import line_bot_api, exception_handler @exception_handler def celebrating_birthday(line_event): group_id = line_event.source.group_id user_id = line_event.source.user_id update_amount(group_id, user_id) line_bot_api.reply_message(line_event.reply_token, TextSendMessage("🎉")) @exception_handler def send_leaderboard(line_event): group_id = line_event.source.group_id line_bot_api.push_message( group_id, [TextSendMessage("집계중입니다...")], notification_disabled=True ) response = get_list_of_amount(group_id) contents = { "type": "bubble", "styles": {"header": {"backgroundColor": "#E3D3A3"}}, "header": { "type": "box", "layout": "vertical", "contents": [ { "type": "text", "text": "생일 축하 리더보드", "size": "xl", "align": "center", "weight": "bold", } ], }, "body": {"type": "box", "layout": "vertical", "spacing": "md", "contents": []}, "footer": { "type": "box", "layout": "vertical", "contents": [ { "type": "button", "action": { "type": "message", "label": "생일 축하하기", "text": f"{CELEBRATING_TARGET}아 생일 축하해!", }, "style": "primary", } ], }, } count = 1 rank = 1 last_amount = 0 for item in response["Items"]: if int(item["amount"]) != last_amount: rank = count last_amount = int(item["amount"]) user_id = item["user_id"] user_profile = line_bot_api.get_group_member_profile(group_id, user_id) user_name = user_profile.display_name leaderboard_item = { "type": "box", "layout": "horizontal", "contents": [ {"type": "text", "text": f"{rank}위", "flex": 3, "weight": "bold"}, {"type": "text", "text": user_name, "flex": 6, "weight": "bold"}, { "type": "text", "text": str(item["amount"]), "flex": 2, "align": "end", "gravity": "center", }, ], } if rank is 1: leaderboard_item["contents"][0]["size"] = "xxl" leaderboard_item["contents"][0]["color"] = "#A4B60F" leaderboard_item["contents"][1]["size"] = "xxl" elif rank is 2: leaderboard_item["contents"][0]["size"] = "xl" leaderboard_item["contents"][0]["color"] = "#878787" leaderboard_item["contents"][1]["size"] = "xl" elif rank is 3: leaderboard_item["contents"][0]["size"] = "lg" leaderboard_item["contents"][0]["color"] = "#8A6200" leaderboard_item["contents"][1]["size"] = "lg" else: pass contents["body"]["contents"].append(leaderboard_item) count += 1 line_bot_api.reply_message( line_event.reply_token, FlexSendMessage(alt_text="Leaderboard", contents=contents), )
python
#!/usr/bin/env python3

import unittest

import timeout_decorator

from challenges.codility.lessons.q019.stone_wall_v001 import *

MAX_N = 100000
MIN_ELEMENT = 1
MAX_ELEMENT = 1000000000


class StoneWallTestCase(unittest.TestCase):
    def test_description_examples(self):
        self.assertEqual(7, solution([8, 8, 5, 7, 9, 8, 7, 4, 8]))

    # Correctness

    def test_simple_1(self):
        self.assertEqual(1, solution([888]))

    def test_simple_2(self):
        self.assertEqual(1, solution([888, 888]))

    def test_simple_3(self):
        self.assertEqual(3, solution([888, 1, 888]))

    def test_simple_4(self):
        self.assertEqual(3, solution([5, 5, 4, 5]))
        self.assertEqual(3, solution([5, 5, 4, 4, 5]))

    def test_boundary_cases(self):
        n = 1000
        self.assertEqual(n - MIN_ELEMENT + 1, solution((range(MIN_ELEMENT, n + 1))))

    # Performance

    @timeout_decorator.timeout(0.015)
    def test_medium1(self):
        self.assertEqual(8, solution([4, 5, 6, 7, 7, 7, 8, 1, 3, 2]))

    @timeout_decorator.timeout(0.015)
    def test_medium2(self):
        self.assertEqual(3, solution([1, 2, 2, 1, 1, 1, 1, 1, 1, 2]))

    @timeout_decorator.timeout(0.015)
    def test_medium3(self):
        self.assertEqual(6, solution([17, 1, 17, 2, 2, 5, 5, 2, 5, 5]))

    @timeout_decorator.timeout(0.015)
    def test_medium4(self):
        self.assertEqual(15, solution([17, 5, 19, 69, 5, 10, 19, 92, 24, 11, 19, 95, 16, 8, 19, 68]))

    @timeout_decorator.timeout(0.350)
    def test_large_pyramid(self):
        start = 1
        end = 17000
        array = list(range(start, end + 1)) + list(range(end, start - 1, -1))
        self.assertEqual(end - start + 1, solution(array))

    @timeout_decorator.timeout(0.650)
    def test_large_increasing_decreasing(self):
        start = 2
        end = 20000
        array = list(range(start, end + 1, 2)) + list(range(end, start - 1, -2))
        self.assertEqual((end - start) // 2 + 1, solution(array))

        start = 3
        end = 21000
        array = list(range(start, end + 1, 3)) + list(range(end, start - 1, -3))
        self.assertEqual((end - start) // 3 + 1, solution(array))

    @timeout_decorator.timeout(0.350)
    def test_large_up_to_20(self):
        self.__test_sequence(200)

    @timeout_decorator.timeout(0.350)
    def test_large_up_to_100(self):
        self.__test_sequence(1000)

    @timeout_decorator.timeout(0.350)
    def test_large_max(self):
        self.__test_sequence(10000)

    def __test_sequence(self, n, start=MIN_ELEMENT):
        self.assertEqual(n, solution(range(start, start + n)))


if __name__ == '__main__':
    unittest.main()
python
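The tests above import `solution` from stone_wall_v001, which is not included here. A minimal stack-based sketch consistent with the stated expectations (the usual O(N) approach to Codility's StoneWall lesson) could look like the following; it is an illustration, not the repository's actual implementation.

def solution(H):
    # Count the minimum number of rectangular blocks needed to build a wall
    # whose height profile is H, keeping a stack of currently "open" heights.
    blocks = 0
    stack = []
    for height in H:
        # Close every open block that is taller than the current height.
        while stack and stack[-1] > height:
            stack.pop()
        # Reuse an open block of exactly this height; otherwise start a new one.
        if not (stack and stack[-1] == height):
            stack.append(height)
            blocks += 1
    return blocks

For the description example [8, 8, 5, 7, 9, 8, 7, 4, 8] this yields 7, matching test_description_examples.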
# Create CSS using GitHub's colour scheme from a JSON source like (https://github.com/doda/github-language-colors)
import json

with open('github_colors.json') as colors:
    with open('github_colors.css', 'w') as css:
        m = json.loads(colors.read())
        for lang in m:
            color = m[lang]
            lang_safe = lang.replace('+', 'plus').replace('#', 'sharp').replace(' ', '')
            css.write('.project-{0} {{ border-bottom: 5px solid {1}; }}\n'.format(lang_safe, m[lang]))
python
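For illustration, a self-contained demo of the same name-sanitising and CSS-emitting logic on a few made-up entries; the colour values here are placeholders, not authoritative GitHub colours.

# Hypothetical sample input and the rules the loop above would emit for it.
sample = {"C++": "#f34b7d", "C#": "#178600", "Jupyter Notebook": "#DA5B0B"}
for lang, color in sample.items():
    lang_safe = lang.replace('+', 'plus').replace('#', 'sharp').replace(' ', '')
    print('.project-{0} {{ border-bottom: 5px solid {1}; }}'.format(lang_safe, color))
# Prints, e.g.:
#   .project-Cplusplus { border-bottom: 5px solid #f34b7d; }
#   .project-Csharp { border-bottom: 5px solid #178600; }
#   .project-JupyterNotebook { border-bottom: 5px solid #DA5B0B; }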
TRAINING_DATA = [
    (
        "i went to amsterdem last year and the canals were beautiful",
        {"entities": [(10, 19, "TOURIST_DESTINATION")]},
    ),
    (
        "You should visit Paris once in your life, but the Eiffel Tower is kinda boring",
        {"entities": [(17, 22, "TOURIST_DESTINATION")]},
    ),
    ("There's also a Paris in Arkansas, lol", {"entities": []}),
    (
        "Berlin is perfect for summer holiday: lots of parks, great nightlife, cheap beer!",
        {"entities": [(0, 6, "TOURIST_DESTINATION")]},
    ),
]
python
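A small sanity check one might run on annotations like these, slicing each text by the stored character offsets to confirm the spans line up with the intended label (plain Python, no NLP library required):

# Verify that each (start, end, label) annotation matches the text it points at.
for text, annotations in TRAINING_DATA:
    for start, end, label in annotations["entities"]:
        print(f"{label}: {text[start:end]!r}")
# Expected output lists 'amsterdem', 'Paris' and 'Berlin' as
# TOURIST_DESTINATION spans (the misspelling is present in the source data).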
""" @leofansq Basic function: show_img(name, img): Show the image find_files(directory, pattern): Method to find target files in one directory, including subdirectory Load function: load_calib_cam2cam(filename, debug=False): Only load R_rect & P_rect for need load_calib_lidar2cam(filename, debug=False): Load calib parameters for LiDAR2Cam load_calib(filename, debug=False): Load the calib parameters which has R_rect & P_rect & Tr in the same file load_img(filename, debug=False): Load the image load_lidar(filename, debug=False): Load the PointCloud Process function: cal_proj_matrix_raw(filename_c2c, filename_l2c, camera_id, debug=False): Compute the projection matrix from LiDAR to Img cal_proj_matrix(filename, camera_id, debug=False): Compute the projection matrix from LiDAR to Image project_lidar2img(img, pc, p_matrix, debug=False): Project the LiDAR PointCloud to Image generate_colorpc(img, pc, pcimg, debug=False): Generate the PointCloud with color save_pcd(filename, pc_color): Save the PointCloud with color in the term of .pcd """ import cv2 import numpy as np from pyntcloud import PyntCloud import os import fnmatch from tqdm import tqdm from pprint import pprint #**********************************************************# # Basic Function # #**********************************************************# def show_img(name, img): """ Show the image Parameters: name: name of window img: image """ cv2.namedWindow(name, 0) cv2.imshow(name, img) cv2.waitKey(50) def find_files(directory, pattern): """ Method to find target files in one directory, including subdirectory :param directory: path :param pattern: filter pattern :return: target file path list """ file_list = [] for root, _, files in os.walk(directory): for basename in files: if fnmatch.fnmatch(basename, pattern): filename = os.path.join(root, basename) file_list.append(filename) return file_list #**********************************************************# # Load Function # #**********************************************************# def load_calib_cam2cam(filename, debug=False): """ Only load R_rect & P_rect for neeed Parameters: filename of the calib file Return: R_rect: a list of r_rect(shape:3*3) P_rect: a list of p_rect(shape:3*4) """ with open(filename) as f_calib: lines = f_calib.readlines() R_rect = [] P_rect = [] for line in lines: title = line.strip().split(' ')[0] if title[:-4] == "R_rect": r_r = np.array(line.strip().split(' ')[1:], dtype=np.float32) r_r = np.reshape(r_r, (3,3)) R_rect.append(r_r) elif title[:-4] == "P_rect": p_r = np.array(line.strip().split(' ')[1:], dtype=np.float32) p_r = np.reshape(p_r, (3,4)) P_rect.append(p_r) if debug: print ("R_rect:") pprint (R_rect) print () print ("P_rect:") pprint (P_rect) return R_rect, P_rect def load_calib_lidar2cam(filename, debug=False): """ Load calib Parameters: filename of the calib file Return: tr: shape(4*4) [ r t 0 0 0 1] """ with open(filename) as f_calib: lines = f_calib.readlines() for line in lines: title = line.strip().split(' ')[0] if title[:-1] == "R": r = np.array(line.strip().split(' ')[1:], dtype=np.float32) r = np.reshape(r, (3,3)) if title[:-1] == "T": t = np.array(line.strip().split(' ')[1:], dtype=np.float32) t = np.reshape(t, (3,1)) tr = np.hstack([r,t]) tr = np.vstack([tr,np.array([0,0,0,1])]) if debug: print () print ("Tr:") print (tr) return tr def load_calib(filename, debug=False): """ Load the calib parameters which has R_rect & P_rect & Tr in the same file Parameters: filename: the filename of the calib file Return: R_rect, P_rect, Tr """ with 
open(filename) as f_calib: lines = f_calib.readlines() P_rect = [] for line in lines: title = line.strip().split(' ')[0] if len(title): if title[0] == "R": R_rect = np.array(line.strip().split(' ')[1:], dtype=np.float32) R_rect = np.reshape(R_rect, (3,3)) elif title[0] == "P": p_r = np.array(line.strip().split(' ')[1:], dtype=np.float32) p_r = np.reshape(p_r, (3,4)) P_rect.append(p_r) elif title[:-1] == "Tr_velo_to_cam": Tr = np.array(line.strip().split(' ')[1:], dtype=np.float32) Tr = np.reshape(Tr, (3,4)) Tr = np.vstack([Tr,np.array([0,0,0,1])]) return R_rect, P_rect, Tr def load_img(filename, debug=False): """ Load the image Parameter: filename: the filename of the image Return: img: image """ img = cv2.imread(filename) if debug: show_img("Image", img) return img def load_lidar(filename, debug=False): """ Load the PointCloud Parameter: filename: the filename of the PointCloud Return: points: PointCloud associated with the image """ # N*4 -> N*3 points = np.fromfile(filename, dtype=np.float32) points = np.reshape(points, (-1,4)) points = points[:, :3] points.tofile("./temp_pc.bin") # Remove all points behind image plane (approximation) cloud = PyntCloud.from_file("./temp_pc.bin") cloud.points = cloud.points[cloud.points["x"]>=0] points = np.array(cloud.points) if debug: print (points.shape) return points #**********************************************************# # Process Function # #**********************************************************# def cal_proj_matrix_raw(filename_c2c, filename_l2c, camera_id, debug=False): """ Compute the projection matrix from LiDAR to Img Parameters: filename_c2c: filename of the calib file for cam2cam filename_l2c: filename of the calib file for lidar2cam camera_id: the NO. of camera Return: P_lidar2img: the projection matrix from LiDAR to Img """ # Load Calib Parameters R_rect, P_rect = load_calib_cam2cam(filename_c2c, debug) tr = load_calib_lidar2cam(filename_l2c, debug) # Calculation R_cam2rect = np.hstack([R_rect[0], np.array([[0],[0],[0]])]) R_cam2rect = np.vstack([R_cam2rect, np.array([0,0,0,1])]) P_lidar2img = np.matmul(P_rect[camera_id], R_cam2rect) P_lidar2img = np.matmul(P_lidar2img, tr) if debug: print () print ("P_lidar2img:") print (P_lidar2img) return P_lidar2img def cal_proj_matrix(filename, camera_id, debug=False): """ Compute the projection matrix from LiDAR to Img Parameters: filename: filename of the calib file camera_id: the NO. 
of camera Return: P_lidar2img: the projection matrix from LiDAR to Img """ # Load Calib Parameters R_rect, P_rect, tr = load_calib(filename, debug) # Calculation R_cam2rect = np.hstack([R_rect, np.array([[0],[0],[0]])]) R_cam2rect = np.vstack([R_cam2rect, np.array([0,0,0,1])]) P_lidar2img = np.matmul(P_rect[camera_id], R_cam2rect) P_lidar2img = np.matmul(P_lidar2img, tr) if debug: print () print ("P_lidar2img:") print (P_lidar2img) return P_lidar2img def project_lidar2img(img, pc, p_matrix, debug=False): """ Project the LiDAR PointCloud to Image Parameters: img: Image pc: PointCloud p_matrix: projection matrix """ # Dimension of data & projection matrix dim_norm = p_matrix.shape[0] dim_proj = p_matrix.shape[1] # Do transformation in homogenuous coordinates pc_temp = pc.copy() if pc_temp.shape[1]<dim_proj: pc_temp = np.hstack([pc_temp, np.ones((pc_temp.shape[0],1))]) points = np.matmul(p_matrix, pc_temp.T) points = points.T temp = np.reshape(points[:,dim_norm-1], (-1,1)) points = points[:,:dim_norm]/(np.matmul(temp, np.ones([1,dim_norm]))) # Plot if debug: img_copy = img.copy() depth_max = np.max(pc[:,0]) for idx,i in enumerate(points): color = int((pc[idx,0]/depth_max)*255) cv2.rectangle(img_copy, (int(i[0]-1),int(i[1]-1)), (int(i[0]+1),int(i[1]+1)), (0, 0, color), -1) show_img("Test", img_copy) return points def generate_colorpc(img, pc, pcimg, debug=False): """ Generate the PointCloud with color Parameters: img: image pc: PointCloud pcimg: PointCloud project to image Return: pc_color: PointCloud with color e.g. X Y Z R G B """ x = np.reshape(pcimg[:,0], (-1,1)) y = np.reshape(pcimg[:,1], (-1,1)) xy = np.hstack([x,y]) pc_color = [] for idx, i in enumerate(xy): if (i[0]>1 and i[0]<img.shape[1]) and (i[1]>1 and i[1]<img.shape[0]): bgr = img[int(i[1]), int(i[0])] p_color = [pc[idx][0], pc[idx][1], pc[idx][2], bgr[2], bgr[1], bgr[0]] pc_color.append(p_color) pc_color = np.array(pc_color) return pc_color def save_pcd(filename, pc_color): """ Save the PointCloud with color in the term of .pcd Parameter: filename: filename of the pcd file pc_color: PointCloud with color """ f = open(filename, "w") f.write("# .PCD v0.7 - Point Cloud Data file format\n") f.write("VERSION 0.7\n") f.write("FIELDS x y z rgb\n") f.write("SIZE 4 4 4 4\n") f.write("TYPE F F F U\n") f.write("COUNT 1 1 1 1\n") f.write("WIDTH {}\n".format(pc_color.shape[0])) f.write("WIDTH {}\n".format(pc_color.shape[0])) f.write("HEIGHT 1\n") f.write("POINTS {}\n".format(pc_color.shape[0])) f.write("DATA ascii\n") for i in pc_color: rgb = (int(i[3])<<16) | (int(i[4])<<8) | (int(i[5]) | 1<<24) f.write("{:.6f} {:.6f} {:.6f} {}\n".format(i[0],i[1],i[2],rgb)) # f.write("{:.6f} {:.6f} {:.6f} {}\n".format(i[0],i[1],i[2],i[3],i[4],i[5])) f.close() if __name__ == '__main__': # Option calib_cam2cam = "./calib/calib_cam_to_cam.txt" calib_lidar2camera = "./calib/calib_velo_to_cam.txt" camera_id = 1 filepath_img = "./img/000003.png" # filepath_img = "./new.png" filepath_lidar = "./lidar/000003.bin" filename_save = "./test.pcd" debug = True # Process p_matrix = cal_proj_matrix_raw(calib_cam2cam, calib_lidar2camera, camera_id, debug) img = load_img(filepath_img, debug) img = img[0:150,0:500] pc = load_lidar(filepath_lidar, debug) pcimg = project_lidar2img(img, pc, p_matrix, debug) pc_color = generate_colorpc(img, pc, pcimg) save_pcd(filename_save, pc_color) if debug: key = cv2.waitKey(0) & 0xFF cv2.destroyAllWindows()
python
from dotenv.main import find_dotenv import tweepy import time import random from dotenv import load_dotenv import os import requests load_dotenv(find_dotenv()) API_KEY = os.getenv('API_KEY') API_SECRET_KEY = os.getenv('API_SECRET_KEY') ACCESS_TOKEN = os.getenv('ACCESS_TOKEN') ACCESS_TOKEN_SECRET = os.getenv('ACCESS_TOKEN_SECRET') auth = tweepy.OAuthHandler(API_KEY, API_SECRET_KEY) auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET) api = tweepy.API(auth, wait_on_rate_limit=True) try: api.verify_credentials() print("Authentication successful!\n") except: print("Unable to authenticate...") for i in range(0, 1000): try: response = requests.get( "https://api.spaceflightnewsapi.net/v3/articles") res = response.json() rand_no = random.randint(0, 9) tweet = res[rand_no]["summary"]+" "+res[rand_no]["url"] if(len(tweet) > 280): tweet = res[rand_no]["title"]+". "+res[rand_no]["url"] print("\nSummary longer than 280 so tweeted title") api.update_status(tweet) print(tweet+" Tweeted\n") i = i+1 time.sleep(86400) except tweepy.TweepyException as e: print(e) except StopIteration: break
python
# Copyright 2017 Hosang Yoon # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Program for training Use as (for example): DEV="device=cuda0" # single GPU DEV="contexts=dev0->cuda0;dev1->cuda1" # multi GPU (currently incomplete) FLAGS="floatX=float32,"$DEV",gpuarray.preallocate=1,base_compiledir=theano" THEANO_FLAGS=$FLAGS python -u train.py --data_dir=$DATA_DIR \ --save_to=$WORKSPACE_DIR/workspace_$NAME \ [--load_from=$WORKSPACE_DIR/workspace_$LOADNAME] [--seed=some_number] \ | tee -a $WORKSPACE_DIR/$NAME".log" - Device "cuda$" means $-th GPU - Flag contexts can map any number of GPUs to be used for data parallelism (this feature is incomplete until Theano completes implementation of support for this flag) - Flag gpuarray.preallocate reserves given ratio of GPU mem (reduce if needed) - Flag base_compiledir directs intermediate files to pwd/theano to avoid lock conflicts between multiple training instances (by default ~/.theano) - $NAME == $LOADNAME is permitted """ from __future__ import absolute_import, division, print_function from six import iterkeys, itervalues, iteritems from collections import OrderedDict import argparse from net import Net from data import build_id_idx, DataIter import time import numpy as np import theano as th from subprocess import call import sys def main(): options = OrderedDict() options['input_dim'] = 44 options['target_dim'] = 1 options['unit_type'] = 'lstm' # fc/lstm/gru options['lstm_peephole'] = True options['loss_type'] = 'l2' # l2/l1/huber # options['huber_delta'] = 0.33 # depends on target's scale options['net_width'] = 512 options['net_depth'] = 12 options['batch_size'] = 128 options['window_size'] = 128 options['step_size'] = 64 options['init_scale'] = 0.02 options['init_use_ortho'] = False options['weight_norm'] = False options['layer_norm'] = False options['residual_gate'] = True options['learn_init_states'] = True options['learn_id_embedding'] = False # options['id_embedding_dim'] = 16 options['learn_clock_params'] = False # options['clock_t_exp_lo'] = 1. # for learn_clock_params # options['clock_t_exp_hi'] = 6. # for learn_clock_params # options['clock_r_on'] = 0.2 # for learn_clock_params # options['clock_leak_rate'] = 0.001 # for learn_clock_params # options['grad_norm_clip'] = 2. 
# comment out to turn off options['update_type'] = 'nesterov' # sgd/momentum/nesterov options['update_mu'] = 0.9 # for momentum/nesterov options['force_type'] = 'adadelta' # vanilla/adadelta/rmsprop/adam options['force_ms_decay'] = 0.99 # for adadelta/rmsprop # options['force_adam_b1'] = 0.9 # options['force_adam_b2'] = 0.999 options['frames_per_epoch'] = 8 * 1024 * 1024 options['lr_init_val'] = 1e-5 options['lr_lower_bound'] = 1e-7 options['lr_decay_rate'] = 0.5 options['max_retry'] = 10 options['unroll_scan'] = False # faster training/slower compile if options['unroll_scan']: sys.setrecursionlimit(32 * options['window_size']) # 32 is empirical """ Parse arguments, list files, and THEANO_FLAG settings """ parser = argparse.ArgumentParser() parser.add_argument('--data_dir' , type = str, required = True) parser.add_argument('--save_to' , type = str, required = True) parser.add_argument('--load_from', type = str) parser.add_argument('--seed' , type = int) args = parser.parse_args() assert 0 == call(str('mkdir -p ' + args.save_to).split()) # store mean/whitening matrices from Reshaper (remove if inapplicable) assert 0 == call(str('cp ' + args.data_dir + '/mean.matrix ' + args.save_to).split()) assert 0 == call(str('cp ' + args.data_dir + '/whitening.matrix ' + args.save_to).split()) # store ID count, internal ID order, and number of sequences id_idx = build_id_idx(args.data_dir + '/train.list') options['id_count'] = len(id_idx) with open(args.save_to + '/ids.order', 'w') as f: f.write(';'.join(iterkeys(id_idx))) # code_0;...;code_N-1 def n_seqs(list_file): with open(list_file) as f: return sum(1 for line in f) n_seqs_train = n_seqs(args.data_dir + '/train.list') n_seqs_dev = n_seqs(args.data_dir + '/dev.list') # list of context_name's (THEANO_FLAGS=contexts=... for multi GPU mode) c_names = [m.split('->')[0] for m in th.config.contexts.split(';')] \ if th.config.contexts != "" else None # for replicating previous experiments seed = np.random.randint(np.iinfo(np.int32).max) \ if args.seed is None else args.seed np.random.seed(seed) """ Print summary for logging """ def print_hline(): print(''.join('-' for _ in range(79))) lapse_from = lambda start: ('(' + ('%.1f' % (time.time() - start)).rjust(7) + ' sec)') print_hline() # ----------------------------------------------------------- print('Data location : ' + args.data_dir) if args.load_from is not None: print('Re-train from : ' + args.load_from) print('Save model to : ' + args.save_to) print_hline() # ----------------------------------------------------------- print('Options') maxlen = max(len(k) for k in options.keys()) for k, v in iteritems(options): print(' ' + k.ljust(maxlen) + ' : ' + str(v)) print_hline() # ----------------------------------------------------------- print('Stats') print(' np.random.seed : ' + str(seed).rjust(10)) print(' # of train seqs : ' + str(n_seqs_train).rjust(10)) print(' # of dev seqs : ' + str(n_seqs_dev ).rjust(10)) print(' # of unique IDs : ' + str(options['id_count']).rjust(10)) print(' # of weights : ', end = '') net = Net(options, args.save_to, args.load_from, c_names) # takes few secs print(str(net.n_weights()).rjust(10)) """ Compile th.function's (time consuming) and prepare for training """ print_hline() # ----------------------------------------------------------- print('Compiling fwd/bwd propagators... 
', end = '') # takes minutes ~ start = time.time() # hours (unroll_scan) f_fwd_bwd_propagate = net.compile_f_fwd_bwd_propagate() f_fwd_propagate = net.compile_f_fwd_propagate() print(lapse_from(start)) print('Compiling updater/initializer... ', end = '') start = time.time() f_update_v_params = net.compile_f_update_v_params() f_initialize_optimizer = net.compile_f_initialize_optimizer() print(lapse_from(start)) # NOTE: window_size must be the same as that given to Net train_data = DataIter(list_file = args.data_dir + '/train.list', window_size = options['window_size'], step_size = options['step_size'], batch_size = options['batch_size'], input_dim = options['input_dim'], target_dim = options['target_dim'], id_idx = id_idx) dev_data = DataIter(list_file = args.data_dir + '/dev.list', window_size = options['window_size'], step_size = options['step_size'], batch_size = options['batch_size'], input_dim = options['input_dim'], target_dim = options['target_dim'], id_idx = id_idx) chunk_size = options['step_size'] * options['batch_size'] trained_frames_per_epoch = \ (options['frames_per_epoch'] // chunk_size) * chunk_size def run_epoch(data_iter, lr_cur): """ lr_cur sets the running mode float training None inference """ is_training = lr_cur is not None if is_training: # apply BPTT(window_size; step_size) step_size = options['step_size'] else: # set next_prev_idx = window_size - 1 for efficiency step_size = options['window_size'] frames_per_step = step_size * options['batch_size'] data_iter.discard_unfinished() data_iter.set_step_size(step_size) loss_sum = 0. frames_seen = 0 for input_tbi, target_tbi, time_tb, id_idx_tb in data_iter: if is_training: loss = f_fwd_bwd_propagate(input_tbi, target_tbi, time_tb, id_idx_tb, step_size) else: loss = f_fwd_propagate(input_tbi, target_tbi, time_tb, id_idx_tb, step_size) loss_sum += np.asscalar(loss[0]) frames_seen += frames_per_step if is_training: f_update_v_params(lr_cur) if frames_seen >= trained_frames_per_epoch: break return np.float32(loss_sum / frames_seen) """ Scheduled learning rate annealing with patience Adapted from https://github.com/KyuyeonHwang/Fractal """ # Names for saving/loading name_pivot = '0' name_prev = '1' name_best = None # auto trained_frames = 0 trained_frames_at_pivot = 0 trained_frames_at_best = 0 discarded_frames = 0 loss_pivot = 0. loss_prev = 0. loss_best = 0. cur_retry = 0 lr = options['lr_init_val'] f_initialize_optimizer() net.save_to_workspace(name_prev) net.save_to_workspace(name_best) while True: print_hline() # ------------------------------------------------------- print('Training... ', end = '') start = time.time() loss_train = run_epoch(train_data, lr) print(lapse_from(start)) trained_frames += trained_frames_per_epoch print('Evaluating... 
', end = '') start = time.time() loss_cur = run_epoch(dev_data, None) print(lapse_from(start)) print('Total trained frames : ' + str(trained_frames ).rjust(12)) print('Total discarded frames : ' + str(discarded_frames).rjust(12)) print('Train loss : %.6f' % loss_train) print('Eval loss : %.6f' % loss_cur, end = '') if np.isnan(loss_cur): loss_cur = np.float32('inf') if loss_cur < loss_best or trained_frames == trained_frames_per_epoch: print(' (best)', end = '') trained_frames_at_best = trained_frames loss_best = loss_cur net.save_to_workspace(name_best) print('') if loss_cur > loss_prev and trained_frames > trained_frames_per_epoch: print_hline() # --------------------------------------------------- cur_retry += 1 if cur_retry > options['max_retry']: cur_retry = 0 lr *= options['lr_decay_rate'] if lr < options['lr_lower_bound']: break # cur <- pivot & prev <- cur discard = trained_frames - trained_frames_at_pivot discarded_frames += discard trained_frames = trained_frames_at_pivot net.load_from_workspace(name_pivot) f_initialize_optimizer() loss_prev = loss_pivot net.save_to_workspace(name_prev) print('Discard recently trained ' + str(discard) + ' frames') print('New learning rate : ' + str(lr)) else: print('Retry count : ' + str(cur_retry) + ' / ' + str(options['max_retry'])) else: cur_retry = 0 # pivot <- prev & prev <- cur trained_frames_at_pivot = trained_frames - trained_frames_per_epoch loss_pivot, loss_prev = loss_prev, loss_cur name_pivot, name_prev = name_prev, name_pivot net.save_to_workspace(name_prev) discarded_frames += trained_frames - trained_frames_at_best trained_frames = trained_frames_at_best net.load_from_workspace(name_best) net.remove_from_workspace(name_pivot) net.remove_from_workspace(name_prev) print('') print('Best network') print('Total trained frames : ' + str(trained_frames ).rjust(12)) print('Total discarded frames : ' + str(discarded_frames).rjust(12)) print('[Train set] Loss : %.6f' % run_epoch(train_data, None)) print('[ Dev set ] Loss : %.6f' % run_epoch(dev_data , None)) print('') if __name__ == '__main__': main()
python
from .mlp_score_head import MLPScoreHead

__all__ = [
    'MLPScoreHead'
]
python
import pytest

from game import seed_grid, parse_args, print_grid, get_neighbours, live_or_die


def test_parser():
    with pytest.raises(BaseException):
        parse_args(["-x", "-y", "-c"])

    args = parse_args(["-x 10", "-y 20", "-c (1,1),(2,2),(5,4)"])
    assert args.x == 10
    assert args.y == 20
    assert args.cells == [[(1, 1), (2, 2), (5, 4)]]


def test_seed():
    grid = seed_grid(10, 20, [(0, 0), (9, 19)])
    assert len(grid) == 10
    assert len(grid[0]) == 20
    assert grid[0][0] == "L"
    assert grid[9][19] == "L"
    assert grid[1][19] == " "


def test_print(capsys):
    grid = seed_grid(4, 4, [(1, 2)])
    print_grid(grid)
    captured = capsys.readouterr()
    assert captured.out == "| | | | |\n| | | | |\n| |L| | |\n| | | | |\n"


def test_neighbours():
    grid = seed_grid(4, 4, [(0, 0), (2, 2)])
    live = get_neighbours(grid, 1, 1)
    assert live == 2

    grid = seed_grid(4, 4, [])
    live = get_neighbours(grid, 3, 2)
    assert live == 0

    grid = seed_grid(4, 4, [(0, 0), (0, 1)])
    live = get_neighbours(grid, 0, 2)
    assert live == 1

    grid = seed_grid(4, 4, [(3, 1)])
    live = get_neighbours(grid, 2, 0)
    assert live == 1

    grid = seed_grid(4, 4, [(3, 0)])
    live = get_neighbours(grid, 2, 3)
    assert live == 1


def test_live_or_die():
    assert live_or_die("L", 1) == " "
    assert live_or_die("L", 4) == " "
    assert live_or_die("L", 3) == "L"
    assert live_or_die(" ", 3) == "L"
    assert live_or_die(" ", 2) == " "
python
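The game module under test is not shown. Implementations of live_or_die and get_neighbours consistent with these tests could look like the sketch below; the wrap-around (toroidal) neighbour counting is inferred from the last two cases in test_neighbours and is an assumption about the original code, not a confirmed detail.

def live_or_die(cell, live_neighbours):
    # Conway's rules as implied by the tests: a live cell survives with two
    # or three live neighbours; a dead cell becomes live with exactly three.
    if cell == "L":
        return "L" if live_neighbours in (2, 3) else " "
    return "L" if live_neighbours == 3 else " "


def get_neighbours(grid, x, y):
    # Count live neighbours of cell (x, y), wrapping at the edges.
    rows, cols = len(grid), len(grid[0])
    count = 0
    for dx in (-1, 0, 1):
        for dy in (-1, 0, 1):
            if dx == 0 and dy == 0:
                continue
            if grid[(x + dx) % rows][(y + dy) % cols] == "L":
                count += 1
    return count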
# -*- coding: utf-8 -*-
import scrapy
import pandas as pd


class FirstSpider(scrapy.Spider):
    name = 'first'

    def start_requests(self):
        urls = ['https://www.worldometers.info/coronavirus/#countries']
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        table = pd.read_html(response.text)
        print(table)
python
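One way to run the spider above outside a full Scrapy project is via Scrapy's CrawlerProcess, as sketched below; the module name first_spider is a hypothetical filename, and `scrapy runspider first_spider.py` from the shell is the simpler alternative.

# Hedged runner sketch; `first_spider` is an assumed module name.
from scrapy.crawler import CrawlerProcess
from first_spider import FirstSpider

process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
process.crawl(FirstSpider)
process.start()  # blocks until the crawl finishes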
from future import standard_library standard_library.install_aliases() import datetime import json import os import re import time from collections import namedtuple, defaultdict from urllib.parse import urlparse, urljoin from io import BytesIO import flask import sqlalchemy.sql from flask import abort from flask import current_app from flask import flash from flask import g from flask import make_response from flask import render_template from flask import request, url_for from flask import send_file from flask_wtf import Form from sqlalchemy.orm import joinedload from sqlalchemy.orm.exc import NoResultFound from typing import Optional from wtforms import SelectField, StringField, SubmitField from wtforms.validators import DataRequired, Length import lnt.server.db.rules_manager import lnt.server.db.search import lnt.server.reporting.analysis import lnt.server.reporting.dailyreport import lnt.server.reporting.latestrunsreport import lnt.server.reporting.runs import lnt.server.reporting.summaryreport import lnt.server.ui.util import lnt.util import lnt.util.ImportData import lnt.util.stats from lnt.external.stats import stats as ext_stats from lnt.server.db import testsuitedb from lnt.server.reporting.analysis import ComparisonResult, calc_geomean from lnt.server.ui import util from lnt.server.ui.decorators import frontend, db_route, v4_route from lnt.server.ui.globals import db_url_for, v4_url_for, v4_redirect from lnt.server.ui.util import FLASH_DANGER, FLASH_SUCCESS, FLASH_INFO from lnt.server.ui.util import PrecomputedCR from lnt.server.ui.util import baseline_key, convert_revision from lnt.server.ui.util import mean from lnt.testing import PASS from lnt.util import logger from lnt.util import multidict from lnt.util import stats # http://flask.pocoo.org/snippets/62/ def is_safe_url(target): ref_url = urlparse(request.host_url) test_url = urlparse(urljoin(request.host_url, target)) return test_url.scheme in ('http', 'https') and \ ref_url.netloc == test_url.netloc def get_redirect_target(): for target in request.values.get('next'), request.referrer: if not target: continue if is_safe_url(target): return target ### # Root-Only Routes @frontend.route('/favicon.ico') def favicon_ico(): return v4_redirect(url_for('.static', filename='favicon.ico')) @frontend.route('/select_db') def select_db(): path = request.args.get('path') db = request.args.get('db') if path is None: abort(400, "'path' argument is missing") if db not in current_app.old_config.databases: abort(404, "'db' argument is missing or invalid") # Rewrite the path. 
new_path = "/db_%s" % db if not path.startswith("/db_"): new_path += path else: if '/' in path[1:]: new_path += "/" + path.split("/", 2)[2] return v4_redirect(request.script_root + new_path) ##### # Per-Database Routes @db_route('/') def index(): return render_template("index.html") ### # Database Actions def _do_submit(): assert request.method == 'POST' input_file = request.files.get('file') input_data = request.form.get('input_data') if 'select_machine' not in request.form and \ 'update_machine' in request.form: # Compatibility with old clients update_machine = int(request.form.get('update_machine', 0)) != 0 select_machine = 'update' if update_machine else 'match' else: select_machine = request.form.get('select_machine', 'match') merge_run = request.form.get('merge', None) ignore_regressions = request.form.get('ignore_regressions', False) \ or getattr(current_app.old_config, 'ignore_regressions', False) if input_file and not input_file.content_length: input_file = None if not input_file and not input_data: return render_template( "submit_run.html", error="must provide input file or data") if input_file and input_data: return render_template( "submit_run.html", error="cannot provide input file *and* data") if input_file: data_value = input_file.read() else: data_value = input_data # The following accomodates old submitters. Note that we explicitely # removed the tag field from the new submission format, this is only here # for old submission jobs. The better way of doing it is mentioning the # correct test-suite in the URL. So when submitting to suite YYYY use # db_XXX/v4/YYYY/submitRun instead of db_XXXX/submitRun! if g.testsuite_name is None: try: data = json.loads(data_value) Run = data.get('Run') if Run is not None: Info = Run.get('Info') if Info is not None: g.testsuite_name = Info.get('tag') except Exception: pass if g.testsuite_name is None: g.testsuite_name = 'nts' # Get a DB connection. session = request.session db = request.get_db() result = lnt.util.ImportData.import_from_string( current_app.old_config, g.db_name, db, session, g.testsuite_name, data_value, select_machine=select_machine, merge_run=merge_run, ignore_regressions=ignore_regressions) # It is nice to have a full URL to the run, so fixup the request URL # here were we know more about the flask instance. if result.get('result_url'): result['result_url'] = request.url_root + result['result_url'] response = flask.jsonify(**result) error = result['error'] if error is not None: response.status_code = 400 logger.warning("%s: Submission rejected: %s" % (request.url, error)) return response def ts_data(ts): """Data about the current testsuite used by layout.html which should be present in most templates.""" baseline_id = flask.session.get(baseline_key(ts.name)) baselines = request.session.query(ts.Baseline).all() return { 'baseline_id': baseline_id, 'baselines': baselines, 'ts': ts } @db_route('/submitRun', methods=('GET', 'POST')) def submit_run(): """Compatibility url that hardcodes testsuite to 'nts'""" if request.method == 'GET': g.testsuite_name = 'nts' return v4_redirect(v4_url_for('.v4_submitRun')) # This route doesn't know the testsuite to use. We have some defaults/ # autodetection for old submissions, but really you should use the full # db_XXX/v4/YYYY/submitRun URL when using non-nts suites. 
g.testsuite_name = None return _do_submit() @v4_route('/submitRun', methods=('GET', 'POST')) def v4_submitRun(): if request.method == 'GET': ts = request.get_testsuite() return render_template("submit_run.html", **ts_data(ts)) return _do_submit() ### # V4 Schema Viewer @v4_route("/") def v4_overview(): ts = request.get_testsuite() return render_template("v4_overview.html", testsuite_name=g.testsuite_name, **ts_data(ts)) @v4_route("/recent_activity") def v4_recent_activity(): session = request.session ts = request.get_testsuite() # Get the most recent runs in this tag, we just arbitrarily limit to # looking at the last 100 submission. recent_runs = session.query(ts.Run) \ .options(joinedload(ts.Run.order)) \ .options(joinedload(ts.Run.machine)) \ .order_by(ts.Run.start_time.desc()).limit(100) recent_runs = recent_runs.all() # Compute the active machine list. active_machines = dict((run.machine.name, run) for run in recent_runs[::-1]) # Compute the active submission list. # # FIXME: Remove hard coded field use here. N = 30 active_submissions = [(r, r.order.llvm_project_revision) for r in recent_runs[:N]] return render_template("v4_recent_activity.html", testsuite_name=g.testsuite_name, active_machines=active_machines, active_submissions=active_submissions, **ts_data(ts)) @v4_route("/machine/") def v4_machines(): # Compute the list of associated runs, grouped by order. # Gather all the runs on this machine. session = request.session ts = request.get_testsuite() machines = session.query(ts.Machine).order_by(ts.Machine.name) return render_template("all_machines.html", machines=machines, **ts_data(ts)) @v4_route("/machine/<int:machine_id>/latest") def v4_machine_latest(machine_id): """Return the most recent run on this machine.""" session = request.session ts = request.get_testsuite() run = session.query(ts.Run) \ .filter(ts.Run.machine_id == machine_id) \ .order_by(ts.Run.start_time.desc()) \ .first() return v4_redirect(v4_url_for('.v4_run', id=run.id, **request.args)) @v4_route("/machine/<int:machine_id>/compare") def v4_machine_compare(machine_id): """Return the most recent run on this machine.""" session = request.session ts = request.get_testsuite() machine_compare_to_id = int(request.args['compare_to_id']) machine_1_run = session.query(ts.Run) \ .filter(ts.Run.machine_id == machine_id) \ .order_by(ts.Run.start_time.desc()) \ .first() machine_2_run = session.query(ts.Run) \ .filter(ts.Run.machine_id == machine_compare_to_id) \ .order_by(ts.Run.start_time.desc()) \ .first() return v4_redirect(v4_url_for('.v4_run', id=machine_1_run.id, compare_to=machine_2_run.id)) @v4_route("/machine/<int:id>") def v4_machine(id): # Compute the list of associated runs, grouped by order. # Gather all the runs on this machine. 
session = request.session ts = request.get_testsuite() associated_runs = multidict.multidict( (run_order, r) for r, run_order in (session.query(ts.Run, ts.Order) .join(ts.Order) .filter(ts.Run.machine_id == id) .order_by(ts.Run.start_time.desc()))) associated_runs = sorted(associated_runs.items()) try: machine = session.query(ts.Machine).filter(ts.Machine.id == id).one() except NoResultFound: abort(404, "Invalid machine id {}".format(id)) if request.args.get('json'): json_obj = dict() json_obj['name'] = machine.name json_obj['id'] = machine.id json_obj['runs'] = [] for order in associated_runs: rev = order[0].llvm_project_revision for run in order[1]: json_obj['runs'].append((run.id, rev, run.start_time.isoformat(), run.end_time.isoformat())) return flask.jsonify(**json_obj) machines = session.query(ts.Machine).order_by(ts.Machine.name).all() relatives = [m for m in machines if m.name == machine.name] return render_template("v4_machine.html", testsuite_name=g.testsuite_name, id=id, associated_runs=associated_runs, machine=machine, machines=machines, relatives=relatives, **ts_data(ts)) class V4RequestInfo(object): def __init__(self, run_id): session = request.session self.db = request.get_db() self.session = session self.ts = ts = request.get_testsuite() self.run = run = session.query(ts.Run).filter_by(id=run_id).first() if run is None: abort(404, "Invalid run id {}".format(run_id)) # Get the aggregation function to use. aggregation_fn_name = request.args.get('aggregation_fn') self.aggregation_fn = {'min': lnt.util.stats.safe_min, 'median': lnt.util.stats.median}.get( aggregation_fn_name, lnt.util.stats.safe_min) # Get the MW confidence level. try: confidence_lv = float(request.args.get('MW_confidence_lv')) except (TypeError, ValueError): confidence_lv = .05 self.confidence_lv = confidence_lv # Find the neighboring runs, by order. prev_runs = list(ts.get_previous_runs_on_machine(session, run, N=3)) next_runs = list(ts.get_next_runs_on_machine(session, run, N=3)) self.neighboring_runs = next_runs[::-1] + [self.run] + prev_runs # Select the comparison run as either the previous run, or a user # specified comparison run. compare_to_str = request.args.get('compare_to') if compare_to_str: compare_to_id = int(compare_to_str) compare_to = session.query(ts.Run) \ .filter_by(id=compare_to_id) \ .first() if compare_to is None: flash("Comparison Run is invalid: " + compare_to_str, FLASH_DANGER) else: self.comparison_neighboring_runs = ( list(ts.get_next_runs_on_machine(session, compare_to, N=3))[::-1] + [compare_to] + list(ts.get_previous_runs_on_machine(session, compare_to, N=3))) else: if prev_runs: compare_to = prev_runs[0] else: compare_to = None self.comparison_neighboring_runs = self.neighboring_runs try: self.num_comparison_runs = int( request.args.get('num_comparison_runs')) except Exception: self.num_comparison_runs = 0 # Find the baseline run, if requested. baseline_str = request.args.get('baseline') if baseline_str: baseline_id = int(baseline_str) baseline = session.query(ts.Run).filter_by(id=baseline_id).first() if baseline is None: flash("Could not find baseline " + baseline_str, FLASH_DANGER) else: baseline = None # We're going to render this on a real webpage with CSS support, so # override the default styles and provide bootstrap class names for # the tables. 
styles = { 'body': '', 'td': '', 'h1': 'font-size: 14pt', 'table': 'width: initial; font-size: 9pt;', 'th': 'text-align: center;' } classes = { 'table': 'table table-striped table-condensed table-hover' } self.data = lnt.server.reporting.runs.generate_run_data( session, self.run, baseurl=db_url_for('.index', _external=False), result=None, compare_to=compare_to, baseline=baseline, num_comparison_runs=self.num_comparison_runs, aggregation_fn=self.aggregation_fn, confidence_lv=confidence_lv, styles=styles, classes=classes) self.sri = self.data['sri'] note = self.data['visible_note'] if note: flash(note, FLASH_INFO) self.data.update(ts_data(ts)) @v4_route("/<int:id>/report") def v4_report(id): info = V4RequestInfo(id) return render_template('reporting/run_report.html', **info.data) @v4_route("/<int:id>/text_report") def v4_text_report(id): info = V4RequestInfo(id) text_report = render_template('reporting/run_report.txt', **info.data) response = make_response(text_report) response.mimetype = "text/plain" return response # Compatilibity route for old run pages. @db_route("/simple/<tag>/<int:id>/") def simple_run(tag, id): # Get the expected test suite. db = request.get_db() session = request.session ts = db.testsuite[tag] # Look for a matched run. matched_run = session.query(ts.Run).\ filter(ts.Run.simple_run_id == id).\ first() # If we found one, redirect to it's report. if matched_run is not None: return v4_redirect(db_url_for(".v4_run", testsuite_name=tag, id=matched_run.id)) # Otherwise, report an error. return render_template("error.html", message="""\ Unable to find a run for this ID. Please use the native v4 URL interface (instead of the /simple/... URL schema).""") @v4_route("/<int:id>") def v4_run(id): info = V4RequestInfo(id) session = info.session ts = info.ts run = info.run # Parse the view options. options = {} options['show_delta'] = bool(request.args.get('show_delta')) options['show_previous'] = bool(request.args.get('show_previous')) options['show_stddev'] = bool(request.args.get('show_stddev')) options['show_mad'] = bool(request.args.get('show_mad')) options['show_all'] = bool(request.args.get('show_all')) options['show_all_samples'] = bool(request.args.get('show_all_samples')) options['show_sample_counts'] = \ bool(request.args.get('show_sample_counts')) options['show_graphs'] = bool(request.args.get('show_graphs')) options['show_data_table'] = bool(request.args.get('show_data_table')) options['show_small_diff'] = bool(request.args.get('show_small_diff')) options['hide_report_by_default'] = bool( request.args.get('hide_report_by_default')) options['num_comparison_runs'] = info.num_comparison_runs options['test_filter'] = test_filter_str = request.args.get( 'test_filter', '') options['MW_confidence_lv'] = info.confidence_lv if test_filter_str: test_filter_re = re.compile(test_filter_str) else: test_filter_re = None options['test_min_value_filter'] = test_min_value_filter_str = \ request.args.get('test_min_value_filter', '') if test_min_value_filter_str != '': test_min_value_filter = float(test_min_value_filter_str) else: test_min_value_filter = 0.0 options['aggregation_fn'] = request.args.get('aggregation_fn', 'min') # Get the test names. test_info = session.query(ts.Test.name, ts.Test.id).\ order_by(ts.Test.name).all() # Filter the list of tests by name, if requested. 
if test_filter_re: test_info = [test for test in test_info if test_filter_re.search(test[0])] if request.args.get('json'): json_obj = dict() sri = lnt.server.reporting.analysis.RunInfo(session, ts, [id]) reported_tests = session.query(ts.Test.name, ts.Test.id).\ filter(ts.Run.id == id).\ filter(ts.Test.id.in_(sri.test_ids)).all() order = run.order.as_ordered_string() for test_name, test_id in reported_tests: test = dict(test_name=test_name, test_id=test_id, order=order, machine=run.machine.name) for sample_field in ts.sample_fields: res = sri.get_run_comparison_result( run, None, test_id, sample_field, ts.Sample.get_hash_of_binary_field()) test[sample_field.name] = res.current json_obj[test_name] = test return flask.jsonify(**json_obj) urls = { 'search': v4_url_for('.v4_search') } data = info.data data.update({ 'analysis': lnt.server.reporting.analysis, 'metric_fields': list(ts.Sample.get_metric_fields()), 'options': options, 'request_info': info, 'test_info': test_info, 'test_min_value_filter': test_min_value_filter, 'urls': urls, }) return render_template("v4_run.html", **data) class PromoteOrderToBaseline(Form): name = StringField('Name', validators=[DataRequired(), Length(max=32)]) description = StringField('Description', validators=[Length(max=256)]) promote = SubmitField('Promote') update = SubmitField('Update') demote = SubmitField('Demote') @v4_route("/order/<int:id>", methods=['GET', 'POST']) def v4_order(id): """Order page details order information, as well as runs that are in this order as well setting this run as a baseline.""" session = request.session ts = request.get_testsuite() form = PromoteOrderToBaseline() if form.validate_on_submit(): try: baseline = session.query(ts.Baseline) \ .filter(ts.Baseline.order_id == id) \ .one() except NoResultFound: baseline = ts.Baseline() if form.demote.data: session.delete(baseline) session.commit() flash("Baseline demoted.", FLASH_SUCCESS) else: baseline.name = form.name.data baseline.comment = form.description.data baseline.order_id = id session.add(baseline) session.commit() flash("Baseline {} updated.".format(baseline.name), FLASH_SUCCESS) return v4_redirect(v4_url_for(".v4_order", id=id)) try: baseline = session.query(ts.Baseline) \ .filter(ts.Baseline.order_id == id) \ .one() form.name.data = baseline.name form.description.data = baseline.comment except NoResultFound: pass # Get the order. 
order = session.query(ts.Order).filter(ts.Order.id == id).first() if order is None: abort(404, "Invalid order id {}".format(id)) previous_order = None if order.previous_order_id: previous_order = session.query(ts.Order) \ .filter(ts.Order.id == order.previous_order_id).one() next_order = None if order.next_order_id: next_order = session.query(ts.Order) \ .filter(ts.Order.id == order.next_order_id).one() runs = session.query(ts.Run) \ .filter(ts.Run.order_id == id) \ .options(joinedload(ts.Run.machine)) \ .all() num_runs = len(runs) return render_template("v4_order.html", order=order, form=form, previous_order=previous_order, next_order=next_order, runs=runs, num_runs=num_runs, **ts_data(ts)) @v4_route("/set_baseline/<int:id>") def v4_set_baseline(id): """Update the baseline stored in the user's session.""" session = request.session ts = request.get_testsuite() base = session.query(ts.Baseline).get(id) if not base: return abort(404, "Invalid baseline id {}".format(id)) flash("Baseline set to " + base.name, FLASH_SUCCESS) flask.session[baseline_key(ts.name)] = id return v4_redirect(get_redirect_target()) @v4_route("/all_orders") def v4_all_orders(): # Get the testsuite. session = request.session ts = request.get_testsuite() # Get the orders and sort them totally. orders = sorted(session.query(ts.Order).all()) return render_template("v4_all_orders.html", orders=orders, **ts_data(ts)) @v4_route("/<int:id>/graph") def v4_run_graph(id): # This is an old style endpoint that treated graphs as associated with # runs. Redirect to the new endpoint. session = request.session ts = request.get_testsuite() run = session.query(ts.Run).filter_by(id=id).first() if run is None: abort(404, "Invalid run id {}".format(id)) # Convert the old style test parameters encoding. args = {'highlight_run': id} plot_number = 0 for name, value in request.args.items(): # If this isn't a test specification, just forward it. if not name.startswith('test.'): args[name] = value continue # Otherwise, rewrite from the old style of:: # # test.<test id>=<sample field index> # # into the new style of:: # # plot.<number>=<machine id>.<test id>.<sample field index> test_id = name.split('.', 1)[1] args['plot.%d' % (plot_number,)] = '%d.%s.%s' % ( run.machine.id, test_id, value) plot_number += 1 return v4_redirect(v4_url_for(".v4_graph", **args)) BaselineLegendItem = namedtuple('BaselineLegendItem', 'name id') LegendItem = namedtuple('LegendItem', 'machine test_name field_name color url') @v4_route("/graph_for_sample/<int:sample_id>/<string:field_name>") def v4_graph_for_sample(sample_id, field_name): """Redirect to a graph of the data that a sample and field came from. When you have a sample from an API call, this can get you into the LNT graph page, for that sample. Extra args are passed through, to allow the caller to customize the graph page displayed, with for example run highlighting. :param sample_id: the sample ID from the database, obtained from the API. :param field_name: the name of the field. :return: a redirect to the graph page for that sample and field. """ session = request.session ts = request.get_testsuite() target_sample = session.query(ts.Sample).get(sample_id) if not target_sample: abort(404, "Could not find sample id {}".format(sample_id)) # Get the field index we are interested in. 
field_index = None for idx, f in enumerate(ts.sample_fields): if f.name == field_name: field_index = idx break if field_index is None: abort(400, "Could not find field {}".format(field_name)) kwargs = {'plot.0': '{machine_id}.{test_id}.{field_index}'.format( machine_id=target_sample.run.machine.id, test_id=target_sample.test_id, field_index=field_index)} # Pass request args through, so you can add graph options. kwargs.update(request.args) graph_url = v4_url_for('.v4_graph', **kwargs) return v4_redirect(graph_url) class PlotParameter(object): def __init__(self, machine, test, field, field_index): self.machine = machine self.test = test self.field = field self.field_index = field_index self.samples = None def __repr__(self): return "{}:{}({} samples)" \ .format(self.machine.name, self.test.name, len(self.samples) if self.samples else "No") def assert_field_idx_valid(field_idx, count): if not (0 <= field_idx < count): return abort(404, "Invalid field index {}. Total sample_fileds for " "the current suite is {}.".format(field_idx, count)) def load_plot_parameter(machine_id, test_id, field_index, session, ts): try: machine_id = int(machine_id) test_id = int(test_id) field_index = int(field_index) except ValueError: return abort(400, "Invalid plot arguments.") try: machine = session.query(ts.Machine) \ .filter(ts.Machine.id == machine_id) \ .one() except NoResultFound: return abort(404, "Invalid machine id {}".format(machine_id)) try: test = session.query(ts.Test).filter(ts.Test.id == test_id).one() except NoResultFound: return abort(404, "Invalid test id {}".format(test_id)) assert_field_idx_valid(field_index, len(ts.sample_fields)) try: field = ts.sample_fields[field_index] except NoResultFound: return abort(404, "Invalid field_index {}".format(field_index)) return PlotParameter(machine, test, field, field_index) def parse_plot_parameters(args): """ Returns a list of tuples of integers (machine_id, test_id, field_index). :param args: The request parameters dictionary. """ plot_parameters = [] for name, value in args.items(): # Plots are passed as:: # # plot.<unused>=<machine id>.<test id>.<field index> if not name.startswith('plot.'): continue # Ignore the extra part of the key, it is unused. try: machine_id, test_id, field_index = map(int, value.split('.')) except ValueError: return abort(400, "Parameter {} was malformed. {} must be int.int.int" .format(name, value)) plot_parameters.append((machine_id, test_id, field_index)) return plot_parameters def parse_and_load_plot_parameters(args, session, ts): """ Parses plot parameters and loads the corresponding entities from the database. Returns a list of PlotParameter instances sorted by machine name, test name and then field. :param args: The request parameters dictionary. :param session: The database session. :param ts: The test suite. """ plot_parameters = [load_plot_parameter(machine_id, test_id, field_index, session, ts) for (machine_id, test_id, field_index) in parse_plot_parameters(args)] # Order the plots by machine name, test name and then field. 
plot_parameters.sort(key=lambda plot_parameter: (plot_parameter.machine.name, plot_parameter.test.name, plot_parameter.field.name, plot_parameter.field_index)) return plot_parameters def parse_mean_parameter(args, session, ts): # Mean to graph is passed as: # # mean=<machine id>.<field index> value = args.get('mean') if not value: return None try: machine_id, field_index = map(int, value.split('.')) except ValueError: return abort(400, "Invalid format of 'mean={}', expected mean=<machine id>.<field index>".format(value)) try: machine = session.query(ts.Machine) \ .filter(ts.Machine.id == machine_id) \ .one() except NoResultFound: return abort(404, "Invalid machine id {}".format(machine_id)) assert_field_idx_valid(field_index, len(ts.sample_fields)) field = ts.sample_fields[field_index] return machine, field def load_graph_data(plot_parameter, show_failures, limit, xaxis_date, revision_cache=None): """ Load all the field values for this test on the same machine. :param plot_parameter: Stores machine, test and field to load. :param show_failures: Filter only passed values if False. :param limit: Limit points if specified. :param xaxis_date: X axis is Date, otherwise Order. """ session = request.session ts = request.get_testsuite() # Load all the field values for this test on the same machine. # # FIXME: Don't join to Order here, aggregate this across all the tests # we want to load. Actually, we should just make this a single query. values = session.query(plot_parameter.field.column, ts.Order, ts.Run.start_time, ts.Run.id) \ .join(ts.Run).join(ts.Order) \ .filter(ts.Run.machine_id == plot_parameter.machine.id) \ .filter(ts.Sample.test == plot_parameter.test) \ .filter(plot_parameter.field.column.isnot(None)) # Unless all samples requested, filter out failing tests. if not show_failures: if plot_parameter.field.status_field: values = values.filter((plot_parameter.field.status_field.column == PASS) | (plot_parameter.field.status_field.column.is_(None))) if limit: values = values.limit(limit) if xaxis_date: # Aggregate by date. data = list(multidict.multidict( (date, (val, order, date, run_id)) for val, order, date, run_id in values).items()) # Sort data points according to date. data.sort(key=lambda sample: sample[0]) else: # Aggregate by order (revision). data = list(multidict.multidict( (order.llvm_project_revision, (val, order, date, run_id)) for val, order, date, run_id in values).items()) # Sort data points according to order (revision). data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache)) return data def load_geomean_data(field, machine, limit, xaxis_date, revision_cache=None): """ Load geomean for specified field on the same machine. :param field: Field. :param machine: Machine. :param limit: Limit points if specified. :param xaxis_date: X axis is Date, otherwise Order. """ session = request.session ts = request.get_testsuite() values = session.query(sqlalchemy.sql.func.min(field.column), ts.Order, sqlalchemy.sql.func.min(ts.Run.start_time)) \ .join(ts.Run).join(ts.Order).join(ts.Test) \ .filter(ts.Run.machine_id == machine.id) \ .filter(field.column.isnot(None)) \ .group_by(ts.Order.llvm_project_revision, ts.Test) if limit: values = values.limit(limit) data = multidict.multidict( ((order, date), val) for val, order, date in values).items() # Calculate geomean of each revision. if xaxis_date: data = [(date, [(calc_geomean(vals), order, date)]) for ((order, date), vals) in data] # Sort data points according to date. 
data.sort(key=lambda sample: sample[0]) else: data = [(order.llvm_project_revision, [(calc_geomean(vals), order, date)]) for ((order, date), vals) in data] # Sort data points according to order (revision). data.sort(key=lambda sample: convert_revision(sample[0], cache=revision_cache)) return data @v4_route("/graph") def v4_graph(): session = request.session ts = request.get_testsuite() switch_min_mean_local = False if 'switch_min_mean_session' not in flask.session: flask.session['switch_min_mean_session'] = False # Parse the view options. options = {'min_mean_checkbox': 'min()'} if 'submit' in request.args: # user pressed a button if 'switch_min_mean' in request.args: # user checked mean() checkbox flask.session['switch_min_mean_session'] = \ options['switch_min_mean'] = \ bool(request.args.get('switch_min_mean')) switch_min_mean_local = flask.session['switch_min_mean_session'] else: # mean() check box is not checked flask.session['switch_min_mean_session'] = \ options['switch_min_mean'] = \ bool(request.args.get('switch_min_mean')) switch_min_mean_local = flask.session['switch_min_mean_session'] else: # new page was loaded by clicking link, not submit button options['switch_min_mean'] = switch_min_mean_local = \ flask.session['switch_min_mean_session'] options['hide_lineplot'] = bool(request.args.get('hide_lineplot')) show_lineplot = not options['hide_lineplot'] options['show_mad'] = show_mad = bool(request.args.get('show_mad')) options['show_stddev'] = show_stddev = \ bool(request.args.get('show_stddev')) options['hide_all_points'] = hide_all_points = bool( request.args.get('hide_all_points')) options['xaxis_date'] = xaxis_date = bool( request.args.get('xaxis_date')) options['limit'] = limit = int( request.args.get('limit', 0)) options['show_cumulative_minimum'] = show_cumulative_minimum = bool( request.args.get('show_cumulative_minimum')) options['show_linear_regression'] = show_linear_regression = bool( request.args.get('show_linear_regression')) options['show_failures'] = show_failures = bool( request.args.get('show_failures')) options['normalize_by_median'] = normalize_by_median = bool( request.args.get('normalize_by_median')) options['show_moving_average'] = moving_average = bool( request.args.get('show_moving_average')) options['show_moving_median'] = moving_median = bool( request.args.get('show_moving_median')) options['moving_window_size'] = moving_window_size = int( request.args.get('moving_window_size', 10)) options['hide_highlight'] = bool( request.args.get('hide_highlight')) options['logarithmic_scale'] = bool( request.args.get('logarithmic_scale')) show_highlight = not options['hide_highlight'] # Load the graph parameters. plot_parameters = parse_and_load_plot_parameters(request.args, session, ts) # Extract requested mean trend. mean_parameter = parse_mean_parameter(request.args, session, ts) # Sanity check the arguments. if not plot_parameters and not mean_parameter: return render_template("error.html", message="Nothing to graph.") # Extract requested baselines, and their titles. 
baseline_parameters = [] for name, value in request.args.items(): # Baselines to graph are passed as: # # baseline.title=<run id> if not name.startswith('baseline.'): continue baseline_title = name[len('baseline.'):] run_id_str = value try: run_id = int(run_id_str) except Exception: return abort(400, "Invalid baseline run id {}".format(run_id_str)) try: run = session.query(ts.Run) \ .options(joinedload(ts.Run.machine)) \ .filter(ts.Run.id == run_id) \ .one() except Exception: err_msg = ("The run {} was not found in the database." .format(run_id)) return render_template("error.html", message=err_msg) baseline_parameters.append((run, baseline_title)) # Create region of interest for run data region if we are performing a # comparison. revision_range = None highlight_run_id = request.args.get('highlight_run') if show_highlight and highlight_run_id and highlight_run_id.isdigit(): highlight_run = session.query(ts.Run).filter_by( id=int(highlight_run_id)).first() if highlight_run is None: abort(404, "Invalid highlight_run id {}".format(highlight_run_id)) # Find the neighboring runs, by order. prev_runs = list(ts.get_previous_runs_on_machine(session, highlight_run, N=1)) if prev_runs: start_rev = prev_runs[0].order.llvm_project_revision end_rev = highlight_run.order.llvm_project_revision revision_range = { "start": start_rev, "end": end_rev, } # Build the graph data. legend = [] graph_plots = [] graph_datum = [] baseline_plots = [] revision_cache = {} num_plots = len(plot_parameters) metrics = list(set(req.field.name for req in plot_parameters)) for i, req in enumerate(plot_parameters): # Determine the base plot color. col = list(util.makeDarkColor(float(i) / num_plots)) url = "/".join([str(req.machine.id), str(req.test.id), str(req.field_index)]) legend.append(LegendItem(req.machine, req.test.name, req.field.name, tuple(col), url)) # Load all the field values for this test on the same machine. data = load_graph_data(req, show_failures, limit, xaxis_date, revision_cache) graph_datum.append((req.test.name, data, col, req.field, url, req.machine)) # Get baselines for this line num_baselines = len(baseline_parameters) for baseline_id, (baseline, baseline_title) in \ enumerate(baseline_parameters): q_baseline = session.query(req.field.column, ts.Order.llvm_project_revision, ts.Run.start_time, ts.Machine.name) \ .join(ts.Run).join(ts.Order).join(ts.Machine) \ .filter(ts.Run.id == baseline.id) \ .filter(ts.Sample.test == req.test) \ .filter(req.field.column.isnot(None)) # In the event of many samples, use the mean of the samples as the # baseline. samples = [] for sample in q_baseline: samples.append(sample[0]) # Skip this baseline if there is no data. if not samples: continue mean = sum(samples)/len(samples) # Darken the baseline color distinguish from non-baselines. # Make a color closer to the sample than its neighbour. color_offset = float(baseline_id) / num_baselines / 2 my_color = (i + color_offset) / num_plots dark_col = list(util.makeDarkerColor(my_color)) str_dark_col = util.toColorString(dark_col) baseline_plots.append({ "color": str_dark_col, "lineWidth": 2, "yaxis": {"from": mean, "to": mean}, # "name": q_baseline[0].llvm_project_revision, "name": "Baseline %s: %s (%s)" % (baseline_title, req.test.name, req.field.name), }) baseline_name = ("Baseline {} on {}" .format(baseline_title, q_baseline[0].name)) legend.append(LegendItem(BaselineLegendItem( baseline_name, baseline.id), req.test.name, req.field.name, dark_col, None)) # Draw mean trend if requested. 
if mean_parameter: machine, field = mean_parameter test_name = 'Geometric Mean' if field.name not in metrics: metrics.append(field.name) col = (0, 0, 0) legend.append(LegendItem(machine, test_name, field.name, col, None)) data = load_geomean_data(field, machine, limit, xaxis_date, revision_cache) graph_datum.append((test_name, data, col, field, None, machine)) def trace_name(name, test_name, field_name): return "%s: %s (%s)" % (name, test_name, field_name) for test_name, data, col, field, url, machine in graph_datum: # Generate trace metadata. trace_meta = {} trace_meta["machine"] = machine.name trace_meta["machineID"] = machine.id if len(graph_datum) > 1: # If there are more than one plot in the graph, also label the # test name. trace_meta["test_name"] = test_name trace_meta["metric"] = field.name # Compute the graph points. pts_x = [] pts_y = [] meta = [] errorbar = {"x": [], "y": [], "error_y": {"type": "data", "visible": True, "array": []}} cumulative_minimum = {"x": [], "y": []} moving_median_data = {"x": [], "y": []} moving_average_data = {"x": [], "y": []} multisample_points_data = {"x": [], "y": [], "meta": []} if normalize_by_median: normalize_by = 1.0/stats.median([min([d[0] for d in values]) for _, values in data]) else: normalize_by = 1.0 min_val = None # Note data is sorted in load_graph_data(). for point_label, datapoints in data: # Get the samples. values = [data_array[0] for data_array in datapoints] orders = [data_array[1] for data_array in datapoints] # And the date on which they were taken. dates = [data_array[2] for data_array in datapoints] # Run ID where this point was collected. run_ids = [data_array[3] for data_array in datapoints if len(data_array) == 4] values = [v * normalize_by for v in values] is_multisample = (len(values) > 1) aggregation_fn = min if switch_min_mean_local: aggregation_fn = lnt.util.stats.agg_mean if field.bigger_is_better: aggregation_fn = max agg_value, agg_index = \ aggregation_fn((value, index) for (index, value) in enumerate(values)) pts_y.append(agg_value) # Plotly does not sort X axis in case of type: 'category'. # point_label is a string (order revision) if xaxis_date = False pts_x.append(point_label) # Generate point metadata. point_metadata = {"order": orders[agg_index].as_ordered_string(), "orderID": orders[agg_index].id, "date": str(dates[agg_index])} if run_ids: point_metadata["runID"] = str(run_ids[agg_index]) meta.append(point_metadata) # Add the multisample points, if requested. if not hide_all_points and (is_multisample or bool(request.args.get('csv')) or bool(request.args.get('download_csv'))): for i, v in enumerate(values): multisample_metadata = {"order": orders[i].as_ordered_string(), "orderID": orders[i].id, "date": str(dates[i])} if run_ids: multisample_metadata["runID"] = str(run_ids[i]) multisample_points_data["x"].append(point_label) multisample_points_data["y"].append(v) multisample_points_data["meta"].append(multisample_metadata) # Add the standard deviation error bar, if requested. if show_stddev: mean = stats.mean(values) sigma = stats.standard_deviation(values) errorbar["x"].append(point_label) errorbar["y"].append(mean) errorbar["error_y"]["array"].append(sigma) # Add the MAD error bar, if requested. 
if show_mad: med = stats.median(values) mad = stats.median_absolute_deviation(values, med) errorbar["x"].append(point_label) errorbar["y"].append(med) errorbar["error_y"]["array"].append(mad) if show_cumulative_minimum: min_val = agg_value if min_val is None else min(min_val, agg_value) cumulative_minimum["x"].append(point_label) cumulative_minimum["y"].append(min_val) # Compute the moving average and or moving median of our data if # requested. if moving_average or moving_median: def compute_moving_average(x, window, average_list, _): average_list["x"].append(x) average_list["y"].append(lnt.util.stats.mean(window)) def compute_moving_median(x, window, _, median_list): median_list["x"].append(x) median_list["y"].append(lnt.util.stats.median(window)) def compute_moving_average_and_median(x, window, average_list, median_list): average_list["x"].append(x) average_list["y"].append(lnt.util.stats.mean(window)) median_list["x"].append(x) median_list["y"].append(lnt.util.stats.median(window)) if moving_average and moving_median: fun = compute_moving_average_and_median elif moving_average: fun = compute_moving_average else: fun = compute_moving_median len_pts = len(pts_x) for i in range(len_pts): start_index = max(0, i - moving_window_size) end_index = min(len_pts, i + moving_window_size) window_pts = pts_y[start_index:end_index] fun(pts_x[i], window_pts, moving_average_data, moving_median_data) yaxis_index = metrics.index(field.name) yaxis = "y" if yaxis_index == 0 else "y%d" % (yaxis_index + 1) # Add the minimum line plot, if requested. if show_lineplot: plot = { "name": trace_name("Line", test_name, field.name), "legendgroup": test_name, "yaxis": yaxis, "type": "scatter", "mode": "lines+markers", "line": {"color": util.toColorString(col)}, "x": pts_x, "y": pts_y, "meta": meta } plot.update(trace_meta) if url: plot["url"] = url graph_plots.append(plot) # Add regression line, if requested. if show_linear_regression and len(pts_x) >= 2: unique_x = list(set(pts_x)) if xaxis_date: unique_x.sort() else: unique_x.sort(key=lambda sample: convert_revision(sample, cache=revision_cache)) num_unique_x = len(unique_x) if num_unique_x >= 2: dict_x = {} x_min = pts_x[0] x_max = pts_x[-1] # We compute the regression line in terms of a normalized X scale. if xaxis_date: x_range = float((x_max - x_min).total_seconds()) for x_key in unique_x: dict_x[x_key] = (x_key - x_min).total_seconds() / x_range else: for i, x_key in enumerate(unique_x): dict_x[x_key] = i/(num_unique_x - 1) norm_x = [dict_x[xi] for xi in pts_x] try: info = ext_stats.linregress(norm_x, pts_y) except ZeroDivisionError: info = None except ValueError: info = None if info is not None: slope, intercept, _, _, _ = info reglin_col = [c * 0.8 for c in col] if xaxis_date: reglin_y = [(xi - x_min).total_seconds() / x_range * slope + intercept for xi in unique_x] else: reglin_y = [i/(num_unique_x - 1) * slope + intercept for i in range(num_unique_x)] plot = { "name": trace_name("Linear Regression", test_name, field.name), "legendgroup": test_name, "yaxis": yaxis, "hoverinfo": "skip", "type": "scatter", "mode": "lines", "line": {"color": util.toColorString(reglin_col), "width": 2}, # "shadowSize": 4, "x": unique_x, "y": reglin_y } plot.update(trace_meta) graph_plots.insert(0, plot) # Add the points plot, if used. 
if multisample_points_data["x"]: pts_col = (0, 0, 0) multisample_points_data.update({ "name": trace_name("Points", test_name, field.name), "legendgroup": test_name, "showlegend": False, "yaxis": yaxis, # "hoverinfo": "skip", "type": "scatter", "mode": "markers", "marker": {"color": util.toColorString(pts_col), "size": 5} }) multisample_points_data.update(trace_meta) if url: multisample_points_data["url"] = url graph_plots.append(multisample_points_data) # Add the error bar plot, if used. if errorbar["x"]: bar_col = [c * 0.4 for c in col] errorbar.update({ "name": trace_name("Error bars", test_name, field.name), "showlegend": False, "yaxis": yaxis, "hoverinfo": "skip", "type": "scatter", "mode": "markers", "marker": {"color": util.toColorString(bar_col)} }) errorbar.update(trace_meta) graph_plots.append(errorbar) # Add the moving average plot, if used. if moving_average_data["x"]: avg_col = [c * 0.7 for c in col] moving_average_data.update({ "name": trace_name("Moving average", test_name, field.name), "legendgroup": test_name, "yaxis": yaxis, "hoverinfo": "skip", "type": "scatter", "mode": "lines", "line": {"color": util.toColorString(avg_col)} }) moving_average_data.update(trace_meta) graph_plots.append(moving_average_data) # Add the moving median plot, if used. if moving_median_data["x"]: med_col = [c * 0.6 for c in col] moving_median_data.update({ "name": trace_name("Moving median: ", test_name, field.name), "legendgroup": test_name, "yaxis": yaxis, "hoverinfo": "skip", "type": "scatter", "mode": "lines", "line": {"color": util.toColorString(med_col)} }) moving_median_data.update(trace_meta) graph_plots.append(moving_median_data) if cumulative_minimum["x"]: min_col = [c * 0.5 for c in col] cumulative_minimum.update({ "name": trace_name("Cumulative Minimum", test_name, field.name), "legendgroup": test_name, "yaxis": yaxis, "hoverinfo": "skip", "type": "scatter", "mode": "lines", "line": {"color": util.toColorString(min_col)} }) cumulative_minimum.update(trace_meta) graph_plots.append(cumulative_minimum) if bool(request.args.get("json")) or bool(request.args.get("download_json")): json_obj = dict() json_obj['data'] = graph_plots # Flatten ORM machine objects to their string names. simple_type_legend = [] for li in legend: # Flatten name, make color a dict. new_entry = { 'name': li.machine.name, 'test': li.test_name, 'unit': li.field_name, 'color': util.toColorString(li.color), 'url': li.url, } simple_type_legend.append(new_entry) json_obj['legend'] = simple_type_legend json_obj['revision_range'] = revision_range json_obj['current_options'] = options json_obj['test_suite_name'] = ts.name json_obj['baselines'] = baseline_plots flask_json = flask.jsonify(**json_obj) if bool(request.args.get('json')): return flask_json else: json_file = BytesIO() lines = flask_json.get_data() json_file.write(lines) json_file.seek(0) return send_file(json_file, mimetype='text/json', attachment_filename='Graph.json', as_attachment=True) return render_template("v4_graph.html", options=options, graph_plots=graph_plots, metrics=metrics, legend=legend, **ts_data(ts)) @v4_route("/global_status") def v4_global_status(): session = request.session ts = request.get_testsuite() metric_fields = sorted(list(ts.Sample.get_metric_fields()), key=lambda f: f.name) fields = dict((f.name, f) for f in metric_fields) # Get the latest run. latest = session.query(ts.Run.start_time).\ order_by(ts.Run.start_time.desc()).first() # If we found an entry, use that. 
if latest is not None: latest_date, = latest else: # Otherwise, just use today. latest_date = datetime.date.today() # Create a datetime for the day before the most recent run. yesterday = latest_date - datetime.timedelta(days=1) # Get arguments. revision = request.args.get('revision', str(ts.Machine.DEFAULT_BASELINE_REVISION)) field = fields.get(request.args.get('field', None), metric_fields[0]) # Get the list of all runs we might be interested in. recent_runs = session.query(ts.Run) \ .filter(ts.Run.start_time > yesterday) \ .all() # Aggregate the runs by machine. recent_runs_by_machine = multidict.multidict() for run in recent_runs: recent_runs_by_machine[run.machine] = run # Get a sorted list of recent machines. recent_machines = sorted(recent_runs_by_machine.keys(), key=lambda m: m.name) # We use periods in our machine names. css does not like this # since it uses periods to demark classes. Thus we convert periods # in the names of our machines to dashes for use in css. It is # also convenient for our computations in the jinja page to have # access to def get_machine_keys(m): m.css_name = m.name.replace('.', '-') return m recent_machines = list(map(get_machine_keys, recent_machines)) # For each machine, build a table of the machine, the baseline run, and the # most recent run. We also computed a list of all the runs we are reporting # over. machine_run_info = [] reported_run_ids = [] for machine in recent_machines: runs = recent_runs_by_machine[machine] # Get the baseline run for this machine. baseline = machine.get_closest_previously_reported_run( session, ts.Order(llvm_project_revision=revision)) # Choose the "best" run to report on. We want the most recent one with # the most recent order. run = max(runs, key=lambda r: (r.order, r.start_time)) if baseline: machine_run_info.append((baseline, run)) reported_run_ids.append(baseline.id) reported_run_ids.append(run.id) if not machine_run_info: abort(404, "No closest runs for revision '{}'".format(revision)) # Get the set all tests reported in the recent runs. reported_tests = session.query(ts.Test.id, ts.Test.name).filter( sqlalchemy.sql.exists('*', sqlalchemy.sql.and_( ts.Sample.run_id.in_(reported_run_ids), ts.Sample.test_id == ts.Test.id))).all() # Load all of the runs we are interested in. runinfo = lnt.server.reporting.analysis.RunInfo(session, ts, reported_run_ids) # Build the test matrix. This is a two dimensional table index by # (machine-index, test-index), where each entry is the percent change. test_table = [] for i, (test_id, test_name) in enumerate(reported_tests): # Create the row, starting with the test name and worst entry. row = [(test_id, test_name), None] # Compute comparison results for each machine. row.extend((runinfo.get_run_comparison_result( run, baseline, test_id, field, ts.Sample.get_hash_of_binary_field()), run.id) for baseline, run in machine_run_info) # Compute the worst cell value. if len(row) > 2: row[1] = max(cr.pct_delta for cr, _ in row[2:]) test_table.append(row) # Order the table by worst regression. test_table.sort(key=lambda row: row[1], reverse=True) return render_template("v4_global_status.html", tests=test_table, machines=recent_machines, fields=metric_fields, selected_field=field, selected_revision=revision, **ts_data(ts)) @v4_route("/daily_report") def v4_daily_report_overview(): # Redirect to the report for the most recent submitted run's date. session = request.session ts = request.get_testsuite() # Get the latest run. 
latest = session.query(ts.Run).\ order_by(ts.Run.start_time.desc()).limit(1).first() # If we found a run, use it's start time. if latest: date = latest.start_time else: # Otherwise, just use today. date = datetime.date.today() extra_args = request.args.copy() extra_args.pop("year", None) extra_args.pop("month", None) extra_args.pop("day", None) return v4_redirect(v4_url_for(".v4_daily_report", year=date.year, month=date.month, day=date.day, **extra_args)) @v4_route("/daily_report/<int:year>/<int:month>/<int:day>") def v4_daily_report(year, month, day): num_days_str = request.args.get('num_days') if num_days_str is not None: num_days = int(num_days_str) else: num_days = 3 day_start_str = request.args.get('day_start') if day_start_str is not None: day_start = int(day_start_str) else: day_start = 16 filter_machine_regex = request.args.get('filter-machine-regex') ts = request.get_testsuite() # Create the report object. report = lnt.server.reporting.dailyreport.DailyReport( ts, year, month, day, num_days, day_start, filter_machine_regex=filter_machine_regex) # Build the report. try: report.build(request.session) except ValueError: return abort(400) return render_template("v4_daily_report.html", report=report, analysis=lnt.server.reporting.analysis, **ts_data(ts)) ### # Cross Test-Suite V4 Views def get_summary_config_path(): return os.path.join(current_app.old_config.tempDir, 'summary_report_config.json') @db_route("/summary_report/edit", methods=('GET', 'POST')) def v4_summary_report_ui(): # If this is a POST request, update the saved config. session = request.session if request.method == 'POST': # Parse the config data. config_data = request.form.get('config') config = flask.json.loads(config_data) # Write the updated config. with open(get_summary_config_path(), 'w') as f: flask.json.dump(config, f, indent=2) # Redirect to the summary report. return v4_redirect(db_url_for(".v4_summary_report")) config_path = get_summary_config_path() if os.path.exists(config_path): with open(config_path) as f: config = flask.json.load(f) else: config = { "machine_names": [], "orders": [], "machine_patterns": [], } # Get the list of available test suites. testsuites = request.get_db().testsuite.values() # Gather the list of all run orders and all machines. def to_key(name): first = name.split('.', 1)[0] if first.isdigit(): return (int(first), name) return (first, name) all_machines = set() all_orders = set() for ts in testsuites: for name, in session.query(ts.Machine.name): all_machines.add(name) for name, in session.query(ts.Order.llvm_project_revision): all_orders.add(name) all_machines = sorted(all_machines) all_orders = sorted(all_orders, key=to_key) return render_template("v4_summary_report_ui.html", config=config, all_machines=all_machines, all_orders=all_orders, **ts_data(ts)) @v4_route("/latest_runs_report") def v4_latest_runs_report(): ts = request.get_testsuite() num_runs_str = request.args.get('num_runs') if num_runs_str is not None: num_runs = int(num_runs_str) else: num_runs = 10 report = lnt.server.reporting.latestrunsreport.LatestRunsReport(ts, num_runs) report.build(request.session) return render_template("v4_latest_runs_report.html", report=report, analysis=lnt.server.reporting.analysis, **ts_data(ts)) @db_route("/summary_report") def v4_summary_report(): session = request.session # Load the summary report configuration. 
config_path = get_summary_config_path() if not os.path.exists(config_path): return render_template("error.html", message="""\ You must define a summary report configuration first.""") with open(config_path) as f: config = flask.json.load(f) # Create the report object. report = lnt.server.reporting.summaryreport.SummaryReport( request.get_db(), config['orders'], config['machine_names'], config['machine_patterns']) # Build the report. report.build(session) if bool(request.args.get('json')): json_obj = dict() json_obj['ticks'] = report.report_orders data = [] for e in report.normalized_data_table.items(): header, samples = e raw_samples = samples.getvalue() data.append([header, raw_samples]) json_obj['data'] = data return flask.jsonify(**json_obj) return render_template("v4_summary_report.html", report=report) @frontend.route('/rules') def rules(): discovered_rules = lnt.server.db.rules_manager.DESCRIPTIONS return render_template("rules.html", rules=discovered_rules) @frontend.route('/log') def log(): with open(current_app.config['log_file_name'], 'r') as f: log_lines = f.readlines() r'2017-07-21 15:02:15,143 ERROR:' return render_template("log.html", log_lines=log_lines) @frontend.route('/debug') def debug(): assert not current_app.debug @frontend.route('/__health') def health(): """Our instance health. If queue is too long or we use too much mem, return 500. Monitor might reboot us for this.""" is_bad_state = False msg = "Ok" import resource stats = resource.getrusage(resource.RUSAGE_SELF) mem = stats.ru_maxrss if mem > 1024**3: is_bad_state = True msg = "Over memory " + str(mem) + ">" + str(1024**3) if is_bad_state: return msg, 500 return msg, 200 @v4_route("/search") def v4_search(): session = request.session ts = request.get_testsuite() query = request.args.get('q') l_arg = request.args.get('l', 8) default_machine = request.args.get('m', None) assert query results = lnt.server.db.search.search(session, ts, query, num_results=l_arg, default_machine=default_machine) return json.dumps( [('%s #%s' % (r.machine.name, r.order.llvm_project_revision), r.id) for r in results]) # How much data to render in the Matrix view. MATRIX_LIMITS = [ ('12', 'Small'), ('50', 'Medium'), ('250', 'Large'), ('-1', 'All'), ] class MatrixOptions(Form): limit = SelectField('Size', choices=MATRIX_LIMITS) def baseline(): # type: () -> Optional[testsuitedb.TestSuiteDB.Baseline] """Get the baseline object from the user's current session baseline value or None if one is not defined. """ session = request.session ts = request.get_testsuite() base_id = flask.session.get(baseline_key(ts.name)) if not base_id: return None try: base = session.query(ts.Baseline).get(base_id) except NoResultFound: return None return base @v4_route("/matrix", methods=['GET', 'POST']) def v4_matrix(): """A table view for Run sample data, because *some* people really like to be able to see results textually. request.args.limit limits the number of samples. for each dataset to add, there will be a "plot.n=.m.b.f" where m is machine ID, b is benchmark ID and f os field kind offset. "n" is used to unique the paramters, and is ignored. """ session = request.session ts = request.get_testsuite() # Load the matrix request parameters. 
form = MatrixOptions(request.form) if request.method == 'POST': post_limit = form.limit.data else: post_limit = MATRIX_LIMITS[0][0] plot_parameters = parse_and_load_plot_parameters(request.args, session, ts) if not plot_parameters: abort(404, "Request requires some plot arguments.") # Feature: if all of the results are from the same machine, hide the name # to make the headers more compact. dedup = True for r in plot_parameters: if r.machine.id != plot_parameters[0].machine.id: dedup = False if dedup: machine_name_common = plot_parameters[0].machine.name machine_id_common = plot_parameters[0].machine.id else: machine_name_common = machine_id_common = None # It is nice for the columns to be sorted by name. plot_parameters.sort(key=lambda x: x.test.name), # Now lets get the data. all_orders = set() order_to_id = {} for req in plot_parameters: q = session.query(req.field.column, ts.Order.llvm_project_revision, ts.Order.id) \ .join(ts.Run) \ .join(ts.Order) \ .filter(ts.Run.machine_id == req.machine.id) \ .filter(ts.Sample.test == req.test) \ .filter(req.field.column.isnot(None)) \ .order_by(ts.Order.llvm_project_revision.desc()) limit = request.args.get('limit', post_limit) if limit or post_limit: limit = int(limit) if limit != -1: q = q.limit(limit) req.samples = defaultdict(list) for s in q.all(): req.samples[s[1]].append(s[0]) all_orders.add(s[1]) order_to_id[s[1]] = s[2] if not all_orders: abort(404, "No orders found.") # Now grab the baseline data. user_baseline = baseline() backup_baseline = next(iter(all_orders)) if user_baseline: all_orders.add(user_baseline.order.llvm_project_revision) baseline_rev = user_baseline.order.llvm_project_revision baseline_name = user_baseline.name else: baseline_rev = backup_baseline baseline_name = backup_baseline for req in plot_parameters: q_baseline = session.query(req.field.column, ts.Order.llvm_project_revision, ts.Order.id) \ .join(ts.Run) \ .join(ts.Order) \ .filter(ts.Run.machine_id == req.machine.id) \ .filter(ts.Sample.test == req.test) \ .filter(req.field.column.isnot(None)) \ .filter(ts.Order.llvm_project_revision == baseline_rev) baseline_data = q_baseline.all() if baseline_data: for s in baseline_data: req.samples[s[1]].append(s[0]) all_orders.add(s[1]) order_to_id[s[1]] = s[2] else: # Well, there is a baseline, but we did not find data for it... # So lets revert back to the first run. msg = "Did not find data for {}. Showing {}." flash(msg.format(user_baseline, backup_baseline), FLASH_DANGER) all_orders.remove(baseline_rev) baseline_rev = backup_baseline baseline_name = backup_baseline all_orders = list(all_orders) all_orders.sort(reverse=True) all_orders.insert(0, baseline_rev) # Now calculate Changes between each run. for req in plot_parameters: req.change = {} for order in all_orders: cur_samples = req.samples[order] prev_samples = req.samples.get(baseline_rev, None) cr = ComparisonResult(mean, False, False, cur_samples, prev_samples, None, None, confidence_lv=0.05, bigger_is_better=False) req.change[order] = cr # Calculate Geomean for each order. 
order_to_geomean = {} curr_geomean = None for order in all_orders: curr_samples = [] prev_samples = [] for req in plot_parameters: curr_samples.extend(req.samples[order]) prev_samples.extend(req.samples[baseline_rev]) prev_geomean = calc_geomean(prev_samples) curr_geomean = calc_geomean(curr_samples) if prev_geomean: cr = ComparisonResult(mean, False, False, [curr_geomean], [prev_geomean], None, None, confidence_lv=0.05, bigger_is_better=False) order_to_geomean[order] = cr else: # There will be no change here, but display current val. if curr_geomean: order_to_geomean[order] = PrecomputedCR(curr_geomean, curr_geomean, False) # Calculate the date of each order. runs = session.query(ts.Run.start_time, ts.Order.llvm_project_revision) \ .join(ts.Order) \ .filter(ts.Order.llvm_project_revision.in_(all_orders)) \ .all() order_to_date = dict([(x[1], x[0]) for x in runs]) class FakeOptions(object): show_small_diff = False show_previous = False show_all = True show_delta = False show_stddev = False show_mad = False show_all_samples = False show_sample_counts = False return render_template("v4_matrix.html", testsuite_name=g.testsuite_name, associated_runs=plot_parameters, orders=all_orders, options=FakeOptions(), analysis=lnt.server.reporting.analysis, geomeans=order_to_geomean, order_to_id=order_to_id, form=form, baseline_rev=baseline_rev, baseline_name=baseline_name, machine_name_common=machine_name_common, machine_id_common=machine_id_common, order_to_date=order_to_date, **ts_data(ts)) @frontend.route("/explode") def explode(): """This route is going to exception. Used for testing 500 page.""" return 1/0 @frontend.route("/gone") def gone(): """This route returns 404. Used for testing 404 page.""" abort(404, "test") @frontend.route("/ping") def ping(): """Simple route to see if server is alive. Used by tests to poll on server creation.""" return "pong", 200 @frontend.route("/sleep") def sleep(): """Simple route to simulate long running page loads. Used by to diagnose proxy issues etc.""" sleep_time = 1 if request.args.get('timeout'): sleep_time = int(request.args.get('timeout')) time.sleep(sleep_time) return "Done", 200
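# A small standalone sketch of the "plot.<n>=<machine id>.<test id>.<field index>" request
# parameter convention that the graph and matrix views above rely on. It is a simplified
# mirror of parse_plot_parameters() without error handling; the helper name and the sample
# dictionary below are illustrative, not part of the original module.
def _sketch_parse_plot_args(args):
    """Return (machine_id, test_id, field_index) tuples from request-style args."""
    plots = []
    for name, value in args.items():
        # Only keys of the form "plot.<anything>" describe a plot; other keys are ignored.
        if not name.startswith('plot.'):
            continue
        machine_id, test_id, field_index = map(int, value.split('.'))
        plots.append((machine_id, test_id, field_index))
    return plots


# Example: _sketch_parse_plot_args({'plot.0': '3.14.0', 'highlight_run': '1234'})
# returns [(3, 14, 0)]; the 'highlight_run' key is not a plot specification and is skipped.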
python
import os from datetime import datetime import numpy import xarray as xr from esdl.cate.cube_gen import CateCubeSourceProvider class OzoneTemisProvider(CateCubeSourceProvider): def __init__(self, cube_config, name='ozone_temis', dir=None, resampling_order=None): super().__init__(cube_config, name, dir, resampling_order) self.old_indices = None @property def variable_descriptors(self): shared_meta_info = { 'data_type': numpy.float32, 'fill_value': numpy.nan, 'references': 'Jacob C. A. van Peet, Ronald J. van der A, Hennie M. Kelder, and Pieternel F. Levelt (2018),' 'Simultaneous assimilation of ozone profiles from multiple UV-VIS satellite instruments, Atmospheric Chemistry and Physics, ' 'doi:10.5194/acp-18-1685-2018', 'comment': 'The global tropospheric ozone column from 0 to 6 km is presented here. The column is derived by simultaneous assimlating ozone profiles of GOME-2 and OMI.', 'url': 'http://www.temis.nl/protocols/tropo.html', 'project_name' : 'Tropospheric ozone column', } ds = xr.open_dataset(self.dir_path + '/tropcol-20111202-v0002.nc') meta = dict() meta.update(shared_meta_info) meta.update(ds.attrs) coords = ('lon', 'lat', 'time') res = dict() for vs in ds.variables: if vs not in coords: meta_var = { 'source_name': vs, 'units': ds[vs].units, 'long_name': ds[vs].long_name, 'standard_name': ds[vs].standard_name, } meta_var.update(meta) res[vs] = meta_var ds.close() return res def compute_source_time_ranges(self): source_time_ranges = list() for root, sub_dirs, files in os.walk(self.dir_path): for file_name in files: if '.nc' in file_name: f = os.path.join(root, file_name) buff = file_name.split('-') dtt = datetime.strptime(buff[1], '%Y%m%d') source_time_ranges.append((dtt, dtt, f, 0)) return sorted(source_time_ranges, key=lambda item: item[0]) def transform_source_image(self, source_image): """ Transforms the source image, here by flipping and then shifting horizontally. :param source_image: 2D image :return: source_image """ # TODO (hans-permana, 20161219): the following line is a workaround to an issue where the nan values are # always read as -9.9. Find out why these values are automatically converted and create a better fix. source_image[source_image == -9.9] = numpy.nan return numpy.flipud(source_image)
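# A minimal sketch of the file-name convention that compute_source_time_ranges() above depends
# on: products are named like 'tropcol-20111202-v0002.nc' and the middle token is a YYYYMMDD
# date. It reuses the datetime import at the top of this module; the helper name and the
# example file name are illustrative only.
def _sketch_date_from_filename(file_name):
    """Return the datetime encoded in a 'tropcol-YYYYMMDD-vNNNN.nc' style file name."""
    buff = file_name.split('-')
    return datetime.strptime(buff[1], '%Y%m%d')


# Example: _sketch_date_from_filename('tropcol-20111202-v0002.nc')
# -> datetime.datetime(2011, 12, 2, 0, 0)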
python
# # This example demonstrates using Lark with a custom lexer. # # You can use a custom lexer to tokenize text when the lexers offered by Lark # are too slow, or not flexible enough. # # You can also use it (as shown in this example) to tokenize streams of objects. # from lark import Lark, Transformer, v_args from lark.lexer import Lexer, Token class TypeLexer(Lexer): def __init__(self, lexer_conf): pass def lex(self, data): for obj in data: if isinstance(obj, int): yield Token('INT', obj) elif isinstance(obj, (type(''), type(u''))): yield Token('STR', obj) else: raise TypeError(obj) parser = Lark(""" start: data_item+ data_item: STR INT* %declare STR INT """, parser='lalr', lexer=TypeLexer) class ParseToDict(Transformer): @v_args(inline=True) def data_item(self, name, *numbers): return name.value, [n.value for n in numbers] start = dict def test(): data = ['alice', 1, 27, 3, 'bob', 4, 'carrie', 'dan', 8, 6] print(data) tree = parser.parse(data) res = ParseToDict().transform(tree) print('-->') print(res) # prints {'alice': [1, 27, 3], 'bob': [4], 'carrie': [], 'dan': [8, 6]} if __name__ == '__main__': test()
python
import json from .errors import JrsNodeNotFound from .refs_resolver import RefsResolver class Context(object): def __init__(self): self.schemas = {} self.nodes = {} self.refsResolver = RefsResolver(self) def addSchema(self, schema): self.schemas[schema.id] = schema def addNode(self, schemaId, path, node): self.nodes["{}#{}".format(schemaId, path)] = node def getNode(self, schemaId, path): fullPath = "{}#{}".format(schemaId, path.replace("/", ".")) if fullPath not in self.nodes: raise JrsNodeNotFound("Not found node with schemaId: {}, path: {}".format(schemaId, path)) return self.nodes[fullPath] def initNodes(self): for schema in self.schemas.values(): schema.root.initNodes() def resolveRefs(self): self.refsResolver.resolveRefs() def toJson(self, prettyPrint): schemas = {} for item in self.schemas.values(): schemas[item.id] = item.root.value if prettyPrint: return json.dumps(schemas, separators=(",", ": "), indent=4) + "\n" else: return json.dumps(schemas, separators=(",", ":"))
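# A tiny illustration of the node addressing scheme used by Context above: nodes are stored
# under "<schemaId>#<dotted path>" keys, and getNode() accepts slash-separated paths which it
# converts to the dotted form. The schema id and path below are invented for the example.
def _sketch_node_key(schema_id, path):
    """Build a lookup key the same way Context.getNode() does."""
    return "{}#{}".format(schema_id, path.replace("/", "."))


# Example: _sketch_node_key("user-schema", "properties/name") -> "user-schema#properties.name",
# which matches the key produced by Context.addNode("user-schema", "properties.name", node).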
python
# Given a list of dominoes, dominoes[i] = [a, b] is equivalent to dominoes[j] = [c, d] if and only if either (a==c and b==d), or (a==d and b==c) - that is, one domino can be rotated to be equal to another domino.
# Return the number of pairs (i, j) for which 0 <= i < j < dominoes.length and dominoes[i] is equivalent to dominoes[j].

class Solution(object):
    def numEquivalentDominoes(self, dominoes):
        count = 0
        seen = {}  # normalized domino -> how many times it has appeared so far
        for a, b in dominoes:
            key = (a, b) if a <= b else (b, a)  # normalize so a rotated copy maps to the same key
            count += seen.get(key, 0)           # each earlier equivalent domino forms one new pair
            seen[key] = seen.get(key, 0) + 1
        return count


print(Solution().numEquivalentDominoes([[1, 2], [2, 1], [3, 4], [5, 6]]))  # -> 1
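# A compact alternative sketch of the same count using collections.Counter: normalize each
# domino so rotations collide, then sum C(k, 2) over the multiplicities. The function name is
# illustrative and not part of the original exercise.
from collections import Counter

def _count_equivalent_domino_pairs(dominoes):
    counts = Counter(tuple(sorted(domino)) for domino in dominoes)
    return sum(k * (k - 1) // 2 for k in counts.values())

print(_count_equivalent_domino_pairs([[1, 2], [2, 1], [3, 4], [5, 6]]))  # also prints 1
print(_count_equivalent_domino_pairs([[1, 2], [1, 2], [1, 2]]))          # three copies -> 3 pairs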
python
from ralph.accounts.api import RalphUserSimpleSerializer from ralph.api import RalphAPIViewSet, router from ralph.assets.api.serializers import RalphAPISerializer from ralph.sim_cards.models import CellularCarrier, SIMCard, SIMCardFeatures class CellularCarrierSerializer(RalphAPISerializer): class Meta: model = CellularCarrier fields = ['name'] class SIMCardFeaturesSerializer(RalphAPISerializer): class Meta: model = SIMCardFeatures fields = ['name'] class SIMCardSerializer(RalphAPISerializer): carrier = CellularCarrierSerializer() features = SIMCardFeaturesSerializer(many=True) user = RalphUserSimpleSerializer() owner = RalphUserSimpleSerializer() class Meta: model = SIMCard fields = ['status', 'card_number', 'phone_number', 'pin1', 'puk1', 'user', 'owner', 'warehouse', 'carrier', 'features', 'quarantine_until', 'modified'] class CellularCarrierViewSet(RalphAPIViewSet): queryset = CellularCarrier.objects.all() serializer_class = CellularCarrierSerializer class SIMCardFeatureViewSet(RalphAPIViewSet): queryset = SIMCardFeatures.objects.all() serializer_class = SIMCardFeaturesSerializer class SIMCardViewSet(RalphAPIViewSet): queryset = SIMCard.objects.all() serializer_class = SIMCardSerializer select_related = ['carrier', 'user', 'owner'] prefetch_related = ['features'] filter_fields = ['user__username', 'features__name', 'owner__username', 'carrier__name'] router.register(r'sim-card-feature', SIMCardFeatureViewSet) router.register(r'sim-card-cellular-carrier', CellularCarrierViewSet) router.register(r'sim-card', SIMCardViewSet) urlpatterns = []
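# A hedged sketch of how the endpoints registered above might be queried over HTTP. The router
# prefix ('sim-card') and the 'user__username' filter field come from the code, but the base
# URL, the '/api' prefix, the token and the DRF-style "Token" header are assumptions to adapt
# to a real Ralph instance.
import requests

RALPH_API = "https://ralph.example.com/api"      # assumed base URL
RALPH_TOKEN = "replace-with-a-real-api-token"    # assumed auth token


def _sketch_list_sim_cards(username):
    """Fetch SIM cards filtered by the user__username filter declared on SIMCardViewSet."""
    response = requests.get(
        "{}/sim-card/".format(RALPH_API),
        params={"user__username": username},
        headers={"Authorization": "Token {}".format(RALPH_TOKEN)},
    )
    response.raise_for_status()
    return response.json()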
python
"""Test runway.config.components.runway._test_def.""" # pylint: disable=no-self-use,protected-access # pyright: basic import pytest from pydantic import ValidationError from runway.config.components.runway import ( CfnLintRunwayTestDefinition, RunwayTestDefinition, ScriptRunwayTestDefinition, YamlLintRunwayTestDefinition, ) from runway.config.models.runway import ( CfnLintRunwayTestDefinitionModel, ScriptRunwayTestDefinitionModel, YamlLintRunwayTestDefinitionModel, ) class TestCfnLintRunwayTestDefinition: """Test runway.config.components.runway._test_def.CfnLintRunwayTestDefinition.""" def test_parse_obj(self) -> None: """Test parse_obj.""" assert isinstance( CfnLintRunwayTestDefinition.parse_obj({}), CfnLintRunwayTestDefinition ) class TestRunwayTestDefinition: """Test runway.config.components.runway._test_def.RunwayTestDefinition.""" def test_new_cfn_lint(self) -> None: """Test creation CfnLintRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition(CfnLintRunwayTestDefinitionModel()), CfnLintRunwayTestDefinition, ) def test_new_invalid(self) -> None: """Test new invalid type.""" with pytest.raises(TypeError) as excinfo: RunwayTestDefinition({}) # type: ignore assert str(excinfo.value).startswith("expected data of type") def test_new_script(self) -> None: """Test creation ScriptRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition(ScriptRunwayTestDefinitionModel()), ScriptRunwayTestDefinition, ) def test_new_yamllint(self) -> None: """Test creation ScriptRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition(YamlLintRunwayTestDefinitionModel()), YamlLintRunwayTestDefinition, ) def test_parse_obj_cfn_lint(self) -> None: """Test parse_obj CfnLintRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition.parse_obj({"type": "cfn-lint"}), CfnLintRunwayTestDefinition, ) def test_parse_obj_invalid(self) -> None: """Test parse_obj invalid object.""" with pytest.raises(ValidationError): RunwayTestDefinition.parse_obj({"type": "invalid"}) def test_parse_obj_script(self) -> None: """Test parse_obj ScriptRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition.parse_obj({"type": "script"}), ScriptRunwayTestDefinition, ) def test_parse_obj_yamllint(self) -> None: """Test parse_obj YamlLintRunwayTestDefinition.""" assert isinstance( RunwayTestDefinition.parse_obj({"type": "yamllint"}), YamlLintRunwayTestDefinition, ) def test_register_variable(self) -> None: """Test _register_variable.""" obj = RunwayTestDefinition.parse_obj( {"type": "script", "name": "test_register_variable", "required": True} ) assert obj._vars["required"].name == "test_register_variable.required" class TestScriptRunwayTestDefinition: """Test runway.config.components.runway._test_def.ScriptRunwayTestDefinition.""" def test_parse_obj(self) -> None: """Test parse_obj.""" assert isinstance( ScriptRunwayTestDefinition.parse_obj({}), ScriptRunwayTestDefinition ) class TestYamlLintRunwayTestDefinition: """Test runway.config.components.runway._test_def.YamlLintRunwayTestDefinition.""" def test_parse_obj(self) -> None: """Test parse_obj.""" assert isinstance( YamlLintRunwayTestDefinition.parse_obj({}), YamlLintRunwayTestDefinition )
python
""""@package This package enables the research group usage for the database. """ from src.models.employee import EmployeeDataAccess class ResearchGroup: """ This class defines a research group """ def __init__(self, name, abbreviation, logo_location, description_id, address, telephone_number, is_active): """ ResearchGroup initializer. :param name: Research group name. :param abbreviation: Research group abbreviation. :param logo_location: Location of group logo. :param description_id: ID of the group description. :param address: Research group address. :param telephone_number: Research group telephone number. :param study_field: Research group study field. :param is_active: Status of research group. """ self.name = name self.abbreviation = abbreviation self.logo_location = logo_location self.address = address self.telephone_number = telephone_number self.is_active = is_active self.description_id = description_id self.description_eng = None self.description_nl = None self.contact_person = None def to_dict(self): """ Converts object to a dictionary. :return: Dictionary of the object data. """ return vars(self) class ResearchGroupDataAccess: """ This class interacts with the ResearchGroup component of the database. """ def __init__(self, dbconnect): """ Initiates the ResearchGroupDataAccess object. :param dbconnect: The database connection. """ self.dbconnect = dbconnect def get_group_names(self, active_only): # TODO #2 error for empty fetch """ Fetches all research group names. :param active_only: Only return active research groups. :return: A list with all the active and/or non-active research group names. """ cursor = self.dbconnect.get_cursor() if active_only: cursor.execute('SELECT name FROM research_group WHERE is_active = TRUE') else: cursor.execute('SELECT name FROM research_group') return [row[0] for row in cursor] def get_research_groups(self, active_only): # TODO #2 catching empty? """ Fetches all research groups from the database. :param active_only: Only return active research groups. :return: A list with all the active and/or non-active research groups. """ return [self.get_research_group(name) for name in self.get_group_names(active_only)] def get_research_group(self, group_name): # TODO #2 """ Retrieves all the data of a given research group. :param group_name: The name of the research group to fetch. :return: Research group object. """ cursor = self.dbconnect.get_cursor() """General info""" cursor.execute( 'SELECT name, abbreviation, logo_location, description_id, address, telephone_number' ', is_active FROM research_group WHERE name=%s', (group_name,)) row = cursor.fetchone() group = ResearchGroup(row[0], row[1], row[2], row[3], row[4], row[5], row[6]) """Descriptions""" cursor.execute('SELECT html_content_nl, html_content_eng FROM document WHERE document_id=%s', (group.description_id,)) row = cursor.fetchone() if row is not None: group.description_nl = row[0] group.description_eng = row[1] """Contact person""" cursor.execute('SELECT contact_person FROM contact_person WHERE research_group=%s', (group_name,)) row = cursor.fetchone() if row is not None: employee = EmployeeDataAccess(self.dbconnect).get_employee(row[0]) group.contact_person = employee.name return group def add_research_group(self, obj): """ Adds a research group to the database. :param obj: The new research group. :raise: Exception if the database has to roll back. 
""" cursor = self.dbconnect.get_cursor() try: cursor.execute('INSERT INTO research_group(name, abbreviation, logo_location, description_id, address, ' 'telephone_number, is_active) VALUES(%s,%s,%s,%s,%s,%s,%s)', (obj.name, obj.abbreviation, obj.logo_location, obj.description_id, obj.address, obj.telephone_number, obj.is_active)) self.dbconnect.commit() except: self.dbconnect.rollback() raise def update_research_group(self, group_name, obj): """ Updates a research group in the database. :param group_name: The original name of the group. :param obj: New research group. :raise: Exception if the database has to roll back. """ cursor = self.dbconnect.get_cursor() try: cursor.execute('UPDATE research_group ' 'SET name = %s, abbreviation = %s, logo_location = %s, description_id = %s, ' 'address = %s, telephone_number = %s, is_active = %s ' 'WHERE name=%s', (obj.name, obj.abbreviation, obj.logo_location, obj.description_id, obj.address, obj.telephone_number, obj.is_active, group_name)) self.dbconnect.commit() except: self.dbconnect.rollback() raise def set_active(self, group_name, active): """ Changes the status of the group. :param group_name: The group to change. :param active: The new active status. :raise: Exception if the database has to roll back. """ cursor = self.dbconnect.get_cursor() try: cursor.execute('UPDATE research_group ' 'SET is_active = %s ' 'WHERE name=%s', (active, group_name)) self.dbconnect.commit() except: self.dbconnect.rollback() raise def set_contact_person(self, group_name, contact_person_id): """ Sets the contact person of a group. :param group_name: The research group name. :param contact_person_id: The ID of contact person of the group. :raise: Exception if the database has to roll back. """ cursor = self.dbconnect.get_cursor() try: cursor.execute('DELETE from contact_person ' 'WHERE research_group = %s', (group_name, )) self.dbconnect.commit() employee = EmployeeDataAccess(self.dbconnect).get_employee_by_name(contact_person_id) cursor.execute('INSERT INTO contact_person VALUES (%s, %s)', (employee.e_id, group_name)) self.dbconnect.commit() except: self.dbconnect.rollback() raise
python
# MIT License # # Copyright (C) IBM Corporation 2018 # # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the # Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ This module implements the abstract base class for all poison filtering defences. """ from __future__ import absolute_import, division, print_function, unicode_literals import abc import sys # Ensure compatibility with Python 2 and 3 when using ABCMeta if sys.version_info >= (3, 4): ABC = abc.ABC else: ABC = abc.ABCMeta(str('ABC'), (), {}) class PoisonFilteringDefence(ABC): """ Base class for all poison filtering defences. """ defence_params = ['classifier'] def __init__(self, classifier, x_train, y_train): """ Create an :class:`.ActivationDefence` object with the provided classifier. :param classifier: model evaluated for poison :type classifier: :class:`art.classifiers.classifier.Classifier` :param x_train: dataset used to train the classifier. :type x_train: :class:`numpy.ndarray` :param y_train: labels used to train the classifier. :type y_train: :class:`numpy.ndarray` """ self.classifier = classifier self.x_train = x_train self.y_train = y_train @abc.abstractmethod def detect_poison(self, **kwargs): """ Detect poison. :param kwargs: Defence-specific parameters used by child classes. :type kwargs: `dict` :return: `(dict, list)` dictionary with report and list with items identified as poison """ raise NotImplementedError @abc.abstractmethod def evaluate_defence(self, is_clean, **kwargs): """ Evaluate the defence given the labels specifying if the data is poisoned or not. :param is_clean: 1-D array where is_clean[i]=1 means x_train[i] is clean and is_clean[i]=0 that it's poison. :param kwargs: Defence-specific parameters used by child classes. :type kwargs: `dict` :return: JSON object with confusion matrix """ raise NotImplementedError def set_params(self, **kwargs): """ Take in a dictionary of parameters and apply attack-specific checks before saving them as attributes. :param kwargs: a dictionary of defence-specific parameters :type kwargs: `dict` :return: `True` when parsing was successful """ for key, value in kwargs.items(): if key in self.defence_params: setattr(self, key, value) return True def get_params(self): """ Returns dictionary of parameters used to run defence. :return: `dict` """ dictionary = {} for param in self.defence_params: dictionary.update({param: getattr(self, param)}) return dictionary
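# A minimal, illustrative subclass showing what the abstract interface above expects concrete
# defences to provide. The "flag everything as clean" logic is a placeholder rather than a real
# detection method, and the class name is invented for this sketch.
class TrivialFilteringDefence(PoisonFilteringDefence):
    """Toy defence that never flags any training point as poisoned."""

    def detect_poison(self, **kwargs):
        # Mark every training point as clean (1) and report how many were flagged.
        is_clean_lst = [1] * len(self.x_train)
        report = {'flagged': 0, 'total': len(self.x_train)}
        return report, is_clean_lst

    def evaluate_defence(self, is_clean, **kwargs):
        # With everything predicted clean, accuracy is just the fraction of truly clean points.
        is_clean = list(is_clean)
        accuracy = sum(1 for flag in is_clean if flag == 1) / float(len(is_clean))
        return '{"accuracy": %.3f}' % accuracy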
python
import jimi, requests

def reloadModule(module):
    # Apply system updates
    clusterMembers = jimi.cluster.getAll()
    for clusterMember in clusterMembers:
        headers = { "x-api-token" : jimi.auth.generateSystemSession() }
        requests.get("{0}{1}system/update/{2}/".format(clusterMember,jimi.api.base,jimi.cluster.getMasterId()),headers=headers, timeout=60)
        requests.get("{0}{1}system/reload/module/{2}/".format(clusterMember,jimi.api.base,module),headers=headers, timeout=60)
python
""" A coordinate transformation module. Made as a separate chunk of code to allow for easier implementation of newer/better reference frame translation methods. Generally used to project a trajectory in ECEF coordinates (eg lat/lon) into a projected reference system. ##just getting started! """ #collect dependencies import numpy as np import sys import pyproj as prj
python
from __future__ import absolute_import from six.moves.urllib.parse import urlparse from django.utils.translation import ugettext_lazy as _ from django import forms from sentry import http from sentry.web.helpers import render_to_response from sentry.identity.pipeline import IdentityProviderPipeline from sentry.identity.gitlab import get_user_info from sentry.identity.gitlab.provider import GitlabIdentityProvider from sentry.integrations import IntegrationInstallation, IntegrationFeatures, IntegrationProvider, IntegrationMetadata from sentry.pipeline import NestedPipelineView, PipelineView from sentry.utils.http import absolute_uri from .client import GitLabApiClient, GitLabApiClientPath DESCRIPTION = """ Fill me out """ FEATURES = [] metadata = IntegrationMetadata( description=DESCRIPTION.strip(), features=FEATURES, author='The Sentry Team', noun=_('Installation'), issue_url='https://github.com/getsentry/sentry/issues/', source_url='https://github.com/getsentry/sentry/tree/master/src/sentry/integrations/gitlab', aspects={}, ) class GitlabIntegration(IntegrationInstallation): def __init__(self, *args, **kwargs): super(GitlabIntegration, self).__init__(*args, **kwargs) self.default_identity = None def get_client(self): if self.default_identity is None: self.default_identity = self.get_default_identity() return GitLabApiClient(self) class InstallationForm(forms.Form): url = forms.CharField( label=_("Installation Url"), help_text=_('The "base URL" for your gitlab instance, ' 'includes the host and protocol.'), widget=forms.TextInput( attrs={'placeholder': 'https://github.example.com'} ), ) name = forms.CharField( label=_("Gitlab App Name"), help_text=_('The name of your OAuth Application in Gitlab. ' 'This can be found on the apps configuration ' 'page. 
(/profile/applications)'), widget=forms.TextInput( attrs={'placeholder': _('Sentry App')} ) ) group = forms.CharField( label=_("Gitlab Group Name"), widget=forms.TextInput( attrs={'placeholder': _('my-awesome-group')} ) ) verify_ssl = forms.BooleanField( label=_("Verify SSL"), help_text=_('By default, we verify SSL certificates ' 'when delivering payloads to your Gitlab instance'), widget=forms.CheckboxInput(), required=False ) client_id = forms.CharField( label=_("Gitlab Application ID"), widget=forms.TextInput( attrs={'placeholder': _( '5832fc6e14300a0d962240a8144466eef4ee93ef0d218477e55f11cf12fc3737')} ) ) client_secret = forms.CharField( label=_("Gitlab Application Secret"), widget=forms.TextInput( attrs={'placeholder': _('XXXXXXXXXXXXXXXXXXXXXXXXXXX')} ) ) def __init__(self, *args, **kwargs): super(InstallationForm, self).__init__(*args, **kwargs) self.fields['verify_ssl'].initial = True class InstallationConfigView(PipelineView): def dispatch(self, request, pipeline): form = InstallationForm(request.POST) if form.is_valid(): form_data = form.cleaned_data pipeline.bind_state('installation_data', form_data) pipeline.bind_state('oauth_config_information', { "access_token_url": u"{}/oauth/token".format(form_data.get('url')), "authorize_url": u"{}/oauth/authorize".format(form_data.get('url')), "client_id": form_data.get('client_id'), "client_secret": form_data.get('client_secret'), "verify_ssl": form_data.get('verify_ssl') }) return pipeline.next_step() project_form = InstallationForm() return render_to_response( template='sentry/integrations/gitlab-config.html', context={ 'form': project_form, }, request=request, ) class GitlabIntegrationProvider(IntegrationProvider): key = 'gitlab' name = 'Gitlab' metadata = metadata integration_cls = GitlabIntegration needs_default_identity = True features = frozenset([ IntegrationFeatures.ISSUE_BASIC, ]) setup_dialog_config = { 'width': 1030, 'height': 1000, } def _make_identity_pipeline_view(self): """ Make the nested identity provider view. It is important that this view is not constructed until we reach this step and the ``oauth_config_information`` is available in the pipeline state. This method should be late bound into the pipeline vies. 
""" identity_pipeline_config = dict( oauth_scopes=( 'api', 'sudo', ), redirect_url=absolute_uri('/extensions/gitlab/setup/'), **self.pipeline.fetch_state('oauth_config_information') ) return NestedPipelineView( bind_key='identity', provider_key='gitlab', pipeline_cls=IdentityProviderPipeline, config=identity_pipeline_config, ) def get_oauth_data(self, payload): data = {'access_token': payload['access_token']} # https://docs.gitlab.com/ee/api/oauth2.html#2-requesting-access-token # doesn't seem to be correct, format we actually get: # { # "access_token": "123432sfh29uhs29347", # "token_type": "bearer", # "refresh_token": "29f43sdfsk22fsj929", # "created_at": 1536798907, # "scope": "api sudo" # } if 'refresh_token' in payload: data['refresh_token'] = payload['refresh_token'] if 'token_type' in payload: data['token_type'] = payload['token_type'] return data def get_group_info(self, access_token, installation_data): session = http.build_session() resp = session.get( GitLabApiClientPath.build_api_url( base_url=installation_data['url'], path=GitLabApiClientPath.group.format( group=installation_data['group'], ) ), headers={ 'Accept': 'application/json', 'Authorization': 'Bearer %s' % access_token, }, verify=installation_data['verify_ssl'] ) resp.raise_for_status() return resp.json() def get_pipeline_views(self): return [InstallationConfigView(), lambda: self._make_identity_pipeline_view()] def build_integration(self, state): data = state['identity']['data'] oauth_data = self.get_oauth_data(data) user = get_user_info(data['access_token'], state['installation_data']) group = self.get_group_info(data['access_token'], state['installation_data']) scopes = sorted(GitlabIdentityProvider.oauth_scopes) base_url = state['installation_data']['url'] integration = { 'name': group['name'], 'external_id': u'{}:{}'.format(urlparse(base_url).netloc, group['id']), 'metadata': { 'icon': group['avatar_url'], 'domain_name': group['web_url'].replace('https://', ''), 'scopes': scopes, 'verify_ssl': state['installation_data']['verify_ssl'], 'base_url': base_url, }, 'user_identity': { 'type': 'gitlab', 'external_id': u'{}:{}'.format(urlparse(base_url).netloc, user['id']), 'scopes': scopes, 'data': oauth_data, }, } return integration
python
'''
Created by Sidhant Nagpal
Feb 1, 2018
'''

from matplotlib import pyplot as plt
from random import shuffle
import numpy as np
import json

plt.figure(figsize=(12,6))
data = json.load(open('data.json'))
a = [(k,v) for k, v in data.iteritems()]

for i in xrange(2,len(a)):
    if a[i-2]>a[i] and a[i-2]>a[i-1]:
        a[i-2], a[i] = a[i], a[i-2]
    elif a[i]>a[i-2] and a[i]>a[i-1]:
        a[i-1], a[i] = a[i], a[i-1]

values = [y for x, y in a]
probs = sum(values)
labels = ['{} ({}) ({:.1f}%)'.format(x,y,100.*y/probs) for x, y in a]
colors = ['crimson','lightcoral','darkcyan','green','coral','orange','seagreen','purple','gold','mediumvioletred','darkturquoise','greenyellow','indigo','limegreen']
shuffle(colors)
colors = colors[:len(a)]

patches, texts = plt.pie(values, colors=colors, frame=True, shadow=True, startangle=100)
plt.axis('equal')
plt.title('Total Solved = {}'.format(probs), loc='left')
plt.legend(patches, labels, loc='lower right')
plt.tight_layout()
plt.show()
python
# -*- coding: utf-8 -*-
"""Simple ClaSP test."""

__author__ = ["patrickzib"]
__all__ = []

import numpy as np

from sktime.annotation.clasp import ClaSPSegmentation
from sktime.datasets import load_gun_point_segmentation


def test_clasp_sparse():
    """Test ClaSP sparse segmentation.

    Check if the predicted change points match.
    """
    # load the test dataset
    ts, period_size, cps = load_gun_point_segmentation()

    # compute a ClaSP segmentation
    clasp = ClaSPSegmentation(period_size, n_cps=1)
    clasp.fit(ts)
    found_cps = clasp.predict(ts)
    scores = clasp.predict_scores(ts)

    assert len(found_cps) == 1 and found_cps[0] == 893
    assert len(scores) == 1 and scores[0] > 0.74


def test_clasp_dense():
    """Tests ClaSP dense segmentation.

    Check if the predicted segmentation matches.
    """
    # load the test dataset
    ts, period_size, cps = load_gun_point_segmentation()

    # compute a ClaSP segmentation
    clasp = ClaSPSegmentation(period_size, n_cps=1, fmt="dense")
    clasp.fit(ts)
    segmentation = clasp.predict(ts)
    scores = clasp.predict_scores(ts)

    assert len(segmentation) == 2 and segmentation[0].right == 893
    assert np.argmax(scores) == 893
python
from string import ascii_uppercase from tkinter import * from analyst import BoardAnalyst from board import Board, Color class MainMenuWindow: """ A class that represents a Main Menu. Can branch to a NameWindow, to an AboutWindow or to a GoodByeWindow On button 1: Branch to a NameWindow, which will eventually start a new game. On button 2: Branch to an AboutWindow, which can only return to a MainMenuWindow. On button 3: If any player has given a name, branch to a GoodByeWindow and pass it the player name. Else, kill app. """ def __init__(self, player_name=None): """ Layout of MainMenuWindow is as follows: root | +--frame | +--Button (Nuevo juego) +--Button (Acerca de) +--Button (Salir) """ self.name = player_name self.root = Tk() self.root.focus_force() self.root.geometry("+100+100") Grid.columnconfigure(self.root, 0, weight=1) Grid.rowconfigure(self.root, 0, weight=1) frame = Frame(self.root, borderwidth=10) frame.grid(row=0, column=0, sticky=N + S + E + W) Button(frame, text="Nuevo juego", command=lambda: self.start_new_game()).grid(row=0, column=0, sticky=E + W) Button(frame, text="Acerca de...", command=lambda: self.show_about()).grid(row=1, column=0, sticky=E + W) Button(frame, text="Salir", command=lambda: self.exit()).grid(row=2, column=0, sticky=E + W) Grid.columnconfigure(frame, 0, weight=1) for i in range(3): Grid.rowconfigure(frame, i, weight=1) def show(self): self.root.mainloop() def start_new_game(self): new_window = NameWindow(self.name) self.root.destroy() new_window.show() def show_about(self): new_window = AboutWindow(self.name) self.root.destroy() new_window.show() def exit(self): if self.name is not None: new_window = GoodByeWindow(self.name) self.root.destroy() if self.name is not None: new_window.show() class NameWindow: """ A class that represents a Window that asks the user for his/her name. Will only branch to a GameWindow. """ def __init__(self, last_player_name): """ Layout of NameWindow is as follows: root | +--Label +--TextField +--Button (OK) """ self.root = Tk() self.root.geometry("+100+100") Grid.columnconfigure(self.root, 0, weight=1) Grid.rowconfigure(self.root, 0, weight=1) Label(self.root, text="Nombre").grid(row=0, column=0, padx=5, pady=5) self.text_field = Entry(self.root, justify=CENTER) if last_player_name is not None: self.text_field.insert(END, last_player_name) self.text_field.bind("<Return>", self.start_game) self.text_field.focus_force() self.text_field.select_range(0, END) self.text_field.grid(row=1, column=0, sticky=E + W, padx=5, pady=5) Button(self.root, text="OK", command=lambda: self.start_game(None)).grid(row=2, column=0, padx=5, pady=5) def show(self): self.root.mainloop() def start_game(self, _): board = Board() board.random_fill() analyst = BoardAnalyst(board) new_window = GameWindow(self.text_field.get() if len(self.text_field.get()) > 0 else "Sin nombre", board, analyst) self.root.destroy() new_window.show() class AboutWindow: """ A class that represent a Window that shows information about the program. 
Can only branch to a MainMenuWindow """ def __init__(self, player_name): """ Layout of NameWindow is as follows: root | +--frame | | | +--Text | +--second_frame | +--Button (OK) """ self.name = player_name self.root = Tk() self.root.geometry("700x300+100+100") Grid.rowconfigure(self.root, 0, weight=1) Grid.columnconfigure(self.root, 0, weight=1) frame = Frame(self.root, borderwidth=10) text = Text(frame) text.pack(fill=BOTH, expand=1) text.insert(END, "Acerca del juego\n\n") text.insert(END, "El juego consiste en eliminar los cuadros adyacentes del mismo color de un tablero.\n") text.insert(END, "Los cuadros están colocados de manera aleatoria.\n") text.insert(END, "Cuando se eliminan cuadros, los demás se desplazan hacia abajo.\n\n") text.insert(END, "Diseñado para Fundamentos de Programación, ESPOL\n") text.insert(END, "Anthony Adachi (KimCordero213)\nJosé Reyes (jreyesr, 0xC0FFEE)\n\n") import datetime text.insert(END, datetime.date.today().strftime("%A, %d/%m/%Y")) frame.grid(row=0, column=0, sticky=N + S + E + W) second_frame = Frame(self.root, borderwidth=10) second_frame.grid(row=1, column=0) ok_button = Button(second_frame, text="OK", command=lambda: self.close(None)) ok_button.grid(row=0, column=0) ok_button.focus_force() ok_button.bind("<Return>", self.close) def show(self): self.root.mainloop() def close(self, _): new_window = MainMenuWindow(self.name) self.root.destroy() new_window.show() class GameWindow: """ A class that represents a Game Window, where most of the processing happens. Can only branch to a GameOverWindow """ def __init__(self, player_name, board, analyst): """ Layout of GameWindow is as follows: root | +--upper_frame | | | +--Labels (in row (Board.SIZE+1) and column 1), total Board.SIZE*2 | +--Buttons (in rows 1 to Board.SIZE and columns 2 to Board.SIZE+1, total Board.SIZE^2 | +--lower_frame | +--Label (Puntos...) 
+--Button (Terminar juego) """ self.player_name = player_name self.score = 0 self.board = board self.analyst = analyst self.buttons = [[0 for _ in range(self.board.SIZE + 1)] for _ in range(self.board.SIZE + 1)] self.root = Tk() self.root.focus_force() self.root.geometry("500x500+100+100") Grid.rowconfigure(self.root, 0, weight=1) Grid.columnconfigure(self.root, 0, weight=1) upper_frame = Frame(self.root, borderwidth=10) upper_frame.grid(row=0, column=0, sticky=N + S + E + W) for row_index in range(self.board.SIZE): Grid.rowconfigure(upper_frame, row_index, weight=1) for col_index in range(self.board.SIZE): Grid.columnconfigure(upper_frame, col_index + 1, weight=1) btn = Button(upper_frame, command=lambda x=row_index, y=col_index: self.button_clicked(x, y)) btn.configure(bg=self.get_color(row_index, col_index)) self.buttons[row_index][col_index] = btn btn.grid(row=row_index, column=col_index + 1, sticky=N + S + E + W, padx=2, pady=2) # Set labels for i in range(self.board.SIZE): Label(upper_frame, text=ascii_uppercase[i]).grid(row=i, column=0, sticky=N + S + E + W) for j in range(self.board.SIZE): Label(upper_frame, text=str(j + 1)).grid(row=self.board.SIZE, column=j + 1, sticky=N + S + E + W) # Set additional info (score, exit button) lower_frame = Frame(self.root) lower_frame.grid(row=1, column=0, sticky=N + S + E + W) Grid.rowconfigure(lower_frame, 0, weight=1) Grid.columnconfigure(lower_frame, 0, weight=1) Grid.columnconfigure(lower_frame, 1, weight=1) # Score label lbl = Label(lower_frame, text="Puntos: 0") self._score_label = lbl lbl.grid(row=0, column=0, sticky=N + S + E + W, padx=5, pady=5) # Exit game button Button(lower_frame, text="Terminar juego", command=lambda: self.end_game()).grid(row=0, column=1, sticky=N + S + E + W, padx=20, pady=5) def show(self): self.root.mainloop() def end_game(self): new_window = GameOverWindow(self.player_name, self.score) self.root.destroy() new_window.show() def button_clicked(self, i, j): """ To be called when a button on the button grid is clicked. If item in said position in the board is not Blank and has friends, remove all friends and update score, board and grid accordingly. If there are not any friends for any button, end game automatically. """ if not self.analyst.has_friends(i, j) or self.board.item(i, j) == Color.Blank: return to_clear = self.analyst.all_friends(i, j) self.score += self.analyst.score(to_clear) self.board.clear_items(to_clear) self.board.compact_all() self._score_label.configure(text="Puntos: {}".format(self.score)) if not self.analyst.any_friends(): self.end_game() self.update_button_colors() def update_button_colors(self): """ Updates the button grid with the new colors. To be called after changing the Board. """ for i in range(self.board.SIZE): for j in range(self.board.SIZE): try: self.buttons[i][j].configure(bg=self.get_color(i, j)) except TclError: pass def get_color(self, i, j): """ Return a string representation for the color in position (i, j) in the Board :param i: The row of the item :param j: The column of the item :return: A string to be used in bg """ if self.board.item(i, j) == Color.A: return 'red' elif self.board.item(i, j) == Color.B: return 'green' elif self.board.item(i, j) == Color.C: return 'blue' elif self.board.item(i, j) == Color.D: return 'yellow' else: return 'gray' class GameOverWindow: """ A class representing a 'Game Over' window. Can only branch to a MainMenuWindow. 
""" def __init__(self, player_name, score): """ Layout of GameOverWindow is as follows: root | +--frame | +--Label (player name) +--Label (score) +--Button (OK) """ self.player_name = player_name self.score = score self.root = Tk() self.root.geometry("+100+100") Grid.columnconfigure(self.root, 0, weight=1) Grid.rowconfigure(self.root, 0, weight=1) frame = Frame(self.root, borderwidth=10) frame.grid(row=0, column=0, sticky=N + S + E + W) Label(frame, text=player_name).grid(row=0, column=0) Label(frame, text="{} puntos".format(score)).grid(row=1, column=0) ok_button = Button(frame, text="OK", command=lambda: self.close(None)) ok_button.grid(row=2, column=0) ok_button.focus_force() ok_button.bind("<Return>", self.close) Grid.columnconfigure(frame, 0, weight=1) Grid.rowconfigure(frame, 0, weight=1) Grid.rowconfigure(frame, 1, weight=1) def close(self, _): new_window = MainMenuWindow(self.player_name if self.player_name != "Sin nombre" else None) self.root.destroy() new_window.show() def show(self): self.root.mainloop() class GoodByeWindow: """ A class representing a 'Goodbye' window. Will only branch to nothingness... Is only called when MainMenuWindow has a player name stored """ def __init__(self, player_name): """ Layout of GoodByeWindow is as follows: root | +--frame | +--Label (player name, goodbye message) +--Button (OK) """ self.player_name = player_name self.root = Tk() self.root.geometry("+100+100") Grid.columnconfigure(self.root, 0, weight=1) Grid.rowconfigure(self.root, 0, weight=1) frame = Frame(self.root, borderwidth=10) frame.grid(row=0, column=0, sticky=N + S + E + W) Label(frame, text="Hasta luego, {}".format(player_name)).grid(row=0, column=0, pady=5) ok_button = Button(frame, text="OK", command=lambda: self.close(None)) ok_button.grid(row=1, column=0) ok_button.focus_force() ok_button.bind("<Return>", self.close) Grid.columnconfigure(frame, 0, weight=1) Grid.rowconfigure(frame, 0, weight=1) Grid.rowconfigure(frame, 1, weight=1) def close(self, _): self.root.destroy() def show(self): self.root.mainloop()
python
from typing import List


class Solution:
    def plusOne(self, digits: List[int]) -> List[int]:
        carry = (digits[-1] + 1) > 9
        digits[-1] = (digits[-1] + 1) % 10

        for i in reversed(range(len(digits) - 1)):
            temp = carry
            carry = (digits[i] + carry > 9)
            digits[i] = (digits[i] + temp) % 10

        if carry == 1:
            return [1, *digits]
        return digits
python
# Import libraries from collections import Counter, OrderedDict from itertools import chain from more_itertools import unique_everseen import numpy as np import pandas as pd import random import tensorflow as tf from keras import models import warnings import functools import operator warnings.filterwarnings("ignore") def get_df(): """ Returns main dataframe used in the project """ # Path to file hotels_path = "../data/clean_hotels_scraped_v2.csv" # Dataframe hotels_df = pd.read_csv(hotels_path, usecols = ["city", "country", "hotel_name", "rating", "address", "popularity_rating", "locality", "price", "landmark", "URL"]) return hotels_df def get_model(): """ Return model architecture and weights """ # Import embeddings model and weights model = models.load_model("../models/nn_scraped_hotels.h5") model.load_weights("../models/nn_scraped_hotels_weights.h5") return model def get_int_mapping(dataframe, column): """ Returns index, reverse_index, and list of unique items in a pandas datframe """ # Convert series to list column_to_list = dataframe[column].tolist() # Find set of unique items and convert to a list unique_items_list = list(unique_everseen(column_to_list)) # Create indexes for each item item_index = {item: idx for idx, item in enumerate(unique_items_list)} index_item = {idx: item for item, idx in item_index.items()} return item_index, index_item, unique_items_list def get_embeddings(layer_name): """ Given a model and a layer name, this function returns the normalized embedding [weights] for said layer """ # Get model model = get_model() # Get layer item_layer = model.get_layer(layer_name) # Get weights item_weights = item_layer.get_weights()[0] # Normalize the embeddings so that we can calculate cosine similarity item_weights = item_weights / np.linalg.norm(item_weights, axis = 1).reshape((-1, 1)) return item_weights def find_similar(name, weights, index_name = "hotel_name", n = 10, plot = True, filtering = False, filter_name = None): """ Return most similar items """ index = hotel_index rindex = index_hotel # Select index and reverse index if index_name == "city": index = city_index rindex = index_city if index_name == "country": index = country_index rindex = index_country if index_name == "rating": index = rating_index rindex = index_rating if index_name == "popularity_rating": index = popularity_index rindex = index_popularity if index_name == "locality": index = locality_index rindex = index_locality if index_name == "price": index = price_index rindex = index_price if index_name == "landmark": index = landmark_index rindex = index_landmark # Check name is in index try: # Calculate dot product between item/property and all others distances = np.dot(weights, weights[index[name]]) except KeyError: print(" {} Not Found.".format(name)) return # Sort distances from smallest to largest sorted_distances = np.argsort(distances) # Find the most similar closest = sorted_distances[-n:] # Limit results by filtering filter_ = None hotel_name = [] city = [] country = [] url = [] landmark = [] locality = [] rating = [] # Limit results by filtering filter_ = None filtered_results = [] if filtering: for idxs, rows in hotels_df.iterrows(): if hotels_df.at[idxs, index_name] == name: filter_ = hotels_df.at[idxs, filter_name] break match_df = hotels_df[hotels_df[filter_name].str.match(filter_)] match_df = match_df.reset_index(drop = True) match_df["distance"] = None for idxs, rows in match_df.iterrows(): item = match_df.at[idxs, index_name] distance = np.dot(weights[index[item]], 
weights[index[name]]) match_df.loc[match_df.index[idxs], "distance"] = distance match_df = match_df.sort_values(by = ["distance"], axis = 0, ascending = False) list_of_filtered_items = match_df[index_name].to_list() list_of_filtered_distances = match_df["distance"].to_list() list_of_filtered_results = list(zip(list_of_filtered_items, list_of_filtered_distances)) for item in list_of_filtered_results[1:]: if item not in filtered_results: filtered_results.append(item) if plot: # Find closest and most far away item closest = filtered_results[:n // 2] far_away = filtered_results[-n-1: len(filtered_results) - 1] to_plot = [c[0] for c in closest] to_plot.extend(c[0] for c in far_away) # Find distances dist = [c[1] for c in closest] dist.extend(c[1] for c in far_away) # Colors colors = ["darkturquoise" for _ in range(n)] colors.extend("hotpink" for _ in range(n // 2)) # Data in DataFrame data = pd.DataFrame({"distance": dist}, index = to_plot) # Bar chart data["distance"].plot.barh(color = colors, figsize = (10, 8), edgecolor = "k", linewidth = 2) plt.xlabel("Cosine Similarity"); plt.axvline(x = 0, color = "k"); # Title name_str = "Most and Least Similar to {}".format(name) plt.title(name_str, x = 0.2, size = 28, y = 1.05) return None return None # Plot results if plot: # Find closest and most far away item far_away = sorted_distances[:n // 2] closest = sorted_distances[-n-1: len(distances) - 1] to_plot = [rindex[c] for c in far_away] to_plot.extend(rindex[c] for c in closest) # Find distances dist = [distances[c] for c in far_away] dist.extend(distances[c] for c in closest) # Colors colors = ["hotpink" for _ in range(n // 2)] colors.extend("darkturquoise" for _ in range(n)) # Data in DataFrame data = pd.DataFrame({"distance": dist}, index = to_plot) # Bar chart data["distance"].plot.barh(color = colors, figsize = (10, 8), edgecolor = "k", linewidth = 2) plt.xlabel("Cosine Similarity"); plt.axvline(x = 0, color = "k"); # Title name_str = "Most and Least Similar to {}".format(name) plt.title(name_str, x = 0.2, size = 28, y = 1.05) return None
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define linear function approximator.

Dependencies:
- `pyrobolearn.models`
- `pyrobolearn.states`
- `pyrobolearn.actions`
"""

from pyrobolearn.approximators.approximator import Approximator
from pyrobolearn.models.basics.polynomial import Polynomial, PolynomialFunction

__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "[email protected]"
__status__ = "Development"


class PolynomialApproximator(Approximator):
    r"""Polynomial Function Approximator

    The polynomial function approximator is a discriminative deterministic model expressed mathematically as
    :math:`y = f(x) = W \phi(x)`, where :math:`x` is the input vector, :math:`y` is the output vector, :math:`W`
    is the weight matrix, and :math:`\phi` is the polynomial function which returns the transformed input vector.
    This transformed input vector is often of higher dimension, based on the idea that if it is not linear with
    respect to the parameters in the current space, it might be in a higher dimensional space.
    """

    def __init__(self, inputs, outputs, degree=1, preprocessors=None, postprocessors=None):
        """
        Initialize the polynomial approximator.

        Args:
            inputs (State, Action, np.array, torch.Tensor): inputs of the inner models (instance of Action/State)
            outputs (State, Action, np.array, torch.Tensor): outputs of the inner models (instance of Action/State)
            degree (int, list of int, np.array[D]): degree(s) of the polynomial. Setting `degree=3`, will apply
                `[1,x,x^2,x^3]` to the inputs, while setting `degree=[1,3]` will apply `[x,x^3]` to the inputs.
            preprocessors (None, Processor, list of Processor): the inputs are first given to the preprocessors then
                to the model.
            postprocessors (None, Processor, list of Processor): the predicted outputs by the model are given to the
                processors before being returned.
        """
        # create inner model
        polynomial_fct = PolynomialFunction(degree=degree)
        model = Polynomial(num_inputs=self._size(inputs), num_outputs=self._size(outputs),
                           polynomial_fct=polynomial_fct)

        # call parent class
        super(PolynomialApproximator, self).__init__(inputs, outputs, model=model, preprocessors=preprocessors,
                                                     postprocessors=postprocessors)
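# --- Illustrative sketch, not from PyRoboLearn ---
# A tiny NumPy illustration of the y = W * phi(x) idea described in the class
# docstring, using the degree=3 feature map [1, x, x^2, x^3]. All names below
# are invented for this example only.
import numpy as np

def _phi(x, degree=3):
    # concatenate element-wise powers of the input: [x^0, x^1, ..., x^degree]
    return np.concatenate([x ** d for d in range(degree + 1)])

_x = np.array([0.5, -1.0])
_W = np.random.randn(1, _phi(_x).size)   # one output dimension, random weights
_y = _W @ _phi(_x)                       # linear in W, polynomial in x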
python
import requests
from bs4 import BeautifulSoup

server_address = 'http://127.0.0.1:5000'


def getElementById(html, theId):
    soup = BeautifulSoup(html, 'html.parser')
    r = soup.find(id=theId)
    return r


def register(uname, pword, twofa, session=None):
    url = server_address + '/register'
    if session is None:
        session = requests.session()
    session.close()
    credentials = {'uname': uname, 'pword': pword, '2fa': twofa}
    r = session.post(url, data=credentials)
    result = getElementById(r.text, 'success')
    if result is None:
        print('Unable to find id=result')
        return {'result': False, 'session': session}
    if 'success' in result.text:
        # Server response = successful
        return {'result': True, 'session': session}
    elif 'failure' in result.text:
        # Server response = failed
        return {'result': False, 'explicit_failure': True, 'session': session}
    else:
        # No response from server
        return {'result': False, 'explicit_failure': False, 'session': session}


def login(uname, pword, twofa, session=None):
    url = server_address + '/login'
    if session is None:
        session = requests.session()
    session.close()  # close any previous session if exist
    creds = {'uname': uname, 'pword': pword, '2fa': twofa}
    r = session.post(url, data=creds)
    result = getElementById(r.text, 'result')
    if result is None:
        print('Cannot find id=result in response')
        return {'result': False, 'session': session}
    if 'success' in result.text:
        return {'result': True, 'session': session}
    else:
        return {'result': False, 'session': session}


def index_page_exists():
    req = requests.get(server_address + '/')
    assert req.status_code == 200, "Status code not 200"


def login_page_exists():
    req = requests.get(server_address + '/login')
    assert req.status_code == 200, "Status code not 200"


def register_page_exists():
    req = requests.get(server_address + '/register')
    assert req.status_code == 200, "Status code not 200"


def spell_page_exists():
    req = requests.get(server_address + '/spell_check')
    assert req.status_code == 200, "Status code not 200"


def logout_page_exists():
    req = requests.get(server_address + '/logout')
    assert req.status_code == 200, "Status code not 200"
python
import random
import gym
import numpy as np

M = 5.0
T = 1.0
GOAL = 0.001


class WeightEnv(gym.Env):
    metadata = {'render.modes': ['human']}

    def __init__(self):
        super(WeightEnv, self).__init__()
        self.reward_range = (-float('inf'), 0.0)
        self.state = np.array([0, 0, 0])  # position, velocity, acceleration

        # action: force[-10, 10]
        self.action_space = gym.spaces.Box(low=-10, high=10, shape=(1,), dtype=np.float32)

        # observation: position[-10,10], velocity[-10,10], acceleration[-10,10], jerk[-10,10]
        self.observation_space = gym.spaces.Box(np.array([-10, -10, -10, -10]),
                                                np.array([10, 10, 10, 10], dtype=np.float32))

        self.steps = 0

    def step(self, action):
        prev_position = self.state[0]
        prev_velocity = self.state[1]
        prev_acceleration = self.state[2]

        action_force = min(max(action[0], -10.0), 10.0)

        next_acceleration = action_force / M
        next_jerk = next_acceleration - prev_acceleration
        next_velocity = prev_velocity + next_acceleration * T
        next_position = prev_position + next_velocity * T

        self.steps += 1
        done = ((abs(next_position) < GOAL) and (abs(next_velocity) < GOAL)) or (self.steps > 100)

        self.state = np.array([next_position, next_velocity, next_acceleration])

        reward = 0.0 - (abs(next_position)**2) - (abs(next_velocity)**2) - (abs(next_acceleration)**2) - (abs(next_jerk)**2)

        return np.array([next_position, next_velocity, next_acceleration, next_jerk]), reward, done, {}

    def seed(self, seed=None):
        self.np_random, seed = gym.utils.seeding.np_random(seed)
        return [seed]

    def reset(self):
        self.steps = 0
        self.state = np.array([self.np_random.uniform(low=-10.0, high=10.0), 0, 0])  # position, velocity, accel
        return np.array([self.state[0], self.state[1], self.state[2], 0])
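# --- Illustrative usage sketch, not part of the original file ---
# Rough example of driving WeightEnv with the classic gym API used above
# (reset() returning an observation, step() returning a 4-tuple). The random
# policy is just a placeholder for illustration; seed() must be called before
# reset() because reset() relies on self.np_random.
if __name__ == "__main__":
    env = WeightEnv()
    env.seed(0)
    obs = env.reset()
    total_reward = 0.0
    done = False
    while not done:
        action = env.action_space.sample()       # placeholder policy
        obs, reward, done, info = env.step(action)
        total_reward += reward
    print("episode return:", total_reward)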
python
PATTERN = r"(doge|shib)"

TRANSFORMER_MODEL = 'cardiffnlp/twitter-xlm-roberta-base-sentiment'

SENTIMENT_MAPPING = {
    'Positive' : 1,
    'Neutral' : 0,
    'Negative' : -1
}
python
"""Two Number Sum Write a function that takes in a non-empy array of distinct integers and an integer representing a target sum. If any two numbers in the input array sum up to the target sum, the function should return them in an array, in any order. If no two numbers sum up to the target sum, the function should return an empty array. Note that the target sum has to be obtained by summing two different integers in the array; you can't add a single integer to itself in order to obtain the target sum. You can assume that there will be at most one pair of numbers summing up to the target sum. Sample Input: array = [3, 5, -4, 8, 11, 1, -1, 6] targetSum = 10 Sample Output: [-1, 11] // the numbers could be in reversed order """ def twoNumberSum(array : list, targetSum : int) -> list: """Finds the two numbers in the array needed to get targetSum This solution has O(n) time complexity | O(n) space complexity Args: array: A list containing all the candidate numbers targetSum: The target number we want to get by adding two numbers from the array Returns: A list containing the two numbers that added give targetSum as a result """ sum = [] diff = [] for e in array: if e in diff: sum.append(e) sum.append(array[diff.index(e)]) break else: diff.append(targetSum - e) return sum
python
from .bmp180 import bmp180
python
"""Pull git repos and update the local schemes and templates files """ import os import sys import shutil import asyncio from .shared import get_yaml_dict, rel_to_cwd, verb_msg, compat_event_loop def write_sources_file(): """Write a sources.yaml file to current working dir.""" file_content = ( "schemes: " "https://github.com/Base24/base24-schemes-source.git\n" "templates: " "https://github.com/Base24/base24-templates-source.git" ) file_path = rel_to_cwd("sources.yaml") with open(file_path, "w") as file_: file_.write(file_content) async def git_clone(git_url, path, verbose=False): """Clone git repository at $git_url to $path. Return True if successful, otherwise False.""" if verbose: print("Cloning {}...".format(git_url)) if os.path.exists(os.path.join(path, ".git")): # get rid of local repo if it already exists shutil.rmtree(path) os.makedirs(path, exist_ok=True) proc_env = os.environ.copy() proc_env["GIT_TERMINAL_PROMPT"] = "0" git_proc = await asyncio.create_subprocess_exec( "git", "clone", git_url, path, stderr=asyncio.subprocess.PIPE, env=proc_env ) _stdout, stderr = await git_proc.communicate() if git_proc.returncode != 0: # remove created directory if it's empty try: os.rmdir(path) except OSError: pass verb_msg("{}:\n{}".format(git_url, stderr.decode("utf-8"))) return False if verbose: print("Cloned {}".format(git_url)) return True async def git_clone_scheduler(yaml_file, base_dir, verbose=False): """Create task list for clone jobs and run them asynchronously.""" jobs = generate_jobs_from_yaml(yaml_file, base_dir) task_list = [git_clone(*args_, verbose=verbose) for args_ in jobs] return await asyncio.gather(*task_list) def generate_jobs_from_yaml(yaml_file, base_dir): """Get a set of jobs from a yaml file """ yaml_dict = get_yaml_dict(yaml_file) for key, value in yaml_dict.items(): yield (value, rel_to_cwd(base_dir, key)) def update(custom_sources=False, verbose=False): """Update function to be called from cli.py""" if not shutil.which("git"): print("Git executable not found in $PATH.") sys.exit(1) results = [] with compat_event_loop() as event_loop: if not custom_sources: print("Creating sources.yaml…") write_sources_file() print("Cloning sources…") r = event_loop.run_until_complete( git_clone_scheduler( rel_to_cwd("sources.yaml"), rel_to_cwd("sources"), verbose=verbose ) ) results.append(r) print("Cloning templates…") r = event_loop.run_until_complete( git_clone_scheduler( rel_to_cwd("sources", "templates", "list.yaml"), rel_to_cwd("templates"), verbose=verbose, ) ) results.append(r) print("Cloning schemes…") r = event_loop.run_until_complete( git_clone_scheduler( rel_to_cwd("sources", "schemes", "list.yaml"), rel_to_cwd("schemes"), verbose=verbose, ) ) results.append(r) return all(results)
python
# This module is derived (with modifications) from # https://github.com/GoogleCloudPlatform/tensorflow-without-a-phd/blob/master/tensorflow-rl-pong/trainer/task.py # Special thanks to: # Yu-Han Liu https://nuget.pkg.github.com/dizcology # Martin Görner https://github.com/martin-gorner # Copyright 2019 Leigh Johnson # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # Python import logging import argparse import os from collections import deque # Lib import tensorflow as tf import numpy as np import gym from trainer.helpers import discount_rewards, preprocess_frame from agents.tools.wrappers import AutoReset, FrameHistory # Legal moves in space invaders are FIRE, RIGHT, LEFT, and DO NOTHING (NOOP or "No operation") ACTIONS = { 0: "NOOP", 1: "FIRE", # 2: "UP", 2: "RIGHT", 3: "LEFT", # 5: "DOWN", # 6: "UPRIGHT", # 7: "UPLEFT", # 8: "DOWNRIGHT", # 9: "DOWNLEFT", # 10: "UPFIRE", # 11: "RIGHTFIRE", # 12: "LEFTFIRE", # 13: "DOWNFIRE", # 14: "UPRIGHTFIRE", # 15: "UPLEFTFIRE", # 16: "DOWNRIGHTFIRE", # 17: "DOWNLEFTFIRE", } MAX_MEMORY_LEN = 100000 ROLLOUT_SIZE = 10000 # We'll be pre-processing inputs into a 105 x 80 image diff (downsampled by a factor of 2) of currentframe - previousframe OBSERVATION_DIM = 105 * 80 # MEMORY stores tuples: # (observation, label, reward) MEMORY = deque([], maxlen=MAX_MEMORY_LEN) def gen(): for m in list(MEMORY): yield m def build_graph(observations): """Calculates logits from the input observations tensor. This function will be called twice: rollout and train. The weights will be shared. """ with tf.variable_scope('model', reuse=tf.AUTO_REUSE): hidden = tf.layers.dense( observations, args.hidden_dim, use_bias=False, activation=tf.nn.relu) logits = tf.layers.dense(hidden, len(ACTIONS), use_bias=False) return logits def main(args): args_dict = vars(args) logging.info('args: {}'.format(args_dict)) with tf.Graph().as_default() as g: # rollout subgraph with tf.name_scope('rollout'): observations = tf.placeholder( shape=(None, OBSERVATION_DIM), dtype=tf.float32) logits = build_graph(observations) logits_for_sampling = tf.reshape( logits, shape=(1, len(ACTIONS))) # Sample the action to be played during rollout. 
sample_action = tf.squeeze(tf.multinomial( logits=logits_for_sampling, num_samples=1)) optimizer = tf.train.RMSPropOptimizer( learning_rate=args.learning_rate, decay=args.rmsprop_decay ) # dataset subgraph for experience replay with tf.name_scope('dataset'): # the dataset reads from MEMORY ds = tf.data.Dataset.from_generator( gen, output_types=(tf.float32, tf.int32, tf.float32)) ds = ds.shuffle(MAX_MEMORY_LEN).repeat().batch(args.batch_size) iterator = ds.make_one_shot_iterator() # training subgraph with tf.name_scope('train'): # the train_op includes getting a batch of data from the dataset, so we do not need to use a feed_dict when running the train_op. next_batch = iterator.get_next() train_observations, labels, processed_rewards = next_batch # This reuses the same weights in the rollout phase. train_observations.set_shape((args.batch_size, OBSERVATION_DIM)) train_logits = build_graph(train_observations) cross_entropies = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=train_logits, labels=labels ) # Extra loss when the paddle is moved, to encourage more natural moves. probs = tf.nn.softmax(logits=train_logits) move_cost = args.move_penalty * \ tf.reduce_sum(probs * [0, 1.0, 1.0, 1.0], axis=1) loss = tf.reduce_sum(processed_rewards * cross_entropies + move_cost) global_step = tf.train.get_or_create_global_step() train_op = optimizer.minimize(loss, global_step=global_step) init = tf.global_variables_initializer() saver = tf.train.Saver(max_to_keep=args.max_to_keep) with tf.name_scope('summaries'): rollout_reward = tf.placeholder( shape=(), dtype=tf.float32 ) # the weights to the hidden layer can be visualized hidden_weights = tf.trainable_variables()[0] for h in range(args.hidden_dim): slice_ = tf.slice(hidden_weights, [0, h], [-1, 1]) image = tf.reshape(slice_, [1, 105, 80, 1]) tf.summary.image('hidden_{:04d}'.format(h), image) for var in tf.trainable_variables(): tf.summary.histogram(var.op.name, var) tf.summary.scalar('{}_max'.format( var.op.name), tf.reduce_max(var)) tf.summary.scalar('{}_min'.format( var.op.name), tf.reduce_min(var)) tf.summary.scalar('rollout_reward', rollout_reward) tf.summary.scalar('loss', loss) merged = tf.summary.merge_all() logging.info('Number of trainable variables: {}'.format( len(tf.trainable_variables()))) inner_env = gym.make('SpaceInvaders-v0') # tf.agents helper to more easily track consecutive pairs of frames env = FrameHistory(inner_env, past_indices=[0, 1], flatten=False) # tf.agents helper to automatically reset the environment env = AutoReset(env) with tf.Session(graph=g) as sess: if args.restore: restore_path = tf.train.latest_checkpoint(args.output_dir) logging.info('Restoring from {}'.format(restore_path)) saver.restore(sess, restore_path) else: sess.run(init) summary_path = os.path.join(args.output_dir, 'summary') summary_writer = tf.summary.FileWriter(summary_path, sess.graph) # lowest possible score after an episode as the # starting value of the running reward _rollout_reward = -21.0 for i in range(args.n_epoch): logging.info('>>>>>>> epoch {}'.format(i+1)) logging.info('>>> Rollout phase') epoch_memory = [] episode_memory = [] # The loop for actions/steps _observation = np.zeros(OBSERVATION_DIM) while True: # sample one action with the given probability distribution _action = sess.run(sample_action, feed_dict={ observations: [_observation]}) _label = ACTIONS[_action] _pair_state, _reward, _done, _ = env.step(_action) if args.render: env.render() # record experience episode_memory.append((_observation, _action, _reward)) # Get 
processed frame delta for the next step pair_state = _pair_state current_state, previous_state = pair_state current_x = preprocess_frame(current_state) previous_x = preprocess_frame(previous_state) _observation = current_x - previous_x if _done: obs, lbl, rwd = zip(*episode_memory) # processed rewards prwd = discount_rewards(rwd, args.reward_decay) prwd -= np.mean(prwd) prwd /= np.std(prwd) # store the processed experience to memory epoch_memory.extend(zip(obs, lbl, prwd)) # calculate the running rollout reward _rollout_reward = 0.9 * _rollout_reward + 0.1 * sum(rwd) episode_memory = [] # if args.render: # _ = input('episode done, press Enter to replay') # epoch_memory = [] # continue if len(epoch_memory) >= ROLLOUT_SIZE: break # add to the global memory MEMORY.extend(epoch_memory) logging.info('>>> Train phase') logging.info('rollout reward: {}'.format(_rollout_reward)) # Here we train only once. _, _global_step = sess.run([train_op, global_step]) if _global_step % args.save_checkpoint_steps == 0: logging.info('Writing summary') feed_dict = {rollout_reward: _rollout_reward} summary = sess.run(merged, feed_dict=feed_dict) summary_writer.add_summary(summary, _global_step) save_path = os.path.join(args.output_dir, 'model.ckpt') save_path = saver.save( sess, save_path, global_step=_global_step) logging.info('Model checkpoint saved: {}'.format(save_path)) def parse_args(): parser = argparse.ArgumentParser('') parser.add_argument( '--loglevel', type=str, default='INFO', choices=['debug', 'info', 'error', 'warning', 'DEBUG', 'INFO', 'ERROR', 'WARNING'] ) parser.add_argument( '--n-epoch', type=int, default=5000, help='Number of iterations (training rounds) to run' ) parser.add_argument( '--batch-size', type=int, default=10000, help='Number of batches to divide dataset into. Each epoch (training round) consists of dataset_size / batch_size training sets' ) parser.add_argument( '--output-dir', type=str, default='tmp/training-output', help='Directory where Tensorflow checkpoints will be written' ) parser.add_argument( '--restore', default=False, action='store_true', help='Restore from latest checkpoint in --output-dir' ) parser.add_argument( '--video-dir', default='tmp/training-videos', type=str, help='Directory where mp4s of each training epoch will be stored' ) parser.add_argument( '--learning-rate', type=float, default=0.001, help='learning_rate used by tf.train.RMSPropOptimizer' ) parser.add_argument( '--rmsprop-decay', type=float, default=0.99, help='decay (gamma) used by tf.train.RMSPropOptimizer' ) parser.add_argument( '--reward-decay', type=float, default=0.99, help='decay (gamma) used as a reward discount factor' ) parser.add_argument( '--move-penalty', type=float, default=0.01, help='additional penalty (loss function multipler) applied when actor is moved, which discourages super-human bursts of movement' ) parser.add_argument( '--hidden-dim', type=int, default=200 ) parser.add_argument( '--render', type=bool, default=True, help='Render gameplay visually (and record to --video-dir' ) parser.add_argument( '--save-checkpoint-steps', type=int, default=1 ) args = parser.parse_args() # save all checkpoints args.max_to_keep = args.n_epoch // args.save_checkpoint_steps return args if __name__ == '__main__': args = parse_args() logging.basicConfig(level=args.loglevel) main(args)
python
# coding: utf-8
"""
Project Euler problem #40.
"""


def problem():
    u"""
    Solve the problem.

    An irrational decimal fraction is created by concatenating the positive
    integers: 0.12345678910(1)112131415161718192021...

    It can be seen that the 12th digit of the fractional part is 1.
    If dn represents the nth digit of the fractional part, find the value of
    the following expression.

    d1 × d10 × d100 × d1000 × d10000 × d100000 × d1000000

    Answer: 210
    """
    stops = [1, 10, 100, 1000, 10000, 10**5, 10**6]
    length, x, prod = 0, 0, 1
    while stops:
        x += 1
        length += len(str(x))
        if length >= stops[0]:
            prod *= int(str(x)[stops[0] - length - 1])
            stops.pop(0)
    return prod


if __name__ == '__main__':
    print problem()
python
# Author: Mathurin Massias <[email protected]> # License: BSD 3 clause import os from pathlib import Path from bz2 import BZ2Decompressor import numpy as np from scipy import sparse from download import download from sklearn import preprocessing from sklearn.datasets import load_svmlight_file NAMES = { 'aloi': 'multiclass/aloi.bz2', 'bodyfat': 'regression/bodyfat', 'connect-4': 'multiclass/connect-4', 'dna': 'multiclass/dna.scale', 'eunite2001': 'regression/eunite2001', 'finance': 'regression/log1p.E2006.train.bz2', 'glass': 'multiclass/glass.scale', 'housing': 'regression/housing', 'iris': 'multiclass/iris.scale', 'kdda_train': 'binary/kdda.bz2', 'letter': 'multiclass/letter.scale', 'mnist': 'multiclass/mnist.bz2', 'news20': 'binary/news20.binary.bz2', 'news20_multiclass': 'multiclass/news20.bz2', # 'protein': 'multiclass/protein.bz2', 'rcv1_multiclass': 'multiclass/rcv1_train.multiclass.bz2', 'rcv1_topics_test': 'multilabel/rcv1_topics_test_2.svm.bz2', 'rcv1_train': 'binary/rcv1_train.binary.bz2', 'real-sim': 'binary/real-sim.bz2', 'sector_train': 'multiclass/sector/sector.bz2', 'sector_test': 'multiclass/sector/sector.t.bz2', 'smallNORB': 'multiclass/smallNORB.bz2', 'url': 'binary/url_combined.bz2', 'webspam': 'binary/webspam_wc_normalized_trigram.svm.bz2', } N_FEATURES = { 'aloi': 128, 'bodyfat': 14, 'connect-4': 126, 'dna': 180, 'eunite2001': 16, 'finance': 4_272_227, 'glass': 9, 'housing': 13, 'iris': 4, 'kdda_train': 20_216_830, 'letter': 16, 'mnist': 780, 'news20': 1_355_191, 'news20_multiclass': 62_061, # 'protein': 357, 'rcv1_multiclass': 47_236, 'rcv1_topics_test': 47_236, 'rcv1_train': 47_236, 'real-sim': 20_958, 'sector_train': 55_197, 'sector_test': 55_197, 'smallNORB': 18_432, 'url': 3_231_961, 'webspam': 16_609_143, } # DATA_HOME is determined using environment variables. # The top priority is the environment variable $LIBSVMDATA_HOME which is # specific to this package. # Else, it falls back on XDG_DATA_HOME if it is set. # Finally, it defaults to $HOME/data. # The data will be put in a subfolder 'libsvm' def get_data_home(): data_home = os.environ.get( 'LIBSVMDATA_HOME', os.environ.get('XDG_DATA_HOME', None) ) if data_home is None: data_home = Path.home() / 'data' return Path(data_home) / 'libsvm' DATA_HOME = get_data_home() def download_libsvm(dataset, destination, replace=False): """Download a dataset from LIBSVM website.""" url = ("https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/" + NAMES[dataset]) path = download(url, destination, replace=replace) return path def _get_X_y(dataset, multilabel, replace=False): """Load a LIBSVM dataset as sparse X and observation y/Y. 
If X and y already exists as npz and npy, they are not redownloaded unless replace=True.""" # some files are compressed, some are not: if NAMES[dataset].endswith('.bz2'): stripped_name = NAMES[dataset][:-4] else: stripped_name = NAMES[dataset] ext = '.npz' if multilabel else '.npy' y_path = DATA_HOME / f"{stripped_name}_target{ext}" X_path = DATA_HOME / f"{stripped_name}_data.npz" if replace or not y_path.exists() or not X_path.exists(): tmp_path = DATA_HOME / stripped_name # Download the dataset source_path = DATA_HOME / NAMES[dataset] if not source_path.parent.exists(): source_path.parent.mkdir(parents=True) download_libsvm(dataset, source_path, replace=replace) # decompress file only if it is compressed if NAMES[dataset].endswith('.bz2'): decompressor = BZ2Decompressor() print("Decompressing...") with open(tmp_path, "wb") as f, open(source_path, "rb") as g: for data in iter(lambda: g.read(100 * 1024), b''): f.write(decompressor.decompress(data)) source_path.unlink() n_features_total = N_FEATURES[dataset] print("Loading svmlight file...") with open(tmp_path, 'rb') as f: X, y = load_svmlight_file( f, n_features=n_features_total, multilabel=multilabel) tmp_path.unlink() X = sparse.csc_matrix(X) X.sort_indices() sparse.save_npz(X_path, X) if multilabel: indices = np.array([lab for labels in y for lab in labels]) indptr = np.cumsum([0] + [len(labels) for labels in y]) data = np.ones_like(indices) Y = sparse.csr_matrix((data, indices, indptr)) sparse.save_npz(y_path, Y) return X, Y else: np.save(y_path, y) else: X = sparse.load_npz(X_path) if multilabel: y = sparse.load_npz(y_path) else: y = np.load(y_path) return X, y def fetch_libsvm(dataset, replace=False, normalize=False, min_nnz=3): """ Download a dataset from LIBSVM website. Parameters ---------- dataset : string Dataset name. Must be in .NAMES.keys() replace : bool, default=False Whether to force download of dataset if already downloaded. normalize : bool, default=False If True, columns of X are set to unit norm. This may make little sense for a sparse matrix since centering is not performed. y is centered and set to unit norm if the dataset is a regression one. min_nnz: int, default=3 Columns of X with strictly less than min_nnz non-zero entries are discarded. Returns ------- X : scipy.sparse.csc_matrix Design matrix, in column sparse format. y : 1D or 2D np.array Design vector or matrix (in multiclass setting) References ---------- https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/ """ if dataset not in NAMES: raise ValueError("Unsupported dataset %s" % dataset) multilabel = NAMES[dataset].split('/')[0] == 'multilabel' is_regression = NAMES[dataset].split('/')[0] == 'regression' print("Dataset: %s" % dataset) X, y = _get_X_y(dataset, multilabel, replace=replace) # preprocessing if min_nnz != 0: X = X[:, np.diff(X.indptr) >= min_nnz] if normalize: X = preprocessing.normalize(X, axis=0) if is_regression: y -= np.mean(y) y /= np.std(y) return X, y if __name__ == "__main__": for dataset in NAMES: if not dataset.startswith("sector") and not dataset == "webspam": fetch_libsvm(dataset, replace=False)
python
# Create class for weather module

# Imports
import requests
import json
import datetime
import time
import os
import sys
from dotenv import load_dotenv


# Class
class WeatherModule:
    """
    Weather module class
    """

    # Initialize
    def __init__(self, city):
        """
        Initialize WeatherModule class
        """
        # Store the city used for the API requests
        self.city = city

    # Method
    def get_weather(self):
        """
        Get the current weather description
        """
        # Set up request
        load_dotenv()
        url = (
            "http://api.openweathermap.org/data/2.5/weather?q="
            + self.city
            + "&units=metric"
            + "&lang=sp"
            + "&APPID="
            + os.getenv("OPENWEATHERMAP_API_KEY")
        )
        # Get data
        data = requests.get(url).json()
        # Return the textual description of the weather
        description = data.get("weather")[0].get("description")
        return description

    def get_temperature(self):
        """
        Get the maximum temperature in °C
        """
        load_dotenv()
        url = (
            "http://api.openweathermap.org/data/2.5/weather?q="
            + self.city
            + "&units=metric"
            + "&lang=sp"
            + "&APPID="
            + os.getenv("OPENWEATHERMAP_API_KEY")
        )
        # Get data
        data = requests.get(url).json()
        # Return the maximum temperature
        temp = data.get("main").get("temp_max")
        return temp
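# --- Illustrative usage, assuming OPENWEATHERMAP_API_KEY is set in a .env file ---
# "Madrid" below is just an example city chosen for the sketch.
if __name__ == "__main__":
    weather = WeatherModule("Madrid")
    print(weather.get_weather())       # e.g. a short text description
    print(weather.get_temperature())   # max temperature in °C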
python
""" AWS Lambda entrypoint and Intent router """ from __future__ import print_function import json import logging import strings from manage_data import get_player_info from utility import ( get_household_and_person_ids, determine_welcome_message ) from play_new_game import play_new_game from handle_answer_request import ( handle_answer_request, next_clue_request, repeat_clue_request ) from alexa_responses import play_end_message, speech from session_attributes import SessionAttributes logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) def lambda_handler(event, _context): """ AWS Lambda entry point """ logger.debug('=====lambda handler started...') logger.debug(json.dumps(event)) household_id, person_id = get_household_and_person_ids(event) # If a one-shot was used to start a new game treat it like a LaunchRequest. if event['session']['new'] and event['request']['type'] == "IntentRequest": return launch_request(household_id, person_id) if event['request']['type'] == "LaunchRequest": return launch_request(household_id, person_id) if event['request']['type'] == "IntentRequest": return on_intent(event['request']['intent'], event['session']) if event['request']['type'] == "SessionEndedRequest": return play_end_message() def launch_request(household_id, person_id): """ Handles LaunchRequests """ player = get_player_info(household_id, person_id) logger.debug("=====Player Info: %s", player) tts = determine_welcome_message(household_id, person_id, player) session_attributes = { "game_status": "not_yet_started", "player_info": player } return speech(tts=tts, attributes=session_attributes, should_end_session=False, reprompt=strings.WELCOME_REPROMPT) def on_intent(intent, session): """ Router for IntentRequest """ intent_name = intent['name'] logger.debug("=====IntentRequest: %s", intent_name) this_game = SessionAttributes(session['attributes']) if intent_name == "AnswerIntent": return answer_intent(intent, this_game) if intent_name == "NextClueIntent": return next_clue_intent(this_game) if intent_name == "NotSureIntent": return not_sure_intent(intent, this_game) if intent_name == "RepeatIntent": return repeat_intent(this_game) if intent_name == "AMAZON.StartOverIntent": return start_over_intent(this_game) if intent_name == "AMAZON.YesIntent": return yes_intent(intent, this_game) if intent_name == "AMAZON.NoIntent": return no_intent(intent, this_game) if intent_name in ("AMAZON.StopIntent", "AMAZON.CancelIntent"): return play_end_message() if intent_name == 'AMAZON.HelpIntent': return help_intent(this_game) def answer_intent(intent, this_game): """ Handles AnswerIntent """ logger.debug("=====answer_intent fired...") game_status = this_game.game_status if game_status == "in_progress": return handle_answer_request(intent, this_game) # If the game hasn't started yet, the player may have # interrupted Alexa during the rules being read to them. if game_status == "not_yet_started": return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME, attributes=this_game.attributes, should_end_session=False, reprompt=strings.WELCOME_REPROMPT) # We probably got here because the player said something other than # yes or no after asking if they wanted to play the game again. 
logger.debug("=====No attributes, ending game!") return play_end_message() def next_clue_intent(this_game): """ Handle NextClueIntent """ logger.debug("=====next_clue_intent fired...") game_status = this_game.game_status if game_status == "in_progress": return next_clue_request(this_game) # If it's not started yet the player might have interrupted # Alexa during the rules being read so we repeat them. if game_status == "not_yet_started": return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME, attributes=this_game.attributes, should_end_session=False, reprompt=strings.WELCOME_REPROMPT) # Player probably got here because they said something other than # yes or no after asking if they wanted to play the game again. logger.debug("=====No attributes ending game...") return play_end_message() def not_sure_intent(intent, this_game): """ Handle NotSureIntent """ logger.debug("=====not_sure_intent fired...") game_status = this_game.game_status if game_status == "in_progress": # If we're on the last clue then count this as an answer. if this_game.current_clue_index == 4: return handle_answer_request(intent, this_game) # Otherwise we go to the next clue. return next_clue_request(this_game) # If it's not started yet the player might have interrupted # Alexa during the rules being read so we repeat them. if game_status == "not_yet_started": return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME, attributes=this_game.attributes, should_end_session=False, reprompt=strings.WELCOME_REPROMPT) # Player probably got here because they said something other than # yes or no after asking if they wanted to play the game again. logger.debug("=====No attributes ending game...") return play_end_message() def repeat_intent(this_game): """ Handle RepeatIntent """ logger.debug("=====repeat_intent fired...") game_status = this_game.game_status if game_status == "in_progress": return repeat_clue_request(this_game) # If it's not started yet the player might have interrupted # Alexa during the rules being read so we repeat them. if game_status == "not_yet_started": return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME, attributes=this_game.attributes, should_end_session=False, reprompt=strings.WELCOME_REPROMPT) # Player probably got here because they said something other than # yes or no after asking if they wanted to play the game again. logger.debug("=====no attributes ending game") return play_end_message() def start_over_intent(this_game): """ Handle StartOverIntent """ logger.debug("=====start_over_intent fired...") game_status = this_game.game_status if game_status == "in_progress": return play_new_game(this_game, replay=True) # If it's not started yet the player might have interrupted # Alexa during the rules being read so we repeat them. if game_status == "not_yet_started": return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME, attributes=this_game.attributes, should_end_session=False, reprompt=strings.WELCOME_REPROMPT) # If the game is over start a new one. if game_status == "ended": return play_new_game(this_game, replay=True) def yes_intent(intent, this_game): """ Handle YesIntent """ logger.debug("=====yes_intent fired...") game_status = this_game.game_status # If there is a game in progress we treat this as a wrong answer. if game_status == "in_progress": return handle_answer_request(intent, this_game) # If it's not started yet the player wants to hear the rules. 
if game_status == "not_yet_started": return speech(tts=strings.HELP_MESSAGE_BEFORE_GAME, attributes=this_game.attributes, should_end_session=False, reprompt=strings.WELCOME_REPROMPT) # Otherwise they're trying to play the game again after finishing a game. return play_new_game(this_game, replay=True) def no_intent(intent, this_game): """ Handle NoIntent """ logger.debug("=====no_intent fired...") game_status = this_game.game_status # If there is a game in progress we treat this as a wrong answer. if game_status == "in_progress": return handle_answer_request(intent, this_game) # If it's not started yet the player does not want the rules. if game_status == "not_yet_started": return play_new_game(this_game, replay=False) # Otherwise end the game. return play_end_message() def help_intent(this_game): """ Handle HelpIntent """ logger.debug("=====help_intent fired...") tts = strings.HELP_MESSAGE_BEFORE_GAME if this_game.game_status == "in_progress": tts = strings.HELP_MESSAGE_DURING_GAME + this_game.current_clue return speech(tts=tts, attributes=this_game.attributes, should_end_session=False)
python
# Generated by Django 3.1.7 on 2021-03-12 16:15 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('recruiter', '0023_auto_20210312_2144'), ] operations = [ migrations.AddField( model_name='recruiter', name='overall_rating', field=models.FloatField(default=0), ), ]
python
"""Search views init.""" from src.views.index import show_index
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' Created on Mar 31, 2018 @ Author: Frederich River ''' import atexit import os import signal import sys import time from apscheduler.executors.pool import ThreadPoolExecutor from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore from env import LOG_FILE, PID_FILE, TASK_FILE, MANUAL from libmysql8 import mysqlHeader, mysqlBase from libtask import taskManager from message import (DM_MSG, DM_START, DM_ALIVE, DM_STOP, DM_NOT_RUN) from sqlalchemy.ext.declarative import declarative_base from threading import Thread __version__ = '1.4.8' def neutrino(pid_file, log_file): # This is a daemon programe, which will start after # system booted. # # It is defined to start by rc.local. # # fork a sub process from father if os.path.exists(pid_file): raise RuntimeError('Neutrino is already running') try: if os.fork() > 0: raise SystemExit(0) except OSError: raise RuntimeError('Fork #1 failed.') os.chdir('/') os.umask(0) os.setsid() # Second fork try: if os.fork() > 0: raise SystemExit(0) except OSError: raise RuntimeError('Fork #2 failed.') # Flush I/O buffers sys.stdout.flush() sys.stderr.flush() # with open(log_file, 'rb', 0) as read_null: # os.dup2(read_null.fileno(), sys.stdin.fileno()) with open(log_file, 'a') as write_null: # Redirect to 1 which means stdout os.dup2(write_null.fileno(), 1) with open(log_file, 'a') as error_null: # Redirect to 2 which means stderr os.dup2(error_null.fileno(), 2) if pid_file: with open(pid_file, 'w+') as f: f.write(str(os.getpid())) atexit.register(os.remove, pid_file) def sigterm_handler(signo, frame): raise SystemExit(1) signal.signal(signal.SIGTERM, sigterm_handler) def _logMonitor(log_file): # A parallel programe which monitoring the log file. # If log file is not exists, it will create one and # relocalize the file. while True: if os.path.exists(log_file): time.sleep(10) else: create_file = open(log_file, 'a') create_file.close() with open(log_file, 'a') as write_null: os.dup2(write_null.fileno(), 1) with open(log_file, 'a') as error_null: os.dup2(error_null.fileno(), 2) print( f"{time.ctime()}: Log file is missing. Recreate it.\n" f"{time.ctime()}: Neutrino started with pid {os.getpid()}\n") def main_function(taskfile=None): # judge whether the task file exists. print( f"{time.ctime()}: " f"Neutrino started with pid {os.getpid()}\n") Base = declarative_base() header = mysqlHeader('root', '6414939', 'test') mysql = mysqlBase(header) jobstores = { 'default': SQLAlchemyJobStore( engine=mysql.engine, metadata=Base.metadata) } executor = {'default': ThreadPoolExecutor(20)} Neptune = taskManager(taskfile=taskfile, jobstores=jobstores, executors=executor) Neptune.start() print(f"{time.ctime()}: Neptune start.\n") while True: print(DM_ALIVE.format(time.ctime())) Neptune.check_task_file() time.sleep(1800) return 1 def print_info(info_file): infotext = '' with open(info_file) as r: infotext = r.read() print(infotext) if __name__ == '__main__': # This is main function # Arguments format is like 'netrino args' # Neutrino receives args like start stop or other. if len(sys.argv) != 2: print(DM_MSG.format(sys.argv[0])) raise SystemExit(1) if sys.argv[1] == 'start': try: neutrino(PID_FILE, LOG_FILE) sys.stdout.write(DM_START.format(t=time.ctime(), pid=os.getpid())) sys.stdout.flush() # Here we start a thread which monitoring the log # file. If log file is missing, it will create one. lm = Thread(target=_logMonitor, args=(LOG_FILE,), name='lm', daemon=True) lm.start() main_function(TASK_FILE) # ending of working code. 
except Exception: raise SystemExit(1) elif sys.argv[1] == 'stop': if os.path.exists(PID_FILE): sys.stdout.flush() with open(LOG_FILE, 'a') as write_null: os.dup2(write_null.fileno(), 1) write_null.write(DM_STOP.format(time.ctime())) with open(PID_FILE) as f: os.kill(int(f.read()), signal.SIGTERM) else: print(DM_NOT_RUN) raise SystemExit(1) elif sys.argv[1] == 'reboot': if os.path.exists(PID_FILE): sys.stdout.flush() with open(LOG_FILE, 'a') as write_null: os.dup2(write_null.fileno(), 1) write_null.write(DM_STOP.format(time.ctime())) with open(PID_FILE) as f: os.kill(int(f.read()), signal.SIGTERM) else: print(DM_NOT_RUN) # raise SystemExit(1) try: neutrino(PID_FILE, LOG_FILE) sys.stdout.write(DM_START.format(t=time.ctime(), pid=os.getpid())) sys.stdout.flush() # Here we start a thread which monitoring the log # file. If log file is missing, it will create one. lm = Thread(target=_logMonitor, args=(LOG_FILE,), name='lm', daemon=True) lm.start() main_function(TASK_FILE) # ending of working code. except Exception: raise SystemExit(1) elif sys.argv[1] == 'clear': with open(LOG_FILE, 'w') as f: pass elif sys.argv[1] == 'help': print_info(MANUAL) elif sys.argv[1] == 'log': print_info(LOG_FILE) elif sys.argv[1] == 'version': print(__version__) else: print('Unknown command {!r}'.format(sys.argv[1])) raise SystemExit(1)
python
#!/usr/bin/env python # coding: utf-8 from argparse import ArgumentParser import pandas as pd import pyprojroot LOSS_FUNC_ML_TASK_MAP = { 'CE-largest': 'single-label, largest', 'CE-random': 'single-label, random', 'BCE': 'multi-label', } def main(source_data_root, rm_corr_csv_path, test_results_csv_path, test_acc_v_r_coeff_csv_filename ): """generate source data for figure that plots accuracy on test set v. r coefficent from repeated measures correlations Parameters ---------- source_data_root : str, Path path to root of directory where "source data" csv files that are generated should be saved rm_corr_csv_path : str path to csv with repeated measures correlations results, output of generate_source_data_acc_vsd_corr.py. Path should be written relative to source_data_root test_results_csv_path : str path to csv with results of measuring accuracy on test set, output of generate_source_data_test_results.py. Path should be written relative to source_data_root test_acc_v_r_coeff_csv_filename : str filename for .csv that should be saved with accuracies and r coefficients combined. This is the actual source data used for plotting. Saved in source_data_root. """ rm_corr_df = pd.read_csv( source_data_root.joinpath(rm_corr_csv_path) ) # get just acc/f1 scores on test set for models trained with transfer learning test_results_df = pd.read_csv(source_data_root.joinpath(test_results_csv_path)) # copy cuz we're going to slice-and-dice # to get Dataframe we use for 'x-y' plot comparing test accuracy to r coeff size xy_df = rm_corr_df.copy() # add colum to rm_corr_df xy_df['task (M.L.)'] = xy_df['loss_func'].map(LOSS_FUNC_ML_TASK_MAP) # just keep transfer results, now will be same len as test_results_df xy_df = xy_df[xy_df.method == 'transfer'] xy_df['DNN architecture'] = xy_df.net_name.str.replace('_', ' ', regex=False) # keep only the columns we need COLUMNS_XY = [ 'task (M.L.)', 'DNN architecture', 'loss_func', 'r', 'CI95%', 'dof', 'power', 'pval', ] xy_df = xy_df[COLUMNS_XY] # use test_result_df as index for xy_df, so we can add columns from test_df xy_df = xy_df.set_index(['task (M.L.)', 'DNN architecture']) test_results_df = test_results_df.set_index(['task (M.L.)', 'DNN architecture']) xy_df = xy_df.reindex(index=test_results_df.index) for col in ['acc-largest-mean', 'acc-random-mean', 'f1-mean']: xy_df[col] = test_results_df[col] # finally reset index so we don't lose columns when we convert xy_df to 'long-form' xy_df = xy_df.reset_index() # make 'long form' so we can use seaborn relplot value_vars = ['acc-largest-mean', 'acc-random-mean', 'f1-mean'] id_vars = [id_var for id_var in xy_df.columns.tolist() if id_var not in value_vars] var_name = 'metric_name' value_name = 'metric_val' long_test_results_df = pd.melt(xy_df, id_vars=id_vars, value_vars=value_vars, var_name=var_name, value_name=value_name) pairs = [ ('single-label, largest', 'acc-largest-mean'), ('single-label, random', 'acc-random-mean'), ('multi-label', 'f1-mean'), ] long_test_results_df = pd.concat( [long_test_results_df[ (long_test_results_df['task (M.L.)'] == pair[0]) & (long_test_results_df['metric_name'] == pair[1]) ] for pair in pairs ] ) long_test_results_df.to_csv(source_data_root.joinpath(test_acc_v_r_coeff_csv_filename)) long_test_results_df.to_excel(source_data_root.joinpath( test_acc_v_r_coeff_csv_filename.replace('.csv', '.xlsx') )) SOURCE_DATA_ROOT = pyprojroot.here().joinpath('results/VSD/source_data') RM_CORR_CSV_PATH = '8-bins-quantile-strategy/rm_corr.csv' TEST_RESULTS_CSV_PATH = 'test_results_table_transfer.csv' def 
get_parser(): parser = ArgumentParser() parser.add_argument('--source_data_root', help=('path to root of directory where "source data" csv files ' 'that are generated should be saved'), default=SOURCE_DATA_ROOT) parser.add_argument('--rm_corr_csv_path', help=('path to csv with repeated measures correlations results, ' 'output of generate_source_data_acc_vsd_corr.py. ' 'Path should be written relative to source_data_root'), default=RM_CORR_CSV_PATH) parser.add_argument('--test_results_csv_path', help=('path to csv with results of measuring accuracy on test set, ' 'output of generate_source_data_test_results.py. ' 'Path should be written relative to source_data_root'), default=TEST_RESULTS_CSV_PATH) parser.add_argument('--test_acc_v_r_coeff_csv_filename', default='acc_v_r_coeff.csv', help=('filename for .csv that should be saved ' 'with accuracies and r coefficients combined. ' 'This is the actual source data used for plotting. ' 'Saved in source_data_root.')) return parser if __name__ == '__main__': parser = get_parser() args = parser.parse_args() main(source_data_root=args.source_data_root, rm_corr_csv_path=args.rm_corr_csv_path, test_results_csv_path=args.test_results_csv_path, test_acc_v_r_coeff_csv_filename=args.test_acc_v_r_coeff_csv_filename )
python
name = 'controllers' from .constant_controller import ConstantController from .controller import Controller from .energy_controller import EnergyController from .fb_lin_controller import FBLinController from .linear_controller import LinearController from .lqr_controller import LQRController from .pd_controller import PDController from .qp_controller import QPController from .mpc_controller import MPCController from .mpc_controller_dense import MPCControllerDense from .robust_mpc_controller_dense import RobustMpcDense from .mpc_controller_lift_fp import MPCControllerFast from .aggregated_mpc_controller import AggregatedMpcController from .random_controller import RandomController from .openloop_controller import OpenLoopController
python
from message_bot.database.engines.base import BaseEngine from message_bot.database.engines.gsheet import GsheetEngine from message_bot.database.engines.json import JSONEngine
python
array = input("Enter the string here: ").split() array.sort(key=len) print(array)
python
import unittest from oletools.common.clsid import KNOWN_CLSIDS class TestCommonClsid(unittest.TestCase): def test_known_clsids_uppercase(self): for k, v in KNOWN_CLSIDS.items(): k_upper = k.upper() self.assertEqual(k, k_upper)
python
import logging import os def setup_logger(log_directory='', log_filename="astronomaly.log"): """ Ensures the system logger is set up correctly. If a FileHandler logger has already been attached to the current logger, nothing new is done. Parameters ---------- log_directory : str, optional Location of log file, by default '' log_filename : str, optional Log file name, by default "astronomaly.log" Returns ------- Logger The Logger object """ root_logger = logging.getLogger() reset = False if len(root_logger.handlers) != 0: for h in root_logger.handlers: try: flname = h.baseFilename if flname != os.path.join(log_directory, log_filename): print('Warning: logger already attached to log file:') print(flname) print('Now switching to new log file:') print(os.path.join(log_directory, log_filename)) reset = True except AttributeError: pass if reset: root_logger.handlers = [] if len(root_logger.handlers) == 0: log_formatter = logging.Formatter( "%(asctime)s - %(levelname)s - %(message)s") root_logger.setLevel(logging.INFO) if not os.path.exists(log_directory): os.makedirs(log_directory) file_handler = logging.FileHandler( os.path.join(log_directory, log_filename)) file_handler.setFormatter(log_formatter) file_handler.setLevel(logging.INFO) console_handler = logging.StreamHandler() console_handler.setFormatter(log_formatter) console_handler.setLevel(logging.WARNING) root_logger.addHandler(file_handler) root_logger.addHandler(console_handler) return root_logger def format_function_call(func_name, *args, **kwargs): """ Formats a function of a PipelineStage or Dataset object to ensure proper recording of the function and its arguments. args and kwargs should be exactly those passed to the function. Parameters ---------- func_name : str Name of the stage Returns ------- str Formatted function call """ out_str = func_name + '(' if len(args) != 0: for a in args: out_str += (str)(a) + ', ' if len(kwargs.keys()) != 0: for k in kwargs.keys(): out_str += ((str)(k) + '=' + (str)(kwargs[k]) + ', ') if out_str[-2] == ',': out_str = out_str[:-2] out_str += ')' return out_str def log(msg, level='INFO'): """ Actually logs a message. Ensures the logger has been set up first. Parameters ---------- msg : str Log message level : str, optional DEBUG, INFO, WARNING or ERROR, by default 'INFO' """ root_logger = logging.getLogger() if len(root_logger.handlers) == 0: setup_logger() if level == 'ERROR': root_logger.error(msg) elif level == 'WARNING': root_logger.warning(msg) elif level == 'DEBUG': root_logger.debug(msg) else: root_logger.info(msg) def check_if_inputs_same(class_name, local_variables): """ Reads the log to check if this function has already been called with the same arguments (this may still result in the function being rerun if the input data has changed). Parameters ---------- class_name : str Name of PipelineStage local_variables : dict List of all local variables. Returns ------- args_same, bool True if the function was last called with the same arguments. checksum, int Reads the checksum stored in the log file and returns it. """ hdlrs = logging.getLogger().handlers # Try to be somewhat generic allowing for other handlers but this will # only return the filename of the first FileHandler object it finds. # This should be ok except for weird logging edge cases. 
flname = '' checksum = 0 for h in hdlrs: try: flname = h.baseFilename break except AttributeError: pass if len(flname) == 0 or not os.path.exists(flname): # Log file doesn't exist yet return False else: fl = open(flname) func_args = {} args_same = False for ln in fl.readlines()[::-1]: if class_name + '(' in ln: # To be completely general, the string manipulation has to # be a little complicated stripped_ln = ln.split('-')[-2].split(')')[0].split('(')[-1] the_list = stripped_ln.split('=') kwarg_list = [] if len(the_list) > 1: for l in the_list: if ',' not in l: kwarg_list.append(l) else: s = l.split(',') if len(s) > 2: kwarg_list.append(','.join(s[:-1])) else: kwarg_list.append(s[0]) kwarg_list.append(s[-1]) if len(kwarg_list) != 0: for k in range(0, len(kwarg_list), 2): try: key = kwarg_list[k] value = kwarg_list[k + 1] func_args[key.strip()] = value.strip() except ValueError: # This happens when there are no arguments pass checksum_ln = ln.split('checksum:') if len(checksum_ln) > 1: checksum = int(checksum_ln[-1]) else: checksum = 0 args_same = True for k in func_args.keys(): if k not in local_variables.keys(): args_same = False break else: if k != "force_rerun" and \ func_args[k] != (str)(local_variables[k]): args_same = False break break return args_same, checksum
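# --- Hedged usage sketch (illustrative, not part of the original module) ---
# format_function_call builds the string recorded in the log so that
# check_if_inputs_same can later parse the arguments back out of it; the stage
# name, path and keyword below are invented examples.
if __name__ == '__main__':
    example = format_function_call('ImageDataset', '/data/images', window_size=128)
    print(example)  # ImageDataset(/data/images, window_size=128)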
python
from unittest.mock import patch import pytest from peerscout.utils.bq_data_service import ( load_file_into_bq, ) import peerscout.utils.bq_data_service \ as bq_data_service_module @pytest.fixture(name="mock_bigquery") def _bigquery(): with patch.object(bq_data_service_module, "bigquery") as mock: yield mock @pytest.fixture(name="mock_bq_client_class") def _bq_client(): with patch.object(bq_data_service_module, "Client") as mock: yield mock @pytest.fixture(name="mock_load_job_config") def _load_job_config(): with patch.object(bq_data_service_module, "LoadJobConfig") as mock: yield mock @pytest.fixture(name="mock_open", autouse=True) def _open(): with patch.object(bq_data_service_module, "open") as mock: yield mock @pytest.fixture(name="mock_path") def _getsize(): with patch.object(bq_data_service_module.os, "path") as mock: mock.getsize.return_value = 1 mock.isfile.return_value = True yield mock def test_load_file_into_bq( mock_load_job_config, mock_open, mock_bq_client_class): file_name = "file_name" dataset_name = "dataset_name" table_name = "table_name" load_file_into_bq( filename=file_name, dataset_name=dataset_name, table_name=table_name) mock_open.assert_called_with(file_name, "rb") source_file = mock_open.return_value.__enter__.return_value mock_bq_client_class.assert_called_once() mock_bq_client = mock_bq_client_class.return_value mock_bq_client.dataset.assert_called_with(dataset_name) mock_bq_client.dataset( dataset_name).table.assert_called_with(table_name) table_ref = mock_bq_client.dataset( dataset_name).table(table_name) mock_bq_client.load_table_from_file.assert_called_with( source_file, destination=table_ref, job_config=mock_load_job_config.return_value)
python
import click from jinja2 import PackageLoader from dgen import jinja env = jinja.create_env(PackageLoader(package_name=__package__)) TEXT_FIELD = """ %s = models.TextField( verbose_name=_('%s') )""" INTEGER_FIELD = """ %s = models.IntegerField( verbose_name=_('%s') )""" BOOLEAN_FIELD = """ %s = models.BooleanField( default=False, verbose_name=_('%s') )""" DATE_FIELD = """ %s = models.DateField( verbose_name=_('%s') )""" DATETIME_FIELD = """ %s = models.DateTimeField( verbose_name=_('%s') )""" TIME_FIELD = """ %s = models.TimeField( verbose_name=_('%s') )""" EMAIL_FIELD = """ %s = models.EmailField( verbose_name=_('%s') )""" SLUG_FIELD = """ %s = models.SlugField( allow_unicode=True, verbose_name=_('%s') )""" URL_FIELD = """ %s = models.URLField( verbose_name=_('%s') )""" UUID_FIELD = """ %s = models.UUIDField( unique=True, default=uuid.uuid4, editable=False, verbose_name=_('%s') )""" FIELDS = { 't': TEXT_FIELD, 'i': INTEGER_FIELD, 'b': BOOLEAN_FIELD, 'd': DATE_FIELD, 'dt': DATETIME_FIELD, 'time': TIME_FIELD, 'e': EMAIL_FIELD, 's': SLUG_FIELD, 'url': URL_FIELD, 'uuid': UUID_FIELD, } def get_field(ftype, name): verbose_name = name.replace('_', ' ').capitalize() return FIELDS[ftype] % (name, verbose_name) def parse_fields(fields): parsed_fields = [] for field in fields: parsed_field = get_field(ftype=field[0], name=field[1]) parsed_fields.append(parsed_field) return parsed_fields def model(name, fields): template = env.get_template('model.py') fields = parse_fields(fields) context = {'name': name, 'fields': fields} click.echo(template.render(context))
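# --- Hedged usage sketch (illustrative only) ---
# get_field turns a short type code plus a snake_case name into a Django model
# field snippet; parse_fields does the same for a list of (code, name) pairs.
# The field names below are invented.
if __name__ == '__main__':
    print(get_field('t', 'first_name'))
    #     first_name = models.TextField(
    #         verbose_name=_('First name')
    #     )
    print(''.join(parse_fields([('b', 'is_active'), ('uuid', 'public_id')])))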
python
from cocos.layer import Layer, director from cocos.menu import Menu, CENTER, ToggleMenuItem, MenuItem from cocos.scene import Scene from app import gVariables import sceneGenerator class CustomPauseScene(Scene): def __init__(self, gScene): super(CustomPauseScene, self).__init__() #ADD ALL TO MAIN LAYER self.add(_MenuBackground(gScene)) self.add(_Menu(gScene)) #MENU LAYERS class _MenuBackground(Layer): def __init__(self, gScene): super(_MenuBackground, self).__init__() self.R = gScene.R self.menuBackgroundSprite= self.R.BACKGROUND[0] self.menuBackgroundSprite.position = (director._window_virtual_width/2, director._window_virtual_height/2) self.add(self.menuBackgroundSprite) class _Menu(Menu): def __init__(self, gScene): super(_Menu, self).__init__() self.gScene = gScene self.menu_valign = CENTER self.menu_halign = CENTER self.menu_hmargin = 4 self.font_item['color'] = (189,216,178,255) self.font_item_selected['color'] = (140,161,132,255) self.create_menu([ ToggleMenuItem("Sound Effect ", self.onToggleFX, gVariables.g_IS_FX), ToggleMenuItem("Music ", self.onToggleMusic, gVariables.g_IS_BACKMUSIC), MenuItem("Resume", self.onBack) ]) def onToggleFX(self, value): gVariables.g_IS_FX = value def onToggleMusic(self, value): if value: sceneGenerator.PLAYMUSIC.Play() else: sceneGenerator.PLAYMUSIC.Stop() gVariables.g_IS_BACKMUSIC = value def onBack(self): director.replace(Scene(self.gScene)) self.gScene.PLAYER.is_playing = False
python
#%% import numpy as np from scipy import sparse from scipy.linalg import block_diag #%% def sdp_ymat( lines, Ybus ): nbus = Ybus.shape[0] nline = len(lines) # busset = np.arange(0, nbus) # lineset = np.arange(0, nline) #%% def e(k): return np.eye(nbus)[:, k][np.newaxis] # size of e(k): (1, nbus) def Yk_small(k): return (e(k).T @ e(k)) @ Ybus def Yk(k): return (1/2) * \ np.block([ [np.real(Yk_small(k) + Yk_small(k).T), np.imag(Yk_small(k).T - Yk_small(k))], [np.imag(Yk_small(k) - Yk_small(k).T), np.real(Yk_small(k) + Yk_small(k).T)] ]) def Yk_(k): return -(1/2) * \ np.block([ [np.imag(Yk_small(k) + Yk_small(k).T), np.real(Yk_small(k) - Yk_small(k).T)], [np.real(Yk_small(k).T - Yk_small(k)), np.imag(Yk_small(k) + Yk_small(k).T)] ]) def Mk(k): return block_diag(e(k).T @ e(k), e(k).T @ e(k)) # Real part of line admittance def gl(l): return np.real(1 / (lines[l].r+1j*lines[l].x)) # Imaginary part of line admittance def bl(l): return np.imag(1 / (lines[l].r+1j*lines[l].x)) def tau(l): return 1 if lines[l].tap == 0 else lines[l].tap def theta(l): return lines[l].shft def gbcosft(l): return gl(l)*np.cos(theta(l)) + bl(l)*np.cos(theta(l)+np.pi/2) def gbsinft(l): return gl(l)*np.sin(theta(l)) + bl(l)*np.sin(theta(l)+np.pi/2) def gbcostf(l): return gl(l)*np.cos(-theta(l)) + bl(l)*np.cos(-theta(l)+np.pi/2) def gbsintf(l): return gl(l)*np.sin(-theta(l)) + bl(l)*np.sin(-theta(l)+np.pi/2) #%% def Ylineft(l): return 0.5*( sparse.coo_matrix(( [gl(l)/(tau(l)**2), -gbcosft(l)/tau(l), gbsinft(l)/tau(l), gl(l)/(tau(l)**2), -gbsinft(l)/tau(l), -gbcosft(l)/tau(l)], ([lines[l].fbus, lines[l].fbus, lines[l].fbus, lines[l].fbus + nbus, lines[l].fbus+nbus, lines[l].fbus+nbus], [lines[l].fbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus]) ), shape = (2*nbus, 2*nbus)) + sparse.coo_matrix(( [gl(l)/(tau(l)**2), -gbcosft(l)/tau(l), gbsinft(l)/tau(l), gl(l)/(tau(l)**2), -gbsinft(l)/tau(l), -gbcosft(l)/tau(l)], ([lines[l].fbus, lines[l].fbus, lines[l].fbus, lines[l].fbus + nbus, lines[l].fbus+nbus, lines[l].fbus+nbus], [lines[l].fbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus]) ), shape=(2*nbus, 2*nbus)).T ) def Y_lineft(l): return 0.5*( sparse.coo_matrix(( [-(bl(l)+lines[l].b/2)/(tau(l)**2), gbsinft(l)/tau(l), gbcosft(l)/tau(l), - (bl(l)+lines[l].b/2)/(tau(l)**2), -gbcosft(l)/tau(l), gbsinft(l)/tau(l)], ([lines[l].fbus, lines[l].fbus, lines[l].fbus, lines[l].fbus + nbus, lines[l].fbus+nbus, lines[l].fbus+nbus], [lines[l].fbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus]) ), shape=(2*nbus, 2*nbus)) + sparse.coo_matrix(( [-(bl(l)+lines[l].b/2)/(tau(l)**2), gbsinft(l)/tau(l), gbcosft(l)/tau(l), - (bl(l)+lines[l].b/2)/(tau(l)**2), -gbcosft(l)/tau(l), gbsinft(l)/tau(l)], ([lines[l].fbus, lines[l].fbus, lines[l].fbus, lines[l].fbus + nbus, lines[l].fbus+nbus, lines[l].fbus+nbus], [lines[l].fbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus]) ), shape=(2*nbus, 2*nbus)).T ) def Ylinetf(l): return 0.5*( sparse.coo_matrix(( [-gbcostf(l)/tau(l), -gbsintf(l)/tau(l), gbsintf(l) / tau(l), -gbcostf(l)/tau(l), gl(l), gl(l)], ([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus], [lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus]) ), shape = (2*nbus, 2*nbus)) + sparse.coo_matrix(( [-gbcostf(l)/tau(l), -gbsintf(l)/tau(l), gbsintf(l) / tau(l), 
-gbcostf(l)/tau(l), gl(l), gl(l)], ([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus], [lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus]) ), shape = (2*nbus, 2*nbus)).T ) def Y_linetf(l): return 0.5*( sparse.coo_matrix(( [gbsintf(l)/tau(l), -gbcostf(l)/tau(l), gbcostf(l)/tau(l), gbsintf(l)/tau(l), -(bl(l)+lines[l].b/2), -(bl(l)+lines[l].b/2)], ([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus], [lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus]) ), shape=(2*nbus, 2*nbus)) + sparse.coo_matrix(( [gbsintf(l)/tau(l), -gbcostf(l)/tau(l), gbcostf(l)/tau(l), gbsintf(l)/tau(l), -(bl(l)+lines[l].b/2), -(bl(l)+lines[l].b/2)], ([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus], [lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus, lines[l].tbus+nbus]) ), shape=(2*nbus, 2*nbus)).T ) def YL(l): return sparse.coo_matrix(( [1, -1, 1, -1, -1, 1, -1, 1], ([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus+nbus], [lines[l].fbus, lines[l].tbus, lines[l].fbus+nbus, lines[l].tbus+nbus, lines[l].fbus, lines[l].tbus, lines[l].fbus+nbus, lines[l].tbus+nbus]) ), shape = (2*nbus, 2*nbus)) * lines[l].r * (gl(l)**2 + bl(l)**2) def YL_(l): return (sparse.coo_matrix(( [1, -1, 1, -1, -1, 1, -1, 1], ([lines[l].fbus, lines[l].fbus, lines[l].fbus+nbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus, lines[l].tbus+nbus, lines[l].tbus+nbus], [lines[l].fbus, lines[l].tbus, lines[l].fbus+nbus, lines[l].tbus+nbus, lines[l].fbus, lines[l].tbus, lines[l].fbus+nbus, lines[l].tbus+nbus]) ), shape = (2*nbus, 2*nbus)) * lines[l].x * (gl(l)**2 + bl(l)**2) - sparse.coo_matrix(( [1, 1, 1, 1], ([lines[l].fbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus], [lines[l].fbus, lines[l].fbus+nbus, lines[l].tbus, lines[l].tbus+nbus]) ), shape = (2*nbus, 2*nbus)) * lines[l].b / 2) return Yk, Yk_, Mk, Ylineft, Ylinetf, Y_lineft, Y_linetf, YL, YL_
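# --- Hedged usage sketch (illustrative only) ---
# `lines` is assumed to be a sequence of objects exposing 0-based fbus/tbus and
# r, x, b, tap, shft attributes; the 2-bus data below is invented.
from collections import namedtuple

Line = namedtuple('Line', ['fbus', 'tbus', 'r', 'x', 'b', 'tap', 'shft'])

def _demo_sdp_ymat():
    line = Line(fbus=0, tbus=1, r=0.01, x=0.1, b=0.02, tap=0, shft=0)
    y = 1 / (line.r + 1j * line.x)
    Ybus = np.array([[y + 1j * line.b / 2, -y],
                     [-y, y + 1j * line.b / 2]])
    Yk, Yk_, Mk, Ylineft, Ylinetf, Y_lineft, Y_linetf, YL, YL_ = sdp_ymat([line], Ybus)
    # Each Yk(k) is a real, symmetric 2*nbus x 2*nbus matrix; in the usual SDP
    # relaxation of OPF, trace(Yk(k) @ W) with W = [Re V; Im V][Re V; Im V].T
    # recovers the active power injection at bus k.
    print(Yk(0).shape)                 # (4, 4)
    print(Ylineft(0).toarray().shape)  # (4, 4)

# _demo_sdp_ymat()  # uncomment to run the sketch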
python
while True:
    try:
        height = int(input("Height: "))
    except ValueError:
        print("Invalid number, please try again")
        continue
    if 1 < height < 9:
        # Row i of the pyramid gets (height - i) leading spaces and i hashes.
        for i in range(1, height + 1):
            print((height - i) * ' ' + "#" * i)
        break
    print("Please enter a height between 2 and 8")
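# Example (illustrative): entering 3 prints a right-aligned pyramid:
#   #
#  ##
# ###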
python
from typing import Dict, List, Tuple import pygame import pygame_gui from pygame.constants import TEXTINPUT from pygame.event import EventType from pygame_gui.core import UIContainer from pygame_gui.elements import UIButton from pygame_gui.elements.ui_label import UILabel from pygame_gui.ui_manager import UIManager import pysimgame from pysimgame.utils.abstract_managers import GameComponentManager class SpeedManager(GameComponentManager): """Manager of the model speed.""" speed: float available_speeds: List[float] container: UIContainer play_button: UIButton faster_button: UIButton slower_button: UIButton speed_label: UILabel settings: Dict def _resize_ui(self): """Recreate the ui to the size""" x, y = self.GAME_MANAGER.MAIN_DISPLAY.get_size() rect: pygame.Rect = self.settings["container_rect"] rect.x = (x - rect.width) / 2 rect.y = y - rect.height self.speed = 1 self.container = UIContainer( relative_rect=self.settings["container_rect"], manager=self.ui_manager, ) self.play_button = UIButton( relative_rect=self.settings["play_rect"], text=">", manager=self.ui_manager, container=self.container, ) self.faster_button = UIButton( relative_rect=self.settings["faster_rect"], text="+", manager=self.ui_manager, container=self.container, ) self.slower_button = UIButton( relative_rect=self.settings["slower_rect"], text="-", manager=self.ui_manager, container=self.container, ) self.speed_label = UILabel( relative_rect=self.settings["text_rect"], text=f"{self.speed} X", manager=self.ui_manager, container=self.container, ) def prepare(self): self.settings = { "available_speeds": [1 / 4, 1 / 2, 1, 2, 4, 10], "container_rect": pygame.Rect(-1, 500, 200, 50), "play_rect": pygame.Rect(0, 0, 50, 50), "faster_rect": pygame.Rect(175, 0, 25, 25), "slower_rect": pygame.Rect(175, 25, 25, 25), "text_rect": pygame.Rect(50, 0, 125, 50), } # Uses the game manager ui self.ui_manager = self.GAME_MANAGER.UI_MANAGER self.available_speeds = sorted(self.settings["available_speeds"]) self._resize_ui() def connect(self): self.MODEL_MANAGER = self.GAME_MANAGER.MODEL_MANAGER self._base_fps = self.MODEL_MANAGER.fps def increase_speed(self): """Increase the speed. 1 step in the available speeds. """ # Gets the current speed ind = self.available_speeds.index(self.speed) if ind < len(self.available_speeds) - 1: # Calculate the new speed index (assume sorted) self.speed = self.available_speeds[int(ind + 1)] self.post_changed_speed() def decrease_speed(self): """Decrease the speed. 1 step in the available speeds. 
""" # Gets the current speed ind = self.available_speeds.index(self.speed) if ind > 0: # Calculate the new speed index (assume sorted) self.speed = self.available_speeds[int(ind - 1)] self.post_changed_speed() def post_changed_speed(self): # post event event = pygame.event.Event( pysimgame.events.SpeedChanged, {"fps": self._base_fps * self.speed}, ) pygame.event.post(event) def process_events(self, event: pygame.event.Event) -> bool: """Listen the events for this manager.""" match event: case EventType( type=pygame_gui.UI_BUTTON_PRESSED, ui_element=self.faster_button, ) | EventType(type=pygame.TEXTINPUT, text="+"): self.increase_speed() case EventType( type=pygame_gui.UI_BUTTON_PRESSED, ui_element=self.slower_button, ) | EventType(type=pygame.TEXTINPUT, text="-"): self.decrease_speed() case EventType(type=pysimgame.events.SpeedChanged): self.speed_label.set_text(f"{self.speed} X") case EventType( type=pygame_gui.UI_BUTTON_PRESSED, ui_element=self.play_button, ): # Change the pause state event = pygame.event.Event(pysimgame.events.TogglePaused, {}) pygame.event.post(event) case EventType(type=pysimgame.events.Paused): self.play_button.set_text("||") case EventType(type=pysimgame.events.UnPaused): self.play_button.set_text(">")
python
# coding: utf-8 from __future__ import ( absolute_import, print_function, unicode_literals, ) from pydocx.models import XmlModel, XmlCollection from pydocx.openxml.vml import Shape, Rect class Picture(XmlModel): XML_TAG = 'pict' children = XmlCollection(Shape, Rect)
python
from typing import Union from pyppeteer.browser import Browser __all__ = ("BrowserContext",) class BrowserContext: def __init__(self) -> None: self._browser: Union[Browser, None] = None def set(self, browser: Browser) -> None: self._browser = browser def get(self) -> Union[Browser, None]: return self._browser def clear(self) -> None: self._browser = None def __repr__(self) -> str: return f"{self.__class__.__name__}<{self._browser!r}>"
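# --- Hedged usage sketch (illustrative only) ---
# The context starts empty, holds whatever set() stored, and is emptied again
# by clear(); a plain string stands in for a real pyppeteer Browser here.
if __name__ == "__main__":
    ctx = BrowserContext()
    assert ctx.get() is None
    ctx.set("fake-browser")  # placeholder; normally the result of pyppeteer.launch()
    print(repr(ctx))         # BrowserContext<'fake-browser'>
    ctx.clear()
    assert ctx.get() is None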
python
#!/usr/bin/env python # -*- coding: UTF-8 -*- """ This script controls the head motors Altered by Johannes Sommerfeldt """ import os import sys import redis # ROS 2 Imports import rclpy from rclpy.node import Node from std_msgs.msg import Float32, String, Int16, Bool from head.msg import MotorPosition from systemcore.msg import I2Cwrite8, I2Cwrite16, I2CwriteArray import time from threading import Timer import threading class Commands(): """ This class contains the I2C Commands for the Arduino Motor Control. """ MOTOR_SET_STIFFNESS = 0x10 MOTOR_TURN_SET_ABSOLUTE = 0x11 MOTOR_TURN_SET_RELATIVE = 0x12 # Deprecated MOTOR_PITCH_SET_ABSOLUTE = 0x13 MOTOR_PITCH_SET_RELATIVE = 0x14 # Deprecated MOTOR_SET_SPEED = 0x20 class I2cDataConstants(): """ This class contains constant values that are sent in the data of Arduino commands """ MOTOR_CONTROL_SPEED = 0 MOTOR_CONTROL_DURATION = 1 MOTOR_MAX_SPEED = 50 # Motors will move with: (<value> / 10 * msg.speed) pwm per millisecond class NodeSpinner(threading.Thread): def __init__(self, node): threading.Thread.__init__(self) self.node = node def run(self): rclpy.spin(self.node) ########################################################################################################### ## Object representing a single motor ##################################################################### ########################################################################################################### class Motor(): # class Motor(Node): """ Object representing a single motor of the robot. Each new physical Motor should get its own Motor-Object in this script. """ def __init__(self, parentNode, name, redisTopicLastPWM, redisTopicLastAngle, redisKeyMaxPWM, redisKeyMinPWM, redisKey0PWM, redisKey90PWM, rosTopicSetPWM, rosTopicSetAngle, rosTopicChangeAngle, rosTopicIsMoving, cmdSetAbsolute, cmdSetRelative, i2cAddress, i2cArrayPublisher): """Constructor for a Motor Object Args: parentNode (Node): The ROS2-Node over which subscriptions and publisher are created. Be aware to have only ONE single node instance for every started ROS-node. name (String): Name of the motor redisTopicLastPWM (String): Redis Key under which the last set PWM value is stored and published.\n redisTopicLastAngle (String): Redis Key under which the last set Motor Angle is stored and published.\n redisKeyMaxPWM (String): Redis Key under which the Max PWM value for this motor is stored.\n redisKeyMinPWM (String): Redis Key under which the Min PWM value for this motor is stored.\n redisKey0PWM (String): Redis Key under which the PWM value for 0 degree is stored for this motor.\n redisKey90PWM (String): Redis Key under which the PWM relative absolute value for changing the motor position by 90 degree is stored.\n rosTopicSetPWM (String): The subscribed ROS-Topic to set an absolute PWM value for this motor.\n rosTopicSetAngle (String): The subscribed ROS-Topic to set an absolute angle for this motor.\n rosTopicChangeAngle (String): The subscribed ROS-Topic to change the current angle of this motor.\n cmdSetAbsolute (int): The I2C command linked to this motor to set an absolute value.\n i2cAddress (int): The I2C address of the arduino controlling the motor.\n i2cArrayPublisher (rclpy.Publisher): The ROS-Publisher object for publishing I2C-Arrays.\n In order to use the motor correctly, the default angle values (for 0° and the delta for 90°) and the min and max PWM values must be stored under the given Redis-Keys. 
The current default values for the motors can be found under: https://icampusnet.th-wildau.de/gitlab/ros-e/tischroboter-software-sbc/wikis/Redis-Systemwerte The arduino commands can be found under: https://icampusnet.th-wildau.de/gitlab/ros-e/tischroboter-software-sbc/wikis/Arduino-I2C-Kommandos#ansteuerung-der-motoren """ # super().__init__('motor_node_{}'.format(name)) super().__init__() self.parentNode = parentNode self.name = name # Just used for debugging ### Create redis objects self.r = redis.Redis(host="localhost", port=6379, db=0) # Redis object to store and get key-values self.p = self.r.pubsub(ignore_subscribe_messages=True) # PubSub to publish redis messages ### Redis publish topics for current motor status self.redisTopicLastPWM = redisTopicLastPWM self.redisTopicLastAngle = redisTopicLastAngle ### Min and Max pwm values for the motor self.maxPWM = int(self.r.get(redisKeyMaxPWM)) self.minPWM = int(self.r.get(redisKeyMinPWM)) self.value0PWM = int(self.r.get(redisKey0PWM)) self.value90PWM = int(self.r.get(redisKey90PWM)) ### I2C Command and Address self.cmdSetAbsolute = cmdSetAbsolute #self.cmdSetRelative = cmdSetRelative # unused self.i2cAddress = i2cAddress self.i2cArrayPublisher = i2cArrayPublisher ### Ros subscriber topics for input commands and publisher for status info self.rosTopicSetPWM = rosTopicSetPWM self.rosTopicSetAngle = rosTopicSetAngle self.rosTopicChangeAngle = rosTopicChangeAngle self.rosTopicIsMoving = rosTopicIsMoving ### Motor specific topics subscriber and publisher if (self.rosTopicSetPWM is not None): self.parentNode.create_subscription(Int16, self.rosTopicSetPWM, self.onSetPWM, 10) if (self.rosTopicSetAngle is not None): self.parentNode.create_subscription(MotorPosition, self.rosTopicSetAngle, self.onSetAngle, 10) if (self.rosTopicChangeAngle is not None): self.parentNode.create_subscription(MotorPosition, self.rosTopicChangeAngle, self.onChangeAngle, 10) self.isMovingPublisher = self.parentNode.create_publisher(Bool, self.rosTopicIsMoving, 10) self.logger = self.parentNode.get_logger() # variables for the publisher self.isMoving = False self.isMovingTimer = None self.logger.info("Subscribed: {:20} | Msg: head/MotorPosition".format(self.rosTopicSetAngle)) self.logger.info("Subscribed: {:20} | Msg: head/MotorPosition".format(self.rosTopicChangeAngle)) self.logger.info("Subscribed: {:20} | Msg: Int16".format(self.rosTopicSetPWM)) self.logger.info("Publishes: {:20} | Msg: Bool".format(self.rosTopicIsMoving)) ### Init head position # Assume the pwm value the motor had at the end of the last session as the current value. self.currentPWM = int(self.r.get(redisTopicLastPWM)) # If no value was found in redis, reset to looking straight if self.currentPWM == None: self.currentPWM = self.value0PWM self.logger.info(name + " found no redis entry for the last pwm value. 
Reset to: " + str(self.currentPWM)) # waiting seems to be necessary in order for the published motor movement to be functioning time.sleep(1) # move to the expected pwm value to avoid inconsistencies in case the arduino moved the motors without this node and without updating redis self.onSetPWM(Int16(data=self.currentPWM)) ################################ ### LOG INFO ################### self.logger.info("Publish on: system/i2c/write8 | Msg: system/I2Cwrite8") self.logger.info("Publish on: system/i2c/writeArray | Msg: system/I2CwriteArray") self.logger.info("Started Motor Control Node {}".format(name)) ################################ def onShutdown(self): pass def onSetPWM(self, msg): """Method to set an absolute PWM value for this motor. This method will take care about the maximum pwm values. Args: msg (std_msgs.msg.Int16): ROS Int16 message object. """ self.logger.info("Got message on set {} pwm: \n{}".format(self.name, msg)) pwm = int(msg.data) # Create a MotorPosition object so the setPWM command can be handled like the set/change angle methods motorPosition = MotorPosition() motorPosition.duration = 0 motorPosition.speed = 10.0 self.commitMovement(pwm, motorPosition) def onSetAngle(self, msg): """Method to set an absolute motor angle. This method will take care about the maximum motor angles. Args: msg (head.msg.MotorPosition): ROS MotorPosition message object. """ self.logger.info("Got message on set {} angle:\n{}".format(self.name, msg)) angle = int(msg.angle) # use the PWM value for 0 degrees (straight view angle) as reference to turn to an absolute angle pwm = self.value0PWM + self.getPwmDeltaFromAngle(angle) self.commitMovement(pwm, msg) def onChangeAngle(self, msg): """Method to change the current motor angle relatively. This method will take care about the maximum motor angles. Args: msg (head.msg.MotorPosition): ROS MotorPosition message object. """ self.logger.info("Got message on change {} angle:\n{}".format(self.name, msg)) angle = int(msg.angle) # use the current PWM value as reference to turn to an angle relative to the previous position pwm = self.currentPWM + self.getPwmDeltaFromAngle(angle) self.commitMovement(pwm, msg) def commitMovement(self, pwm, msg): """ The code all turn commands have in common. Handles everything about the turn. """ # Make sure the PWM is in a range the motor can actually turn to pwm = self.limitPWM(pwm) angle = round(self.getAngleFromPWM(pwm)) self.logger.info("Moving with PWM = {} (Calced angle: {})".format(pwm, angle)) # Store the last values in Redis if (self.redisTopicLastPWM is not None): self.r.set(self.redisTopicLastPWM, pwm) self.r.publish(self.redisTopicLastPWM, pwm) if (self.redisTopicLastAngle is not None): self.r.set(self.redisTopicLastAngle, angle) self.r.publish(self.redisTopicLastAngle, angle) # Update the motor position with the calculated PWM value self.updateMotorPosition(self.cmdSetAbsolute, pwm, int(round(msg.speed)), int(msg.duration)) # Publish info that the motor is moving deltaPwm = pwm - self.currentPWM self.pubMotorActivity(deltaPwm, msg.speed, msg.duration) self.currentPWM = pwm def getPwmDeltaFromAngle(self, angle): """ Returns the PWM delta that matches the specified angle delta. """ return int(float(angle) / 90.0 * self.value90PWM) def getAngleFromPWM(self, pwm): """ Returns the absolute angle that matches the specified pwm value. 
""" return float(pwm - self.value0PWM) * 90.0 / float(self.value90PWM) def limitPWM(self, pwm): """ Returns the maximum or minimum pwm value that can be turned to, if the specified pwm value is too great/small. If the pwm value is already within the legal range, it is returned unchanged. """ return min(max(pwm, self.minPWM), self.maxPWM) ### Generic method to take care of the I2C publishing of new motor positions ### def updateMotorPosition(self, cmd, pwm, speed, duration): """Generic method to update the motor position to a new PWM value. This method is handling the necessary I2C publishing. Args: cmd (int): Arduino Command for the motor position update. pwm (int): New PWM value for the motor. speed (int): Speed value between 1 and 100 to reach the new position. The speed argument is only considered if the duration argument == 0. duration (int): Duration in ms to reach the new motor position. If duration != 0, the speed argument is not considered. """ self.logger.info("{} --> pwm: {} | speed: {} | duration: {}".format(cmd, pwm, speed, duration)) # Creating the I2C Array Object for publishing to the I2C Bridge node. o = I2CwriteArray() o.address = self.i2cAddress o.command = cmd # differ between speed or duration value if duration is not None and duration > 0: o.data = [int(pwm >> 8), int(pwm & 0x00FF), int(I2cDataConstants.MOTOR_CONTROL_DURATION), int(duration >> 8), int(duration & 0x00FF)] else: o.data = [int(pwm >> 8), int(pwm & 0x00FF), int(I2cDataConstants.MOTOR_CONTROL_SPEED), int(speed & 0x00FF), int(0)] self.i2cArrayPublisher.publish(o) def pubMotorActivity(self, deltaPwm, speed, duration): """ Handles publishing of the motor's activity flag. """ # find out how long the movement will take, so a timer can handle resetting the isMoving flag # "speed factor" is the pwm per milliscond to move the motor and "speed" is the percentage of that factor to use timeActiveMillis = duration if duration is not None and duration > 0 else abs(deltaPwm) / (float(speed) / 100 * I2cDataConstants.MOTOR_MAX_SPEED) timeActiveSeconds = float(timeActiveMillis) / 1000 # add a small constant time to make sure the movement-stopped-info is sent after the hardware actually stopped even if there is a tiny hardware delay timeActiveSeconds += 0.1 # If the motor was not moving before, publish that it started moving if self.isMoving == False: self.isMovingPublisher.publish(Bool(data = True)) self.isMoving = True self.logger.info("Published info that '" + str(self.name) + "' started moving.") else: # If another command is received while the motor is still moving, the first movement will be overwritten and the new movement will begin immediately. # In that case, the timer is now outdated and has to be shut down and started again with the new duration # to make sure it only publishes when the new movement will finish rather than when the cancelled movement would have finished. #self.isMovingTimer.shutdown() self.isMovingTimer.cancel() # The timer. Sets isMoving to False again after the calculated time for the motor movement has passed self.isMovingTimer = Timer(interval=timeActiveSeconds, function=self.pubInactive) self.isMovingTimer.start() # self.isMovingTimer = self.create_timer(period=timeActiveSeconds, callback=self.pubInactive, oneshot=True) def pubInactive(self): """ Handles setting the motor's activity flag to inactive. 
""" self.isMovingPublisher.publish(Bool(data = False)) self.isMovingTimer.cancel() self.isMoving = False self.logger.info("Published info that '" + str(self.name) + "' stopped moving.") ########################################################################################################### ## ROS Node, contains all motor objects ################################################################### ########################################################################################################### class MotorControl(Node): """ ROS Node containing all motor objects """ def __init__(self): super().__init__('motorControl_node') # Arduino address of the Arduino controlling the head motors self.arduinoI2C = 0x08 ### Publisher for I2C Connection self.pubI2Cwrite8 = self.create_publisher(I2Cwrite8, "system/i2c/write8", 10) self.pubI2CwriteArray = self.create_publisher(I2CwriteArray, "system/i2c/writeArray", 10) ### Publish the maximum speed constant to the arduino so this node and hardware have the same value o = I2Cwrite8() o.address = self.arduinoI2C o.command = Commands.MOTOR_SET_SPEED o.data = int(I2cDataConstants.MOTOR_MAX_SPEED & 0x00FF) self.pubI2Cwrite8.publish(o) # time.sleep(1) # Creating the object for the head turn motor self.motorTurn = Motor(parentNode = self, name="turn", redisTopicLastPWM="head/motorturn/lastPWM", redisTopicLastAngle="head/turn/lastAngle", redisKeyMaxPWM="head/motorturn/maxPWM", redisKeyMinPWM="head/motorturn/minPWM", redisKey0PWM="head/motorturn/pwm0degree", redisKey90PWM="head/motorturn/pwm90degree", rosTopicSetPWM="head/motorturn/setPWM", rosTopicSetAngle="head/turn/setAngle", rosTopicChangeAngle="head/turn/changeAngle", rosTopicIsMoving="head/turn/isMoving", cmdSetAbsolute=Commands.MOTOR_TURN_SET_ABSOLUTE, cmdSetRelative=Commands.MOTOR_TURN_SET_RELATIVE, i2cAddress=self.arduinoI2C, i2cArrayPublisher=self.pubI2CwriteArray ) # time.sleep(1) # Creating the object for the head pitch motor self.motorPitch = Motor(parentNode = self, name="pitch", redisTopicLastPWM="head/motorpitch/lastPWM", redisTopicLastAngle="head/pitch/lastAngle", redisKeyMaxPWM="head/motorpitch/maxPWM", redisKeyMinPWM="head/motorpitch/minPWM", redisKey0PWM="head/motorpitch/pwm0degree", redisKey90PWM="head/motorpitch/pwm90degree", rosTopicSetPWM="head/motorpitch/setPWM", rosTopicSetAngle="head/pitch/setAngle", rosTopicChangeAngle="head/pitch/changeAngle", rosTopicIsMoving="head/pitch/isMoving", cmdSetAbsolute=Commands.MOTOR_PITCH_SET_ABSOLUTE, cmdSetRelative=Commands.MOTOR_PITCH_SET_RELATIVE, i2cAddress=self.arduinoI2C, i2cArrayPublisher=self.pubI2CwriteArray ) # rclpy.spin(self.motorPitch) # rclpy.spin(self.motorTurn) # self.spinNode(self.motorTurn) # self.spinNode(self.motorPitch) def onShutdown(self): self.motorTurn.onShutdown() self.motorPitch.onShutdown() # def spinNode(self, node): # thread = NodeSpinner(node) # thread.start() def main(args=None): rclpy.init(args=args) # 'motorControl_node' # Init all motors node = MotorControl() # Spin forever rclpy.spin(node) node.onShutdown() node.destroy_node() rclpy.shutdown() if __name__ == '__main__': main()
python
from hapServer import * import hapBack as hb import time import sys hs = hapserver() if sys.argv[1] == "1": hb.a1(int(sys.argv[2]),hs) time.sleep(1) if sys.argv[1] == "2": hb.a2(int(sys.argv[2]),hs) time.sleep(1) if sys.argv[1] == "3": hb.r1(int(sys.argv[2]),hs) time.sleep(1) if sys.argv[1] == "4": hb.r2(int(sys.argv[2]),hs) time.sleep(1)
python
# Copyright 2018 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import inspect import json import logging import os import sys import types import jwt from azure.common.credentials import (BasicTokenAuthentication, ServicePrincipalCredentials) from azure.keyvault import KeyVaultAuthentication, AccessToken from c7n_azure import constants from c7n_azure.utils import (ResourceIdParser, StringUtils, custodian_azure_send_override, ManagedGroupHelper) from c7n_azure.utils import get_keyvault_secret from msrestazure.azure_active_directory import MSIAuthentication try: from azure.cli.core._profile import Profile except Exception: Profile = None class Session(object): def __init__(self, subscription_id=None, authorization_file=None, resource=constants.RESOURCE_ACTIVE_DIRECTORY): """ :param subscription_id: If provided overrides environment variables. :param authorization_file: Path to file populated from 'get_functions_auth_string' :param resource: Resource endpoint for OAuth token. """ self.log = logging.getLogger('custodian.azure.session') self._provider_cache = {} self.subscription_id_override = subscription_id self.credentials = None self.subscription_id = None self.tenant_id = None self.resource_namespace = resource self._is_token_auth = False self._is_cli_auth = False self.authorization_file = authorization_file self._auth_params = {} @property def auth_params(self): self._initialize_session() return self._auth_params def _authenticate(self): keyvault_client_id = self._auth_params.get('keyvault_client_id') keyvault_secret_id = self._auth_params.get('keyvault_secret_id') # If user provided KeyVault secret, we will pull auth params information from it if keyvault_secret_id: self._auth_params.update( json.loads( get_keyvault_secret(keyvault_client_id, keyvault_secret_id))) client_id = self._auth_params.get('client_id') client_secret = self._auth_params.get('client_secret') access_token = self._auth_params.get('access_token') tenant_id = self._auth_params.get('tenant_id') use_msi = self._auth_params.get('use_msi') subscription_id = self._auth_params.get('subscription_id') if access_token and subscription_id: self.log.info("Creating session with Token Authentication") self.subscription_id = subscription_id self.credentials = BasicTokenAuthentication( token={ 'access_token': access_token }) self._is_token_auth = True elif client_id and client_secret and tenant_id and subscription_id: self.log.info("Creating session with Service Principal Authentication") self.subscription_id = subscription_id self.credentials = ServicePrincipalCredentials( client_id=client_id, secret=client_secret, tenant=tenant_id, resource=self.resource_namespace) self.tenant_id = tenant_id elif use_msi and subscription_id: self.log.info("Creating session with MSI Authentication") self.subscription_id = subscription_id if client_id: self.credentials = MSIAuthentication( client_id=client_id, resource=self.resource_namespace) else: self.credentials = MSIAuthentication( resource=self.resource_namespace) 
elif self._auth_params.get('enable_cli_auth'): self.log.info("Creating session with Azure CLI Authentication") self._is_cli_auth = True try: (self.credentials, self.subscription_id, self.tenant_id) = Profile().get_login_credentials( resource=self.resource_namespace) except Exception: self.log.error('Unable to authenticate with Azure') self.log.info("Session using Subscription ID: %s" % self.subscription_id) def _initialize_session(self): """ Creates a session using available authentication type. Auth priority: 1. Token Auth 2. Tenant Auth 3. Azure CLI Auth """ # Only run once if self.credentials is not None: return if self.authorization_file: self.log.info("Using file for authentication parameters") with open(self.authorization_file) as json_file: self._auth_params = json.load(json_file) else: self.log.info("Using environment variables for authentication parameters") self._auth_params = { 'client_id': os.environ.get(constants.ENV_CLIENT_ID), 'client_secret': os.environ.get(constants.ENV_CLIENT_SECRET), 'access_token': os.environ.get(constants.ENV_ACCESS_TOKEN), 'tenant_id': os.environ.get(constants.ENV_TENANT_ID), 'use_msi': bool(os.environ.get(constants.ENV_USE_MSI)), 'subscription_id': os.environ.get(constants.ENV_SUB_ID), 'keyvault_client_id': os.environ.get(constants.ENV_KEYVAULT_CLIENT_ID), 'keyvault_secret_id': os.environ.get(constants.ENV_KEYVAULT_SECRET_ID), 'enable_cli_auth': True } # Let provided id parameter override everything else if self.subscription_id_override is not None: self._auth_params['subscription_id'] = self.subscription_id_override self._authenticate() if self.credentials is None: self.log.error('Unable to authenticate with Azure.') sys.exit(1) # TODO: cleanup this workaround when issue resolved. # https://github.com/Azure/azure-sdk-for-python/issues/5096 if self.resource_namespace == constants.RESOURCE_VAULT: access_token = AccessToken(token=self.get_bearer_token()) self.credentials = KeyVaultAuthentication(lambda _1, _2, _3: access_token) def get_session_for_resource(self, resource): return Session( subscription_id=self.subscription_id_override, authorization_file=self.authorization_file, resource=resource) def client(self, client): self._initialize_session() service_name, client_name = client.rsplit('.', 1) svc_module = importlib.import_module(service_name) klass = getattr(svc_module, client_name) klass_parameters = None if sys.version_info[0] < 3: import funcsigs klass_parameters = funcsigs.signature(klass).parameters else: klass_parameters = inspect.signature(klass).parameters client = None if 'subscription_id' in klass_parameters: client = klass(credentials=self.credentials, subscription_id=self.subscription_id) else: client = klass(credentials=self.credentials) # Override send() method to log request limits & custom retries service_client = client._client service_client.orig_send = service_client.send service_client.send = types.MethodType(custodian_azure_send_override, service_client) # Don't respect retry_after_header to implement custom retries service_client.config.retry_policy.policy.respect_retry_after_header = False return client def get_credentials(self): self._initialize_session() return self.credentials def get_subscription_id(self): self._initialize_session() return self.subscription_id def get_function_target_subscription_name(self): self._initialize_session() if constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME in os.environ: return os.environ[constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME] return os.environ.get(constants.ENV_FUNCTION_SUB_ID, 
self.subscription_id) def get_function_target_subscription_ids(self): self._initialize_session() if constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME in os.environ: return ManagedGroupHelper.get_subscriptions_list( os.environ[constants.ENV_FUNCTION_MANAGEMENT_GROUP_NAME], self.get_credentials()) return [os.environ.get(constants.ENV_FUNCTION_SUB_ID, self.subscription_id)] def resource_api_version(self, resource_id): """ latest non-preview api version for resource """ namespace = ResourceIdParser.get_namespace(resource_id) resource_type = ResourceIdParser.get_resource_type(resource_id) cache_id = namespace + resource_type if cache_id in self._provider_cache: return self._provider_cache[cache_id] resource_client = self.client('azure.mgmt.resource.ResourceManagementClient') provider = resource_client.providers.get(namespace) # The api version may be directly provided if not provider.resource_types and resource_client.providers.api_version: return resource_client.providers.api_version rt = next((t for t in provider.resource_types if StringUtils.equal(t.resource_type, resource_type)), None) if rt and rt.api_versions: versions = [v for v in rt.api_versions if 'preview' not in v.lower()] api_version = versions[0] if versions else rt.api_versions[0] self._provider_cache[cache_id] = api_version return api_version def get_tenant_id(self): self._initialize_session() if self._is_token_auth: decoded = jwt.decode(self.credentials.token['access_token'], verify=False) return decoded['tid'] return self.tenant_id def get_bearer_token(self): self._initialize_session() if self._is_cli_auth: return self.credentials._token_retriever()[1] return self.credentials.token['access_token'] def load_auth_file(self, path): with open(path) as json_file: data = json.load(json_file) self.tenant_id = data['credentials']['tenant'] return (ServicePrincipalCredentials( client_id=data['credentials']['client_id'], secret=data['credentials']['secret'], tenant=self.tenant_id, resource=self.resource_namespace ), data.get('subscription', None)) def get_functions_auth_string(self, target_subscription_id): """ Build auth json string for deploying Azure Functions. Look for dedicated Functions environment variables or fall back to normal Service Principal variables. """ self._initialize_session() function_auth_variables = [ constants.ENV_FUNCTION_TENANT_ID, constants.ENV_FUNCTION_CLIENT_ID, constants.ENV_FUNCTION_CLIENT_SECRET ] required_params = ['client_id', 'client_secret', 'tenant_id'] function_auth_params = {k: v for k, v in self._auth_params.items() if k in required_params} function_auth_params['subscription_id'] = target_subscription_id # Use dedicated function env vars if available if all(k in os.environ for k in function_auth_variables): function_auth_params['client_id'] = os.environ[constants.ENV_FUNCTION_CLIENT_ID] function_auth_params['client_secret'] = os.environ[constants.ENV_FUNCTION_CLIENT_SECRET] function_auth_params['tenant_id'] = os.environ[constants.ENV_FUNCTION_TENANT_ID] # Verify SP authentication parameters if any(k not in function_auth_params.keys() for k in required_params): raise NotImplementedError( "Service Principal credentials are the only " "supported auth mechanism for deploying functions.") return json.dumps(function_auth_params, indent=2)
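# --- Hedged usage sketch (illustrative, not part of the original module) ---
# With the authentication environment variables read in _initialize_session set
# (service principal, MSI or Azure CLI login), a Session authenticates lazily on
# first use. The '<module path>.<ClassName>' string matches what client() splits
# on; the compute client below is just an example.
def _demo_list_vms():
    s = Session()
    compute = s.client('azure.mgmt.compute.ComputeManagementClient')
    for vm in compute.virtual_machines.list_all():
        print(vm.name)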
python
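The Session class above resolves credentials in a strict priority order (token, then service principal, then MSI/CLI, per its docstring). The snippet below is only a rough, standalone sketch of that selection pattern, not c7n_azure code; the dictionary keys mirror the _auth_params names seen above, and resolve_auth_strategy is a made-up helper name.

# Minimal sketch of priority-ordered credential resolution (illustrative only).

def resolve_auth_strategy(auth_params):
    """Return the name of the first auth strategy whose inputs are present."""
    strategies = [
        ("token", ["access_token", "subscription_id"]),
        ("service_principal", ["client_id", "client_secret", "tenant_id"]),
        ("msi", ["use_msi"]),
        ("cli", ["enable_cli_auth"]),
    ]
    for name, required in strategies:
        if all(auth_params.get(key) for key in required):
            return name
    raise RuntimeError("Unable to determine an authentication strategy")


if __name__ == "__main__":
    params = {"client_id": "id", "client_secret": "secret", "tenant_id": "tenant"}
    print(resolve_auth_strategy(params))  # -> service_principal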
# This sample is used in conjunction with protocolModule4.py. from typing import Protocol, TypeVar Y = TypeVar("Y", contravariant=True) class Fn(Protocol[Y]): def __call__(self, y: Y) -> None: ... def x(x: Fn[int]) -> None: print(x)
python
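Because Y is declared contravariant in the Fn protocol above, a callable accepting a wider type than int is acceptable where Fn[int] is expected. A small standalone sketch of that property (it repeats the protocol definition so it runs on its own; protocolModule4.py itself is not shown here):

# Sketch illustrating contravariance of the callable parameter type.
from typing import Protocol, TypeVar

Y = TypeVar("Y", contravariant=True)

class Fn(Protocol[Y]):
    def __call__(self, y: Y) -> None:
        ...

def takes_int_fn(fn: Fn[int]) -> None:
    fn(42)

def handle_any(value: object) -> None:
    # Accepts any object, so it is safe to use where an int handler is required.
    print("handled:", value)

takes_int_fn(handle_any)  # accepted by a type checker because Y is contravariant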
#!/usr/bin/env python3 # Create C/C++ code for two lookup tables. import math # Size of static tables. kTableSize = 4096 # Scale factor for float arg to int index. kScaleFactor = 256.0 print("// Generated code with lookup tables") print('#include "functions.h"') print("namespace tesseract {") print("const double TanhTable[] = {") for i in range(kTableSize): print(" %a," % math.tanh(i / kScaleFactor)) print("};") print("const double LogisticTable[] = {") for i in range(kTableSize): print(" %a," % (1 / (1 + math.exp(-i / kScaleFactor)))) print("};") print("} // namespace tesseract.")
python
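The generated C tables are indexed by the argument scaled by kScaleFactor and clamped to kTableSize. A rough Python sketch of how such a table is consumed at runtime, using the same constants; fast_tanh is an illustrative name, not part of the tesseract code:

# Sketch: scaled-index lookup table for tanh, mirroring the generator's constants.
import math

K_TABLE_SIZE = 4096
K_SCALE_FACTOR = 256.0
TANH_TABLE = [math.tanh(i / K_SCALE_FACTOR) for i in range(K_TABLE_SIZE)]

def fast_tanh(x: float) -> float:
    """Approximate tanh(x) for x >= 0 via table lookup."""
    index = int(x * K_SCALE_FACTOR)
    if index >= K_TABLE_SIZE:
        index = K_TABLE_SIZE - 1
    return TANH_TABLE[index]

print(fast_tanh(0.5), math.tanh(0.5))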
class FactorProfile: types = { 'question': str, 'questionText': str, 'answer': str, 'phoneNumber': str, 'credentialId': str } def __init__(self): # unique key for question self.question = None # str # display text for question self.questionText = None # str # answer to question self.answer = None # str # phone number of mobile device self.phoneNumber = None # str # unique id for instance self.credentialId = None # str
python
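FactorProfile above is a plain data holder whose types map documents the expected field types. A hedged usage sketch with a minimal stand-in class (the to_dict helper is an addition for illustration, not part of the original SDK):

# Sketch: populate a FactorProfile-like object and dump only the non-empty fields.

class FactorProfile:
    types = {
        "question": str,
        "questionText": str,
        "answer": str,
        "phoneNumber": str,
        "credentialId": str,
    }

    def __init__(self):
        self.question = None
        self.questionText = None
        self.answer = None
        self.phoneNumber = None
        self.credentialId = None

    def to_dict(self):
        # Keep only fields that have been set.
        return {name: getattr(self, name)
                for name in self.types
                if getattr(self, name) is not None}


profile = FactorProfile()
profile.question = "favorite_security_question"
profile.answer = "cowboy dan"
print(profile.to_dict())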
""" Scheduler Service for starting flow :license: MIT """ import calendar import datetime import json import os from src.dependencies.dependency_typing import (PynamoDBCheckIn, PynamoDBConsultant, PynamoDBCustomers, Requests) from src.dependencies.pynamodb_checkin_provider import get_checkin_provider from src.dependencies.pynamodb_consultant_provider import \ get_consultants_provider from src.dependencies.pynamodb_customers_provider import get_customers_provider from src.dependencies.requests_provider import get_requests_provider def pub(event, context): ''' AWS Serverless Handler - :param event: AWS event :param context: AWS Lambda context ''' print("context:", context) print("event", event) checkin_model = get_checkin_provider() consultants_model = get_consultants_provider() customers_model = get_customers_provider() requests_client = get_requests_provider() run_scheduler(checkin_model, consultants_model, customers_model, requests_client) def run_scheduler(checkin_model: PynamoDBCheckIn, consultants_model: PynamoDBConsultant, customers_model: PynamoDBCustomers, requests_client: Requests) -> None: ''' Runs Scheduler Services - :param checkin_model: Checkin model :param consultants_model: Consultant model :param customers_model: Customer model :param requests_client: Request client ''' auth_token = os.environ['SlackAuth'] hed = {'Authorization': 'Bearer ' + auth_token} today = datetime.datetime.today() first_date = datetime.datetime(today.year, today.month, 1) - datetime.timedelta(days=1) last_date = datetime.datetime(today.year, today.month,\ calendar.monthrange(today.year, today.month)[1]) consultants_list = list(consultants_model.scan()) customers_list = list(customers_model.scan()) checkins_list = list(checkin_model.scan(checkin_model.date.between(str(first_date),\ str(last_date)) & (checkin_model.completed == 'True'))) for con in consultants_list: con_data = list(filter(lambda x: con.uuid == x.consultant_uuid, checkins_list)) cust_time = {} for data in con_data: customers = next((x for x in json.loads(data.user_input) if\ x['action_id'] == 'customers'), None) if customers is not None: customers = list(filter(lambda x: not x['unchecked'], customers['value'])) times = [x for x in json.loads(data.user_input)\ if x['action_id'].startswith('time_desc_input')] for cust in customers: time = next((z for z in times if z['customer'] == cust['value']), None) if time is not None: name = next((c for c in customers_list if\ c.uuid == cust['value']), None).friendlyName cust_time[name] = cust_time.get(name, 0) + time['value']['time'] print("Cust_time: ", cust_time) report = '{0}:'.format(today.strftime("%B")) for key in cust_time: report += '\n• {0} - {1} h'.format(key, (cust_time[key])) data = { "channel": con.slack_id, "text": report } requests_client.post('https://slack.com/api/chat.postMessage', json=data, headers=hed)
python
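The heart of run_scheduler is accumulating hours per customer into cust_time and formatting a monthly report. A self-contained sketch of just that step, with inline sample data instead of DynamoDB records or Slack calls; build_report is an illustrative helper name:

# Sketch of the per-customer time aggregation and report formatting in run_scheduler.
import datetime

def build_report(entries, month_name):
    """entries: iterable of (customer_name, hours) pairs."""
    cust_time = {}
    for name, hours in entries:
        cust_time[name] = cust_time.get(name, 0) + hours
    report = "{0}:".format(month_name)
    for key in cust_time:
        report += "\n• {0} - {1} h".format(key, cust_time[key])
    return report

sample = [("Acme", 4), ("Globex", 2.5), ("Acme", 3)]
print(build_report(sample, datetime.datetime.today().strftime("%B")))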
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2016 Christoph Reiter # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. import os import sys from setuptools import setup, Command import senf class coverage_command(Command): description = "generate test coverage data" user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): try: from coverage import coverage except ImportError: raise SystemExit( "Missing 'coverage' module. See " "https://pypi.python.org/pypi/coverage or try " "`apt-get install python-coverage python3-coverage`") for key in list(sys.modules.keys()): if key.startswith('senf'): del(sys.modules[key]) cov = coverage() cov.start() cmd = self.reinitialize_command("test") cmd.ensure_finalized() cmd.run() dest = os.path.join(os.getcwd(), "coverage") cov.stop() cov.html_report( directory=dest, ignore_errors=True, include=["senf/*"]) print("Coverage summary: file://%s/index.html" % dest) class pytest_command(Command): user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")] def initialize_options(self): self.pytest_args = [] def finalize_options(self): pass def run(self): import pytest errno = pytest.main(self.pytest_args) if errno != 0: sys.exit(errno) if __name__ == "__main__": with open('README.rst') as h: long_description = h.read() setup( name="senf", version=senf.version_string, url="https://github.com/quodlibet/senf", description=("Consistent filename handling for all Python versions " "and platforms"), long_description=long_description, author="Christoph Reiter", author_email="[email protected]", packages=[ "senf", ], classifiers=[ 'Operating System :: OS Independent', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'License :: OSI Approved :: MIT License', ], tests_require=['pytest'], cmdclass={ 'test': pytest_command, 'coverage': coverage_command, }, )
python
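coverage_command above drives the coverage package programmatically around the test run. A hedged sketch of the same idea using the current Coverage class API; run_tests and add are placeholders standing in for a real test entry point:

# Sketch: driving the coverage API programmatically, in the spirit of coverage_command.
from coverage import Coverage

def add(a, b):
    return a + b

def run_tests():
    # Placeholder for a real test run (pytest.main(), unittest, ...).
    assert add(2, 3) == 5

cov = Coverage()
cov.start()
run_tests()
cov.stop()
cov.save()
cov.report()   # prints a simple line-coverage summary to stdout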
#!/usr/bin/env python
################################################################################
#    Created by Oscar Martinez                                                 #
#    [email protected]                                                    #
################################################################################
import traceback
from flask import Flask, Response, request, jsonify
from flask_cors import CORS, cross_origin
from TermSuggestionsAggregator import TermSuggestionsAggregator, Aggregation
from elsearch import ELSearch
from wnsearch import WNSearch
from word2vec import Word2VecSuggester
from precomputed import PrecomputedClusterSuggester
from rocchio import RocchioSuggester
import MakeChart
from config import get_word2vec_model

app = Flask(__name__)
CORS(app)

methodsConfigurationDict = {1: (WNSearch, ()),
                            2: (ELSearch, ()),
                            3: (PrecomputedClusterSuggester, ()),
                            4: (Word2VecSuggester, (get_word2vec_model(), )),
                            5: (RocchioSuggester, ()),
                            }

methodsInstances = {}
for mKey in methodsConfigurationDict:
    methodsInstances[mKey] = methodsConfigurationDict[mKey][0](*methodsConfigurationDict[mKey][1])

ts = TermSuggestionsAggregator()

@app.route('/')
@cross_origin(supports_credentials=True)
def api_root():
    m = {}
    for methodKey in sorted(methodsConfigurationDict.keys()):
        m[methodKey] = (methodsConfigurationDict[methodKey][0].__name__, methodsConfigurationDict[methodKey][1])
    return jsonify(m)

@app.errorhandler(404)
@cross_origin(supports_credentials=True)
def api_error(error=None):
    message = {
        'status': 404,
        'message': 'Error: ' + error,
    }
    resp = jsonify(message)
    resp.status_code = 404
    return resp

@app.route("/suggester", methods = ['GET',])
@cross_origin(supports_credentials=True)
def api_term():
    if request.method == 'GET':
        if 'term' in request.args:
            if 'agg-method' in request.args:
                aggMethod = str(request.args['agg-method']).strip()
                if aggMethod == 'sum':
                    aggMethod = Aggregation.Sum
                elif aggMethod == 'average':
                    aggMethod = Aggregation.Average
                else:
                    return api_error('specify correct aggregation method: sum or average')
            else:  # Default aggregation method
                aggMethod = Aggregation.Sum
            if 'methods[]' in request.args:
                methods_str = request.values.getlist('methods[]')
                methods = [methodsInstances[int(m)] for m in methods_str]
            else:
                return api_error('Please select one or more query expansion methods.')
            # Get the suggestions
            data = ts.getSuggestions(str(request.args['term']), methods, aggMethod)
            resp = Response(MakeChart.dict2bar(data), status=200, mimetype='application/json')
            return resp
        else:
            return api_error('a term is required')

if __name__ == "__main__":
    app.run(debug=True)
python
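The /suggester route above expects a term, an optional agg-method of sum or average, and one or more numeric ids under methods[]. A hedged client-side sketch using the requests library; the localhost URL and port are assumptions about a locally running instance:

# Sketch: querying the /suggester endpoint shown above.
import requests

response = requests.get(
    "http://127.0.0.1:5000/suggester",
    params={
        "term": "climate",
        "agg-method": "sum",
        "methods[]": [1, 2],   # WNSearch and ELSearch in the configuration above
    },
)
print(response.status_code)
print(response.text)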
#! /usr/bin/env python from __future__ import print_function import logging logging.getLogger(__name__).setLevel(logging.INFO) import os,sys,time #import yaml import signal from snowboy import snowboydecoder interrupted = False def signal_handler(signal, frame): global interrupted interrupted = True def interrupt_callback(): global interrupted return interrupted # capture SIGINT signal, e.g., Ctrl+C signal.signal(signal.SIGINT, signal_handler) from play_audio import play_music from microphone import microphone from alexa_query import internet_on,alexa_query from busman import busman_query path = os.path.realpath(__file__).rstrip(os.path.basename(__file__)) alexa_tmp = '/tmp/alexa-pi' if sys.platform.startswith('linux'): alexa_tmp = '/dev/shm/alexa-pi' try: os.makedirs(os.path.join(alexa_tmp,'bak')) except: pass raw_recording = os.path.join(alexa_tmp,'recording.raw') mp3_response = os.path.join(alexa_tmp,'response.mp3') http_log = os.path.join(alexa_tmp,'http.log') if sys.platform.startswith('linux'): # handle alsa-lib error log things from ctypes import CFUNCTYPE,cdll,c_char_p, c_int ERROR_HANDLER_FUNC = CFUNCTYPE(None, c_char_p, c_int, c_char_p, c_int, c_char_p) def py_error_handler(filename, line, function, err, fmt): pass #print 'messages are yummy' c_error_handler = ERROR_HANDLER_FUNC(py_error_handler) asound = cdll.LoadLibrary('libasound.so') asound.snd_lib_error_set_handler(c_error_handler) def ding(): snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING) # def handle(): # with open(raw_recording,'rb') as raw: # directives = alexa_query(raw, mp3_response, http_log) # if 'speak' in directives: # play_music(mp3_response,60000) # return directives # def start2(): # while True: # ding() # if record_to_file(raw_recording): # directives = handle() def handle_alexa(): wait = True #False while True: ding() mic = microphone(wait) #logging.warn(('start microphone',wait)) #logging.warn(('end microphone',wait)) directives = alexa_query(mic, mp3_response, http_log) logging.warn(('directives:', directives.keys())) if 'speak' in directives: play_music(mp3_response,60000) #if len(directives) > 0 and not 'listen' in directives: if not 'listen' in directives: break wait = True logging.warn(('[Snowboy Listening...]')) ding() def handle_okbus(): wait = False while True: ding() mic = microphone(wait) directives = busman_query(mic) logging.warn(('directives:', directives.keys())) if len(directives) > 0 and not 'listen' in directives: break wait = True logging.warn(('[Snowboy Listening...]')) ding() if __name__ == "__main__": while not internet_on(): sys.stderr.write('.') #start2() models = [ 'pmdl/Alexa.pmdl', # 'pmdl/ok bus.pmdl' ] sensitivity = [ 0.45, # 0.45 ] callbacks = [ handle_alexa, # handle_okbus ] # test while True: handle_alexa() logging.warn(('handle_alexa finished')) detector = snowboydecoder.HotwordDetector(models, sensitivity=sensitivity) logging.warn(('[Snowboy Listening...]')) ding() # main loop detector.start(detected_callback=callbacks, interrupt_check=interrupt_callback, sleep_time=0.03) detector.terminate() # Emacs: # mode: javascript # c-basic-offset: 4 # tab-width: 8 # indent-tabs-mode: nil # End: # vim: se ft=javascript st=4 ts=8 sts=4
python
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import ddt import netifaces from cinderclient import exceptions as cinder_exceptions from os_brick import exception from oslotest import base from brick_cinderclient_ext import volume_actions @ddt.ddt class TestVolumeActions(base.BaseTestCase): def setUp(self): super(TestVolumeActions, self).setUp() self.volume_id = '3d96b134-75bd-492b-8372-330455cae38f' self.brick_client = mock.Mock() self.v_client = mock.Mock() self.command_args = [self.v_client, self.volume_id] def test_reserve(self): with volume_actions.Reserve(*self.command_args) as cmd: cmd.reserve() self.v_client.volumes.reserve.assert_called_once_with(self.volume_id) def test_reserve_failed(self): self.v_client.volumes.reserve.side_effect = ( cinder_exceptions.BadRequest(400)) try: with volume_actions.Reserve(*self.command_args) as cmd: cmd.reserve() except cinder_exceptions.BadRequest: self.v_client.volumes.unreserve.assert_called_once_with( self.volume_id) self.v_client.volumes.reserve.assert_called_once_with(self.volume_id) @mock.patch('netifaces.ifaddresses', return_value={netifaces.AF_INET: [{'addr': '127.0.0.1'}]}) @mock.patch('netifaces.interfaces', return_value=['eth1']) @mock.patch('brick_cinderclient_ext.brick_utils.get_my_ip', return_value='1.0.0.0') @ddt.data((None, {'ip': '1.0.0.0'}), ('eth1', {'ip': '127.0.0.1'})) @ddt.unpack def test_initialize_connection(self, _nic, _conn_prop, _fake_my_ip, _fake_interfaces, _fake_ifaddresses): """Test calling initialize_connection with different input params. Contains next initialize connection test cases: 1. Without any additional parameters in request; 2. 
Using --nic as a parameter; TODO (mdovgal): add other test cases; """ self.brick_client.get_connector.return_value = _conn_prop with volume_actions.InitializeConnection(*self.command_args) as cmd: cmd.initialize(self.brick_client, False, False, _nic) self.brick_client.get_connector.assert_called_once_with(False, False, _nic) self.v_client.volumes.initialize_connection.assert_called_once_with( self.volume_id, _conn_prop) @ddt.data('iscsi', 'iSCSI', 'ISCSI', 'rbd', 'RBD') def test_verify_protocol(self, protocol): with volume_actions.VerifyProtocol(*self.command_args) as cmd: # NOTE(e0ne): veryfy that no exception is rased cmd.verify(protocol) def test_verify_protocol_failed(self): try: with volume_actions.VerifyProtocol(*self.command_args) as cmd: cmd.verify('protocol') except exception.ProtocolNotSupported: self.v_client.volumes.unreserve.assert_called_once_with( self.volume_id) def test_connect_volume(self): connector = mock.Mock() connector.connect_volume.return_value = {'device': 'info'} with volume_actions.ConnectVolume(*self.command_args) as cmd: cmd.connect(connector, 'connection_data', 'mountpoint', 'mode', 'hostname') connector.connect_volume.assert_called_once_with('connection_data') self.v_client.volumes.attach.assert_called_once_with( self.volume_id, instance_uuid=None, mountpoint='mountpoint', mode='mode', host_name='hostname') @ddt.data((None, {}), ('connection_data', 'connection_data')) @ddt.unpack def test_disconnect_no_device_info(self, command_arg, connector_arg): connector = mock.Mock() with volume_actions.DisconnectVolume(*self.command_args) as cmd: cmd.disconnect(connector, 'connection_data', command_arg) connector.disconnect_volume.assert_called_once_with('connection_data', connector_arg) def test_detach(self): brick_client = mock.Mock() brick_client.get_connector.return_value = 'connector' with volume_actions.DetachVolume(*self.command_args) as cmd: cmd.detach(brick_client, 'attachment_uuid', 'multipath', 'enforce_multipath') brick_client.get_connector.assert_called_once_with('multipath', 'enforce_multipath') self.v_client.volumes.terminate_connection.assert_called_once_with( self.volume_id, 'connector') self.v_client.volumes.detach.assert_called_once_with( self.volume_id, 'attachment_uuid')
python
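test_initialize_connection above combines ddt's data/unpack parametrization with stacked mock.patch decorators. A minimal standalone sketch of the ddt pattern by itself (requires the ddt package, which the test module above already imports):

# Sketch: parametrizing a unittest TestCase with ddt, as in the test above.
import unittest
import ddt

@ddt.ddt
class AdditionTests(unittest.TestCase):

    @ddt.data((1, 2, 3), (2, 2, 4), (10, -4, 6))
    @ddt.unpack
    def test_addition(self, left, right, expected):
        self.assertEqual(left + right, expected)

if __name__ == "__main__":
    unittest.main()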
import os import unittest import numpy as np import pandas as pd from cgnal.core.data.model.ml import ( LazyDataset, IterGenerator, MultiFeatureSample, Sample, PandasDataset, PandasTimeIndexedDataset, CachedDataset, features_and_labels_to_dataset, ) from typing import Iterator, Generator from cgnal.core.tests.core import TestCase, logTest from tests import TMP_FOLDER samples = [ Sample(features=[100, 101], label=1), Sample(features=[102, 103], label=2), Sample(features=[104, 105], label=3), Sample(features=[106, 107], label=4), Sample(features=[108, 109], label=5), Sample(features=[110, 111], label=6), Sample(features=[112, 113], label=7), Sample(features=[114, 115], label=8), Sample(features=[116, 117], label=9), ] def samples_gen(): for sample in samples: if not any([np.isnan(x).any() for x in sample.features]): yield sample lazyDat = LazyDataset(IterGenerator(samples_gen)) class features_and_labels_to_datasetTests(TestCase): def test_features_and_labels_to_dataset(self): dataset = features_and_labels_to_dataset( pd.concat( [ pd.Series([1, 0, 2, 3], name="feat1"), pd.Series([1, 2, 3, 4], name="feat2"), ], axis=1, ), pd.Series([0, 0, 0, 1], name="Label"), ) dataset_no_labels = features_and_labels_to_dataset( pd.concat( [ pd.Series([1, 0, 2, 3], name="feat1"), pd.Series([1, 2, 3, 4], name="feat2"), ], axis=1, ), None, ) self.assertTrue(isinstance(dataset_no_labels, CachedDataset)) self.assertTrue(isinstance(dataset, CachedDataset)) self.assertTrue( ( dataset.getFeaturesAs("pandas") == pd.concat( [ pd.Series([1, 0, 2, 3], name="feat1"), pd.Series([1, 2, 3, 4], name="feat2"), ], axis=1, ) ) .all() .all() ) self.assertTrue( ( dataset.getLabelsAs("pandas") == pd.DataFrame(pd.Series([0, 0, 0, 1], name="Label")) ) .all() .all() ) class LazyDatasetTests(TestCase): @logTest def test_withLookback_MultiFeatureSample(self): samples = [ MultiFeatureSample( features=[np.array([100.0, 101.0]), np.array([np.NaN])], label=1.0 ), MultiFeatureSample( features=[np.array([102.0, 103.0]), np.array([1.0])], label=2.0 ), MultiFeatureSample( features=[np.array([104.0, 105.0]), np.array([2.0])], label=3.0 ), MultiFeatureSample( features=[np.array([106.0, 107.0]), np.array([3.0])], label=4.0 ), MultiFeatureSample( features=[np.array([108.0, 109.0]), np.array([4.0])], label=5.0 ), MultiFeatureSample( features=[np.array([110.0, 111.0]), np.array([5.0])], label=6.0 ), MultiFeatureSample( features=[np.array([112.0, 113.0]), np.array([6.0])], label=7.0 ), MultiFeatureSample( features=[np.array([114.0, 115.0]), np.array([7.0])], label=8.0 ), MultiFeatureSample( features=[np.array([116.0, 117.0]), np.array([8.0])], label=9.0 ), ] def samples_gen(): for sample in samples: if not any([np.isnan(x).any() for x in sample.features]): yield sample X1 = np.array( [ [[102.0, 103.0], [104.0, 105.0], [106.0, 107.0]], [[104.0, 105.0], [106.0, 107.0], [108.0, 109.0]], [[106.0, 107.0], [108.0, 109.0], [110.0, 111.0]], [[108.0, 109.0], [110.0, 111.0], [112.0, 113.0]], ] ) y1 = np.array( [ [[1.0], [2.0], [3.0]], [[2.0], [3.0], [4.0]], [[3.0], [4.0], [5.0]], [[4.0], [5.0], [6.0]], ] ) lab1 = np.array([4.0, 5.0, 6.0, 7.0]) X2 = np.array( [ [[110.0, 111.0], [112.0, 113.0], [114.0, 115.0]], [[112.0, 113.0], [114.0, 115.0], [116.0, 117.0]], ] ) y2 = np.array([[[5.0], [6.0], [7.0]], [[6.0], [7.0], [8.0]]]) lab2 = np.array([8.0, 9.0]) lookback = 3 batch_size = 4 lazyDat = LazyDataset(IterGenerator(samples_gen)) lookbackDat = lazyDat.withLookback(lookback) batch_gen = lookbackDat.batch(batch_size) batch1: CachedDataset = next(batch_gen) batch2: 
CachedDataset = next(batch_gen) tmp1 = batch1.getFeaturesAs("array") temp1X = np.array(list(map(lambda x: np.stack(x), tmp1[:, :, 0]))) temp1y = np.array(list(map(lambda x: np.stack(x), tmp1[:, :, 1]))) tmp1lab = batch1.getLabelsAs("array") res = [ np.array_equal(temp1X, X1), np.array_equal(temp1y, y1), np.array_equal(tmp1lab, lab1), ] tmp2 = batch2.getFeaturesAs("array") temp2X = np.array(list(map(lambda x: np.stack(x), tmp2[:, :, 0]))) temp2y = np.array(list(map(lambda x: np.stack(x), tmp2[:, :, 1]))) tmp2lab = batch2.getLabelsAs("array") res = res + [ np.array_equal(temp2X, X2), np.array_equal(temp2y, y2), np.array_equal(tmp2lab, lab2), ] self.assertTrue(all(res)) @logTest def test_withLookback_ArrayFeatureSample(self): samples = [ Sample(features=np.array([100, 101]), label=1), Sample(features=np.array([102, 103]), label=2), Sample(features=np.array([104, 105]), label=3), Sample(features=np.array([106, 107]), label=4), Sample(features=np.array([108, 109]), label=5), Sample(features=np.array([110, 111]), label=6), Sample(features=np.array([112, 113]), label=7), Sample(features=np.array([114, 115]), label=8), Sample(features=np.array([116, 117]), label=9), ] def samples_gen(): for sample in samples: if not any([np.isnan(x).any() for x in sample.features]): yield sample X1 = np.array( [ [[100, 101], [102, 103], [104, 105]], [[102, 103], [104, 105], [106, 107]], [[104, 105], [106, 107], [108, 109]], [[106, 107], [108, 109], [110, 111]], ] ) lab1 = np.array([3, 4, 5, 6]) X2 = np.array( [ [[108, 109], [110, 111], [112, 113]], [[110, 111], [112, 113], [114, 115]], [[112, 113], [114, 115], [116, 117]], ] ) lab2 = np.array([7, 8, 9]) lookback = 3 batch_size = 4 lazyDat = LazyDataset(IterGenerator(samples_gen)) lookbackDat = lazyDat.withLookback(lookback) batch_gen = lookbackDat.batch(batch_size) batch1: CachedDataset = next(batch_gen) batch2: CachedDataset = next(batch_gen) tmp1 = batch1.getFeaturesAs("array") tmp1lab = batch1.getLabelsAs("array") res = [np.array_equal(tmp1, X1), np.array_equal(tmp1lab, lab1)] tmp2 = batch2.getFeaturesAs("array") tmp2lab = batch2.getLabelsAs("array") res = res + [np.array_equal(tmp2, X2), np.array_equal(tmp2lab, lab2)] self.assertTrue(all(res)) @logTest def test_withLookback_ListFeatureSample(self): samples = [ Sample(features=[100, 101], label=1), Sample(features=[102, 103], label=2), Sample(features=[104, 105], label=3), Sample(features=[106, 107], label=4), Sample(features=[108, 109], label=5), Sample(features=[110, 111], label=6), Sample(features=[112, 113], label=7), Sample(features=[114, 115], label=8), Sample(features=[116, 117], label=9), ] def samples_gen(): for sample in samples: if not any([np.isnan(x).any() for x in sample.features]): yield sample X1 = np.array( [ [[100, 101], [102, 103], [104, 105]], [[102, 103], [104, 105], [106, 107]], [[104, 105], [106, 107], [108, 109]], [[106, 107], [108, 109], [110, 111]], ] ) lab1 = np.array([3, 4, 5, 6]) X2 = np.array( [ [[108, 109], [110, 111], [112, 113]], [[110, 111], [112, 113], [114, 115]], [[112, 113], [114, 115], [116, 117]], ] ) lab2 = np.array([7, 8, 9]) lookback = 3 batch_size = 4 lazyDat = LazyDataset(IterGenerator(samples_gen)) lookbackDat = lazyDat.withLookback(lookback) batch_gen = lookbackDat.batch(batch_size) batch1: CachedDataset = next(batch_gen) batch2: CachedDataset = next(batch_gen) tmp1 = batch1.getFeaturesAs("array") tmp1lab = batch1.getLabelsAs("array") res = [np.array_equal(tmp1, X1), np.array_equal(tmp1lab, lab1)] tmp2 = batch2.getFeaturesAs("array") tmp2lab = 
batch2.getLabelsAs("array") res = res + [np.array_equal(tmp2, X2), np.array_equal(tmp2lab, lab2)] self.assertTrue(all(res)) @logTest def test_features_labels(self): self.assertTrue(isinstance(lazyDat.features(), Generator)) self.assertTrue(isinstance(lazyDat.labels(), Generator)) self.assertTrue(isinstance(lazyDat.getFeaturesAs(), Generator)) self.assertTrue(isinstance(lazyDat.getLabelsAs(), Generator)) self.assertEqual(next(lazyDat.getFeaturesAs()), samples[0].features) self.assertEqual(next(lazyDat.getLabelsAs()), samples[0].label) self.assertEqual(next(lazyDat.features()), samples[0].features) self.assertEqual(next(lazyDat.labels()), samples[0].label) class CachedDatasetTests(TestCase): @logTest def test_to_df(self): self.assertTrue(isinstance(CachedDataset(lazyDat).to_df(), pd.DataFrame)) self.assertTrue( ( CachedDataset(lazyDat).to_df()["features"][0].values == [100, 102, 104, 106, 108, 110, 112, 114, 116] ).all() ) self.assertTrue( ( CachedDataset(lazyDat).to_df()["labels"][0].values == [1, 2, 3, 4, 5, 6, 7, 8, 9] ).all() ) @logTest def test_asPandasDataset(self): self.assertTrue( isinstance(CachedDataset(lazyDat).asPandasDataset, PandasDataset) ) self.assertTrue( ( CachedDataset(lazyDat).asPandasDataset.features[0].values == [100, 102, 104, 106, 108, 110, 112, 114, 116] ).all() ) self.assertTrue( ( CachedDataset(lazyDat).asPandasDataset.labels[0].values == [1, 2, 3, 4, 5, 6, 7, 8, 9] ).all() ) class PandasDatasetTests(TestCase): dataset: PandasDataset = PandasDataset( features=pd.concat( [ pd.Series([1, np.nan, 2, 3], name="feat1"), pd.Series([1, 2, 3, 4], name="feat2"), ], axis=1, ), labels=pd.Series([0, 0, 0, 1], name="Label"), ) dataset_no_label: PandasDataset = PandasDataset( features=pd.concat( [ pd.Series([1, np.nan, 2, 3], name="feat1"), pd.Series([1, 2, 3, 4], name="feat2"), ], axis=1, ) ) @logTest def test_check_none(self): self.assertEqual(self.dataset._check_none(None), None) self.assertEqual(self.dataset._check_none("test"), "test") @logTest def test__len__(self): self.assertEqual(self.dataset.__len__(), 4) @logTest def test_items(self): self.assertTrue(isinstance(self.dataset.items, Iterator)) self.assertEqual(next(self.dataset.items).features, {"feat1": 1.0, "feat2": 1}) self.assertEqual(next(self.dataset.items).label["Label"], 0) self.assertEqual( next(self.dataset_no_label.items).features, {"feat1": 1.0, "feat2": 1} ) self.assertEqual(next(self.dataset_no_label.items).label, None) @logTest def test_dropna_none_labels(self): res = pd.concat( [pd.Series([1, 2, 3], name="feat1"), pd.Series([1, 3, 4], name="feat2")], axis=1, ) self.assertTrue( ( self.dataset.dropna(subset=["feat1"]).features.reset_index(drop=True) == res ) .all() .all() ) self.assertTrue( ( self.dataset.dropna(feat__subset=["feat1"]).features.reset_index( drop=True ) == res ) .all() .all() ) self.assertTrue( ( self.dataset.dropna(labs__subset=["Label"]).features.reset_index( drop=True ) == res ) .all() .all() ) @logTest def test_cached(self): self.assertTrue(self.dataset.cached) @logTest def test_features_labels(self): self.assertEqual( self.dataset.features, pd.concat( [ pd.Series([1, np.nan, 2, 3], name="feat1"), pd.Series([1, 2, 3, 4], name="feat2"), ], axis=1, ), ) self.assertTrue((self.dataset.labels["Label"] == pd.Series([0, 0, 0, 1])).all()) @logTest def test_index(self): self.assertTrue((self.dataset.index == range(4)).all()) @logTest def test_createObject(self): self.assertTrue( isinstance( PandasDataset.createObject( features=pd.concat( [ pd.Series([1, np.nan, 2, 3], name="feat1"), 
pd.Series([1, 2, 3, 4], name="feat2"), ], axis=1, ), labels=None, ), PandasDataset, ) ) self.assertEqual( PandasDataset.createObject( features=pd.concat( [ pd.Series([1, np.nan, 2, 3], name="feat1"), pd.Series([1, 2, 3, 4], name="feat2"), ], axis=1, ), labels=None, ).features, self.dataset_no_label.features, ) self.assertEqual( PandasDataset.createObject( features=pd.concat( [ pd.Series([1, np.nan, 2, 3], name="feat1"), pd.Series([1, 2, 3, 4], name="feat2"), ], axis=1, ), labels=None, ).labels, self.dataset_no_label.labels, ) @logTest def test_take(self): self.assertTrue(isinstance(self.dataset.takeAsPandas(1), PandasDataset)) self.assertEqual( self.dataset.takeAsPandas(1).features.feat2, pd.Series([1], name="feat2") ) self.assertEqual( self.dataset.takeAsPandas(1).labels["Label"], pd.Series([0], name="Label") ) @logTest def test_loc(self): self.assertEqual(self.dataset.loc(2).features[2]["feat1"], 2) self.assertEqual(self.dataset.loc(2).features[2]["feat2"], 3) self.assertEqual(self.dataset.loc(2).labels[2]["Label"], 0) self.assertTrue(self.dataset_no_label.loc(2).labels is None) @logTest def test_from_sequence(self): features_1 = pd.DataFrame( {"feat1": [1, 2, 3, 4], "feat2": [100, 200, 300, 400]}, index=[1, 2, 3, 4] ) features_2 = pd.DataFrame( {"feat1": [9, 11, 13, 14], "feat2": [90, 110, 130, 140]}, index=[10, 11, 12, 13], ) features_3 = pd.DataFrame( {"feat1": [90, 10, 10, 1400], "feat2": [0.9, 0.11, 0.13, 0.14]}, index=[15, 16, 17, 18], ) labels_1 = pd.DataFrame({"target": [1, 0, 1, 1]}, index=[1, 2, 3, 4]) labels_2 = pd.DataFrame({"target": [1, 1, 1, 0]}, index=[10, 11, 12, 13]) labels_3 = pd.DataFrame({"target": [0, 1, 1, 0]}, index=[15, 16, 17, 18]) dataset_1 = PandasDataset(features_1, labels_1) dataset_2 = PandasDataset(features_2, labels_2) dataset_3 = PandasDataset(features_3, labels_3) dataset_merged = PandasDataset.from_sequence([dataset_1, dataset_2, dataset_3]) self.assertEqual( pd.concat([features_1, features_2, features_3]), dataset_merged.features ) self.assertEqual( pd.concat([labels_1, labels_2, labels_3]), dataset_merged.labels ) @logTest def test_serialization(self): filename = os.path.join(TMP_FOLDER, "my_dataset.p") self.dataset.write(filename) newDataset: PandasDataset = PandasDataset.load(filename) self.assertTrue(isinstance(newDataset, PandasDataset)) self.assertTrue( (self.dataset.features.fillna("NaN") == newDataset.features.fillna("NaN")) .all() .all() ) @logTest def test_creation_from_samples(self): samples = [ Sample(features=[100, 101], label=1, name=1), Sample(features=[102, 103], label=2, name=2), Sample(features=[104, 105], label=1, name=3), Sample(features=[106, 107], label=2, name=4), Sample(features=[108, 109], label=2, name=5), Sample(features=[110, 111], label=2, name=6), Sample(features=[112, 113], label=1, name=7), Sample(features=[114, 115], label=2, name=8), Sample(features=[116, 117], label=2, name=9), ] lazyDataset = CachedDataset(samples).filter(lambda x: x.label <= 5) assert isinstance(lazyDataset, LazyDataset) for format in ["pandas", "array", "dict"]: features1 = lazyDataset.getFeaturesAs(format) labels1 = lazyDataset.getLabelsAs(format) cached: CachedDataset = lazyDataset.asCached features2 = cached.getFeaturesAs(format) labels2 = cached.getLabelsAs(format) self.assertEqual(features1, features2) self.assertEqual(labels1, labels2) pandasDataset = cached.asPandasDataset features3 = pandasDataset.getFeaturesAs(format) labels3 = pandasDataset.getLabelsAs(format) self.assertEqual(features1, features3) self.assertEqual(labels1, labels3) 
@logTest def test_union(self): union = self.dataset.union( PandasDataset( features=pd.concat( [ pd.Series([np.nan, 5, 6, 7], name="feat1"), pd.Series([7, 8, 9, 10], name="feat2"), ], axis=1, ), labels=pd.Series([0, 0, 0, 1], name="Label"), ) ) self.assertTrue(isinstance(union, PandasDataset)) self.assertEqual( union.features.reset_index(drop=True), pd.concat( [ pd.Series([1, np.nan, 2, 3, np.nan, 5, 6, 7], name="feat1"), pd.Series([1, 2, 3, 4, 7, 8, 9, 10], name="feat2"), ], axis=1, ), ) self.assertEqual( union.labels.Label.reset_index(drop=True), pd.Series([0, 0, 0, 1, 0, 0, 0, 1], name="Label"), ) @logTest def test_intersection(self): other = PandasDataset( features=pd.concat( [ pd.Series([1, 2, 3, 4], name="feat1"), pd.Series([5, 6, 7, 8], name="feat2"), ], axis=1, ), labels=pd.Series([1, 1, 0, 0], name="Label", index=[0, 1, 4, 5]), ) self.assertEqual(other.intersection().labels.index.to_list(), [0, 1]) self.assertEqual(other.intersection().features.index.to_list(), [0, 1]) @logTest def test_getFeaturesAs(self): self.assertTrue(isinstance(self.dataset.getFeaturesAs("array"), np.ndarray)) self.assertTrue(isinstance(self.dataset.getFeaturesAs("pandas"), pd.DataFrame)) self.assertTrue(isinstance(self.dataset.getFeaturesAs("dict"), dict)) @logTest def test_getLabelsAs(self): self.assertTrue(isinstance(self.dataset.getLabelsAs("array"), np.ndarray)) self.assertTrue(isinstance(self.dataset.getLabelsAs("pandas"), pd.DataFrame)) self.assertTrue(isinstance(self.dataset.getLabelsAs("dict"), dict)) class PandasTimeIndexedDatasetTests(TestCase): dates = pd.date_range("2010-01-01", "2010-01-04") dateStr = [str(x) for x in dates] dataset = PandasTimeIndexedDataset( features=pd.concat( [ pd.Series([1, np.nan, 2, 3], index=dateStr, name="feat1"), pd.Series([1, 2, 3, 4], index=dateStr, name="feat2"), ], axis=1, ) ) @logTest def test_time_index(self): # duck-typing check days = [x.day for x in self.dataset.features.index] self.assertTrue(set(days), set(range(4))) @logTest def test_serialization(self): filename = os.path.join(TMP_FOLDER, "my_dataset.p") self.dataset.write(filename) newDataset = type(self.dataset).load(filename) self.assertTrue(isinstance(newDataset, PandasTimeIndexedDataset)) self.assertTrue( (self.dataset.features.fillna("NaN") == newDataset.features.fillna("NaN")) .all() .all() ) @logTest def test_createObject(self): NewDataset = self.dataset.createObject( features=pd.concat( [ pd.Series([1, 3], index=self.dateStr[0:2], name="feat1"), pd.Series([1, 2], index=self.dateStr[0:2], name="feat2"), ], axis=1, ), labels=pd.Series([0, 0], index=self.dateStr[0:2], name="Label"), ) self.assertTrue(isinstance(NewDataset, PandasTimeIndexedDataset)) self.assertTrue( ( NewDataset.features == pd.concat( [ pd.Series( [1, 3], index=map(pd.to_datetime, self.dateStr[0:2]), name="feat1", ), pd.Series( [1, 2], index=map(pd.to_datetime, self.dateStr[0:2]), name="feat2", ), ], axis=1, ) ) .all() .all() ) self.assertTrue( ( NewDataset.labels.values == pd.Series([0, 0], index=self.dateStr[0:2], name="Label").values ).all() ) @logTest def test_loc(self): new_dataset = self.dataset.loc( [x for x in pd.date_range("2010-01-01", "2010-01-02")] ) to_check = PandasTimeIndexedDataset( features=pd.DataFrame(self.dataset.features.iloc[:2]) ) self.assertIsInstance(new_dataset, PandasTimeIndexedDataset) self.assertEqual(new_dataset.features, to_check.features) if __name__ == "__main__": unittest.main()
python
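The withLookback/batch tests above exercise a sliding-window view over a sample stream followed by fixed-size batching. A self-contained NumPy sketch of that windowing, independent of the cgnal classes, reproducing the shapes asserted for the list-feature case (lookback=3, batch_size=4):

# Sketch: sliding-window ("lookback") batching over a feature sequence.
import numpy as np

features = np.array([[100 + 2 * i, 101 + 2 * i] for i in range(9)])
labels = np.arange(1, 10)

lookback = 3
batch_size = 4

# One window per position, each containing `lookback` consecutive samples.
windows = np.stack([features[i:i + lookback] for i in range(len(features) - lookback + 1)])
window_labels = labels[lookback - 1:]

for start in range(0, len(windows), batch_size):
    batch_X = windows[start:start + batch_size]
    batch_y = window_labels[start:start + batch_size]
    print(batch_X.shape, batch_y)   # (4, 3, 2) [3 4 5 6] then (3, 3, 2) [7 8 9]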
"""CheckingProxy derived from jsonrpc.proxy due to subclassing problems w/getattr. Converts service errors into ServiceError exceptions, otherwise call returns the jsonrpc "result" field. """ from __future__ import print_function from __future__ import division from __future__ import absolute_import import sys import uuid import json import time import os import crds from crds import python23 from crds import log, config from crds import exceptions def apply_with_retries(func, *pars, **keys): """Apply function func() as f(*pargs, **keys) and return the result. Retry on any exception as defined in config.py""" retries = config.get_client_retry_count() delay = config.get_client_retry_delay_seconds() for retry in range(retries): try: return func(*pars, **keys) except Exception as exc: log.verbose("FAILED: Attempt", str(retry+1), "of", retries, "with:", str(exc)) log.verbose("FAILED: Waiting for", delay, "seconds before retrying") # waits after total fail... time.sleep(delay) exc2 = exc else: raise exc2 def message_id(): """Return a nominal identifier for this program.""" return _program_name() + "-" + crds.__version__ + "-" + _PROCESS_ID + "-" + _request_id() def _program_name(): """Return the name of this program.""" return os.path.basename(os.path.splitext(sys.argv[0])[0]) try: _PROCESS_ID = str(uuid.uuid4()) except Exception: _PROCESS_ID = "00000000-0000-0000-00000000000000000" MSG_NO = 0 def _request_id(): """Return an identifier unique to this particular JSONRPC request.""" global MSG_NO MSG_NO += 1 return "%08x" % MSG_NO class CheckingProxy(object): """CheckingProxy converts calls to undefined methods into JSON RPC service calls. If the JSON rpc returns an error, CheckingProxy raises a ServiceError exception containing the error's message. XXX NOTE: Always underscore new methods or you may hide a real JSONRPC method which also appears in the proxy object's namespace with the same name. """ def __init__(self, service_url, service_name=None, version='1.0'): self.__version = str(version) self.__service_url = service_url self.__service_name = service_name def __getattr__(self, name): if self.__service_name != None: name = "%s.%s" % (self.__service_name, name) return CheckingProxy(self.__service_url, name, self.__version) def __repr__(self): return self.__class__.__name__ + "(url='%s', method='%s')" % \ (self.__service_url, self.__service_name) def _call(self, *args, **kwargs): """Core of RPC dispatch without error interpretation, logging, or return value decoding.""" params = kwargs if len(kwargs) else args # if Any.kind(params) == Object and self.__version != '2.0': # raise Exception('Unsupport arg type for JSON-RPC 1.0 ' # '(the default version for this client, ' # 'pass version="2.0" to use keyword arguments)') jsonrpc_params = {"jsonrpc": self.__version, "method": self.__service_name, 'params': params, 'id': message_id() } parameters = json.dumps(jsonrpc_params) url = self._get_url(jsonrpc_params) if "serverless" in url or "server-less" in url: raise exceptions.ServiceError("Configured for server-less mode. 
Skipping JSON RPC " + repr(self.__service_name)) if log.get_verbose() <= 50: log.verbose("CRDS JSON RPC", self.__service_name, params if len(str(params)) <= 60 else "(...)", "-->") else: log.verbose("CRDS JSON RPC to", url, "parameters", params, "-->") response = apply_with_retries(self._call_service, parameters, url) try: rval = json.loads(response) except Exception: log.warning("Invalid CRDS jsonrpc response:\n", response) raise return rval def _get_url(self, jsonrpc_params): """Return the JSONRPC URL used to perform a method call. Since post parameters are not visible in the log, annotate the URL with additional method id paths which are functionally ignored but visible in the log. """ return self.__service_url + jsonrpc_params["method"] + "/" + jsonrpc_params["id"] + "/" def _call_service(self, parameters, url): """Call the JSONRPC defined by `parameters` and raise a ServiceError on any exception.""" if not isinstance(parameters, bytes): parameters = parameters.encode("utf-8") try: # context = ssl.create_default_context() # channel = urlopen(url, parameters, context=context) channel = python23.urlopen(url, parameters) return channel.read().decode("utf-8") except Exception as exc: raise exceptions.ServiceError("CRDS jsonrpc failure " + repr(self.__service_name) + " " + str(exc)) def __call__(self, *args, **kwargs): jsonrpc = self._call(*args, **kwargs) if jsonrpc["error"]: decoded = str(python23.unescape(jsonrpc["error"]["message"])) raise self.classify_exception(decoded) else: result = crds_decode(jsonrpc["result"]) result = fix_strings(result) log.verbose("RPC OK", log.PP(result) if log.get_verbose() >= 70 else "") return result def classify_exception(self, decoded): """Interpret exc __str__ to define as more precise CRDS exception.""" if "Channel" in decoded and "not found" in decoded: return exceptions.StatusChannelNotFoundError(decoded) elif "External agent requested calling process termination." in decoded: return exceptions.OwningProcessAbortedError(decoded) else: msg = "CRDS jsonrpc failure " + repr(self.__service_name) + " " + str(decoded) return exceptions.ServiceError(msg) def fix_strings(rval): """Convert unicode to strings.""" if isinstance(rval, python23.string_types): return str(rval) elif isinstance(rval, tuple): return tuple([fix_strings(x) for x in rval]) elif isinstance(rval, list): return [fix_strings(x) for x in rval] elif isinstance(rval, dict): return { fix_strings(key):fix_strings(val) for (key, val) in rval.items()} else: return rval # ============================================================================ # These operate transparently in the proxy and are optionally used by the server. # # This makes a new client with crds_decoder compatible with both encoding and # unencoding servers. # # An older client without crds_decoder will not work with a new server which is encoding. # That could be achieved, but wasn't because the function where the feature was # needed would not work without compression anyway. def crds_encode(obj): """Return a JSON-compatible encoding of `obj`, nominally json-ified, compressed, and base64 encooded. This is nominally to be called on the server. """ return dict(crds_encoded = "1.0", crds_payload = json.dumps(obj).encode('zlib').encode('base64')) def crds_decode(msg): """Decode something which was crds_encode'd, or return it unaltered if it wasn't. """ if isinstance(msg, dict) and "crds_encoded" in msg: json_str = msg["crds_payload"].decode('base64').decode('zlib') return json.loads(json_str) else: return msg
python
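crds_encode and crds_decode above rely on the Python 2 str.encode('zlib') and ('base64') codecs, which no longer exist in Python 3. A hedged Python 3 sketch of the same compress-then-base64 payload idea using the zlib and base64 modules directly (function names here are illustrative, not the CRDS API):

# Sketch: a Python 3 rendition of the compress-then-base64 payload encoding.
import base64
import json
import zlib

def encode(obj):
    payload = base64.b64encode(zlib.compress(json.dumps(obj).encode("utf-8")))
    return {"crds_encoded": "1.0", "crds_payload": payload.decode("ascii")}

def decode(msg):
    if isinstance(msg, dict) and "crds_encoded" in msg:
        raw = zlib.decompress(base64.b64decode(msg["crds_payload"]))
        return json.loads(raw.decode("utf-8"))
    return msg

message = {"method": "get_best_references", "params": [1, 2, 3]}
assert decode(encode(message)) == message
print(encode(message))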
import sys
from PyQt5.QtWidgets import QAction,QHBoxLayout,QWidget,QApplication,QMainWindow
from PyQt5.QtGui import QIcon

class QToolBarDemo(QMainWindow):
    def __init__(self):
        super(QToolBarDemo, self).__init__()
        # Set the window size
        self.resize(400, 150)
        # Set the window title
        self.setWindowTitle("QToolBarDemo")

        toolBar = self.addToolBar('File')

        new = QAction(QIcon('u1.ico'), 'new', self)
        toolBar.addAction(new)
        open = QAction(QIcon('u2.ico'), 'open', self)
        toolBar.addAction(open)
        save = QAction(QIcon('u3.ico'), 'save', self)
        toolBar.addAction(save)

        toolBar.actionTriggered[QAction].connect(self.btnClick)

        # Create a horizontal layout
        layout = QHBoxLayout()

        mainFrame = QWidget()
        mainFrame.setLayout(layout)
        self.setCentralWidget(mainFrame)

    def btnClick(self, w):
        print("pressed tool button is:", w.text())

if __name__ == '__main__':
    app = QApplication(sys.argv)
    main = QToolBarDemo()
    main.show()
    sys.exit(app.exec_())
python
from django.contrib.auth.base_user import AbstractBaseUser
from django.db import models
from django.db.models import Manager


class EqualizeMixin:
    equal_fields = ()

    def __eq__(self, other):
        equal_fields = self._get_equal_fields()
        for field in equal_fields:
            if getattr(self, field) != getattr(other, field):
                return False
        return True

    def _get_equal_fields(self):
        if not self.equal_fields:
            raise NotImplementedError()
        return self.equal_fields

    def merge(self, other):
        equal_fields = self._get_equal_fields()
        for field in equal_fields:
            if getattr(self, field) != getattr(other, field):
                setattr(self, field, getattr(other, field))


class ChangeMixin:
    def _change(self, **kwargs):
        is_changed = False
        for key, value in kwargs.items():
            if getattr(self, key) == value:
                continue
            setattr(self, key, value)
            is_changed = True
        return is_changed


class BaseModel(EqualizeMixin, ChangeMixin, models.Model):
    create_time = models.DateTimeField(auto_now_add=True, editable=False, verbose_name='created date')
    update_time = models.DateTimeField(auto_now=True, verbose_name='updated date')

    objects = Manager()
    equal_fields = ()

    class Meta:
        abstract = True


class BaseUserModel(BaseModel, AbstractBaseUser):
    class Meta:
        abstract = True
python
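EqualizeMixin above compares instances field by field over equal_fields, and merge copies differing values across. A framework-free sketch of that behavior with plain classes (no Django models or database involved):

# Sketch: field-wise equality and merge in the style of EqualizeMixin above.

class EqualizeMixin:
    equal_fields = ()

    def __eq__(self, other):
        return all(getattr(self, f) == getattr(other, f) for f in self.equal_fields)

    def merge(self, other):
        for f in self.equal_fields:
            if getattr(self, f) != getattr(other, f):
                setattr(self, f, getattr(other, f))


class Profile(EqualizeMixin):
    equal_fields = ("name", "email")

    def __init__(self, name, email):
        self.name = name
        self.email = email


a = Profile("Kim", "kim@example.com")
b = Profile("Kim", "kim@newdomain.com")
print(a == b)   # False: emails differ
a.merge(b)
print(a == b)   # True after merging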
""" Cisco Intersight Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501 The version of the OpenAPI document: 1.0.9-4950 Contact: [email protected] Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from intersight.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) def lazy_import(): from intersight.model.mo_base_complex_type import MoBaseComplexType from intersight.model.virtualization_esxi_clone_custom_spec import VirtualizationEsxiCloneCustomSpec from intersight.model.virtualization_esxi_ova_custom_spec import VirtualizationEsxiOvaCustomSpec globals()['MoBaseComplexType'] = MoBaseComplexType globals()['VirtualizationEsxiCloneCustomSpec'] = VirtualizationEsxiCloneCustomSpec globals()['VirtualizationEsxiOvaCustomSpec'] = VirtualizationEsxiOvaCustomSpec class VirtualizationBaseCustomSpec(ModelComposed): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. 
additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { ('class_id',): { 'ACCESS.ADDRESSTYPE': "access.AddressType", 'ADAPTER.ADAPTERCONFIG': "adapter.AdapterConfig", 'ADAPTER.DCEINTERFACESETTINGS': "adapter.DceInterfaceSettings", 'ADAPTER.ETHSETTINGS': "adapter.EthSettings", 'ADAPTER.FCSETTINGS': "adapter.FcSettings", 'ADAPTER.PORTCHANNELSETTINGS': "adapter.PortChannelSettings", 'APPLIANCE.APISTATUS': "appliance.ApiStatus", 'APPLIANCE.CERTRENEWALPHASE': "appliance.CertRenewalPhase", 'APPLIANCE.KEYVALUEPAIR': "appliance.KeyValuePair", 'APPLIANCE.STATUSCHECK': "appliance.StatusCheck", 'ASSET.ADDRESSINFORMATION': "asset.AddressInformation", 'ASSET.APIKEYCREDENTIAL': "asset.ApiKeyCredential", 'ASSET.CLIENTCERTIFICATECREDENTIAL': "asset.ClientCertificateCredential", 'ASSET.CLOUDCONNECTION': "asset.CloudConnection", 'ASSET.CONNECTIONCONTROLMESSAGE': "asset.ConnectionControlMessage", 'ASSET.CONTRACTINFORMATION': "asset.ContractInformation", 'ASSET.CUSTOMERINFORMATION': "asset.CustomerInformation", 'ASSET.DEPLOYMENTALARMINFO': "asset.DeploymentAlarmInfo", 'ASSET.DEPLOYMENTDEVICEALARMINFO': "asset.DeploymentDeviceAlarmInfo", 'ASSET.DEPLOYMENTDEVICEINFORMATION': "asset.DeploymentDeviceInformation", 'ASSET.DEVICEINFORMATION': "asset.DeviceInformation", 'ASSET.DEVICESTATISTICS': "asset.DeviceStatistics", 'ASSET.DEVICETRANSACTION': "asset.DeviceTransaction", 'ASSET.GLOBALULTIMATE': "asset.GlobalUltimate", 'ASSET.HTTPCONNECTION': "asset.HttpConnection", 'ASSET.INTERSIGHTDEVICECONNECTORCONNECTION': "asset.IntersightDeviceConnectorConnection", 'ASSET.METERINGTYPE': "asset.MeteringType", 'ASSET.NEWRELICCREDENTIAL': "asset.NewRelicCredential", 'ASSET.NOAUTHENTICATIONCREDENTIAL': "asset.NoAuthenticationCredential", 'ASSET.OAUTHBEARERTOKENCREDENTIAL': "asset.OauthBearerTokenCredential", 'ASSET.OAUTHCLIENTIDSECRETCREDENTIAL': "asset.OauthClientIdSecretCredential", 'ASSET.ORCHESTRATIONHITACHIVIRTUALSTORAGEPLATFORMOPTIONS': "asset.OrchestrationHitachiVirtualStoragePlatformOptions", 'ASSET.ORCHESTRATIONSERVICE': "asset.OrchestrationService", 'ASSET.PARENTCONNECTIONSIGNATURE': "asset.ParentConnectionSignature", 'ASSET.PRIVATEKEYCREDENTIAL': "asset.PrivateKeyCredential", 'ASSET.PRODUCTINFORMATION': "asset.ProductInformation", 'ASSET.SERVICENOWCREDENTIAL': "asset.ServiceNowCredential", 'ASSET.SSHCONNECTION': "asset.SshConnection", 'ASSET.SUDIINFO': "asset.SudiInfo", 'ASSET.TARGETKEY': "asset.TargetKey", 'ASSET.TARGETSIGNATURE': "asset.TargetSignature", 'ASSET.TARGETSTATUSDETAILS': "asset.TargetStatusDetails", 'ASSET.TERRAFORMINTEGRATIONSERVICE': "asset.TerraformIntegrationService", 'ASSET.TERRAFORMINTEGRATIONTERRAFORMAGENTOPTIONS': "asset.TerraformIntegrationTerraformAgentOptions", 'ASSET.TERRAFORMINTEGRATIONTERRAFORMCLOUDOPTIONS': "asset.TerraformIntegrationTerraformCloudOptions", 'ASSET.USERNAMEPASSWORDCREDENTIAL': "asset.UsernamePasswordCredential", 'ASSET.VIRTUALIZATIONAMAZONWEBSERVICEOPTIONS': "asset.VirtualizationAmazonWebServiceOptions", 'ASSET.VIRTUALIZATIONSERVICE': "asset.VirtualizationService", 'ASSET.VMHOST': "asset.VmHost", 'ASSET.WORKLOADOPTIMIZERAMAZONWEBSERVICESBILLINGOPTIONS': "asset.WorkloadOptimizerAmazonWebServicesBillingOptions", 'ASSET.WORKLOADOPTIMIZERDYNATRACEOPTIONS': "asset.WorkloadOptimizerDynatraceOptions", 'ASSET.WORKLOADOPTIMIZERHYPERVOPTIONS': "asset.WorkloadOptimizerHypervOptions", 'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREAPPLICATIONINSIGHTSOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureApplicationInsightsOptions", 
'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREENTERPRISEAGREEMENTOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureEnterpriseAgreementOptions", 'ASSET.WORKLOADOPTIMIZERMICROSOFTAZURESERVICEPRINCIPALOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureServicePrincipalOptions", 'ASSET.WORKLOADOPTIMIZERNEWRELICOPTIONS': "asset.WorkloadOptimizerNewRelicOptions", 'ASSET.WORKLOADOPTIMIZEROPENSTACKOPTIONS': "asset.WorkloadOptimizerOpenStackOptions", 'ASSET.WORKLOADOPTIMIZERREDHATOPENSTACKOPTIONS': "asset.WorkloadOptimizerRedHatOpenStackOptions", 'ASSET.WORKLOADOPTIMIZERSERVICE': "asset.WorkloadOptimizerService", 'ASSET.WORKLOADOPTIMIZERVMWAREVCENTEROPTIONS': "asset.WorkloadOptimizerVmwareVcenterOptions", 'BOOT.BOOTLOADER': "boot.Bootloader", 'BOOT.ISCSI': "boot.Iscsi", 'BOOT.LOCALCDD': "boot.LocalCdd", 'BOOT.LOCALDISK': "boot.LocalDisk", 'BOOT.NVME': "boot.Nvme", 'BOOT.PCHSTORAGE': "boot.PchStorage", 'BOOT.PXE': "boot.Pxe", 'BOOT.SAN': "boot.San", 'BOOT.SDCARD': "boot.SdCard", 'BOOT.UEFISHELL': "boot.UefiShell", 'BOOT.USB': "boot.Usb", 'BOOT.VIRTUALMEDIA': "boot.VirtualMedia", 'BULK.HTTPHEADER': "bulk.HttpHeader", 'BULK.RESTRESULT': "bulk.RestResult", 'BULK.RESTSUBREQUEST': "bulk.RestSubRequest", 'CAPABILITY.PORTRANGE': "capability.PortRange", 'CAPABILITY.SWITCHNETWORKLIMITS': "capability.SwitchNetworkLimits", 'CAPABILITY.SWITCHSTORAGELIMITS': "capability.SwitchStorageLimits", 'CAPABILITY.SWITCHSYSTEMLIMITS': "capability.SwitchSystemLimits", 'CAPABILITY.SWITCHINGMODECAPABILITY': "capability.SwitchingModeCapability", 'CERTIFICATEMANAGEMENT.IMC': "certificatemanagement.Imc", 'CLOUD.AVAILABILITYZONE': "cloud.AvailabilityZone", 'CLOUD.BILLINGUNIT': "cloud.BillingUnit", 'CLOUD.CLOUDREGION': "cloud.CloudRegion", 'CLOUD.CLOUDTAG': "cloud.CloudTag", 'CLOUD.CUSTOMATTRIBUTES': "cloud.CustomAttributes", 'CLOUD.IMAGEREFERENCE': "cloud.ImageReference", 'CLOUD.INSTANCETYPE': "cloud.InstanceType", 'CLOUD.NETWORKACCESSCONFIG': "cloud.NetworkAccessConfig", 'CLOUD.NETWORKADDRESS': "cloud.NetworkAddress", 'CLOUD.NETWORKINSTANCEATTACHMENT': "cloud.NetworkInstanceAttachment", 'CLOUD.NETWORKINTERFACEATTACHMENT': "cloud.NetworkInterfaceAttachment", 'CLOUD.SECURITYGROUPRULE': "cloud.SecurityGroupRule", 'CLOUD.TFCWORKSPACEVARIABLES': "cloud.TfcWorkspaceVariables", 'CLOUD.VOLUMEATTACHMENT': "cloud.VolumeAttachment", 'CLOUD.VOLUMEINSTANCEATTACHMENT': "cloud.VolumeInstanceAttachment", 'CLOUD.VOLUMEIOPSINFO': "cloud.VolumeIopsInfo", 'CLOUD.VOLUMETYPE': "cloud.VolumeType", 'CMRF.CMRF': "cmrf.CmRf", 'COMM.IPV4ADDRESSBLOCK': "comm.IpV4AddressBlock", 'COMM.IPV4INTERFACE': "comm.IpV4Interface", 'COMM.IPV6INTERFACE': "comm.IpV6Interface", 'COMPUTE.ALARMSUMMARY': "compute.AlarmSummary", 'COMPUTE.IPADDRESS': "compute.IpAddress", 'COMPUTE.PERSISTENTMEMORYMODULE': "compute.PersistentMemoryModule", 'COMPUTE.PERSISTENTMEMORYOPERATION': "compute.PersistentMemoryOperation", 'COMPUTE.SERVERCONFIG': "compute.ServerConfig", 'COMPUTE.SERVEROPSTATUS': "compute.ServerOpStatus", 'COMPUTE.STORAGECONTROLLEROPERATION': "compute.StorageControllerOperation", 'COMPUTE.STORAGEPHYSICALDRIVE': "compute.StoragePhysicalDrive", 'COMPUTE.STORAGEPHYSICALDRIVEOPERATION': "compute.StoragePhysicalDriveOperation", 'COMPUTE.STORAGEVIRTUALDRIVE': "compute.StorageVirtualDrive", 'COMPUTE.STORAGEVIRTUALDRIVEOPERATION': "compute.StorageVirtualDriveOperation", 'COND.ALARMSUMMARY': "cond.AlarmSummary", 'CONNECTOR.CLOSESTREAMMESSAGE': "connector.CloseStreamMessage", 'CONNECTOR.COMMANDCONTROLMESSAGE': "connector.CommandControlMessage", 'CONNECTOR.COMMANDTERMINALSTREAM': 
"connector.CommandTerminalStream", 'CONNECTOR.EXPECTPROMPT': "connector.ExpectPrompt", 'CONNECTOR.FETCHSTREAMMESSAGE': "connector.FetchStreamMessage", 'CONNECTOR.FILECHECKSUM': "connector.FileChecksum", 'CONNECTOR.FILEMESSAGE': "connector.FileMessage", 'CONNECTOR.HTTPREQUEST': "connector.HttpRequest", 'CONNECTOR.SSHCONFIG': "connector.SshConfig", 'CONNECTOR.SSHMESSAGE': "connector.SshMessage", 'CONNECTOR.STARTSTREAM': "connector.StartStream", 'CONNECTOR.STARTSTREAMFROMDEVICE': "connector.StartStreamFromDevice", 'CONNECTOR.STREAMACKNOWLEDGE': "connector.StreamAcknowledge", 'CONNECTOR.STREAMINPUT': "connector.StreamInput", 'CONNECTOR.STREAMKEEPALIVE': "connector.StreamKeepalive", 'CONNECTOR.TARGETCHANGEMESSAGE': "connector.TargetChangeMessage", 'CONNECTOR.URL': "connector.Url", 'CONNECTOR.WINRMREQUEST': "connector.WinrmRequest", 'CONNECTOR.XMLAPIMESSAGE': "connector.XmlApiMessage", 'CONNECTORPACK.CONNECTORPACKUPDATE': "connectorpack.ConnectorPackUpdate", 'CONTENT.COMPLEXTYPE': "content.ComplexType", 'CONTENT.PARAMETER': "content.Parameter", 'CONTENT.TEXTPARAMETER': "content.TextParameter", 'CONVERGEDINFRA.ALARMSUMMARY': "convergedinfra.AlarmSummary", 'CONVERGEDINFRA.COMPLIANCESUMMARY': "convergedinfra.ComplianceSummary", 'CONVERGEDINFRA.PODSUMMARY': "convergedinfra.PodSummary", 'CRD.CUSTOMRESOURCECONFIGPROPERTY': "crd.CustomResourceConfigProperty", 'EQUIPMENT.IOCARDIDENTITY': "equipment.IoCardIdentity", 'FABRIC.LLDPSETTINGS': "fabric.LldpSettings", 'FABRIC.MACAGINGSETTINGS': "fabric.MacAgingSettings", 'FABRIC.PORTIDENTIFIER': "fabric.PortIdentifier", 'FABRIC.QOSCLASS': "fabric.QosClass", 'FABRIC.UDLDGLOBALSETTINGS': "fabric.UdldGlobalSettings", 'FABRIC.UDLDSETTINGS': "fabric.UdldSettings", 'FABRIC.VLANSETTINGS': "fabric.VlanSettings", 'FCPOOL.BLOCK': "fcpool.Block", 'FEEDBACK.FEEDBACKDATA': "feedback.FeedbackData", 'FIRMWARE.CHASSISUPGRADEIMPACT': "firmware.ChassisUpgradeImpact", 'FIRMWARE.CIFSSERVER': "firmware.CifsServer", 'FIRMWARE.COMPONENTIMPACT': "firmware.ComponentImpact", 'FIRMWARE.COMPONENTMETA': "firmware.ComponentMeta", 'FIRMWARE.DIRECTDOWNLOAD': "firmware.DirectDownload", 'FIRMWARE.FABRICUPGRADEIMPACT': "firmware.FabricUpgradeImpact", 'FIRMWARE.FIRMWAREINVENTORY': "firmware.FirmwareInventory", 'FIRMWARE.HTTPSERVER': "firmware.HttpServer", 'FIRMWARE.INCLUDECOMPONENTLISTTYPE': "firmware.IncludeComponentListType", 'FIRMWARE.NETWORKSHARE': "firmware.NetworkShare", 'FIRMWARE.NFSSERVER': "firmware.NfsServer", 'FIRMWARE.SERVERUPGRADEIMPACT': "firmware.ServerUpgradeImpact", 'FORECAST.MODEL': "forecast.Model", 'HCL.CONSTRAINT': "hcl.Constraint", 'HCL.FIRMWARE': "hcl.Firmware", 'HCL.HARDWARECOMPATIBILITYPROFILE': "hcl.HardwareCompatibilityProfile", 'HCL.PRODUCT': "hcl.Product", 'HYPERFLEX.ALARMSUMMARY': "hyperflex.AlarmSummary", 'HYPERFLEX.APPSETTINGCONSTRAINT': "hyperflex.AppSettingConstraint", 'HYPERFLEX.BACKUPPOLICYSETTINGS': "hyperflex.BackupPolicySettings", 'HYPERFLEX.DATASTOREINFO': "hyperflex.DatastoreInfo", 'HYPERFLEX.ENTITYREFERENCE': "hyperflex.EntityReference", 'HYPERFLEX.ERRORSTACK': "hyperflex.ErrorStack", 'HYPERFLEX.FEATURELIMITENTRY': "hyperflex.FeatureLimitEntry", 'HYPERFLEX.FILEPATH': "hyperflex.FilePath", 'HYPERFLEX.HEALTHCHECKSCRIPTINFO': "hyperflex.HealthCheckScriptInfo", 'HYPERFLEX.HXHOSTMOUNTSTATUSDT': "hyperflex.HxHostMountStatusDt", 'HYPERFLEX.HXLICENSEAUTHORIZATIONDETAILSDT': "hyperflex.HxLicenseAuthorizationDetailsDt", 'HYPERFLEX.HXLINKDT': "hyperflex.HxLinkDt", 'HYPERFLEX.HXNETWORKADDRESSDT': "hyperflex.HxNetworkAddressDt", 
'HYPERFLEX.HXPLATFORMDATASTORECONFIGDT': "hyperflex.HxPlatformDatastoreConfigDt", 'HYPERFLEX.HXREGISTRATIONDETAILSDT': "hyperflex.HxRegistrationDetailsDt", 'HYPERFLEX.HXRESILIENCYINFODT': "hyperflex.HxResiliencyInfoDt", 'HYPERFLEX.HXSITEDT': "hyperflex.HxSiteDt", 'HYPERFLEX.HXUUIDDT': "hyperflex.HxUuIdDt", 'HYPERFLEX.HXZONEINFODT': "hyperflex.HxZoneInfoDt", 'HYPERFLEX.HXZONERESILIENCYINFODT': "hyperflex.HxZoneResiliencyInfoDt", 'HYPERFLEX.IPADDRRANGE': "hyperflex.IpAddrRange", 'HYPERFLEX.LOGICALAVAILABILITYZONE': "hyperflex.LogicalAvailabilityZone", 'HYPERFLEX.MACADDRPREFIXRANGE': "hyperflex.MacAddrPrefixRange", 'HYPERFLEX.MAPCLUSTERIDTOPROTECTIONINFO': "hyperflex.MapClusterIdToProtectionInfo", 'HYPERFLEX.MAPCLUSTERIDTOSTSNAPSHOTPOINT': "hyperflex.MapClusterIdToStSnapshotPoint", 'HYPERFLEX.MAPUUIDTOTRACKEDDISK': "hyperflex.MapUuidToTrackedDisk", 'HYPERFLEX.NAMEDVLAN': "hyperflex.NamedVlan", 'HYPERFLEX.NAMEDVSAN': "hyperflex.NamedVsan", 'HYPERFLEX.PORTTYPETOPORTNUMBERMAP': "hyperflex.PortTypeToPortNumberMap", 'HYPERFLEX.PROTECTIONINFO': "hyperflex.ProtectionInfo", 'HYPERFLEX.REPLICATIONCLUSTERREFERENCETOSCHEDULE': "hyperflex.ReplicationClusterReferenceToSchedule", 'HYPERFLEX.REPLICATIONPEERINFO': "hyperflex.ReplicationPeerInfo", 'HYPERFLEX.REPLICATIONPLATDATASTORE': "hyperflex.ReplicationPlatDatastore", 'HYPERFLEX.REPLICATIONPLATDATASTOREPAIR': "hyperflex.ReplicationPlatDatastorePair", 'HYPERFLEX.REPLICATIONSCHEDULE': "hyperflex.ReplicationSchedule", 'HYPERFLEX.REPLICATIONSTATUS': "hyperflex.ReplicationStatus", 'HYPERFLEX.RPOSTATUS': "hyperflex.RpoStatus", 'HYPERFLEX.SERVERFIRMWAREVERSIONINFO': "hyperflex.ServerFirmwareVersionInfo", 'HYPERFLEX.SERVERMODELENTRY': "hyperflex.ServerModelEntry", 'HYPERFLEX.SNAPSHOTFILES': "hyperflex.SnapshotFiles", 'HYPERFLEX.SNAPSHOTINFOBRIEF': "hyperflex.SnapshotInfoBrief", 'HYPERFLEX.SNAPSHOTPOINT': "hyperflex.SnapshotPoint", 'HYPERFLEX.SNAPSHOTSTATUS': "hyperflex.SnapshotStatus", 'HYPERFLEX.STPLATFORMCLUSTERHEALINGINFO': "hyperflex.StPlatformClusterHealingInfo", 'HYPERFLEX.STPLATFORMCLUSTERRESILIENCYINFO': "hyperflex.StPlatformClusterResiliencyInfo", 'HYPERFLEX.SUMMARY': "hyperflex.Summary", 'HYPERFLEX.TRACKEDDISK': "hyperflex.TrackedDisk", 'HYPERFLEX.TRACKEDFILE': "hyperflex.TrackedFile", 'HYPERFLEX.VIRTUALMACHINE': "hyperflex.VirtualMachine", 'HYPERFLEX.VIRTUALMACHINERUNTIMEINFO': "hyperflex.VirtualMachineRuntimeInfo", 'HYPERFLEX.VMPROTECTIONSPACEUSAGE': "hyperflex.VmProtectionSpaceUsage", 'HYPERFLEX.WWXNPREFIXRANGE': "hyperflex.WwxnPrefixRange", 'I18N.MESSAGE': "i18n.Message", 'I18N.MESSAGEPARAM': "i18n.MessageParam", 'IAAS.LICENSEKEYSINFO': "iaas.LicenseKeysInfo", 'IAAS.LICENSEUTILIZATIONINFO': "iaas.LicenseUtilizationInfo", 'IAAS.WORKFLOWSTEPS': "iaas.WorkflowSteps", 'IAM.ACCOUNTPERMISSIONS': "iam.AccountPermissions", 'IAM.CLIENTMETA': "iam.ClientMeta", 'IAM.ENDPOINTPASSWORDPROPERTIES': "iam.EndPointPasswordProperties", 'IAM.FEATUREDEFINITION': "iam.FeatureDefinition", 'IAM.GROUPPERMISSIONTOROLES': "iam.GroupPermissionToRoles", 'IAM.LDAPBASEPROPERTIES': "iam.LdapBaseProperties", 'IAM.LDAPDNSPARAMETERS': "iam.LdapDnsParameters", 'IAM.PERMISSIONREFERENCE': "iam.PermissionReference", 'IAM.PERMISSIONTOROLES': "iam.PermissionToRoles", 'IAM.RULE': "iam.Rule", 'IAM.SAMLSPCONNECTION': "iam.SamlSpConnection", 'IAM.SSOSESSIONATTRIBUTES': "iam.SsoSessionAttributes", 'IMCCONNECTOR.WEBUIMESSAGE': "imcconnector.WebUiMessage", 'INFRA.HARDWAREINFO': "infra.HardwareInfo", 'INFRA.METADATA': "infra.MetaData", 'INVENTORY.INVENTORYMO': "inventory.InventoryMo", 
'INVENTORY.UEMINFO': "inventory.UemInfo", 'IPPOOL.IPV4BLOCK': "ippool.IpV4Block", 'IPPOOL.IPV4CONFIG': "ippool.IpV4Config", 'IPPOOL.IPV6BLOCK': "ippool.IpV6Block", 'IPPOOL.IPV6CONFIG': "ippool.IpV6Config", 'IQNPOOL.IQNSUFFIXBLOCK': "iqnpool.IqnSuffixBlock", 'KUBERNETES.ACTIONINFO': "kubernetes.ActionInfo", 'KUBERNETES.ADDON': "kubernetes.Addon", 'KUBERNETES.ADDONCONFIGURATION': "kubernetes.AddonConfiguration", 'KUBERNETES.BAREMETALNETWORKINFO': "kubernetes.BaremetalNetworkInfo", 'KUBERNETES.CALICOCONFIG': "kubernetes.CalicoConfig", 'KUBERNETES.CLUSTERCERTIFICATECONFIGURATION': "kubernetes.ClusterCertificateConfiguration", 'KUBERNETES.CLUSTERMANAGEMENTCONFIG': "kubernetes.ClusterManagementConfig", 'KUBERNETES.CONFIGURATION': "kubernetes.Configuration", 'KUBERNETES.DAEMONSETSTATUS': "kubernetes.DaemonSetStatus", 'KUBERNETES.DEPLOYMENTSTATUS': "kubernetes.DeploymentStatus", 'KUBERNETES.ESSENTIALADDON': "kubernetes.EssentialAddon", 'KUBERNETES.ESXIVIRTUALMACHINEINFRACONFIG': "kubernetes.EsxiVirtualMachineInfraConfig", 'KUBERNETES.ETHERNET': "kubernetes.Ethernet", 'KUBERNETES.ETHERNETMATCHER': "kubernetes.EthernetMatcher", 'KUBERNETES.HYPERFLEXAPVIRTUALMACHINEINFRACONFIG': "kubernetes.HyperFlexApVirtualMachineInfraConfig", 'KUBERNETES.INGRESSSTATUS': "kubernetes.IngressStatus", 'KUBERNETES.INSTANCETYPEDETAILS': "kubernetes.InstanceTypeDetails", 'KUBERNETES.IPV4CONFIG': "kubernetes.IpV4Config", 'KUBERNETES.KEYVALUE': "kubernetes.KeyValue", 'KUBERNETES.LOADBALANCER': "kubernetes.LoadBalancer", 'KUBERNETES.NETWORKINTERFACESPEC': "kubernetes.NetworkInterfaceSpec", 'KUBERNETES.NODEADDRESS': "kubernetes.NodeAddress", 'KUBERNETES.NODEGROUPLABEL': "kubernetes.NodeGroupLabel", 'KUBERNETES.NODEGROUPTAINT': "kubernetes.NodeGroupTaint", 'KUBERNETES.NODEINFO': "kubernetes.NodeInfo", 'KUBERNETES.NODESPEC': "kubernetes.NodeSpec", 'KUBERNETES.NODESTATUS': "kubernetes.NodeStatus", 'KUBERNETES.OBJECTMETA': "kubernetes.ObjectMeta", 'KUBERNETES.OVSBOND': "kubernetes.OvsBond", 'KUBERNETES.PODSTATUS': "kubernetes.PodStatus", 'KUBERNETES.PROXYCONFIG': "kubernetes.ProxyConfig", 'KUBERNETES.ROUTE': "kubernetes.Route", 'KUBERNETES.SERVICESTATUS': "kubernetes.ServiceStatus", 'KUBERNETES.STATEFULSETSTATUS': "kubernetes.StatefulSetStatus", 'KUBERNETES.TAINT': "kubernetes.Taint", 'MACPOOL.BLOCK': "macpool.Block", 'MEMORY.PERSISTENTMEMORYGOAL': "memory.PersistentMemoryGoal", 'MEMORY.PERSISTENTMEMORYLOCALSECURITY': "memory.PersistentMemoryLocalSecurity", 'MEMORY.PERSISTENTMEMORYLOGICALNAMESPACE': "memory.PersistentMemoryLogicalNamespace", 'META.ACCESSPRIVILEGE': "meta.AccessPrivilege", 'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition", 'META.IDENTITYDEFINITION': "meta.IdentityDefinition", 'META.PROPDEFINITION': "meta.PropDefinition", 'META.RELATIONSHIPDEFINITION': "meta.RelationshipDefinition", 'MO.MOREF': "mo.MoRef", 'MO.TAG': "mo.Tag", 'MO.VERSIONCONTEXT': "mo.VersionContext", 'NIAAPI.DETAIL': "niaapi.Detail", 'NIAAPI.NEWRELEASEDETAIL': "niaapi.NewReleaseDetail", 'NIAAPI.REVISIONINFO': "niaapi.RevisionInfo", 'NIAAPI.SOFTWAREREGEX': "niaapi.SoftwareRegex", 'NIAAPI.VERSIONREGEXPLATFORM': "niaapi.VersionRegexPlatform", 'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails", 'NIATELEMETRY.DEPLOYMENTSTATUS': "niatelemetry.DeploymentStatus", 'NIATELEMETRY.DISKINFO': "niatelemetry.Diskinfo", 'NIATELEMETRY.INTERFACE': "niatelemetry.Interface", 'NIATELEMETRY.INTERFACEELEMENT': "niatelemetry.InterfaceElement", 'NIATELEMETRY.JOBDETAIL': "niatelemetry.JobDetail", 'NIATELEMETRY.LOGICALLINK': 
"niatelemetry.LogicalLink", 'NIATELEMETRY.NVEPACKETCOUNTERS': "niatelemetry.NvePacketCounters", 'NIATELEMETRY.NVEVNI': "niatelemetry.NveVni", 'NIATELEMETRY.NXOSBGPMVPN': "niatelemetry.NxosBgpMvpn", 'NIATELEMETRY.NXOSVTP': "niatelemetry.NxosVtp", 'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense", 'NIATELEMETRY.VNISTATUS': "niatelemetry.VniStatus", 'NOTIFICATION.ALARMMOCONDITION': "notification.AlarmMoCondition", 'NOTIFICATION.SENDEMAIL': "notification.SendEmail", 'NTP.AUTHNTPSERVER': "ntp.AuthNtpServer", 'ONPREM.IMAGEPACKAGE': "onprem.ImagePackage", 'ONPREM.SCHEDULE': "onprem.Schedule", 'ONPREM.UPGRADENOTE': "onprem.UpgradeNote", 'ONPREM.UPGRADEPHASE': "onprem.UpgradePhase", 'OPRS.KVPAIR': "oprs.Kvpair", 'OS.ANSWERS': "os.Answers", 'OS.GLOBALCONFIG': "os.GlobalConfig", 'OS.IPV4CONFIGURATION': "os.Ipv4Configuration", 'OS.IPV6CONFIGURATION': "os.Ipv6Configuration", 'OS.PHYSICALDISK': "os.PhysicalDisk", 'OS.PHYSICALDISKRESPONSE': "os.PhysicalDiskResponse", 'OS.PLACEHOLDER': "os.PlaceHolder", 'OS.SERVERCONFIG': "os.ServerConfig", 'OS.VALIDATIONINFORMATION': "os.ValidationInformation", 'OS.VIRTUALDRIVE': "os.VirtualDrive", 'OS.VIRTUALDRIVERESPONSE': "os.VirtualDriveResponse", 'OS.VMWAREPARAMETERS': "os.VmwareParameters", 'OS.WINDOWSPARAMETERS': "os.WindowsParameters", 'PKIX.DISTINGUISHEDNAME': "pkix.DistinguishedName", 'PKIX.ECDSAKEYSPEC': "pkix.EcdsaKeySpec", 'PKIX.EDDSAKEYSPEC': "pkix.EddsaKeySpec", 'PKIX.RSAALGORITHM': "pkix.RsaAlgorithm", 'PKIX.SUBJECTALTERNATENAME': "pkix.SubjectAlternateName", 'POLICY.ACTIONPARAM': "policy.ActionParam", 'POLICY.ACTIONQUALIFIER': "policy.ActionQualifier", 'POLICY.CONFIGCHANGE': "policy.ConfigChange", 'POLICY.CONFIGCHANGECONTEXT': "policy.ConfigChangeContext", 'POLICY.CONFIGCONTEXT': "policy.ConfigContext", 'POLICY.CONFIGRESULTCONTEXT': "policy.ConfigResultContext", 'POLICY.QUALIFIER': "policy.Qualifier", 'POLICYINVENTORY.JOBINFO': "policyinventory.JobInfo", 'RECOVERY.BACKUPSCHEDULE': "recovery.BackupSchedule", 'RESOURCE.PERTYPECOMBINEDSELECTOR': "resource.PerTypeCombinedSelector", 'RESOURCE.SELECTOR': "resource.Selector", 'RESOURCE.SOURCETOPERMISSIONRESOURCES': "resource.SourceToPermissionResources", 'RESOURCE.SOURCETOPERMISSIONRESOURCESHOLDER': "resource.SourceToPermissionResourcesHolder", 'RESOURCEPOOL.SERVERLEASEPARAMETERS': "resourcepool.ServerLeaseParameters", 'RESOURCEPOOL.SERVERPOOLPARAMETERS': "resourcepool.ServerPoolParameters", 'SDCARD.DIAGNOSTICS': "sdcard.Diagnostics", 'SDCARD.DRIVERS': "sdcard.Drivers", 'SDCARD.HOSTUPGRADEUTILITY': "sdcard.HostUpgradeUtility", 'SDCARD.OPERATINGSYSTEM': "sdcard.OperatingSystem", 'SDCARD.PARTITION': "sdcard.Partition", 'SDCARD.SERVERCONFIGURATIONUTILITY': "sdcard.ServerConfigurationUtility", 'SDCARD.USERPARTITION': "sdcard.UserPartition", 'SDWAN.NETWORKCONFIGURATIONTYPE': "sdwan.NetworkConfigurationType", 'SDWAN.TEMPLATEINPUTSTYPE': "sdwan.TemplateInputsType", 'SERVER.PENDINGWORKFLOWTRIGGER': "server.PendingWorkflowTrigger", 'SNMP.TRAP': "snmp.Trap", 'SNMP.USER': "snmp.User", 'SOFTWAREREPOSITORY.APPLIANCEUPLOAD': "softwarerepository.ApplianceUpload", 'SOFTWAREREPOSITORY.CIFSSERVER': "softwarerepository.CifsServer", 'SOFTWAREREPOSITORY.CONSTRAINTMODELS': "softwarerepository.ConstraintModels", 'SOFTWAREREPOSITORY.HTTPSERVER': "softwarerepository.HttpServer", 'SOFTWAREREPOSITORY.IMPORTRESULT': "softwarerepository.ImportResult", 'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine", 'SOFTWAREREPOSITORY.NFSSERVER': "softwarerepository.NfsServer", 'STORAGE.AUTOMATICDRIVEGROUP': 
"storage.AutomaticDriveGroup", 'STORAGE.HITACHIARRAYUTILIZATION': "storage.HitachiArrayUtilization", 'STORAGE.HITACHICAPACITY': "storage.HitachiCapacity", 'STORAGE.HITACHIINITIATOR': "storage.HitachiInitiator", 'STORAGE.INITIATOR': "storage.Initiator", 'STORAGE.KEYSETTING': "storage.KeySetting", 'STORAGE.LOCALKEYSETTING': "storage.LocalKeySetting", 'STORAGE.M2VIRTUALDRIVECONFIG': "storage.M2VirtualDriveConfig", 'STORAGE.MANUALDRIVEGROUP': "storage.ManualDriveGroup", 'STORAGE.NETAPPETHERNETPORTLAG': "storage.NetAppEthernetPortLag", 'STORAGE.NETAPPETHERNETPORTVLAN': "storage.NetAppEthernetPortVlan", 'STORAGE.NETAPPEXPORTPOLICYRULE': "storage.NetAppExportPolicyRule", 'STORAGE.NETAPPHIGHAVAILABILITY': "storage.NetAppHighAvailability", 'STORAGE.NETAPPPERFORMANCEMETRICSAVERAGE': "storage.NetAppPerformanceMetricsAverage", 'STORAGE.NETAPPPORT': "storage.NetAppPort", 'STORAGE.NETAPPSTORAGECLUSTEREFFICIENCY': "storage.NetAppStorageClusterEfficiency", 'STORAGE.NETAPPSTORAGEUTILIZATION': "storage.NetAppStorageUtilization", 'STORAGE.PUREARRAYUTILIZATION': "storage.PureArrayUtilization", 'STORAGE.PUREDISKUTILIZATION': "storage.PureDiskUtilization", 'STORAGE.PUREHOSTUTILIZATION': "storage.PureHostUtilization", 'STORAGE.PUREREPLICATIONBLACKOUT': "storage.PureReplicationBlackout", 'STORAGE.PUREVOLUMEUTILIZATION': "storage.PureVolumeUtilization", 'STORAGE.R0DRIVE': "storage.R0Drive", 'STORAGE.REMOTEKEYSETTING': "storage.RemoteKeySetting", 'STORAGE.SPANDRIVES': "storage.SpanDrives", 'STORAGE.STORAGECONTAINERHOSTMOUNTSTATUS': "storage.StorageContainerHostMountStatus", 'STORAGE.STORAGECONTAINERUTILIZATION': "storage.StorageContainerUtilization", 'STORAGE.VIRTUALDRIVECONFIGURATION': "storage.VirtualDriveConfiguration", 'STORAGE.VIRTUALDRIVEPOLICY': "storage.VirtualDrivePolicy", 'STORAGE.VOLUMEUTILIZATION': "storage.VolumeUtilization", 'SYSLOG.LOCALFILELOGGINGCLIENT': "syslog.LocalFileLoggingClient", 'SYSLOG.REMOTELOGGINGCLIENT': "syslog.RemoteLoggingClient", 'TAM.ACTION': "tam.Action", 'TAM.APIDATASOURCE': "tam.ApiDataSource", 'TAM.EOLADVISORYDETAILS': "tam.EolAdvisoryDetails", 'TAM.EOLSEVERITY': "tam.EolSeverity", 'TAM.IDENTIFIERS': "tam.Identifiers", 'TAM.MILESTONE': "tam.Milestone", 'TAM.PSIRTSEVERITY': "tam.PsirtSeverity", 'TAM.QUERYENTRY': "tam.QueryEntry", 'TAM.S3DATASOURCE': "tam.S3DataSource", 'TAM.SECURITYADVISORYDETAILS': "tam.SecurityAdvisoryDetails", 'TAM.TEXTFSMTEMPLATEDATASOURCE': "tam.TextFsmTemplateDataSource", 'TECHSUPPORTMANAGEMENT.APPLIANCEPARAM': "techsupportmanagement.ApplianceParam", 'TECHSUPPORTMANAGEMENT.NIAPARAM': "techsupportmanagement.NiaParam", 'TECHSUPPORTMANAGEMENT.PLATFORMPARAM': "techsupportmanagement.PlatformParam", 'TEMPLATE.TRANSFORMATIONSTAGE': "template.TransformationStage", 'TERRAFORM.CLOUDRESOURCE': "terraform.CloudResource", 'TERRAFORM.RUNSTATE': "terraform.Runstate", 'UCSD.CONNECTORPACK': "ucsd.ConnectorPack", 'UCSD.UCSDRESTOREPARAMETERS': "ucsd.UcsdRestoreParameters", 'UCSDCONNECTOR.RESTCLIENTMESSAGE': "ucsdconnector.RestClientMessage", 'UUIDPOOL.UUIDBLOCK': "uuidpool.UuidBlock", 'VIRTUALIZATION.ACTIONINFO': "virtualization.ActionInfo", 'VIRTUALIZATION.AWSVMCOMPUTECONFIGURATION': "virtualization.AwsVmComputeConfiguration", 'VIRTUALIZATION.AWSVMCONFIGURATION': "virtualization.AwsVmConfiguration", 'VIRTUALIZATION.AWSVMNETWORKCONFIGURATION': "virtualization.AwsVmNetworkConfiguration", 'VIRTUALIZATION.AWSVMSTORAGECONFIGURATION': "virtualization.AwsVmStorageConfiguration", 'VIRTUALIZATION.BONDSTATE': "virtualization.BondState", 'VIRTUALIZATION.CLOUDINITCONFIG': 
"virtualization.CloudInitConfig", 'VIRTUALIZATION.COMPUTECAPACITY': "virtualization.ComputeCapacity", 'VIRTUALIZATION.CPUALLOCATION': "virtualization.CpuAllocation", 'VIRTUALIZATION.CPUINFO': "virtualization.CpuInfo", 'VIRTUALIZATION.DISKSTATUS': "virtualization.DiskStatus", 'VIRTUALIZATION.ESXICLONECUSTOMSPEC': "virtualization.EsxiCloneCustomSpec", 'VIRTUALIZATION.ESXIHOSTCONFIGURATION': "virtualization.EsxiHostConfiguration", 'VIRTUALIZATION.ESXIOVACUSTOMSPEC': "virtualization.EsxiOvaCustomSpec", 'VIRTUALIZATION.ESXIVMCOMPUTECONFIGURATION': "virtualization.EsxiVmComputeConfiguration", 'VIRTUALIZATION.ESXIVMCONFIGURATION': "virtualization.EsxiVmConfiguration", 'VIRTUALIZATION.ESXIVMNETWORKCONFIGURATION': "virtualization.EsxiVmNetworkConfiguration", 'VIRTUALIZATION.ESXIVMSTORAGECONFIGURATION': "virtualization.EsxiVmStorageConfiguration", 'VIRTUALIZATION.GUESTINFO': "virtualization.GuestInfo", 'VIRTUALIZATION.HXAPVMCONFIGURATION': "virtualization.HxapVmConfiguration", 'VIRTUALIZATION.IPADDRESSINFO': "virtualization.IpAddressInfo", 'VIRTUALIZATION.MEMORYALLOCATION': "virtualization.MemoryAllocation", 'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity", 'VIRTUALIZATION.NETWORKINTERFACE': "virtualization.NetworkInterface", 'VIRTUALIZATION.NETWORKPORT': "virtualization.NetworkPort", 'VIRTUALIZATION.PRODUCTINFO': "virtualization.ProductInfo", 'VIRTUALIZATION.STORAGECAPACITY': "virtualization.StorageCapacity", 'VIRTUALIZATION.VDISKCONFIG': "virtualization.VdiskConfig", 'VIRTUALIZATION.VIRTUALDISKCONFIG': "virtualization.VirtualDiskConfig", 'VIRTUALIZATION.VIRTUALMACHINEDISK': "virtualization.VirtualMachineDisk", 'VIRTUALIZATION.VMDISK': "virtualization.VmDisk", 'VIRTUALIZATION.VMESXIDISK': "virtualization.VmEsxiDisk", 'VIRTUALIZATION.VMINTERFACE': "virtualization.VmInterface", 'VIRTUALIZATION.VMWAREREMOTEDISPLAYINFO': "virtualization.VmwareRemoteDisplayInfo", 'VIRTUALIZATION.VMWARERESOURCECONSUMPTION': "virtualization.VmwareResourceConsumption", 'VIRTUALIZATION.VMWARESHARESINFO': "virtualization.VmwareSharesInfo", 'VIRTUALIZATION.VMWARETEAMINGANDFAILOVER': "virtualization.VmwareTeamingAndFailover", 'VIRTUALIZATION.VMWAREVLANRANGE': "virtualization.VmwareVlanRange", 'VIRTUALIZATION.VMWAREVMCPUSHAREINFO': "virtualization.VmwareVmCpuShareInfo", 'VIRTUALIZATION.VMWAREVMCPUSOCKETINFO': "virtualization.VmwareVmCpuSocketInfo", 'VIRTUALIZATION.VMWAREVMDISKCOMMITINFO': "virtualization.VmwareVmDiskCommitInfo", 'VIRTUALIZATION.VMWAREVMMEMORYSHAREINFO': "virtualization.VmwareVmMemoryShareInfo", 'VIRTUALIZATION.VOLUMEINFO': "virtualization.VolumeInfo", 'VMEDIA.MAPPING': "vmedia.Mapping", 'VNIC.ARFSSETTINGS': "vnic.ArfsSettings", 'VNIC.CDN': "vnic.Cdn", 'VNIC.COMPLETIONQUEUESETTINGS': "vnic.CompletionQueueSettings", 'VNIC.ETHINTERRUPTSETTINGS': "vnic.EthInterruptSettings", 'VNIC.ETHRXQUEUESETTINGS': "vnic.EthRxQueueSettings", 'VNIC.ETHTXQUEUESETTINGS': "vnic.EthTxQueueSettings", 'VNIC.FCERRORRECOVERYSETTINGS': "vnic.FcErrorRecoverySettings", 'VNIC.FCINTERRUPTSETTINGS': "vnic.FcInterruptSettings", 'VNIC.FCQUEUESETTINGS': "vnic.FcQueueSettings", 'VNIC.FLOGISETTINGS': "vnic.FlogiSettings", 'VNIC.ISCSIAUTHPROFILE': "vnic.IscsiAuthProfile", 'VNIC.LUN': "vnic.Lun", 'VNIC.NVGRESETTINGS': "vnic.NvgreSettings", 'VNIC.PLACEMENTSETTINGS': "vnic.PlacementSettings", 'VNIC.PLOGISETTINGS': "vnic.PlogiSettings", 'VNIC.ROCESETTINGS': "vnic.RoceSettings", 'VNIC.RSSHASHSETTINGS': "vnic.RssHashSettings", 'VNIC.SCSIQUEUESETTINGS': "vnic.ScsiQueueSettings", 'VNIC.TCPOFFLOADSETTINGS': "vnic.TcpOffloadSettings", 
'VNIC.USNICSETTINGS': "vnic.UsnicSettings", 'VNIC.VIFSTATUS': "vnic.VifStatus", 'VNIC.VLANSETTINGS': "vnic.VlanSettings", 'VNIC.VMQSETTINGS': "vnic.VmqSettings", 'VNIC.VSANSETTINGS': "vnic.VsanSettings", 'VNIC.VXLANSETTINGS': "vnic.VxlanSettings", 'WORKFLOW.ACTIONWORKFLOWDEFINITION': "workflow.ActionWorkflowDefinition", 'WORKFLOW.ARRAYDATATYPE': "workflow.ArrayDataType", 'WORKFLOW.ASSOCIATEDROLES': "workflow.AssociatedRoles", 'WORKFLOW.CLICOMMAND': "workflow.CliCommand", 'WORKFLOW.COMMENTS': "workflow.Comments", 'WORKFLOW.CONSTRAINTS': "workflow.Constraints", 'WORKFLOW.CUSTOMARRAYITEM': "workflow.CustomArrayItem", 'WORKFLOW.CUSTOMDATAPROPERTY': "workflow.CustomDataProperty", 'WORKFLOW.CUSTOMDATATYPE': "workflow.CustomDataType", 'WORKFLOW.CUSTOMDATATYPEPROPERTIES': "workflow.CustomDataTypeProperties", 'WORKFLOW.DECISIONCASE': "workflow.DecisionCase", 'WORKFLOW.DECISIONTASK': "workflow.DecisionTask", 'WORKFLOW.DEFAULTVALUE': "workflow.DefaultValue", 'WORKFLOW.DISPLAYMETA': "workflow.DisplayMeta", 'WORKFLOW.DYNAMICWORKFLOWACTIONTASKLIST': "workflow.DynamicWorkflowActionTaskList", 'WORKFLOW.ENUMENTRY': "workflow.EnumEntry", 'WORKFLOW.EXPECTPROMPT': "workflow.ExpectPrompt", 'WORKFLOW.FAILUREENDTASK': "workflow.FailureEndTask", 'WORKFLOW.FILEDOWNLOADOP': "workflow.FileDownloadOp", 'WORKFLOW.FILEOPERATIONS': "workflow.FileOperations", 'WORKFLOW.FILETEMPLATEOP': "workflow.FileTemplateOp", 'WORKFLOW.FILETRANSFER': "workflow.FileTransfer", 'WORKFLOW.FORKTASK': "workflow.ForkTask", 'WORKFLOW.INITIATORCONTEXT': "workflow.InitiatorContext", 'WORKFLOW.INTERNALPROPERTIES': "workflow.InternalProperties", 'WORKFLOW.JOINTASK': "workflow.JoinTask", 'WORKFLOW.LOOPTASK': "workflow.LoopTask", 'WORKFLOW.MESSAGE': "workflow.Message", 'WORKFLOW.MOREFERENCEARRAYITEM': "workflow.MoReferenceArrayItem", 'WORKFLOW.MOREFERENCEDATATYPE': "workflow.MoReferenceDataType", 'WORKFLOW.MOREFERENCEPROPERTY': "workflow.MoReferenceProperty", 'WORKFLOW.PARAMETERSET': "workflow.ParameterSet", 'WORKFLOW.PRIMITIVEARRAYITEM': "workflow.PrimitiveArrayItem", 'WORKFLOW.PRIMITIVEDATAPROPERTY': "workflow.PrimitiveDataProperty", 'WORKFLOW.PRIMITIVEDATATYPE': "workflow.PrimitiveDataType", 'WORKFLOW.PROPERTIES': "workflow.Properties", 'WORKFLOW.RESULTHANDLER': "workflow.ResultHandler", 'WORKFLOW.ROLLBACKTASK': "workflow.RollbackTask", 'WORKFLOW.ROLLBACKWORKFLOWTASK': "workflow.RollbackWorkflowTask", 'WORKFLOW.SELECTORPROPERTY': "workflow.SelectorProperty", 'WORKFLOW.SSHCMD': "workflow.SshCmd", 'WORKFLOW.SSHCONFIG': "workflow.SshConfig", 'WORKFLOW.SSHSESSION': "workflow.SshSession", 'WORKFLOW.STARTTASK': "workflow.StartTask", 'WORKFLOW.SUBWORKFLOWTASK': "workflow.SubWorkflowTask", 'WORKFLOW.SUCCESSENDTASK': "workflow.SuccessEndTask", 'WORKFLOW.TARGETCONTEXT': "workflow.TargetContext", 'WORKFLOW.TARGETDATATYPE': "workflow.TargetDataType", 'WORKFLOW.TARGETPROPERTY': "workflow.TargetProperty", 'WORKFLOW.TASKCONSTRAINTS': "workflow.TaskConstraints", 'WORKFLOW.TASKRETRYINFO': "workflow.TaskRetryInfo", 'WORKFLOW.UIINPUTFILTER': "workflow.UiInputFilter", 'WORKFLOW.VALIDATIONERROR': "workflow.ValidationError", 'WORKFLOW.VALIDATIONINFORMATION': "workflow.ValidationInformation", 'WORKFLOW.WAITTASK': "workflow.WaitTask", 'WORKFLOW.WAITTASKPROMPT': "workflow.WaitTaskPrompt", 'WORKFLOW.WEBAPI': "workflow.WebApi", 'WORKFLOW.WORKERTASK': "workflow.WorkerTask", 'WORKFLOW.WORKFLOWCTX': "workflow.WorkflowCtx", 'WORKFLOW.WORKFLOWENGINEPROPERTIES': "workflow.WorkflowEngineProperties", 'WORKFLOW.WORKFLOWINFOPROPERTIES': "workflow.WorkflowInfoProperties", 
'WORKFLOW.WORKFLOWPROPERTIES': "workflow.WorkflowProperties", 'WORKFLOW.XMLAPI': "workflow.XmlApi", 'X509.CERTIFICATE': "x509.Certificate", }, ('object_type',): { 'ACCESS.ADDRESSTYPE': "access.AddressType", 'ADAPTER.ADAPTERCONFIG': "adapter.AdapterConfig", 'ADAPTER.DCEINTERFACESETTINGS': "adapter.DceInterfaceSettings", 'ADAPTER.ETHSETTINGS': "adapter.EthSettings", 'ADAPTER.FCSETTINGS': "adapter.FcSettings", 'ADAPTER.PORTCHANNELSETTINGS': "adapter.PortChannelSettings", 'APPLIANCE.APISTATUS': "appliance.ApiStatus", 'APPLIANCE.CERTRENEWALPHASE': "appliance.CertRenewalPhase", 'APPLIANCE.KEYVALUEPAIR': "appliance.KeyValuePair", 'APPLIANCE.STATUSCHECK': "appliance.StatusCheck", 'ASSET.ADDRESSINFORMATION': "asset.AddressInformation", 'ASSET.APIKEYCREDENTIAL': "asset.ApiKeyCredential", 'ASSET.CLIENTCERTIFICATECREDENTIAL': "asset.ClientCertificateCredential", 'ASSET.CLOUDCONNECTION': "asset.CloudConnection", 'ASSET.CONNECTIONCONTROLMESSAGE': "asset.ConnectionControlMessage", 'ASSET.CONTRACTINFORMATION': "asset.ContractInformation", 'ASSET.CUSTOMERINFORMATION': "asset.CustomerInformation", 'ASSET.DEPLOYMENTALARMINFO': "asset.DeploymentAlarmInfo", 'ASSET.DEPLOYMENTDEVICEALARMINFO': "asset.DeploymentDeviceAlarmInfo", 'ASSET.DEPLOYMENTDEVICEINFORMATION': "asset.DeploymentDeviceInformation", 'ASSET.DEVICEINFORMATION': "asset.DeviceInformation", 'ASSET.DEVICESTATISTICS': "asset.DeviceStatistics", 'ASSET.DEVICETRANSACTION': "asset.DeviceTransaction", 'ASSET.GLOBALULTIMATE': "asset.GlobalUltimate", 'ASSET.HTTPCONNECTION': "asset.HttpConnection", 'ASSET.INTERSIGHTDEVICECONNECTORCONNECTION': "asset.IntersightDeviceConnectorConnection", 'ASSET.METERINGTYPE': "asset.MeteringType", 'ASSET.NEWRELICCREDENTIAL': "asset.NewRelicCredential", 'ASSET.NOAUTHENTICATIONCREDENTIAL': "asset.NoAuthenticationCredential", 'ASSET.OAUTHBEARERTOKENCREDENTIAL': "asset.OauthBearerTokenCredential", 'ASSET.OAUTHCLIENTIDSECRETCREDENTIAL': "asset.OauthClientIdSecretCredential", 'ASSET.ORCHESTRATIONHITACHIVIRTUALSTORAGEPLATFORMOPTIONS': "asset.OrchestrationHitachiVirtualStoragePlatformOptions", 'ASSET.ORCHESTRATIONSERVICE': "asset.OrchestrationService", 'ASSET.PARENTCONNECTIONSIGNATURE': "asset.ParentConnectionSignature", 'ASSET.PRIVATEKEYCREDENTIAL': "asset.PrivateKeyCredential", 'ASSET.PRODUCTINFORMATION': "asset.ProductInformation", 'ASSET.SERVICENOWCREDENTIAL': "asset.ServiceNowCredential", 'ASSET.SSHCONNECTION': "asset.SshConnection", 'ASSET.SUDIINFO': "asset.SudiInfo", 'ASSET.TARGETKEY': "asset.TargetKey", 'ASSET.TARGETSIGNATURE': "asset.TargetSignature", 'ASSET.TARGETSTATUSDETAILS': "asset.TargetStatusDetails", 'ASSET.TERRAFORMINTEGRATIONSERVICE': "asset.TerraformIntegrationService", 'ASSET.TERRAFORMINTEGRATIONTERRAFORMAGENTOPTIONS': "asset.TerraformIntegrationTerraformAgentOptions", 'ASSET.TERRAFORMINTEGRATIONTERRAFORMCLOUDOPTIONS': "asset.TerraformIntegrationTerraformCloudOptions", 'ASSET.USERNAMEPASSWORDCREDENTIAL': "asset.UsernamePasswordCredential", 'ASSET.VIRTUALIZATIONAMAZONWEBSERVICEOPTIONS': "asset.VirtualizationAmazonWebServiceOptions", 'ASSET.VIRTUALIZATIONSERVICE': "asset.VirtualizationService", 'ASSET.VMHOST': "asset.VmHost", 'ASSET.WORKLOADOPTIMIZERAMAZONWEBSERVICESBILLINGOPTIONS': "asset.WorkloadOptimizerAmazonWebServicesBillingOptions", 'ASSET.WORKLOADOPTIMIZERDYNATRACEOPTIONS': "asset.WorkloadOptimizerDynatraceOptions", 'ASSET.WORKLOADOPTIMIZERHYPERVOPTIONS': "asset.WorkloadOptimizerHypervOptions", 'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREAPPLICATIONINSIGHTSOPTIONS': 
"asset.WorkloadOptimizerMicrosoftAzureApplicationInsightsOptions", 'ASSET.WORKLOADOPTIMIZERMICROSOFTAZUREENTERPRISEAGREEMENTOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureEnterpriseAgreementOptions", 'ASSET.WORKLOADOPTIMIZERMICROSOFTAZURESERVICEPRINCIPALOPTIONS': "asset.WorkloadOptimizerMicrosoftAzureServicePrincipalOptions", 'ASSET.WORKLOADOPTIMIZERNEWRELICOPTIONS': "asset.WorkloadOptimizerNewRelicOptions", 'ASSET.WORKLOADOPTIMIZEROPENSTACKOPTIONS': "asset.WorkloadOptimizerOpenStackOptions", 'ASSET.WORKLOADOPTIMIZERREDHATOPENSTACKOPTIONS': "asset.WorkloadOptimizerRedHatOpenStackOptions", 'ASSET.WORKLOADOPTIMIZERSERVICE': "asset.WorkloadOptimizerService", 'ASSET.WORKLOADOPTIMIZERVMWAREVCENTEROPTIONS': "asset.WorkloadOptimizerVmwareVcenterOptions", 'BOOT.BOOTLOADER': "boot.Bootloader", 'BOOT.ISCSI': "boot.Iscsi", 'BOOT.LOCALCDD': "boot.LocalCdd", 'BOOT.LOCALDISK': "boot.LocalDisk", 'BOOT.NVME': "boot.Nvme", 'BOOT.PCHSTORAGE': "boot.PchStorage", 'BOOT.PXE': "boot.Pxe", 'BOOT.SAN': "boot.San", 'BOOT.SDCARD': "boot.SdCard", 'BOOT.UEFISHELL': "boot.UefiShell", 'BOOT.USB': "boot.Usb", 'BOOT.VIRTUALMEDIA': "boot.VirtualMedia", 'BULK.HTTPHEADER': "bulk.HttpHeader", 'BULK.RESTRESULT': "bulk.RestResult", 'BULK.RESTSUBREQUEST': "bulk.RestSubRequest", 'CAPABILITY.PORTRANGE': "capability.PortRange", 'CAPABILITY.SWITCHNETWORKLIMITS': "capability.SwitchNetworkLimits", 'CAPABILITY.SWITCHSTORAGELIMITS': "capability.SwitchStorageLimits", 'CAPABILITY.SWITCHSYSTEMLIMITS': "capability.SwitchSystemLimits", 'CAPABILITY.SWITCHINGMODECAPABILITY': "capability.SwitchingModeCapability", 'CERTIFICATEMANAGEMENT.IMC': "certificatemanagement.Imc", 'CLOUD.AVAILABILITYZONE': "cloud.AvailabilityZone", 'CLOUD.BILLINGUNIT': "cloud.BillingUnit", 'CLOUD.CLOUDREGION': "cloud.CloudRegion", 'CLOUD.CLOUDTAG': "cloud.CloudTag", 'CLOUD.CUSTOMATTRIBUTES': "cloud.CustomAttributes", 'CLOUD.IMAGEREFERENCE': "cloud.ImageReference", 'CLOUD.INSTANCETYPE': "cloud.InstanceType", 'CLOUD.NETWORKACCESSCONFIG': "cloud.NetworkAccessConfig", 'CLOUD.NETWORKADDRESS': "cloud.NetworkAddress", 'CLOUD.NETWORKINSTANCEATTACHMENT': "cloud.NetworkInstanceAttachment", 'CLOUD.NETWORKINTERFACEATTACHMENT': "cloud.NetworkInterfaceAttachment", 'CLOUD.SECURITYGROUPRULE': "cloud.SecurityGroupRule", 'CLOUD.TFCWORKSPACEVARIABLES': "cloud.TfcWorkspaceVariables", 'CLOUD.VOLUMEATTACHMENT': "cloud.VolumeAttachment", 'CLOUD.VOLUMEINSTANCEATTACHMENT': "cloud.VolumeInstanceAttachment", 'CLOUD.VOLUMEIOPSINFO': "cloud.VolumeIopsInfo", 'CLOUD.VOLUMETYPE': "cloud.VolumeType", 'CMRF.CMRF': "cmrf.CmRf", 'COMM.IPV4ADDRESSBLOCK': "comm.IpV4AddressBlock", 'COMM.IPV4INTERFACE': "comm.IpV4Interface", 'COMM.IPV6INTERFACE': "comm.IpV6Interface", 'COMPUTE.ALARMSUMMARY': "compute.AlarmSummary", 'COMPUTE.IPADDRESS': "compute.IpAddress", 'COMPUTE.PERSISTENTMEMORYMODULE': "compute.PersistentMemoryModule", 'COMPUTE.PERSISTENTMEMORYOPERATION': "compute.PersistentMemoryOperation", 'COMPUTE.SERVERCONFIG': "compute.ServerConfig", 'COMPUTE.SERVEROPSTATUS': "compute.ServerOpStatus", 'COMPUTE.STORAGECONTROLLEROPERATION': "compute.StorageControllerOperation", 'COMPUTE.STORAGEPHYSICALDRIVE': "compute.StoragePhysicalDrive", 'COMPUTE.STORAGEPHYSICALDRIVEOPERATION': "compute.StoragePhysicalDriveOperation", 'COMPUTE.STORAGEVIRTUALDRIVE': "compute.StorageVirtualDrive", 'COMPUTE.STORAGEVIRTUALDRIVEOPERATION': "compute.StorageVirtualDriveOperation", 'COND.ALARMSUMMARY': "cond.AlarmSummary", 'CONNECTOR.CLOSESTREAMMESSAGE': "connector.CloseStreamMessage", 'CONNECTOR.COMMANDCONTROLMESSAGE': 
"connector.CommandControlMessage", 'CONNECTOR.COMMANDTERMINALSTREAM': "connector.CommandTerminalStream", 'CONNECTOR.EXPECTPROMPT': "connector.ExpectPrompt", 'CONNECTOR.FETCHSTREAMMESSAGE': "connector.FetchStreamMessage", 'CONNECTOR.FILECHECKSUM': "connector.FileChecksum", 'CONNECTOR.FILEMESSAGE': "connector.FileMessage", 'CONNECTOR.HTTPREQUEST': "connector.HttpRequest", 'CONNECTOR.SSHCONFIG': "connector.SshConfig", 'CONNECTOR.SSHMESSAGE': "connector.SshMessage", 'CONNECTOR.STARTSTREAM': "connector.StartStream", 'CONNECTOR.STARTSTREAMFROMDEVICE': "connector.StartStreamFromDevice", 'CONNECTOR.STREAMACKNOWLEDGE': "connector.StreamAcknowledge", 'CONNECTOR.STREAMINPUT': "connector.StreamInput", 'CONNECTOR.STREAMKEEPALIVE': "connector.StreamKeepalive", 'CONNECTOR.TARGETCHANGEMESSAGE': "connector.TargetChangeMessage", 'CONNECTOR.URL': "connector.Url", 'CONNECTOR.WINRMREQUEST': "connector.WinrmRequest", 'CONNECTOR.XMLAPIMESSAGE': "connector.XmlApiMessage", 'CONNECTORPACK.CONNECTORPACKUPDATE': "connectorpack.ConnectorPackUpdate", 'CONTENT.COMPLEXTYPE': "content.ComplexType", 'CONTENT.PARAMETER': "content.Parameter", 'CONTENT.TEXTPARAMETER': "content.TextParameter", 'CONVERGEDINFRA.ALARMSUMMARY': "convergedinfra.AlarmSummary", 'CONVERGEDINFRA.COMPLIANCESUMMARY': "convergedinfra.ComplianceSummary", 'CONVERGEDINFRA.PODSUMMARY': "convergedinfra.PodSummary", 'CRD.CUSTOMRESOURCECONFIGPROPERTY': "crd.CustomResourceConfigProperty", 'EQUIPMENT.IOCARDIDENTITY': "equipment.IoCardIdentity", 'FABRIC.LLDPSETTINGS': "fabric.LldpSettings", 'FABRIC.MACAGINGSETTINGS': "fabric.MacAgingSettings", 'FABRIC.PORTIDENTIFIER': "fabric.PortIdentifier", 'FABRIC.QOSCLASS': "fabric.QosClass", 'FABRIC.UDLDGLOBALSETTINGS': "fabric.UdldGlobalSettings", 'FABRIC.UDLDSETTINGS': "fabric.UdldSettings", 'FABRIC.VLANSETTINGS': "fabric.VlanSettings", 'FCPOOL.BLOCK': "fcpool.Block", 'FEEDBACK.FEEDBACKDATA': "feedback.FeedbackData", 'FIRMWARE.CHASSISUPGRADEIMPACT': "firmware.ChassisUpgradeImpact", 'FIRMWARE.CIFSSERVER': "firmware.CifsServer", 'FIRMWARE.COMPONENTIMPACT': "firmware.ComponentImpact", 'FIRMWARE.COMPONENTMETA': "firmware.ComponentMeta", 'FIRMWARE.DIRECTDOWNLOAD': "firmware.DirectDownload", 'FIRMWARE.FABRICUPGRADEIMPACT': "firmware.FabricUpgradeImpact", 'FIRMWARE.FIRMWAREINVENTORY': "firmware.FirmwareInventory", 'FIRMWARE.HTTPSERVER': "firmware.HttpServer", 'FIRMWARE.INCLUDECOMPONENTLISTTYPE': "firmware.IncludeComponentListType", 'FIRMWARE.NETWORKSHARE': "firmware.NetworkShare", 'FIRMWARE.NFSSERVER': "firmware.NfsServer", 'FIRMWARE.SERVERUPGRADEIMPACT': "firmware.ServerUpgradeImpact", 'FORECAST.MODEL': "forecast.Model", 'HCL.CONSTRAINT': "hcl.Constraint", 'HCL.FIRMWARE': "hcl.Firmware", 'HCL.HARDWARECOMPATIBILITYPROFILE': "hcl.HardwareCompatibilityProfile", 'HCL.PRODUCT': "hcl.Product", 'HYPERFLEX.ALARMSUMMARY': "hyperflex.AlarmSummary", 'HYPERFLEX.APPSETTINGCONSTRAINT': "hyperflex.AppSettingConstraint", 'HYPERFLEX.BACKUPPOLICYSETTINGS': "hyperflex.BackupPolicySettings", 'HYPERFLEX.DATASTOREINFO': "hyperflex.DatastoreInfo", 'HYPERFLEX.ENTITYREFERENCE': "hyperflex.EntityReference", 'HYPERFLEX.ERRORSTACK': "hyperflex.ErrorStack", 'HYPERFLEX.FEATURELIMITENTRY': "hyperflex.FeatureLimitEntry", 'HYPERFLEX.FILEPATH': "hyperflex.FilePath", 'HYPERFLEX.HEALTHCHECKSCRIPTINFO': "hyperflex.HealthCheckScriptInfo", 'HYPERFLEX.HXHOSTMOUNTSTATUSDT': "hyperflex.HxHostMountStatusDt", 'HYPERFLEX.HXLICENSEAUTHORIZATIONDETAILSDT': "hyperflex.HxLicenseAuthorizationDetailsDt", 'HYPERFLEX.HXLINKDT': "hyperflex.HxLinkDt", 'HYPERFLEX.HXNETWORKADDRESSDT': 
"hyperflex.HxNetworkAddressDt", 'HYPERFLEX.HXPLATFORMDATASTORECONFIGDT': "hyperflex.HxPlatformDatastoreConfigDt", 'HYPERFLEX.HXREGISTRATIONDETAILSDT': "hyperflex.HxRegistrationDetailsDt", 'HYPERFLEX.HXRESILIENCYINFODT': "hyperflex.HxResiliencyInfoDt", 'HYPERFLEX.HXSITEDT': "hyperflex.HxSiteDt", 'HYPERFLEX.HXUUIDDT': "hyperflex.HxUuIdDt", 'HYPERFLEX.HXZONEINFODT': "hyperflex.HxZoneInfoDt", 'HYPERFLEX.HXZONERESILIENCYINFODT': "hyperflex.HxZoneResiliencyInfoDt", 'HYPERFLEX.IPADDRRANGE': "hyperflex.IpAddrRange", 'HYPERFLEX.LOGICALAVAILABILITYZONE': "hyperflex.LogicalAvailabilityZone", 'HYPERFLEX.MACADDRPREFIXRANGE': "hyperflex.MacAddrPrefixRange", 'HYPERFLEX.MAPCLUSTERIDTOPROTECTIONINFO': "hyperflex.MapClusterIdToProtectionInfo", 'HYPERFLEX.MAPCLUSTERIDTOSTSNAPSHOTPOINT': "hyperflex.MapClusterIdToStSnapshotPoint", 'HYPERFLEX.MAPUUIDTOTRACKEDDISK': "hyperflex.MapUuidToTrackedDisk", 'HYPERFLEX.NAMEDVLAN': "hyperflex.NamedVlan", 'HYPERFLEX.NAMEDVSAN': "hyperflex.NamedVsan", 'HYPERFLEX.PORTTYPETOPORTNUMBERMAP': "hyperflex.PortTypeToPortNumberMap", 'HYPERFLEX.PROTECTIONINFO': "hyperflex.ProtectionInfo", 'HYPERFLEX.REPLICATIONCLUSTERREFERENCETOSCHEDULE': "hyperflex.ReplicationClusterReferenceToSchedule", 'HYPERFLEX.REPLICATIONPEERINFO': "hyperflex.ReplicationPeerInfo", 'HYPERFLEX.REPLICATIONPLATDATASTORE': "hyperflex.ReplicationPlatDatastore", 'HYPERFLEX.REPLICATIONPLATDATASTOREPAIR': "hyperflex.ReplicationPlatDatastorePair", 'HYPERFLEX.REPLICATIONSCHEDULE': "hyperflex.ReplicationSchedule", 'HYPERFLEX.REPLICATIONSTATUS': "hyperflex.ReplicationStatus", 'HYPERFLEX.RPOSTATUS': "hyperflex.RpoStatus", 'HYPERFLEX.SERVERFIRMWAREVERSIONINFO': "hyperflex.ServerFirmwareVersionInfo", 'HYPERFLEX.SERVERMODELENTRY': "hyperflex.ServerModelEntry", 'HYPERFLEX.SNAPSHOTFILES': "hyperflex.SnapshotFiles", 'HYPERFLEX.SNAPSHOTINFOBRIEF': "hyperflex.SnapshotInfoBrief", 'HYPERFLEX.SNAPSHOTPOINT': "hyperflex.SnapshotPoint", 'HYPERFLEX.SNAPSHOTSTATUS': "hyperflex.SnapshotStatus", 'HYPERFLEX.STPLATFORMCLUSTERHEALINGINFO': "hyperflex.StPlatformClusterHealingInfo", 'HYPERFLEX.STPLATFORMCLUSTERRESILIENCYINFO': "hyperflex.StPlatformClusterResiliencyInfo", 'HYPERFLEX.SUMMARY': "hyperflex.Summary", 'HYPERFLEX.TRACKEDDISK': "hyperflex.TrackedDisk", 'HYPERFLEX.TRACKEDFILE': "hyperflex.TrackedFile", 'HYPERFLEX.VIRTUALMACHINE': "hyperflex.VirtualMachine", 'HYPERFLEX.VIRTUALMACHINERUNTIMEINFO': "hyperflex.VirtualMachineRuntimeInfo", 'HYPERFLEX.VMPROTECTIONSPACEUSAGE': "hyperflex.VmProtectionSpaceUsage", 'HYPERFLEX.WWXNPREFIXRANGE': "hyperflex.WwxnPrefixRange", 'I18N.MESSAGE': "i18n.Message", 'I18N.MESSAGEPARAM': "i18n.MessageParam", 'IAAS.LICENSEKEYSINFO': "iaas.LicenseKeysInfo", 'IAAS.LICENSEUTILIZATIONINFO': "iaas.LicenseUtilizationInfo", 'IAAS.WORKFLOWSTEPS': "iaas.WorkflowSteps", 'IAM.ACCOUNTPERMISSIONS': "iam.AccountPermissions", 'IAM.CLIENTMETA': "iam.ClientMeta", 'IAM.ENDPOINTPASSWORDPROPERTIES': "iam.EndPointPasswordProperties", 'IAM.FEATUREDEFINITION': "iam.FeatureDefinition", 'IAM.GROUPPERMISSIONTOROLES': "iam.GroupPermissionToRoles", 'IAM.LDAPBASEPROPERTIES': "iam.LdapBaseProperties", 'IAM.LDAPDNSPARAMETERS': "iam.LdapDnsParameters", 'IAM.PERMISSIONREFERENCE': "iam.PermissionReference", 'IAM.PERMISSIONTOROLES': "iam.PermissionToRoles", 'IAM.RULE': "iam.Rule", 'IAM.SAMLSPCONNECTION': "iam.SamlSpConnection", 'IAM.SSOSESSIONATTRIBUTES': "iam.SsoSessionAttributes", 'IMCCONNECTOR.WEBUIMESSAGE': "imcconnector.WebUiMessage", 'INFRA.HARDWAREINFO': "infra.HardwareInfo", 'INFRA.METADATA': "infra.MetaData", 'INVENTORY.INVENTORYMO': 
"inventory.InventoryMo", 'INVENTORY.UEMINFO': "inventory.UemInfo", 'IPPOOL.IPV4BLOCK': "ippool.IpV4Block", 'IPPOOL.IPV4CONFIG': "ippool.IpV4Config", 'IPPOOL.IPV6BLOCK': "ippool.IpV6Block", 'IPPOOL.IPV6CONFIG': "ippool.IpV6Config", 'IQNPOOL.IQNSUFFIXBLOCK': "iqnpool.IqnSuffixBlock", 'KUBERNETES.ACTIONINFO': "kubernetes.ActionInfo", 'KUBERNETES.ADDON': "kubernetes.Addon", 'KUBERNETES.ADDONCONFIGURATION': "kubernetes.AddonConfiguration", 'KUBERNETES.BAREMETALNETWORKINFO': "kubernetes.BaremetalNetworkInfo", 'KUBERNETES.CALICOCONFIG': "kubernetes.CalicoConfig", 'KUBERNETES.CLUSTERCERTIFICATECONFIGURATION': "kubernetes.ClusterCertificateConfiguration", 'KUBERNETES.CLUSTERMANAGEMENTCONFIG': "kubernetes.ClusterManagementConfig", 'KUBERNETES.CONFIGURATION': "kubernetes.Configuration", 'KUBERNETES.DAEMONSETSTATUS': "kubernetes.DaemonSetStatus", 'KUBERNETES.DEPLOYMENTSTATUS': "kubernetes.DeploymentStatus", 'KUBERNETES.ESSENTIALADDON': "kubernetes.EssentialAddon", 'KUBERNETES.ESXIVIRTUALMACHINEINFRACONFIG': "kubernetes.EsxiVirtualMachineInfraConfig", 'KUBERNETES.ETHERNET': "kubernetes.Ethernet", 'KUBERNETES.ETHERNETMATCHER': "kubernetes.EthernetMatcher", 'KUBERNETES.HYPERFLEXAPVIRTUALMACHINEINFRACONFIG': "kubernetes.HyperFlexApVirtualMachineInfraConfig", 'KUBERNETES.INGRESSSTATUS': "kubernetes.IngressStatus", 'KUBERNETES.INSTANCETYPEDETAILS': "kubernetes.InstanceTypeDetails", 'KUBERNETES.IPV4CONFIG': "kubernetes.IpV4Config", 'KUBERNETES.KEYVALUE': "kubernetes.KeyValue", 'KUBERNETES.LOADBALANCER': "kubernetes.LoadBalancer", 'KUBERNETES.NETWORKINTERFACESPEC': "kubernetes.NetworkInterfaceSpec", 'KUBERNETES.NODEADDRESS': "kubernetes.NodeAddress", 'KUBERNETES.NODEGROUPLABEL': "kubernetes.NodeGroupLabel", 'KUBERNETES.NODEGROUPTAINT': "kubernetes.NodeGroupTaint", 'KUBERNETES.NODEINFO': "kubernetes.NodeInfo", 'KUBERNETES.NODESPEC': "kubernetes.NodeSpec", 'KUBERNETES.NODESTATUS': "kubernetes.NodeStatus", 'KUBERNETES.OBJECTMETA': "kubernetes.ObjectMeta", 'KUBERNETES.OVSBOND': "kubernetes.OvsBond", 'KUBERNETES.PODSTATUS': "kubernetes.PodStatus", 'KUBERNETES.PROXYCONFIG': "kubernetes.ProxyConfig", 'KUBERNETES.ROUTE': "kubernetes.Route", 'KUBERNETES.SERVICESTATUS': "kubernetes.ServiceStatus", 'KUBERNETES.STATEFULSETSTATUS': "kubernetes.StatefulSetStatus", 'KUBERNETES.TAINT': "kubernetes.Taint", 'MACPOOL.BLOCK': "macpool.Block", 'MEMORY.PERSISTENTMEMORYGOAL': "memory.PersistentMemoryGoal", 'MEMORY.PERSISTENTMEMORYLOCALSECURITY': "memory.PersistentMemoryLocalSecurity", 'MEMORY.PERSISTENTMEMORYLOGICALNAMESPACE': "memory.PersistentMemoryLogicalNamespace", 'META.ACCESSPRIVILEGE': "meta.AccessPrivilege", 'META.DISPLAYNAMEDEFINITION': "meta.DisplayNameDefinition", 'META.IDENTITYDEFINITION': "meta.IdentityDefinition", 'META.PROPDEFINITION': "meta.PropDefinition", 'META.RELATIONSHIPDEFINITION': "meta.RelationshipDefinition", 'MO.MOREF': "mo.MoRef", 'MO.TAG': "mo.Tag", 'MO.VERSIONCONTEXT': "mo.VersionContext", 'NIAAPI.DETAIL': "niaapi.Detail", 'NIAAPI.NEWRELEASEDETAIL': "niaapi.NewReleaseDetail", 'NIAAPI.REVISIONINFO': "niaapi.RevisionInfo", 'NIAAPI.SOFTWAREREGEX': "niaapi.SoftwareRegex", 'NIAAPI.VERSIONREGEXPLATFORM': "niaapi.VersionRegexPlatform", 'NIATELEMETRY.BOOTFLASHDETAILS': "niatelemetry.BootflashDetails", 'NIATELEMETRY.DEPLOYMENTSTATUS': "niatelemetry.DeploymentStatus", 'NIATELEMETRY.DISKINFO': "niatelemetry.Diskinfo", 'NIATELEMETRY.INTERFACE': "niatelemetry.Interface", 'NIATELEMETRY.INTERFACEELEMENT': "niatelemetry.InterfaceElement", 'NIATELEMETRY.JOBDETAIL': "niatelemetry.JobDetail", 'NIATELEMETRY.LOGICALLINK': 
"niatelemetry.LogicalLink", 'NIATELEMETRY.NVEPACKETCOUNTERS': "niatelemetry.NvePacketCounters", 'NIATELEMETRY.NVEVNI': "niatelemetry.NveVni", 'NIATELEMETRY.NXOSBGPMVPN': "niatelemetry.NxosBgpMvpn", 'NIATELEMETRY.NXOSVTP': "niatelemetry.NxosVtp", 'NIATELEMETRY.SMARTLICENSE': "niatelemetry.SmartLicense", 'NIATELEMETRY.VNISTATUS': "niatelemetry.VniStatus", 'NOTIFICATION.ALARMMOCONDITION': "notification.AlarmMoCondition", 'NOTIFICATION.SENDEMAIL': "notification.SendEmail", 'NTP.AUTHNTPSERVER': "ntp.AuthNtpServer", 'ONPREM.IMAGEPACKAGE': "onprem.ImagePackage", 'ONPREM.SCHEDULE': "onprem.Schedule", 'ONPREM.UPGRADENOTE': "onprem.UpgradeNote", 'ONPREM.UPGRADEPHASE': "onprem.UpgradePhase", 'OPRS.KVPAIR': "oprs.Kvpair", 'OS.ANSWERS': "os.Answers", 'OS.GLOBALCONFIG': "os.GlobalConfig", 'OS.IPV4CONFIGURATION': "os.Ipv4Configuration", 'OS.IPV6CONFIGURATION': "os.Ipv6Configuration", 'OS.PHYSICALDISK': "os.PhysicalDisk", 'OS.PHYSICALDISKRESPONSE': "os.PhysicalDiskResponse", 'OS.PLACEHOLDER': "os.PlaceHolder", 'OS.SERVERCONFIG': "os.ServerConfig", 'OS.VALIDATIONINFORMATION': "os.ValidationInformation", 'OS.VIRTUALDRIVE': "os.VirtualDrive", 'OS.VIRTUALDRIVERESPONSE': "os.VirtualDriveResponse", 'OS.VMWAREPARAMETERS': "os.VmwareParameters", 'OS.WINDOWSPARAMETERS': "os.WindowsParameters", 'PKIX.DISTINGUISHEDNAME': "pkix.DistinguishedName", 'PKIX.ECDSAKEYSPEC': "pkix.EcdsaKeySpec", 'PKIX.EDDSAKEYSPEC': "pkix.EddsaKeySpec", 'PKIX.RSAALGORITHM': "pkix.RsaAlgorithm", 'PKIX.SUBJECTALTERNATENAME': "pkix.SubjectAlternateName", 'POLICY.ACTIONPARAM': "policy.ActionParam", 'POLICY.ACTIONQUALIFIER': "policy.ActionQualifier", 'POLICY.CONFIGCHANGE': "policy.ConfigChange", 'POLICY.CONFIGCHANGECONTEXT': "policy.ConfigChangeContext", 'POLICY.CONFIGCONTEXT': "policy.ConfigContext", 'POLICY.CONFIGRESULTCONTEXT': "policy.ConfigResultContext", 'POLICY.QUALIFIER': "policy.Qualifier", 'POLICYINVENTORY.JOBINFO': "policyinventory.JobInfo", 'RECOVERY.BACKUPSCHEDULE': "recovery.BackupSchedule", 'RESOURCE.PERTYPECOMBINEDSELECTOR': "resource.PerTypeCombinedSelector", 'RESOURCE.SELECTOR': "resource.Selector", 'RESOURCE.SOURCETOPERMISSIONRESOURCES': "resource.SourceToPermissionResources", 'RESOURCE.SOURCETOPERMISSIONRESOURCESHOLDER': "resource.SourceToPermissionResourcesHolder", 'RESOURCEPOOL.SERVERLEASEPARAMETERS': "resourcepool.ServerLeaseParameters", 'RESOURCEPOOL.SERVERPOOLPARAMETERS': "resourcepool.ServerPoolParameters", 'SDCARD.DIAGNOSTICS': "sdcard.Diagnostics", 'SDCARD.DRIVERS': "sdcard.Drivers", 'SDCARD.HOSTUPGRADEUTILITY': "sdcard.HostUpgradeUtility", 'SDCARD.OPERATINGSYSTEM': "sdcard.OperatingSystem", 'SDCARD.PARTITION': "sdcard.Partition", 'SDCARD.SERVERCONFIGURATIONUTILITY': "sdcard.ServerConfigurationUtility", 'SDCARD.USERPARTITION': "sdcard.UserPartition", 'SDWAN.NETWORKCONFIGURATIONTYPE': "sdwan.NetworkConfigurationType", 'SDWAN.TEMPLATEINPUTSTYPE': "sdwan.TemplateInputsType", 'SERVER.PENDINGWORKFLOWTRIGGER': "server.PendingWorkflowTrigger", 'SNMP.TRAP': "snmp.Trap", 'SNMP.USER': "snmp.User", 'SOFTWAREREPOSITORY.APPLIANCEUPLOAD': "softwarerepository.ApplianceUpload", 'SOFTWAREREPOSITORY.CIFSSERVER': "softwarerepository.CifsServer", 'SOFTWAREREPOSITORY.CONSTRAINTMODELS': "softwarerepository.ConstraintModels", 'SOFTWAREREPOSITORY.HTTPSERVER': "softwarerepository.HttpServer", 'SOFTWAREREPOSITORY.IMPORTRESULT': "softwarerepository.ImportResult", 'SOFTWAREREPOSITORY.LOCALMACHINE': "softwarerepository.LocalMachine", 'SOFTWAREREPOSITORY.NFSSERVER': "softwarerepository.NfsServer", 'STORAGE.AUTOMATICDRIVEGROUP': 
"storage.AutomaticDriveGroup", 'STORAGE.HITACHIARRAYUTILIZATION': "storage.HitachiArrayUtilization", 'STORAGE.HITACHICAPACITY': "storage.HitachiCapacity", 'STORAGE.HITACHIINITIATOR': "storage.HitachiInitiator", 'STORAGE.INITIATOR': "storage.Initiator", 'STORAGE.KEYSETTING': "storage.KeySetting", 'STORAGE.LOCALKEYSETTING': "storage.LocalKeySetting", 'STORAGE.M2VIRTUALDRIVECONFIG': "storage.M2VirtualDriveConfig", 'STORAGE.MANUALDRIVEGROUP': "storage.ManualDriveGroup", 'STORAGE.NETAPPETHERNETPORTLAG': "storage.NetAppEthernetPortLag", 'STORAGE.NETAPPETHERNETPORTVLAN': "storage.NetAppEthernetPortVlan", 'STORAGE.NETAPPEXPORTPOLICYRULE': "storage.NetAppExportPolicyRule", 'STORAGE.NETAPPHIGHAVAILABILITY': "storage.NetAppHighAvailability", 'STORAGE.NETAPPPERFORMANCEMETRICSAVERAGE': "storage.NetAppPerformanceMetricsAverage", 'STORAGE.NETAPPPORT': "storage.NetAppPort", 'STORAGE.NETAPPSTORAGECLUSTEREFFICIENCY': "storage.NetAppStorageClusterEfficiency", 'STORAGE.NETAPPSTORAGEUTILIZATION': "storage.NetAppStorageUtilization", 'STORAGE.PUREARRAYUTILIZATION': "storage.PureArrayUtilization", 'STORAGE.PUREDISKUTILIZATION': "storage.PureDiskUtilization", 'STORAGE.PUREHOSTUTILIZATION': "storage.PureHostUtilization", 'STORAGE.PUREREPLICATIONBLACKOUT': "storage.PureReplicationBlackout", 'STORAGE.PUREVOLUMEUTILIZATION': "storage.PureVolumeUtilization", 'STORAGE.R0DRIVE': "storage.R0Drive", 'STORAGE.REMOTEKEYSETTING': "storage.RemoteKeySetting", 'STORAGE.SPANDRIVES': "storage.SpanDrives", 'STORAGE.STORAGECONTAINERHOSTMOUNTSTATUS': "storage.StorageContainerHostMountStatus", 'STORAGE.STORAGECONTAINERUTILIZATION': "storage.StorageContainerUtilization", 'STORAGE.VIRTUALDRIVECONFIGURATION': "storage.VirtualDriveConfiguration", 'STORAGE.VIRTUALDRIVEPOLICY': "storage.VirtualDrivePolicy", 'STORAGE.VOLUMEUTILIZATION': "storage.VolumeUtilization", 'SYSLOG.LOCALFILELOGGINGCLIENT': "syslog.LocalFileLoggingClient", 'SYSLOG.REMOTELOGGINGCLIENT': "syslog.RemoteLoggingClient", 'TAM.ACTION': "tam.Action", 'TAM.APIDATASOURCE': "tam.ApiDataSource", 'TAM.EOLADVISORYDETAILS': "tam.EolAdvisoryDetails", 'TAM.EOLSEVERITY': "tam.EolSeverity", 'TAM.IDENTIFIERS': "tam.Identifiers", 'TAM.MILESTONE': "tam.Milestone", 'TAM.PSIRTSEVERITY': "tam.PsirtSeverity", 'TAM.QUERYENTRY': "tam.QueryEntry", 'TAM.S3DATASOURCE': "tam.S3DataSource", 'TAM.SECURITYADVISORYDETAILS': "tam.SecurityAdvisoryDetails", 'TAM.TEXTFSMTEMPLATEDATASOURCE': "tam.TextFsmTemplateDataSource", 'TECHSUPPORTMANAGEMENT.APPLIANCEPARAM': "techsupportmanagement.ApplianceParam", 'TECHSUPPORTMANAGEMENT.NIAPARAM': "techsupportmanagement.NiaParam", 'TECHSUPPORTMANAGEMENT.PLATFORMPARAM': "techsupportmanagement.PlatformParam", 'TEMPLATE.TRANSFORMATIONSTAGE': "template.TransformationStage", 'TERRAFORM.CLOUDRESOURCE': "terraform.CloudResource", 'TERRAFORM.RUNSTATE': "terraform.Runstate", 'UCSD.CONNECTORPACK': "ucsd.ConnectorPack", 'UCSD.UCSDRESTOREPARAMETERS': "ucsd.UcsdRestoreParameters", 'UCSDCONNECTOR.RESTCLIENTMESSAGE': "ucsdconnector.RestClientMessage", 'UUIDPOOL.UUIDBLOCK': "uuidpool.UuidBlock", 'VIRTUALIZATION.ACTIONINFO': "virtualization.ActionInfo", 'VIRTUALIZATION.AWSVMCOMPUTECONFIGURATION': "virtualization.AwsVmComputeConfiguration", 'VIRTUALIZATION.AWSVMCONFIGURATION': "virtualization.AwsVmConfiguration", 'VIRTUALIZATION.AWSVMNETWORKCONFIGURATION': "virtualization.AwsVmNetworkConfiguration", 'VIRTUALIZATION.AWSVMSTORAGECONFIGURATION': "virtualization.AwsVmStorageConfiguration", 'VIRTUALIZATION.BONDSTATE': "virtualization.BondState", 'VIRTUALIZATION.CLOUDINITCONFIG': 
"virtualization.CloudInitConfig", 'VIRTUALIZATION.COMPUTECAPACITY': "virtualization.ComputeCapacity", 'VIRTUALIZATION.CPUALLOCATION': "virtualization.CpuAllocation", 'VIRTUALIZATION.CPUINFO': "virtualization.CpuInfo", 'VIRTUALIZATION.DISKSTATUS': "virtualization.DiskStatus", 'VIRTUALIZATION.ESXICLONECUSTOMSPEC': "virtualization.EsxiCloneCustomSpec", 'VIRTUALIZATION.ESXIHOSTCONFIGURATION': "virtualization.EsxiHostConfiguration", 'VIRTUALIZATION.ESXIOVACUSTOMSPEC': "virtualization.EsxiOvaCustomSpec", 'VIRTUALIZATION.ESXIVMCOMPUTECONFIGURATION': "virtualization.EsxiVmComputeConfiguration", 'VIRTUALIZATION.ESXIVMCONFIGURATION': "virtualization.EsxiVmConfiguration", 'VIRTUALIZATION.ESXIVMNETWORKCONFIGURATION': "virtualization.EsxiVmNetworkConfiguration", 'VIRTUALIZATION.ESXIVMSTORAGECONFIGURATION': "virtualization.EsxiVmStorageConfiguration", 'VIRTUALIZATION.GUESTINFO': "virtualization.GuestInfo", 'VIRTUALIZATION.HXAPVMCONFIGURATION': "virtualization.HxapVmConfiguration", 'VIRTUALIZATION.IPADDRESSINFO': "virtualization.IpAddressInfo", 'VIRTUALIZATION.MEMORYALLOCATION': "virtualization.MemoryAllocation", 'VIRTUALIZATION.MEMORYCAPACITY': "virtualization.MemoryCapacity", 'VIRTUALIZATION.NETWORKINTERFACE': "virtualization.NetworkInterface", 'VIRTUALIZATION.NETWORKPORT': "virtualization.NetworkPort", 'VIRTUALIZATION.PRODUCTINFO': "virtualization.ProductInfo", 'VIRTUALIZATION.STORAGECAPACITY': "virtualization.StorageCapacity", 'VIRTUALIZATION.VDISKCONFIG': "virtualization.VdiskConfig", 'VIRTUALIZATION.VIRTUALDISKCONFIG': "virtualization.VirtualDiskConfig", 'VIRTUALIZATION.VIRTUALMACHINEDISK': "virtualization.VirtualMachineDisk", 'VIRTUALIZATION.VMDISK': "virtualization.VmDisk", 'VIRTUALIZATION.VMESXIDISK': "virtualization.VmEsxiDisk", 'VIRTUALIZATION.VMINTERFACE': "virtualization.VmInterface", 'VIRTUALIZATION.VMWAREREMOTEDISPLAYINFO': "virtualization.VmwareRemoteDisplayInfo", 'VIRTUALIZATION.VMWARERESOURCECONSUMPTION': "virtualization.VmwareResourceConsumption", 'VIRTUALIZATION.VMWARESHARESINFO': "virtualization.VmwareSharesInfo", 'VIRTUALIZATION.VMWARETEAMINGANDFAILOVER': "virtualization.VmwareTeamingAndFailover", 'VIRTUALIZATION.VMWAREVLANRANGE': "virtualization.VmwareVlanRange", 'VIRTUALIZATION.VMWAREVMCPUSHAREINFO': "virtualization.VmwareVmCpuShareInfo", 'VIRTUALIZATION.VMWAREVMCPUSOCKETINFO': "virtualization.VmwareVmCpuSocketInfo", 'VIRTUALIZATION.VMWAREVMDISKCOMMITINFO': "virtualization.VmwareVmDiskCommitInfo", 'VIRTUALIZATION.VMWAREVMMEMORYSHAREINFO': "virtualization.VmwareVmMemoryShareInfo", 'VIRTUALIZATION.VOLUMEINFO': "virtualization.VolumeInfo", 'VMEDIA.MAPPING': "vmedia.Mapping", 'VNIC.ARFSSETTINGS': "vnic.ArfsSettings", 'VNIC.CDN': "vnic.Cdn", 'VNIC.COMPLETIONQUEUESETTINGS': "vnic.CompletionQueueSettings", 'VNIC.ETHINTERRUPTSETTINGS': "vnic.EthInterruptSettings", 'VNIC.ETHRXQUEUESETTINGS': "vnic.EthRxQueueSettings", 'VNIC.ETHTXQUEUESETTINGS': "vnic.EthTxQueueSettings", 'VNIC.FCERRORRECOVERYSETTINGS': "vnic.FcErrorRecoverySettings", 'VNIC.FCINTERRUPTSETTINGS': "vnic.FcInterruptSettings", 'VNIC.FCQUEUESETTINGS': "vnic.FcQueueSettings", 'VNIC.FLOGISETTINGS': "vnic.FlogiSettings", 'VNIC.ISCSIAUTHPROFILE': "vnic.IscsiAuthProfile", 'VNIC.LUN': "vnic.Lun", 'VNIC.NVGRESETTINGS': "vnic.NvgreSettings", 'VNIC.PLACEMENTSETTINGS': "vnic.PlacementSettings", 'VNIC.PLOGISETTINGS': "vnic.PlogiSettings", 'VNIC.ROCESETTINGS': "vnic.RoceSettings", 'VNIC.RSSHASHSETTINGS': "vnic.RssHashSettings", 'VNIC.SCSIQUEUESETTINGS': "vnic.ScsiQueueSettings", 'VNIC.TCPOFFLOADSETTINGS': "vnic.TcpOffloadSettings", 
'VNIC.USNICSETTINGS': "vnic.UsnicSettings", 'VNIC.VIFSTATUS': "vnic.VifStatus", 'VNIC.VLANSETTINGS': "vnic.VlanSettings", 'VNIC.VMQSETTINGS': "vnic.VmqSettings", 'VNIC.VSANSETTINGS': "vnic.VsanSettings", 'VNIC.VXLANSETTINGS': "vnic.VxlanSettings", 'WORKFLOW.ACTIONWORKFLOWDEFINITION': "workflow.ActionWorkflowDefinition", 'WORKFLOW.ARRAYDATATYPE': "workflow.ArrayDataType", 'WORKFLOW.ASSOCIATEDROLES': "workflow.AssociatedRoles", 'WORKFLOW.CLICOMMAND': "workflow.CliCommand", 'WORKFLOW.COMMENTS': "workflow.Comments", 'WORKFLOW.CONSTRAINTS': "workflow.Constraints", 'WORKFLOW.CUSTOMARRAYITEM': "workflow.CustomArrayItem", 'WORKFLOW.CUSTOMDATAPROPERTY': "workflow.CustomDataProperty", 'WORKFLOW.CUSTOMDATATYPE': "workflow.CustomDataType", 'WORKFLOW.CUSTOMDATATYPEPROPERTIES': "workflow.CustomDataTypeProperties", 'WORKFLOW.DECISIONCASE': "workflow.DecisionCase", 'WORKFLOW.DECISIONTASK': "workflow.DecisionTask", 'WORKFLOW.DEFAULTVALUE': "workflow.DefaultValue", 'WORKFLOW.DISPLAYMETA': "workflow.DisplayMeta", 'WORKFLOW.DYNAMICWORKFLOWACTIONTASKLIST': "workflow.DynamicWorkflowActionTaskList", 'WORKFLOW.ENUMENTRY': "workflow.EnumEntry", 'WORKFLOW.EXPECTPROMPT': "workflow.ExpectPrompt", 'WORKFLOW.FAILUREENDTASK': "workflow.FailureEndTask", 'WORKFLOW.FILEDOWNLOADOP': "workflow.FileDownloadOp", 'WORKFLOW.FILEOPERATIONS': "workflow.FileOperations", 'WORKFLOW.FILETEMPLATEOP': "workflow.FileTemplateOp", 'WORKFLOW.FILETRANSFER': "workflow.FileTransfer", 'WORKFLOW.FORKTASK': "workflow.ForkTask", 'WORKFLOW.INITIATORCONTEXT': "workflow.InitiatorContext", 'WORKFLOW.INTERNALPROPERTIES': "workflow.InternalProperties", 'WORKFLOW.JOINTASK': "workflow.JoinTask", 'WORKFLOW.LOOPTASK': "workflow.LoopTask", 'WORKFLOW.MESSAGE': "workflow.Message", 'WORKFLOW.MOREFERENCEARRAYITEM': "workflow.MoReferenceArrayItem", 'WORKFLOW.MOREFERENCEDATATYPE': "workflow.MoReferenceDataType", 'WORKFLOW.MOREFERENCEPROPERTY': "workflow.MoReferenceProperty", 'WORKFLOW.PARAMETERSET': "workflow.ParameterSet", 'WORKFLOW.PRIMITIVEARRAYITEM': "workflow.PrimitiveArrayItem", 'WORKFLOW.PRIMITIVEDATAPROPERTY': "workflow.PrimitiveDataProperty", 'WORKFLOW.PRIMITIVEDATATYPE': "workflow.PrimitiveDataType", 'WORKFLOW.PROPERTIES': "workflow.Properties", 'WORKFLOW.RESULTHANDLER': "workflow.ResultHandler", 'WORKFLOW.ROLLBACKTASK': "workflow.RollbackTask", 'WORKFLOW.ROLLBACKWORKFLOWTASK': "workflow.RollbackWorkflowTask", 'WORKFLOW.SELECTORPROPERTY': "workflow.SelectorProperty", 'WORKFLOW.SSHCMD': "workflow.SshCmd", 'WORKFLOW.SSHCONFIG': "workflow.SshConfig", 'WORKFLOW.SSHSESSION': "workflow.SshSession", 'WORKFLOW.STARTTASK': "workflow.StartTask", 'WORKFLOW.SUBWORKFLOWTASK': "workflow.SubWorkflowTask", 'WORKFLOW.SUCCESSENDTASK': "workflow.SuccessEndTask", 'WORKFLOW.TARGETCONTEXT': "workflow.TargetContext", 'WORKFLOW.TARGETDATATYPE': "workflow.TargetDataType", 'WORKFLOW.TARGETPROPERTY': "workflow.TargetProperty", 'WORKFLOW.TASKCONSTRAINTS': "workflow.TaskConstraints", 'WORKFLOW.TASKRETRYINFO': "workflow.TaskRetryInfo", 'WORKFLOW.UIINPUTFILTER': "workflow.UiInputFilter", 'WORKFLOW.VALIDATIONERROR': "workflow.ValidationError", 'WORKFLOW.VALIDATIONINFORMATION': "workflow.ValidationInformation", 'WORKFLOW.WAITTASK': "workflow.WaitTask", 'WORKFLOW.WAITTASKPROMPT': "workflow.WaitTaskPrompt", 'WORKFLOW.WEBAPI': "workflow.WebApi", 'WORKFLOW.WORKERTASK': "workflow.WorkerTask", 'WORKFLOW.WORKFLOWCTX': "workflow.WorkflowCtx", 'WORKFLOW.WORKFLOWENGINEPROPERTIES': "workflow.WorkflowEngineProperties", 'WORKFLOW.WORKFLOWINFOPROPERTIES': "workflow.WorkflowInfoProperties", 
            'WORKFLOW.WORKFLOWPROPERTIES': "workflow.WorkflowProperties",
            'WORKFLOW.XMLAPI': "workflow.XmlApi",
            'X509.CERTIFICATE': "x509.Certificate",
        },
    }

    validations = {
    }

    @cached_property
    def additional_properties_type():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)  # noqa: E501

    _nullable = True

    @cached_property
    def openapi_types():
        """
        This must be a method because a model may have properties that are
        of type self, this must run after the class is loaded

        Returns
            openapi_types (dict): The key is attribute name
                and the value is attribute type.
        """
        lazy_import()
        return {
            'class_id': (str,),  # noqa: E501
            'object_type': (str,),  # noqa: E501
        }

    @cached_property
    def discriminator():
        lazy_import()
        val = {
            'virtualization.EsxiCloneCustomSpec': VirtualizationEsxiCloneCustomSpec,
            'virtualization.EsxiOvaCustomSpec': VirtualizationEsxiOvaCustomSpec,
        }
        if not val:
            return None
        return {'class_id': val}

    attribute_map = {
        'class_id': 'ClassId',  # noqa: E501
        'object_type': 'ObjectType',  # noqa: E501
    }

    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
        '_composed_instances',
        '_var_name_to_model_instances',
        '_additional_properties_model_instances',
    ])

    @convert_js_args_to_python_args
    def __init__(self, class_id, object_type, *args, **kwargs):  # noqa: E501
        """VirtualizationBaseCustomSpec - a model defined in OpenAPI

        Args:
            class_id (str): The fully-qualified name of the instantiated, concrete type.
                This property is used as a discriminator to identify the type of the
                payload when marshaling and unmarshaling data. The enum values provide
                the list of concrete types that can be instantiated from this abstract type.
            object_type (str): The fully-qualified name of the instantiated, concrete type.
                The value should be the same as the 'ClassId' property. The enum values
                provide the list of concrete types that can be instantiated from this
                abstract type.

        Keyword Args:
            _check_type (bool): if True, values for parameters in openapi_types
                will be type checked and a TypeError will be raised if the wrong
                type is input. Defaults to True
            _path_to_item (tuple/list): This is a list of keys or values to drill
                down to the model in received_data when deserializing a response
            _spec_property_naming (bool): True if the variable names in the input
                data are serialized names, as specified in the OpenAPI document.
                False if the variable names in the input data are pythonic names,
                e.g. snake case (default)
            _configuration (Configuration): the instance to use when deserializing
                a file_type parameter. If passed, type conversion is attempted.
                If omitted no type conversion is done.
            _visited_composed_classes (tuple): This stores a tuple of classes that
                we have traveled through so that if we see that class again we will
                not use its discriminator again. When traveling through a
                discriminator, the composed schema that is traveled through is added
                to this set. For example if Animal has a discriminator petType and
                we pass in "Dog", and the class Dog allOf includes Animal, we move
                through Animal once using the discriminator, and pick Dog. Then in
                Dog, we will make an instance of the Animal class but this time we
                won't travel through its discriminator because we passed in
                _visited_composed_classes = (Animal,)
        """

        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if args:
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        constant_args = {
            '_check_type': _check_type,
            '_path_to_item': _path_to_item,
            '_spec_property_naming': _spec_property_naming,
            '_configuration': _configuration,
            '_visited_composed_classes': self._visited_composed_classes,
        }
        required_args = {
            'class_id': class_id,
            'object_type': object_type,
        }
        model_args = {}
        model_args.update(required_args)
        model_args.update(kwargs)
        composed_info = validate_get_composed_info(
            constant_args, model_args, self)
        self._composed_instances = composed_info[0]
        self._var_name_to_model_instances = composed_info[1]
        self._additional_properties_model_instances = composed_info[2]
        unused_args = composed_info[3]

        for var_name, var_value in required_args.items():
            setattr(self, var_name, var_value)
        for var_name, var_value in kwargs.items():
            if var_name in unused_args and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    not self._additional_properties_model_instances:
                # discard variable.
                continue
            setattr(self, var_name, var_value)

    @cached_property
    def _composed_schemas():
        # we need this here to make our import statements work
        # we must store _composed_schemas in here so the code is only run
        # when we invoke this method. If we kept this at the class
        # level we would get an error because the class level
        # code would be run when this module is imported, and these composed
        # classes don't exist yet because their module has not finished
        # loading
        lazy_import()
        return {
            'anyOf': [
            ],
            'allOf': [
                MoBaseComplexType,
            ],
            'oneOf': [
            ],
        }
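# A minimal usage sketch for the model above. The import path shown is an
# assumption based on the usual layout of the generated intersight package;
# the (class_id, object_type) constructor signature is taken from __init__.
# `class_id` doubles as the discriminator, so a payload tagged
# "virtualization.EsxiCloneCustomSpec" maps to that concrete subclass when a
# response is deserialized. Illustrative only:
#
#     from intersight.model.virtualization_base_custom_spec import VirtualizationBaseCustomSpec
#
#     clone_spec = VirtualizationBaseCustomSpec(
#         class_id="virtualization.EsxiCloneCustomSpec",
#         object_type="virtualization.EsxiCloneCustomSpec",
#     )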
python
#!/usr/bin/env python3
# coding=utf8
"""\
Our Standards
Jill-Jênn Vie et Christoph Dürr - 2020
"""

from sys import stdin


def readint():
    """ function to read an integer from stdin """
    return int(stdin.readline())


def readstr():
    """ function to read a string from stdin """
    return stdin.readline().strip()


def readarray(typ):
    """ function to read an array """
    return list(map(typ, stdin.readline().split()))


# pylint: disable=redefined-outer-name
def readmatrix(n):
    """ function to read a matrix """
    M = []
    for _ in range(n):
        row = readarray(int)
        assert len(row) == n
        M.append(row)
    return M
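# A minimal usage sketch of the helpers above, assuming the input starts with the
# matrix dimension n followed by n rows of n integers (that format is an assumption).
if __name__ == "__main__":
    n = readint()        # first line: matrix dimension
    M = readmatrix(n)    # next n lines: n integers each
    print(M)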
python
import dash import dash_core_components as dcc import dash_html_components as html import plotly.graph_objs as go from ECAgent.Core import Model # Can be used to customize CSS of Visualizer external_stylesheets = ['https://rawgit.com/BrandonGower-Winter/ABMECS/master/Assets/VisualizerCustom.css', 'https://rawgit.com/BrandonGower-Winter/ABMECS/master/Assets/VisualizerBase.css'] class VisualInterface: """ Ths is the base class for Visual Interfaces. VisualInterface's utilize the dash package to create a WebApp to allow individuals to view the results of their model once a run has been completed or in real-time. There are a few things to note about the VisualInterface class: * By calling the VisualInterface.__init__() method, your WebApp will have features setup for you: Namely, play, stop, restart and step. It'll also include a banner with your System's name as a title on it. * A frameFreq of 0.0 means that your system is static and will only ever be constructed once. If you want a dynamic WebApp, you must set the frameFreq to some non-zero positive number. If your frameFreq is 0.0, the play, stop, restart and step buttons will not be added to your WebApp. * The server/WebApp will start once you call the VisualInterface.app.run_server(). * The frameFreq property determines how frequently (in milliseconds) the SystemManager.executeSystems() method is called and how often your your graphs will update. """ def __init__(self, name, model: Model, frameFreq: float = 0.0): self.name = name self.model = model self.frameFreq = frameFreq self.running = False # Is used to determine whether a dynamic model is running or not. # Create app self.app = dash.Dash( self.name, meta_tags=[{"name": "viewport", "content": "width=device-width"}], external_stylesheets=external_stylesheets ) # Create parameter lists self.displays = [] self.parameters = [] self.createBaseLayout() def isStatic(self) -> bool: return self.frameFreq == 0.0 def execute(self): self.render() def render(self): pass def createBaseLayout(self): """Creates the base layout""" # Create banner banner = html.Div( className="app-banner row", children=[ html.H2(className="h2-title", children=self.name), html.H2(className="h2-title-mobile", children=self.name), ], ) # Add parameter header self.addParameter(createLabel('parameter-heading', 'Parameters:')) # If framerate > 0, create the play, stop, and restart buttons and Timestep label if not self.isStatic(): # Add Play/Restart/Step Buttons banner.children.append( html.Div( className='div-play-buttons', id='dynamic-button', children=[ html.Button("Play", id='play-stop-button', n_clicks=0), html.Button('Restart', id='restart-button', n_clicks=0), html.Button('Step', id='step-button', n_clicks=0), dcc.Interval( id='interval-component', interval=self.frameFreq, n_intervals=0 ) ] ) ) # Add Timestep label self.parameters.append(createLabel('timestep-label', 'Timestep: 0')) # Apply Play/Stop Callback self.app.callback( dash.dependencies.Output('play-stop-button', 'children'), [dash.dependencies.Input('play-stop-button', 'n_clicks')] )(self.play_button_callback) # Apply executeSystems() on interval callback and Step button callback self.app.callback( dash.dependencies.Output('timestep-label', 'children'), [dash.dependencies.Input('interval-component', 'n_intervals'), dash.dependencies.Input('step-button', 'n_clicks')] )(self.execute_system_on_play_callback) self.app.layout = html.Div( children=[ # Error Message html.Div(id="error-message"), # Top Banner banner, # Body of the App html.Div( className="row 
app-body", children=[ # User Controls html.Div( className="four columns card", children=html.Div( className="bg-white user-control", children=self.parameters) ), # Graph html.Div( className="eight columns card-left", children=self.displays, style={'margin-left': 0} ), dcc.Store(id="error", storage_type="memory"), ], ), ] ) def addDisplay(self, content, add_break=True): self.displays.append(content) if add_break: self.displays.append(html.Br()) def addParameter(self, content): self.parameters.append(content) # #################################### Class Callbacks ########################################### def play_button_callback(self, n_clicks): if n_clicks % 2 == 0: self.running = False return 'Play' else: self.running = True return 'Stop' def execute_system_on_play_callback(self, n_intervals, n_clicks): context = dash.callback_context.triggered[0]['prop_id'].split('.')[0] if context == 'step-button': if not self.running: self.model.systemManager.executeSystems() elif self.running: self.model.systemManager.executeSystems() return "Timestep: {}".format(self.model.systemManager.timestep) # ############################## Graph and Parameter Functionality ############################## def createScatterPlot(title, data: [[[float], [float], dict]], layout_kwargs: dict = {}): """Creates a Scatter plot Figure. This function supports multiple traces supplied to the 'data' parameter Data should be supplied in the following format: [[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]] The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of the properties specified.. """ traces = [] for data_packet in data: scatter = go.Scatter(x=data_packet[0], y=data_packet[1]) traces.append(scatter) if len(data_packet) > 2: scatter.update(data_packet[2]) return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs)) def createScatterGLPlot(title, data: [[[float], [float], dict]], layout_kwargs: dict = {}): """Creates a Scatter plot Figure that will be rendered using WebGL. This function supports multiple traces supplied to the 'data' parameter Data should be supplied in the following format: [[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]] The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of the properties specified.. """ traces = [] for data_packet in data: scatter = go.Scattergl(x=data_packet[0], y=data_packet[1]) traces.append(scatter) if len(data_packet) > 2: scatter.update(data_packet[2]) return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs)) def createBarGraph(title: str, data: [[[float], [float], dict]], layout_kwargs: dict = {}): """Creates a Bar Graph Figure. This function supports multiple traces supplied to the 'data' parameter Data should be supplied in the following format: [[xdata_1,ydata_1, fig_layout_1], [xdata_2, ydata_2, fig_layout_2], ..., [xdata_n,ydata_n, fig_layout_n]] The 'fig_layout' property is optional. If it is supplied, the trace in question will be updated to include all of the properties specified.. 
""" traces = [] for data_packet in data: bar = go.Bar(x=data_packet[0], y=data_packet[1]) traces.append(bar) if len(data_packet) > 2: bar.update(data_packet[2]) return go.Figure(data=traces, layout=go.Layout(title=title, **layout_kwargs)) def createHeatMap(title: str, data: [[float]], heatmap_kwargs: dict = {}, layout_kwargs: dict = {}): """Creates a HeatMap Figure object using Plotly graph objects. The data object determines the dimensions of the heatmap. The len(data) will be the height. The len(data[i]) will be the width of the heatmap. The Heatmap is constructed in a bottom-up and left-to-right manner. Discrete X and Y categories can be specified, this is done by supplying xData and yData with the X and Y category name respectively. The len(xData) must be equal to the width of your Heatmap, while len(yData) must be equal to the height of your Heatmap. A custom color scale can be supplied, ensure that it follows the correct format and that the threshold values are normalized and that the color scales are in rgb like so 'rgb(r_val, g_val, b_val)'""" return go.Figure(data=go.Heatmap( z=data, **heatmap_kwargs ), layout=go.Layout(title=title, **layout_kwargs)) def createHeatMapGL(title: str, data: [[float]], heatmap_kwargs: dict = {}, layout_kwargs: dict = {}): """Creates a HeatMap Figure object using Plotly graph objects that will be rendered by WebGL. The data object determines the dimensions of the heatmap. The len(data) will be the height. The len(data[i]) will be the width of the heatmap. The Heatmap is constructed in a bottom-up and left-to-right manner. Discrete X and Y categories can be specified, this is done by supplying xData and yData with the X and Y category name respectively. The len(xData) must be equal to the width of your Heatmap, while len(yData) must be equal to the height of your Heatmap. A custom color scale can be supplied, ensure that it follows the correct format and that the threshold values are normalized and that the color scales are in rgb like so 'rgb(r_val, g_val, b_val)'""" return go.Figure(data=go.Heatmapgl( z=data, **heatmap_kwargs ), layout=go.Layout(title=title, **layout_kwargs)) def createContourMap(title: str, data: [[float]], contour_kwargs: dict = {}, layout_kwargs: dict = {}): """Creates a Contour Figure object using Plotly graph objects. The data object determines the dimensions of the Contour plot. The len(data) will be the height. The len(data[i]) will be the width of the contour plot. The contour plot is constructed in a bottom-up and left-to-right manner. The contour plot can be customized using the contour_kwargs dict. The dict will be supplied to the contour plot graph object when it is created. See the plotly api for a list of customizable properties. This can be similarly be applied to layout_kwargs which can change the layout of contour plot.""" return go.Figure(data=go.Contour( z=data, **contour_kwargs ), layout=go.Layout(title=title, **layout_kwargs)) def createTable(title: str, headers: [str], cells: [[]], header_kwargs: dict = {}, cell_kwargs: dict = {}, layout_kwargs: dict = {}): """Creates a Table figure using Plotly graph objects. Table headers and cells need to be supplied separately. The data format for the headers and cells are as follows: Headers: [hdr1, hdr2,...,hdrN] Cells: [column1_data, column2_data,..., columnN_data]. The Table headers and cells are customized separately using the header_kwargs and cell_kwargs parameters. 
The layout of the Table can also be customized using the layout_kwargs.""" return go.Figure(data=go.Table( header=dict(values=headers, **header_kwargs), cells=dict(values=cells, **cell_kwargs) ), layout=go.Layout(title=title, **layout_kwargs)) def createPieChart(title: str, labels: [str], values: [float], pie_kwargs: dict = {}, layout_kwargs: dict = {}): """ Creates a Pie Chart Figure using Plotly graph objects. Chart labels and values need to be supplied separately. The data format for the labels and values are as follows: Labels: [lbl1, lbl2,..., lblN] Values: [val1, val2,..., valN] The Pie chart can be customized using the pie_kwargs parameter. The layout of the Pie chart can be customized using the layout_kwargs parameter.""" return go.Figure(data=go.Pie(labels=labels, values=values, **pie_kwargs), layout=go.Layout(title=title, **layout_kwargs)) def createGraph(graphID: str, figure: go.Figure, classname: str = 'bg-white'): return html.Div( className=classname, children=[ dcc.Graph(id=graphID, figure=figure) ], style={'height': figure.layout.height} ) def createLiveGraph(graphID: str, figure: go.Figure, vs: VisualInterface, callback, classname: str = 'bg-white'): graph = createGraph(graphID, figure, classname) def update_live_graph_callback(n_intervals, n_clicks, figure): context = dash.callback_context.triggered[0]['prop_id'].split('.')[0] if (context == 'step-button' and not vs.running) or vs.running: return callback(figure) else: return figure # Add Callback vs.app.callback( dash.dependencies.Output(graphID, 'figure'), [dash.dependencies.Input('interval-component', 'n_intervals'), dash.dependencies.Input('step-button', 'n_clicks'), dash.dependencies.Input(graphID, 'figure')] )(update_live_graph_callback) return graph def createLabel(label_id, content): return html.Div(className="padding-top-bot", children=[html.H6(content, id=label_id)]) def createLiveLabel(label_id, initial_content, vs: VisualInterface, callback): label = createLabel(label_id, initial_content) def update_live_label_callback(n_intervals, n_clicks, children): context = dash.callback_context.triggered[0]['prop_id'].split('.')[0] if (context == 'step-button' and not vs.running) or vs.running: return callback(children) else: return children # Add Callback vs.app.callback( dash.dependencies.Output(label_id, 'children'), [dash.dependencies.Input('interval-component', 'n_intervals'), dash.dependencies.Input('step-button', 'n_clicks'), dash.dependencies.Input(label_id, 'children')] )(update_live_label_callback) return label def createSlider(slider_id: str, slider_name: str, vs: VisualInterface, set_val, min_val: float = 0.0, max_val: float = 1.0, step: float = 0.01): """This function will add a slider to the parameter window of the visual interface. It will also automatically add a callback function that will supply your custom function 'set_val' with the value of the slider""" # Add html slider = html.Div( className="padding-top-bot", children=[ html.H6('{}: [{}]'.format(slider_name, max_val), id=slider_id + '-title'), dcc.Slider( id=slider_id, min=min_val, max=max_val, value=max_val, step=step ) ] ) # Add callback def set_slider_val(value): set_val(value) return '{}: [{}]'.format(slider_name, value) vs.app.callback(dash.dependencies.Output(slider_id + '-title', 'children'), [dash.dependencies.Input(slider_id, 'value')])(set_slider_val) return slider def addRect(fig: go.Figure, x, y, width=1, height=1, **shape_kwargs): """Adds a rectangle to Figure 'fig'. 
x & y refer to the coordinates of the bottom left corner of the rectangle.""" x1 = x + width y1 = y + height fig.add_shape( x0=x, y0=y, x1=x1, y1=y1, type='rect', **shape_kwargs ) def addCircle(fig: go.Figure, x, y, radius=0.5, **shape_kwargs): """Adds a circle to Figure 'fig'. x & y are the coordinates of the center of the circle""" x0 = x - radius x1 = x + radius y0 = y - radius y1 = y + radius fig.add_shape( x0=x0, x1=x1, y0=y0, y1=y1, type='circle', **shape_kwargs ) def createTabs(labels: [str], tabs: []): return html.Div([ dcc.Tabs( [ dcc.Tab(label=labels[x], children=tabs[x]) for x in range(len(labels)) ] )])
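# A hedged end-to-end sketch of this module's API.  Model() construction is an
# assumption (ECAgent models may require more arguments); everything else uses
# the class and helper functions defined above.
if __name__ == '__main__':
    demo_model = Model()  # assumption: default-constructible model
    vs = VisualInterface("Demo Visualizer", demo_model, frameFreq=500.0)

    def set_noise(value):
        # assumption: the model exposes some tunable attribute worth adjusting
        demo_model.noise = value

    vs.addParameter(createSlider('noise-slider', 'Noise', vs, set_noise,
                                 min_val=0.0, max_val=1.0, step=0.05))

    fig = createScatterPlot('Demo plot',
                            [[[0, 1, 2, 3], [0, 1, 4, 9], {'name': 'y = x^2'}]])
    vs.addDisplay(createGraph('demo-graph', fig))

    vs.app.run_server(debug=True)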
python
"""Test ``X-Forwarded-For`` middleware.""" from __future__ import annotations from ipaddress import _BaseNetwork, ip_network from typing import Dict, List, Optional import pytest from fastapi import FastAPI, Request from httpx import AsyncClient from safir.middleware.x_forwarded import XForwardedMiddleware def build_app(proxies: Optional[List[_BaseNetwork]] = None) -> FastAPI: """Construct a test FastAPI app with the middleware registered.""" app = FastAPI() app.add_middleware(XForwardedMiddleware, proxies=proxies) return app @pytest.mark.asyncio async def test_ok() -> None: app = build_app([ip_network("11.0.0.0/8")]) @app.get("/") async def handler(request: Request) -> Dict[str, str]: assert request.state.forwarded_host == "foo.example.com" assert request.state.forwarded_proto == "https" assert request.client.host == "10.10.10.10" return {} async with AsyncClient(app=app, base_url="http://example.com") as client: r = await client.get( "/", headers={ "X-Forwarded-For": "10.10.10.10, 11.11.11.11", "X-Forwarded-Proto": "https, http", "X-Forwarded-Host": "foo.example.com", }, ) assert r.status_code == 200 @pytest.mark.asyncio async def test_defaults() -> None: app = build_app() @app.get("/") async def handler(request: Request) -> Dict[str, str]: assert request.state.forwarded_host == "foo.example.com" assert request.state.forwarded_proto == "http" assert request.client.host == "192.168.0.1" return {} async with AsyncClient(app=app, base_url="http://example.com") as client: r = await client.get( "/", headers={ "X-Forwarded-For": ("1.1.1.1, 192.168.0.1"), "X-Forwarded-Proto": "https, http", "X-Forwarded-Host": "foo.example.com", }, ) assert r.status_code == 200 @pytest.mark.asyncio async def test_no_forwards() -> None: app = build_app([ip_network("127.0.0.1")]) @app.get("/") async def handler(request: Request) -> Dict[str, str]: assert not request.state.forwarded_host assert not request.state.forwarded_proto assert request.client.host == "127.0.0.1" return {} async with AsyncClient(app=app, base_url="http://example.com") as client: r = await client.get("/") assert r.status_code == 200 @pytest.mark.asyncio async def test_all_filtered() -> None: app = build_app([ip_network("10.0.0.0/8")]) @app.get("/") async def handler(request: Request) -> Dict[str, str]: assert request.state.forwarded_host == "foo.example.com" assert request.state.forwarded_proto == "https" assert request.client.host == "10.10.10.10" return {} async with AsyncClient(app=app, base_url="http://example.com") as client: r = await client.get( "/", headers={ "X-Forwarded-For": "10.10.10.10, 10.0.0.1", "X-Forwarded-Proto": "https, http", "X-Forwarded-Host": "foo.example.com", }, ) assert r.status_code == 200 @pytest.mark.asyncio async def test_one_proto() -> None: app = build_app([ip_network("11.11.11.11")]) @app.get("/") async def handler(request: Request) -> Dict[str, str]: assert request.state.forwarded_host == "foo.example.com" assert request.state.forwarded_proto == "https" assert request.client.host == "10.10.10.10" return {} async with AsyncClient(app=app, base_url="http://example.com") as client: r = await client.get( "/", headers={ "X-Forwarded-For": "10.10.10.10, 11.11.11.11", "X-Forwarded-Proto": "https", "X-Forwarded-Host": "foo.example.com", }, ) assert r.status_code == 200 @pytest.mark.asyncio async def test_no_proto_or_host() -> None: app = build_app([ip_network("11.11.11.11")]) @app.get("/") async def handler(request: Request) -> Dict[str, str]: assert not request.state.forwarded_host assert not 
request.state.forwarded_proto assert request.client.host == "10.10.10.10" return {} async with AsyncClient(app=app, base_url="http://example.com") as client: r = await client.get( "/", headers={"X-Forwarded-For": "10.10.10.10, 11.11.11.11"} ) assert r.status_code == 200 @pytest.mark.asyncio async def test_too_many_headers() -> None: """Test handling of duplicate headers. httpx doesn't allow passing in duplicate headers, so we cannot test end to end. Instead, test by generating a mock request and then calling the underling middleware functions directly. """ state = { "type": "http", "headers": [ ("X-Forwarded-For", "10.10.10.10"), ("X-Forwarded-For", "10.10.10.1"), ("X-Forwarded-Proto", "https"), ("X-Forwarded-Proto", "http"), ("X-Forwarded-Host", "example.org"), ("X-Forwarded-Host", "example.com"), ], } request = Request(state) app = FastAPI() middleware = XForwardedMiddleware(app, proxies=[ip_network("10.0.0.0/8")]) assert middleware._get_forwarded_for(request) == [] assert middleware._get_forwarded_proto(request) == [] assert not middleware._get_forwarded_host(request)
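# A hedged sketch of how a real (non-test) application would register the same
# middleware; the proxy network below is illustrative only.
def build_production_app() -> FastAPI:
    app = FastAPI()
    app.add_middleware(XForwardedMiddleware, proxies=[ip_network("10.0.0.0/8")])
    return app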
python
# --------------------------------------------------------------------------- # MTDA Client # --------------------------------------------------------------------------- # # This software is a part of MTDA. # Copyright (c) Mentor, a Siemens business, 2017-2020 # # --------------------------------------------------------------------------- # SPDX-License-Identifier: MIT # --------------------------------------------------------------------------- import os import random import socket import time import zerorpc from mtda.main import MentorTestDeviceAgent import mtda.constants as CONSTS class Client: def __init__(self, host=None): agent = MentorTestDeviceAgent() agent.load_config(host) if agent.remote is not None: uri = "tcp://%s:%d" % (agent.remote, agent.ctrlport) self._impl = zerorpc.Client(heartbeat=20, timeout=2*60) self._impl.connect(uri) else: self._impl = agent self._agent = agent HOST = socket.gethostname() USER = os.getenv("USER") WORDS = "/usr/share/dict/words" if os.path.exists(WORDS): WORDS = open(WORDS).read().splitlines() name = random.choice(WORDS) if name.endswith("'s"): name = name.replace("'s", "") elif USER is not None and HOST is not None: name = "%s@%s" % (USER, HOST) else: name = "mtda" self._session = os.getenv('MTDA_SESSION', name) def agent_version(self): return self._impl.agent_version() def console_prefix_key(self): return self._agent.console_prefix_key() def command(self, args): return self._impl.command(args, self._session) def console_clear(self): return self._impl.console_clear(self._session) def console_dump(self): return self._impl.console_dump(self._session) def console_flush(self): return self._impl.console_flush(self._session) def console_getkey(self): return self._agent.console_getkey() def console_init(self): return self._agent.console_init() def console_head(self): return self._impl.console_head(self._session) def console_lines(self): return self._impl.console_lines(self._session) def console_locked(self): return self._impl.console_locked(self._session) def console_print(self, data): return self._impl.console_print(data, self._session) def console_prompt(self, newPrompt=None): return self._impl.console_prompt(newPrompt, self._session) def console_remote(self, host): return self._agent.console_remote(host) def console_run(self, cmd): return self._impl.console_run(cmd, self._session) def console_send(self, data, raw=False): return self._impl.console_send(data, raw, self._session) def console_tail(self): return self._impl.console_tail(self._session) def env_get(self, name): return self._impl.env_get(name, self._session) def env_set(self, name, value): return self._impl.env_set(name, value, self._session) def keyboard_write(self, data): return self._impl.keyboard_write(data, self._session) def power_locked(self): return self._impl.power_locked(self._session) def storage_bytes_written(self): return self._impl.storage_bytes_written(self._session) def storage_close(self): return self._impl.storage_close(self._session) def storage_locked(self): return self._impl.storage_locked(self._session) def storage_mount(self, part=None): return self._impl.storage_mount(part, self._session) def storage_open(self): tries = 60 while tries > 0: tries = tries - 1 status = self._impl.storage_open(self._session) if status is True: return True time.sleep(1) return False def storage_status(self): return self._impl.storage_status(self._session) def _storage_write(self, image, imgname, imgsize, callback=None): # Copy loop bytes_wanted = 0 data = image.read(self._agent.blksz) 
dataread = len(data) totalread = 0 while totalread < imgsize: totalread += dataread # Report progress via callback if callback is not None: callback(imgname, totalread, imgsize) # Write block to shared storage device bytes_wanted = self._impl.storage_write(data, self._session) # Check what to do next if bytes_wanted < 0: break elif bytes_wanted > 0: # Read next block data = image.read(bytes_wanted) dataread = len(data) else: # Agent may continue without further data data = b'' dataread = 0 # Close the local image image.close() # Wait for background writes to complete while True: status, writing, written = self._impl.storage_status(self._session) if writing is False: break if callback is not None: callback(imgname, totalread, imgsize) time.sleep(0.5) # Storage may be closed now status = self.storage_close() # Provide final update to specified callback if status is True and callback is not None: callback(imgname, totalread, imgsize) # Make sure an error is reported if a write error was received if bytes_wanted < 0: status = False return status def storage_update(self, dest, src=None, callback=None): path = dest if src is None else src imgname = os.path.basename(path) try: st = os.stat(path) imgsize = st.st_size image = open(path, "rb") except FileNotFoundError: return False status = self._impl.storage_update(dest, 0, self._session) if status is False: image.close() return False self._impl.storage_compression(CONSTS.IMAGE.RAW.value, self._session) return self._storage_write(image, imgname, imgsize, callback) def storage_write_image(self, path, callback=None): # Get size of the (compressed) image imgname = os.path.basename(path) # Open the specified image try: st = os.stat(path) imgsize = st.st_size if path.endswith(".bz2"): compression = CONSTS.IMAGE.BZ2.value elif path.endswith(".gz"): compression = CONSTS.IMAGE.GZ.value elif path.endswith(".zst"): compression = CONSTS.IMAGE.ZST.value else: compression = CONSTS.IMAGE.RAW.value self._impl.storage_compression(compression, self._session) image = open(path, "rb") except FileNotFoundError: return False # Open the shared storage device status = self.storage_open() if status is False: image.close() return False return self._storage_write(image, imgname, imgsize, callback) def storage_to_host(self): return self._impl.storage_to_host(self._session) def storage_to_target(self): return self._impl.storage_to_target(self._session) def storage_swap(self): return self._impl.storage_swap(self._session) def start(self): return self._agent.start() def remote(self): return self._agent.remote def session(self): return self._session def target_lock(self, retries=0): status = False while status is False: status = self._impl.target_lock(self._session) if retries <= 0 or status is True: break retries = retries - 1 time.sleep(60) return status def target_locked(self): return self._impl.target_locked(self._session) def target_off(self): return self._impl.target_off(self._session) def target_on(self): return self._impl.target_on(self._session) def target_status(self): return self._impl.target_status(self._session) def target_toggle(self): return self._impl.target_toggle(self._session) def target_unlock(self): return self._impl.target_unlock(self._session) def toggle_timestamps(self): return self._impl.toggle_timestamps() def usb_find_by_class(self, className): return self._impl.usb_find_by_class(className, self._session) def usb_has_class(self, className): return self._impl.usb_has_class(className, self._session) def usb_off(self, ndx): return self._impl.usb_off(ndx, 
self._session) def usb_off_by_class(self, className): return self._impl.usb_off_by_class(className, self._session) def usb_on(self, ndx): return self._impl.usb_on(ndx, self._session) def usb_on_by_class(self, className): return self._impl.usb_on_by_class(className, self._session) def usb_ports(self): return self._impl.usb_ports(self._session) def usb_status(self, ndx): return self._impl.usb_status(ndx, self._session) def usb_toggle(self, ndx): return self._impl.usb_toggle(ndx, self._session)
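# A hedged usage sketch of the Client defined above: lock the target, flash an
# image to the shared storage, then boot the device.  The image path, retry count
# and overall workflow ordering are illustrative assumptions.
def flash_and_boot(image_path="image.wic.gz"):
    def progress(name, written, total):
        print("%s: %d/%d bytes" % (name, written, total))

    client = Client()  # or Client("remote-host") to load a specific configuration
    if client.target_lock(retries=3):
        client.storage_to_host()
        client.storage_write_image(image_path, callback=progress)
        client.storage_to_target()
        client.target_on()
        client.target_unlock()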
python
"""Standard modules""" import sys import numpy as np import ldp import matplotlib.pyplot as plt class SimMesh(object): def __init__(self, mesh, neg, sep, pos): self.mesh = mesh self.neg = neg self.pos = pos self.sep = sep class SimData(object): def __init__(self, ce, cse, phie, phis, j): self.ce = ce self.cse = cse self.phie = phie self.phis = phis self.j = j def get_sim_data(self, time_index, location): return SimData( self.ce[time_index, location], self.cse[time_index, location], self.phie[time_index, location], self.phis[time_index, location], self.j[time_index, location]) def get_var(parameter, time, location=None, delta_t=0.1, delete=None): """Fetch parameter data from a given location and time""" (x_parameter, y_parameter) = (parameter[:, 0], parameter[:, 1]) time_frame = np.nonzero(np.diff(x_parameter) < 0)[0] start = np.insert(time_frame+1, 0, 0) stop = np.append(time_frame, len(x_parameter)) time_range = np.arange(0, len(start))*delta_t time_index = np.nonzero(time_range == time)[0][0] data = y_parameter[start[time_index]:stop[time_index]+1] if location: data = data[ location == x_parameter[start[time_index]:stop[time_index]]] if delete: data = np.delete(data, delete) return np.array([data]) def nice_abs(number): """Return the absolute of the given number""" return ((np.sign(number)+1)/2)*np.abs(number) def reaction_flux(sim_data, params, const): """J""" reaction_flux0 = params['k_norm_ref'] * \ nice_abs((params['csmax']-sim_data.cse)/params['csmax']) ** \ (1-params['alpha']) * \ nice_abs(sim_data.cse/params['csmax']) ** params['alpha'] * \ nice_abs(sim_data.ce/const['ce0']) ** (1-params['alpha']) soc = sim_data.cse/params['csmax'] # eta = phis-phie-params['eref'](soc) eta = sim_data.phis-sim_data.phie-params['Uocp'][0](soc) F = 96487 R = 8.314 return np.array([reaction_flux0*( np.exp((1-params['alpha'])*F*eta/(R*const['Tref'])) - np.exp(-params['alpha']*F*eta/(R*const['Tref'])))]) def region(mesh): """Find the regions in the mesh""" xneg = np.nonzero(mesh <= 1)[0] xpos = np.nonzero(mesh > 2)[0] xsep = np.nonzero((mesh > 1) & (mesh <= 2))[0] if mesh[xneg[-1]] == mesh[xneg[-2]]: xsep = np.concatenate((1, xneg[-1], xsep)) xneg = np.delete(xneg, -1) if mesh[xsep[-1]] == mesh[xsep[-2]]: xpos = np.concatenate((1, xsep[-1], xpos)) xsep = np.delete(xsep, -1) return SimMesh(mesh, xneg, xsep, xpos) def assemble_comsol(time, data, space=None, dt=0.1): ce, cse, phie, phis, j = (np.empty((0, len(data['mesh']))) for i in range(5)) for ind in time: ce = np.append(ce, get_var(data['ce'], ind), axis=0) cse = np.append(cse, get_var(data['cse'], ind, delete=[80, 202]), axis=0) phie = np.append(phie, get_var(data['phie'], ind), axis=0) phis = np.append(phis, get_var(data['phis'], ind, delete=[80, 202]), axis=0) j = np.append(j, get_var(data['j'], ind, delete=[80, 202]), axis=0) return SimData(ce, cse, phie, phis, j) def plot_j(time, data, mesh, params): jneg = np.empty((0, len(mesh.neg))) jpos = np.empty((0, len(mesh.pos))) for ind in range(0,len(time)): jneg = np.append(jneg, reaction_flux(data.get_sim_data(ind, mesh.neg), params['neg'], params['const']), axis=0) jpos = np.append(jpos, reaction_flux(data.get_sim_data(ind, mesh.pos), params['pos'], params['const']), axis=0) plt.plot(mesh.neg, jneg[ind,:], mesh.pos, jpos[ind,:]) print('Neg rms: {}'.format(np.sqrt(np.mean(np.square(jneg-data.get_sim_data(slice(0,len(time)), mesh.neg).j), axis=1)))) print('Pos rms: {}'.format(np.sqrt(np.mean(np.square(jpos-data.get_sim_data(slice(0,len(time)), mesh.pos).j), axis=1)))) plt.grid() 
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) plt.show() def main(): print('Loading Cell Parameters') params = dict() time = [5, 15, 25, 35, 45] sheet = ldp.read_excel( '../tests/gold_standard/GuAndWang_parameter_list.xlsx', 0) (ncol, pcol) = (2, 3) params['const'] = ldp.load_params(sheet, range(7, 15), ncol, pcol) params['neg'] = ldp.load_params(sheet, range(18, 43), ncol, pcol) params['sep'] = ldp.load_params(sheet, range(47, 52), ncol, pcol) params['pos'] = ldp.load_params(sheet, range(55, 75), ncol, pcol) comsol = ldp.load('../tests/gold_standard/guwang2.npz') comsol_parsed = assemble_comsol(time, comsol) comsol_mesh = region(comsol['mesh']) plot_j(time, comsol_parsed, comsol_mesh, params) return ce = get_var(comsol['ce'], 5) cse = get_var(comsol['cse'], 5, delete=[80, 202]) phie = get_var(comsol['phie'], 5) phis = get_var(comsol['phis'], 5, delete=[80, 202]) mesh_neg, mesh_sep, mesh_pos = region(comsol['mesh']) print(mesh_neg) print(reaction_flux(ce, cse, phie, phis, params['neg'], params['const'])) if __name__ == '__main__': sys.exit(main())
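# A small self-contained check of get_var() on synthetic COMSOL-style export data:
# two time frames of a variable sampled at x = 0, 0.5 and 1 (all values made up).
def _demo_get_var():
    fake = np.array([[0.0, 1.0], [0.5, 2.0], [1.0, 3.0],    # frame at t = 0.0
                     [0.0, 4.0], [0.5, 5.0], [1.0, 6.0]])   # frame at t = 0.1
    print(get_var(fake, 0.1))                # -> [[4. 5. 6.]]
    print(get_var(fake, 0.1, location=0.5))  # -> [[5.]]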
python
# -*- coding: utf-8 -*- #""" # Created on Mon Oct 28 15:12:43 2013 # #@author: laure # # BROKEN : Doesn't work ########################## #""" # import sys # # import soma_workflow.constants as constants # from soma_workflow.test.job_tests.job_tests import JobTests # from soma_workflow.configuration import LIGHT_MODE # from soma_workflow.configuration import LOCAL_MODE # from soma_workflow.configuration import REMOTE_MODE # # # class MPIParallelJobTest(JobTests): # ''' # Submission of a parallel job (MPI) # ''' # allowed_resources = [LIGHT_MODE, LOCAL_MODE, REMOTE_MODE] # # def setUp(self): # self.my_jobs = [] # self.my_transfers = [] # self.node_num = 4 # info = self.job_examples.mpi_job_submission(node_num=self.node_num) # self.my_jobs.append(info[0]) # self.output_files = info[1] # # def tearDown(self): # super(MPIParallelJobTest, self).tearDown() # for file in self.output_files: # if os.path.isfile(file): os.remove(file) # # def test_result(self): # jobid = self.my_jobs[0] # self.wf_ctrl.wait_job(self.my_jobs) # # status = self.wf_ctrl.job_status(jobid) # self.failUnless(status == constants.DONE, # 'Job %s status after wait: %s' % (jobid, status)) # job_termination_status = self.wf_ctrl.job_termination_status(jobid) # exit_status = job_termination_status[0] # self.failUnless(exit_status == constants.FINISHED_REGULARLY, # 'Job %s exit status: %s' % (jobid, exit_status)) # exit_value = job_termination_status[1] # self.failUnless(exit_value == 0, # 'Job exit value: %d' % exit_value) # # sys.stdout.write("stdout: \n") # line = self.wf_ctrl.stdoutReadLine(jobid) # process_num = 1 # while line: # splitted_line = line.split() # if splitted_line[0] == "Greetings": # self.failUnless(line.rstrip() == "Greetings from process %d!" % # (process_num), # "stdout line: %sinstead of : " # "'Greetings from process %d!'" % # (line, process_num)) # process_num = process_num + 1 # line = self.wf_ctrl.stdoutReadLine(jobid) # # self.failUnless(process_num == self.node_num, # "%d process(es) run instead of %d." % # (process_num - 1, self.node_num)) # # # if __name__ == '__main__': # MPIParallelJobTest.run_test(debug=False) # sys.exit(0)
python
from matplotlib import colors import matplotlib.pyplot as plt from copy import deepcopy import numpy as np import matplotlib.gridspec as gridspec from scipy.interpolate import interp1d class TrianglePlot(object): _default_contour_colors = [(colors.cnames['darkslategrey'], colors.cnames['black'], 'k'), (colors.cnames['dodgerblue'], colors.cnames['blue'], 'k'), (colors.cnames['orchid'], colors.cnames['darkviolet'], 'k'), (colors.cnames['lightcoral'], colors.cnames['red'], 'k')] truth_color = 'g' spacing = np.array([0.1, 0.1, 0.05, 0.05, 0.2, 0.11]) spacing_scale = 1. _tick_rotation = 0 _color_eval = 0.9 show_intervals_68 = False def __init__(self, independent_likelihoods_list, param_ranges=None, cmap='gist_heat'): """ :param independent_likelihoods_list: a list of IndependentLikelihoods classes (see trikde.pdfs) :param cmap: name of the color map to use if not using filled contours :param custom_ticks: """ self.param_names = independent_likelihoods_list[0].param_names self._nchains = len(independent_likelihoods_list) if param_ranges is None: parameter_ranges = independent_likelihoods_list[0].param_ranges else: parameter_ranges = param_ranges if isinstance(parameter_ranges, list): self._prange_list = parameter_ranges self.parameter_ranges = {} for i, pname in enumerate(self.param_names): self.parameter_ranges.update({pname:parameter_ranges[i]}) elif isinstance(parameter_ranges, dict): self.parameter_ranges = parameter_ranges self._prange_list = [] for pi in self.param_names: self._prange_list.append(self.parameter_ranges[pi]) self._NDdensity_list = independent_likelihoods_list self.set_cmap(cmap) def _load_projection_1D(self, pname, idx): return self._NDdensity_list[idx].projection_1D(pname) def _load_projection_2D(self, p1, p2, idx): return self._NDdensity_list[idx].projection_2D(p1, p2) def set_cmap(self, newcmap, color_eval=0.9, marginal_col=None): self.cmap = newcmap self.cmap_call = plt.get_cmap(newcmap) self._color_eval = color_eval self._marginal_col = marginal_col def make_joint(self, p1, p2, contour_colors=None, levels=[0.05, 0.22, 1], filled_contours=True, contour_alpha=0.6, fig_size=8, label_scale=1, tick_label_font=12, xtick_label_rotate=0, show_contours=True): self.fig = plt.figure(1) self._init(fig_size) ax = plt.subplot(111) if contour_colors is None: contour_colors = self._default_contour_colors for i in range(self._nchains): axes = self._make_joint_i(p1, p2, ax, i, contour_colors=contour_colors, levels=levels, filled_contours=filled_contours, contour_alpha=contour_alpha, labsize=15*label_scale, tick_label_font=tick_label_font, xtick_label_rotate=xtick_label_rotate, show_contours=show_contours) return axes def make_triplot(self, contour_levels=[0.05, 0.22, 1], filled_contours=True, contour_alpha=0.6, fig_size=8, truths=None, contour_colors=None, axis_label_font=16, tick_label_font=12, xtick_label_rotate=0, show_contours=True, marginal_alpha=0.6, show_intervals=True, display_params=None): self.fig = plt.figure(1) self._init(fig_size) axes = [] counter = 1 if display_params is None: display_params = self.param_names n_subplots = len(display_params) gs1 = gridspec.GridSpec(n_subplots, n_subplots) gs1.update(wspace=0.15, hspace=0.15) for row in range(n_subplots): for col in range(n_subplots): axes.append(plt.subplot(gs1[counter-1])) counter += 1 if contour_colors is None: contour_colors = self._default_contour_colors self._auto_scale = [] for i in range(self._nchains): axes.append(self._make_triplot_i(axes, i, contour_colors, contour_levels, filled_contours, contour_alpha, 
fig_size, truths, tick_label_font=tick_label_font, xtick_label_rotate=xtick_label_rotate, axis_label_font=axis_label_font, cmap=self.cmap_call, show_contours=show_contours, marginal_alpha=marginal_alpha, show_intervals=show_intervals, display_params=display_params)) for key in display_params: max_h = [] for scale in self._auto_scale: max_h.append(scale[key][1]) plot_index = scale[key][0] max_h = max(max_h) axes[plot_index].set_ylim(0., 1.1 * max_h) self._auto_scale = [] plt.subplots_adjust(left=self.spacing[0] * self.spacing_scale, bottom=self.spacing[1] * self.spacing_scale, right=1 - self.spacing[2] * self.spacing_scale, top=1 - self.spacing[3] * self.spacing_scale, wspace=self.spacing[4] * self.spacing_scale, hspace=self.spacing[5] * self.spacing_scale) return axes def make_marginal(self, p1, contour_colors=None, levels=[0.05, 0.22, 1], filled_contours=True, contour_alpha=0.6, param_names=None, fig_size=8, truths=None, load_from_file=True, transpose_idx=None, bandwidth_scale=0.7, label_scale=1, cmap=None, xticklabel_rotate=0, bar_alpha=0.7, bar_colors=['k','m','g','r'], height_scale=1.1, show_low=False, show_high=False): self.fig = plt.figure(1) self._init(fig_size) ax = plt.subplot(111) self._auto_scale = [] if contour_colors is None: contour_colors = self._default_contour_colors self._auto_scale = [] for i in range(self._nchains): out = self._make_marginal_i(p1, ax, i, contour_colors, levels, filled_contours, contour_alpha, param_names, fig_size, truths, load_from_file=load_from_file, transpose_idx=transpose_idx, bandwidth_scale=bandwidth_scale, label_scale=label_scale, cmap=cmap, xticklabel_rotate=xticklabel_rotate, bar_alpha=bar_alpha, bar_color=bar_colors[i], show_low=show_low, show_high=show_high) scales = [] for c in range(0, self._nchains): scales.append(self._auto_scale[c][0]) maxh = np.max(scales) * height_scale ax.set_ylim(0, maxh) pmin, pmax = self._get_param_minmax(p1) asp = maxh * (pmax - pmin) ** -1 ax.set_aspect(asp ** -1) self._auto_scale = [] return out def _make_marginal_i(self, p1, ax, color_index, contour_colors=None, levels=[0.05, 0.22, 1], filled_contours=True, contour_alpha=0.6, param_names=None, fig_size=8, truths=None, labsize=15, tick_label_font=14, load_from_file=True, transpose_idx=None, bandwidth_scale=0.7, label_scale=None, cmap=None, xticklabel_rotate=0, bar_alpha=0.7, bar_color=None, show_low=False, show_high=False): autoscale = [] density = self._load_projection_1D(p1, color_index) xtick_locs, xtick_labels, xlabel, rotation = self.ticks_and_labels(p1) pmin, pmax = self._get_param_minmax(p1) coords = np.linspace(pmin, pmax, len(density)) bar_centers, bar_width, bar_heights = self._bar_plot_heights(density, coords, None) bar_heights *= np.sum(bar_heights) ** -1 * len(bar_centers) ** -1 autoscale.append(np.max(bar_heights)) max_idx = np.argmax(bar_heights) for i, y in enumerate(bar_heights): x1, x2 = bar_centers[i] - bar_width * .5, bar_centers[i] + bar_width * .5 ax.plot([x1, x2], [y, y], color=bar_color, alpha=bar_alpha) ax.fill_between([x1, x2], y, color=bar_color, alpha=0.6) ax.plot([x1, x1], [0, y], color=bar_color, alpha=bar_alpha) ax.plot([x2, x2], [0, y], color=bar_color, alpha=bar_alpha) ax.set_xlim(pmin, pmax) ax.set_yticks([]) mean_of_distribution, [low68, high68] = self._confidence_int(pmin, pmax, bar_centers, bar_heights, 1) mean_of_distribution, [low95, high95] = self._confidence_int(pmin, pmax, bar_centers, bar_heights, 2) mean_of_distribution = 0 for i in range(0, len(bar_heights)): mean_of_distribution += bar_heights[i] * bar_centers[i] / 
np.sum(bar_heights) if low95 is not None and show_low: ax.axvline(low95, color=bar_color, alpha=0.8, linewidth=2.5, linestyle='-.') if high95 is not None and show_high: ax.axvline(high95, color=bar_color, alpha=0.8, linewidth=2.5, linestyle='-.') ax.set_xticks(xtick_locs) ax.set_xticklabels(xtick_labels, fontsize=tick_label_font, rotation=xticklabel_rotate) if xlabel == r'$\frac{r_{\rm{core}}}{r_s}$': ax.set_xlabel(xlabel, fontsize=40 * label_scale) else: ax.set_xlabel(xlabel, fontsize=labsize * label_scale) if truths is not None: t = deepcopy(truths[p1]) if isinstance(t, float) or isinstance(t, int): pmin, pmax = self._get_param_minmax(p1) if t <= pmin: t = pmin * 1.075 ax.axvline(t, linestyle='--', color=self.truth_color, linewidth=3) elif isinstance(t, list): ax.axvspan(t[0], t[1], alpha=0.25, color=self.truth_color) self._auto_scale.append(autoscale) return ax def _make_joint_i(self, p1, p2, ax, color_index, contour_colors=None, levels=[0.05, 0.22, 1], filled_contours=True, contour_alpha=0.6, labsize=None, tick_label_font=None, xtick_label_rotate=None, show_contours=None): density = self._load_projection_2D(p1, p2, color_index) extent, aspect = self._extent_aspect([p1, p2]) pmin1, pmax1 = extent[0], extent[1] pmin2, pmax2 = extent[2], extent[3] xtick_locs, xtick_labels, xlabel, rotation = self.ticks_and_labels(p1) ytick_locs, ytick_labels, ylabel, _ = self.ticks_and_labels(p2) if filled_contours: coordsx = np.linspace(extent[0], extent[1], density.shape[0]) coordsy = np.linspace(extent[2], extent[3], density.shape[1]) ax.imshow(density, extent=extent, aspect=aspect, origin='lower', cmap=self.cmap, alpha=0) self._contours(coordsx, coordsy, density, ax, extent=extent, contour_colors=contour_colors[color_index], contour_alpha=contour_alpha, levels=levels) ax.set_xlim(pmin1, pmax1) ax.set_ylim(pmin2, pmax2) else: coordsx = np.linspace(extent[0], extent[1], density.shape[0]) coordsy = np.linspace(extent[2], extent[3], density.shape[1]) ax.imshow(density, origin='lower', cmap=self.cmap, alpha=1, vmin=0, vmax=np.max(density), aspect=aspect, extent=extent) if show_contours: self._contours(coordsx, coordsy, density, ax, extent=extent, filled_contours=False, contour_colors=contour_colors[color_index], contour_alpha=contour_alpha, levels=levels) ax.set_xlim(pmin1, pmax1) ax.set_ylim(pmin2, pmax2) ax.set_xticks(xtick_locs) ax.set_xticklabels(xtick_labels, fontsize=tick_label_font, rotation=xtick_label_rotate) ax.set_yticks(ytick_locs) ax.set_yticklabels(ytick_labels, fontsize=tick_label_font) if xlabel == r'$\frac{r_{\rm{core}}}{r_s}$': ax.set_xlabel(xlabel, fontsize=40) elif ylabel == r'$\frac{r_{\rm{core}}}{r_s}$': ax.set_ylabel(ylabel, fontsize=40) else: ax.set_xlabel(xlabel, fontsize=labsize) ax.set_ylabel(ylabel, fontsize=labsize) return ax def _make_triplot_i(self, axes, color_index, contour_colors=None, levels=[0.05, 0.22, 1], filled_contours=True, contour_alpha=0.6, fig_size=8, truths=None, tick_label_font=14, xtick_label_rotate=0, axis_label_font=None, cmap=None, show_contours=True, marginal_alpha=0.9, show_intervals=True, display_params=None): size_scale = len(display_params) * 0.1 + 1 self.fig.set_size_inches(fig_size * size_scale, fig_size * size_scale) marg_in_row, plot_index = 0, 0 n_subplots = len(display_params) self._reference_grid = None autoscale = {} self.triplot_densities = [] self.joint_names = [] row = 0 col = 0 for _ in range(n_subplots): marg_done = False for _ in range(n_subplots): if self.param_names[row] not in display_params: continue elif self.param_names[col] not in 
display_params: continue if col < marg_in_row: density = self._load_projection_2D(display_params[row], display_params[col], color_index) self.triplot_densities.append(density) self.joint_names.append(display_params[row]+'_'+display_params[col]) extent, aspect = self._extent_aspect([display_params[col], display_params[row]]) pmin1, pmax1 = extent[0], extent[1] pmin2, pmax2 = extent[2], extent[3] xtick_locs, xtick_labels, xlabel, rotation = self.ticks_and_labels(display_params[col]) ytick_locs, ytick_labels, ylabel, _ = self.ticks_and_labels(display_params[row]) if row == n_subplots - 1: axes[plot_index].set_xticks(xtick_locs) axes[plot_index].set_xticklabels(xtick_labels, fontsize=tick_label_font, rotation=xtick_label_rotate) if col == 0: axes[plot_index].set_yticks(ytick_locs) axes[plot_index].set_yticklabels(ytick_labels, fontsize=tick_label_font) axes[plot_index].set_ylabel(ylabel, fontsize=axis_label_font) else: axes[plot_index].set_yticks([]) axes[plot_index].set_yticklabels([]) axes[plot_index].set_xlabel(xlabel, fontsize=axis_label_font) elif col == 0: axes[plot_index].set_yticks(ytick_locs) axes[plot_index].set_yticklabels(ytick_labels, fontsize=tick_label_font) axes[plot_index].set_xticks([]) axes[plot_index].set_ylabel(ylabel, fontsize=axis_label_font) else: axes[plot_index].set_xticks([]) axes[plot_index].set_yticks([]) axes[plot_index].set_xticklabels([]) axes[plot_index].set_yticklabels([]) if filled_contours: coordsx = np.linspace(extent[0], extent[1], density.shape[0]) coordsy = np.linspace(extent[2], extent[3], density.shape[1]) axes[plot_index].imshow(density.T, extent=extent, aspect=aspect, origin='lower', cmap=self.cmap, alpha=0) self._contours(coordsx, coordsy, density.T, axes[plot_index], extent=extent, contour_colors=contour_colors[color_index], contour_alpha=contour_alpha, levels=levels) axes[plot_index].set_xlim(pmin1, pmax1) axes[plot_index].set_ylim(pmin2, pmax2) else: axes[plot_index].imshow(density.T, origin='lower', cmap=self.cmap, alpha=1, vmin=0, vmax=np.max(density), aspect=aspect, extent=extent) if show_contours: coordsx = np.linspace(extent[0], extent[1], density.shape[0]) coordsy = np.linspace(extent[2], extent[3], density.shape[1]) self._contours(coordsx, coordsy, density.T, axes[plot_index], filled_contours=False, extent=extent, contour_colors=contour_colors[color_index], contour_alpha=contour_alpha, levels=levels) axes[plot_index].set_xlim(pmin1, pmax1) axes[plot_index].set_ylim(pmin2, pmax2) axes[plot_index].set_xlim(pmin1, pmax1) axes[plot_index].set_ylim(pmin2, pmax2) if truths is not None: t1, t2 = truths[display_params[col]], truths[display_params[row]] axes[plot_index].scatter(t1, t2, color=self.truth_color, s=50) axes[plot_index].axvline(t1, linestyle='--', color=self.truth_color, linewidth=3) axes[plot_index].axhline(t2, linestyle='--', color=self.truth_color, linewidth=3) elif marg_in_row == col and marg_done is False: marg_done = True marg_in_row += 1 density = self._load_projection_1D(display_params[col], color_index) xtick_locs, xtick_labels, xlabel, rotation = self.ticks_and_labels(display_params[col]) pmin, pmax = self._get_param_minmax(display_params[col]) coords = np.linspace(pmin, pmax, len(density)) bar_centers, bar_width, bar_heights = self._bar_plot_heights(density, coords, None) bar_heights *= (np.sum(bar_heights) * len(bar_centers)) ** -1 autoscale[display_params[col]] = [plot_index, max(bar_heights)] for i, y in enumerate(bar_heights): x1, x2 = bar_centers[i] - bar_width * .5, bar_centers[i] + bar_width * .5 if filled_contours: 
axes[plot_index].plot([x1, x2], [y, y], color=contour_colors[color_index][1], alpha=1) axes[plot_index].fill_between([x1, x2], y, color=contour_colors[color_index][1], alpha=marginal_alpha) axes[plot_index].plot([x1, x1], [0, y], color=contour_colors[color_index][1], alpha=1) axes[plot_index].plot([x2, x2], [0, y], color=contour_colors[color_index][1], alpha=1) else: if self._marginal_col is None: marginal_col = cmap(self._color_eval) else: marginal_col = self._marginal_col axes[plot_index].plot([x1, x2], [y, y], color=marginal_col, alpha=1) axes[plot_index].fill_between([x1, x2], y, color=marginal_col, alpha=marginal_alpha) axes[plot_index].plot([x1, x1], [0, y], color=marginal_col, alpha=1) axes[plot_index].plot([x2, x2], [0, y], color=marginal_col, alpha=1) axes[plot_index].set_xlim(pmin, pmax) axes[plot_index].set_yticks([]) if show_intervals: mean_of_distribution, [low68, high68] = self._confidence_int(pmin, pmax, bar_centers, bar_heights,1) mean_of_distribution, [low95, high95] = self._confidence_int(pmin, pmax, bar_centers, bar_heights,2) if show_intervals and low95 is not None: axes[plot_index].axvline(low95, color=contour_colors[color_index][1], alpha=0.8, linewidth=2.5, linestyle='-.') if show_intervals and high95 is not None: axes[plot_index].axvline(high95, color=contour_colors[color_index][1], alpha=0.8, linewidth=2.5, linestyle='-.') if self.show_intervals_68 and low68 is not None: axes[plot_index].axvline(low68, color=contour_colors[color_index][1], alpha=0.8, linewidth=2.5, linestyle=':') if self.show_intervals_68 and high68 is not None: axes[plot_index].axvline(high68, color=contour_colors[color_index][1], alpha=0.8, linewidth=2.5, linestyle=':') if col != n_subplots - 1: axes[plot_index].set_xticks([]) else: axes[plot_index].set_xticks(xtick_locs) axes[plot_index].set_xticklabels(xtick_labels, fontsize=tick_label_font, rotation=xtick_label_rotate) axes[plot_index].set_xlabel(xlabel, fontsize=axis_label_font) if truths is not None: t = deepcopy(truths[display_params[col]]) pmin, pmax = self._get_param_minmax(display_params[col]) if isinstance(t, float) or isinstance(t, int): if t <= pmin: t_ = pmin * 1.075 else: t_ = t axes[plot_index].axvline(t_, linestyle='--', color=self.truth_color, linewidth=3) else: t_ = 0.5*(t[0] + t[1]) axes[plot_index].axvline(t_, linestyle='--', color=self.truth_color, linewidth=3) axes[plot_index].axvspan(t[0], t[1], color=self.truth_color, alpha=0.25) else: axes[plot_index].axis('off') plot_index += 1 col += 1 row += 1 col = 0 self._auto_scale.append(autoscale) def _confidence_int(self, pmin, pmax, centers, heights, num_sigma, thresh=None): centers = np.array(centers) heights = np.array(heights) heights *= np.max(heights) ** -1 prob_interp = interp1d(centers, heights, bounds_error=False, fill_value=0) samples = [] while len(samples)<10000: samp = np.random.uniform(pmin, pmax) prob = prob_interp(samp) u = np.random.uniform(0,1) if prob >= u: samples.append(samp) #print('num sigma:', num_sigma) mu, sigmas = compute_confidence_intervals(samples, num_sigma, thresh) return mu, [mu-sigmas[0], mu+sigmas[1]] def _extent_aspect(self, param_names): aspect = (self.parameter_ranges[param_names[0]][1] - self.parameter_ranges[param_names[0]][0]) * \ (self.parameter_ranges[param_names[1]][1] - self.parameter_ranges[param_names[1]][0]) ** -1 extent = [self.parameter_ranges[param_names[0]][0], self.parameter_ranges[param_names[0]][1], self.parameter_ranges[param_names[1]][0], self.parameter_ranges[param_names[1]][1]] return extent, aspect def _init(self, 
fig_size): self._tick_lab_font = 12 * fig_size * 7 ** -1 self._label_font = 15 * fig_size * 7 ** -1 plt.rcParams['axes.linewidth'] = 2.5 * fig_size * 7 ** -1 plt.rcParams['xtick.major.width'] = 2.5 * fig_size * 7 ** -1 plt.rcParams['xtick.major.size'] = 6 * fig_size * 7 ** -1 plt.rcParams['xtick.minor.size'] = 2 * fig_size * 7 ** -1 plt.rcParams['ytick.major.width'] = 2.5 * fig_size * 7 ** -1 plt.rcParams['ytick.major.size'] = 6 * fig_size * 7 ** -1 plt.rcParams['ytick.minor.size'] = 2 * fig_size * 7 ** -1 def _get_param_minmax(self, pname): ranges = self.parameter_ranges[pname] return ranges[0], ranges[1] def _get_param_inds(self, params): inds = [] for pi in params: for i, name in enumerate(self.param_names): if pi == name: inds.append(i) break return np.array(inds) def _bar_plot_heights(self, bar_heights, coords, rebin): if rebin is not None: new = [] if len(bar_heights) % rebin == 0: fac = int(len(bar_heights) / rebin) for i in range(0, len(bar_heights), fac): new.append(np.mean(bar_heights[i:(i + fac)])) bar_heights = np.array(new) else: raise ValueError('must be divisible by rebin.') bar_width = np.absolute(coords[-1] - coords[0]) * len(bar_heights) ** -1 bar_centers = [] for i in range(0, len(bar_heights)): bar_centers.append(coords[0] + bar_width * (0.5 + i)) integral = np.sum(bar_heights) * bar_width * len(bar_centers) ** -1 bar_heights = bar_heights * integral ** -1 return bar_centers, bar_width, bar_heights def _contours(self, x, y, grid, ax, linewidths=4, filled_contours=True, contour_colors='', contour_alpha=1., extent=None, levels=[0.05, 0.32, 1]): levels = np.array(levels) * np.max(grid) X, Y = np.meshgrid(x, y) if filled_contours: ax.contour(X, Y, grid, levels, extent=extent, colors=contour_colors, linewidths=linewidths, zorder=1, linestyles=['dashed', 'solid']) ax.contourf(X, Y, grid, [levels[0], levels[1]], colors=[contour_colors[0], contour_colors[1]], alpha=contour_alpha * 0.5, zorder=1, extent=extent) ax.contourf(X, Y, grid, [levels[1], levels[2]], colors=[contour_colors[1], contour_colors[2]], alpha=contour_alpha, zorder=1, extent=extent) else: ax.contour(X, Y, grid, extent=extent, colors=contour_colors, zorder=1, levels=levels, linewidths=linewidths) def ticks_and_labels(self, pname): rotation = self._tick_rotation decimals, nticks = auto_decimal_places(self.parameter_ranges[pname][0], self.parameter_ranges[pname][1]) tick_locs = np.round(np.linspace(self.parameter_ranges[pname][0], self.parameter_ranges[pname][1], nticks), decimals) tick_labels = tick_locs return tick_locs, tick_labels, pname, rotation def get_parameter_confidence_interval(self, parameter, clevel, chain_num=None, show_percentage=False, return_intervals=False, print_intervals=True, thresh=None): if print_intervals: print('parameter name: ', parameter) if thresh is None: if show_percentage: print('68% confidence intervals: \nformat: median (lower, upper) (-%, +%)\n') else: print('68% confidence intervals: \nformat: median (lower, upper) (param_min, param_max)\n') else: if show_percentage: print(str(100 * thresh) + '% confidence intervals: \nformat: median (lower, upper) (-%, +%)\n') else: print(str(100 * thresh) + '% confidence intervals: \nformat: median (lower, upper)\n') medians, uppers, lowers = [], [], [] for idx in range(0, self._nchains): if chain_num is not None: if idx != chain_num: continue samples = self._load_projection_1D(parameter, idx) pmin, pmax = self._get_param_minmax(parameter) coords = np.linspace(pmin, pmax, len(samples)) bar_centers, bar_widths, bar_heights = 
self._bar_plot_heights(samples, coords, None) median, [lower, upper] = self._confidence_int(pmin, pmax, bar_centers, bar_heights, clevel, thresh) #chain.append({''}) if print_intervals: print('SAMPLES ' + str(idx + 1) + ':') if show_percentage: print(str(median) + ' (' + str(lower) + ', ' + str(upper) + ')') else: print(str(median) + ' ('+str(lower)+', '+str(upper)+')') print('width: ', upper - lower) medians.append(median) uppers.append(upper) lowers.append(lower) if return_intervals: return (medians, uppers, lowers) else: return None def auto_decimal_places(param_min, param_max): nticks = 5 if param_min == 0: OM_low = -1 else: OM_low = int(np.log10(abs(param_min))) if param_max == 0: OM_high = -1 else: OM_high = int(np.log10(abs(param_max))) OM_min = min(OM_low, OM_high) if OM_min > 0: decimals = 0 else: decimals = abs(OM_min) + 2 dynamic_range = abs(OM_high - OM_low) if dynamic_range > 0: decimals += 0 else: decimals += 1 if decimals > 2: nticks -= 1 if decimals > 3: nticks -= 1 if decimals > 4: nticks -= 1 return decimals, nticks def compute_confidence_intervals_histogram(sample, num_sigma): """ computes the upper and lower sigma from the median value. This functions gives good error estimates for skewed pdf's :param sample: 1-D sample :return: median, lower_sigma, upper_sigma """ if num_sigma > 3: raise ValueError("Number of sigma-constraints restricted to three. %s not valid" % num_sigma) num = len(sample) median = np.median(sample) sorted_sample = np.sort(sample) num_threshold1 = int(round((num-1)*0.841345)) num_threshold2 = int(round((num-1)*0.977249868)) num_threshold3 = int(round((num-1)*0.998650102)) if num_sigma == 1: upper_sigma1 = sorted_sample[num_threshold1 - 1] lower_sigma1 = sorted_sample[num - num_threshold1 - 1] return median, [median-lower_sigma1, upper_sigma1-median] if num_sigma == 2: upper_sigma2 = sorted_sample[num_threshold2 - 1] lower_sigma2 = sorted_sample[num - num_threshold2 - 1] return median, [median-lower_sigma2, upper_sigma2-median] def compute_confidence_intervals(sample, num_sigma, thresh=None): """ computes the upper and lower sigma from the median value. This functions gives good error estimates for skewed pdf's :param sample: 1-D sample :return: median, lower_sigma, upper_sigma """ if thresh is not None and num_sigma > 3: raise ValueError("Number of sigma-constraints restricted to three. %s not valid" % num_sigma) num = len(sample) median = np.median(sample) sorted_sample = np.sort(sample) if thresh is None: num_threshold1 = int(round((num-1)*0.841345)) num_threshold2 = int(round((num-1)*0.977249868)) num_threshold3 = int(round((num-1)*0.998650102)) if num_sigma == 1: upper_sigma1 = sorted_sample[num_threshold1 - 1] lower_sigma1 = sorted_sample[num - num_threshold1 - 1] return median, [median-lower_sigma1, upper_sigma1-median] if num_sigma == 2: upper_sigma2 = sorted_sample[num_threshold2 - 1] lower_sigma2 = sorted_sample[num - num_threshold2 - 1] return median, [median-lower_sigma2, upper_sigma2-median] else: assert thresh <= 1 thresh = (1 + thresh)/2 num_threshold = int(round((num-1) * thresh)) upper = sorted_sample[num_threshold - 1] lower = sorted_sample[num - num_threshold - 1] return median, [median - lower, upper - median]
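# A self-contained sanity check of the module-level compute_confidence_intervals()
# helper defined above, run on a synthetic skewed sample (numbers are illustrative).
if __name__ == '__main__':
    sample = np.random.lognormal(mean=0.0, sigma=0.5, size=5000)
    median, (lo, hi) = compute_confidence_intervals(sample, num_sigma=1)
    print('median = %.3f, 68%% interval = (-%.3f, +%.3f)' % (median, lo, hi))
    # thresh replaces the sigma level with an explicit two-sided coverage fraction
    median, (lo95, hi95) = compute_confidence_intervals(sample, num_sigma=2, thresh=0.95)
    print('median = %.3f, 95%% interval = (-%.3f, +%.3f)' % (median, lo95, hi95))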
python
from django.apps import AppConfig class CoinapiConfig(AppConfig): name = 'coinapi'
python
import os import torch import numpy as np import torch.nn as nn # import torch.nn.functional as F import torch.distributed as dist import datetime import pandas as pd from asyncfeddr.utils.models import SimpleNetMNIST, SimpleNetFEMNIST from asyncfeddr.utils.serialization import ravel_model_params, unravel_model_params from asyncfeddr.utils.messaging import MessageCode, send_message import torch.optim as optim from asyncfeddr.optim.perturbed_sgd import PerturbedSGD import time import torchvision.models as models from asyncfeddr.utils.dataset import partition_dataset def extract_model(sender, message_code, parameter): if message_code == MessageCode.ParameterUpdate: return parameter, False elif message_code == MessageCode.Terminate: return parameter, True else: raise ValueError('undefined message code') def worker_main(args): trainloader, testloader = partition_dataset(args) torch.manual_seed(args.seed) if args.dataset == 'MNIST': model = SimpleNetMNIST() elif args.dataset == 'FEMNIST': model = SimpleNetFEMNIST() optimizer = PerturbedSGD(model.parameters(), lr=args.lr, mu=1.0/args.eta) alpha = args.alpha # train model.train() # model size model_size = ravel_model_params(model).numel() # communication buffer m_parameter = torch.zeros(ravel_model_params(model).numel() + 2) # FedDR local variables y_i = torch.zeros(model_size) x_hat = torch.zeros(model_size) x_i = ravel_model_params(model) criterion = nn.CrossEntropyLoss() while True: _ = dist.recv(tensor=m_parameter) latest_model, terminate = extract_model( int(m_parameter[0].item()), MessageCode(m_parameter[1].item()), m_parameter[2:]) if terminate: break # start local update start_time = datetime.datetime.now() # update y_i y_i = y_i + alpha*(latest_model - x_i) # update x_i optimizer.update_v_star(y_i) # loop over the dataset multiple times for epoch in range(args.epochs): for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = model(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # update x_i x_i = ravel_model_params(model) # update x_hat x_hat = 2*x_i - y_i end_time = datetime.datetime.now() training_time = (end_time - start_time).total_seconds() # add a delay if args.worker_max_delay > 0: time.sleep(args.worker_max_delay*(args.rank-1)/args.world_size) # sending parameters to server send_message(MessageCode.ParameterUpdate, x_hat) # finish training print('Rank {:2} Finished Training'.format(args.rank))
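
# Notes on the local update above (added commentary): each communication round the worker
# receives the latest server model, moves its local variable with
# y_i <- y_i + alpha * (latest_model - x_i), re-anchors PerturbedSGD at y_i via
# update_v_star, runs the local epochs to obtain the new x_i, and reports the reflected
# point x_hat = 2 * x_i - y_i back to the server.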
python
# This code is part of Qiskit. # # (C) Copyright IBM 2021. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """ Optional matplotlib helper functions """ import functools try: # pylint: disable = unused-import from matplotlib import pyplot HAS_MATPLOTLIB = True except ImportError: pyplot = None HAS_MATPLOTLIB = False def requires_matplotlib(func): """Decorator for functions requiring matplotlib""" @functools.wraps(func) def wrapped(*args, **kwargs): if not HAS_MATPLOTLIB: raise ImportError( f"{func} requires matplotlib to generate curve fit plot." ' Run "pip install matplotlib" before.' ) # Analysis/plotting is done in a separate thread (so it doesn't block the # main thread), but matplotlib doesn't support GUI mode in a child thread. # The code below switches to a non-GUI backend "Agg" when creating the # plot. An alternative is to run this in a separate process, but then # we'd need to deal with pickling issues. saved_backend = pyplot.get_backend() pyplot.switch_backend("Agg") try: ret_val = func(*args, **kwargs) finally: pyplot.switch_backend(saved_backend) return ret_val return wrapped
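

# Illustrative usage sketch (added example): how a plotting helper might opt in to the
# guard above. The function name and the figure contents are hypothetical; only the
# requires_matplotlib decorator comes from this module.
@requires_matplotlib
def _example_curve_plot(xdata, ydata):
    """Scatter raw curve data on the non-GUI backend and return the figure."""
    figure, axis = pyplot.subplots()
    axis.scatter(xdata, ydata)
    return figure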
python
"""Utility functions for commissioning tests.""" # STDLIB import os import sys from collections import Iterable # THIRD-PARTY import numpy as np import pytest from numpy.testing import assert_allclose # ASTROLIB try: import pysynphot as S from pysynphot.spparser import parse_spec as old_parse_spec except ImportError: HAS_PYSYNPHOT = False else: HAS_PYSYNPHOT = True # LOCAL from synphot import Observation from ..config import conf from ..spectrum import band from ..spparser import parse_spec use_pysynphot = pytest.mark.skipif('not HAS_PYSYNPHOT') # Currently, this is here because only commissioning tests are considered # slow. If there are slow tests in the core unit tests, we can move this # one level higher. try: slow = pytest.mark.skipif(not pytest.config.getoption('--slow'), reason='need --slow option to run') except AttributeError: # Not using pytest slow = pytest.mark.skipif(True, reason='need --slow option to run') __all__ = ['use_pysynphot', 'slow', 'count_outliers', 'CommCase', 'ThermCase'] def count_outliers(data, sigma=3.0): """Count outliers in given data. This is as defined in similar method in ``SpecCase`` in ``astrolib/pysynphot/from_commissioning/conv_base.py``. .. note:: This is not used but kept for reference. Parameters ---------- data : ndarray Result differences to be analyzed. sigma : float Values outside this number of sigma of std. dev. around mean are considered outliers. Returns ------- n_outliers : int Number of outlier data points. """ return np.count_nonzero(abs(data) > (data.mean() + sigma * data.std())) @use_pysynphot @slow @pytest.mark.remote_data class CommCase: """Base class for commissioning tests.""" obsmode = None # Observation mode string spectrum = None # SYNPHOT-like string to construct spectrum force = None # Default tables are the latest available as of 2016-07-25. tables = { 'graphtable': os.path.join('mtab$OLD_FILES', '07r1502mm_tmg.fits'), 'comptable': os.path.join('mtab$OLD_FILES', '07r1502nm_tmc.fits'), 'thermtable': 'mtab$tae17277m_tmt.fits'} def setup_class(self): """Subclass needs to define ``obsmode`` and ``spectrum`` class variables for this to work. """ if not HAS_PYSYNPHOT: raise ImportError( 'ASTROLIB PYSYNPHOT must be installed to run these tests') # Make sure both software use the same graph and component tables. conf.graphtable = self.tables['graphtable'] conf.comptable = self.tables['comptable'] conf.thermtable = self.tables['thermtable'] S.setref(graphtable=self.tables['graphtable'], comptable=self.tables['comptable'], thermtable=self.tables['thermtable']) # Construct spectra for both software. self.sp = parse_spec(self.spectrum) self.bp = band(self.obsmode) # Astropy version has no prior knowledge of instrument-specific # binset, so it has to be set explicitly. if hasattr(self.bp, 'binset'): self.obs = Observation(self.sp, self.bp, force=self.force, binset=self.bp.binset) else: self.obs = Observation(self.sp, self.bp, force=self.force) # Astropy version does not assume a default waveset # (you either have it or you don't). If there is no # waveset, no point comparing obs waveset against ASTROLIB. 
if self.sp.waveset is None or self.bp.waveset is None: self._has_obswave = False else: self._has_obswave = True self.spref = old_parse_spec(self.spectrum) self.bpref = S.ObsBandpass(self.obsmode) self.obsref = S.Observation(self.spref, self.bpref, force=self.force) # Ensure we are comparing in the same units self.bpref.convert(self.bp._internal_wave_unit.name) self.spref.convert(self.sp._internal_wave_unit.name) self.spref.convert(self.sp._internal_flux_unit.name) self.obsref.convert(self.obs._internal_wave_unit.name) self.obsref.convert(self.obs._internal_flux_unit.name) @staticmethod def _get_new_wave(sp): """Astropy version does not assume a default waveset (you either have it or you don't). This is a convenience method to duck-type ASTROLIB waveset behavior. """ wave = sp.waveset if wave is None: wave = conf.waveset_array else: wave = wave.value return wave def _assert_allclose(self, actual, desired, rtol=1e-07, atol=sys.float_info.min): """``assert_allclose`` only report percentage but we also want to know some extra info conveniently.""" if isinstance(actual, Iterable): ntot = len(actual) else: ntot = 1 n = np.count_nonzero( abs(actual - desired) > atol + rtol * abs(desired)) msg = (f'obsmode: {self.obsmode}\n' f'spectrum: {self.spectrum}\n' f'(mismatch {n}/{ntot})') assert_allclose(actual, desired, rtol=rtol, atol=atol, err_msg=msg) # TODO: Confirm whether non-default atol is acceptable. # Have to use this value to avoid AssertionError for very # small non-zero flux values like 1.8e-26 to 2e-311. def _compare_nonzero(self, new, old, thresh=0.01, atol=1e-29): """Compare normally when results from both are non-zero.""" i = (new != 0) & (old != 0) # Make sure non-zero atol is not too high, otherwise just let it fail. if atol > (thresh * min(new.max(), old.max())): atol = sys.float_info.min self._assert_allclose(new[i], old[i], rtol=thresh, atol=atol) def _compare_zero(self, new, old, thresh=0.01): """Special handling for comparison when one of the results is zero. This is because ``rtol`` will not work.""" i = ((new == 0) | (old == 0)) & (new != old) try: self._assert_allclose(new[i], old[i], rtol=thresh) except AssertionError as e: pytest.xfail(str(e)) # TODO: Will revisit later def test_band_wave(self, thresh=0.01): """Test bandpass waveset.""" wave = self._get_new_wave(self.bp) self._assert_allclose(wave, self.bpref.wave, rtol=thresh) def test_spec_wave(self, thresh=0.01): """Test source spectrum waveset.""" wave = self._get_new_wave(self.sp) # TODO: Failure due to different wavesets for blackbody; Ignore? try: self._assert_allclose(wave, self.spref.wave, rtol=thresh) except (AssertionError, ValueError): self._has_obswave = False # Skip obs waveset tests if 'bb(' in self.spectrum: pytest.xfail('Blackbody waveset implementations are different') elif 'unit(' in self.spectrum: pytest.xfail('Flat does not use default waveset anymore') else: raise def test_obs_wave(self, thresh=0.01): """Test observation waveset.""" if not self._has_obswave: # Nothing to test return # Native wave = self.obs.waveset.value # TODO: Failure due to different wavesets for blackbody; Ignore? 
try: self._assert_allclose(wave, self.obsref.wave, rtol=thresh) except (AssertionError, ValueError): if 'bb(' in self.spectrum: pytest.xfail('Blackbody waveset implementations are different') elif 'unit(' in self.spectrum: self._has_obswave = False # Skip binned flux test pytest.xfail('Flat does not use default waveset anymore') else: raise # Binned binset = self.obs.binset.value self._assert_allclose(binset, self.obsref.binwave, rtol=thresh) @pytest.mark.parametrize('thrutype', ['zero', 'nonzero']) def test_band_thru(self, thrutype, thresh=0.01): """Test bandpass throughput, which is always between 0 and 1.""" wave = self.bpref.wave thru = self.bp(wave).value if thrutype == 'zero': self._compare_zero(thru, self.bpref.throughput, thresh=thresh) else: # nonzero self._compare_nonzero(thru, self.bpref.throughput, thresh=thresh) @pytest.mark.parametrize('fluxtype', ['zero', 'nonzero']) def test_spec_flux(self, fluxtype, thresh=0.01): """Test flux for source spectrum in PHOTLAM.""" wave = self.spref.wave flux = self.sp(wave).value if fluxtype == 'zero': self._compare_zero(flux, self.spref.flux, thresh=thresh) else: # nonzero self._compare_nonzero(flux, self.spref.flux, thresh=thresh) @pytest.mark.parametrize('fluxtype', ['zero', 'nonzero']) def test_obs_flux(self, fluxtype, thresh=0.01): """Test flux for observation in PHOTLAM.""" wave = self.obsref.wave flux = self.obs(wave).value # Native if fluxtype == 'zero': self._compare_zero(flux, self.obsref.flux, thresh=thresh) else: # nonzero self._compare_nonzero(flux, self.obsref.flux, thresh=thresh) if not self._has_obswave: # Do not compare binned flux return # Binned (cannot be resampled) binflux = self.obs.binflux.value if fluxtype == 'zero': self._compare_zero(binflux, self.obsref.binflux, thresh=thresh) else: # nonzero try: self._compare_nonzero(binflux, self.obsref.binflux, thresh=thresh) except AssertionError as e: if 'unit(' in self.spectrum: pytest.xfail('Flat does not use default waveset anymore:\n' f'{repr(e)}') else: raise def test_countrate(self, thresh=0.01): """Test observation countrate calculations.""" ans = self.obsref.countrate() # Astropy version does not assume a default area. val = self.obs.countrate(conf.area).value self._assert_allclose(val, ans, rtol=thresh) def test_efflam(self, thresh=0.01): """Test observation effective wavelength.""" ans = self.obsref.efflam() val = self.obs.effective_wavelength().value self._assert_allclose(val, ans, rtol=thresh) def teardown_class(self): """Reset config for both software.""" for cfgname in self.tables: conf.reset(cfgname) S.setref() class ThermCase(CommCase): """Commissioning tests with thermal component.""" @pytest.mark.parametrize('fluxtype', ['zero', 'nonzero']) def test_therm_spec(self, fluxtype, thresh=0.01): """Test bandpass thermal spectrum.""" thspref = self.bpref.obsmode.ThermalSpectrum() thsp = self.bp.obsmode.thermal_spectrum() # Make sure comparing same units thspref.convert(thsp._internal_wave_unit.name) thspref.convert(thsp._internal_flux_unit.name) # waveset not expected to be same here, so just compare flux flux = thsp(thspref.wave).value if fluxtype == 'zero': self._compare_zero(flux, thspref.flux, thresh=thresh) else: # nonzero # TODO: Is the refactored version really better? 
try: self._compare_nonzero(flux, thspref.flux, thresh=thresh) except AssertionError: pytest.xfail('New thermal spectrum samples better') def test_thermback(self, thresh=0.01): """Test bandpass thermal background.""" ans = self.bpref.thermback() val = self.bp.thermback().value self._assert_allclose(val, ans, rtol=thresh)
python
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 19:11:30 2019

@author: wenbin
"""

"""
Implement a data structure that supports the following operations: push, pop,
reading the top element, checking whether the stack is empty, and getting the
number of elements in the stack.
Linked-list implementation of a stack.
"""

class LNode:
    def __init__(self , x = 0 , y = None):
        self.Data = x
        self.Next = y

class MyStack:
    def __init__(self):
        self.Data = None
        self.Next = None

    # Return True if the stack is empty, otherwise return False
    def empty(self):
        if self.Next == None:
            return True
        else:
            return False

    # Return the number of elements in the stack
    def size(self):
        size = 0
        p = self.Next
        while p != None:
            p = p.Next
            size += 1
        return size

    # Push an element onto the stack
    def push(self , e):
        p = LNode(x = e , y = self.Next)
        self.Next = p

    # Pop the top element off the stack
    def pop(self):
        tmp = self.Next
        if tmp != None:
            self.Next = tmp.Next
            return tmp.Data
        else:
            print("Stack is empty!")
            return None

    # Return the top element without removing it
    def top(self):
        if self.Next != None:
            return self.Next.Data
        else:
            print("Stack is empty!")
            return None

if __name__ == "__main__":
    stack = MyStack()
    stack.push(5)
    stack.push(3)
    print("Top element:" , stack.top())
    print("Stack size:" , stack.size())
    x = stack.pop()
    print("pop successfully! The element is : " , x)
    x = stack.pop()
    print("pop successfully! The element is : " , x)
    x = stack.pop()
python
# Digitar algorithm for plucked-string synthesis # Demo with "Frere Jacques" # Abe Karplus, 2016 import wave import array sampling = 48e3 # Hz bpm = 100 notenames = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11} def notepitch(n): step = notenames[n[0]] octind = 2 if n[1] == '#': step += 1 elif n[1] == 'b': step -= 1 else: octind = 1 octv = int(n[octind:]) exp = 12*octv+step-57 return 440 * 2**(exp/12) def lerp(tbl, phase): whole, frac = phase >> 16, phase & 0xFFFF x0 = tbl[whole] x1 = tbl[(whole+1)&0xFF] return ((x0 * ((1<<16)-frac)) + (x1 * frac))>>16 def randwords(): y = 2463534242 while True: y ^= (y << 13) & 0xFFFFFFFF y ^= (y >> 17) y ^= (y << 5) & 0xFFFFFFFF yield (y & 0xFFFF) - 32768 yield (y >> 16) - 32768 rw = randwords() def pluck(note, dur): out = [] tbl = [next(rw)//4 for n in range(256)] phase = 0 pos = 0 inc = int(round(notepitch(note)*2**24/sampling)) for n in range(int(dur*sampling)): tbl[pos] = (tbl[pos] + tbl[(pos-1)&0xFF])//2 pos += 1 pos &= 0xFF out.append(lerp(tbl, phase)) phase += inc phase &= 0xFFFFFF return out crochet = 60/bpm song = [] songdur = 0.0 dampfrac = 1/8 def addnotes(notes, tm): global songdur for n in notes: song.append((n, songdur, tm*(1-dampfrac), tm*dampfrac)) songdur += tm def quarter(notes): addnotes(notes, crochet) def eighth(notes): addnotes(notes, crochet/2) def half(notes): addnotes(notes, crochet*2) quarter(['F3']) quarter(['G3']) quarter(['A3']) quarter(['F3']) quarter(['F3']) quarter(['G3']) quarter(['A3']) quarter(['F3']) quarter(['A3']) quarter(['B3']) half(['C4']) quarter(['A3']) quarter(['B3']) half(['C4']) eighth(['C4']) eighth(['D4']) eighth(['C4']) eighth(['B3']) quarter(['A3']) quarter(['F3']) eighth(['C4']) eighth(['D4']) eighth(['C4']) eighth(['B3']) quarter(['A3']) quarter(['F3']) quarter(['F3']) quarter(['C3']) half(['F3']) quarter(['F3']) quarter(['C3']) half(['F3']) with wave.open('pluck.wav', 'wb') as f: f.setnchannels(1) f.setsampwidth(2) f.setframerate(sampling) out = array.array('h', [0]*int(sampling*songdur)) for note, start, dur, damp in song: buf = pluck(note, dur+damp) for n in range(int(dur*sampling)): out[n+int(start*sampling)] += buf[n] for n in range(int(dur*sampling), int((dur+damp)*sampling)): out[n+int(start*sampling)] += int(buf[n]*((dur+damp)*sampling-n)/(damp*sampling)) f.writeframes(array.array('h', out))
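
# Notes on the synthesis above (added commentary, not part of the original demo):
# - notepitch() maps scientific pitch names to Hz on an equal-tempered scale anchored at
#   A4 = 440 Hz, e.g. notepitch('A4') -> 440.0, notepitch('C4') -> ~261.63,
#   notepitch('F3') -> ~174.61.
# - pluck() is the Karplus-Strong loop: a 256-sample noise burst is repeatedly averaged
#   with its neighbouring sample (the low-pass "string" decay) while lerp() resamples the
#   table with a 16-bit fractional phase whose increment sets the pitch.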
python
from django.test import SimpleTestCase from corehq.apps.app_manager.xpath import ( CaseSelectionXPath, CaseTypeXpath, LedgerdbXpath, XPath, ) class XPathTest(SimpleTestCase): def test_paren(self): xp = XPath('/data/q1') self.assertEqual('/data/q1', xp.paren()) self.assertEqual('(/data/q1)', xp.paren(force=True)) self.assertEqual('(/data/q1)', XPath('/data/q1', compound=True).paren()) def test_slash(self): self.assertEqual('/data/1/2', XPath().slash('/data').slash('1').slash('2')) self.assertEqual('/data/1/2', XPath('/data').slash('1').slash('2')) def test_select(self): self.assertEqual("/data/1[anything]", XPath('/data/1').select_raw('anything')) self.assertEqual("/data/1[a='b']", XPath('/data/1').select('a', 'b')) self.assertEqual("/data/1[a=/data/b]", XPath('/data/1').select('a', XPath('/data/b'))) def test_count(self): self.assertEqual('count(/data/a)', XPath('/data/a').count()) def test_eq_neq(self): self.assertEqual('a = b', XPath('a').eq('b')) self.assertEqual('a != b', XPath('a').neq('b')) def test_if(self): self.assertEqual('if(a, b, c)', XPath.if_('a', 'b', 'c')) def test_and_or(self): self.assertEqual('a and b and c', XPath.and_('a', 'b', 'c')) self.assertEqual('a and (b and c)', XPath.and_('a', XPath.and_('b', 'c'))) self.assertEqual('a or b or c', XPath.or_('a', 'b', 'c')) self.assertEqual('(a or b) or c', XPath.or_(XPath.or_('a', 'b'), XPath('c'))) def test_not(self): self.assertEqual('not a', XPath.not_('a')) self.assertEqual('not (a or b)', XPath.not_(XPath.or_('a', 'b'))) def test_date(self): self.assertEqual('date(a)', XPath.date('a')) def test_int(self): self.assertEqual('int(a)', XPath.int('a')) def test_complex(self): xp = XPath.and_( XPath('a').eq('1'), XPath('b').neq(XPath.string('')), XPath.or_( XPath('c').eq(XPath.string('')), XPath.date('d').neq('today()') )) self.assertEqual("a = 1 and b != '' and (c = '' or date(d) != today())", xp) class CaseSelectionXPathTests(SimpleTestCase): def setUp(self): self.select_by_water = CaseSelectionXPath("'black'") self.select_by_water.selector = 'water' def test_case(self): self.assertEqual( self.select_by_water.case(), "instance('casedb')/casedb/case[water='black']" ) def test_instance_name(self): self.assertEqual( self.select_by_water.case(instance_name='doobiedb'), "instance('doobiedb')/doobiedb/case[water='black']" ) def test_case_name(self): self.assertEqual( self.select_by_water.case(instance_name='doobiedb', case_name='song'), "instance('doobiedb')/doobiedb/song[water='black']" ) def test_case_type(self): self.assertEqual( CaseTypeXpath('song').case(), "instance('casedb')/casedb/case[@case_type='song']" ) def test_ledger(self): self.assertEqual( LedgerdbXpath('ledger_id').ledger(), "instance('ledgerdb')/ledgerdb/ledger[@entity-id=instance('commcaresession')/session/data/ledger_id]" )
python
__version__ = "1.2.0" from .utils import drawLandmark_multiple, detection_adapter, bbox_from_pts, Aligner from .fast_alignment import * from .face_detection import * from .face_reconstruction import *
python
import unittest class TestTransition(unittest.TestCase): @unittest.skip("") def test___init__(self): # transition = Transition(type_, element, nuclear_charge, charge, wavelength, temperature, density, pec) assert False # TODO: implement your test here @unittest.skip("") def test_energy(self): # transition = Transition(type_, element, nuclear_charge, charge, wavelength, temperature, density, pec) # self.assertEqual(expected, transition.energy()) assert False # TODO: implement your test here @unittest.skip("") def test_interpolate(self): # transition = Transition(type_, element, nuclear_charge, charge, wavelength, temperature, density, pec) # self.assertEqual(expected, transition.interpolate(temperature_grid, density_grid)) assert False # TODO: implement your test here class TestTransitionPool(unittest.TestCase): @unittest.skip("") def test___init__(self): # transition_pool = TransitionPool(transitions) assert False # TODO: implement your test here @unittest.skip("") def test___iter__(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.__iter__()) assert False # TODO: implement your test here @unittest.skip("") def test_append_file(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.append_file(filename)) assert False # TODO: implement your test here @unittest.skip("") def test_append_files(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.append_files(files)) assert False # TODO: implement your test here @unittest.skip("") def test_coeffs(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.coeffs()) assert False # TODO: implement your test here @unittest.skip("") def test_create_atomic_data(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.create_atomic_data(ad)) assert False # TODO: implement your test here @unittest.skip("") def test_energies(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.energies()) assert False # TODO: implement your test here @unittest.skip("") def test_filter_energy(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.filter_energy(lo, hi, unit)) assert False # TODO: implement your test here @unittest.skip("") def test_filter_type(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.filter_type(*type_names)) assert False # TODO: implement your test here @unittest.skip("") def test_from_adf15(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.from_adf15(files)) assert False # TODO: implement your test here @unittest.skip("") def test_interpolate(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.interpolate(temperature_grid, density_grid)) assert False # TODO: implement your test here @unittest.skip("") def test_size(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.size()) assert False # TODO: implement your test here @unittest.skip("") def test_sum_transitions(self): # transition_pool = TransitionPool(transitions) # self.assertEqual(expected, transition_pool.sum_transitions()) assert False # TODO: implement your test here @unittest.skip("") def test_wavelengths(self): # transition_pool = TransitionPool(transitions) # 
self.assertEqual(expected, transition_pool.wavelengths()) assert False # TODO: implement your test here class TestPBremsstrahlung(unittest.TestCase): @unittest.skip("") def test_p_bremsstrahlung(self): # self.assertEqual(expected, P_bremsstrahlung(k, Te, ne)) assert False # TODO: implement your test here class TestCoefficientFactory(unittest.TestCase): @unittest.skip("") def test___init__(self): # coefficient_factory = CoefficientFactory(atomic_data, transition_pool, clip_limit) assert False # TODO: implement your test here @unittest.skip("") def test_create(self): # coefficient_factory = CoefficientFactory(atomic_data, transition_pool, clip_limit) # self.assertEqual(expected, coefficient_factory.create(temperature_grid, density_grid)) assert False # TODO: implement your test here if __name__ == '__main__': unittest.main()
python
from django.test import TestCase from meadow.models import Book from meadow.tests.factories.book import BookFactory from meadow.utils.book_searcher import book_preview, search_by_title class BookPreviewTestCase(TestCase): def test_book_preview_book_exists(self): some_book = BookFactory() result = book_preview(some_book.id) self.assertEqual(result["title"], some_book.title) self.assertEqual(result["description"], some_book.description) self.assertEqual(result["author"]["first_name"], some_book.author.first_name) self.assertEqual(result["author"]["last_name"], some_book.author.last_name) def test_book_preview_book_doesnot_exist(self): some_book = BookFactory() # there is definitely no book with invalid_id in the DB invalid_id = some_book.id + 1 # the function should raise an exception if the id is invalid with self.assertRaises(Book.DoesNotExist): book_preview(invalid_id) class BookSearchTestCase(TestCase): def test_search_empty_title(self): books = [BookFactory() for _ in range(5)] title = "" result = search_by_title(title) self.assertEqual(len(books), len(result)) def test_search_some_unique_title(self): books = [BookFactory() for _ in range(5)] book_to_search = books[1] title = book_to_search.title result = search_by_title(title) self.assertEqual(len(result), 1) self.assertEqual(result[0].title, title) def test_search_title_doesnot_exist(self): [BookFactory() for _ in range(5)] title = "Some cook title which doesn't exist in DB" result = search_by_title(title) self.assertEqual(result, [])
python
def f(): pass a = f() b = f() c = f() str
python
from itertools import islice from queue import Queue from typing import Iterator import numpy as np def limited_queue_iterator(queue: Queue, max_num_elements: int) -> Iterator: """Construct an iterator from a queue. The iterator will stop after max_num_elements.""" for _ in range(max_num_elements): yield queue.get() def sampled_iterator(input_iter, num_elements: int, sampling_rate: float): if sampling_rate == 1.0: yield from islice(input_iter, num_elements) else: num_taken = 0 for element in input_iter: if np.random.rand() < sampling_rate: yield element num_taken += 1 if num_taken >= num_elements: break
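

# Illustrative usage sketch (added example): sampling from an unbounded iterator. The
# counting source below is arbitrary and only demonstrates the stopping behaviour.
if __name__ == "__main__":
    from itertools import count

    for value in sampled_iterator(count(), num_elements=5, sampling_rate=0.25):
        print(value)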
python
""" This module illustrates code that accepts a single integer, a character, and an uppercase flag as positional arguments and print this character 'n' amount of times. If the uppercase flag is set to true, it prints uppercased. """ import argparse def main(character, number): print (character * number) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('number', type=int, help='A number') parser.add_argument('-c', type=str, help='Character to print (defaults to #)', default='#') parser.add_argument('-U', action='store_true', default=False, dest='uppercase', help='Uppercase the character (defaults to False)') args = parser.parse_args() if args.uppercase: args.c = args.c.upper() main(args.c, args.number)
python
""" Export module """ import os import os.path import sqlite3 import sys import regex as re # pylint: disable=E0611 # Defined at runtime from .index import Index class Export: """ Exports database rows into a text file line-by-line. """ @staticmethod def stream(dbfile, output): """ Iterates over each row in dbfile and writes text to output file Args: dbfile: SQLite file to read output: output file to store text """ with open(output, "w", encoding="utf-8") as out: # Connection to database file db = sqlite3.connect(dbfile) cur = db.cursor() # Get all indexed text cur.execute(Index.SECTION_QUERY) count = 0 for _, name, text in cur: if not name or not re.search(Index.SECTION_FILTER, name.lower()): count += 1 if count % 1000 == 0: print(f"Streamed {count} documents", end="\r") # Write row if text: out.write(text + "\n") print(f"Iterated over {count} total rows") # Free database resources db.close() @staticmethod def run(output, path): """ Exports data from database to text file, line by line. Args: output: output file path path: model path, if None uses default path """ # Derive path to dbfile dbfile = os.path.join(path, "articles.sqlite") # Stream text from database to file Export.stream(dbfile, output) if __name__ == "__main__": # Export data Export.run(sys.argv[1], sys.argv[2] if len(sys.argv) > 2 else None)
python
#!/usr/bin/env python import argparse import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional try: import comet_ml use_tensorboard = False except ImportError: use_tensorboard = True import datasets import numpy as np import torch import transformers from datasets import concatenate_datasets, load_dataset, load_metric from scipy.stats import entropy from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: Optional[str] = field( default=None, metadata={ "help": "The name of the task to train on: " + ", ".join(task_to_keys.keys()) }, ) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}, ) dataset_config_name: Optional[str] = field( default=None, metadata={ "help": "The configuration name of the dataset to use (via the datasets library)." }, ) max_seq_length: int = field( default=128, metadata={ "help": "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}, ) pad_to_max_length: bool = field( default=True, metadata={ "help": "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." 
}, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."}, ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the test data."}, ) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError( "Unknown task, you should pick one in " + ",".join(task_to_keys.keys()) ) elif self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError( "Need either a GLUE task, a training/validation file or a dataset name." ) else: train_extension = self.train_file.split(".")[-1] assert train_extension in [ "csv", "json", ], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={ "help": "Path to pretrained model or model identifier from huggingface.co/models" } ) config_name: Optional[str] = field( default=None, metadata={ "help": "Pretrained config name or path if not the same as model_name" }, ) tokenizer_name: Optional[str] = field( default=None, metadata={ "help": "Pretrained tokenizer name or path if not the same as model_name" }, ) cache_dir: Optional[str] = field( default=None, metadata={ "help": "Where do you want to store the pretrained models downloaded from huggingface.co" }, ) use_fast_tokenizer: bool = field( default=True, metadata={ "help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not." }, ) model_revision: str = field( default="main", metadata={ "help": "The specific model version to use (can be a branch name, tag name or commit id)." }, ) use_auth_token: bool = field( default=False, metadata={ "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " "with private models)." }, ) def _train(raw_datasets, args_dict=None): """Reference: https://github.com/huggingface/transformers/blob/master/examples/pytorch/text-classification/run_glue.py""" parser = HfArgumentParser( (ModelArguments, DataTrainingArguments, TrainingArguments) ) if args_dict is not None: model_args, data_args, training_args = parser.parse_dict(args_dict) elif len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1]) ) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if ( os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif ( last_checkpoint is not None and training_args.resume_from_checkpoint is None ): logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = raw_datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = raw_datasets["train"].features["label"].dtype in [ "float32", "float64", ] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Preprocessing the raw_datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [ name for name in raw_datasets["train"].column_names if name != "label" ] if ( "sentence1" in non_label_column_names and "sentence2" in non_label_column_names ): sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): label_to_id = { i: int(label_name_to_id[label_list[i]]) for i in range(num_labels) } else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." "\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if label_to_id is not None: model.config.label2id = label_to_id model.config.id2label = {id: label for label, id in config.label2id.items()} elif data_args.task_name is not None and not is_regression: model.config.label2id = {l: i for i, l in enumerate(label_list)} model.config.id2label = {id: label for label, id in config.label2id.items()} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer( *args, padding=padding, max_length=max_seq_length, truncation=True ) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and "label" in examples: result["label"] = [ (label_to_id[l] if l != -1 else -1) for l in examples["label"] ] return result with training_args.main_process_first(desc="dataset map pre-processing"): raw_datasets = raw_datasets.map( preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if ( "validation" not in raw_datasets and "validation_matched" not in raw_datasets ): raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets[ "validation_matched" if data_args.task_name == "mnli" else "validation" ] if data_args.max_eval_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) if ( training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None ): if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets[ "test_matched" if data_args.task_name == "mnli" else "test" ] if data_args.max_predict_samples is not None: predict_dataset = predict_dataset.select( range(data_args.max_predict_samples) ) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Get the metric function if data_args.task_name is not None: metric = load_metric("glue", data_args.task_name) else: metric = load_metric("accuracy") # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result elif is_regression: return {"mse": ((preds - p.label_ids) ** 2).mean().item()} else: return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics_prefix = f"train_size_{min(max_train_samples, len(train_dataset))}_4e_all" if trainer.is_world_process_zero() and not use_tensorboard: experiment = comet_ml.config.get_global_experiment() if experiment is not None: experiment.set_name(metrics_prefix) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics(metrics_prefix + "_train_metrics", metrics) trainer.save_metrics(metrics_prefix + "_train_metrics", metrics) trainer.save_state() # Evaluation evaluation_metrics = {} if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") eval_datasets.append(raw_datasets["validation_mismatched"]) for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_eval_samples = ( data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) ) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics(metrics_prefix + "eval_metrics", metrics) trainer.save_metrics(metrics_prefix + "eval_metrics", metrics) evaluation_metrics = metrics test_predictions = None if training_args.do_predict: logger.info("*** Predict ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] predict_datasets = [predict_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") predict_datasets.append(raw_datasets["test_mismatched"]) for predict_dataset, task in zip(predict_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. 
            predict_dataset = predict_dataset.remove_columns("label")
            test_predictions = trainer.predict(
                predict_dataset, metric_key_prefix=metrics_prefix + "_predict_metrics"
            ).predictions

    return evaluation_metrics, test_predictions


def run_on_all_train_set(hf_args, raw_datasets):
    evaluation_metrics, _ = _train(raw_datasets, args_dict=hf_args)


def _calculate_entropy(logits):
    probas = torch.nn.Softmax(dim=1)(torch.from_numpy(logits))
    samples_entropy = entropy(probas.transpose(0, 1).cpu())
    samples_entropy = torch.from_numpy(samples_entropy)
    return samples_entropy


def _ask_oracle(unlabled_samples):
    # In our example, the original dataset is already labeled.
    # If you have a truly unlabeled dataset, this is where you would send the data to a labeling platform.
    return unlabled_samples


def run_active_learning(
    hf_args, raw_datasets, target_score, initial_train_dataset_size, query_samples_count
):
    original_train_dataset = raw_datasets["train"]
    train_dataset = original_train_dataset.select(
        random.sample(
            range(original_train_dataset.num_rows),
            int(original_train_dataset.num_rows * initial_train_dataset_size),
        )
    )
    # fake unlabeled dataset
    unlabeled_dataset = original_train_dataset.filter(
        lambda s: s["idx"] not in train_dataset["idx"]
    )

    raw_datasets["train"] = train_dataset
    raw_datasets["test"] = unlabeled_dataset
    hf_args["do_predict"] = True

    current_score = -1
    while unlabeled_dataset.num_rows > 0 and current_score < target_score:
        logger.info(f'Training using {raw_datasets["train"].num_rows}')
        evaluation_metrics, test_predictions = _train(raw_datasets, args_dict=hf_args)
        current_score = evaluation_metrics["eval_combined_score"]

        samples_entropy = _calculate_entropy(test_predictions)
        samples_entropy = torch.topk(samples_entropy, query_samples_count)

        new_train_samples = unlabeled_dataset.select(samples_entropy.indices.tolist())
        new_train_samples = _ask_oracle(new_train_samples)
        extended_train_dataset = concatenate_datasets(
            [raw_datasets["train"], new_train_samples],
            info=original_train_dataset.info,
        )

        unlabeled_dataset = original_train_dataset.filter(
            lambda s: s["idx"] not in extended_train_dataset["idx"]
        )

        raw_datasets["train"] = extended_train_dataset
        raw_datasets["test"] = unlabeled_dataset


def main(
    task_name,
    do_al,
    random_seed,
    target_score=None,
    initial_train_dataset_size=None,
    query_samples_count=None,
    epochs=3,
    batch_size=32,
):
    random.seed(random_seed)

    raw_datasets = load_dataset("glue", task_name)

    hf_args = {
        "model_name_or_path": "bert-base-cased",
        "task_name": task_name,
        "do_train": True,
        "do_eval": True,
        "max_seq_length": 128,
        "per_device_train_batch_size": batch_size,
        "per_device_eval_batch_size": batch_size,
        "learning_rate": 2e-5,
        "overwrite_output_dir": True,
        "output_dir": f"/tmp/{task_name}/",
        "logging_strategy": "steps",
        "logging_steps": 50,
        "evaluation_strategy": "steps",
        "eval_steps": 50,
        "seed": 12,
        "max_steps": int((raw_datasets["train"].num_rows / batch_size) * epochs),
    }
    # Using max_steps instead of epochs so that all active-learning experiments run the
    # same number of iterations.

    # Report to TensorBoard when comet_ml is not available.
    if use_tensorboard:
        hf_args.update(
            {
                "logging_dir": f"/tmp/{task_name}/tensorboard",
                "report_to": "tensorboard",
            }
        )

    if not do_al:
        run_on_all_train_set(hf_args, raw_datasets)
    else:
        run_active_learning(
            hf_args,
            raw_datasets,
            target_score,
            initial_train_dataset_size,
            query_samples_count,
        )


def _parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--do_al", action="store_true", default=False)
    parser.add_argument("--target_score", type=float, default=0.8568075117370892)
parser.add_argument("--task_name", type=str, default="mrpc") parser.add_argument("--random_seed", type=int, default=123) parser.add_argument("--initial_train_dataset_size", type=float, default=0.3) parser.add_argument("--query_samples_count", type=int, default=256) return parser.parse_args() if __name__ == "__main__": args = _parse_args() main( args.task_name, args.do_al, args.random_seed, target_score=args.target_score, initial_train_dataset_size=args.initial_train_dataset_size, query_samples_count=args.query_samples_count, )
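
# Example invocations (illustrative; assumes the script is saved as run_glue_al.py):
#   python run_glue_al.py                                 # fine-tune on the full MRPC train set
#   python run_glue_al.py --do_al --query_samples_count 256 --initial_train_dataset_size 0.3
# The active-learning run keeps adding the highest-entropy unlabeled samples to the train
# set until the evaluation score reaches --target_score or the pool is exhausted.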
python