import sys, logging, time, resource, gc, os import multiprocessing from multiprocessing import Pool from util import print_datetime import numpy as np import gurobipy as grb import torch def estimate_weights_no_neighbors(YT, M, XT, prior_x_parameter_set, sigma_yx_inverse, X_constraint, dropout_mode, replicate): """Estimate weights for a single replicate in the SpiceMix model without considering neighbors. This is essentially a benchmarking convenience function, and should return similar results to running vanilla NMF. Args: YT: transpose of gene expression matrix for sample, with shape (num_cells, num_genes) M: current estimate of metagene matrix, with shape (num_genes, num_metagenes) XT: transpose of metagene weights for sample, with shape Returns: New estimate of transposed metagene weight matrix XT. """ if dropout_mode != 'raw': raise NotImplemented logging.info(f'{print_datetime()}Estimating weights without neighbors in repli {replicate}') _, num_metagenes = XT.shape updated_XT = np.zeros_like(XT) weight_model = grb.Model('X w/o n') weight_model.Params.OptimalityTol=1e-4 weight_model.Params.FeasibilityTol=1e-4 weight_model.setParam('OutputFlag', False) weight_model.Params.Threads = 1 weight_variables = weight_model.addVars(num_metagenes, lb=0.) assert X_constraint == 'none' # Adding shared components of the objective # quadratic term in log Pr[ Y | X, Theta ] shared_objective = 0 if dropout_mode == 'raw': # MTM = M.T @ M * (sigma_yx_inverse**2 / 2.) MTM = (M.T @ M + 1e-6 * np.eye(num_metagenes)) * (sigma_yx_inverse ** 2 / 2.) shared_objective += grb.quicksum([weight_variables[index] * MTM[index, index] * weight_variables[index] for index in range(num_metagenes)]) MTM *= 2 shared_objective += grb.quicksum([weight_variables[index] * MTM[index, j] * weight_variables[j] for index in range(num_metagenes) for j in range(index+1, num_metagenes)]) del MTM YTM = YT @ M * (-sigma_yx_inverse ** 2) else: raise NotImplementedError # prior on X prior_x_mode, *prior_x_parameters = prior_x_parameter_set if prior_x_mode in ('Truncated Gaussian', 'Gaussian'): mu_x, sigma_x_inv = prior_x_parameters assert (sigma_x_inv > 0).all() t = sigma_x_inv ** 2 / 2 shared_objective += grb.quicksum([t[metagene] * weight_variables[metagene] * weight_variables[metagene] for metagene in range(num_metagenes)]) t *= - 2 * mu_x shared_objective += grb.quicksum([t[metagene] * weight_variables[metagene] for metagene in range(num_metagenes)]) shared_objective += np.dot(mu_x**2, sigma_x_inv**2) / 2 elif prior_x_mode in ('Exponential', 'Exponential shared', 'Exponential shared fixed'): lambda_x, = prior_x_parameters assert (lambda_x >= 0).all() shared_objective += grb.quicksum([lambda_x[metagene] * weight_variables[metagene] for metagene in range(num_metagenes)]) else: raise NotImplementedError for cell_index, (y, yTM) in enumerate(zip(YT, YTM)): objective = shared_objective + grb.quicksum(yTM[metagene] * weight_variables[metagene] for metagene in range(num_metagenes)) + np.dot(y, y) * sigma_yx_inverse / 2. weight_model.setObjective(objective, grb.GRB.MINIMIZE) weight_model.optimize() updated_XT[cell_index] = [weight_variables[metagene].x for metagene in range(num_metagenes)] return updated_XT def estimate_weights_icm(YT, E, M, XT, prior_x_parameter_set, sigma_yx_inverse, sigma_x_inverse, X_constraint, dropout_mode, pairwise_potential_mode, replicate): r"""Estimate weights for a single replicate in the SpiceMix model using the Iterated Conditional Model (ICM). Notes: .. 
math:: \hat{X}_{\text{MAP}} &= \mathop{\text{\argmax}}_{X \in \mathbb{R}_+^{K \times N}} \left{ \sum_{i \in \mathcal{V}}\right} \\ s_i &= \frac{ - \lambda_x^\top z_i}{(Mz_i)^\top Mz_i} \\ z_i &= \frac{}{} We write XT in terms of size factors S such that XT = S * ZT. Args: YT: transpose of gene expression matrix for replicate, with shape (num_cells, num_genes) E: adjacency list for neighborhood graph in this replicate M: current estimate of metagene matrix, with shape (num_genes, num_metagenes) XT: transpose of weight matrix, with shape (num_cells, num_metagenes) prior_x_parameter_set: set of parameters defining prior distribution on weights, with structure (prior_x_mode, ∗prior_x_parameters) sigma_yx_inverse: TODO sigma_x_inverse: inverse of metagene affinity matrix X_constraint: constraint on elements of weight matrix dropout_mode: TODO: pairwise_potential_mode: TODO Returns: New estimate of transposed metagene weight matrix XT. """ prior_x_mode, *prior_x_parameters = prior_x_parameter_set num_cells, _ = YT.shape _, num_metagenes = M.shape MTM = None YTM = None # Precomputing some important matrix products if dropout_mode == 'raw': MTM = M.T @ M * sigma_yx_inverse**2 / 2 YTM = YT @ M * sigma_yx_inverse**2 / 2 else: raise NotImplementedError def calculate_objective(S, ZT): """Calculate current value of ICM objective. Args: YT: transpose of gene expression matrix for a particular sample S: a vector of total metagene expressions for each cell ZT: current estimate of weights for the sample, divided by the total for each cell Returns: value of ICM objective """ objective = 0 difference = YT - ( S * ZT ) @ M.T if dropout_mode == 'raw': difference = difference.ravel() else: raise NotImplementedError objective += np.dot(difference, difference) * sigma_yx_inverse**2 / 2 if pairwise_potential_mode == 'normalized': for neighbors, z_i in zip(E.values(), ZT): objective += z_i @ sigma_x_inverse @ ZT[neighbors].sum(axis=0) / 2 else: raise NotImplementedError if prior_x_mode in ('Exponential', 'Exponential shared', 'Exponential shared fixed'): lambda_x, = prior_x_parameters objective += lambda_x @ (S * ZT).sum(axis=0) del lambda_x else: raise NotImplementedError objective /= YT.size return objective def update_s_i(z_i, yTM): """Calculate closed form update for s_i. Assuming fixed value of z_i, update for s_i takes the following form: TODO Args: z_i: current estimate of normalized metagene expression neighbors: list of neighbors of current cell yTM: row of YTM corresponding to current cell MTM: row of MTM corresponding to current cell Returns: Updated estimate of s_i """ denominator = z_i @ MTM @ z_i numerator = yTM @ z_i if prior_x_mode in ('Exponential', 'Exponential shared', 'Exponential shared fixed'): lambda_x, = prior_x_parameters # TODO: do we need the 1/2 here? numerator -= lambda_x @ z_i / 2 del lambda_x else: raise NotImplementedError numerator = np.maximum(numerator, 0) s_i_new = numerator / denominator return s_i_new def update_z_i(s_i, y_i, yTM, eta): """Calculate update for z_i using Gurobi simplex algorithm. 
Assuming fixed value of s_i, update for z_i is a linear program of the following form: TODO Args: s_i: current estimate of size factor yTM: row of YTM corresponding to current cell eta: aggregate contribution of neighbor z_j's, weighted by affinity matrix (sigma_x_inverse) Returns: Updated estimate of z_i """ objective = 0 # Element-wise matrix multiplication (Mz_is_i)^\top(Mz_is_i) factor = s_i**2 * MTM objective += grb.quicksum([weight_variables[index] * factor[index, index] * weight_variables[index] for index in range(num_metagenes)]) factor *= 2 objective += grb.quicksum([weight_variables[index] * factor[index, j] * weight_variables[j] for index in range(num_metagenes) for j in range(index+1, num_metagenes)]) # Adding terms for -2 y_i M z_i s_i factor = -2 * s_i * yTM # TODO: fix formula below # objective += grb.quicksum([weight_variables[index] * factor[index] for index in range(num_metagenes)]) # objective += y_i @ y_i # objective *= sigma_yx_inverse**2 / 2 factor += eta # factor = eta if prior_x_mode in ('Exponential'): lambda_x, = prior_x_parameters factor += lambda_x * s_i del lambda_x elif prior_x_mode in ('Exponential shared', 'Exponential shared fixed'): pass else: raise NotImplementedError objective += grb.quicksum([weight_variables[index] * factor[index] for index in range(num_metagenes)]) # TODO: is this line necessary? Doesn't seem like z_i affects this term of the objective objective += y_i @ y_i * sigma_yx_inverse**2 / 2 weight_model.setObjective(objective, grb.GRB.MINIMIZE) weight_model.optimize() z_i_new = np.array([weight_variables[index].x for index in range(num_metagenes)]) return z_i_new global_iterations = 100 local_iterations = 100 weight_model = grb.Model('ICM') weight_model.Params.OptimalityTol=1e-4 weight_model.Params.FeasibilityTol=1e-4 weight_model.Params.OutputFlag = False weight_model.Params.Threads = 1 weight_model.Params.BarConvTol = 1e-6 weight_variables = weight_model.addVars(num_metagenes, lb=0.) weight_model.addConstr(weight_variables.sum() == 1) S = XT.sum(axis=1, keepdims=True) ZT = XT / (S + 1e-30) last_objective = calculate_objective(S, ZT) best_objective, best_iteration = last_objective, -1 for global_iteration in range(global_iterations): last_ZT = np.copy(ZT) last_S = np.copy(S) locally_converged = False if pairwise_potential_mode == 'normalized': for index, (neighbors, y_i, yTM, z_i, s_i) in enumerate(zip(E.values(), YT, YTM, ZT, S)): eta = ZT[neighbors].sum(axis=0) @ sigma_x_inverse for local_iteration in range(local_iterations): s_i_new = update_s_i(z_i, yTM) s_i_new = np.maximum(s_i_new, 1e-15) delta_s_i = s_i_new - s_i s_i = s_i_new z_i_new = update_z_i(s_i, y_i, yTM, eta) delta_z_i = z_i_new - z_i z_i = z_i_new locally_converged |= (np.abs(delta_s_i) / (s_i + 1e-15) < 1e-3 and np.abs(delta_z_i).max() < 1e-3) if locally_converged: break if not locally_converged: logging.warning(f'Cell {i} in the {replicate}-th replicate did not converge in {local_iterations} iterations;\ts = {s:.2e}, delta_s_i = {delta_s_i:.2e}, max delta_z_i = {np.abs(delta_z_i).max():.2e}') ZT[index] = z_i S[index] = s_i else: raise NotImplementedError globally_converged = False dZT = ZT - last_ZT dS = S - last_S current_objective = calculate_objective(S, ZT) globally_converged |= (np.abs(dZT).max() < 1e-2 and np.abs(dS / (S + 1e-15)).max() < 1e-2 and current_objective > last_objective - 1e-4) # TODO: do we need to keep this? 
force_show_flag = False # force_show_flag |= np.abs(dZT).max() > 1-1e-5 if global_iteration % 5 == 0 or globally_converged or force_show_flag: print(f'>{replicate} current_objective at iteration {global_iteration} = {current_objective:.2e},\tdiff = {np.abs(dZT).max():.2e}\t{np.abs(dS).max():.2e}\t{current_objective - last_objective:.2e}') print( f'ZT summary statistics: ' f'# <0 = {(ZT < 0).sum().astype(np.float) / num_cells:.1f}, ' f'# =0 = {(ZT == 0).sum().astype(np.float) / num_cells:.1f}, ' f'# <1e-10 = {(ZT < 1e-10).sum().astype(np.float) / num_cells:.1f}, ' f'# <1e-5 = {(ZT < 1e-5).sum().astype(np.float) / num_cells:.1f}, ' f'# <1e-2 = {(ZT < 1e-2).sum().astype(np.float) / num_cells:.1f}, ' f'# >1e-1 = {(ZT > 1e-1).sum().astype(np.float) / num_cells:.1f}' ) print( f'S summary statistics: ' f'# 0 = {(S == 0).sum()}, ' f'min = {S.min():.1e}, ' f'max = {S.max():.1e}' ) sys.stdout.flush() # TODO: do we need this assertion still? assert not current_objective > last_objective + 1e-6 last_objective = current_objective if current_objective < best_objective: best_objective, best_iteration = current_objective, global_iteration if globally_converged: break del weight_model # Enforce positivity constraint on S XT = np.maximum(S, 1e-15) * ZT return XT
python
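As a quick sanity check of the size-factor decomposition XT = S * ZT and the closed-form s_i update used inside estimate_weights_icm, here is a standalone NumPy sketch that reproduces that update for one cell on synthetic data. All shapes and values are made up for illustration; only the formula mirrors update_s_i above (Exponential prior branch), and it does not require Gurobi.

import numpy as np

rng = np.random.default_rng(0)
num_genes, num_metagenes = 50, 5
sigma_yx_inverse = 1.0
lambda_x = np.full(num_metagenes, 0.1)

M = rng.random((num_genes, num_metagenes))       # metagene matrix (synthetic)
y_i = rng.random(num_genes)                      # expression of one cell (synthetic)
z_i = rng.dirichlet(np.ones(num_metagenes))      # normalized weights, sums to 1

# Precomputed products, as in estimate_weights_icm
MTM = M.T @ M * sigma_yx_inverse**2 / 2
yTM = y_i @ M * sigma_yx_inverse**2 / 2

# Closed-form size-factor update from update_s_i
numerator = yTM @ z_i - lambda_x @ z_i / 2
denominator = z_i @ MTM @ z_i
s_i = np.maximum(numerator, 0) / denominator

x_i = s_i * z_i                                  # one row of XT = S * ZT
print(s_i, x_i)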
# visualizer.py
# Contains functions for image visualization
import cv2 as cv
import matplotlib.pyplot as plt
import numpy as np
import random
import skimage.io as io
import torch

from operator import itemgetter
from PIL import Image
from torchvision import datasets, models, transforms

from metrics import getPercentMask, calculateIoU

# Minimum fraction of an image the object must occupy in order to be considered prominent
PROMINENT_PERCENT_THRESHOLD = 0.3

# Extract images with one very prominent object and other possible smaller objects
OTHER_OBJ_THRESH = 0.1

# Maximum fraction of an image the object must occupy in order to be considered prominent
MAX_PERCENT = 0.9

# Default input dimensions
IMG_SIZE = 224

# Maximum number of objects that are considered to be prominent
MAX_PROMINENT_NUM = 4


# Displays an image
def imshow(img, show_axis=False, save=False, save_path=None):
    if not show_axis:
        plt.axis('off')
    plt.imshow(img)
    if save:
        plt.savefig(save_path)
    plt.show()
    plt.clf()


# Returns bit mask for objects of interest in image
def getBitMask(annotations, cocoData):
    mask = cocoData.coco.annToMask(annotations[0])
    # Create conglomerate mask over all objects in image
    for i in range(len(annotations)):
        mask = mask | cocoData.coco.annToMask(annotations[i])
    # imshow(mask)
    return mask


# Returns masked image
def getMaskedImg(img, mask):
    mask_arr = np.array(mask)
    # Reshape to give 3rd axis for broadcasting to 3 channels
    mask_arr = np.expand_dims(mask_arr, axis=-1)
    masked_img = np.array(img)
    masked_img = masked_img * mask_arr
    return masked_img


# Given a tensor of images in NCHW format, converts to numpy images
def tensorToNpImg(tensor, img_type='mask'):
    image = tensor.detach().numpy()
    # Re-normalize for imshow plotting
    if img_type != 'mask':
        image = image / 255
    image = np.transpose(image, [1, 2, 0])
    return image.squeeze()


def thresholdProbMask(prob_mask, threshold=0.5):
    prob_mask[prob_mask > threshold] = 1
    prob_mask[prob_mask <= threshold] = 0
    return prob_mask


# Given model, input image, and target mask,
# evaluates output mask using model and displays against target
def extractProminent(model, img, target):
    plt.figure()
    plt.subplot(1, 3, 1)
    plt.imshow(tensorToNpImg(img, 'img')); plt.axis('off')
    plt.subplot(1, 3, 2)
    plt.imshow(tensorToNpImg(target)); plt.axis('off')
    res = torch.sigmoid(model(img.unsqueeze(0).float()))
    plt.subplot(1, 3, 3)
    generatedMask = thresholdProbMask(tensorToNpImg(res.squeeze(0)))
    plt.imshow(generatedMask); plt.axis('off')
    print("IoU:", calculateIoU(res, target))


# Plots curve for given train and validation arrays
# ctype = {'Accuracy', 'Loss'}
def plotCurve(train_val, valid_val, num_epochs, ctype):
    plt.title('Train vs Validation {}'.format(ctype))
    plt.plot(range(num_epochs), train_val, label='Train')
    plt.plot(range(num_epochs), valid_val, label='Validation')
    plt.xlabel('Epoch')
    plt.ylabel(ctype)
    plt.legend(loc='best')
    plt.show()


def plotPerformance(train_loss, valid_loss, train_acc, valid_acc, num_epochs):
    # Plot loss curves
    plotCurve(train_loss, valid_loss, num_epochs, ctype='Loss')
    # Plot accuracy curves
    plotCurve(train_acc, valid_acc, num_epochs, ctype='IoU')


# Simple erosion-dilation denoiser
def denoise(img, kernel_size=5):
    return cv.morphologyEx(img, cv.MORPH_OPEN,
                           cv.getStructuringElement(cv.MORPH_RECT, (kernel_size, kernel_size)))
python
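A minimal illustration of how thresholdProbMask and getMaskedImg compose, run in the same namespace as the module above. The toy array shapes are invented and stand in for a real image and a model's probability mask.

import numpy as np

img = np.arange(16, dtype=np.uint8).reshape(4, 4, 1).repeat(3, axis=-1)  # fake HxWx3 image
prob_mask = np.random.rand(4, 4)                                         # fake model output

binary_mask = thresholdProbMask(prob_mask.copy(), threshold=0.5)  # 0/1 mask
masked = getMaskedImg(img, binary_mask)                           # zeroes out background pixels
print(binary_mask.shape, masked.shape)   # (4, 4) (4, 4, 3)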
import os

import pandas as pd

# Configuration and constant definitions for the API

# Search
TEMPLATES_INDEX_FILENAME = 'templates.pkl'
SEARCH_INDEX_FILENAME = 'index_clean.pkl'  # os.path.join('images', 'index_4.df')
SEARCH_READER_FN = pd.read_pickle
SEARCH_COLUMNS = ['fusion_text_glove', 'title_glove', 'ocr_glove', 'img_embedding']
SEARCH_MAX_DIMS = [300, 300, 300, 512]  # [30, 30, 30, 50]

# Models
PRETRAINED_MODELS_DIR = 'pretrained'
if not os.path.isdir(PRETRAINED_MODELS_DIR):
    os.makedirs(PRETRAINED_MODELS_DIR)
EMBEDDINGS_FILENAME = os.path.join(PRETRAINED_MODELS_DIR, 'glove.6B.300d_dict.pickle')
EMBEDDINGS_URL = 'https://cloud.tsinghua.edu.cn/f/0e2ab878bb5d4698b344/?dl=1'

# Temp images
ALLOWED_IMAGE_EXTENSIONS = [".jpg", ".png", ".gif"]
TEMP_IMAGES_DIR = os.path.join('images', 'external')
if not os.path.isdir(TEMP_IMAGES_DIR):
    os.makedirs(TEMP_IMAGES_DIR)
python
from setuptools import setup, find_packages

setup(
    name='acl-iitbbs',
    version='0.1',
    description='Fetch attendance and result from ERP and Pretty Print it on Terminal.',
    author='Aman Pratap Singh',
    author_email='[email protected]',
    url='https://github.com/apsknight/acl',
    py_modules=['acl'],
    packages=find_packages(),
    install_requires=[
        'Click',
        'robobrowser',
        'bs4',
        'tabulate'
    ],
    entry_points='''
        [console_scripts]
        acl=source:attendance
    ''',
)
python
'''Helper functions to deal with datetime strings'''
from __future__ import unicode_literals, print_function
import re
from datetime import datetime

# REGEX!
DATE_RE = r'(\d{4}-\d{2}-\d{2})|(\d{4}-\d{3})'
SEC_RE = r'(:(?P<second>\d{2})(\.\d+)?)'
RAWTIME_RE = r'(?P<hour>\d{1,2})(:(?P<minute>\d{2})%s?)?' % (SEC_RE)
AMPM_RE = r'am|pm|a\.m\.|p\.m\.|AM|PM|A\.M\.|P\.M\.'
TIMEZONE_RE = r'Z|[+-]\d{2}:?\d{2}?'
TIME_RE = (r'(?P<rawtime>%s)( ?(?P<ampm>%s))?( ?(?P<tz>%s))?'
           % (RAWTIME_RE, AMPM_RE, TIMEZONE_RE))
DATETIME_RE = (r'(?P<date>%s)(?P<separator>[T ])(?P<time>%s)'
               % (DATE_RE, TIME_RE))


def normalize_datetime(dtstr, match=None):
    """Try to normalize a datetime string.
    1. Convert 12-hour time to 24-hour time

    pass match in if we have already calculated it to avoid rework
    """
    match = match or (dtstr and re.match(DATETIME_RE + '$', dtstr))
    if match:
        datestr = match.group('date')
        hourstr = match.group('hour')
        minutestr = match.group('minute') or '00'
        secondstr = match.group('second')
        ampmstr = match.group('ampm')
        separator = match.group('separator')

        # convert ordinal date YYYY-DDD to YYYY-MM-DD
        try:
            datestr = datetime.strptime(datestr, '%Y-%j').strftime('%Y-%m-%d')
        except ValueError:
            # datestr was not in YYYY-DDD format
            pass

        # 12 to 24 time conversion
        if ampmstr:
            hourstr = match.group('hour')
            hourint = int(hourstr)
            if (ampmstr.startswith('a') or ampmstr.startswith('A')) and hourint == 12:
                hourstr = '00'
            if (ampmstr.startswith('p') or ampmstr.startswith('P')) and hourint < 12:
                hourstr = str(hourint + 12)

        dtstr = '%s%s%s:%s' % (datestr, separator, hourstr, minutestr)
        if secondstr:
            dtstr += ':' + secondstr

        tzstr = match.group('tz')
        if tzstr:
            dtstr += tzstr

    return dtstr
python
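For reference, a few hedged examples of what normalize_datetime is expected to produce for the patterns it targets (12-hour time and ordinal dates); the inputs are invented.

print(normalize_datetime('2021-01-15 3:05pm'))     # expected: '2021-01-15 15:05'
print(normalize_datetime('2013-245 8:30am'))       # expected: '2013-09-02 8:30' (ordinal date expanded)
print(normalize_datetime('2021-01-15T09:30:00Z'))  # expected: '2021-01-15T09:30:00Z' (already normalized)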
class SofaException(Exception):
    def __init__(self, message):
        super(SofaException, self).__init__(message)


class ConfigurationException(SofaException):
    def __init__(self, message):
        super(ConfigurationException, self).__init__(message)
python
#!/usr/bin/env python # # Copyright (c) 2013-2018 Nest Labs, Inc. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import md5 from nestlabs.breadcrumbs.Event import * UID_BYTES = 2 def backslashify(aString): return aString.replace('\n', ' \\\n') def applyIndent(aString, indent): return aString.replace('$', '%s' % (' '*(2*indent))) class EventDescriptor: """A simple class to wrap around the event description""" def __init__(self, filename, name, param_sizes, description): self.filename = filename.strip('\n') self.name = name self.param_sizes = param_sizes self.description = description return None def __getstate__(self): return (self.filename, self.name, self.param_sizes, self.description) def __setstate__(self, a_dict): self.filename = a_dict[0] self.name = a_dict[1] self.param_sizes = a_dict[2] self.description = a_dict[3] def name(self): return self.name def param_sizes(self): return self.param_sizes def description(self): return self.description def cksum(self): stringToHash = '%s %s' % (self.name, self.name) cksumr = md5.new(stringToHash) return cksumr.hexdigest()[:UID_BYTES*2] def __str__(self): return "%s %s | %s" % (self.name, self.param_sizes, self.description) def get_param_list(self): retval = '' i = 0 for s in range(len(self.param_sizes)): retval += 'arg%d, ' % i i += 1 return retval[0:-2] def get_args(self): retval = '' i = 0 for sz in self.param_sizes: retval += '$$$%s, arg%d,\n' % ( Event.get_param_size(sz[1]), i) i += 1 return retval def get_verify_string(self): retval = '' i = 0 for s in self.param_sizes: retval += ('$$nlCHECK(sizeof(arg%d) == %s);\n' % (i, Event.get_param_size(s[1]))) i += 1 return retval def get_macro(self, indent_val): indent1=' '*((indent_val)*2) indent2=' '*((indent_val + 1)*2) aString = """\ #define nlBREADCRUMBS_%s(%s) $do{ $$nl_breadcrumbs((k%s << %d), %s$$$-1); $} while(0)""" % (self.name, self.get_param_list(), self.name, UID_BYTES*8, self.get_args()) return applyIndent("%s\n\n" % backslashify(aString), 2)
python
#!/usr/bin/env python # -*- Coding: UTF-8 -*- # @Time : 12/8/18 7:02 PM # @Author : Terry LAI # @Email : [email protected] # @File : keyboard.py from pymouse import PyMouse from pykeyboard import PyKeyboard from socket import socket, AF_INET, SOCK_STREAM port = 20000 # -*- coding: utf-8 -*- client_addr = [] client_socket = {} ########################################################################### ## Python code generated with wxFormBuilder (version Sep 12 2010) ## http://www.wxformbuilder.org/ ## ## PLEASE DO "NOT" EDIT THIS FILE! ########################################################################### import wx from socketserver import ThreadingTCPServer ########################################################################### ## Class MotionGame ########################################################################### class MotionGame(wx.Frame): def __init__(self, parent): wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=wx.EmptyString, pos=wx.DefaultPosition, size=wx.Size(500, 300), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL) self.SetSizeHintsSz(wx.DefaultSize, wx.DefaultSize) bSizer11 = wx.BoxSizer(wx.VERTICAL) self.m_staticText1 = wx.StaticText(self, wx.ID_ANY, u"ECE 5413 Motion Game", wx.DefaultPosition, wx.DefaultSize, 0) self.m_staticText1.Wrap(-1) bSizer11.Add(self.m_staticText1, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5) self.m_button1 = wx.Button(self, wx.ID_ANY, u"Start Server", wx.DefaultPosition, wx.DefaultSize, 0) bSizer11.Add(self.m_button1, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5) self.m_staticText2 = wx.StaticText(self, wx.ID_ANY, u"server is down", wx.DefaultPosition, wx.DefaultSize, 0) self.m_staticText2.Wrap(-1) bSizer11.Add(self.m_staticText2, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 5) gbSizer1 = wx.GridBagSizer(0, 0) gbSizer1.SetFlexibleDirection(wx.BOTH) gbSizer1.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED) self.m_staticText12 = wx.StaticText(self, wx.ID_ANY, u"Game 1", wx.Point(20, 20), wx.DefaultSize, wx.ALIGN_CENTRE) self.m_staticText12.Wrap(-1) gbSizer1.Add(self.m_staticText12, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5) self.m_button2 = wx.Button(self, wx.ID_ANY, u"Set Game 1", wx.DefaultPosition, wx.DefaultSize, 0) gbSizer1.Add(self.m_button2, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL, 5) self.m_staticText14 = wx.StaticText(self, wx.ID_ANY, u"Player 1", wx.DefaultPosition, wx.DefaultSize, 0) self.m_staticText14.Wrap(-1) gbSizer1.Add(self.m_staticText14, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 5) self.m_staticText4 = wx.StaticText(self, wx.ID_ANY, u"disconnected", wx.DefaultPosition, wx.DefaultSize, 0) self.m_staticText4.Wrap(-1) gbSizer1.Add(self.m_staticText4, wx.GBPosition(0, 3), wx.GBSpan(1, 1), wx.ALL, 5) bSizer11.Add(gbSizer1, 1, wx.EXPAND, 5) gbSizer11 = wx.GridBagSizer(0, 0) gbSizer11.SetFlexibleDirection(wx.BOTH) gbSizer11.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED) self.m_staticText121 = wx.StaticText(self, wx.ID_ANY, u"Game 2", wx.Point(20, 20), wx.DefaultSize, wx.ALIGN_CENTRE) self.m_staticText121.Wrap(-1) gbSizer11.Add(self.m_staticText121, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALL | wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT, 5) self.m_button3 = wx.Button(self, wx.ID_ANY, u"Set Game 2", wx.DefaultPosition, wx.DefaultSize, 0) gbSizer11.Add(self.m_button3, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL, 5) self.m_staticText141 = wx.StaticText(self, wx.ID_ANY, u"Player 1", wx.DefaultPosition, wx.DefaultSize, 0) self.m_staticText141.Wrap(-1) 
gbSizer11.Add(self.m_staticText141, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 5) self.m_staticText5 = wx.StaticText(self, wx.ID_ANY, u"disconnected", wx.DefaultPosition, wx.DefaultSize, 0) self.m_staticText5.Wrap(-1) gbSizer11.Add(self.m_staticText5, wx.GBPosition(0, 3), wx.GBSpan(1, 1), wx.ALL, 5) self.m_staticText40 = wx.StaticText(self, wx.ID_ANY, u"Player 2", wx.DefaultPosition, wx.DefaultSize, 0) self.m_staticText40.Wrap(-1) gbSizer11.Add(self.m_staticText40, wx.GBPosition(0, 4), wx.GBSpan(1, 1), wx.ALL, 5) self.m_staticText6 = wx.StaticText(self, wx.ID_ANY, u"disconnected", wx.DefaultPosition, wx.DefaultSize, 0) self.m_staticText6.Wrap(-1) gbSizer11.Add(self.m_staticText6, wx.GBPosition(0, 5), wx.GBSpan(1, 1), wx.ALL, 5) bSizer11.Add(gbSizer11, 1, wx.EXPAND, 5) bSizer12 = wx.BoxSizer(wx.VERTICAL) self.m_staticText57 = wx.StaticText(self, wx.ID_ANY, u"Game 2 Link: ", wx.DefaultPosition, wx.Size(50, -1), 0) self.m_staticText57.Wrap(-1) self.m_staticText57.SetMaxSize(wx.Size(100, -1)) bSizer12.Add(self.m_staticText57, 1, wx.ALL | wx.EXPAND, 5) self.m_textCtrl12 = wx.TextCtrl(self, wx.ID_ANY, u"http://www.4399.com/flash/187228_1.htm", wx.DefaultPosition, wx.DefaultSize, 0) bSizer12.Add(self.m_textCtrl12, 0, wx.ALL | wx.EXPAND, 5) bSizer11.Add(bSizer12, 1, wx.EXPAND, 5) self.SetSizer(bSizer11) self.Layout() self.Centre(wx.BOTH) # Connect Events self.m_button1.Bind(wx.EVT_BUTTON, self.start_server) self.m_button2.Bind(wx.EVT_BUTTON, self.set_game1) self.m_button3.Bind(wx.EVT_BUTTON, self.set_game2) def __del__(self): pass # Virtual event handlers, overide them in your derived class def start_server(self, event): frame.m_staticText2.SetLabel("Server is Running !!! ") print("start server") timer = threading.Timer(timer_period, fun_timer) timer.start() # 第一对参数是(host, port) server = ThreadingTCPServer(('', port), EchoHandler) server_thread = threading.Thread(target=server.serve_forever) # Exit the server thread when the main thread terminates server_thread.daemon = True server_thread.start() #sudo netstat -lntup|grep 20000 #ps -ef|grep python // 查看在python中的所有进程 #kill -9 51976 // -9指是强制关闭进程,有时候直接用`kill 51976`是杀不死进程的 def set_game1(self, event): global mode global mode_1_flag global mode_2_flag mode_1_flag = True mode = 1 print("Mode 1") for key,value in client_socket.items(): value.sendall(bytes([0x11,0x22,0x33])) def set_game2(self, event): global mode global mode_1_flag global mode_2_flag mode_2_flag = True mode = 2 print("Mode 2") for key,value in client_socket.items(): try: value.sendall(bytes([0x11, 0x22, 0x33])) except IOError: pass else: pass m = PyMouse() k = PyKeyboard() from socketserver import BaseRequestHandler, TCPServer buffer_size = 10 key_flag = False import threading timer_period = 0.1 def fun_timer(): global key_flag #print('Hello Timer!') key_flag = True global timer timer = threading.Timer(timer_period, fun_timer) timer.start() previous_key = 0 mode = 1 frame =None mode_1_flag= False mode_2_flag= False d = {} # 继承BaseRequestHandler这个base class,并重定义handle() class EchoHandler(BaseRequestHandler): def setup(self): ip = self.client_address[0].strip() # 获取客户端的ip port = self.client_address[1] # 获取客户端的port print(ip+":"+str(port)+" is connect!") client_addr.append(self.client_address) # 保存到队列中 client_socket[self.client_address] = self.request # 保存套接字socket def finish(self): print("client is disconnect!") client_addr.remove(self.client_address) del client_socket[self.client_addr] def handle(self): global key_flag global previous_key global mode_1_flag global mode_2_flag print('Got 
connection from', self.client_address) print(type(self.request)) # self.request is the TCP socket connected to the client count = 0 msg = [] while True: # 8192代表每次读取8192字节 temp = self.request.recv(buffer_size) msg.extend(temp) while len(msg) >= 2 and (msg[0]!=0xa0 or msg[1]!=0xa1): msg.pop(0) if len(msg)<buffer_size: continue if not key_flag: continue up = msg[2] down = msg[3] left = msg[4] right = msg[5] node = msg[6] if node == 1: frame.m_staticText4.SetLabel("Connected !!! ") frame.m_staticText5.SetLabel("Connected !!! ") if node == 2: frame.m_staticText6.SetLabel("Connected !!! ") if mode == 1: key = 0 if up and not left and not right: key =1 if down and not left and not right: key =2 if left: key =3 if right: key =4 if key != 0 and previous_key != key: print(key) if key == 1: k.press_key("up") print(" node 1 up") # else: # k.release_key("up") if key == 2: k.press_key("down") print(" node 1 down") # else: # k.release_key("down") if key == 3: k.press_key("left") print(" node 1 left") # else: # k.release_key("left") if key == 4: k.press_key("right") print(" node 1 right") # else: # k.release_key("right") previous_key = key if mode == 2: if node == 1: if up == 1: k.press_key("up") print(" node 1 up") else: k.release_key("up") if down == 1: k.press_key("down") print(" node 1 down") else: k.release_key("down") if left == 1: k.press_key("left") print(" node 1 left") else: k.release_key("left") if right == 1: k.press_key("right") print(" node 1 right") else: k.release_key("right") if node == 2: if up == 1: k.press_key("w") print(" node 2 up") else: k.release_key("w") if down == 1: k.press_key("s") print(" node 2 down") else: k.release_key("s") if left == 1: k.press_key("a") print(" node 2 left") else: k.release_key("a") if right == 1: k.press_key("d") print(" node 2 right") else: k.release_key("d") msg = [] #key_flag = False if __name__ == '__main__': app = wx.App() # 实例化一个主循环<br> frame = MotionGame(None) # 实例化一个窗口<br> frame.Show() # 调用窗口展示功能<br> app.MainLoop() # 启动主循环
python
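The TCP handler above implies a simple fixed-length packet layout; the sketch below summarizes it and builds one example packet. Field meanings are inferred from the handle() code (header sync bytes, direction flags, node id), and treating the trailing bytes as padding is an assumption.

# Packet layout inferred from EchoHandler.handle() (buffer_size = 10 bytes):
#   byte 0: 0xa0  header
#   byte 1: 0xa1  header
#   byte 2: up    (0 or 1)
#   byte 3: down  (0 or 1)
#   byte 4: left  (0 or 1)
#   byte 5: right (0 or 1)
#   byte 6: node  (1 or 2, identifies the player board)
#   bytes 7-9: not read by the handler (assumed padding)
example_packet = bytes([0xa0, 0xa1, 1, 0, 0, 0, 1, 0, 0, 0])  # node 1 pressing "up"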
from mock import MagicMock, patch
import unittest

from cassandras3.cli.restore import do_restore
from cassandras3.util.nodetool import NodeTool


class TestRestoreClient(unittest.TestCase):
    @patch('cassandras3.cli.restore.ClientCache')
    @patch('cassandras3.cli.restore.NodeTool')
    def test_restore(self, nodetool_constructor, _):
        self._setup_mocks(nodetool_constructor)

        do_restore(
            'us-east-1', 'localhost', 7199, 'backup-id', 'system', 'some-host', 'test')

        self.mock_nodetool.restore.assert_called_with('system', 'test', 'backup-id')

    @patch('cassandras3.cli.restore.ClientCache')
    @patch('cassandras3.cli.restore.NodeTool')
    def test_restore_no_hostname(self, nodetool_constructor, _):
        self._setup_mocks(nodetool_constructor)

        do_restore(
            'us-east-1', 'localhost', 7199, 'backup-id', 'system', '', 'test')

        self.mock_nodetool.restore.assert_called_with('system', 'test', 'backup-id')

    def _setup_mocks(self, nodetool_constructor):
        self.mock_nodetool = MagicMock(spec=NodeTool)
        nodetool_constructor.return_value = self.mock_nodetool
python
import csv import datetime import json import logging import os import time import click import structlog from dsaps import helpers from dsaps.models import Client, Collection logger = structlog.get_logger() def validate_path(ctx, param, value): """Validates the formatting of the submitted path""" if value[-1] == "/": return value else: raise click.BadParameter("Include / at the end of the path.") @click.group(chain=True) @click.option( "--url", envvar="DSPACE_URL", required=True, ) @click.option( "-e", "--email", envvar="DSPACE_EMAIL", required=True, help="The email of the user for authentication.", ) @click.option( "-p", "--password", envvar="DSPACE_PASSWORD", required=True, hide_input=True, help="The password for authentication.", ) @click.pass_context def main(ctx, url, email, password): ctx.obj = {} if os.path.isdir("logs") is False: os.mkdir("logs") dt = datetime.datetime.utcnow().isoformat(timespec="seconds") log_suffix = f"{dt}.log" structlog.configure( processors=[ structlog.stdlib.filter_by_level, structlog.stdlib.add_log_level, structlog.stdlib.PositionalArgumentsFormatter(), structlog.processors.TimeStamper(fmt="iso"), structlog.processors.JSONRenderer(), ], context_class=dict, logger_factory=structlog.stdlib.LoggerFactory(), ) logging.basicConfig( format="%(message)s", handlers=[logging.FileHandler(f"logs/log-{log_suffix}", "w")], level=logging.INFO, ) logger.info("Application start") client = Client(url) client.authenticate(email, password) start_time = time.time() ctx.obj["client"] = client ctx.obj["start_time"] = start_time ctx.obj["log_suffix"] = log_suffix @main.command() @click.option( "-m", "--metadata-csv", required=True, type=click.Path(exists=True, file_okay=True, dir_okay=False), help="The path to the CSV file of metadata for the items.", ) @click.option( "-f", "--field-map", required=True, type=click.Path(exists=True, file_okay=True, dir_okay=False), help="The path to JSON field mapping file.", ) @click.option( "-d", "--content-directory", required=True, type=click.Path(exists=True, dir_okay=True, file_okay=False), help="The full path to the content, either a directory of files " "or a URL for the storage location.", ) @click.option( "-t", "--file-type", help="The file type to be uploaded, if limited to one file " "type.", default="*", ) @click.option( "-r", "--ingest-report", is_flag=True, help="Create ingest report for updating other systems.", ) @click.option( "-c", "--collection-handle", help="The handle of the collection to which items are being " "added.", default=None, ) @click.pass_context def additems( ctx, metadata_csv, field_map, content_directory, file_type, ingest_report, collection_handle, ): """Add items to a specified collection from a metadata CSV, a field mapping file, and a directory of files. May be run in conjunction with the newcollection CLI command.""" client = ctx.obj["client"] start_time = ctx.obj["start_time"] if "collection_uuid" not in ctx.obj and collection_handle is None: raise click.UsageError( "collection_handle option must be used or " "additems must be run after newcollection " "command." 
) elif "collection_uuid" in ctx.obj: collection_uuid = ctx.obj["collection_uuid"] else: collection_uuid = client.get_uuid_from_handle(collection_handle) with open(metadata_csv, "r") as csvfile, open(field_map, "r") as jsonfile: metadata = csv.DictReader(csvfile) mapping = json.load(jsonfile) collection = Collection.create_metadata_for_items_from_csv(metadata, mapping) for item in collection.items: item.bitstreams_in_directory(content_directory, file_type) collection.uuid = collection_uuid items = collection.post_items(client) if ingest_report: report_name = metadata_csv.replace(".csv", "-ingest.csv") helpers.create_ingest_report(items, report_name) elapsed_time = datetime.timedelta(seconds=time.time() - start_time) logger.info(f"Total runtime : {elapsed_time}") @main.command() @click.option( "-c", "--community-handle", required=True, help="The handle of the community in which to create the ," "collection.", ) @click.option( "-n", "--collection-name", required=True, help="The name of the collection to be created.", ) @click.pass_context def newcollection(ctx, community_handle, collection_name): """Post a new collection to a specified community. Used in conjunction with the additems CLI command to populate the new collection with items.""" client = ctx.obj["client"] collection_uuid = client.post_coll_to_comm(community_handle, collection_name) ctx.obj["collection_uuid"] = collection_uuid @main.command() @click.option( "-m", "--metadata-csv", required=True, type=click.Path(exists=True, file_okay=True, dir_okay=False), help="The path of the CSV file of metadata.", ) @click.option( "-o", "--output-directory", type=click.Path(exists=True, file_okay=False), default=f"{os.getcwd()}/", callback=validate_path, help="The path of the output files, include / at the end of the " "path.", ) @click.option( "-d", "--content-directory", required=True, help="The full path to the content, either a directory of files " "or a URL for the storage location.", ) @click.option( "-t", "--file-type", help="The file type to be uploaded, if limited to one file " "type.", default="*", ) def reconcile(metadata_csv, output_directory, content_directory, file_type): """Run a reconciliation of the specified files and metadata to produce reports of files with no metadata, metadata with no files, metadata matched to files, and an updated version of the metadata CSV with only the records that have matching files.""" file_ids = helpers.create_file_list(content_directory, file_type) metadata_ids = helpers.create_metadata_id_list(metadata_csv) metadata_matches = helpers.match_metadata_to_files(file_ids, metadata_ids) file_matches = helpers.match_files_to_metadata(file_ids, metadata_ids) no_files = set(metadata_ids) - set(metadata_matches) no_metadata = set(file_ids) - set(file_matches) helpers.create_csv_from_list(no_metadata, f"{output_directory}no_metadata") helpers.create_csv_from_list(no_files, f"{output_directory}no_files") helpers.create_csv_from_list( metadata_matches, f"{output_directory}metadata_matches" ) helpers.update_metadata_csv(metadata_csv, output_directory, metadata_matches)
python
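The reconcile command above boils down to matching file IDs against metadata IDs and taking set differences. A toy illustration of that logic follows; the IDs are invented, and it assumes exact-ID matching, whereas the real helpers.match_* functions may match differently.

file_ids = ['001', '002', '003']        # files found in the content directory
metadata_ids = ['002', '003', '004']    # IDs listed in the metadata CSV

metadata_matches = set(metadata_ids) & set(file_ids)   # records with a matching file
no_files = set(metadata_ids) - metadata_matches        # metadata rows without files
no_metadata = set(file_ids) - set(metadata_ids)        # files without metadata rows
print(sorted(metadata_matches), sorted(no_files), sorted(no_metadata))
# ['002', '003'] ['004'] ['001']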
from datetime import date, datetime, timedelta


# Yesterday as the request date for the client
def get_request_date():
    dt = datetime.today() - timedelta(days=1)
    return dt.strftime('%Y-%m-%d')
python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import print_function

from astropy.extern.six import BytesIO
from astropy.table import Table

from ..query import BaseQuery
from ..utils import commons
from ..utils import async_to_sync
from . import conf

__all__ = ['Heasarc', 'HeasarcClass']


@async_to_sync
class HeasarcClass(BaseQuery):

    """HEASARC query class.
    """

    URL = conf.server
    TIMEOUT = conf.timeout

    def query_object_async(self, object_name, mission, cache=True,
                           get_query_payload=False):
        """TODO: document this! (maybe start by copying over from some other
        service.)
        """
        request_payload = dict()
        request_payload['object_name'] = object_name
        request_payload['tablehead'] = ('BATCHRETRIEVALCATALOG_2.0 {}'
                                        .format(mission))
        request_payload['Action'] = 'Query'
        request_payload['displaymode'] = 'FitsDisplay'

        if get_query_payload:
            return request_payload

        response = self._request('GET', self.URL, params=request_payload,
                                 timeout=self.TIMEOUT, cache=cache)
        return response

    def _parse_result(self, response, verbose=False):
        # if verbose is False then suppress any VOTable related warnings
        if not verbose:
            commons.suppress_vo_warnings()

        data = BytesIO(response.content)
        table = Table.read(data, hdu=1)
        return table


Heasarc = HeasarcClass()
python
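Assuming the async_to_sync decorator exposes a synchronous query_object wrapper (as it does for other astroquery services), usage would look roughly like this; the object name and mission table are only examples.

from astroquery.heasarc import Heasarc

# Query a HEASARC mission table for a well-known quasar
table = Heasarc.query_object('3C 273', mission='rosmaster')
print(table.colnames)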
""" 'FRAUDAR: Bounding Graph Fraud in the Face of camouflage' Spot fraudsters in the presence of camouflage or hijacked accounts. An algorithm that is camouflage-resistant, provides upper bounds on the effectiveness of fraudsters, and the algorithm is effective in real-world data. Article: https://bhooi.github.io/papers/fraudar_kdd16.pdf """ from UGFraud.Utils.helper import * from UGFraud.Detector.Fraudar import * import copy as cp import sys import os sys.path.insert(0, os.path.abspath('../../')) def listToSparseMatrix(edgesSource, edgesDest): m = max(edgesSource) + 1 n = max(edgesDest) + 1 M = sparse.coo_matrix(([1] * len(edgesSource), (edgesSource, edgesDest)), shape=(m, n)) M1 = M > 0 return M1.astype('int') @timer def runFraudar(graph, multiple=0): new_upriors = node_attr_filter(graph, 'types', 'user', 'prior') new_rpriors = edge_attr_filter(graph, 'types', 'review', 'prior') # print('Start detection on the new graph with Fraudar') user_to_product = {} prod_to_user = {} u_id_dict = node_attr_filter(graph, 'types', 'user', 'types') for u_id in u_id_dict.keys(): if u_id not in user_to_product: user_to_product[u_id] = [] for p_id in graph[u_id].keys(): if p_id not in prod_to_user: prod_to_user[p_id] = [] user_to_product[u_id].append(p_id) prod_to_user[p_id].append(u_id) u_id2idx = {} p_id2idx = {} idx2u_id = {} idx2p_id = {} i = 0 for u_id in user_to_product.keys(): u_id2idx[u_id] = i idx2u_id[i] = u_id i += 1 i = 0 for p_id in prod_to_user.keys(): p_id2idx[p_id] = i idx2p_id[i] = p_id i += 1 edgesSource = [] edgesDest = [] for u_id in u_id_dict.keys(): for p_id in graph[u_id].keys(): edgesSource.append(u_id2idx[u_id]) edgesDest.append(p_id2idx[p_id]) M = listToSparseMatrix(edgesSource, edgesDest) # print("finished reading data ") if multiple == 0: # detect all dense blocks res = detect_blocks(M, logWeightedAveDegree) else: # detect the top #multiple dense blocks res = detectMultiple(M, logWeightedAveDegree, multiple) detected_users = {} weight_dict = {} for lwRes in res: detected_u_idx = lwRes[0][0] detected_p_idx = lwRes[0][1] weight = lwRes[1] weight_dict[weight] = weight for i in detected_u_idx: uid_tmp = idx2u_id[i] if uid_tmp not in detected_users.keys(): detected_users[uid_tmp] = weight max_den = res[0][1] min_den = res[-1][1] den_interval = max_den - min_den ranked_rpriors = [(review, new_rpriors[review]) for review in new_rpriors.keys()] ranked_rpriors = sorted(ranked_rpriors, reverse=True, key=lambda x: x[1]) r_max, r_mean, r_min = ranked_rpriors[0][1], ranked_rpriors[int(len(ranked_rpriors) / 2)][1], ranked_rpriors[-1][1] aux_rpriors = cp.deepcopy(new_rpriors) for i, p in aux_rpriors.items(): new_rpriors[i] = (p - r_min) / (r_max - r_min) user_density = {} for u in new_upriors.keys(): if u in detected_users.keys(): user_density[u] = (detected_users[u] - min_den) / den_interval else: user_density[u] = 1e-6 user_prob = {} review_prob = {} for review in new_rpriors.keys(): review_prob.update({review: 1e-6}) user_prob.update({review[0]: 1e-6}) print(len(detected_users)) print(detected_users['302']) for user in detected_users.keys(): user_prob.update({user: user_density[user]}) for prod in graph[user].keys(): review_prob.update({(user, prod): user_density[user]}) return user_prob, review_prob if __name__ == '__main__': # data source file_name = 'Yelp_graph_data.json' G = load_graph(file_name) review_ground_truth = edge_attr_filter(G, 'types', 'review', 'label') # run Fraudar on the reviews userBelief, reviewBelief = runFraudar(G, multiple=0) reviewBelief = scale_value(reviewBelief) 
review_AUC, review_AP = evaluate(review_ground_truth, reviewBelief) print('review AUC = {}'.format(review_AUC)) print('review AP = {}'.format(review_AP))
python
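A tiny, self-contained illustration of what listToSparseMatrix produces: a binary user-by-product adjacency matrix. It assumes scipy.sparse is available as `sparse`, which in the module comes in via the wildcard imports.

from scipy import sparse  # normally pulled in by the wildcard imports above

edgesSource = [0, 0, 1, 2]   # user indices
edgesDest = [0, 1, 1, 1]     # product indices
mat = listToSparseMatrix(edgesSource, edgesDest)
print(mat.toarray())
# [[1 1]
#  [0 1]
#  [0 1]]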
""" dear Nessus dev, if you want to see where there is issues with your REST API, please modify `lying_type` and `lying_exist` to become NOP """ import functools from typing import TypeVar, Mapping, Union, Callable, Any, Optional T = TypeVar('T') U = TypeVar('U') V = TypeVar('V') JsonType = Union[int, str, bool] class Object: def __repr__(self) -> str: """ more magic, we want a generic way to repr a model, so we take the current values of self and the args to the init function and try to match them together :return: repr of the model """ classname = self.__class__.__name__ init = getattr(self, '__init__') args = init.__code__.co_varnames[1:] args_str = ['{{{}!r}}'.format(a) for a in args] ret = '{classname}({args})'.format(classname=classname, args=', '.join(args_str)) values = dict() for k, v in self.__dict__.items(): if k in args: real_key = k else: real_key = next(arg for arg in args if arg.endswith(k)) values[real_key] = v return ret.format(**values) def lying_type(value: U, excepted_type: Callable[[U], Any], actual_type: Callable[[U], T] = lambda x: x, default: V = ...) -> Union[T,Any]: """ document that we excepted the given type for the given value, but it was not the case a NOP would be `return excepted_type(value)` :param value: value we got :param excepted_type: type we excepted :param actual_type: real type we got :return: type we got """ if default is not ...: return default return actual_type(value) def __default_if_args(if_no_arg: Callable[[], T], if_arg: Callable[[Any], T], *args) -> T: """ if it was given one arg, call `if_arg` with it, if got no arg, call `if_no_arg` :param if_no_arg: to call if no arg :param if_arg: to call if arg :param args: passed to `if_arg` :return: result from either `if_no_arg` or `if_arg` """ assert len(args) in (0, 1) if args: return if_arg(*args) return if_no_arg() def lying_exist_and_type(json_dict: Mapping[str, JsonType], excepted_name: str, excepted_type: Callable[[Any], T], actual_type: Callable[[Any], U], default: Optional[U] = None) -> U: if excepted_name in json_dict: return actual_type(json_dict[excepted_name]) else: return default def allow_to_exist(json_dict: Mapping[str, JsonType], excepted_name: str, excepted_type: Callable[[Any], T]) -> U: if excepted_name in json_dict: return excepted_type(json_dict[excepted_name]) else: return None def lying_exist(json_dict: Mapping[str, JsonType], excepted_name: str, excepted_type: Callable[[Any], T], default: U = ...) -> Union[T, U]: """ document that we excepted the given key, but it was not the case a NOP would be `return excepted_type(json_dict[excepted_name])` :param json_dict: where to look for the value :param excepted_name: key we excepted to find :param excepted_type: type of the value we excepted to find :param default: optional default value to return (we also use a bit of magic (`...`) to be able to pass None) :return: either the value if existing or the default """ # we use this magic to be able to pass either `int` as `excepted_type` (which can take (0, 1) arg or one of our # `model.from_json` which have to have a single arg if default is not ...: to_call = functools.partial(__default_if_args, lambda: default, excepted_type) else: to_call = excepted_type if excepted_name in json_dict: return to_call(json_dict[excepted_name]) else: return to_call()
python
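A short example of how the documentation helpers above behave, using a hypothetical JSON payload (the keys and values are made up):

payload = {'id': '42', 'enabled': 1}

scan_id = lying_type(payload['id'], excepted_type=int, actual_type=str)  # documented as int, actually str
enabled = lying_exist(payload, 'enabled', bool)                          # key present: coerced to bool
owner = lying_exist(payload, 'owner', str, default=None)                 # key absent: falls back to default
print(scan_id, enabled, owner)   # '42' True None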
from .models import redshiftdata_backends
from ..core.models import base_decorator

mock_redshiftdata = base_decorator(redshiftdata_backends)
python
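Assuming this follows the usual moto pattern, the resulting decorator would be applied to tests so that boto3 calls inside them hit the in-memory backend instead of AWS; the test name and region below are hypothetical.

import boto3

@mock_redshiftdata
def test_redshift_data_is_mocked():
    # Calls made through this client are served by the moto backend, not real AWS
    client = boto3.client("redshift-data", region_name="us-east-1")
    ...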
import time
import unittest

from cryptography.shell_game import ShellGame


class ShellGameTests(unittest.TestCase):
    def setUp(self):
        self.start_time = time.time()

    def tearDown(self):
        t = time.time() - self.start_time
        print("%s: %.3f" % (self.id(), t))

    def test_1(self):
        time.sleep(1)
        shell = ShellGame(5, [])
        self.assertEqual(5, shell.find_the_ball(), "An empty swap does nothing")

    def test_2(self):
        time.sleep(2)
        shell = ShellGame(0, [(0, 1), (2, 1), (0, 1)])
        self.assertEqual(1, shell.find_the_ball(), "Find the ball in position 2")

    def test_3(self):
        time.sleep(3)
        shell = ShellGame(4, [[0, 9], [9, 3], [3, 7], [7, 8], [8, 2], [4, 5]])
        self.assertEqual(5, shell.find_the_ball(), "Nope! Expected 5.")


if __name__ == "__main__":
    suite = unittest.TestLoader().loadTestsFromTestCase(ShellGameTests)
    unittest.TextTestRunner(verbosity=0).run(suite)
python
""" Functions for interacting with timestamps and datetime objects """ import datetime from typing import Optional def to_utc_ms(dt: datetime.datetime) -> Optional[int]: """ Convert a datetime object to UTC epoch milliseconds Returns ------- timstamp_ms : int Timestamp """ if dt is None: return None return int(dt.replace(tzinfo=datetime.timezone.utc).timestamp() * 1000.0) def from_utc_ms(utc: Optional[int]) -> Optional[datetime.datetime]: """ Convert a UTC epoch milliseconds timestamp to a datetime object Parameters ---------- utc : int Timestamp Returns ------- dt : datetime.datetime Datetime object """ if utc is None: return None return datetime.datetime.fromtimestamp(utc / 1000.0, tz=datetime.timezone.utc)
python
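A quick round-trip example of the two converters; the timestamp shown is the epoch-millisecond value for 2021-01-01 00:00:00 UTC.

import datetime

dt = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
ms = to_utc_ms(dt)
print(ms)                                  # 1609459200000
print(from_utc_ms(ms))                     # 2021-01-01 00:00:00+00:00
print(to_utc_ms(None), from_utc_ms(None))  # None None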
import datetime import uuid from typing import cast from unittest import mock from unittest.mock import ANY, patch import pytest import pytz from constance.test import override_config from django.core import mail from django.urls.base import reverse from django.utils import timezone from rest_framework import status from posthog.constants import AvailableFeature from posthog.models import Dashboard, Organization, Team, User, organization from posthog.models.organization import OrganizationInvite, OrganizationMembership from posthog.test.base import APIBaseTest from posthog.utils import get_instance_realm MOCK_GITLAB_SSO_RESPONSE = { "access_token": "123", "email": "[email protected]", "name": "John Doe", } class TestSignupAPI(APIBaseTest): @classmethod def setUpTestData(cls): # Do not set up any test data pass @pytest.mark.skip_on_multitenancy @patch("posthoganalytics.capture") def test_api_sign_up(self, mock_capture): # Ensure the internal system metrics org doesn't prevent org-creation Organization.objects.create(name="PostHog Internal Metrics", for_internal_metrics=True) response = self.client.post( "/api/signup/", { "first_name": "John", "email": "[email protected]", "password": "notsecure", "organization_name": "Hedgehogs United, LLC", "email_opt_in": False, }, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) user = cast(User, User.objects.order_by("-pk")[0]) team = cast(Team, user.team) organization = cast(Organization, user.organization) self.assertEqual( response.json(), { "id": user.pk, "uuid": str(user.uuid), "distinct_id": user.distinct_id, "first_name": "John", "email": "[email protected]", "redirect_url": "/ingestion", }, ) # Assert that the user was properly created self.assertEqual(user.first_name, "John") self.assertEqual(user.email, "[email protected]") self.assertEqual(user.email_opt_in, False) # Assert that the team was properly created self.assertEqual(team.name, "Default Project") # Assert that the org was properly created self.assertEqual(organization.name, "Hedgehogs United, LLC") # Assert that the sign up event & identify calls were sent to PostHog analytics mock_capture.assert_called_once() self.assertEqual(user.distinct_id, mock_capture.call_args.args[0]) self.assertEqual("user signed up", mock_capture.call_args.args[1]) # Assert that key properties were set properly event_props = mock_capture.call_args.kwargs["properties"] self.assertEqual(event_props["is_first_user"], True) self.assertEqual(event_props["is_organization_first_user"], True) self.assertEqual(event_props["new_onboarding_enabled"], False) self.assertEqual(event_props["signup_backend_processor"], "OrganizationSignupSerializer") self.assertEqual(event_props["signup_social_provider"], "") self.assertEqual(event_props["realm"], get_instance_realm()) # Assert that the user is logged in response = self.client.get("/api/users/@me/") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.json()["email"], "[email protected]") # Assert that the password was correctly saved self.assertTrue(user.check_password("notsecure")) @pytest.mark.skip_on_multitenancy def test_signup_disallowed_on_self_hosted_by_default(self): with self.settings(MULTI_TENANCY=False): response = self.client.post( "/api/signup/", {"first_name": "Jane", "email": "[email protected]", "password": "notsecure"}, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) response = self.client.post( "/api/signup/", {"first_name": "Jane", "email": "[email protected]", "password": "notsecure"}, ) 
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN) self.assertEqual( response.json(), { "attr": None, "code": "permission_denied", "detail": "New organizations cannot be created in this instance. Contact your administrator if you" " think this is a mistake.", "type": "authentication_error", }, ) @pytest.mark.ee def test_signup_allowed_on_self_hosted_with_env_var(self): from ee.models.license import License, LicenseManager super(LicenseManager, cast(LicenseManager, License.objects)).create( key="key_123", plan="enterprise", valid_until=timezone.datetime(2038, 1, 19, 3, 14, 7), max_users=3, ) Organization.objects.create(name="name") User.objects.create(first_name="name", email="[email protected]") count = Organization.objects.count() with self.settings(MULTI_TENANCY=False, MULTI_ORG_ENABLED=True): response = self.client.post( "/api/signup/", {"first_name": "Jane", "email": "[email protected]", "password": "notsecure"}, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.json()["email"], "[email protected]") self.assertEqual(Organization.objects.count(), count + 1) @pytest.mark.skip_on_multitenancy @patch("posthoganalytics.capture") @patch("posthoganalytics.identify") def test_signup_minimum_attrs(self, mock_identify, mock_capture): response = self.client.post( "/api/signup/", {"first_name": "Jane", "email": "[email protected]", "password": "notsecure"}, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) user = cast(User, User.objects.order_by("-pk").get()) organization = cast(Organization, user.organization) self.assertEqual( response.json(), { "id": user.pk, "uuid": str(user.uuid), "distinct_id": user.distinct_id, "first_name": "Jane", "email": "[email protected]", "redirect_url": "/ingestion", }, ) # Assert that the user & org were properly created self.assertEqual(user.first_name, "Jane") self.assertEqual(user.email, "[email protected]") self.assertEqual(user.email_opt_in, True) # Defaults to True self.assertEqual(organization.name, "Jane") # Assert that the sign up event & identify calls were sent to PostHog analytics mock_identify.assert_called_once() mock_capture.assert_called_once() self.assertEqual(user.distinct_id, mock_capture.call_args.args[0]) self.assertEqual("user signed up", mock_capture.call_args.args[1]) # Assert that key properties were set properly event_props = mock_capture.call_args.kwargs["properties"] self.assertEqual(event_props["is_first_user"], True) self.assertEqual(event_props["is_organization_first_user"], True) self.assertEqual(event_props["new_onboarding_enabled"], False) self.assertEqual(event_props["signup_backend_processor"], "OrganizationSignupSerializer") self.assertEqual(event_props["signup_social_provider"], "") self.assertEqual(event_props["realm"], get_instance_realm()) # Assert that the user is logged in response = self.client.get("/api/users/@me/") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.json()["email"], "[email protected]") # Assert that the password was correctly saved self.assertTrue(user.check_password("notsecure")) def test_cant_sign_up_without_required_attributes(self): count: int = User.objects.count() team_count: int = Team.objects.count() org_count: int = Organization.objects.count() required_attributes = [ "first_name", "email", "password", ] for attribute in required_attributes: body = { "first_name": "Jane", "email": "[email protected]", "password": "notsecure", } body.pop(attribute) # Make sure the endpoint works with and 
without the trailing slash response = self.client.post("/api/signup", body) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "required", "detail": "This field is required.", "attr": attribute, }, ) self.assertEqual(User.objects.count(), count) self.assertEqual(Team.objects.count(), team_count) self.assertEqual(Organization.objects.count(), org_count) def test_cant_sign_up_with_short_password(self): count: int = User.objects.count() team_count: int = Team.objects.count() response = self.client.post( "/api/signup/", {"first_name": "Jane", "email": "[email protected]", "password": "123"}, ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "password_too_short", "detail": "This password is too short. It must contain at least 8 characters.", "attr": "password", }, ) self.assertEqual(User.objects.count(), count) self.assertEqual(Team.objects.count(), team_count) @patch("posthoganalytics.feature_enabled") def test_default_dashboard_is_created_on_signup(self, mock_feature_enabled): """ Tests that the default web app dashboard is created on signup. Note: This feature is currently behind a feature flag. """ response = self.client.post( "/api/signup/", { "first_name": "Jane", "email": "[email protected]", "password": "notsecure", "redirect_url": "/ingestion", }, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) user: User = User.objects.order_by("-pk").get() mock_feature_enabled.assert_any_call("new-onboarding-2822", user.distinct_id) self.assertEqual( response.json(), { "id": user.pk, "uuid": str(user.uuid), "distinct_id": user.distinct_id, "first_name": "Jane", "email": "[email protected]", "redirect_url": "/personalization", }, ) dashboard: Dashboard = Dashboard.objects.first() # type: ignore self.assertEqual(dashboard.team, user.team) self.assertEqual(dashboard.items.count(), 1) self.assertEqual(dashboard.name, "Web Analytics") self.assertEqual( dashboard.items.all()[0].description, "Shows a conversion funnel from sign up to watching a movie." 
) # Particularly assert that the default dashboards are not created (because we create special demo dashboards) self.assertEqual(Dashboard.objects.filter(team=user.team).count(), 3) # Web, app & revenue demo dashboards @mock.patch("social_core.backends.base.BaseAuth.request") @pytest.mark.ee def test_api_can_use_social_login_to_create_organization_if_enabled(self, mock_request): Organization.objects.create(name="Test org") from ee.models.license import License, LicenseManager super(LicenseManager, cast(LicenseManager, License.objects)).create( key="key_123", plan="enterprise", valid_until=timezone.datetime(2038, 1, 19, 3, 14, 7), max_users=3, ) response = self.client.get(reverse("social:begin", kwargs={"backend": "gitlab"})) self.assertEqual(response.status_code, status.HTTP_302_FOUND) url = reverse("social:complete", kwargs={"backend": "gitlab"}) url += f"?code=2&state={response.client.session['gitlab_state']}" mock_request.return_value.json.return_value = MOCK_GITLAB_SSO_RESPONSE with self.settings(MULTI_ORG_ENABLED=True): response = self.client.get(url, follow=True) self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True` self.assertRedirects(response, "/signup/finish/") # page where user will create a new org @mock.patch("social_core.backends.base.BaseAuth.request") @pytest.mark.ee @pytest.mark.skip_on_multitenancy def test_api_cannot_use_social_login_to_create_organization_if_disabled(self, mock_request): Organization.objects.create(name="Test org") # Even with a valid license, because `MULTI_ORG_ENABLED` is not enabled, no new organizations will be allowed. from ee.models.license import License, LicenseManager super(LicenseManager, cast(LicenseManager, License.objects)).create( key="key_123", plan="enterprise", valid_until=timezone.datetime(2038, 1, 19, 3, 14, 7), max_users=3, ) response = self.client.get(reverse("social:begin", kwargs={"backend": "gitlab"})) self.assertEqual(response.status_code, status.HTTP_302_FOUND) url = reverse("social:complete", kwargs={"backend": "gitlab"}) url += f"?code=2&state={response.client.session['gitlab_state']}" mock_request.return_value.json.return_value = MOCK_GITLAB_SSO_RESPONSE response = self.client.get(url, follow=True) self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True` self.assertRedirects( response, "/login?error=no_new_organizations" ) # show the user an error; operation not permitted @mock.patch("social_core.backends.base.BaseAuth.request") @pytest.mark.ee def test_api_social_login_to_create_organization(self, mock_request): response = self.client.get(reverse("social:begin", kwargs={"backend": "google-oauth2"})) self.assertEqual(response.status_code, status.HTTP_302_FOUND) url = reverse("social:complete", kwargs={"backend": "google-oauth2"}) url += f"?code=2&state={response.client.session['google-oauth2_state']}" mock_request.return_value.json.return_value = MOCK_GITLAB_SSO_RESPONSE response = self.client.get(url, follow=True) self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True` self.assertRedirects(response, "/signup/finish/") # page where user will create a new org @mock.patch("social_core.backends.base.BaseAuth.request") @pytest.mark.skip_on_multitenancy @pytest.mark.ee def test_api_social_login_cannot_create_second_organization(self, mock_request): Organization.objects.create(name="Test org") response = self.client.get(reverse("social:begin", kwargs={"backend": "google-oauth2"})) self.assertEqual(response.status_code, status.HTTP_302_FOUND) url = 
reverse("social:complete", kwargs={"backend": "google-oauth2"}) url += f"?code=2&state={response.client.session['google-oauth2_state']}" mock_request.return_value.json.return_value = MOCK_GITLAB_SSO_RESPONSE response = self.client.get(url, follow=True) self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True` self.assertRedirects( response, "/login?error=no_new_organizations" ) # show the user an error; operation not permitted @mock.patch("social_core.backends.base.BaseAuth.request") @pytest.mark.skip_on_multitenancy @pytest.mark.ee def test_social_signup_with_whitelisted_domain(self, mock_request): new_org = Organization.objects.create(name="Hogflix Movies", domain_whitelist=["hogflix.posthog.com"]) new_project = Team.objects.create(organization=new_org, name="My First Project") user_count = User.objects.count() response = self.client.get(reverse("social:begin", kwargs={"backend": "google-oauth2"})) self.assertEqual(response.status_code, 302) url = reverse("social:complete", kwargs={"backend": "google-oauth2"}) url += f"?code=2&state={response.client.session['google-oauth2_state']}" mock_request.return_value.json.return_value = {"access_token": "123", "email": "[email protected]"} response = self.client.get(url, follow=True) self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True` self.assertRedirects(response, "/") self.assertEqual(User.objects.count(), user_count + 1) user = cast(User, User.objects.last()) self.assertEqual(user.email, "[email protected]") self.assertEqual(user.organization, new_org) self.assertEqual(user.team, new_project) self.assertEqual(user.organization_memberships.count(), 1) self.assertEqual( cast(OrganizationMembership, user.organization_memberships.first()).level, OrganizationMembership.Level.MEMBER, ) @mock.patch("social_core.backends.base.BaseAuth.request") @pytest.mark.ee def test_social_signup_to_existing_org_with_whitelisted_domains_is_disabled_in_cloud(self, mock_request): Organization.objects.create(name="Hogflix Movies", domain_whitelist=["hogflix.posthog.com"]) user_count = User.objects.count() org_count = Organization.objects.count() response = self.client.get(reverse("social:begin", kwargs={"backend": "google-oauth2"})) self.assertEqual(response.status_code, 302) url = reverse("social:complete", kwargs={"backend": "google-oauth2"}) url += f"?code=2&state={response.client.session['google-oauth2_state']}" mock_request.return_value.json.return_value = {"access_token": "123", "email": "[email protected]"} with self.settings(MULTI_TENANCY=True): response = self.client.get(url, follow=True) self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True` self.assertRedirects(response, "/signup/finish/") # page where user will create a new org self.assertEqual(User.objects.count(), user_count) self.assertEqual(Organization.objects.count(), org_count) @mock.patch("social_core.backends.base.BaseAuth.request") @pytest.mark.skip_on_multitenancy @pytest.mark.ee def test_api_cannot_use_whitelist_for_different_domain(self, mock_request): Organization.objects.create(name="Test org", domain_whitelist=["good.com"]) response = self.client.get(reverse("social:begin", kwargs={"backend": "google-oauth2"})) self.assertEqual(response.status_code, status.HTTP_302_FOUND) url = reverse("social:complete", kwargs={"backend": "google-oauth2"}) url += f"?code=2&state={response.client.session['google-oauth2_state']}" mock_request.return_value.json.return_value = {"access_token": "123", "email": "[email 
protected]"} response = self.client.get(url, follow=True) self.assertEqual(response.status_code, status.HTTP_200_OK) # because `follow=True` self.assertRedirects( response, "/login?error=no_new_organizations" ) # show the user an error; operation not permitted class TestInviteSignup(APIBaseTest): """ Tests the sign up process for users with an invite (i.e. existing organization). """ CONFIG_EMAIL = None # Invite pre-validation def test_api_invite_sign_up_prevalidate(self): invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) response = self.client.get(f"/api/signup/{invite.id}/") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.json(), { "id": str(invite.id), "target_email": "t*****[email protected]", "first_name": "", "organization_name": self.CONFIG_ORGANIZATION_NAME, }, ) def test_api_invite_sign_up_with_first_name_prevalidate(self): invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, first_name="Jane" ) response = self.client.get(f"/api/signup/{invite.id}/") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.json(), { "id": str(invite.id), "target_email": "t*****[email protected]", "first_name": "Jane", "organization_name": self.CONFIG_ORGANIZATION_NAME, }, ) def test_api_invite_sign_up_prevalidate_for_existing_user(self): user = self._create_user("[email protected]", "test_password") new_org = Organization.objects.create(name="Test, Inc") invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=new_org, ) self.client.force_login(user) response = self.client.get(f"/api/signup/{invite.id}/") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual( response.json(), { "id": str(invite.id), "target_email": "t*****[email protected]", "first_name": "", "organization_name": "Test, Inc", }, ) def test_api_invite_sign_up_prevalidate_invalid_invite(self): for invalid_invite in [uuid.uuid4(), "abc", "1234"]: response = self.client.get(f"/api/signup/{invalid_invite}/") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "invalid_input", "detail": "The provided invite ID is not valid.", "attr": None, }, ) def test_existing_user_cant_claim_invite_if_it_doesnt_match_target_email(self): user = self._create_user("[email protected]", "test_password") invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) self.client.force_login(user) response = self.client.get(f"/api/signup/{invite.id}/") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "invalid_recipient", "detail": "This invite is intended for another email address: t*****[email protected]." 
" You tried to sign up with [email protected].", "attr": None, }, ) def test_api_invite_sign_up_prevalidate_expired_invite(self): invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) invite.created_at = datetime.datetime(2020, 12, 1, tzinfo=pytz.UTC) invite.save() response = self.client.get(f"/api/signup/{invite.id}/") self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "expired", "detail": "This invite has expired. Please ask your admin for a new one.", "attr": None, }, ) # Signup (using invite) @patch("posthoganalytics.capture") def test_api_invite_sign_up(self, mock_capture): invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) response = self.client.post( f"/api/signup/{invite.id}/", {"first_name": "Alice", "password": "test_password", "email_opt_in": True}, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) user = cast(User, User.objects.order_by("-pk")[0]) self.assertEqual( response.json(), { "id": user.pk, "uuid": str(user.uuid), "distinct_id": user.distinct_id, "first_name": "Alice", "email": "[email protected]", }, ) # User is now a member of the organization self.assertEqual(user.organization_memberships.count(), 1) self.assertEqual(user.organization_memberships.first().organization, self.organization) # type: ignore # Defaults are set correctly self.assertEqual(user.organization, self.organization) self.assertEqual(user.team, self.team) # Assert that the user was properly created self.assertEqual(user.first_name, "Alice") self.assertEqual(user.email, "[email protected]") self.assertEqual(user.email_opt_in, True) # Assert that the sign up event & identify calls were sent to PostHog analytics mock_capture.assert_called_once() self.assertEqual(user.distinct_id, mock_capture.call_args.args[0]) self.assertEqual("user signed up", mock_capture.call_args.args[1]) # Assert that key properties were set properly event_props = mock_capture.call_args.kwargs["properties"] self.assertEqual(event_props["is_first_user"], False) self.assertEqual(event_props["is_organization_first_user"], False) self.assertEqual(event_props["new_onboarding_enabled"], False) self.assertEqual(event_props["signup_backend_processor"], "OrganizationInviteSignupSerializer") self.assertEqual(event_props["signup_social_provider"], "") self.assertEqual(event_props["realm"], get_instance_realm()) # Assert that the user is logged in response = self.client.get("/api/users/@me/") self.assertEqual(response.status_code, status.HTTP_200_OK) self.assertEqual(response.json()["email"], "[email protected]") # Assert that the password was correctly saved self.assertTrue(user.check_password("test_password")) @pytest.mark.ee def test_api_invite_sign_up_where_there_are_no_default_non_private_projects(self): self.client.logout() invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) self.organization.available_features = [AvailableFeature.PROJECT_BASED_PERMISSIONING] self.organization.save() self.team.access_control = True self.team.save() response = self.client.post( f"/api/signup/{invite.id}/", {"first_name": "Alice", "password": "test_password", "email_opt_in": True}, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) user = cast(User, User.objects.order_by("-pk")[0]) 
self.assertEqual(user.organization_memberships.count(), 1) self.assertEqual(user.organization, self.organization) # here self.assertEqual( user.current_team, None ) # User is not assigned to a project, as there are no non-private projects self.assertEqual(user.team, None) def test_api_invite_sign_up_where_default_project_is_private(self): self.client.logout() self.team.access_control = True self.team.save() team = Team.objects.create(name="Public project", organization=self.organization, access_control=False) invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) response = self.client.post( f"/api/signup/{invite.id}/", {"first_name": "Charlie", "password": "test_password"}, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) user = cast(User, User.objects.order_by("-pk")[0]) self.assertEqual(user.organization_memberships.count(), 1) self.assertEqual(user.organization, self.organization) self.assertEqual(user.current_team, team) self.assertEqual(user.team, team) def test_api_invite_sign_up_member_joined_email_is_not_sent_for_initial_member(self): invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) with self.settings(EMAIL_ENABLED=True, EMAIL_HOST="localhost", SITE_URL="http://test.posthog.com"): response = self.client.post( f"/api/signup/{invite.id}/", {"first_name": "Alice", "password": "test_password", "email_opt_in": True}, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(len(mail.outbox), 0) @override_config(EMAIL_HOST="localhost") def test_api_invite_sign_up_member_joined_email_is_sent_for_next_members(self): initial_user = User.objects.create_and_join(self.organization, "[email protected]", None) invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) with self.settings(EMAIL_ENABLED=True, SITE_URL="http://test.posthog.com"): response = self.client.post( f"/api/signup/{invite.id}/", {"first_name": "Alice", "password": "test_password", "email_opt_in": True}, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(len(mail.outbox), 1) self.assertListEqual(mail.outbox[0].to, [initial_user.email]) def test_api_invite_sign_up_member_joined_email_is_not_sent_if_disabled(self): self.organization.is_member_join_email_enabled = False self.organization.save() initial_user = User.objects.create_and_join(self.organization, "[email protected]", None) invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) with self.settings(EMAIL_ENABLED=True, EMAIL_HOST="localhost", SITE_URL="http://test.posthog.com"): response = self.client.post( f"/api/signup/{invite.id}/", {"first_name": "Alice", "password": "test_password", "email_opt_in": True}, ) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(len(mail.outbox), 0) @patch("posthoganalytics.identify") @patch("posthoganalytics.capture") def test_existing_user_can_sign_up_to_a_new_organization(self, mock_capture, mock_identify): user = self._create_user("[email protected]", "test_password") new_org = Organization.objects.create(name="TestCo") new_team = Team.objects.create(organization=new_org) invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=new_org, ) self.client.force_login(user) count = 
User.objects.count() with self.settings(MULTI_TENANCY=True): response = self.client.post(f"/api/signup/{invite.id}/") self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( response.json(), { "id": user.pk, "uuid": str(user.uuid), "distinct_id": user.distinct_id, "first_name": "", "email": "[email protected]", }, ) # No new user is created self.assertEqual(User.objects.count(), count) # User is now a member of the organization user.refresh_from_db() self.assertEqual(user.organization_memberships.count(), 2) self.assertTrue(user.organization_memberships.filter(organization=new_org).exists()) # User is now changed to the new organization self.assertEqual(user.organization, new_org) self.assertEqual(user.team, new_team) # User is not changed self.assertEqual(user.first_name, "") self.assertEqual(user.email, "[email protected]") # Assert that the sign up event & identify calls were sent to PostHog analytics mock_capture.assert_called_once_with( user.distinct_id, "user joined organization", properties={ "organization_id": str(new_org.id), "user_number_of_org_membership": 2, "org_current_invite_count": 0, "org_current_project_count": 1, "org_current_members_count": 1, }, groups={"instance": ANY, "organization": str(new_org.id)}, ) mock_identify.assert_called_once() # Assert that the user remains logged in response = self.client.get("/api/users/@me/") self.assertEqual(response.status_code, status.HTTP_200_OK) @patch("posthoganalytics.capture") def test_cannot_use_claim_invite_endpoint_to_update_user(self, mock_capture): """ Tests that a user cannot use the claim invite endpoint to change their name or password (as this endpoint does not do any checks that might be required). """ new_org = Organization.objects.create(name="TestCo") user = self._create_user("[email protected]", "test_password") user2 = self._create_user("[email protected]") user2.join(organization=new_org) Team.objects.create(organization=new_org) invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=new_org, ) self.client.force_login(user) response = self.client.post(f"/api/signup/{invite.id}/", {"first_name": "Bob", "password": "new_password"}) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual( response.json(), { "id": user.pk, "uuid": str(user.uuid), "distinct_id": user.distinct_id, "first_name": "", "email": "[email protected]", }, # note the unchanged attributes ) # User is subscribed to the new organization user.refresh_from_db() self.assertTrue(user.organization_memberships.filter(organization=new_org).exists()) # User is not changed self.assertEqual(user.first_name, "") self.assertFalse(user.check_password("new_password")) # Password is not updated # Assert that the sign up event & identify calls were sent to PostHog analytics mock_capture.assert_called_once_with( user.distinct_id, "user joined organization", properties={ "organization_id": str(new_org.id), "user_number_of_org_membership": 2, "org_current_invite_count": 0, "org_current_project_count": 1, "org_current_members_count": 2, }, groups={"instance": ANY, "organization": str(new_org.id)}, ) def test_cant_claim_sign_up_invite_without_required_attributes(self): count: int = User.objects.count() team_count: int = Team.objects.count() org_count: int = Organization.objects.count() required_attributes = [ "first_name", "password", ] invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", 
organization=self.organization, ) for attribute in required_attributes: body = { "first_name": "Charlie", "password": "test_password", } body.pop(attribute) response = self.client.post(f"/api/signup/{invite.id}/", body) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "required", "detail": "This field is required.", "attr": attribute, }, ) self.assertEqual(User.objects.count(), count) self.assertEqual(Team.objects.count(), team_count) self.assertEqual(Organization.objects.count(), org_count) def test_cant_claim_invite_sign_up_with_short_password(self): count: int = User.objects.count() team_count: int = Team.objects.count() org_count: int = Organization.objects.count() invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) response = self.client.post(f"/api/signup/{invite.id}/", {"first_name": "Charlie", "password": "123"}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "password_too_short", "detail": "This password is too short. It must contain at least 8 characters.", "attr": "password", }, ) self.assertEqual(User.objects.count(), count) self.assertEqual(Team.objects.count(), team_count) self.assertEqual(Organization.objects.count(), org_count) def test_cant_claim_invalid_invite(self): count: int = User.objects.count() team_count: int = Team.objects.count() org_count: int = Organization.objects.count() response = self.client.post( f"/api/signup/{uuid.uuid4()}/", {"first_name": "Charlie", "password": "test_password"} ) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "invalid_input", "detail": "The provided invite ID is not valid.", "attr": None, }, ) self.assertEqual(User.objects.count(), count) self.assertEqual(Team.objects.count(), team_count) self.assertEqual(Organization.objects.count(), org_count) def test_cant_claim_expired_invite(self): count: int = User.objects.count() team_count: int = Team.objects.count() org_count: int = Organization.objects.count() invite: OrganizationInvite = OrganizationInvite.objects.create( target_email="[email protected]", organization=self.organization, ) invite.created_at = datetime.datetime(2020, 3, 3, tzinfo=pytz.UTC) invite.save() response = self.client.post(f"/api/signup/{invite.id}/", {"first_name": "Charlie", "password": "test_password"}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "expired", "detail": "This invite has expired. 
Please ask your admin for a new one.", "attr": None, }, ) self.assertEqual(User.objects.count(), count) self.assertEqual(Team.objects.count(), team_count) self.assertEqual(Organization.objects.count(), org_count) # Social signup (use invite) def test_api_social_invite_sign_up(self): Organization.objects.all().delete() # Can only create organizations in fresh instances # simulate SSO process started session = self.client.session session.update({"backend": "google-oauth2"}) session.save() response = self.client.post("/api/social_signup", {"organization_name": "Tech R Us", "email_opt_in": False}) self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(response.json(), {"continue_url": "/complete/google-oauth2/"}) # Check the values were saved in the session self.assertEqual(self.client.session.get("organization_name"), "Tech R Us") self.assertEqual(self.client.session.get("email_opt_in"), False) self.assertEqual(self.client.session.get_expiry_age(), 3600) def test_cannot_use_social_invite_sign_up_if_social_session_is_not_active(self): Organization.objects.all().delete() # Can only create organizations in fresh instances response = self.client.post("/api/social_signup", {"organization_name": "Tech R Us", "email_opt_in": False}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "invalid_input", "detail": "Inactive social login session. Go to /login and log in before continuing.", "attr": None, }, ) self.assertEqual(len(self.client.session.keys()), 0) # Nothing is saved in the session def test_cannot_use_social_invite_sign_up_without_required_attributes(self): Organization.objects.all().delete() # Can only create organizations in fresh instances response = self.client.post("/api/social_signup", {"email_opt_in": False}) self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) self.assertEqual( response.json(), { "type": "validation_error", "code": "required", "detail": "This field is required.", "attr": "organization_name", }, ) self.assertEqual(len(self.client.session.keys()), 0) # Nothing is saved in the session
python
name = 'libseq'
from libseq.libseq import *
python
import eel

if __name__ == '__main__':
    eel.init('web')
    eel.start('index.html', mode="chrome", size=(1296, 775))
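# A minimal sketch of how a function in this script could be exposed to the page
# served from web/index.html, assuming eel's standard @eel.expose decorator; the
# function name and the JavaScript call mentioned in the comment are illustrative,
# not part of the original script.
import eel

eel.init('web')

@eel.expose  # callable from the page as eel.say_hello('world')
def say_hello(name):
    return f"Hello, {name}!"

eel.start('index.html', mode="chrome", size=(1296, 775))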
python
import os


def create_termuxconfig():
    ATTR = ["API_ID", "API_HASH", "SESSION", "DB_URI", "LOG_CHAT", "TOKEN"]
    file = open("termuxconfig.py", "w+")
    file.write("class Termuxconfig:\n\ttemp = 'value'\n")
    for x in ATTR:
        myvar = vars()  # string to variable
        if x == "DB_URI":
            value = createdb()
        else:
            data = input(f"\nEnter your {x}: ")
            # LOG_CHAT is a numeric chat id; everything else is stored as a quoted string
            value = int(data) if data and x == "LOG_CHAT" else f"'{data}'"
        myvar[x] = value
        file.write(f"""\t{x.replace('"', "")} = {value}\n""")
    file.close()
    return True


def startdb():
    if os.path.exists("/data/data/com.termux/files/usr/var/lib/postgresql"):
        os.system("pg_ctl -D $PREFIX/var/lib/postgresql start")
    else:
        try:
            from termuxconfig import Termuxconfig
        except (ImportError, ModuleNotFoundError):
            os.system("cd ~ && cd Tron && ./start.sh")
        try:
            Termuxconfig.DB_URI
        except AttributeError:
            file = open("termuxconfig.py", "a")
            file.write(f"\tDB_URI = {createdb()}\n")
            file.close()


def createdb():
    os.system("pkg install postgresql")
    os.system("clear")
    os.system("mkdir -p $PREFIX/var/lib/postgresql")
    os.system("initdb $PREFIX/var/lib/postgresql")
    os.system("clear")
    username = str(input("\nEnter your database account username: "))
    password = str(input("\nEnter your database account password: "))
    dbname = str(input("\nEnter your database name: "))
    print("\n")
    os.system(f"createuser --superuser --pwprompt {username}")
    os.system(f"createdb {dbname}")
    os.system("pg_ctl -D $PREFIX/var/lib/postgresql start")
    return f"'postgres://{username}:{password}@127.0.0.1:5432/{dbname}'"
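# For illustration only: with made-up answers to the prompts above,
# create_termuxconfig() leaves behind a termuxconfig.py shaped roughly like this
# (tab-indented in the real file, every value invented):
#
# class Termuxconfig:
#     temp = 'value'
#     API_ID = '123456'
#     API_HASH = '0123456789abcdef'
#     SESSION = '1ABcdEf...'
#     DB_URI = 'postgres://user:secret@127.0.0.1:5432/mydb'
#     LOG_CHAT = -1001234567890
#     TOKEN = '12345:ABCdefGhIJKlmNoPQRsTUVwxyZ'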
python
# GUI frame for the sineTransformations_function.py try: # for Python2 from Tkinter import * ## notice capitalized T in Tkinter import tkFileDialog, tkMessageBox except ImportError: # for Python3 from tkinter import * ## notice lowercase 't' in tkinter here from tkinter import filedialog as tkFileDialog from tkinter import messagebox as tkMessageBox import sys, os from scipy.io.wavfile import read import numpy as np import sineTransformations_function as sT sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../models/')) import utilFunctions as UF class SineTransformations_frame: def __init__(self, parent): self.parent = parent self.initUI() def initUI(self): choose_label = "inputFile:" Label(self.parent, text=choose_label).grid(row=0, column=0, sticky=W, padx=5, pady=(10,2)) #TEXTBOX TO PRINT PATH OF THE SOUND FILE self.filelocation = Entry(self.parent) self.filelocation.focus_set() self.filelocation["width"] = 32 self.filelocation.grid(row=0,column=0, sticky=W, padx=(70, 5), pady=(10,2)) self.filelocation.delete(0, END) self.filelocation.insert(0, '../../sounds/mridangam.wav') #BUTTON TO BROWSE SOUND FILE open_file = Button(self.parent, text="...", command=self.browse_file) #see: def browse_file(self) open_file.grid(row=0, column=0, sticky=W, padx=(340, 6), pady=(10,2)) #put it beside the filelocation textbox #BUTTON TO PREVIEW SOUND FILE preview = Button(self.parent, text=">", command=lambda:UF.wavplay(self.filelocation.get()), bg="gray30", fg="white") preview.grid(row=0, column=0, sticky=W, padx=(385,6), pady=(10,2)) ## SINE TRANSFORMATIONS ANALYSIS #ANALYSIS WINDOW TYPE wtype_label = "window:" Label(self.parent, text=wtype_label).grid(row=1, column=0, sticky=W, padx=5, pady=(10,2)) self.w_type = StringVar() self.w_type.set("hamming") # initial value window_option = OptionMenu(self.parent, self.w_type, "rectangular", "hanning", "hamming", "blackman", "blackmanharris") window_option.grid(row=1, column=0, sticky=W, padx=(65,5), pady=(10,2)) #WINDOW SIZE M_label = "M:" Label(self.parent, text=M_label).grid(row=1, column=0, sticky=W, padx=(180, 5), pady=(10,2)) self.M = Entry(self.parent, justify=CENTER) self.M["width"] = 5 self.M.grid(row=1,column=0, sticky=W, padx=(200,5), pady=(10,2)) self.M.delete(0, END) self.M.insert(0, "801") #FFT SIZE N_label = "N:" Label(self.parent, text=N_label).grid(row=1, column=0, sticky=W, padx=(255, 5), pady=(10,2)) self.N = Entry(self.parent, justify=CENTER) self.N["width"] = 5 self.N.grid(row=1,column=0, sticky=W, padx=(275,5), pady=(10,2)) self.N.delete(0, END) self.N.insert(0, "2048") #THRESHOLD MAGNITUDE t_label = "t:" Label(self.parent, text=t_label).grid(row=1, column=0, sticky=W, padx=(330,5), pady=(10,2)) self.t = Entry(self.parent, justify=CENTER) self.t["width"] = 5 self.t.grid(row=1, column=0, sticky=W, padx=(348,5), pady=(10,2)) self.t.delete(0, END) self.t.insert(0, "-90") #MIN DURATION SINUSOIDAL TRACKS minSineDur_label = "minSineDur:" Label(self.parent, text=minSineDur_label).grid(row=2, column=0, sticky=W, padx=(5, 5), pady=(10,2)) self.minSineDur = Entry(self.parent, justify=CENTER) self.minSineDur["width"] = 5 self.minSineDur.grid(row=2, column=0, sticky=W, padx=(87,5), pady=(10,2)) self.minSineDur.delete(0, END) self.minSineDur.insert(0, "0.01") #MAX NUMBER OF SINES maxnSines_label = "maxnSines:" Label(self.parent, text=maxnSines_label).grid(row=2, column=0, sticky=W, padx=(145,5), pady=(10,2)) self.maxnSines = Entry(self.parent, justify=CENTER) self.maxnSines["width"] = 5 self.maxnSines.grid(row=2, column=0, 
sticky=W, padx=(220,5), pady=(10,2)) self.maxnSines.delete(0, END) self.maxnSines.insert(0, "150") #FREQUENCY DEVIATION ALLOWED freqDevOffset_label = "freqDevOffset:" Label(self.parent, text=freqDevOffset_label).grid(row=2, column=0, sticky=W, padx=(280,5), pady=(10,2)) self.freqDevOffset = Entry(self.parent, justify=CENTER) self.freqDevOffset["width"] = 5 self.freqDevOffset.grid(row=2, column=0, sticky=W, padx=(372,5), pady=(10,2)) self.freqDevOffset.delete(0, END) self.freqDevOffset.insert(0, "20") #SLOPE OF THE FREQUENCY DEVIATION freqDevSlope_label = "freqDevSlope:" Label(self.parent, text=freqDevSlope_label).grid(row=3, column=0, sticky=W, padx=(5,5), pady=(10,2)) self.freqDevSlope = Entry(self.parent, justify=CENTER) self.freqDevSlope["width"] = 5 self.freqDevSlope.grid(row=3, column=0, sticky=W, padx=(98,5), pady=(10,2)) self.freqDevSlope.delete(0, END) self.freqDevSlope.insert(0, "0.02") #BUTTON TO DO THE ANALYSIS OF THE SOUND self.compute = Button(self.parent, text="Analysis/Synthesis", command=self.analysis, bg="dark red", fg="white") self.compute.grid(row=4, column=0, padx=5, pady=(10,5), sticky=W) #BUTTON TO PLAY ANALYSIS/SYNTHESIS OUTPUT self.output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation.get())[:-4] + '_sineModel.wav'), bg="gray30", fg="white") self.output.grid(row=4, column=0, padx=(145,5), pady=(10,5), sticky=W) ### #SEPARATION LINE Frame(self.parent,height=1,width=50,bg="black").grid(row=5, pady=5, sticky=W+E) ### #FREQUENCY SCALING FACTORS freqScaling_label = "Frequency scaling factors (time, value pairs):" Label(self.parent, text=freqScaling_label).grid(row=6, column=0, sticky=W, padx=5, pady=(5,2)) self.freqScaling = Entry(self.parent, justify=CENTER) self.freqScaling["width"] = 35 self.freqScaling.grid(row=7, column=0, sticky=W+E, padx=5, pady=(0,2)) self.freqScaling.delete(0, END) self.freqScaling.insert(0, "[0, 2.0, 1, .3]") #TIME SCALING FACTORS timeScaling_label = "Time scaling factors (in time, value pairs):" Label(self.parent, text=timeScaling_label).grid(row=8, column=0, sticky=W, padx=5, pady=(5,2)) self.timeScaling = Entry(self.parent, justify=CENTER) self.timeScaling["width"] = 35 self.timeScaling.grid(row=9, column=0, sticky=W+E, padx=5, pady=(0,2)) self.timeScaling.delete(0, END) self.timeScaling.insert(0, "[0, .0, .671, .671, 1.978, 1.978+1.0]") #BUTTON TO DO THE SYNTHESIS self.compute = Button(self.parent, text="Apply Transformation", command=self.transformation_synthesis, bg="dark green", fg="white") self.compute.grid(row=13, column=0, padx=5, pady=(10,15), sticky=W) #BUTTON TO PLAY TRANSFORMATION SYNTHESIS OUTPUT self.transf_output = Button(self.parent, text=">", command=lambda:UF.wavplay('output_sounds/' + os.path.basename(self.filelocation.get())[:-4] + '_sineModelTransformation.wav'), bg="gray30", fg="white") self.transf_output.grid(row=13, column=0, padx=(165,5), pady=(10,15), sticky=W) # define options for opening file self.file_opt = options = {} options['defaultextension'] = '.wav' options['filetypes'] = [('All files', '.*'), ('Wav files', '.wav')] options['initialdir'] = '../../sounds/' options['title'] = 'Open a mono audio file .wav with sample frequency 44100 Hz' def browse_file(self): self.filename = tkFileDialog.askopenfilename(**self.file_opt) #set the text of the self.filelocation self.filelocation.delete(0, END) self.filelocation.insert(0,self.filename) def analysis(self): try: inputFile = self.filelocation.get() window = self.w_type.get() M = int(self.M.get()) N = 
int(self.N.get()) t = int(self.t.get()) minSineDur = float(self.minSineDur.get()) maxnSines = int(self.maxnSines.get()) freqDevOffset = int(self.freqDevOffset.get()) freqDevSlope = float(self.freqDevSlope.get()) self.inputFile, self.fs, self.tfreq, self.tmag = sT.analysis(inputFile, window, M, N, t, minSineDur, maxnSines, freqDevOffset, freqDevSlope) except ValueError: tkMessageBox.showerror("Input values error", "Some parameters are incorrect") def transformation_synthesis(self): try: inputFile = self.inputFile fs = self.fs tfreq = self.tfreq tmag = self.tmag freqScaling = np.array(eval(self.freqScaling.get())) timeScaling = np.array(eval(self.timeScaling.get())) sT.transformation_synthesis(inputFile, fs, tfreq, tmag, freqScaling, timeScaling) except ValueError as errorMessage: tkMessageBox.showerror("Input values error", errorMessage) except AttributeError: tkMessageBox.showerror("Analysis not computed", "First you must analyse the sound!")
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Mar 1 18:44:04 2018 @author: JavaWizards """ import numpy as np file = "/Users/nuno_chicoria/Downloads/b_should_be_easy.in" handle = open(file) R, C, F, N, B, T = handle.readline().split() rides = [] index = [] for i in range(int(N)): index.append(i) for line in handle: rides.append(line.split()) rides_np = np.asarray(rides) rides_np = np.column_stack([rides_np, index]) rides_np = rides_np.astype(np.int) rides_np = rides_np[rides_np[:,5].argsort()] vehicles = {} for i in range(int(F)): vehicles [i] = ["A", [0, 0], [0, 0], [0, 0], []] for i in range(int(T)): rides_np = rides_np[rides_np[:,5] > i] for item in range(len(vehicles)): if vehicles[item][0] == "A": if rides_np.size != 0: if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i >= rides_np[0, 4]: if abs(vehicles[item][1][0] - rides_np[0, 0]) + abs(vehicles[item][1][1] - rides_np[0, 1]) + i + abs(rides_np[0,0] - rides_np[0,2]) + abs(rides_np[0,1] - rides_np[0,3]) <= rides_np[0, 5]: vehicles[item][0] = "C" vehicles[item][2] = [rides_np[0, 0], rides_np[0, 1]] vehicles[item][3] = [rides_np[0, 2], rides_np[0, 3]] vehicles[item][4].append(rides_np[0, 6]) rides_np = np.delete(rides_np, (0), axis=0) else: rides_np = np.delete(rides_np, (0), axis=0) for item in range(len(vehicles)): if vehicles[item][0] == "C": if vehicles[item][1][0] < vehicles[item][2][0]: vehicles[item][1][0] = vehicles[item][1][0] + 1 elif vehicles[item][1][0] > vehicles[item][2][0]: vehicles[item][1][0] = vehicles[item][1][0] - 1 elif vehicles[item][1][0] == vehicles[item][2][0]: if vehicles[item][1][1] < vehicles[item][2][1]: vehicles[item][1][1] = vehicles[item][1][1] + 1 elif vehicles[item][1][1] > vehicles[item][2][1]: vehicles[item][1][1] = vehicles[item][1][1] - 1 else: vehicles[item][0] = "D" for item in range(len(vehicles)): if vehicles[item][0] == "D": if vehicles[item][1][0] < vehicles[item][3][0]: vehicles[item][1][0] += 1 elif vehicles[item][1][0] > vehicles[item][3][0]: vehicles[item][1][0] -= 1 elif vehicles[item][1][0] == vehicles[item][3][0]: if vehicles[item][1][1] < vehicles[item][3][1]: vehicles[item][1][1] += 1 elif vehicles[item][1][1] > vehicles[item][3][1]: vehicles[item][1][1] -= 1 else: vehicles[item][0] = "A" vehicles[item][2] = None vehicles[item][3] = None results = open("ghc2018.txt", "w+") for item in range(len(vehicles)): if len(vehicles[item][4]) !=0: results.write(str(len(vehicles[item][4]))) for ride in vehicles[item][4]: results.write(" ") results.write(str(ride)) results.write("\n") results.close()
python
"""" Animation code source: https://gist.github.com/DanielTakeshi/fec9a5cd957eb05b04b6d06a16cc88ae """ import argparse import time import imageio from PIL import Image import numpy as np import torch as T import gym import rl.environments def evaluate(agent, env, EE, max_el, exp_name, gif=False): print('[ Evaluation ]') EZ = [] # Evaluation episodic return ES = [] # Evaluation episodic score EL = [] # Evaluation episodic if gif: GifObs = [] for ee in range(1, EE+1): print(f' [ Episode {ee} Agent Evaluation ] ') o, d, Z, S, el = env.reset(), False, 0, 0, 0 while not(d or (el == max_el)): print(f' [ Step {el} Agent Simulation ] ', end='\r') if gif: gifobs = env.render(mode='rgb_array', width=400, height=400) GifObs.append(gifobs) # Take deterministic actions at evaluation time pi, _ = agent(o, deterministic=True) a = pi.cpu().numpy() o, r, d, info = env.step(a) Z += r S = 0# += info['score'] el += 1 EZ.append(Z) ES.append(S/el) EL.append(el) env.close() print('\nlen(GifObs): ', len(GifObs)) if gif: print(' [ Saving a gif for evaluation ] ') exp_path = f'./gifs/{exp_name}.gif' with imageio.get_writer(exp_path, mode='I', duration=0.01) as writer: for obs_np in GifObs: writer.append_data(obs_np) # print(' [ Saving a jpg for evaluation ] ') # im = Image.fromarray(GifObs[50]) # im.save(f'./jpgs/{exp_name}.jpeg') return EZ, ES, EL def main(agent, env, alg, seed=0, epoch=0, metric='return', EE=10, gif=False): print('\n') print('=' * 50) print(f'Starting a new evaluation') print(f"\t Algorithm: {alg}") print(f"\t Environment: {env}") print(f"\t Random seed: {seed}") print(f"\t Epoch: {epoch}") print(f"\t Metric: {metric}") print('=' * 50) exp_name = f'{env}-{alg}-seed:{seed}' eval_env = gym.make(env) # eval_env.seed(seed) # eval_env.action_space.seed(seed) # eval_env.observation_space.seed(seed) max_el = eval_env.env.spec.max_episode_steps logs = dict() agent.eval() eval_start_real = time.time() EZ, ES, EL = evaluate(agent, eval_env, EE, max_el, exp_name, gif) logs['time/evaluation'] = time.time() - eval_start_real if metric == 'score': logs['evaluation/episodic_score_mean'] = np.mean(ES) logs['evaluation/episodic_score_std'] = np.std(ES) else: logs['evaluation/episodic_return_mean'] = np.mean(EZ) logs['evaluation/episodic_return_std'] = np.std(EZ) logs['evaluation/episodic_length_mean'] = np.mean(EL) for k, v in logs.items(): print(f'{k}: {round(v, 2)}') print('\n') print('End of the evaluation') print('=' * 50) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('-env', type=str) parser.add_argument('-alg', type=str) parser.add_argument('-seed', type=int) parser.add_argument('-epoch', type=int) parser.add_argument('-EE', type=int) parser.add_argument('-metric', type=str) parser.add_argument('-gif', nargs='?', const=True, type=bool) args = parser.parse_args() agent_path = f'./saved_agents/{args.env}-{args.alg}-seed:{args.seed}-epoch:{args.epoch}' + '.pth.tar' agent = T.load(agent_path) kwaergs = vars(args) main(agent, **kwaergs)
python
import numpy as np

from .Classifier import Classifier


class NearestNeighbourClassifier(Classifier):
    def __init__(self) -> None:
        self.x = np.array([])
        self.y = np.array([])

    def fit(self, x: np.ndarray, y: np.ndarray) -> None:
        """ Fit the training data to the classifier.

        Args:
            x (np.ndarray): Instances, numpy array with shape (N,K)
            y (np.ndarray): Class labels, numpy array with shape (N,)
        """
        self.x = x
        self.y = y

    def predict(self, x: np.ndarray) -> np.ndarray:
        """ Perform prediction given some examples.

        Args:
            x (np.ndarray): Instances, numpy array with shape (N,K)

        Returns:
            y (np.ndarray): Predicted class labels, numpy array with shape (N,)
        """
        min_elem_indices = np.empty(len(x), dtype=int)
        for i, e in enumerate(x):
            distances = np.empty(len(self.x))
            for j, v in enumerate(self.x):
                if j == i:
                    # Skip the training instance at the same index; this only has an
                    # effect when predict() is run on the training set itself
                    # (leave-one-out style comparison).
                    distances[j] = np.inf
                else:
                    distances[j] = np.linalg.norm(e - v)
            min_elem_indices[i] = np.argmin(distances)
        return self.y[min_elem_indices]
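# A minimal usage sketch for the class above; the toy coordinates and labels are
# made up for illustration, and Classifier is assumed to be the abstract base
# class this module already imports.
import numpy as np

clf = NearestNeighbourClassifier()
x_train = np.array([[0.0, 0.0], [0.1, 0.2], [5.0, 5.1], [4.9, 5.0]])
y_train = np.array([0, 0, 1, 1])
clf.fit(x_train, y_train)

x_test = np.array([[0.05, 0.1], [5.2, 4.8]])
print(clf.predict(x_test))  # expected: [0 1]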
python
from sys import platform import sys try: import caffe except ImportError: print("This sample can only be run if Python Caffe if available on your system") print("Currently OpenPose does not compile Python Caffe. This may be supported in the future") sys.exit(-1) import os os.environ["GLOG_minloglevel"] = "1" import caffe import cv2 import numpy as np import sys import time dir_path = os.path.dirname(os.path.realpath(__file__)) sys.path.append('../../python') dir_path + "/../../models/" try: from openpose import OpenPose except: raise Exception('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?') # Params for change defRes = 736 scales = [1,0.5] class Param: caffemodel = dir_path + "/../../../models/pose/body_25/pose_iter_584000.caffemodel" prototxt = dir_path + "/../../../models/pose/body_25/pose_deploy.prototxt" # Load OpenPose object and Caffe Nets params = dict() params["logging_level"] = 3 params["output_resolution"] = "-1x-1" params["net_resolution"] = "-1x"+str(defRes) params["model_pose"] = "BODY_25" params["alpha_pose"] = 0.6 params["scale_gap"] = 0.5 params["scale_number"] = len(scales) params["render_threshold"] = 0.05 params["num_gpu_start"] = 0 params["disable_blending"] = False params["default_model_folder"] = dir_path + "/../../../models/" openpose = OpenPose(params) caffe.set_mode_gpu() caffe.set_device(0) nets = [] for scale in scales: nets.append(caffe.Net(Param.prototxt, Param.caffemodel, caffe.TEST)) print("Net loaded") # Test Function first_run = True def func(frame): # Get image processed for network, and scaled image imagesForNet, imagesOrig = OpenPose.process_frames(frame, defRes, scales) # Reshape global first_run if first_run: for i in range(0, len(scales)): net = nets[i] imageForNet = imagesForNet[i] in_shape = net.blobs['image'].data.shape in_shape = (1, 3, imageForNet.shape[1], imageForNet.shape[2]) net.blobs['image'].reshape(*in_shape) net.reshape() first_run = False print("Reshaped") # Forward pass to get heatmaps heatmaps = [] for i in range(0, len(scales)): net = nets[i] imageForNet = imagesForNet[i] net.blobs['image'].data[0,:,:,:] = imageForNet net.forward() heatmaps.append(net.blobs['net_output'].data[:,:,:,:]) # Pose from HM Test array, frame = openpose.poseFromHM(frame, heatmaps, scales) # Draw Heatmaps instead #hm = heatmaps[0][:,0:18,:,:]; frame = OpenPose.draw_all(imagesOrig[0], hm, -1, 1, True) #paf = heatmaps[0][:,20:,:,:]; frame = OpenPose.draw_all(imagesOrig[0], paf, -1, 4, False) return frame img = cv2.imread(dir_path + "/../../../examples/media/COCO_val2014_000000000192.jpg") frame = func(img) while 1: cv2.imshow("output", frame) cv2.waitKey(15)
python
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

import os
import sys
import json

from src.api_reader import get_members
from src.intellisense import IntellisenseSchema
from src.version import schema_version, sdk_go_version

if __name__ == '__main__':
    model_dir = os.path.join(os.path.dirname(__file__), "model")
    api_file = os.path.join(model_dir, "api.json")
    api = json.load(open(api_file))
    doc_file = os.path.join(model_dir, "docs.json")
    doc = json.load(open(doc_file))

    operation = 'RegisterTaskDefinitionRequest'
    if operation not in api['shapes']:
        sys.exit('Operation "{op}" not found under "shapes"'.format(op=operation))

    reference, required = get_members(api, operation)
    intellisense = IntellisenseSchema(api, doc, schema_version, sdk_go_version)
    schema = intellisense.build(reference, required, operation)

    schema_dir = os.path.join(model_dir, "schema")
    intellisense.write(schema_dir, schema)
python
from setuptools import setup
import platform

if platform.system() == 'Windows':
    setup(
        name='imagesimilarity',
        version='0.1.2',
        packages=[''],
        url='https://github.com/marvinferber/imagesimilarity',
        license='Apache License 2.0',
        author='Marvin Ferber',
        author_email='[email protected]',
        description='Find and display images that are similar.',
        install_requires=[
            'wxPython>=4',
            'Pillow>=7',
            'tensorflow==2.0.2',
            'tensorflow_hub',
            'annoy>=1.17',
            'setuptools==44',
            'pyinstaller @ https://github.com/pyinstaller/pyinstaller/archive/develop.tar.gz'
        ]
    )
else:
    setup(
        name='imagesimilarity',
        version='0.1.2',
        packages=[''],
        url='https://github.com/marvinferber/imagesimilarity',
        license='Apache License 2.0',
        author='Marvin Ferber',
        author_email='[email protected]',
        description='Find and display images that are similar.',
        install_requires=[
            'wxPython>=4',
            'Pillow>=7',
            'tensorflow==2.0.2',
            'tensorflow_hub',
            'annoy>=1.17',
            'setuptools==44',
            'pyinstaller @ https://github.com/pyinstaller/pyinstaller/archive/develop.tar.gz'
        ]
    )
python
from bytecodemanipulation import (
    CodeOptimiser,
    Emulator,
    InstructionMatchers,
    MutableCodeObject,
    OptimiserAnnotations,
)
from bytecodemanipulation.TransformationHelper import BytecodePatchHelper
from bytecodemanipulation.Transformers import TransformationHandler
from bytecodemanipulation.util import Opcodes
python
# The goal of this program is to print the multiplication table of the number the user types in.
n = int(input('Digite um número para ver sua taboada: '))
print('-=' * 10)
print("{} x {:2} = {} ".format(n, 1, n * 1))
print("{} x {:2} = {} ".format(n, 2, n * 2))
print("{} x {:2} = {} ".format(n, 3, n * 3))
print("{} x {:2} = {} ".format(n, 4, n * 4))
print("{} x {:2} = {} ".format(n, 5, n * 5))
print("{} x {:2} = {} ".format(n, 6, n * 6))
print("{} x {:2} = {} ".format(n, 7, n * 7))
print("{} x {:2} = {} ".format(n, 8, n * 8))
print("{} x {:2} = {} ".format(n, 9, n * 9))
print("{} x {:2} = {} ".format(n, 10, n * 10))
print('-=' * 10)
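# The same table written with a for loop; a sketch of a more compact equivalent,
# not part of the original exercise.
n = int(input('Digite um número para ver sua taboada: '))
print('-=' * 10)
for i in range(1, 11):
    print("{} x {:2} = {} ".format(n, i, n * i))
print('-=' * 10)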
python
# project/server/models.py from flask import current_app from project.server import db, bcrypt class User(db.Model): __tablename__ = 'users' id = db.Column(db.Integer, primary_key=True, autoincrement=True) email = db.Column(db.String(255), unique=True, nullable=False) password = db.Column(db.String(255), nullable=False) admin = db.Column(db.Boolean, nullable=False, default=False) projects = db.relationship('Project', backref='users', lazy=True) def __init__(self, email, password, admin=False): self.email = email self.password = bcrypt.generate_password_hash( password, current_app.config.get('BCRYPT_LOG_ROUNDS') ).decode('utf-8') self.admin = admin def is_authenticated(self): return True def is_active(self): return True def is_anonymous(self): return False def get_id(self): return self.id def __repr__(self): return '<User {0}>'.format(self.email) class Project(db.Model): __tablename__ = 'projects' id = db.Column(db.Integer, primary_key=True, autoincrement=True) name = db.Column(db.String(120), nullable=False) url = db.Column(db.String, nullable=False) status = db.Column(db.Boolean, nullable=False, default=False) user_id = db.Column( db.Integer, db.ForeignKey('users.id'), nullable=False ) builds = db.relationship('Build', backref='builds', lazy=True) def __init__(self, user_id, name, url, status=False): self.user_id = user_id self.name = name self.url = url self.status = status class Build(db.Model): __tablename__ = 'builds' id = db.Column(db.Integer, primary_key=True, autoincrement=True) status = db.Column(db.Boolean, nullable=False) datetime = db.Column(db.DateTime, nullable=False) project_id = db.Column( db.Integer, db.ForeignKey('projects.id'), nullable=False ) def __init__(self, project_id, status, datetime): self.project_id = project_id self.status = status self.datetime = datetime def to_json(self): return { 'id': self.id, 'project_id': self.project_id, 'status': self.status, 'datetime': self.datetime }
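# A minimal usage sketch for the models above, assuming an active Flask
# application context and the SQLAlchemy `db` instance imported by this module;
# the email, URL and timestamp are made up for illustration.
from datetime import datetime

from project.server import db
from project.server.models import User, Project, Build

user = User(email='admin@example.com', password='changeme')
db.session.add(user)
db.session.commit()

project = Project(user_id=user.id, name='demo', url='https://example.com/demo.git')
db.session.add(project)
db.session.commit()

build = Build(project_id=project.id, status=True, datetime=datetime.utcnow())
db.session.add(build)
db.session.commit()
print(build.to_json())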
python
# Copyright (C) 2021 Satoru SATOH <[email protected]>
# SPDX-License-Identifier: MIT
#
"""Entry point of tests.common.*.
"""
from .base import (
    MaybeModT, Base
)
from .constants import (
    TESTS_DIR, TESTS_RES_DIR, RULES_DIR,
)
from .testcases import (
    RuleTestCase, CliTestCase
)

__all__ = [
    'TESTS_DIR', 'TESTS_RES_DIR', 'RULES_DIR',
    'MaybeModT', 'Base',
    'RuleTestCase', 'CliTestCase',
]
python
import pytest
import logging
from multiprocessing.process import current_process
from threading import current_thread
import time

logging.basicConfig(filename="log.txt", filemode="w")
log = logging.getLogger()
log.setLevel(logging.DEBUG)

handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(levelname)s - %(message)s")
handler.setFormatter(formatter)
log.addHandler(handler)


def pytest_configure(config):
    print("pytest_configure")
    logging.info("pytest_configure")
    # d("configure")
    # if not hasattr(config, 'slaveinput'):
    #     d("slave input")


def pytest_sessionstart(session):
    logging.info("pytest_sessionstart")
    print("pytest_sessionstart")
    # d("session start")


def pytest_runtest_setup(item):
    # called for running each test in 'a' directory
    print("setting up", item)
python
from os import listdir

import core.log as log


async def main(message, client, serverdata):
    #Part 1
    commandfiles = listdir("./core/commands")
    commandList = []

    #Check if Command is a file
    for commands in commandfiles:
        if commands.endswith('.py'):
            commandList.append(commands.replace(".py", ""))

    #Get Variables
    messageContentList = message.content.split()
    command = messageContentList.pop(0).replace("-", "").lower()
    args = messageContentList

    #Execute Command
    if command in commandList:
        commandexecute = __import__('core.commands.{}'.format(command), fromlist=[None])
        await commandexecute.main(message, args, client, serverdata)
        await log.command.main(message, serverdata)
    else:
        if str(message.guild.id) in serverdata:
            commandfiles = listdir("./core/commands/special")
            commandList = []

            #Check if Command is a file
            for commands in commandfiles:
                if commands.endswith('.py'):
                    commandList.append(commands.replace(".py", ""))

            #Get Variables
            messageContentList = message.content.split()
            command = messageContentList.pop(0).replace("-", "").lower()
            args = messageContentList

            #Execute Command
            if command not in commandList:
                return
            commandexecute = __import__('core.commands.special.{}'.format(command), fromlist=[None])
            await commandexecute.main(message, args, client, serverdata)
            await log.command.main(message, serverdata)
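# A sketch of what a command module loaded by the dispatcher above could look
# like; the path core/commands/ping.py and its behaviour are hypothetical, only
# the main(message, args, client, serverdata) signature comes from the call site.

# core/commands/ping.py
async def main(message, args, client, serverdata):
    # Reply in the channel the command was sent from (discord.py style).
    await message.channel.send("Pong!")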
python
""" ===================== Fitting a light curve ===================== This example shows how to fit the parameters of a SALT2 model to photometric light curve data. First, we'll load an example of some photometric data. """ import sncosmo data = sncosmo.load_example_data() print(data) ##################################################################### # An important additional note: a table of photometric data has a # ``band`` column and a ``zpsys`` column that use strings to identify # the bandpass (e.g., ``'sdssg'``) and zeropoint system (``'ab'``) of # each observation. If the bandpass and zeropoint systems in your data # are *not* built-ins known to sncosmo, you must register the # corresponding `~sncosmo.Bandpass` or `~sncosmo.MagSystem` to the # right string identifier using the registry. # create a model model = sncosmo.Model(source='salt2') # run the fit result, fitted_model = sncosmo.fit_lc( data, model, ['z', 't0', 'x0', 'x1', 'c'], # parameters of model to vary bounds={'z':(0.3, 0.7)}) # bounds on parameters (if any) ##################################################################### # The first object returned is a dictionary-like object where the keys # can be accessed as attributes in addition to the typical dictionary # lookup like ``result['ncall']``: print("Number of chi^2 function calls:", result.ncall) print("Number of degrees of freedom in fit:", result.ndof) print("chi^2 value at minimum:", result.chisq) print("model parameters:", result.param_names) print("best-fit values:", result.parameters) print("The result contains the following attributes:\n", result.keys()) ################################################################## # The second object returned is a shallow copy of the input model with # the parameters set to the best fit values. The input model is # unchanged. sncosmo.plot_lc(data, model=fitted_model, errors=result.errors) ####################################################################### # Suppose we already know the redshift of the supernova we're trying to # fit. We want to set the model's redshift to the known value, and then # make sure not to vary `z` in the fit. model.set(z=0.5) # set the model's redshift. result, fitted_model = sncosmo.fit_lc(data, model, ['t0', 'x0', 'x1', 'c']) sncosmo.plot_lc(data, model=fitted_model, errors=result.errors)
python
#!/bin/python3 # Copyright (C) 2017 Quentin "Naccyde" Deslandes. # Redistribution and use of this file is allowed according to the terms of the MIT license. # For details see the LICENSE file distributed with yall. import sys import os import requests import json import argparse import subprocess import fnmatch owner = 'naccyde' repo = 'yall' prefixPath = 'build/out/packages' requiredDistros = { 'deb' : { 'ubuntu' : [ 'xenial', 'yakkety', 'zesty', 'artful', 'bionic' ], 'debian' : [ 'jessie', 'wheezy', 'stretch', 'buster' ] }, 'rpm' : { 'fedora' : [ '25', '26', '27', '28' ] } } def findDistroIds(requiredDistros, pcDistributions): distrosPackages = {} for pcExt in pcDistributions: if not pcExt in requiredDistros: continue distrosPackages[pcExt] = { 'ids' : [], 'filename' : [] } for pcDistro in pcDistributions[pcExt]: if not pcDistro['index_name'] in requiredDistros[pcExt]: continue versions = requiredDistros[pcExt][pcDistro['index_name']] for pcVersion in pcDistro['versions']: if not pcVersion['index_name'] in versions: continue distrosPackages[pcExt]['ids'].append(pcVersion['id']) return distrosPackages def getArtefacts(folder, extensionFilter): files = [f for f in os.listdir(folder)] return fnmatch.filter(files, extensionFilter) class HttpApi: def isStatusValid(self, statusCode): return 200 <= statusCode <= 299 def get(self, url, headers={}): re = requests.get(url, headers=headers) return re.status_code, re.text def post(self, url, headers={}, data={}, files={}): re = requests.post(url, headers=headers, json=data, files=files) return re.status_code, re.text class PackageCloudApi(HttpApi): def __init__(self, owner, repo, token): self.owner = owner self.repo = repo self.token = token self.apiUrl = 'https://{}:@packagecloud.io/api/v1'.format(token) def getDistributions(self): url = self.apiUrl + '/distributions.json' status, text = self.get(url) return status, json.loads(text) def uploadPackage(self, distroId, filename): url = self.apiUrl + '/repos/{}/{}/packages.json'.format(self.owner, self.repo) file = { 'package[distro_version_id]': (None, str(distroId)), 'package[package_file]': (filename, open(prefixPath + '/' + filename, 'rb')), } status, text = self.post(url, files=file) return status, json.loads(text) def uploadPackages(self, distrosPackages={}): for distro in distrosPackages: for distroId in distrosPackages[distro]['ids']: for filename in distrosPackages[distro]['filename']: print('\t\t[+] Uploading', filename, 'to', distroId) status, text = self.uploadPackage(distroId, filename) if not 200 <= status <= 299: print('\t\t\t[-] ERROR: {}, HTTP {} : {}'.format(filename, status, text)) class GithubApi(HttpApi): apiUrl = 'https://api.github.com' uploadUrl = 'https://uploads.github.com' genericHeaders = { 'Accept' : 'application/vnd.github.v3+json' } def __init__(self, owner, repo, token): self.owner = owner self.repo = repo self.genericHeaders['Authorization'] = 'token ' + token def getReleases(self): url = self.apiUrl + '/repos/{}/{}/releases'.format(self.owner, self.repo) status, text = self.get(url, self.genericHeaders) return json.loads(text) if self.isStatusValid(status) else None def getRelease(self, tag): releases = self.getReleases() for release in releases: if release['tag_name'] == tag: return release return None def createRelease(self, tag_name, target_commitish, name, body, draft=False, prerelease=False): url = self.apiUrl + '/repos/{}/{}/releases'.format(self.owner, self.repo) data = { 'tag_name' : tag_name, 'target_commitish' : target_commitish, 'name' : name, 'body' : body, 
'draft' : draft, 'prerelease' : prerelease } status, text = self.post(url, headers=self.genericHeaders, data=data) if not self.isStatusValid(status): raise Exception('Could not create release:', status, text) return json.loads(text) def uploadReleaseAsset(self, release, filename): url = self.uploadUrl + '/repos/{}/{}/releases/{}/assets?name={}'.format(self.owner, self.repo, release['id'], filename) headers = { 'Content-Type' : 'application/zip' } headers.update(self.genericHeaders) file = { 'file' : (filename, open(prefixPath + '/' + filename, 'rb'))} status, text = self.post(url, headers, None, file) return json.loads(text) if self.isStatusValid(status) else None def uploadReleaseAssets(self, release, files): for file in files: self.uploadReleaseAsset(release, file) def getReleaseMessage(changelog, tag): s = """ Each `yall` Linux release is available from `.deb` and `.rpm` repositories : * `.deb` : `curl -s https://packagecloud.io/install/repositories/naccyde/yall/script.deb.sh | sudo bash` * `.rpm` : `curl -s https://packagecloud.io/install/repositories/naccyde/yall/script.rpm.sh | sudo bash` You can then install `yall` and `yall-dev` using your package manager. The following distributions are supported : * Debian : `wheezy (7)`, `jessie (8)`, `stretch (9)`, `buster (10)` * Ubuntu : `Trusty Tarh (14.04)`, `Xenial Xerus (16.04)`, `Artful Ardvark (17.10)`, `Bionic Beaver (18.04)` * Fedora : `25`, `26`, `27` If your distribution is not supported, you can open an issue to ask to its support. """ return changelog + s def main(): parser = argparse.ArgumentParser(description='Script used to deploy yall releases') parser.add_argument('-g', '--github-token', required=True, help='Github token') parser.add_argument('-p', '--package-cloud-token', required=True, help='Package Cloud token') parser.add_argument('-t', '--tag', required=True, help='Tag of the release') parser.add_argument('-z', '--zip', action='store_true', help='Deploy .zip artefacts') parser.add_argument('-l', '--linux', action='store_true', help='Deploy .deb and .rpm artefacts') args = parser.parse_args() lastChangelog = "" with open("CHANGELOG.md", "r") as file: lastChangelogWVersion = file.read().split("\n\n")[2] lastChangelog = '\n'.join(lastChangelogWVersion.split("\n")[1:]) print('=== yall release ===\n') print('\t[+] Creating release {}\n'.format(args.tag)) # Create Github release githubApi = GithubApi(owner, repo, args.github_token) release = githubApi.getRelease(args.tag) if not release: print('\t[+] Creating release', args.tag) release = githubApi.createRelease(args.tag, 'master', args.tag, getReleaseMessage(lastChangelog, args.tag)) else: print('\t[.] Release', args.tag, 'already exists') if args.zip: print('\t[+] Deploying .zip artefacts') zipArtefacts = getArtefacts(prefixPath, '*.zip') githubApi.uploadReleaseAssets(release, zipArtefacts) if args.linux: print('\t[+] Deploying .deb and .rpm artefacts') packageCloudApi = PackageCloudApi(owner, 'yall', args.package_cloud_token) distrosPackages = findDistroIds(requiredDistros, packageCloudApi.getDistributions()[1]) distrosPackages['deb']['filename'] = getArtefacts(prefixPath, '*.deb') distrosPackages['rpm']['filename'] = getArtefacts(prefixPath, '*.rpm') packageCloudApi.uploadPackages(distrosPackages) print('\t[+] RELEASED !') if __name__== "__main__": main()
python
# reverse words in a string
# edge case: an input containing only spaces should produce an empty string,
# so the commas printed below should appear back to back
class Solution(object):
    def reverseWords(self, s):
        """
        :type s: str
        :rtype: str
        """
        reverse = []
        temp = ""
        for i in s:
            if i == " ":
                if temp != "":
                    reverse.append(temp)
                    temp = ""
            else:
                temp = temp + i
        if temp != "":
            reverse.append(temp)
        return " ".join(reverse[::-1])


solution = Solution()
print("," + solution.reverseWords(" ") + ',')
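For comparison, the same behaviour can be had from str.split(), which already discards leading, trailing and repeated spaces; this is an alternative sketch, not part of the original solution:

def reverse_words(s):
    # split() without arguments drops empty tokens, so "   " -> []
    return " ".join(reversed(s.split()))

assert reverse_words("the sky is blue") == "blue is sky the"
assert reverse_words("   ") == ""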
python
import sys from os.path import join, isfile import threading import importlib.util as iutil from uuid import uuid4 from multiprocessing.dummy import Pool as ThreadPool from datetime import datetime from aequilibrae.project.data import Matrices from aequilibrae.paths.multi_threaded_skimming import MultiThreadedNetworkSkimming from aequilibrae.paths.results.skim_results import SkimResults from aequilibrae.utils import WorkerThread from aequilibrae import logger try: from aequilibrae.paths.AoN import skimming_single_origin except ImportError as ie: logger.warning(f"Could not import procedures from the binary. {ie.args}") spec = iutil.find_spec("PyQt5") pyqt = spec is not None if pyqt: from PyQt5.QtCore import pyqtSignal spec = iutil.find_spec("openmatrix") has_omx = spec is not None sys.dont_write_bytecode = True class NetworkSkimming(WorkerThread): """ :: from aequilibrae.paths.network_skimming import NetworkSkimming from aequilibrae.project import Project project = Project() project.open(self.proj_dir) network = self.project.network network.build_graphs() graph = network.graphs['c'] graph.set_graph(cost_field="distance") graph.set_skimming("distance") skm = NetworkSkimming(graph) skm.execute() # The skim report (if any error generated) is available here skm.report # To access the skim matrix directly from its temporary file matrix = skm.results.skims # Or you can save the results to disk skm.save_to_project('skimming result') # Or specify the AequilibraE's matrix file format skm.save_to_project('skimming result', 'aem') project.close() """ if pyqt: skimming = pyqtSignal(object) def __init__(self, graph, origins=None): WorkerThread.__init__(self, None) self.origins = origins self.graph = graph self.results = SkimResults() self.aux_res = MultiThreadedNetworkSkimming() self.report = [] self.procedure_id = "" self.procedure_date = "" self.cumulative = 0 def doWork(self): self.execute() def execute(self): """Runs the skimming process as specified in the graph""" if pyqt: self.skimming.emit(["zones finalized", 0]) self.results.prepare(self.graph) self.aux_res = MultiThreadedNetworkSkimming() self.aux_res.prepare(self.graph, self.results) pool = ThreadPool(self.results.cores) all_threads = {"count": 0} for orig in list(self.graph.centroids): i = int(self.graph.nodes_to_indices[orig]) if i >= self.graph.nodes_to_indices.shape[0]: self.report.append(f"Centroid {orig} is beyond the domain of the graph") elif self.graph.fs[int(i)] == self.graph.fs[int(i) + 1]: self.report.append(f"Centroid {orig} does not exist in the graph") else: pool.apply_async(self.__func_skim_thread, args=(orig, all_threads)) pool.close() pool.join() self.aux_res = None self.procedure_id = uuid4().hex self.procedure_date = str(datetime.today()) if pyqt: self.skimming.emit(["text skimming", "Saving Outputs"]) self.skimming.emit(["finished_threaded_procedure", None]) def save_to_project(self, name: str, format="omx") -> None: """Saves skim results to the project folder and creates record in the database Args: *name* (:obj:`str`): Name of the matrix. Same value for matrix record name and file (plus extension) *format* (:obj:`str`, `Optional`): File format ('aem' or 'omx'). 
Default is 'omx' """ file_name = f"{name}.{format.lower()}" mats = Matrices() record = mats.new_record(name, file_name, self.results.skims) record.procedure_id = self.procedure_id record.timestamp = self.procedure_date record.procedure = "Network skimming" record.save() def __func_skim_thread(self, origin, all_threads): if threading.get_ident() in all_threads: th = all_threads[threading.get_ident()] else: all_threads[threading.get_ident()] = all_threads["count"] th = all_threads["count"] all_threads["count"] += 1 x = skimming_single_origin(origin, self.graph, self.results, self.aux_res, th) self.cumulative += 1 if x != origin: self.report.append(x) if pyqt: self.skimming.emit(["zones finalized", self.cumulative]) txt = str(self.cumulative) + " / " + str(self.matrix.zones) self.skimming.emit(["text skimming", txt])
python
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) from os import makedirs, path import subprocess import re # -- Project information ----------------------------------------------------- project = 'ENRICO' copyright = '2019, UChicago Argonne, LLC' author = 'ENRICO Development Team' # The full version, including alpha/beta/rc tags release = '0.1' # -- General configuration --------------------------------------------------- master_doc = 'index' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinxcontrib.katex', # 'breathe', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] numfig = True # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'bootstrap-astropy' html_theme_options = {'logotext1': 'ENRICO', 'logotext2': '', 'logotext3': ''} html_show_sphinx = False # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # -- Breathe configuration --------------------------------------------------- # breathe_projects = {"enrico": "doxygen/xml"} # breathe_default_project = "enrico" # -- Build Doxygen --------------------------------------------------- def build_doxygen(app): # XML goes in Sphinx source dir, and HTML goes in Sphinx output dir doxygen_xmldir = path.abspath(path.join(app.srcdir, 'doxygen', 'xml')) doxygen_htmldir = path.abspath(path.join(app.outdir, 'doxygen', 'html')) # Doxygen won't create *nested* output dirs, so we do it ourselves. 
for d in (doxygen_xmldir, doxygen_htmldir): makedirs(d, exist_ok=True) # Need to know location of Doxyfile, so we'll assume its location relative to Sphinx srcdir doxyfile_dir = path.dirname(path.dirname(app.srcdir)) # To pass output dirs to Doxygen, we follow this advice: # http://www.doxygen.nl/manual/faq.html#faq_cmdline # Here we read the Doxyfile into a string, replace the *_OUTPUT vars, and pass the string as # stdin to the doxygen subprocess with open(path.join(doxyfile_dir, 'Doxyfile')) as f: doxy_opts = f.read() doxy_opts = re.sub(r'(\bHTML_OUTPUT\b\s*=\s*).*', r'\1"{}"'.format(doxygen_htmldir), doxy_opts) doxy_opts = re.sub(r'(\bXML_OUTPUT\b\s*=\s*).*', r'\1"{}"'.format(doxygen_xmldir), doxy_opts) subprocess.run(['doxygen', '-'], cwd=doxyfile_dir, input=doxy_opts, universal_newlines=True, check=True) # -- Setup hooks ------------------------------------------------------------- def setup(app): app.add_css_file('theme_overrides.css') app.connect("builder-inited", build_doxygen)
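To make the Doxyfile rewriting in build_doxygen concrete, here is the same substitution applied to a single sample line (the sample values are illustrative):

import re

line = 'HTML_OUTPUT            = html'
new_dir = '/build/docs/doxygen/html'   # stands in for doxygen_htmldir

print(re.sub(r'(\bHTML_OUTPUT\b\s*=\s*).*', r'\1"{}"'.format(new_dir), line))
# HTML_OUTPUT            = "/build/docs/doxygen/html"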
python
# License: BSD 3 clause

import tick.base
import tick.base_model.build.base_model

from .model_hawkes_expkern_leastsq import ModelHawkesExpKernLeastSq
from .model_hawkes_expkern_loglik import ModelHawkesExpKernLogLik
from .model_hawkes_sumexpkern_leastsq import ModelHawkesSumExpKernLeastSq
from .model_hawkes_sumexpkern_loglik import ModelHawkesSumExpKernLogLik

__all__ = [
    "ModelHawkesExpKernLogLik",
    "ModelHawkesSumExpKernLogLik",
    "ModelHawkesExpKernLeastSq",
    "ModelHawkesSumExpKernLeastSq",
]
python
from radar import db

__all__ = ['Commit']


class Commit(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    commit_hash = db.Column(db.String(40))
    summary = db.Column(db.String(100))
    branch = db.Column(db.String(50))
    author = db.Column(db.String(100))
    commit_time = db.Column(db.DateTime)

    # SQLAlchemy expects this attribute to be spelled __table_args__ and to be a
    # tuple; the original __table__args spelling was silently ignored.
    __table_args__ = (db.UniqueConstraint(commit_hash, branch),)
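A minimal sketch of how that constraint behaves at runtime, assuming a configured Flask-SQLAlchemy session; the helper function and its arguments are illustrative, not part of the radar codebase:

from datetime import datetime
from sqlalchemy.exc import IntegrityError

def record_commit(session, commit_hash, branch, summary):
    """Insert a commit; duplicate (commit_hash, branch) pairs are rejected."""
    session.add(Commit(commit_hash=commit_hash, branch=branch,
                       summary=summary, commit_time=datetime.utcnow()))
    try:
        session.commit()
    except IntegrityError:
        # A second insert with the same (commit_hash, branch) violates the
        # UniqueConstraint declared in __table_args__ above.
        session.rollback()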
python
from django.contrib.gis.db import models


class Mansion(models.Model):
    class Meta:
        db_table = 'mansion'

    gid = models.BigAutoField(primary_key=True)
    housing_area_code = models.BigIntegerField(null=False)
    facility_key = models.CharField(max_length=4000, null=True)
    shape_wkt = models.MultiLineStringField(null=False, geography=True)
    fabricated_type_code = models.BigIntegerField(null=True)
    pref = models.CharField(max_length=4000, null=True)
    created_by = models.CharField(max_length=4000, null=True)
    created_at = models.DateTimeField(null=True)
    updated_by = models.CharField(max_length=4000, null=True)
    updated_at = models.DateTimeField(null=True)
python
"""Create svg images from a keyboard definition.""" import xml.etree.ElementTree as ET import io from math import sin, cos, atan2, degrees, radians from kbtb.plate import generate_plate def shape_to_svg_element(shape, props={}, x_scale=1, y_scale=-1): return ET.Element( "path", { "d": " M " + " ".join(f"{x_scale*x},{y_scale*y}" for x, y in shape.exterior.coords) + " Z " + " ".join((" M " + " ".join(f"{x_scale*x},{y_scale*y}" for x, y in i.coords) + " Z ") for i in shape.interiors), **props, }) def shape_to_svg(shape, props={}, x_scale=1, y_scale=-1): # Calculate viewbox from shape bounds x_min, y_min, x_max, y_max = shape.bounds left = min(x_min * x_scale, x_max * x_scale) top = min(y_min * y_scale, y_max * y_scale) width = abs(x_scale * x_min - x_scale * x_max) height = abs(y_scale * y_min - y_scale * y_max) # Create the empty svg tree root = ET.Element( 'svg', { "viewBox": f"{left} {top} {width} {height}", "xmlns": "http://www.w3.org/2000/svg", "xmlns:xlink": "http://www.w3.org/1999/xlink", **props, }) root.append(shape_to_svg_element(shape, x_scale=x_scale, y_scale=y_scale)) return ET.ElementTree(root) def keyboard_to_layout_svg(kb, add_numbers=True): plate = generate_plate(kb) x_scale = 1 y_scale = -1 # Calculate viewbox from plate bounds x_min, y_min, x_max, y_max = plate.bounds left = min(x_min * x_scale, x_max * x_scale) top = min(y_min * y_scale, y_max * y_scale) width = abs(x_scale * x_min - x_scale * x_max) height = abs(y_scale * y_min - y_scale * y_max) # Create the empty svg tree root = ET.Element( 'svg', { "viewBox": f"{left} {top} {width} {height}", "xmlns": "http://www.w3.org/2000/svg", "xmlns:xlink": "http://www.w3.org/1999/xlink", }) root.append(ET.Comment(f'physical-dimensions: {width} mm by {height} mm')) # Add groups for document structure g_plate = ET.SubElement(root, "g", { "id": "plate", "style": "fill: black; fill-rule: evenodd;", }) g_plate = ET.SubElement(g_plate, "g", {"id": "plate"}) g_keycaps = ET.SubElement(root, "g", { "id": "keycaps", "style": "fill: white;" }) # Add plate ET.SubElement( g_plate, "path", { "d": " M " + " ".join(f"{x_scale*x},{y_scale*y}" for x, y in plate.exterior.coords) + " Z " + " ".join((" M " + " ".join(f"{x_scale*x},{y_scale*y}" for x, y in i.coords) + " Z ") for i in plate.interiors) }) g_plate.append( shape_to_svg_element(plate, {"style": "fill: black;"}, x_scale, y_scale)) for i, key in enumerate(kb.keys): x, y = x_scale * key.pose.x, y_scale * key.pose.y r = degrees( atan2(y_scale * sin(radians(key.pose.r - 90)), x_scale * cos(radians(key.pose.r - 90)))) + 90 keyboard_unit = 19.05 margin = keyboard_unit - 18.42 ET.SubElement( g_keycaps, "rect", { "width": str(keyboard_unit * key.unit_width - margin), "height": str(keyboard_unit * key.unit_height - margin), "x": str((keyboard_unit * key.unit_width - margin) / -2), "y": str((keyboard_unit * key.unit_height - margin) / -2), "rx": "1", "transform": f"translate({x} {y}) rotate({r})" }) if add_numbers: ET.SubElement( g_keycaps, "text", { "style": "fill: black; font-family: sans-serif; font-size: 5;", "transform": f"translate({x} {y}) rotate({180+r}) ", "alignment-baseline": "middle", "text-anchor": "middle", }).text = f"{i}" return ET.ElementTree(root) def svg_to_file(svg): f = io.BytesIO() svg.write(f) return f.getvalue()
python
# Utility class for string processing
import re


class BFStringDeal(object):
    def __init__(self, arg):
        self.arg = arg

    @classmethod
    # Remove junk characters -- e.g. "\n"
    def specialTXT(cls, text):
        return text.replace("\n", "")

    @classmethod
    # Process the string with a regular expression
    def getAssignContent(cls, text, assignContent):
        # Compile the regex; assignContent is the pattern supplied by the caller
        regx = re.compile(assignContent)
        return regx.findall(text)

    @classmethod
    # Remove leading/trailing HTML tags; the input is a single tag name such as p or h1.
    # Common tags are [h1, h2, h3, h4, h5, a, span, img, p] -- img needs separate handling.
    def deleteHtmlTag(cls, originalTxt):
        # The tag pattern is assembled here from the tag name -- e.g. h1 gives <h1.*?>|</h1>
        tagCollection = ['p', 'h1', 'h2', 'h3', 'h4', 'a', 'p', 'span']
        for tag in tagCollection:
            tagCompelete = "<" + tag + ".*?" + '>|' + '</' + tag + '>'
            regx = re.compile(tagCompelete)
            hasDealTag = regx.sub("", originalTxt)

            # Handle the case where h1/h2/p still contains an <a> tag
            if "</a>" in hasDealTag:
                tagCompelete = "<" + 'a' + ".*?" + '>|' + '</' + 'a' + '>'
                regx = re.compile(tagCompelete)
                hasDealTag = regx.sub("", originalTxt)

            # Handle the case where h1/h2/p still contains a <span> tag
            if "</span>" in hasDealTag:
                tagCompelete = "<" + 'span' + ".*?" + '>|' + '</' + 'span' + '>'
                regx = re.compile(tagCompelete)
                hasDealTag = regx.sub("", originalTxt)

        # The img case will be handled later
        return hasDealTag

    @classmethod
    # Remove the leading and trailing tag -- still slightly buggy: the closing tag
    # that gets removed is not necessarily the last one.
    # e.g. input <div class="fasdfd">something so many <div>ssss</div></div>
    # yields something so many <div>ssss</div>
    def deleteFrontAndBackendTag(cls, content):
        # Remove the leading tag
        frontTag = "<.*?>"
        regxFront = re.compile(frontTag)
        frontDelContent = regxFront.sub("", content, 1)

        # Remove a closing tag == only one </xx> needs to go here,
        # and it is not necessarily the last one
        backendTag = "</.*?>"
        regxBack = re.compile(backendTag)
        backendDelContent = regxBack.sub("", frontDelContent, 1)
        return backendDelContent

    @classmethod
    # Remove all tags from the given text
    # e.g. input <div class="fasdfd">something so many <div>ssss</div></div>
    # yields something so many ssss
    def deleteAllTag(cls, content):
        frontTag = "<.*?>"
        regxFront = re.compile(frontTag)
        frontDelContent = regxFront.sub("", content)

        backendTag = "</.*?>"
        regxBack = re.compile(backendTag)
        backendDelContent = regxBack.sub("", frontDelContent)
        return backendDelContent
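A short usage sketch of the class above; the sample HTML string is made up for illustration:

sample = '<div class="post"><h1>Title</h1><p>Hello <span>world</span></p></div>'

# Strip every tag, keeping only the text content
print(BFStringDeal.deleteAllTag(sample))                          # TitleHello world

# Extract the text of the <h1> element with a caller-supplied pattern
print(BFStringDeal.getAssignContent(sample, "<h1>(.*?)</h1>"))    # ['Title']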
python
__author__ = 'Alexander Horkun'
__email__ = '[email protected]'

from django.conf.urls import patterns, url

from xanderhorkunspider.web.websites.views import websites, auth

urlpatterns = patterns(
    '',
    url(r'^$', websites.index_view, name='index'),
    url(r'^add-website$', websites.edit_website_view, name='add_website'),
    url(r'^edit-website/(?P<wid>\d+)$', websites.edit_website_view, name='edit_website'),
    url(r'^delete_website/(?P<wid>\d+)$', websites.delete_website_view, name='delete_website'),
    url(r'^add-page', websites.edit_page_view, name='add_page'),
    url(r'^website/(?P<wid>\d+)/add-page', websites.edit_page_view, name='add_page_to_website'),
    url(r'^edit-page/(?P<pid>\d+)', websites.edit_page_view, name='edit_page'),
    url(r'^delete_page/(\d+)$', websites.delete_page_view, name='delete_page'),
    url(r'^spider_session/webiste-(?P<wid>\d+)$', websites.spider_session_view, name='spider_session'),
    url(r'^spider_session$', websites.start_spider_session_view, name='start_spider_session'),
    url(r'^spider-status/(.+)$', websites.spider_status_view, name='spider_status'),
    url(r'^sign-up$', auth.signup_view, name='signup'),
    url('logout', auth.logout_view, name='logout'),
    url('login', auth.login_view, name='login'),
)
python
""" opbeat.contrib.django.celery ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: (c) 2011-2012 Opbeat Large portions are :copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from opbeat.contrib.celery import CeleryMixin from opbeat.contrib.django import DjangoClient class CeleryClient(CeleryMixin, DjangoClient): pass
python
# from glob import glob
from setuptools import setup

setup(
    name='pybrightsign',
    version='0.9.4',
    description='BrightSign APIs for humans. Python module to simplify using the BrightSign BSN/BSNEE API.',
    long_description=open('../README.md').read(),
    long_description_content_type='text/markdown',
    license='MIT',
    # https://pypi.org/classifiers/
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Topic :: Adaptive Technologies',
        'Topic :: Utilities'
    ],
    url='https://github.com/pointw-dev/pybrightsign',
    author='Michael Ottoson',
    author_email='[email protected]',
    packages=['pybrightsign'],
    include_package_data=True,
    install_requires=[
        'requests',
        'oauthlib==2.1.0',
        'requests-oauthlib==1.1.0'
    ],
    # scripts=glob('bin/*'),
    zip_safe=False
)
python
# coding: utf-8
# 2019/10/17 @ tongshiwei
python
from curses import meta import shutil from unittest import TestCase import sys import os import metadata_mp3 import shutil import unittest from mutagen.easyid3 import EasyID3 class TestRenameSongName(TestCase): def test_1(self): songNameBefore = "Counting Crows - Colorblind (Official Video)" songNameAfter = "Counting Crows - Colorblind" songNameAfterTest = metadata_mp3.rename_song_name(songNameBefore) self.assertEqual(songNameAfter, songNameAfterTest) def test_2(self): songNameBefore = "Counting Crows - Colorblind test" songNameAfter = "Counting Crows - Colorblind" songNameAfterTest = metadata_mp3.rename_song_name(songNameBefore) self.assertNotEqual(songNameAfter, songNameAfterTest) class TestConvertSongnameOnMetadata(TestCase): def test_1(self): songNameBefore = "Counting Crows - Colorblind" metadataSongName = metadata_mp3.convert_songname_on_metadata(songNameBefore) self.assertEqual(metadataSongName['artist'], "Counting Crows") self.assertEqual(metadataSongName['title'], "Colorblind") def test_2(self): songNameBefore = "Counting Crows - Colorblind test" metadataSongName = metadata_mp3.convert_songname_on_metadata(songNameBefore) self.assertEqual(metadataSongName['artist'], "Counting Crows") self.assertEqual(metadataSongName['title'], "Colorblind test") class TestAddMetadataSong(TestCase): def test_1(self): originalTestFileName = "test.mp3" testFileName = "Counting Crows - Colorblind.mp3" songNameTest = "Counting Crows - Colorblind" artistTest = "Counting Crows" titleTest = "Colorblind" albumTest = "album test" currentDirectory = os.path.dirname(os.path.realpath(__file__)) originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName) testFileNameWithPath = os.path.join(currentDirectory,testFileName) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) newFileNameWithPath = metadata_mp3.add_metadata_song(currentDirectory,albumTest, artistTest, songNameTest) metatag = EasyID3(newFileNameWithPath) print(newFileNameWithPath) self.assertTrue(os.path.isfile(newFileNameWithPath)) self.assertEqual(newFileNameWithPath, testFileNameWithPath) self.assertEqual(metatag['artist'][0], artistTest) self.assertEqual(metatag['title'][0], titleTest) self.assertEqual(metatag['album'][0], albumTest) os.remove(newFileNameWithPath) def test_2(self): originalTestFileName = "test.mp3" fileNameTest = "Counting Crows - Colorblind (Official Video).mp3" songNameTest = "Counting Crows - Colorblind (Official Video)" artistTest = "Counting Crows" titleTest = "Colorblind" albumTest = "album test" currentDirectory = os.path.dirname(os.path.realpath(__file__)) originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName) testFileNameWithPath = os.path.join(currentDirectory,fileNameTest) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) newFileNameWithPath = metadata_mp3.add_metadata_song(currentDirectory,albumTest, artistTest, songNameTest) self.assertFalse(os.path.isfile(testFileNameWithPath)) self.assertTrue(os.path.isfile(newFileNameWithPath)) self.assertNotEqual(newFileNameWithPath, testFileNameWithPath) metatag = EasyID3(newFileNameWithPath) print(newFileNameWithPath) self.assertEqual(metatag['artist'][0], artistTest) self.assertEqual(metatag['title'][0], titleTest) self.assertEqual(metatag['album'][0], albumTest) os.remove(newFileNameWithPath) class TestAddMetadataPlaylist(TestCase): def test_1(self): originalTestFileName = "test.mp3" testFileName = "Counting Crows - Colorblind.mp3" songNameTest = "Counting Crows - Colorblind" artistTest = "Counting 
Crows" titleTest = "Colorblind" albumTest = "spokojne-sad" trackNumberTest = 1 currentDirectory = os.path.dirname(os.path.realpath(__file__)) originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName) albumDirectory = os.path.join(currentDirectory,albumTest) if not os.path.exists(albumDirectory): os.mkdir(albumDirectory) testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) newFileNameWithPath = metadata_mp3.add_metadata_playlist(currentDirectory,trackNumberTest,albumTest,artistTest,songNameTest) #print(newFileNameWithPath) self.assertTrue(os.path.isfile(newFileNameWithPath)) self.assertEqual(newFileNameWithPath, testFileNameWithPath) metatag = EasyID3(newFileNameWithPath) self.assertEqual(metatag['artist'][0], artistTest) self.assertEqual(metatag['title'][0], titleTest) self.assertEqual(metatag['album'][0], "YT "+albumTest) self.assertEqual(metatag['tracknumber'][0],str(trackNumberTest)) shutil.rmtree(os.path.join(currentDirectory,albumTest)) class TestUpdateMetadataYoutube(TestCase): def test_1(self): originalTestFileName = "test.mp3" testFileName1 = "Counting Crows - Colorblind.mp3" testFileName2 = "Eels - I Need Some Sleep.mp3" testFileName3 = "Paramore - The Only Exception.mp3" artistTestList = [] artistTestList.append("Counting Crows") titleTestList = [] titleTestList.append("Colorblind") artistTestList.append("Eels") titleTestList.append("I Need Some Sleep") artistTestList.append("Paramore") titleTestList.append("The Only Exception") albumTest = "spokojne-sad" currentDirectory = os.path.dirname(os.path.realpath(__file__)) originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName) albumDirectory = os.path.join(currentDirectory,albumTest) if not os.path.exists(albumDirectory): os.mkdir(albumDirectory) testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName1) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName2) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName3) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) newFilesList = metadata_mp3.update_metadata_youtube(currentDirectory,albumTest) i = 0 for newFile in newFilesList: print(newFile) self.assertTrue(os.path.isfile(newFile)) metatag = EasyID3(newFile) self.assertEqual(metatag['artist'][0], artistTestList[i]) self.assertEqual(metatag['title'][0], titleTestList[i]) self.assertEqual(metatag['album'][0], "YT "+albumTest) i = i+1 shutil.rmtree(os.path.join(currentDirectory,albumTest)) class TestUpdateMetadata(TestCase): def test_1(self): originalTestFileName = "test.mp3" testFileName1 = "Counting Crows - Colorblind.mp3" testFileName2 = "Eels - I Need Some Sleep.mp3" testFileName3 = "Paramore - The Only Exception.mp3" artistTestList = [] artistTestList.append("Counting Crows") titleTestList = [] titleTestList.append("Colorblind") artistTestList.append("Eels") titleTestList.append("I Need Some Sleep") artistTestList.append("Paramore") titleTestList.append("The Only Exception") albumTest = "spokojne-sad" currentDirectory = os.path.dirname(os.path.realpath(__file__)) originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName) albumDirectory = os.path.join(currentDirectory,albumTest) if not os.path.exists(albumDirectory): os.mkdir(albumDirectory) testFileNameWithPath = 
os.path.join(currentDirectory,albumTest, testFileName1) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName2) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) testFileNameWithPath = os.path.join(currentDirectory,albumTest, testFileName3) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) newFilesList = metadata_mp3.update_metadata(albumDirectory,albumTest) i = 0 for newFile in newFilesList: print(newFile) self.assertTrue(os.path.isfile(newFile)) metatag = EasyID3(newFile) self.assertEqual(metatag['artist'][0], artistTestList[i]) self.assertEqual(metatag['title'][0], titleTestList[i]) self.assertEqual(metatag['album'][0], albumTest) i = i+1 shutil.rmtree(os.path.join(currentDirectory,albumTest)) class TestSetAlbum(TestCase): def test_1(self): originalTestFileName = "test.mp3" testFileName1 = "test1.mp3" testFileName2 = "test2.mp3" testCatalog = "test_1" currentDirectory = os.path.dirname(os.path.realpath(__file__)) originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName) testCatalogWithPath = os.path.join(currentDirectory, testCatalog) if not os.path.exists(testCatalogWithPath): os.mkdir(testCatalogWithPath) testFileNameWithPath = os.path.join(currentDirectory,testCatalog, testFileName1) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) testFileNameWithPath = os.path.join(currentDirectory,testCatalog, testFileName2) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) newFilesList = metadata_mp3.setAlbum(testCatalogWithPath, "album test") for newFile in newFilesList: newFileWithPath = os.path.join(testCatalogWithPath,newFile) self.assertTrue(os.path.isfile(newFileWithPath)) metatag = EasyID3(newFileWithPath) self.assertEqual(metatag['album'][0], "album test") shutil.rmtree(os.path.join(currentDirectory,testCatalog)) class TestSetArtist(TestCase): def test_1(self): originalTestFileName = "test.mp3" testFileName1 = "test1.mp3" testFileName2 = "test2.mp3" testCatalog = "test_1" currentDirectory = os.path.dirname(os.path.realpath(__file__)) originalTestFileNameWithPath = os.path.join(currentDirectory,originalTestFileName) testCatalogWithPath = os.path.join(currentDirectory, testCatalog) if not os.path.exists(testCatalogWithPath): os.mkdir(testCatalogWithPath) testFileNameWithPath = os.path.join(currentDirectory,testCatalog, testFileName1) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) testFileNameWithPath = os.path.join(currentDirectory,testCatalog, testFileName2) shutil.copy(originalTestFileNameWithPath, testFileNameWithPath) newFilesList = metadata_mp3.setArtist(testCatalogWithPath, "artist test") for newFile in newFilesList: newFileWithPath = os.path.join(testCatalogWithPath,newFile) self.assertTrue(os.path.isfile(newFileWithPath)) metatag = EasyID3(newFileWithPath) self.assertEqual(metatag['artist'][0], "artist test") shutil.rmtree(os.path.join(currentDirectory,testCatalog)) if __name__=='__main__': unittest.main()
python
#!/usr/bin/env python
# coding=utf-8
"""Writes uninstallation SQL script to stdout."""
from os.path import abspath, join, dirname
import sys


def uninstall():
    with open(join(dirname(abspath(__file__)), 'uninstall.sql')) as f:
        sys.stdout.write(f.read())


if __name__ == '__main__':
    uninstall()
python
import os from PIL import Image, ImageDraw from pylab import * import csv class ImageScatterPlot: def __init__(self): self.h, self.w = 20000,20000 self.resize_h = 275 self.resize_w = 275 def create_save_fig(self, image_paths, projected_features, out_file): img_scatter = self.create_fig(image_paths, projected_features) self.save_fig(img_scatter, out_file) def create_fig(self, image_paths, projected_features): img = Image.new('RGB',(self.w,self.h),(255,255,255)) draw = ImageDraw.Draw(img) scale = abs(projected_features).max(0) scaled = floor(array([ (p / scale) * (self.w/2-20,self.h/2-20) + (self.w/2,self.h/2) for p in projected_features])) print "number of images", len(image_paths) for i in range(len(image_paths)): nodeim = Image.open(image_paths[i]) nodeim = nodeim.resize((self.resize_w,self.resize_h)) ns = nodeim.size img.paste(nodeim,(int(scaled[i][0]-ns[0]//2),int(scaled[i][1]-ns[1]//2),int(scaled[i][0]+ns[0]//2+1),int(scaled[i][1]+ns[1]//2+1))) return img def save_fig(self, img, out_file): img.save(out_file) if __name__ == "__main__": in_file = "PNAR-tsne-HOG-color.csv" out_file = "res-class.jpg" rows = [] with open(in_file, 'rb') as f: reader = csv.reader(f) for row in reader: rows.append(row) rows.pop(0) image_paths = [row[0] for row in rows] features = array([(float(row[1]), float(row[2])) for row in rows]) ImageScatterPlot().create_save_fig(image_paths = image_paths, projected_features = features, out_file = out_file)
python
# Lagoon (2400004) | Zero's Temple (320000000) from net.swordie.ms.loaders import StringData options = [] al = chr.getAvatarData().getAvatarLook() selection = sm.sendNext("Hello. How can I help you? #b\r\n" "#L0#Change hair colour#l\r\n" "#L1#Change eye colour#l\r\n" "#L2#Change skin tone#l") if selection == 0: hairColour = al.getHair() % 10 baseHair = al.getHair() - hairColour for colour in range(8): colourOption = baseHair + colour options.append(colourOption) answer = sm.sendAskAvatar("Choose your new hair colour!", False, False, options) if answer < len(options): sm.changeCharacterLook(options[answer]) elif selection == 1: faceColour = al.getFace() % 1000 - al.getFace() % 100 baseFace = al.getFace() - faceColour for colour in range(0, 900, 100): colourOption = baseFace + colour if not StringData.getItemStringById(colourOption) is None: options.append(colourOption) answer = sm.sendAskAvatar("With our specialized machine, you can see the results of your potential treatment in advance. " "What kind of lens would you like to wear? Please choose the style of your liking.", False, False, options) if answer < len(options): sm.changeCharacterLook(options[answer]) else: #These values will crash the client when attempting to load them onto character nullSkins = [6, 7, 8] for skin in range(14): #Skip past null skin values if skin in nullSkins: continue options.append(skin) answer = sm.sendAskAvatar("We have the latest in beauty equipment. " "With our technology, you can preview what your skin will look like in advance! " "Which treatment would you like?", False, False, options) if answer < len(options): sm.changeCharacterLook(options[answer])
python
def a_method():
    pass


class AClass:
    pass


var = "A Variable"

print("Support library name: {}".format(__name__))

if __name__ == '__main__':
    age = 0
    while age <= 0:
        age = int(input("How old are you? "))
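A brief illustration of the `__name__` guard above, assuming the file is saved as support.py (the filename is an assumption):

import support          # prints: Support library name: support

print(support.var)      # A Variable
support.a_method()      # defined, does nothing

# The interactive age prompt only runs when the file is executed directly
# (python support.py), because only then is __name__ == "__main__".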
python
''' Manage file shares that use the SMB 3.0 protocol. ''' from ... pyaz_utils import _call_az from . import copy, metadata def list(share_name, account_key=None, account_name=None, connection_string=None, exclude_dir=None, marker=None, num_results=None, path=None, sas_token=None, snapshot=None, timeout=None): ''' List files and directories in a share. Required Parameters: - share_name -- The file share name. Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - exclude_dir -- None - marker -- An opaque continuation token. This value can be retrieved from the next_marker field of a previous generator object if num_results was specified and that generator has finished enumerating results. If specified, this generator will begin returning results from the point where the previous generator stopped. - num_results -- Specify the maximum number to return. If the request does not specify num_results, or specifies a value greater than 5000, the server will return up to 5000 items. Note that if the listing operation crosses a partition boundary, then the service will return a continuation token for retrieving the remaining of the results. Provide "*" to return all. - path -- The directory path within the file share. - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - snapshot -- A string that represents the snapshot version, if applicable. - timeout -- Request timeout in seconds. Applies to each call to the service. ''' return _call_az("az storage file list", locals()) def delete(path, share_name, account_key=None, account_name=None, connection_string=None, sas_token=None, timeout=None): ''' Required Parameters: - path -- The path to the file within the file share. - share_name -- The file share name. Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - timeout -- Request timeout in seconds. Applies to each call to the service. ''' return _call_az("az storage file delete", locals()) def resize(path, share_name, size, account_key=None, account_name=None, connection_string=None, sas_token=None, timeout=None): ''' Required Parameters: - path -- The path to the file within the file share. - share_name -- The file share name. 
- size -- The length to resize the file to. Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - timeout -- Request timeout in seconds. Applies to each call to the service. ''' return _call_az("az storage file resize", locals()) def url(path, share_name, account_key=None, account_name=None, connection_string=None, protocol=None, sas_token=None): ''' Create the url to access a file. Required Parameters: - path -- The path to the file within the file share. - share_name -- The file share name. Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - protocol -- Protocol to use. - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN ''' return _call_az("az storage file url", locals()) def generate_sas(path, share_name, account_key=None, account_name=None, cache_control=None, connection_string=None, content_disposition=None, content_encoding=None, content_language=None, content_type=None, expiry=None, https_only=None, ip=None, permissions=None, policy_name=None, start=None): ''' Required Parameters: - path -- The path to the file within the file share. - share_name -- The file share name. Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - cache_control -- Response header value for Cache-Control when resource is accessed using this shared access signature. - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - content_disposition -- Response header value for Content-Disposition when resource is accessed using this shared access signature. - content_encoding -- Response header value for Content-Encoding when resource is accessed using this shared access signature. 
- content_language -- Response header value for Content-Language when resource is accessed using this shared access signature. - content_type -- Response header value for Content-Type when resource is accessed using this shared access signature. - expiry -- Specifies the UTC datetime (Y-m-d'T'H:M'Z') at which the SAS becomes invalid. Do not use if a stored access policy is referenced with --id that specifies this value. - https_only -- Only permit requests made with the HTTPS protocol. If omitted, requests from both the HTTP and HTTPS protocol are permitted. - ip -- Specifies the IP address or range of IP addresses from which to accept requests. Supports only IPv4 style addresses. - permissions -- The permissions the SAS grants. Allowed values: (c)reate (d)elete (r)ead (w)rite (c)reate (d)elete (r)ead (w)rite. Do not use if a stored access policy is referenced with --id that specifies this value. Can be combined. - policy_name -- The name of a stored access policy within the container's ACL. - start -- Specifies the UTC datetime (Y-m-d'T'H:M'Z') at which the SAS becomes valid. Do not use if a stored access policy is referenced with --id that specifies this value. Defaults to the time of the request. ''' return _call_az("az storage file generate-sas", locals()) def show(path, share_name, account_key=None, account_name=None, connection_string=None, sas_token=None, snapshot=None, timeout=None): ''' Required Parameters: - path -- The path to the file within the file share. - share_name -- The file share name. Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - snapshot -- A string that represents the snapshot version, if applicable. - timeout -- Request timeout in seconds. Applies to each call to the service. ''' return _call_az("az storage file show", locals()) def update(path, share_name, account_key=None, account_name=None, clear_content_settings=None, connection_string=None, content_cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_md5=None, content_type=None, sas_token=None, timeout=None): ''' Required Parameters: - path -- The path to the file within the file share. - share_name -- The file share name. Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. 
If a large number of storage commands are executed the API quota may be hit - clear_content_settings -- If this flag is set, then if any one or more of the following properties (--content-cache-control, --content-disposition, --content-encoding, --content-language, --content-md5, --content-type) is set, then all of these properties are set together. If a value is not provided for a given property when at least one of the properties listed below is set, then that property will be cleared. - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - content_cache_control -- The cache control string. - content_disposition -- Conveys additional information about how to process the response payload, and can also be used to attach additional metadata. - content_encoding -- The content encoding type. - content_language -- The content language. - content_md5 -- The content's MD5 hash. - content_type -- The content MIME type. - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - timeout -- Request timeout in seconds. Applies to each call to the service. ''' return _call_az("az storage file update", locals()) def exists(path, share_name, account_key=None, account_name=None, connection_string=None, sas_token=None, snapshot=None, timeout=None): ''' Check for the existence of a file. Required Parameters: - path -- The path to the file within the file share. - share_name -- The file share name. Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - snapshot -- A string that represents the snapshot version, if applicable. - timeout -- Request timeout in seconds. Applies to each call to the service. ''' return _call_az("az storage file exists", locals()) def download(path, share_name, account_key=None, account_name=None, connection_string=None, dest=None, end_range=None, max_connections=None, no_progress=None, open_mode=None, sas_token=None, snapshot=None, start_range=None, timeout=None, validate_content=None): ''' Required Parameters: - path -- The path to the file within the file share. - share_name -- The file share name. Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. 
Environment variable: AZURE_STORAGE_CONNECTION_STRING - dest -- Path of the file to write to. The source filename will be used if not specified. - end_range -- End of byte range to use for downloading a section of the file. If end_range is given, start_range must be provided. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. - max_connections -- If set to 2 or greater, an initial get will be done for the first self.MAX_SINGLE_GET_SIZE bytes of the file. If this is the entire file, the method returns at this point. If it is not, it will download the remaining data parallel using the number of threads equal to max_connections. Each chunk will be of size self.MAX_CHUNK_GET_SIZE. If set to 1, a single large get request will be done. This is not generally recommended but available if very few threads should be used, network requests are very expensive, or a non-seekable stream prevents parallel download. This may also be valuable if the file is being concurrently modified to enforce atomicity or if many files are expected to be empty as an extra request is required for empty files if max_connections is greater than 1. - no_progress -- Include this flag to disable progress reporting for the command. - open_mode -- Mode to use when opening the file. Note that specifying append only open_mode prevents parallel download. So, max_connections must be set to 1 if this open_mode is used. - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - snapshot -- A string that represents the snapshot version, if applicable. - start_range -- Start of byte range to use for downloading a section of the file. If no end_range is given, all bytes after the start_range will be downloaded. The start_range and end_range params are inclusive. Ex: start_range=0, end_range=511 will download first 512 bytes of file. - timeout -- Request timeout in seconds. Applies to each call to the service. - validate_content -- If set to true, validates an MD5 hash for each retrieved portion of the file. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that the service will only return transactional MD5s for chunks 4MB or less so the first get request will be of size self.MAX_CHUNK_GET_SIZE instead of self.MAX_SINGLE_GET_SIZE. If self.MAX_CHUNK_GET_SIZE was set to greater than 4MB an error will be thrown. As computing the MD5 takes processing time and more requests will need to be done due to the reduced chunk size there may be some increase in latency. ''' return _call_az("az storage file download", locals()) def upload(share_name, source, account_key=None, account_name=None, connection_string=None, content_cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_md5=None, content_type=None, max_connections=None, metadata=None, no_progress=None, path=None, sas_token=None, timeout=None, validate_content=None): ''' Upload a file to a share that uses the SMB 3.0 protocol. Required Parameters: - share_name -- The file share name. - source -- Path of the local file to upload as the file content. Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. 
Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - content_cache_control -- The cache control string. - content_disposition -- Conveys additional information about how to process the response payload, and can also be used to attach additional metadata. - content_encoding -- The content encoding type. - content_language -- The content language. - content_md5 -- The content's MD5 hash. - content_type -- The content MIME type. - max_connections -- Maximum number of parallel connections to use. - metadata -- Metadata in space-separated key=value pairs. This overwrites any existing metadata. - no_progress -- Include this flag to disable progress reporting for the command. - path -- The path to the file within the file share. If the file name is omitted, the source file name will be used. - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - timeout -- Request timeout in seconds. Applies to each call to the service. - validate_content -- If true, calculates an MD5 hash for each range of the file. The storage service checks the hash of the content that has arrived with the hash that was sent. This is primarily valuable for detecting bitflips on the wire if using http instead of https as https (the default) will already validate. Note that this MD5 hash is not stored with the file. ''' return _call_az("az storage file upload", locals()) def upload_batch(destination, source, account_key=None, account_name=None, connection_string=None, content_cache_control=None, content_disposition=None, content_encoding=None, content_language=None, content_md5=None, content_type=None, destination_path=None, dryrun=None, max_connections=None, metadata=None, no_progress=None, pattern=None, sas_token=None, validate_content=None): ''' Upload files from a local directory to an Azure Storage File Share in a batch operation. Required Parameters: - destination -- None - source -- None Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - content_cache_control -- The cache control string. - content_disposition -- Conveys additional information about how to process the response payload, and can also be used to attach additional metadata. - content_encoding -- The content encoding type. - content_language -- The content language. - content_md5 -- The content's MD5 hash. - content_type -- The content MIME type. - destination_path -- None - dryrun -- None - max_connections -- None - metadata -- Metadata in space-separated key=value pairs. This overwrites any existing metadata. 
- no_progress -- Include this flag to disable progress reporting for the command. - pattern -- None - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - validate_content -- None ''' return _call_az("az storage file upload-batch", locals()) def download_batch(destination, source, account_key=None, account_name=None, connection_string=None, dryrun=None, max_connections=None, no_progress=None, pattern=None, sas_token=None, snapshot=None, validate_content=None): ''' Download files from an Azure Storage File Share to a local directory in a batch operation. Required Parameters: - destination -- None - source -- None Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - dryrun -- None - max_connections -- None - no_progress -- Include this flag to disable progress reporting for the command. - pattern -- None - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - snapshot -- None - validate_content -- None ''' return _call_az("az storage file download-batch", locals()) def delete_batch(source, account_key=None, account_name=None, connection_string=None, dryrun=None, pattern=None, sas_token=None, timeout=None): ''' Delete files from an Azure Storage File Share. Required Parameters: - source -- None Optional Parameters: - account_key -- Storage account key. Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_KEY - account_name -- Storage account name. Related environment variable: AZURE_STORAGE_ACCOUNT. Must be used in conjunction with either storage account key or a SAS token. If neither are present, the command will try to query the storage account key using the authenticated Azure account. If a large number of storage commands are executed the API quota may be hit - connection_string -- Storage account connection string. Environment variable: AZURE_STORAGE_CONNECTION_STRING - dryrun -- None - pattern -- None - sas_token -- A Shared Access Signature (SAS). Must be used in conjunction with storage account name. Environment variable: AZURE_STORAGE_SAS_TOKEN - timeout -- Request timeout in seconds. Applies to each call to the service. ''' return _call_az("az storage file delete-batch", locals())
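A short usage sketch for the wrapper functions above; the import path, account name, key, and share/file names are all placeholders rather than values taken from this module:

# hypothetical package path for the module defined above
from pyaz.storage.file import upload, download, list as list_files

creds = dict(account_name="mystorageacct", account_key="<storage-key>")  # placeholders

# Upload a local file, list the target directory, then download a copy.
upload(share_name="reports", source="./2021-q3.pdf", path="q3/2021-q3.pdf", **creds)
print(list_files(share_name="reports", path="q3", **creds))
download(path="q3/2021-q3.pdf", share_name="reports", dest="./2021-q3-copy.pdf", **creds)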
python
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals

import uuid

from django.core.urlresolvers import reverse
from rest_framework.test import APITestCase

from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.test.helpers import setup_device
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.device.models import DevicePermissions

DUMMY_PASSWORD = "password"


class ChannelOrderTestCase(APITestCase):

    fixtures = ["content_test.json"]
    the_channel_id = "6199dde695db4ee4ab392222d5af1e5c"

    def setUp(self):
        self.facility, self.superuser = setup_device()
        self.learner = FacilityUser.objects.create(
            username="learner", facility=self.facility
        )
        self.learner.set_password(DUMMY_PASSWORD)
        self.learner.save()
        channel = ChannelMetadata.objects.get(id=self.the_channel_id)
        channel.root.available = True
        channel.root.save()
        self.url = reverse("kolibri:kolibri.plugins.device:devicechannelorder")

    def test_learner_cannot_post(self):
        self.client.login(username=self.learner.username, password=DUMMY_PASSWORD)
        response = self.client.post(self.url, [], format="json")
        self.assertEqual(response.status_code, 403)

    def test_can_manage_content_can_post(self):
        DevicePermissions.objects.create(user=self.learner, can_manage_content=True)
        self.client.login(username=self.learner.username, password=DUMMY_PASSWORD)
        response = self.client.post(self.url, [], format="json")
        self.assertNotEqual(response.status_code, 403)

    def test_superuser_can_post(self):
        self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
        response = self.client.post(self.url, [], format="json")
        self.assertNotEqual(response.status_code, 403)

    def test_error_wrong_number_of_uuids(self):
        self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
        response = self.client.post(
            self.url, [self.the_channel_id, uuid.uuid4().hex], format="json"
        )
        self.assertEqual(response.status_code, 400)

    def test_error_invalid_uuid(self):
        self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
        response = self.client.post(self.url, ["test"], format="json")
        self.assertEqual(response.status_code, 400)

    def test_error_not_array(self):
        self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
        response = self.client.post(self.url, {}, format="json")
        self.assertEqual(response.status_code, 400)

    def test_set_order_one(self):
        self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
        response = self.client.post(self.url, [self.the_channel_id], format="json")
        channel = ChannelMetadata.objects.get(id=self.the_channel_id)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(channel.order, 1)

    def test_set_order_two(self):
        self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
        new_channel_id = uuid.uuid4().hex
        new_channel = ChannelMetadata.objects.create(
            id=new_channel_id,
            name="Test",
            root=ContentNode.objects.create(
                title="test",
                id=uuid.uuid4().hex,
                channel_id=new_channel_id,
                content_id=uuid.uuid4().hex,
                available=True,
            ),
        )
        response = self.client.post(
            self.url, [self.the_channel_id, new_channel.id], format="json"
        )
        self.assertEqual(response.status_code, 200)
        channel = ChannelMetadata.objects.get(id=self.the_channel_id)
        new_channel.refresh_from_db()
        self.assertEqual(channel.order, 1)
        self.assertEqual(new_channel.order, 2)

    def test_set_order_two_one_unavailable(self):
        self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
        new_channel_id = uuid.uuid4().hex
        new_channel = ChannelMetadata.objects.create(
            id=new_channel_id,
            name="Test",
            root=ContentNode.objects.create(
                title="test",
                id=uuid.uuid4().hex,
                channel_id=new_channel_id,
                content_id=uuid.uuid4().hex,
                available=False,
            ),
        )
        response = self.client.post(
            self.url, [self.the_channel_id, new_channel.id], format="json"
        )
        self.assertEqual(response.status_code, 400)

    def test_set_order_two_reorder(self):
        self.client.login(username=self.superuser.username, password=DUMMY_PASSWORD)
        new_channel_id = uuid.uuid4().hex
        new_channel = ChannelMetadata.objects.create(
            id=new_channel_id,
            name="Test",
            root=ContentNode.objects.create(
                title="test",
                id=uuid.uuid4().hex,
                channel_id=new_channel_id,
                content_id=uuid.uuid4().hex,
                available=True,
            ),
            order=1,
        )
        channel = ChannelMetadata.objects.get(id=self.the_channel_id)
        channel.order = 2
        channel.save()
        response = self.client.post(
            self.url, [self.the_channel_id, new_channel.id], format="json"
        )
        self.assertEqual(response.status_code, 200)
        new_channel.refresh_from_db()
        channel.refresh_from_db()
        self.assertEqual(channel.order, 1)
        self.assertEqual(new_channel.order, 2)
python
"""Tests for the config.config-module """ # System library imports from collections import namedtuple from datetime import date, datetime import pathlib import re import sys # Third party imports import pytest # Midgard imports from midgard.config import config from midgard.collections import enums from midgard.dev import exceptions # # Helper functions # EntryTestCase = namedtuple("EntryTestCase", ("type", "cfg_value", "value")) def normalize_whitespace(string): """Normalize whitespace in string Deletes consecutive spaces and newlines """ return re.sub("\n+", "\n", re.sub(" +", " ", string)) def only_word_characters(string): """Filter out only word characters from the string""" return re.sub("\W", "", string) # # Test configuration # @pytest.fixture def config_file(): """A test configuration read from file""" cfg = config.Configuration("file") cfg_path = pathlib.Path(__file__).parent / "test_config.conf" cfg.update_from_file(cfg_path) cfg_vars = dict(var_1="one", var_2="two") cfg.update_vars(cfg_vars) return cfg @pytest.fixture def config_options(): """A test configuration based on (mocked) command line options""" cfg = config.Configuration("options") cfg_argv = [ sys.argv[0], "not_an_option", "--section_1:foo=bar", "--section_1:pi=3.14", "--section_2:foo=baz", "--just_a_flag", "--non_existing_config:section_1:foo=none", "--options:section_3:name=options", "--section_1:pi=3.1415", ] remember_sys_argv, sys.argv = sys.argv, cfg_argv cfg.update_from_options(allow_new=True) sys.argv = remember_sys_argv return cfg @pytest.fixture def config_dict(gps_dict): """A test configuration based on a dictionary""" cfg = config.Configuration("dictionary") cfg.update_from_dict(gps_dict, section="gps") return cfg @pytest.fixture def gps_dict(): """A dictionary with GPS test data""" return dict(gps_f1=1575.42, gps_f2=1227.60, gps_f5=1176.45, gps_name="Global Positioning System") @pytest.fixture def config_section(config_dict): """A section with test data""" return config_dict.gps # # Tests # def test_read_config_from_file(config_file): """Test that reading a configuration from file works""" assert len(config_file.sections) > 0 assert len(config_file.sources) == 1 assert list(config_file.sources)[0].endswith("test_config.conf") def test_read_config_from_file_classmethod(config_file): """Test that reading a configuration from file works using the classmethod""" cfg_path = pathlib.Path(__file__).parent / "test_config.conf" cfg = config.Configuration.read_from_file("test", cfg_path) assert cfg.as_str() == config_file.as_str() @pytest.mark.skip(reason="as_str() does not print profiles correctly") def test_write_config_to_file(config_file, tmpdir): """Test that writing a configuration creates a file that is identical to the original""" cfg_path = pathlib.Path("".join(config_file.sources)) out_path = pathlib.Path(tmpdir / "test_config.conf") config_file.write_to_file(out_path) assert normalize_whitespace(cfg_path.read_text()) == normalize_whitespace(out_path.read_text()) def test_read_config_from_dict(config_dict): """Test that reading a configuration from a dict works""" assert len(config_dict.sections) > 0 assert len(config_dict.sources) == 1 assert list(config_dict.sources)[0] == "dictionary" def test_read_config_from_options(config_options): """Test that reading a configuration from a options works""" assert len(config_options.sections) > 0 assert len(config_options.sources) > 0 assert all(s.startswith("command line") for s in config_options.sources) def test_update_config_from_config_section(config_file, 
config_options): """Test that a config section can be copied""" assert "section_1" not in config_file.section_names config_file.update_from_config_section(config_options.section_1) assert "section_1" in config_file.section_names assert str(config_file.section_1) == str(config_options.section_1) def test_update_config_from_options(config_file): """Test that a config can be updated from command line options""" config_file.master_section = "midgard" sections_before = set(config_file.section_names) entries_before = set(config_file.midgard.as_list()) cfg_argv = [ sys.argv[0], "not_an_option", "--foo=I am an option", "--midgard:pi=4", "--new_key=new value", "--new_section:pi=3.14", "--just_a_flag", "--non_existing_config:midgard:foo=none", "--file:midgard:spam=more ham", ] remember_sys_argv, sys.argv = sys.argv, cfg_argv config_file.update_from_options(allow_new=True) sys.argv = remember_sys_argv assert set(config_file.section_names) - sections_before == {"new_section"} assert set(config_file.midgard.as_list()) - entries_before == {"new_key"} assert config_file.midgard.foo.str == "I am an option" assert config_file.midgard.pi.str == "4" assert config_file.midgard.spam.str == "more ham" assert config_file.midgard.foo.source == "command line (--foo=I am an option)" def test_clearing_config(config_file): """Test that clearing a configuration works""" config_file.clear() assert len(config_file.sections) == 0 def test_set_non_existing_master_section(config_file): """Test that setting a non-existing section is ok, but getting from it raises an error""" config_file.master_section = "non_existing" with pytest.raises(exceptions.MissingSectionError): config_file.non_exisiting def test_access_from_master_section(config_file): """Test that accessing entry from master section can be done without referencing section""" config_file.master_section = "midgard" assert config_file.foo is config_file.midgard.foo def test_access_with_master_section(config_file): """Test accessing an entry that is not in the master section""" config_file.master_section = "midgard" assert config_file.profile_test.technique.str == "none" def test_get_from_master_section_without_master_section(config_file): """Test that trying to get an entry as if from a master section typically raises an error""" with pytest.raises(exceptions.MissingSectionError): config_file.foo def test_get_from_master_section(config_file): """Test that get can access entries from a master section""" config_file.master_section = "midgard" entry = config_file.get("foo", default="baz") assert entry is config_file.midgard.foo def test_profiles_are_not_separate_sections(config_file): """Test that profiles are not registered as separate sections""" assert len([s for s in config_file.section_names if s.startswith("profile_test")]) == 1 def test_profiles_are_prioritized(config_file): """Test that values are taken from the correct profiles, when giving a list of profiles to use""" config_file.profiles = ["sisre", "vlbi", None] assert config_file.profile_test.technique.str == "gnss" # from profile sisre assert config_file.profile_test.spam.str == "bam" # from profile vlbi assert config_file.profile_test.foo.str == "baz" # from default profile def test_automatic_default_profile(config_file): """Test that default profile is included automatically""" config_file.profiles = ["sisre", "vlbi"] assert config_file.profiles == ["sisre", "vlbi", None] def test_set_non_existing_profiles(config_file): """Test that non-existing profiles are ignored (no error)""" config_file.profiles = 
["non_existing", None] assert config_file.profile_test.technique.str == "none" # from default profile def test_using_only_default_profile(config_file): """Test that default profile can be set simply by assigning None""" config_file.profiles = None assert config_file.profiles == [None] assert config_file.profile_test.technique.str == "none" # from default profile def test_get_with_override_value(config_file): """Test that get with override value returns override value""" entry = config_file.get("foo", section="midgard", value="override") assert isinstance(entry, config.ConfigurationEntry) assert entry.str == "override" assert entry.source == "method call" def test_get_with_default_value_and_non_existing_section(config_file): """Test that get returns default value when nothing is found in configuration""" entry = config_file.get("foo", section="non_existing", default="default") assert isinstance(entry, config.ConfigurationEntry) assert entry.str == "default" assert entry.source == "default value" def test_get_with_default_value_and_non_existing_entry(config_file): """Test that get returns default value when nothing is found in configuration""" entry = config_file.get("non_existing", section="midgard", default="default") assert isinstance(entry, config.ConfigurationEntry) assert entry.str == "default" assert entry.source == "default value" def test_get_without_default_value_and_non_existing_section(config_file): """Test that get raises error when nothing is found in configuration and no default value is given""" with pytest.raises(exceptions.MissingSectionError): config_file.get("foo", section="non_existing") def test_get_without_default_value_and_non_existing_entry(config_file): """Test that get raises error when nothing is found in configuration and no default value is given""" with pytest.raises(exceptions.MissingEntryError): config_file.get("non_existing", section="midgard") def test_get_from_configuration(config_file): """Test that get returns the same entry as regular attribute access""" entry = config_file.get("foo", section="midgard", default="baz") assert entry is config_file.midgard.foo def test_get_from_fallback_config(config_file, config_dict): """Test that get can access entries in a fallback configuration""" config_dict.fallback_config = config_file entry = config_dict.get("foo", section="midgard", default="baz") assert entry is config_file.midgard.foo def test_exists_with_section(config_file): """Test that exists works for both existing and non-existing keys""" assert config_file.exists("foo", section="midgard") assert not config_file.exists("does_not_exist", section="midgard") assert not config_file.exists("foo", section="does_not_exist") def test_exists_with_master_section(config_file): """Test that exists works for both existing and non-existing keys without specifying section""" config_file.master_section = "data_types" assert config_file.exists("str") assert not config_file.exists("does_not_exist") def test_exists_with_master_section_defined(config_file): """Test that exists behaves correctly when master_section is defined and section specified""" config_file.master_section = "data_types" assert config_file.exists("foo", section="midgard") assert not config_file.exists("str", section="str") assert not config_file.exists("foo", section="does_not_exist") def test_getattr_from_fallback_config(config_file, config_dict): """Test that attribute access can get entries in fallback configuration""" config_dict.fallback_config = config_file entry = config_dict.midgard.foo assert 
entry is config_file.midgard.foo def test_getitem_from_fallback_config(config_file, config_dict): """Test that dictionary access can get entries in fallback configuration""" config_dict.fallback_config = config_file entry = config_dict["midgard"].foo assert entry is config_file.midgard.foo def test_add_single_entry(config_file): """Test adding a single new entry""" sections_before = set(config_file.section_names) config_file.update("new_section", "new_key", "new_value", source="test") assert set(config_file.section_names) - sections_before == {"new_section"} assert config_file.new_section.new_key.str == "new_value" assert config_file.new_section.new_key.source == "test" def test_updating_existing_entry(config_file): """Test updating the value of an existing entry""" sections_before = config_file.section_names config_file.update("midgard", "foo", "new_value", source="test", allow_new=False) assert config_file.section_names == sections_before assert config_file.midgard.foo.str == "new_value" assert config_file.midgard.foo.source == "test" def test_updating_non_existing_section(config_file): """Test updating the value of an entry in a non-existing section""" with pytest.raises(exceptions.MissingSectionError): config_file.update("non_existing", "foo", "new_value", source="test", allow_new=False) def test_updating_non_existing_entry(config_file): """Test updating the value of a non-existing entry""" with pytest.raises(exceptions.MissingEntryError): config_file.update("midgard", "non_existing", "new_value", source="test", allow_new=False) @pytest.mark.skip(reason="as_str() does not print profiles correctly") def test_configuration_as_string(config_file): """Test that configuration as string is similar to configuration file""" path = pathlib.Path(list(config_file.sources)[0]) with open(path, mode="r") as fid: file_str = "".join(l for l in fid if not l.startswith("#")) assert normalize_whitespace(file_str) == normalize_whitespace(config_file.as_str()) @pytest.mark.skip(reason="str() does not print profiles correctly") def test_string_representation_of_configuration(config_file): """Test that string representation is similar to configuration file""" path = pathlib.Path(list(config_file.sources)[0]) with open(path, mode="r") as fid: file_str = "".join(l for l in fid if not l.startswith("#")) assert normalize_whitespace(file_str) == normalize_whitespace(str(config_file)) def test_configuration_as_dict(config_dict, gps_dict): """Test that dict representation gives back a sensible dictionary""" assert config_dict.as_dict(default_getter="str")["gps"] == {k: str(v) for k, v in gps_dict.items()} def test_configuration_as_dict_with_getters(config_dict, gps_dict): """Test that dict representation gives back a sensible dictionary""" getters = {"gps": {k: type(v).__name__ for k, v in gps_dict.items()}} assert config_dict.as_dict(getters=getters)["gps"] == gps_dict def test_attribute_and_item_access(config_file): """Test that the same sections are returned whether using attribute or item access""" assert config_file.midgard is config_file["midgard"] def test_deleting_section_as_item(config_file): """Test that deleting a section removes it""" sections_before = set(config_file.section_names) del config_file["midgard"] assert sections_before - set(config_file.section_names) == {"midgard"} def test_deleting_section_as_attribute(config_file): """Test that deleting a section removes it""" sections_before = set(config_file.section_names) del config_file.midgard assert sections_before - set(config_file.section_names) 
== {"midgard"} def test_dir_return_sections(config_file): """Test that sections are included in dir(configuration)""" cfg_dir = dir(config_file) sections = set(config_file.section_names) assert len(sections) > 0 assert set(cfg_dir) & sections == sections def test_dir_return_master_section(config_file): """Test that entries in master section are included in dir(configuration)""" config_file.master_section = "midgard" cfg_dir = dir(config_file) entries = set(config_file.midgard.as_list()) assert len(entries) > 0 assert set(cfg_dir) & entries == entries def test_repr_of_configuration(config_file): """Test that repr of configuration is sensible""" assert repr(config_file) == "Configuration(name='file')" def test_section_as_string(config_section, gps_dict): """Test that string representation of section looks reasonable""" assert only_word_characters(config_section.as_str()) == only_word_characters("gps" + str(gps_dict)) def test_section_as_list(config_section, gps_dict): """Test that the list representation of section equals list of keys""" assert config_section.as_list() == list(gps_dict.keys()) def test_section_as_dict(config_section, gps_dict): """Test that the dict representation of section equals original dict""" assert config_section.as_dict(default_getter="str") == {k: str(v) for k, v in gps_dict.items()} def test_section_as_dict_with_getters(config_section, gps_dict): """Test that the dict representation of section equals original dict""" getters = {k: type(v).__name__ for k, v in gps_dict.items()} assert config_section.as_dict(getters=getters) == gps_dict def test_dir_return_entries(config_section): """Test that entries are included in dir(section)""" cfg_dir = dir(config_section) entries = set(config_section.as_list()) assert len(entries) > 0 assert set(cfg_dir) & entries == entries def test_repr_of_section(config_section): """Test that repr of section is sensible""" assert repr(config_section) == "ConfigurationSection(name='gps')" entry_data = [ EntryTestCase("str", "Curiouser and curiouser!", "Curiouser and curiouser!"), EntryTestCase("int", "42", 42), EntryTestCase("float", "3.14", 3.14), EntryTestCase("bool", "on", True), EntryTestCase("bool", "no", False), EntryTestCase("date", "2018-05-30", date(2018, 5, 30)), EntryTestCase("datetime", "2017-01-28 15:12:30", datetime(2017, 1, 28, 15, 12, 30)), EntryTestCase("path", "test_config.conf", pathlib.Path("test_config.conf")), EntryTestCase("list", "vlbi, slr, gnss, doris", ["vlbi", "slr", "gnss", "doris"]), EntryTestCase("tuple", "one two three", ("one", "two", "three")), EntryTestCase("dict", "one:en, two:to, three:tre", {"one": "en", "two": "to", "three": "tre"}), ] @pytest.mark.parametrize("test_case", entry_data) def test_access_entry(test_case): """Test getting values of entries through accessors""" entry = config.ConfigurationEntry("test", test_case.cfg_value) assert getattr(entry, test_case.type) == test_case.value assert getattr(entry, f"as_{test_case.type}")() == test_case.value @pytest.mark.parametrize("test_case", entry_data) def test_entry_is_used(test_case): """Test that entry is marked as used when accessing value""" entry = config.ConfigurationEntry("test", test_case.cfg_value) assert entry.is_used is False getattr(entry, test_case.type) assert entry.is_used is True def test_access_enum(): """Test getting the value of an entry as an enum (has no property access)""" entry = config.ConfigurationEntry("test", "info") assert entry.as_enum("log_level") is enums.get_value("log_level", "info") def test_enum_is_used(): """Test 
that entry is marked as used when accessed as enum""" entry = config.ConfigurationEntry("test", "info") assert entry.is_used is False entry.as_enum("log_level") assert entry.is_used is True def test_entry_with_type(config_file): """Test that type hints of an entry can be accessed""" assert config_file.midgard.foo.type == "str" def test_entry_with_help(config_file): """Test that help texts of an entry can be accessed""" assert config_file.midgard.foo.help == "How to foodazzle" def test_metadata_of_entry(config_file): """Test that metadata of entry can be accessed""" assert len(config_file.midgard.foo.meta.keys()) > 0 assert config_file.midgard.foo.meta["type"] is config_file.midgard.foo.type assert config_file.midgard.foo.meta["help"] is config_file.midgard.foo.help def test_bool_of_entry(): """Test the bool value of an entry""" entry = config.ConfigurationEntry("key", "value") assert entry def test_bool_of_empty_entry(): """Test that the bool value of an empty entry is False""" entry = config.ConfigurationEntry("empty", "") assert not entry def test_repr_of_entry(): """Test that the repr of an entry is sensible""" entry = config.ConfigurationEntry("key", "value") assert repr(entry) == "ConfigurationEntry(key='key', value='value')"
python
#!/usr/bin/python3 # Created by Jared Dunbar, April 4th, 2020 # Use this as an example for a basic game. import pyxel, random, math import os.path from os import path # Width and height of game screen, in tiles WIDTH = 16 HEIGHT = 12 # Width and height of the game level GL_WIDTH = 170 GL_HEIGHT = 150 # Window offsets for the panning feature. windowOffsetX = 0 windowOffsetY = 0 # Entities (should not) be able to walk through structures, # unless they have "allow" set to True structures = [] # Entities can move all over the place and stand in the same cube, but not walk # into structures unless the structure has "allow" set to True entities = [] # These contain all fireables and are cleared relatively often. lazers = [] # Sound mappings sounds = {} # These are the texture maps for 8x8 and 16x16 texture8 = {} texture16 = {} # Information about the image map: # Image maps are 256x256. This allows for 256 16x16 textures in one tilemap, # or 1024 8x8 textures in one tilemap # Image Map 0: 16x16 textures # Image Map 1: 8x8 textures # Image Map 2: <unused> # This sets up all the rendering code for ya. Give it a image, # and it will remember the thing for you. # NOTE: transparent is a color key. If -1, doesn't do transparent stuff. class Drawn(): def __init__(self, name, size=16, texture="invalid16.png", transparent=-1): if (size != 8) and (size != 16): print("CRITICAL FAIL! Texture is not of correct size!") exit(1) self.trans = transparent if size == 16: # Only register if we're not in the 16x16 texturemap if name not in texture16: if not path.exists(texture): texture = "invalid16.png" # 16x16 is in bank 0 self.bank = 0 self.xLoc = int(len(texture16)/16)*16 self.yLoc = (len(texture16)%16) * 16 pyxel.image(self.bank).load(self.xLoc, self.yLoc, texture) texture16[name] = self elif size == 8: # Only register if we're not in the 8x8 texturemap if name not in texture8: if not path.exists(texture): print("Could not find texture {}".format(texture)) texture = "invalid8.png" # 8x8 is in bank 1 self.bank = 1 self.xLoc = int(len(texture8)/32)*8 self.yLoc = (len(texture8)%32)*8 pyxel.image(self.bank).load(self.xLoc, self.yLoc, texture) texture8[name] = self def draw(self, x, y, trans=None, fX=False, fY=False): if (trans == None): trans = self.trans # Default texture size is 16x16 ts = 16 # If we're in Bank 1, texture size is 8x8 if self.bank == 1: ts = 8 xf = ts yf = ts if fX: xf = -ts if fY: yf = -ts pyxel.blt(x*abs(ts), y*abs(ts), self.bank, self.xLoc, self.yLoc, xf, yf, trans) class Sounded(): def __init__(self, name, notes, tone="s", volume="4", effect=("n" * 4 + "f"), speed=7): if name not in sounds: self.id = len(sounds) pyxel.sound(self.id).set(note=notes, tone=tone, volume=volume, effect=effect, speed=speed) sounds[name] = self # There are 4 streams - 0 through 3 def play(self, stream=0): pyxel.play(stream, self.id) # This is the base class of any thing that renders to the screen and ticks. 
class Entity(): def __init__(self, name, texture=["invalid16.png"], x=0, y=0): self.name = name self.x = x self.y = y self.allow = False self.frameNum = 0 self.dir = "N" self.texName = [x.rsplit(".",1)[0] for x in texture] for tex in texture: texName = tex.rsplit(".",1)[0] # remove file extension Drawn(texName, 16, tex) def update(self): pass def draw(self): drawX = self.x + windowOffsetX drawY = self.y + windowOffsetY if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT): texture16[self.texName[self.frameNum]].draw(drawX, drawY) class Lazer(): def __init__(self, owner, x, y, dir): self.owner = owner self.x = x self.y = y self.dir = dir def draw(self): drawX = (self.x + windowOffsetX)*2 drawY = (self.y + windowOffsetY)*2 if (drawX >= 0 and drawX < WIDTH*2) and (drawY >=0 and drawY < HEIGHT*2): if (self.dir == "N" or self.dir == "S"): texture8["player/beem_V{}".format(random.randrange(0,3))].draw(drawX + 0.5, drawY + 0.5, 0) else: texture8["player/beem_H{}".format(random.randrange(0,3))].draw(drawX + 0.5, drawY + 0.5, 0) class Wall(Entity): def __init__(self, name, x, y): super(Wall, self).__init__(name, ["player/wall_{}.png".format(x) for x in range(0,12)], x, y) self.frameNum = 0 + random.randrange(0,12) self.randX = random.choice([True, False]) self.randY = random.choice([True, False]) def update(self): pass def draw(self): drawX = self.x + windowOffsetX drawY = self.y + windowOffsetY if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT): texture16[self.texName[int(self.frameNum)]].draw(drawX, drawY, 0, fX=self.randX, fY=self.randY) self.frameNum += 0.5 if (self.frameNum >= 12): self.frameNum = 0 class Floor(Entity): def __init__(self, name, x, y): super(Floor, self).__init__(name, [random.choice(["player/ground.png"]*8 + ["player/ground_blip.png"])], x, y) self.allow = True self.randX = random.choice([True, False]) self.randY = random.choice([True, False]) def draw(self): drawX = self.x + windowOffsetX drawY = self.y + windowOffsetY if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT): texture16[self.texName[self.frameNum]].draw(drawX, drawY, fX=self.randX, fY=self.randY) # The player class extends Entity by listening for keyboard events. 
class Player(Entity): def __init__(self, name, x=WIDTH/2, y=HEIGHT/2): super(Player, self).__init__(name, ["player/char_H{}.png".format(x) for x in range(0,12)] + ["player/char_V{}.png".format(x) for x in range(0,12)], x, y) self.cooldown = 0 self.cooldownTime = 2 self.frameNum = 1 self.texHnames = [x for x in self.texName if "H" in x] self.texVnames = [x for x in self.texName if "V" in x] def update(self): self.cooldown -= 1 if (self.cooldown <= 0): wantGoX = 0 wantGoY = 0 if pyxel.btn(pyxel.KEY_UP): wantGoY -= 1 self.dir = "N" if pyxel.btn(pyxel.KEY_DOWN): wantGoY += 1 self.dir = "S" if pyxel.btn(pyxel.KEY_LEFT): wantGoX -= 1 self.dir = "E" if pyxel.btn(pyxel.KEY_RIGHT): wantGoX += 1 self.dir = "W" if (wantGoX != 0 or wantGoY != 0): if canGo(self.x, self.y, wantGoX, wantGoY): global windowOffsetX, windowOffsetY self.x = self.x + wantGoX self.y = self.y + wantGoY self.cooldown = self.cooldownTime windowOffsetX -= wantGoX windowOffsetY -= wantGoY def draw(self): drawX = self.x + windowOffsetX drawY = self.y + windowOffsetY fX = False fY = False ch = self.texHnames if self.dir == "N": fX = True fY = True ch = self.texVnames if self.dir == "S": fX = False fY = False ch = self.texVnames if self.dir == "E": fX = False fY = False ch = self.texHnames if self.dir == "W": fX = True fY = True ch = self.texHnames if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT): texture16[ch[self.frameNum - 1]].draw(drawX, drawY, 0, fX=fX, fY=fY) self.frameNum += 1 if (self.frameNum >= 12): self.frameNum = 0 class StationaryTurret(Entity): def __init__(self, name, x=WIDTH/2, y=HEIGHT/2, dir="N"): super(StationaryTurret, self).__init__(name, ["player/turret_H.png", "player/turret_V.png"], x, y) self.texHnames = [x for x in self.texName if "H" in x] self.texVnames = [x for x in self.texName if "V" in x] self.dir = dir self.charge = 0 self.chargeTexNames = [] self.HbeamNames = [] self.VbeamNames = [] self.owner = random.randrange(0,32000) # good enough for tex in ["player/turret_charge_{}.png".format(x) for x in range(0,4)]: texName = tex.rsplit(".",1)[0] # remove file extension self.chargeTexNames.append(texName) Drawn(texName, 8, tex) for tex in ["player/beem_H{}.png".format(x) for x in range(0,3)]: texName = tex.rsplit(".",1)[0] self.HbeamNames.append(texName) Drawn(texName, 8, tex) for tex in ["player/beem_V{}.png".format(x) for x in range(0,3)]: texName = tex.rsplit(".",1)[0] self.VbeamNames.append(texName) Drawn(texName, 8, tex) def update(self): charge = 0 for entity in entities: #print(entity) if isinstance(entity, Player): #print("{} is player!".format(entity)) xdiff = math.pow(entity.x - self.x, 2) ydiff = math.pow(entity.y - self.y, 2) if xdiff + ydiff < 10: #print("ARMING {} {}".format(self.x, self.y)) charge += 0.5 if (charge == 0): if (self.charge > 0): self.charge -= 1 else: if self.charge < 3: self.charge += 1 if (self.charge == 3): sounds["bzzz"].play(2) self.placeLazer(self.dir) def placeLazer(self, direction="N"): count = 0 if direction == "N" or direction == "S": beamNames = self.HbeamNames if direction == "N": for y in range(0, HEIGHT*4): yL = self.y - y/2 lz = Lazer("{}{}".format(self.owner, y), self.x, yL, "N") lazers.append(lz) if direction == "S": for y in range(0, HEIGHT*4): yL = self.y + y/2 lz = Lazer("{}{}".format(self.owner, y), self.x, yL, "S") lazers.append(lz) elif direction == "E" or direction == "W": beamNames = self.VbeamNames if direction == "E": for x in range(0, WIDTH*4): xL = self.x - x/2 lz = Lazer("{}{}".format(self.owner, x), xL, self.y, "E") 
lazers.append(lz) if direction == "W": for x in range(0, WIDTH*4): xL = self.x + x/2 lz = Lazer("{}{}".format(self.owner, x), xL, self.y, "W") lazers.append(lz) def draw(self): drawX = self.x + windowOffsetX drawY = self.y + windowOffsetY fX = False fY = False ch = self.texHnames if self.dir == "N": fX = True fY = True ch = self.texVnames if self.dir == "S": fX = False fY = False ch = self.texVnames if self.dir == "E": fX = False fY = False ch = self.texHnames if self.dir == "W": fX = True fY = True ch = self.texHnames if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT): texture16[ch[0]].draw(drawX, drawY, 0, fX=fX, fY=fY) texture8[self.chargeTexNames[int(self.charge)]].draw(drawX*2+0.5, drawY*2+0.5, 0) class MovingTurret(Entity): def __init__(self, name, x=WIDTH/2, y=HEIGHT/2, dir="N"): super(MovingTurret, self).__init__(name, ["player/turret_H{}.png".format(x) for x in range(0,12)] + ["player/turret_V{}.png".format(x) for x in range(0,12)], x, y) self.cooldown = 0 self.cooldownTime = 2 self.frameNum = 1 self.texHnames = [x for x in self.texName if "H" in x] self.texVnames = [x for x in self.texName if "V" in x] self.dir = dir self.charge = 0 self.chargeTexNames = [] self.HbeamNames = [] self.VbeamNames = [] self.owner = random.randrange(0,32000) # good enough for tex in ["player/turret_charge_{}.png".format(x) for x in range(0,4)]: texName = tex.rsplit(".",1)[0] # remove file extension self.chargeTexNames.append(texName) Drawn(texName, 8, tex) for tex in ["player/beem_H{}.png".format(x) for x in range(0,3)]: texName = tex.rsplit(".",1)[0] self.HbeamNames.append(texName) Drawn(texName, 8, tex) for tex in ["player/beem_V{}.png".format(x) for x in range(0,3)]: texName = tex.rsplit(".",1)[0] self.VbeamNames.append(texName) Drawn(texName, 8, tex) def update(self): charge = 0 for entity in entities: #print(entity) if isinstance(entity, Player): #print("{} is player!".format(entity)) xdiff = math.pow(entity.x - self.x, 2) ydiff = math.pow(entity.y - self.y, 2) if xdiff + ydiff < 10: #print("ARMING {} {}".format(self.x, self.y)) charge += 0.5 if (charge == 0): if (self.charge > 0): self.charge -= 1 else: if self.charge < 3: self.charge += 1 if (self.charge == 3): sounds["bzzz"].play(2) self.placeLazer(self.dir) def placeLazer(self, direction="N"): count = 0 if direction == "N" or direction == "S": beamNames = self.HbeamNames if direction == "N": for y in range(0, HEIGHT*4): yL = self.y - y/2 lz = Lazer("{}{}".format(self.owner, y), self.x, yL, "N") lazers.append(lz) if direction == "S": for y in range(0, HEIGHT*4): yL = self.y + y/2 lz = Lazer("{}{}".format(self.owner, y), self.x, yL, "S") lazers.append(lz) elif direction == "E" or direction == "W": beamNames = self.VbeamNames if direction == "E": for x in range(0, WIDTH*4): xL = self.x - x/2 lz = Lazer("{}{}".format(self.owner, x), xL, self.y, "E") lazers.append(lz) if direction == "W": for x in range(0, WIDTH*4): xL = self.x + x/2 lz = Lazer("{}{}".format(self.owner, x), xL, self.y, "W") lazers.append(lz) def draw(self): drawX = self.x + windowOffsetX drawY = self.y + windowOffsetY fX = False fY = False ch = self.texHnames if self.dir == "N": fX = True fY = True ch = self.texVnames if self.dir == "S": fX = False fY = False ch = self.texVnames if self.dir == "E": fX = False fY = False ch = self.texHnames if self.dir == "W": fX = True fY = True ch = self.texHnames if (drawX >= 0 and drawX < WIDTH) and (drawY >=0 and drawY < HEIGHT): texture16[ch[self.frameNum - 1]].draw(drawX, drawY, 0, fX=fX, fY=fY) 
texture8[self.chargeTexNames[self.charge]].draw(drawX*2+0.5, drawY*2+0.5, 0) self.frameNum += 1 if (self.frameNum >= 12): self.frameNum = 0 if (self.frameNum == 3): if self.dir == "N": self.dir = "E" elif self.dir == "E": self.dir = "S" elif self.dir == "S": self.dir = "W" elif self.dir == "W": self.dir = "N" # This tells you if an entity is permitted to go somewhere. # From x,y with velocity a,b def canGo(x, y, a, b): # Don't allow to exit past the edges of the screen if ((x+a) < 0 or (x+a) >= GL_WIDTH): sounds["collide"].play(0) return False if ((y+b) < 0 or (y+b) >= GL_HEIGHT): sounds["collide"].play(0) return False # Basic structure checks in direction for s in structures: if (s.x == (x+a)) and (s.y == (y+b)): if s.allow: return True sounds["collide"].play(0) return False # Advanced structure checks on diagonals if not (x == a or y == b): xCheck = False yCheck = False for s in structures: if (s.x == (x+a) and (s.y == y)): xCheck = not s.allow if (s.x == x) and (s.y == (y+b)): yCheck = not s.allow if xCheck and yCheck: sounds["collide"].play(0) return False return True # This sets up the game def setup(): # Register with Pyxel pyxel.init(WIDTH * 16, HEIGHT * 16, caption="smolgame", palette=[0xff00e5, 0xaaa9ad, 0x5b676d, 0x1f262a, 0x9cff78, 0x44ff00, 0x2ca600, 0x7cff00, 0xff8b00, 0xff0086, 0x6f00ff, 0x0086ff, 0x00ff9a, 0x1f0000, 0x49afff, 0xe2e1ff], scale=4, fps=20) # Register sounds Sounded("collide", "c2c1", speed=4) Sounded("level", "c3e3g3c4c4") Sounded("bzzz", "c1c1c1c1c1c1c1", tone="t", speed=9) # Register our player player = Player("player") entities.append(player) st = StationaryTurret("turret", -1, -1, "N") entities.append(st) st = StationaryTurret("turret", 16, 16, "S") entities.append(st) st = StationaryTurret("turret", -1, 16, "W") entities.append(st) st = StationaryTurret("turret", 16, -1, "E") entities.append(st) mt = MovingTurret("turret", 8, 8, "N") entities.append(mt) #wa = Wall("wall", -1, 11) #structures.append(wa) #wa = Wall("wall", -1, 12) #structures.append(wa) #wa = Wall("wall", -1, 13) #structures.append(wa) #wa = Wall("wall", -1, 14) #structures.append(wa) #wa = Wall("wall", -1, 15) #structures.append(wa) # Invalid texture test code #random = Entity("random", "random.png") #entities.append(random) def mapObjType(type, ct, cb, cl, cr): if type == "W": return Wall if type == "F": return Floor if type[0] == "C": if "U" in type and ct: if "W" in type: return Wall if "F" in type: return Floor if "L" in type and cl: if "W" in type: return Wall if "F" in type: return Floor if "R" in type and cr: if "W" in type: return Wall if "F" in type: return Floor if "D" in type and cb: if "W" in type: return Wall if "F" in type: return Floor return None if type[0] == "O": if "U" in type and ct: return Floor if "D" in type and cl: return Floor if "R" in type and cr: return Floor if "L" in type and cb: return Floor return Wall return None def parseRoomCSV(csvFile, ct, cb, cl, cr): f = open(csvFile) dat = f.read() f.close() lines = [x for x in dat.split("\n") if x.strip() != ""] roomData = [] for line in lines: ld = [] for entry in line.split(","): ld.append(mapObjType(entry,ct,cb,cl,cr)) roomData.append(ld) return roomData class RoomTile(): def __init__(self, ct, cb, cl, cr): self.ct = ct self.cl = cl self.cr = cr self.cb = cb # x and y are the room tile location, not the render tile. 
Room tiles are 15x15 the image tiles def generateInWorld(self, x, y): pass # Generates a room class Room(RoomTile): def generateInWorld(self, x, y): roomData = parseRoomCSV("room.csv",self.ct,self.cb,self.cl,self.cr) for xL in range(0,15): for yL in range(0,15): tile = roomData[xL][yL] if (tile == Floor): tileObj = tile(name="floor", x=xL+x*15, y=yL+y*15) structures.append(tileObj) elif (tile == Wall): tileObj = tile(name="wall", x=xL+x*15, y=yL+y*15) structures.append(tileObj) # Generates a thin hallway between two or more rooms class Hallway(RoomTile): def generateInWorld(self, x, y): roomData = parseRoomCSV("hall.csv",self.ct,self.cb,self.cl,self.cr) for xL in range(0,15): for yL in range(0,15): tile = roomData[xL][yL] if (tile == Floor): tileObj = tile(name="floor", x=xL+x*15, y=yL+y*15) structures.append(tileObj) elif (tile == Wall): tileObj = tile(name="wall", x=xL+x*15, y=yL+y*15) structures.append(tileObj) def basicWorldgen(): h = Hallway(True, True, True, True) h.generateInWorld(0, 1) r = Room(True, True, True, True) r.generateInWorld(0, 0) r = Room(True, True, True, True) r.generateInWorld(1, 0) # Generate the world! You can use this to generate levels or whatever def worldgen(roomSetup):# rooms = roomSetup #rooms += [item for sublist in [[x[0] for y in range(x[1])] for x in roomSetup] for item in sublist] map = [] roommap = [] for x in range(0,15): map.append([]) roommap.append([]) for y in range(0,9): map[x].append(0) roommap[x].append(None) x = 1 y = 1 while len(rooms) > 1: map[x][y] = 1 roommap[x][y] = rooms.pop(random.randrange(0,len(rooms))) n = random.randrange(1,5) direction = 0 not_this_way = 0 while n > 0: while direction == not_this_way: direction = random.randrange(1,4) if direction == 1: # Left if x > 0: not_this_way = 3 x = x - 1 else: not_this_way = 1 x = x + 1 if map[x][y] == 0: map[x][y] = 2 elif direction == 2: # Up if y > 0: not_this_way = 4 y = y - 1 else: not_this_way = 2 y = y + 1 if map[x][y] == 0: map[x][y] = 2 elif direction == 3: # Right if x < 14: not_this_way = 1 x = x + 1 else: not_this_way = 3 x = x - 1 if map[x][y] == 0: map[x][y] = 2 elif direction == 4: # Down if y < 8: not_this_way = 2 y = y + 1 else: not_this_way = 4 y = y - 1 if map[x][y] == 0: map[x][y] = 2 if roommap[x][y] == None or n > 1: n = n - 1 map[x][y] = 1 roommap[x][y] = rooms.pop(random.randrange(0,len(rooms))) for x in range(0,15): for y in range(0,9): mxy = map[x][y] if mxy == 0: continue mxyl = False mxyu = False mxyd = False mxyr = False if y > 0: if map[x][y-1] != 0: mxyu = True if y < 8: if map[x][y+1] != 0: mxyd = True if x > 0: if map[x-1][y] != 0: mxyl = True if x < 14: if map[x+1][y] != 0: mxyr = True if mxy == 1: roomobj = Room(mxyu,mxyd,mxyl,mxyr) elif mxy == 2: roomobj = Hallway(mxyu,mxyd,mxyl,mxyr) roomobj.generateInWorld(x,y) # This is called by Pyxel every tick, and handles all game inputs def update(): # Quit if Q if pyxel.btn(pyxel.KEY_Q): pyxel.quit() # Play a sound if Space if pyxel.btn(pyxel.KEY_SPACE): sounds["level"].play(1) # Tick all entites and structures. 
The player movement is included randomly # somewhere in this list but you can do a list comprehension to make it # go first or last if you want (examples provided with no warranty) # for x in [x for x in entities if x is Player] # for x in [x for x in entities if x is not Player] # Clear all lazers lazers.clear() for x in structures: x.update() for x in entities: x.update() # This is called by Pyxel every time the screen needs a redraw, which can be # more than once per tick, but really depends on the FPS? def draw(): # Clear the screen pyxel.cls(col=3) for x in structures: x.draw() for x in lazers: x.draw() for x in entities: x.draw() # This is where the game setup logic is def run(): setup() basicWorldgen() #worldgen([0,0,0,0,0,0,0,0,0,0,0,0]) pyxel.run(update, draw) # This is the entry point for our file. run()
python
import os
from urllib.parse import urljoin, urlparse
import urllib
import ntpath

is_win32 = os.name == "nt"


def createDirectory(base, new_dir):
    if is_win32:
        new_dir = cleanName(new_dir, ".")
        if not base.startswith("\\\\?\\"):
            base = "\\\\?\\" + base
    path_new_dir = os.path.join(base, new_dir)
    if not os.path.exists(path_new_dir):
        os.mkdir(path_new_dir)
    return path_new_dir


def longPath(path):
    if is_win32 and not path.startswith("\\\\?\\"):
        return "\\\\?\\" + path
    return path


def try_get(src, getter, expected_type=None):
    if not isinstance(getter, (list, tuple)):
        getter = [getter]
    for get in getter:
        try:
            v = get(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            pass
        else:
            if expected_type is None or isinstance(v, expected_type):
                return v
    return None


def cleanName(value, deletechars='<>:"/\\|?*\r\n'):
    value = str(value)
    for c in deletechars:
        value = value.replace(c, '')
    return value


def GetFileNameFromUrl(url):
    urlParsed = urlparse(urllib.parse.unquote(url))
    fileName = os.path.basename(urlParsed.path).encode('utf-8')
    return cleanName(fileName)


def pathLeaf(path):
    '''
    Name..........: pathLeaf
    Description...: get file name from full path
    Parameters....: path - string. Full path
    Return values.: string file name
    Author........: None
    '''
    head, tail = ntpath.split(path)
    return tail or ntpath.basename(head)


def path_join(*args):
    new_path = os.path.join(*args)
    if os.path.altsep:
        return new_path.replace(os.path.sep, os.path.altsep)
    return new_path
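For context, a minimal usage sketch of the path helpers above. The example paths and strings are hypothetical and only illustrate the behaviour of cleanName, pathLeaf and path_join as defined in this module.

# Minimal usage sketch (hypothetical paths, assuming the helpers above are in scope).
print(cleanName('report: "final"?.txt'))              # 'report final.txt' -- characters illegal in file names removed
print(pathLeaf("/tmp/downloads/archive.zip"))          # 'archive.zip'
print(pathLeaf("/tmp/downloads/"))                     # 'downloads' -- trailing separator handled
print(path_join("tmp", "downloads", "archive.zip"))    # forward slashes preferred when os.path.altsep is set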
python
# This file is part of Buildbot.  Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members

from __future__ import absolute_import
from __future__ import print_function

from future.utils import itervalues

import copy

from twisted.internet import defer
from twisted.internet import reactor
from twisted.python import log

from buildbot.data import sourcestamps as sourcestampsapi
from buildbot.data import base
from buildbot.data import types
from buildbot.process.buildrequest import BuildRequestCollapser
from buildbot.process.results import SUCCESS
from buildbot.process.results import worst_status
from buildbot.util import datetime2epoch
from buildbot.util import epoch2datetime


class Db2DataMixin(object):

    @defer.inlineCallbacks
    def db2data(self, bsdict):
        if not bsdict:
            defer.returnValue(None)

        buildset = bsdict.copy()

        # gather the actual sourcestamps, in parallel
        sourcestamps = []

        @defer.inlineCallbacks
        def getSs(ssid):
            ss = yield self.master.data.get(('sourcestamps', str(ssid)))
            sourcestamps.append(ss)
        yield defer.DeferredList([getSs(id) for id in buildset['sourcestamps']],
                                 fireOnOneErrback=True, consumeErrors=True)
        buildset['sourcestamps'] = sourcestamps

        # minor modifications
        buildset['submitted_at'] = datetime2epoch(buildset['submitted_at'])
        buildset['complete_at'] = datetime2epoch(buildset['complete_at'])

        defer.returnValue(buildset)

    fieldMapping = {
        'bsid': 'buildsets.id',
        'external_idstring': 'buildsets.external_idstring',
        'reason': 'buildsets.reason',
        'submitted_at': 'buildsets.submitted_at',
        'complete': 'buildsets.complete',
        'complete_at': 'buildsets.complete_at',
        'results': 'buildsets.results',
        'parent_buildid': 'buildsets.parent_buildid',
        'parent_relationship': 'buildsets.parent_relationship'
    }


class BuildsetEndpoint(Db2DataMixin, base.Endpoint):

    isCollection = False
    pathPatterns = """
        /buildsets/n:bsid
    """

    @defer.inlineCallbacks
    def get(self, resultSpec, kwargs):
        res = yield self.master.db.buildsets.getBuildset(kwargs['bsid'])
        res = yield self.db2data(res)
        defer.returnValue(res)


class BuildsetsEndpoint(Db2DataMixin, base.Endpoint):

    isCollection = True
    pathPatterns = """
        /buildsets
    """
    rootLinkName = 'buildsets'

    def get(self, resultSpec, kwargs):
        complete = resultSpec.popBooleanFilter('complete')
        resultSpec.fieldMapping = self.fieldMapping
        d = self.master.db.buildsets.getBuildsets(complete=complete,
                                                  resultSpec=resultSpec)

        @d.addCallback
        def db2data(buildsets):
            d = defer.DeferredList([self.db2data(bs) for bs in buildsets],
                                   fireOnOneErrback=True,
                                   consumeErrors=True)

            @d.addCallback
            def getResults(res):
                return [r[1] for r in res]
            return d
        return d


class Buildset(base.ResourceType):

    name = "buildset"
    plural = "buildsets"
    endpoints = [BuildsetEndpoint, BuildsetsEndpoint]
    keyFields = ['bsid']
    eventPathPatterns = """
        /buildsets/:bsid
    """

    class EntityType(types.Entity):
        bsid = types.Integer()
        external_idstring = types.NoneOk(types.String())
        reason = types.String()
        submitted_at = types.Integer()
        complete = types.Boolean()
        complete_at = types.NoneOk(types.Integer())
        results = types.NoneOk(types.Integer())
        sourcestamps = types.List(
            of=sourcestampsapi.SourceStamp.entityType)
        parent_buildid = types.NoneOk(types.Integer())
        parent_relationship = types.NoneOk(types.String())
    entityType = EntityType(name)

    @base.updateMethod
    @defer.inlineCallbacks
    def addBuildset(self, waited_for, scheduler=None, sourcestamps=None, reason=u'',
                    properties=None, builderids=None, external_idstring=None,
                    parent_buildid=None, parent_relationship=None,
                    _reactor=reactor):
        if sourcestamps is None:
            sourcestamps = []
        if properties is None:
            properties = {}
        if builderids is None:
            builderids = []
        submitted_at = int(_reactor.seconds())
        bsid, brids = yield self.master.db.buildsets.addBuildset(
            sourcestamps=sourcestamps, reason=reason,
            properties=properties, builderids=builderids,
            waited_for=waited_for, external_idstring=external_idstring,
            submitted_at=epoch2datetime(submitted_at),
            parent_buildid=parent_buildid,
            parent_relationship=parent_relationship)

        yield BuildRequestCollapser(self.master, list(itervalues(brids))).collapse()

        # get each of the sourcestamps for this buildset (sequentially)
        bsdict = yield self.master.db.buildsets.getBuildset(bsid)
        sourcestamps = []
        for ssid in bsdict['sourcestamps']:
            sourcestamps.append(
                (yield self.master.data.get(('sourcestamps', str(ssid)))).copy()
            )

        # notify about the component build requests
        brResource = self.master.data.getResourceType("buildrequest")
        brResource.generateEvent(list(itervalues(brids)), 'new')

        # and the buildset itself
        msg = dict(
            bsid=bsid,
            external_idstring=external_idstring,
            reason=reason,
            submitted_at=submitted_at,
            complete=False,
            complete_at=None,
            results=None,
            scheduler=scheduler,
            sourcestamps=sourcestamps)
        # TODO: properties=properties)
        self.produceEvent(msg, "new")

        log.msg("added buildset %d to database" % bsid)

        # if there are no builders, then this is done already, so send the
        # appropriate messages for that
        if not builderids:
            yield self.maybeBuildsetComplete(bsid, _reactor=_reactor)

        defer.returnValue((bsid, brids))

    @base.updateMethod
    @defer.inlineCallbacks
    def maybeBuildsetComplete(self, bsid, _reactor=reactor):
        brdicts = yield self.master.db.buildrequests.getBuildRequests(
            bsid=bsid, complete=False)

        # if there are incomplete buildrequests, bail out
        if brdicts:
            return

        brdicts = yield self.master.db.buildrequests.getBuildRequests(bsid=bsid)

        # figure out the overall results of the buildset:
        cumulative_results = SUCCESS
        for brdict in brdicts:
            cumulative_results = worst_status(
                cumulative_results, brdict['results'])

        # get a copy of the buildset
        bsdict = yield self.master.db.buildsets.getBuildset(bsid)

        # if it's already completed, we're late to the game, and there's
        # nothing to do.
        #
        # NOTE: there's still a strong possibility of a race condition here,
        # which would cause two buildset.$bsid.complete messages to be sent.
        # That's an acceptable risk, and a necessary consequence of this
        # denormalized representation of a buildset's state.
        if bsdict['complete']:
            return

        # mark it as completed in the database
        complete_at = epoch2datetime(int(_reactor.seconds()))
        yield self.master.db.buildsets.completeBuildset(bsid, cumulative_results,
                                                        complete_at=complete_at)

        # get the sourcestamps for the message
        # get each of the sourcestamps for this buildset (sequentially)
        bsdict = yield self.master.db.buildsets.getBuildset(bsid)
        sourcestamps = []
        for ssid in bsdict['sourcestamps']:
            sourcestamps.append(
                copy.deepcopy(
                    (yield self.master.data.get(('sourcestamps', str(ssid))))
                )
            )

        msg = dict(
            bsid=bsid,
            external_idstring=bsdict['external_idstring'],
            reason=bsdict['reason'],
            sourcestamps=sourcestamps,
            submitted_at=bsdict['submitted_at'],
            complete=True,
            complete_at=complete_at,
            results=cumulative_results)
        # TODO: properties=properties)
        self.produceEvent(msg, "complete")
python
import json
import logging

from platform import system
from ctypes import (c_char_p, c_int, c_uint, c_long, Structure, cdll, POINTER)
from typing import Any, TYPE_CHECKING, Tuple, List, AnyStr

from rita.engine.translate_standalone import rules_to_patterns, RuleExecutor
from rita.types import Rules

logger = logging.getLogger(__name__)

field = Tuple[AnyStr, Any]
fields = List[field]

if TYPE_CHECKING:
    # We cannot simply import SessionConfig because of cyclic imports
    from rita.config import SessionConfig


class NamedRangeResult(Structure):
    _fields_ = [
        ("start", c_long),
        ("end", c_long),
        ("name", c_char_p),
    ]


class ResultEntity(Structure):
    _fields_ = [
        ("label", c_char_p),
        ("start", c_long),
        ("end", c_long),
        ("sub_count", c_uint),
    ]


class Result(Structure):
    _fields_ = [
        ("count", c_uint)
    ]


class Context(Structure):
    _fields_: fields = []


def load_lib():
    try:
        os_name = system()
        if os_name == "Windows":
            lib = cdll.LoadLibrary("rita_rust.dll")
        elif os_name == "Darwin":
            lib = cdll.LoadLibrary("librita_rust.dylib")
        else:
            lib = cdll.LoadLibrary("librita_rust.so")
        lib.compile.restype = POINTER(Context)
        lib.execute.argtypes = [POINTER(Context), c_char_p]
        lib.execute.restype = POINTER(Result)
        lib.clean_env.argtypes = [POINTER(Context)]
        lib.clean_result.argtypes = [POINTER(Result)]
        lib.read_result.argtypes = [POINTER(Result), c_int]
        lib.read_result.restype = POINTER(ResultEntity)
        lib.read_submatch.argtypes = [POINTER(ResultEntity), c_int]
        lib.read_submatch.restype = POINTER(NamedRangeResult)
        return lib
    except Exception as ex:
        logger.error("Failed to load rita-rust library, reason: {}\n\n"
                     "Most likely you don't have required shared library to use it".format(ex))


class RustRuleExecutor(RuleExecutor):
    def __init__(self, patterns, config: "SessionConfig"):
        self.config = config
        self.context = None
        self.lib = load_lib()

        self.patterns = [self._build_regex_str(label, rules)
                         for label, rules in patterns]

        self.compile()

    @staticmethod
    def _build_regex_str(label, rules):
        indexed_rules = ["(?P<s{}>{})".format(i, r) if not r.startswith("(?P<") else r
                         for i, r in enumerate(rules)]
        return r"(?P<{0}>{1})".format(label, "".join(indexed_rules))

    def compile(self):
        flag = 0 if self.config.ignore_case else 1
        c_array = (c_char_p * len(self.patterns))(*list([p.encode("UTF-8") for p in self.patterns]))
        self.context = self.lib.compile(c_array, len(c_array), flag)
        return self.context

    def execute(self, text, include_submatches=True):
        result_ptr = self.lib.execute(self.context, text.encode("UTF-8"))
        count = result_ptr[0].count
        for i in range(0, count):
            match_ptr = self.lib.read_result(result_ptr, i)
            match = match_ptr[0]
            matched_text = text[match.start:match.end].strip()

            def parse_subs():
                k = match.sub_count
                for j in range(0, k):
                    s = self.lib.read_submatch(match_ptr, j)[0]
                    start = s.start
                    end = s.end
                    sub_text = text[start:end]
                    if sub_text.strip() == "":
                        continue
                    yield {
                        "text": sub_text.strip(),
                        "start": start,
                        "end": end,
                        "key": s.name.decode("UTF-8"),
                    }

            yield {
                "start": match.start,
                "end": match.end,
                "text": matched_text,
                "label": match.label.decode("UTF-8"),
                "submatches": list(parse_subs()) if include_submatches else []
            }

    def clean_context(self):
        self.lib.clean_env(self.context)

    @staticmethod
    def load(path):
        from rita.config import SessionConfig
        config = SessionConfig()
        with open(path, "r") as f:
            patterns = [(obj["label"], obj["rules"])
                        for obj in map(json.loads, f.readlines())]

        return RustRuleExecutor(patterns, config)


def compile_rules(rules: Rules, config: "SessionConfig", **kwargs) -> RustRuleExecutor:
    logger.info("Using rita-rust rule implementation")
    patterns = [rules_to_patterns(*group, config=config) for group in rules]
    executor = RustRuleExecutor(patterns, config)
    return executor
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from codecs import open
import json

import opengraph

from repos import final_theses as thesis_slugs

template = open('_template.html', 'r', 'utf-8').read()

theses = []

for thesis_slug in thesis_slugs:
    url = 'http://kabk.github.io/%s/' % thesis_slug
    print "parsing %s:" % url
    g = opengraph.OpenGraph(url=url, scrape=True)
    d = json.loads(g.to_json())
    d['slug'] = thesis_slug
    theses.append(d)

template = open('_template.html', 'r', 'utf-8').read()

thesis_template = """
<div class="preview">
    <figure>
        <a href="{url}"><img src="{image}"/></a>
    </figure>
    <h2><a href="{url}">{title}</a></h2>
    <h3>{creator}</h3>
    <p>{description} <a href="{url}">Continue reading…</a></p>
</div>
"""

thesis_links = ""

for thesis in theses:
    thesis_links += thesis_template.format(image=thesis['image'],
                                           title=thesis['title'],
                                           creator=thesis['creator'],
                                           description=thesis['description'],
                                           url=thesis['url'],
                                           slug=thesis['slug'])

result = template.format(body=thesis_links)

generated_file = open('index.html', 'w', 'utf-8')
generated_file.write(result)
generated_file.close()
python
import matplotlib
matplotlib.use('TkAgg')

from collections import namedtuple

import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import ode


def f(x, y):
    """Right-hand side of the ODE y' = f(x, y)"""
    return x/4 - 1/(1 + y**2)


def on_move(event):
    """Mouse event handler"""
    # initial data
    x0 = event.xdata
    y0 = event.ydata

    # the cursor has left the coordinate system
    if not x0 or not y0:
        line.set_data([], [])
        fig.canvas.draw()
        return

    dt = 0.05  # integration step
    sol = []   # solution
    de = ode(f)
    de.set_integrator('dop853')

    # integrate "to the right" of the initial point
    de.set_initial_value(y0, x0)
    while de.successful() and de.t <= xlim.end:
        de.integrate(de.t + dt)
        sol.append((de.t, de.y[0]))

    # integrate "to the left" of the initial point
    de.set_initial_value(y0, x0)
    while de.successful() and de.t >= xlim.start:
        de.integrate(de.t - dt)
        sol.append((de.t, de.y[0]))

    sol.sort(key=lambda x: x[0])
    sol = list(zip(*sol))

    if event.button:
        ax.plot(sol[0], sol[1], 'r')
    else:
        line.set_data(sol[0], sol[1])
    fig.canvas.draw()


# rectangular region of the plane
Lims = namedtuple('Lims', ['start', 'end'])
xlim = Lims(-5, 5)
ylim = Lims(-5, 5)

fig = plt.figure()

# connect the mouse event handlers
fig.canvas.mpl_connect('motion_notify_event', on_move)
fig.canvas.mpl_connect('button_press_event', on_move)

ax = plt.axes(xlim=xlim, ylim=ylim)
ax.set_aspect('equal')

# coordinate axes
ax.hlines(0, xlim.start, xlim.end, lw=0.5)
ax.vlines(0, ylim.start, ylim.end, lw=0.5)

x = np.linspace(xlim.start, xlim.end, 21)
y = np.linspace(ylim.start, ylim.end, 21)
X, Y = np.meshgrid(x, y)

# normalization factor so that all field vectors have the same length
norm = np.hypot(1, f(X, Y))

# direction field
kwargs = {'angles': 'xy', 'width': 0.002, 'pivot': 'mid'}
ax.quiver(X, Y, 1/norm, f(X, Y)/norm, **kwargs)

# line that draws the solution curve as the mouse moves
line, = ax.plot([], [], 'm')

plt.show()
python
# Generated by Django 2.2.8 on 2019-12-11 16:24

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('django_eveonline_connector', '0010_auto_20191211_1514'),
    ]

    operations = [
        migrations.AlterField(
            model_name='evecharacter',
            name='token',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='django_eveonline_connector.EveToken'),
        ),
    ]
python
import time

import numpy as np

from yaaf.evaluation import Metric


class SecondsPerTimestepMetric(Metric):

    def __init__(self):
        super(SecondsPerTimestepMetric, self).__init__("Seconds Per Timestep")
        self._deltas = []
        self._last = None

    def reset(self):
        self._deltas = []

    def __call__(self, timestep):
        now = time.time()
        delta = now - self._last if self._last is not None else 0.0
        self._last = now
        self._deltas.append(delta)
        return delta

    def result(self):
        return np.array(self._deltas)
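A minimal usage sketch of the metric above; the loop is an assumed stand-in for agent/environment steps, and since the metric ignores its timestep argument a placeholder such as None is enough.

# Minimal usage sketch (the loop body is a hypothetical stand-in for real work).
metric = SecondsPerTimestepMetric()
for _ in range(5):
    time.sleep(0.01)      # stand-in for one timestep of real work
    metric(None)          # records the wall-clock delta since the previous call
print(metric.result())    # numpy array of per-step durations; the first entry is 0.0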
python
from pytest import raises

from async_cog.tags import Tag


def test_tag_format() -> None:
    tag = Tag(code=254, type=4, length=13)
    assert tag.format_str == "13I"
    assert tag.data_pointer is None


def test_tag_size() -> None:
    tag = Tag(code=254, type=4, length=13)
    assert tag.data_size == 52


def test_tag_name() -> None:
    tag = Tag(code=34735, type=3, length=32, data_pointer=502)
    assert tag.name == "GeoKeyDirectoryTag"


def test_tag_str() -> None:
    tag = Tag(code=34735, type=3, length=32, data_pointer=502)
    assert str(tag) == "GeoKeyDirectoryTag: None"

    tag = Tag(code=257, type=3, length=1, value=256)
    assert str(tag) == "ImageHeight: 256"

    tag = Tag(code=258, type=3, length=3, value=[8, 8, 8])
    assert str(tag) == "BitsPerSample: [8, 8, 8]"


def test_not_implemented() -> None:
    tag = Tag(code=34735, type=3, length=32, data_pointer=502)

    with raises(NotImplementedError):
        tag.parse_data(b"", "<")
python
""" RenameWidget: This widget permit the rename of the output files in the MKVCommand Also if files are drop from directories in the OS it will rename them. """ # LOG FW0013 import logging import re from pathlib import Path from PySide2.QtCore import Signal, Qt, Slot from PySide2.QtWidgets import ( QGridLayout, QWidget, QHBoxLayout, QSizePolicy, QGroupBox, ) import vsutillib.pyqt as pyqt from .. import config from ..utils import Text from .RenameWidgetHelpers import ( findDuplicates, RegExFilesWidget, RegExLineInputWidget, RegExInputWidget, resolveIncrements, ) MODULELOG = logging.getLogger(__name__) MODULELOG.addHandler(logging.NullHandler()) class RenameWidget(pyqt.TabWidgetExtension, QWidget): """Central widget""" # pylint: disable=too-many-instance-attributes # Defining elements of a GUI # Class logging state __log = False outputRenameResultsSignal = Signal(str, dict) outputOriginalFilesSignal = Signal(str, dict) applyFileRenameSignal = Signal(list) setFilesSignal = Signal(object) setCurrentIndexSignal = Signal() @classmethod def classLog(cls, setLogging=None): """ get/set logging at class level every class instance will log unless overwritten Args: setLogging (bool): - True class will log - False turn off logging - None returns current Value Returns: bool: returns the current value set """ if setLogging is not None: if isinstance(setLogging, bool): cls.__log = setLogging return cls.__log def __init__(self, parent, controlQueue=None, log=None): super(RenameWidget, self).__init__(parent=parent, tabWidgetChild=self) self.__log = None self.__output = None self.__tab = None self.parent = parent self.controlQueue = controlQueue self._outputFileNames = [] self._renameFileNames = [] self._initControls() self._initUI() self._initHelper() self._bFilesDropped = False self._bDuplicateRename = False self.log = log def _initControls(self): # # Input Lines # self.textRegEx = RegExLineInputWidget(Text.txt0200, Text.txt0201) self.textSubString = RegExLineInputWidget(Text.txt0202, Text.txt0203) self.textOriginalNames = RegExFilesWidget(Text.txt0204, Text.txt0205) self.textOriginalNames.textBox.setReadOnly(True) self.textOriginalNames.textBox.connectToInsertText( self.outputOriginalFilesSignal ) self.textOriginalNames.textBox.filesDroppedUpdateSignal.connect( self._setFilesDropped ) self.textRenameResults = RegExInputWidget(Text.txt0206, Text.txt0207) self.textRenameResults.textBox.setReadOnly(True) self.textRenameResults.textBox.connectToInsertText( self.outputRenameResultsSignal ) btnApplyRename = pyqt.QPushButtonWidget( Text.txt0208, function=self._applyRename, margins=" ", toolTip=Text.txt0209, ) btnApplyRename.setEnabled(False) btnUndoRename = pyqt.QPushButtonWidget( Text.txt0210, function=self._undoRename, margins=" ", toolTip=Text.txt0211 ) btnUndoRename.setEnabled(False) btnClear = pyqt.QPushButtonWidget( Text.txt0212, function=self.clear, margins=" ", toolTip=Text.txt0213 ) self.btnGrid = QHBoxLayout() self.btnGrid.addWidget(btnApplyRename) self.btnGrid.addWidget(btnUndoRename) self.btnGrid.addStretch() self.btnGrid.addWidget(btnClear) self.btnGroup = QGroupBox() self.btnGroup.setLayout(self.btnGrid) def _initUI(self): inputGrid = QGridLayout() # # Input lines # inputGrid.addWidget(self.textRegEx, 0, 0, 1, 2) inputGrid.addWidget(self.textSubString, 1, 0, 1, 2) # buttons inputGrid.addWidget(self.btnGroup, 2, 0, 1, 2) gridWidget = QWidget() gridWidget.setLayout(inputGrid) gridWidget.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed) boxWidget = QWidget() hboxLayout = QHBoxLayout() 
hboxLayout.addWidget(self.textOriginalNames) hboxLayout.addWidget(self.textRenameResults) boxWidget.setLayout(hboxLayout) grid = QGridLayout() grid.setSpacing(5) grid.addWidget(gridWidget, 0, 0, 2, 0, Qt.AlignTop) grid.addWidget(boxWidget, 2, 0) self.setLayout(grid) def _initHelper(self): maxCount = config.data.get(Key.MaxRegExCount) # local signals # self.setCurrentIndexSignal.connect(self._setCurrentIndex) self.setFilesSignal.connect(self.setFiles) self.textRegEx.cmdLine.currentTextChanged.connect(self._updateRegEx) self.textSubString.cmdLine.currentTextChanged.connect(self._updateRegEx) self.textOriginalNames.textBox.textChanged.connect(self.clearButtonState) self.textRegEx.cmdLine.itemsChangeSignal.connect( lambda: self.saveItems(Key.RegEx) ) self.textSubString.cmdLine.itemsChangeSignal.connect( lambda: self.saveItems(Key.SubString) ) self.textOriginalNames.textBox.verticalScrollBar().valueChanged.connect( self.scrollRenameChanged ) self.textRenameResults.textBox.verticalScrollBar().valueChanged.connect( self.scrollResultsChanged ) if maxCount is not None: self.textRegEx.cmdLine.setMaxCount(maxCount) self.textSubString.cmdLine.setMaxCount(maxCount) items = config.data.get(Key.RegEx) self.textRegEx.cmdLine.addItems(items) self.textRegEx.cmdLine.clearEditText() items = config.data.get(Key.SubString) self.textSubString.cmdLine.addItems(items) self.textSubString.cmdLine.clearEditText() self.btnGrid.itemAt(ButtonIndex.Clear).widget().setEnabled(False) def __bool__(self): for n, r in zip(self._outputFileNames, self._renameFileNames): if n != r: return True return False @property def log(self): """ class property can be used to override the class global logging setting Returns: bool: True if logging is enable False otherwise """ if self.__log is not None: return self.__log return RenameWidget.classLog() @log.setter def log(self, value): """set instance log variable""" if isinstance(value, bool) or value is None: self.__log = value @property def output(self): return self.__output @output.setter def output(self, value): self.__output = value @Slot() def saveItems(self, comboType): """ saveItems of ComboLineEdit use in widget Args: comboType (str): key indicating witch ComboListEdit to save to config """ if comboType == Key.RegEx: if self.textRegEx.cmdLine.count() > 0: items = [] for i in range(0, self.textRegEx.cmdLine.count()): items.append(self.textRegEx.cmdLine.itemText(i)) config.data.set(Key.RegEx, items) if comboType == Key.SubString: if self.textRegEx.cmdLine.count(): items = [] for i in range(0, self.textSubString.cmdLine.count()): items.append(self.textSubString.cmdLine.itemText(i)) config.data.set(Key.SubString, items) @Slot(object) def setFiles(self, objCommand): """ setFile setup file names to work with Args: objCommand (MKVCommand): MKVCommand object containing the files to rename """ self.textOriginalNames.textBox.clear() self.textRenameResults.textBox.clear() for f in objCommand.destinationFiles: # show files self.outputOriginalFilesSignal.emit(str(f.name) + "\n", {}) # save files self._outputFileNames.append(f) @Slot(int) def scrollRenameChanged(self, value): self.textRenameResults.textBox.verticalScrollBar().valueChanged.disconnect( self.scrollResultsChanged ) self.textRenameResults.textBox.verticalScrollBar().setValue(value) self.textRenameResults.textBox.verticalScrollBar().valueChanged.connect( self.scrollResultsChanged ) @Slot(int) def scrollResultsChanged(self, value): self.textOriginalNames.textBox.verticalScrollBar().valueChanged.disconnect( self.scrollRenameChanged ) 
self.textOriginalNames.textBox.verticalScrollBar().setValue(value) self.textOriginalNames.textBox.verticalScrollBar().valueChanged.connect( self.scrollRenameChanged ) def clear(self): """ clear reset widget working variables and widgets """ self._outputFileNames = [] self._renameFileNames = [] self._bFilesDropped = False self.textRegEx.cmdLine.lineEdit().clear() self.textSubString.cmdLine.lineEdit().clear() self.textOriginalNames.textBox.clear() self.textRenameResults.textBox.clear() def clearButtonState(self): """Set clear button state""" if self.textOriginalNames.textBox.toPlainText() != "": self.btnGrid.itemAt(ButtonIndex.Clear).widget().setEnabled(True) else: self.btnGrid.itemAt(ButtonIndex.Clear).widget().setEnabled(False) def connectToSetFiles(self, objSignal): objSignal.connect(self.setFiles) def setLanguage(self): """ setLanguage set labels according to locale """ for index in range(self.btnGrid.count()): widget = self.btnGrid.itemAt(index).widget() if isinstance(widget, pyqt.QPushButtonWidget): widget.setLanguage() #widget.setText(" " + _(widget.originalText) + " ") #widget.setToolTip(_(widget.toolTip)) for w in [self.textRegEx, self.textSubString]: w.lblText.setText(_(w.label) + ": ") w.cmdLine.setToolTip(_(w.toolTip)) for w in [self.textOriginalNames, self.textRenameResults]: w.lblText.setText(_(w.label) + ":") w.textBox.setToolTip(_(w.toolTip)) w.repaint() def _setFilesDropped(self, filesDropped): if filesDropped: self._outputFileNames = [] self._outputFileNames.extend(filesDropped) self.textRenameResults.textBox.clear() if not self._bFilesDropped: self._bFilesDropped = True self._updateRegEx() else: # receive when clear issued to FilesListWidget self._outputFileNames = [] self.textRenameResults.textBox.clear() self.btnGrid.itemAt(ButtonIndex.Undo).widget().setEnabled(False) self._bFilesDropped = False def _displayRenames(self): duplicateNames = findDuplicates(self._renameFileNames) if duplicateNames: self._bDuplicateRename = True else: self._bDuplicateRename = False for f in self._renameFileNames: of = Path(f) try: if (f in duplicateNames) or of.is_file(): self.outputRenameResultsSignal.emit( str(f.name) + "\n", {"color": Qt.red} ) else: # check theme self.outputRenameResultsSignal.emit(str(f.name) + "\n", {}) except OSError: self.outputRenameResultsSignal.emit(str(f.name) + "\n", {}) def _updateRegEx(self): rg = self.textRegEx.cmdLine.currentText() subText = self.textSubString.cmdLine.currentText() statusBar = self.parent.statusBar() statusBar.showMessage("") self.textRenameResults.textBox.clear() self._renameFileNames = [] try: regEx = re.compile(rg) for f in self._outputFileNames: strFile = f.stem matchRegEx = regEx.sub(subText, strFile) if matchRegEx: objName = f.parent.joinpath(matchRegEx + f.suffix) else: objName = f self._renameFileNames.append(objName) resolveIncrements(self._outputFileNames, self._renameFileNames, subText) self._displayRenames() if self: self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(True) else: self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(False) except re.error: self.textRenameResults.textBox.clear() statusBar.showMessage(Text.txt0214) if resolveIncrements(self._outputFileNames, self._renameFileNames, subText): self._displayRenames() if self: self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(True) else: self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(False) def _applyRename(self): if self._bFilesDropped: # self.applyFileRenameSignal.emit(self._renameFileNames) filesPair = 
zip(self._outputFileNames, self._renameFileNames) for oldName, newName in filesPair: try: oldName.rename(newName) except FileExistsError: pass else: self.applyFileRenameSignal.emit(self._renameFileNames) self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(False) self.btnGrid.itemAt(ButtonIndex.Undo).widget().setEnabled(True) def _undoRename(self): if self._bFilesDropped: filesPair = zip(self._renameFileNames, self._outputFileNames) for oldName, newName in filesPair: try: oldName.rename(newName) except FileExistsError: pass else: self.applyFileRenameSignal.emit(self._outputFileNames) self.btnGrid.itemAt(ButtonIndex.ApplyRename).widget().setEnabled(True) self.btnGrid.itemAt(ButtonIndex.Undo).widget().setEnabled(False) class ButtonIndex: ApplyRename = 0 Undo = 1 Clear = 3 class Key: RegEx = "RegEx" SubString = "SubString" MaxRegExCount = "MaxRegExCount"
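# --- Added illustration (not part of RenameWidget) ---------------------------
# The core of _updateRegEx is an ordinary re.sub applied to the file stem, with
# the original suffix re-attached afterwards. The pattern, substitution and
# file name below are hypothetical stand-ins for the widget's input lines.
import re
from pathlib import Path

original = Path("/videos/Show.Name.S01E01.1080p.mkv")
pattern = re.compile(r"Show\.Name\.(S\d\dE\d\d).*")
substitution = r"Show Name - \1"

new_stem = pattern.sub(substitution, original.stem)
renamed = original.parent.joinpath(new_stem + original.suffix) if new_stem else original
print(renamed)   # /videos/Show Name - S01E01.mkv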
python
# Copyright (c) 2013 Stian Lode # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from triton.rigidbody2d import RigidBody2d from triton.vector2d import Vector2d class CoordScaler: def __init__(self, screen_size, scale=1): self.scale = scale if self.scale is None: self.adaptive_scale = True self.scale = 1 self.screen_size = screen_size self.translation = screen_size/2 def get_coords(self, cosmic_vect): screen_coords = cosmic_vect * self.scale + self.translation return screen_coords if self.adaptive_scale: if not 0 < screen_coords.x < screen_size.x: pass def main(): import pygame from collections import deque screen_scaler = CoordScaler(Vector2d(800, 800), 350.0 / 249209300000.0) max_history = 10000 gravitational_const = 6.67384*10**-11 earth = RigidBody2d() earth._mass = 5.97*10**24 earth.pos = Vector2d(149600000000.0, 0.0) earth.vel = Vector2d(0.0, 29000.8) earth_history = deque([screen_scaler.get_coords(earth.pos).tuple()], max_history) mars = RigidBody2d() mars._mass = 6.42*10**23 mars.pos = Vector2d(249209300000.0, 0.0) mars.vel = Vector2d(0.0, 24000.077) mars_history = deque([screen_scaler.get_coords(mars.pos).tuple()], max_history) sun = RigidBody2d() sun._mass = 1.989*10**30 sun.pos = Vector2d(0.0, 0.0) t = 0 dt = 3600 screen = pygame.display.set_mode(screen_scaler.screen_size.tuple()) clock = pygame.time.Clock() def gravity(ent1, ent2): """Returns a force vector from one body to another""" diff = (ent2.pos-ent1.pos) #Universal gravity dist = diff.length_sq() force = gravitational_const * ent1._mass * ent2._mass / dist return diff.normalize() * force def draw_history(screen, history_deque): if len(history_deque) < 2: return pygame.draw.lines( screen, (150,150,150), False, history_deque, 1) def int_tuple(tup): return (int(tup[0]), int(tup[1])) counter = 0 while not pygame.QUIT in [e.type for e in pygame.event.get()]: counter += 1 earth_sun = gravity(earth, sun) earth_mars = gravity(earth, mars) sun_mars = gravity(sun, mars) earth.apply_force(earth.pos, earth_sun) earth.apply_force(earth.pos, earth_mars) mars.apply_force(mars.pos, -sun_mars) mars.apply_force(mars.pos, -earth_mars) sun.apply_force(sun.pos, sun_mars) sun.apply_force(sun.pos, -earth_sun) sun.update(t, dt) earth.update(t, dt) mars.update(t, dt) t += dt print("Simulation time (in days): " + str(t/(3600*24))) screen.fill((10, 10, 20)) # draw the sun sun_screen_coords = int_tuple(screen_scaler.get_coords(sun.pos).tuple()) pygame.draw.circle(screen, (220,200,100), sun_screen_coords, 20, 0) # draw the earth earth_screen_coords = 
int_tuple(screen_scaler.get_coords(earth.pos).tuple()) pygame.draw.circle(screen, (50,50,200), earth_screen_coords, 10, 0) if counter % 10 == 0: earth_history.append(earth_screen_coords) draw_history(screen, earth_history) # draw mars mars_screen_coords = int_tuple(screen_scaler.get_coords(mars.pos).tuple()) pygame.draw.circle(screen, (200,100,100), mars_screen_coords, 10, 0) if counter % 10 == 0: mars_history.append(mars_screen_coords) draw_history(screen, mars_history) pygame.display.flip() pygame.time.wait(0) if __name__ == '__main__': main()
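# --- Added sanity check (illustration, not part of the simulation) -----------
# The force magnitude produced by gravity(earth, sun) at t = 0 can be checked
# directly against Newton's law F = G * m1 * m2 / r**2 with the same constants
# the script uses; the result is on the order of 3.5e22 N.
G = 6.67384e-11            # gravitational constant, as in the script
m_earth = 5.97e24          # kg
m_sun = 1.989e30           # kg
r = 149600000000.0         # initial Earth-Sun distance in metres

force = G * m_earth * m_sun / r**2
print(f"{force:.3e} N")    # roughly 3.5e22 N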
python
""" @author: Hasan Albinsaid @site: https://github.com/hasanabs """ import matplotlib.pyplot as plt import numpy as np import itertools import os def nck(n,k): return np.math.factorial(n)/np.math.factorial(k)/np.math.factorial(n-k) def nchoosek(arr, k): return np.array(list(itertools.combinations(arr, k))) def optimum_RAC(all_RAC, n, r, size_comb): ukuran=np.zeros(n,dtype=int) while(len(all_RAC)>size_comb): for i in range(n): ukuran[i]=(all_RAC==i+1).sum() idx_rem=0; remaining_idx=np.arange(len(all_RAC)) sort_remove=np.argsort(-ukuran) while(len(remaining_idx)>1): old_remaining_idx=remaining_idx remaining_idx=remaining_idx[np.where((all_RAC[remaining_idx,:]==sort_remove[idx_rem]+1))[0]] if (len(remaining_idx)==0): idx=0 while(len(remaining_idx)==0): remaining_idx=old_remaining_idx[np.where((all_RAC[old_remaining_idx,:]==sort_remove[idx]+1))[0]] idx+=1 idx_rem+=1 all_RAC=np.delete(all_RAC, (remaining_idx), axis=0) return all_RAC def bi2de(arr): result=0 for i in range(len(arr)):result+=np.power(2,i)*arr[len(arr)-1-i] return result def de2bi(decimal, L_bit): arr=np.zeros((1,L_bit), dtype=np.int8) for i in range(L_bit): arr[0,(L_bit-i-1)]=decimal%2 decimal=decimal>>1 return arr def modulation(M): if M==2: modulation=np.array([-1+0j, 1+0j]) elif M==4: modulation=np.array([-1-1j, -1+1j, 1+1j, 1-1j]/np.sqrt(2)) elif M==16: modulation=np.array([-3+3j, -3+1j, -3-3j, -3-1j, -1+3j, -1+1j, -1-3j, -1-1j, 3+3j, 3+1j, 3-3j, 3-1j, 1+3j, 1+1j, 1-3j, 1-1j]/np.sqrt(10)) return modulation def herm(matrix): return np.transpose(np.conjugate(matrix)) def H(Nr, Nt): return (np.random.randn(Nr,Nt)+np.random.randn(Nr,Nt)*1j)/np.sqrt(2) def noise(SNR, Nr, Es): return (np.random.randn(Nr,1)+np.random.randn(Nr,1)*1j)*np.sqrt(Es/np.power(10,(SNR)/10))/np.sqrt(2) def plotter(Range, Error_bit, SNR_Min, SNR_Max, L, prop, Title, Label): plt.figure(1) ASBT = (np.ones((len(Error_bit),1)) - Error_bit)*L plt.plot(Range, ASBT, prop, linewidth=1, label=Label) plt.legend(loc='lower right', fontsize='x-large') plt.axis([SNR_Min, SNR_Max, 2, 10.5]) plt.yscale('linear') plt.xlabel('SNR[dB]') plt.ylabel('ASBT') plt.minorticks_on() plt.grid(b=True, which='major') plt.grid(b=True, which='minor',alpha=0.4) plt.suptitle('ASBT '+ Label, fontsize='x-large', fontweight='bold') plt.title(Title, fontsize='large', fontweight='book') plt.show() if not os.path.exists('../results'): os.makedirs('../results') plt.savefig('../results/ASBT_'+Label+'.png') plt.figure(2) plt.plot(Range, Error_bit, prop, linewidth=1, label=Label) plt.legend(loc='upper right', fontsize='x-large') plt.axis([SNR_Min, SNR_Max, 6e-4, 1e-0]) plt.xscale('linear') plt.yscale('log') plt.xlabel('SNR[dB]') plt.ylabel('BER') plt.minorticks_on() plt.grid(b=True, which='major') plt.grid(b=True, which='minor',alpha=0.4) plt.suptitle('BER ' + Label, fontsize='x-large', fontweight='bold') plt.title(Title, fontsize='large', fontweight='book') plt.show() if not os.path.exists('../results'): os.makedirs('../results') plt.savefig('../results/'+Label+'.png')
python
#! /usr/bin/env python
#-----------------------------------------------------------------------
# COPYRIGHT_BEGIN
# Copyright (C) 2016, FixFlyer, LLC.
# All rights reserved.
# COPYRIGHT_END
#-----------------------------------------------------------------------


class SessionStore(object):
    """Interface for a store of FIX session state."""

    class Listener(object):
        """Callback interface used by for_each_session()."""

        def on_session(self, session_id, begin_string, sender_comp_id,
                       target_comp_id, session_qualifier,
                       trading_session_id, last_seq):
            pass

    def add_session(self, session_id):
        pass

    def remove_session(self, session_id):
        pass

    def has_session(self, session_id):
        pass

    def get_session(self, session_id, trading_session_id_out, last_seq_out):
        pass

    def update_session(self, session_id, trading_session_id, last_seq):
        pass

    def for_each_session(self, listener):
        pass
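# --- Added illustration (hypothetical, not part of the FixFlyer code) --------
# SessionStore is effectively an abstract interface: every method is a stub.
# One plausible in-memory implementation is sketched below purely for
# illustration; treating the *_out parameters as append-to lists is an
# assumption, since the interface does not document them.
class InMemorySessionStore(SessionStore):
    """Illustrative dict-backed implementation of the interface above."""

    def __init__(self):
        self._sessions = {}

    def add_session(self, session_id):
        self._sessions.setdefault(session_id,
                                  {"trading_session_id": None, "last_seq": 0})

    def remove_session(self, session_id):
        self._sessions.pop(session_id, None)

    def has_session(self, session_id):
        return session_id in self._sessions

    def get_session(self, session_id, trading_session_id_out, last_seq_out):
        entry = self._sessions[session_id]
        trading_session_id_out.append(entry["trading_session_id"])
        last_seq_out.append(entry["last_seq"])

    def update_session(self, session_id, trading_session_id, last_seq):
        self._sessions[session_id] = {"trading_session_id": trading_session_id,
                                      "last_seq": last_seq}

    def for_each_session(self, listener):
        for session_id, entry in list(self._sessions.items()):
            listener.on_session(session_id, None, None, None, None,
                                entry["trading_session_id"], entry["last_seq"])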
python
"""Process the markdown files. The purpose of the script is to create a duplicate src directory within which all of the markdown files are processed to match the specifications of building a pdf from multiple markdown files using the pandoc library (***add link to pandoc library documentation***) with pdf specific text rendering in mind as well. """ import os import subprocess import re from datetime import datetime def run_shell_cmd(command): """Run shell/bash commands passed as a string using subprocess module.""" process = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) output = process.stdout.read() return output.decode('utf-8') def copy_src(): """Duplicate src directory to a new but temp directory named 'src_copy'.""" # source and target directories src_path = "../src/" target_path = "src_copy" # make new directory mkdir_cmd = "mkdir "+target_path run_shell_cmd(mkdir_cmd) # copy contents of src directory copy_cmd = "cp -R "+src_path+" "+target_path run_shell_cmd(copy_cmd) def copy_bids_logo(): """Copy BIDS_logo.jpg from the BIDS_logo dir in the root of the repo.""" run_shell_cmd("cp ../BIDS_logo/BIDS_logo.jpg src_copy/src/images/") def copy_images(root_path): """Copy images. Will be done from images directory of subdirectories to images directory in the src directory """ subdir_list = [] # walk through the src directory to find subdirectories named 'images' # and copy contents to the 'images' directory in the duplicate src # directory for root, dirs, files in os.walk(root_path): if 'images' in dirs: subdir_list.append(root) for each in subdir_list: if each != root_path: run_shell_cmd("cp -R "+each+"/images"+" "+root_path+"/images/") def extract_header_string(): """Extract the latest release's version number and date from CHANGES.md.""" released_versions = [] run_shell_cmd("cp ../mkdocs.yml src_copy/") with open(os.path.join(os.path.dirname(__file__), 'src_copy/mkdocs.yml'), 'r') as file: data = file.readlines() header_string = data[0].split(": ")[1] title = " ".join(header_string.split()[0:4]) version_number = header_string.split()[-1] build_date = datetime.today().strftime('%Y-%m-%d') return title, version_number, build_date def add_header(): """Add the header string extracted from changelog to header.tex file.""" title, version_number, build_date = extract_header_string() header = " ".join([title, version_number, build_date]) # creating a header string with latest version number and date header_string = ("\chead{ " + header + " }") with open('header.tex', 'r') as file: data = file.readlines() # now change the last but 2nd line, note that you have to add a newline data[-2] = header_string+'\n' # re-write header.tex file with new header string with open('header.tex', 'w') as file: file.writelines(data) def remove_internal_links(root_path, link_type): """Find and replace all cross and same markdown internal links. The links will be replaced with plain text associated with it. 
""" if link_type == 'cross': # regex that matches cross markdown links within a file # TODO: add more documentation explaining regex primary_pattern = re.compile(r'\[((?!http).[\w\s.\(\)`*/–]+)\]\(((?!http).+(\.md|\.yml|\.md#[\w\-\w]+))\)') # noqa: E501 elif link_type == 'same': # regex that matches references sections within the same markdown primary_pattern = re.compile(r'\[([\w\s.\(\)`*/–]+)\]\(([#\w\-._\w]+)\)') for root, dirs, files in os.walk(root_path): for file in files: if file.endswith(".md"): with open(os.path.join(root, file), 'r') as markdown: data = markdown.readlines() for ind, line in enumerate(data): match = primary_pattern.search(line) if match: line = re.sub(primary_pattern, match.group().split('](')[0][1:], line) data[ind] = line with open(os.path.join(root, file), 'w') as markdown: markdown.writelines(data) def modify_changelog(): """Change first line of the changelog to markdown Heading 1. This modification makes sure that in the pdf build, changelog is a new chapter. """ with open('src_copy/src/CHANGES.md', 'r') as file: data = file.readlines() data[0] = "# Changelog" with open('src_copy/src/CHANGES.md', 'w') as file: file.writelines(data) def edit_titlepage(): """Add title and version number of the specification to the titlepage.""" title, version_number, build_date = extract_header_string() with open('cover.tex', 'r') as file: data = file.readlines() data[-1] = ("\\textsc{\large "+version_number+"}" + "\\\\[0.5cm]" + "{\large " + build_date + "}" + "\\\\[2cm]" + "\\vfill" + "\\end{titlepage}") with open('cover.tex', 'w') as file: data = file.writelines(data) if __name__ == '__main__': duplicated_src_dir_path = 'src_copy/src' # Step 1: make a copy of the src directory in the current directory copy_src() # Step 2: copy BIDS_logo to images directory of the src_copy directory copy_bids_logo() # Step 3: copy images from subdirectories of src_copy directory copy_images(duplicated_src_dir_path) subprocess.call("mv src_copy/src/images/images/* src_copy/src/images/", shell=True) # Step 4: extract the latest version number, date and title extract_header_string() add_header() edit_titlepage() # Step 5: modify changelog to be a level 1 heading to facilitate section # separation modify_changelog() # Step 6: remove all internal links remove_internal_links(duplicated_src_dir_path, 'cross') remove_internal_links(duplicated_src_dir_path, 'same')
python
# django from hashlib import sha256 from uuid import uuid4 from django.utils.text import slugify from django.conf import settings from django.core.mail import EmailMultiAlternatives from django.template.loader import render_to_string from django.utils.html import strip_tags # python from bs4 import BeautifulSoup from mistune import Markdown, Renderer def get_new_hash(): return sha256(str(uuid4().hex).encode("utf-8")).hexdigest() def format_tags(tags): return " ".join({slugify(tag.lower()) for tag in tags}) def second_convert(second): second = int(second) minutes = int(second / 60) second -= minutes * 60 hours = int(second / (60 * 60)) second -= hours * (60 * 60) days = int(second / (60 * 60 * 24)) second -= days * (60 * 60 * 24) years = int(second / (60 * 60 * 24 * 365.25)) second -= years * (60 * 60 * 24 * 365.25) return dict(y=years, d=days, h=hours, m=minutes, s=int(second)) def marktohtml(marktext): renderer = Renderer(escape=False, parse_block_html=True) markdown = Markdown(renderer=renderer) return BeautifulSoup(markdown(marktext), "html.parser") def get_first_image(body): soup = marktohtml(body) img = soup.find("img") if img is not None: return img.get("src", "") def dor(body): "duration of read -> second" return body.__len__() / 28 class NextOrPrevious: def __init__(self, model, filter_field, id): self.model = model self.filter_field = filter_field self.id = id def next_or_previous(self, next=True): queryset = self.model.objects.filter(**self.filter_field) try: index = list(queryset).index(queryset.filter(id=self.id)[0]) except IndexError: return False else: if next: index = index - 1 else: index = index + 1 try: return queryset[index] except (IndexError, AssertionError): return False @property def next_query(self): return self.next_or_previous() @property def previous_query(self): return self.next_or_previous(False) def send_mail(subject, template_name, context, to): html_content = render_to_string(template_name, context) text_content = strip_tags(html_content) msg = EmailMultiAlternatives(subject, text_content, settings.EMAIL_HOST_USER, to) msg.attach_alternative(html_content, "text/html") msg.send() def get_client_url(): return f"?client_id={settings.GITHUB_AUTH.get('client_id')}&client_secret={settings.GITHUB_AUTH.get('client_secret')}" def ready_tags(tags, limit=5): return format_tags(tags.split(" ")[:limit])
python
import attr from jstruct import JStruct, JList, REQUIRED from typing import Optional, List @attr.s(auto_attribs=True) class Appointment: type: str date: Optional[str] = None time: Optional[str] = None phone: Optional[str] = None @attr.s(auto_attribs=True) class Address: postalCode: str provinceCode: str number: Optional[int] = None countryCode: Optional[str] = None name: Optional[str] = None @attr.s(auto_attribs=True) class Hazmat: number: int phone: str @attr.s(auto_attribs=True) class Parcel: quantity: int parcelType: str id: Optional[int] = None weight: Optional[int] = None length: Optional[int] = None depth: Optional[int] = None width: Optional[int] = None note: Optional[str] = None status: Optional[int] = None FCA_Class: Optional[str] = None hazmat: Optional[Hazmat] = JStruct[Hazmat] requestReturnLabel: Optional[bool] = None returnWaybill: Optional[str] = None @attr.s(auto_attribs=True) class PromoCode: code: Optional[str] = None @attr.s(auto_attribs=True) class Surcharge: type: str id: Optional[int] = None value: Optional[str] = None name: Optional[str] = None amount: Optional[int] = None @attr.s(auto_attribs=True) class RateRequest: category: str paymentType: str deliveryType: str unitOfMeasurement: str sender: Address = JStruct[Address, REQUIRED] consignee: Address = JStruct[Address, REQUIRED] parcels: List[Parcel] = JList[Parcel, REQUIRED] billing: Optional[int] = None promoCodes: Optional[List[PromoCode]] = JList[PromoCode] surcharges: Optional[List[Surcharge]] = JList[Surcharge] appointment: Optional[Appointment] = JStruct[Appointment] @attr.s(auto_attribs=True) class TaxesDetail: type: Optional[str] = None amount: Optional[str] = None name: Optional[str] = None @attr.s(auto_attribs=True) class Rate: grossAmount: Optional[int] = None discountAmount: Optional[int] = None otherCharge: Optional[int] = None fuelChargePercentage: Optional[int] = None accountType: Optional[str] = None rateType: Optional[str] = None cubicWeight: Optional[float] = None basicCharge: Optional[float] = None weightCharge: Optional[float] = None surcharges: List[Surcharge] = JList[Surcharge] subTotal: Optional[float] = None unitOfMeasurement: Optional[str] = None taxesDetails: List[TaxesDetail] = JList[TaxesDetail] taxes: Optional[float] = None fuelCharge: Optional[float] = None zoneCharge: Optional[float] = None total: Optional[float] = None @attr.s(auto_attribs=True) class Reference: code: Optional[int] = None type: Optional[str] = None @attr.s(auto_attribs=True) class RateResponse: delay: Optional[int] = None terminalLimit: Optional[int] = None singleShipmentCost: Optional[int] = None quantity: Optional[int] = None rates: List[Rate] = JList[Rate] references: List[Reference] = JList[Reference] unitOfMeasurement: Optional[str] = None parcelType: Optional[str] = None weight: Optional[str] = None postalCodeDelivery: Optional[str] = None postalCodePickup: Optional[str] = None creator: Optional[str] = None date: Optional[str] = None warning: Optional[str] = None
python
# Project Euler Problem 19 Solution
#
# Problem statement:
# You are given the following information, but you may prefer to
# do some research for yourself.
# 1 Jan 1900 was a Monday.
# Thirty days has September,
# April, June and November.
# All the rest have thirty-one,
# Saving February alone,
# Which has twenty-eight, rain or shine.
# And on leap years, twenty-nine.
# A leap year occurs on any year evenly divisible by 4, but not on
# a century unless it is divisible by 400. How many Sundays fell on
# the first of the month during the twentieth century (1 Jan 1901 to
# 31 Dec 2000)?
#
# Solution description:
# Bruteforce solution: Implements a simple calendar, iterates over
# all the days and counts the number of Sundays that fell on the
# first of a month
#
# Fast solution: Iterates only over the relevant dates and
# uses Zeller's congruence
# (https://en.wikipedia.org/wiki/Zeller%27s_congruence) to figure
# out the weekday of each first day of a month
#
# Author: Tom Praschan
# Date: 2019/02/17
# License: MIT (see ../LICENSE.md)

import time


def is_leapyear(year):
    """Returns True if year is a leap year and False otherwise."""
    return year % 4 == 0 and not (year % 100 == 0 and year % 400 != 0)


def days_per_month(month, year):
    """
    Given a month (1=January, 2=February, etc.) this function returns
    the number of days in that month (leap years are taken into account).
    """
    if month in [1, 3, 5, 7, 8, 10, 12]:
        return 31
    elif month in [4, 6, 9, 11]:
        return 30
    elif month == 2:
        return 29 if is_leapyear(year) else 28
    raise ValueError("The provided month m must fulfill 1 <= m <= 12!")


def bruteforce_solution():
    weekday = 1  # 1 = Monday, 2 = Tuesday, ..., 7 = Sunday
    day = 1
    month = 1
    year = 1900

    num_sundays = 0
    while not (day == 31 and month == 12 and year == 2000):
        # Count Sundays that fell on the first day of a month
        # Remember that we only start counting after 1901!
        if day == 1 and weekday == 7 and year >= 1901:
            num_sundays += 1

        # Increment date and weekday using modular arithmetic
        day = day % days_per_month(month, year) + 1
        weekday = weekday % 7 + 1

        # Increment month
        if day == 1:
            month = month % 12 + 1

        # Increment year
        if day == 1 and month == 1:
            year += 1

    return num_sundays


def zellers_congruence(day, month, year):
    """
    For a given date year/month/day this algorithm returns the weekday
    of that date (1 = Monday, 2 = Tuesday, etc.)
    For details see https://en.wikipedia.org/wiki/Zeller%27s_congruence
    """
    # Consistent variable names with the formula on Wikipedia
    q = day
    if month >= 3:
        m = month  # pragma: no cover
    else:
        m = month + 12
        year -= 1
    K = year % 100
    J = year // 100

    h = (q + (13 * (m + 1)) // 5 + K + K // 4 + J // 4 + 5*J) % 7

    # Convert to ISO
    return ((h + 5) % 7) + 1


def fast_solution():
    num_sundays = 0
    for year in range(1901, 2001):
        for month in range(1, 13):
            if zellers_congruence(1, month, year) == 7:
                num_sundays += 1
    return num_sundays


if __name__ == "__main__":
    start = time.time()
    solution = bruteforce_solution()
    end = time.time()
    print(f"Bruteforce Solution: {solution}")
    print(f"Elapsed time: {end - start:.6}s")

    start = time.time()
    solution = fast_solution()
    end = time.time()
    print(f"Fast Solution (Zeller's congruence): {solution}")
    print(f"Elapsed time: {end - start:.6}s")
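# --- Added spot checks (illustration, assumes the functions above are in scope)
# 1 January 2000 fell on a Saturday, which is ISO weekday 6, and the two
# solvers must agree with each other on the number of Sundays counted.
assert zellers_congruence(1, 1, 2000) == 6
assert bruteforce_solution() == fast_solution()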
python
#!/usr/bin/python """Executes Android Monkey stress test over adb to attached Android device.""" __author__ = '[email protected] (Jeff Carollo)' import datetime import json import logging import os import subprocess import sys import time from tasklib import apklib ADB_COMMAND = apklib.ADB_COMMAND MONKEY_COMMAND = ADB_COMMAND + 'shell "/system/bin/monkey -p %s --ignore-timeouts --kill-process-after-error -v 5000 --pct-touch 90 --pct-trackball 10 -s 10 %s; echo $? > /data/local/tmp/ret"' STDOUT_FILENAME = 'cmd_stdout.log' STDERR_FILENAME = 'cmd_stderr.log' def ExitWithErrorCode(error_code): if error_code == 0: logging.warning('Error code is zero, maaking it non-zero') error_code = -7 sys.exit(error_code) def main(argv): my_name = argv.pop(0) try: apk_file_path = argv.pop(0) except: sys.stderr.write('Must give apk_file_path as first argument.\n') sys.exit(-1) FORMAT = '%(asctime)-15s %(message)s' logging.basicConfig(format=FORMAT, level=logging.DEBUG) result_metadata = {} try: manifest = apklib.ReadAndroidManifest(apk_file_path) result_metadata[u'AndroidManifest.xml'] = manifest.encode('utf-8') class_path = apklib.FindClassPath(manifest) logging.info('Found class_path: %s', class_path) logging.info('Installing .apk...') try: output = subprocess.check_output( ADB_COMMAND + 'install -r %s' % apk_file_path, shell=True) apklib.CheckAdbSuccess(output) except subprocess.CalledProcessError, e: logging.error('adb install error %d:\n%s', e.returncode, e.output) try: logging.info('Signing .apk...') apklib.SignApk(apk_file_path) output = subprocess.check_output( ADB_COMMAND + 'install -r %s' % apk_file_path, shell=True) apklib.CheckAdbSuccess(output) except subprocess.CalledProcessError, e: logging.error('adb install error %d:\n%s', e.returncode, e.output) ExitWithErrorCode(e.returncode) try: logging.info('Running command...') cmd_stdout = open(STDOUT_FILENAME, 'w') cmd_stderr = open(STDERR_FILENAME, 'w') command = MONKEY_COMMAND % (class_path, ' '.join(argv)) try: timeout = datetime.timedelta(0, 900) # Give the thing 15 minutes. begin_time = datetime.datetime.now() timeout_time = begin_time + timeout process = subprocess.Popen(args=command, stdout=cmd_stdout, stderr=cmd_stderr, shell=True) ret = None while None == ret and (datetime.datetime.now() < timeout_time): time.sleep(0.02) ret = process.poll() finished_time = datetime.datetime.now() execution_time = finished_time - begin_time logging.info('execution_time: %s', execution_time) if finished_time >= timeout_time and (None == ret): logging.error('command %s timed out.', command) process.terminate() process.wait() ret = 0 elif ret == 0: # Only write execution_time if we didn't time out or fail. result_metadata['execution_time'] = execution_time.total_seconds() apklib.CheckAdbShellExitCode() if ret != 0: logging.error('adb command exited with code %s', ret) ExitWithErrorCode(ret) except subprocess.CalledProcessError, e: logging.error('Error %d:\n%s', e.returncode, e.output) ExitWithErrorCode(e.returncode) finally: apklib.WriteResultMetadata(result_metadata) cmd_stdout.flush() cmd_stdout.close() cmd_stderr.flush() cmd_stderr.close() logging.info('Uninstalling .apk...') try: output = subprocess.check_output( ADB_COMMAND + 'uninstall %s' % class_path, shell=True) apklib.CheckAdbSuccess(output) except subprocess.CalledProcessError, e: logging.error('adb uninstall error %d:\n%s', e.returncode, e.output) # Don't fail just because uninstall didn't work. try: # Inspect and dump to logs the cmd stdout output. 
cmd_stdout = open(STDOUT_FILENAME, 'r') stdout_exitcode = apklib.DumpAndCheckErrorLogs(cmd_stdout, sys.stdout) except Exception, e: logging.error('Error while dumping command stdout: %s', str(e)) stdout_exitcode = -5 # Don't exit yet, allow stderr to be dumped. finally: cmd_stdout.close() try: # Inspect and dump to logs the cmd stderr output. cmd_stderr = open(STDERR_FILENAME, 'r') stderr_exitcode = apklib.DumpAndCheckErrorLogs(cmd_stderr, sys.stderr) except Exception, e: logging.error('Error while dumping command stderr: %s', str(e)) stderr_exitcode = -5 finally: cmd_stderr.close() if stdout_exitcode != 0: logging.info('Error found in stdout.') ExitWithErrorCode(stdout_exitcode) if stderr_exitcode != 0: logging.info('Error found in stderr.') ExitWithErrorCode(stderr_exitcode) logging.info('Monkey work done successfully.') return 0 finally: logging.shutdown() if __name__ == '__main__': main(sys.argv)
python
from vol import Vol from net import Net from trainers import Trainer from util import * import os from random import shuffle, sample, random from sys import exit embeddings = None training_data = None testing_data = None network = None t = None N = None tokens_l = None def load_data(): global embeddings, N, tokens_l embeddings = {} raw = file('./data/word_projections-80.txt').read() raw = raw[9:] raw = raw.split('\n') for elem in raw: try: data = elem.split() word = data[0].lower() vector = [ float(v) for v in data[1:] ] embeddings[word] = vector except: continue path = './data/text/train_tiny' words = list(token for fname in os.listdir(path) for token in file(os.path.join(path, fname)).read().split()) tokens = set(words) tokens_l = list(tokens) N = len(tokens) print 'Corpus size: {} words'.format(N) step = 4 data = [] for n in xrange(0, len(words) - step): w1, w2, w3, pred = words[n:n+step] if not (w1 in embeddings and w2 in embeddings and w3 in embeddings and pred in embeddings and pred in tokens): continue V = Vol(embeddings[w1] + embeddings[w2] + embeddings[w3]) label = tokens_l.index(pred) data.append((V, label)) return data def start(): global training_data, testing_data, network, t, N all_data = load_data() shuffle(all_data) size = int(len(all_data) * 0.1) training_data, testing_data = all_data[size:], all_data[:size] print 'Data loaded, size: {}...'.format(len(all_data)) layers = [] layers.append({'type': 'input', 'out_sx': 1, 'out_sy': 1, 'out_depth': 240}) layers.append({'type': 'fc', 'num_neurons': 200, 'activation': 'sigmoid'}) layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'}) layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'}) layers.append({'type': 'fc', 'num_neurons': 10, 'activation': 'sigmoid'}) layers.append({'type': 'fc', 'num_neurons': 50, 'activation': 'sigmoid'}) layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'sigmoid'}) #layers.append({'type': 'conv', 'sx': 1, 'filters': 240, 'pad': 0}) #lookup table like #layers.append({'type': 'fc', 'num_neurons': 200, 'activation': 'tanh', 'drop_prob': 0.5}) #layers.append({'type': 'fc', 'num_neurons': 100, 'activation': 'tanh', 'drop_prob': 0.5}) layers.append({'type': 'softmax', 'num_classes': N}) print 'Layers made...' network = Net(layers) print 'Net made...' print network t = Trainer(network, {'method': 'adadelta', 'batch_size': 10, 'l2_decay': 0.0001}); def train(): global training_data, network, t print 'In training...' print 'k', 'time\t\t ', 'loss\t ', 'training accuracy' print '----------------------------------------------------' try: for x, y in training_data: stats = t.train(x, y) print stats['k'], stats['time'], stats['loss'], stats['accuracy'] except KeyboardInterrupt: pass finally: saveJSON('./models/next_word_embeddings/network.json', network.toJSON()) def test_text(text, ngenerate=10, delete=True): out = '' for n in xrange(ngenerate): x = [] words = text.split() for word in words: if word not in embeddings: return 'word: {} not in corpus'.format(word) else: x.extend(embeddings[word]) output = network.forward(Vol(x)).w pred = network.getPrediction() new = tokens_l[pred] if random() < 0.5 else \ weightedSample(embeddings.keys(), output) out += ' ' + new text = ' '.join(words[1:] + [new]) return out def test(): global testing_data, network try: print 'In testing...' 
right = 0 for x, y in testing_data: network.forward(x) right += network.getPrediction() == y accuracy = float(right) / len(testing_data) print accuracy except KeyboardInterrupt: pass finally: print test_text('the answer is') print test_text('i did this')
python
#
# Copyright(c) 2019 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#

from enum import Enum


class OutputFormat(Enum):
    table = 0
    csv = 1


class StatsFilter(Enum):
    all = 0
    conf = 1
    usage = 2
    req = 3
    blk = 4
    err = 5
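# --- Added usage sketch (illustration) ----------------------------------------
# Typical usage for enums like these is mapping a parsed command-line string
# onto a member, e.g. for an --output-format or --filter option.
fmt = OutputFormat["csv"]
flt = StatsFilter["usage"]

print(fmt.name, fmt.value)              # csv 1
print(flt.name, flt.value)              # usage 2
print([f.name for f in StatsFilter])    # ['all', 'conf', 'usage', 'req', 'blk', 'err']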
python
import networkx as nx import numpy as np import math def create_network (correct_answers, data, p_factor, realmodelQ, n_edges_score): #correct_answers is a string which assumes the following values: True, False, "All" #p_factor is a bool that assumes the value True if the factor (1-p) is to be considered for the weights and False otherwise #realmodelQ is a Bool identifying if the network is for a real model (True) or not (False) #n_edges_score is the number of edges to be considered when computing the score #Load dataset dataset = data #initialize the directed graph (A -> B iff A answered to question x before B and both chose the same option; #the weight is a value >0 and <1, and multiple of 1/num_questions) connected_students = nx.DiGraph() #Get list of usernames students = dataset.username.unique() #Add the students (nodes) to the graph connected_students.add_nodes_from(students) #Get number of quizzes num_quizzes = len(dataset.quiz_id.unique()) #Get total number of questions considering all the quizzes total_num_questions = len(dataset.quiz_question_id.unique()) #Initialize a dictionary with the students' performance (% of correct answers) students_performance = {} #Initialize a dictionary with the % of choice for each option_id in the set of all quizzes and questions percent_options = {} #Initialize a dictionary with the % of correct answers for each question in the set of all quizzes percent_correct_questions = {} #Initialize a dictionary of the edge colors edge_colors = {} #Initialize a dictionary with the correspondence of -> question: quiz questions_by_quiz = {} #Initialize a dictionary with the ranks of quizzes rank_quizzes = {} #Initialize the rank var rank = 0 #Initialize a dictionary with the correspondence of -> quiz: number of questions num_quest_by_quiz = {} #Initialize a dictionary with the number of quizzes each student participated num_question_participations = {} #Initialize a dictionary that has as keys the questions and as values dicionaries with keys the student and values 1 if #his/her answer is correct or 0 otherwise correct_question_per_student = {} #Initialize a dictionary that has as keys the questions and as values their solution frequency (higher values means that the question is easier) sol_freq_per_question = {} #Initialize a dictionary that has as keys the questions and as values their solution frequency penalized (higher values means that the question is easier) sol_freq_per_question_penalized = {} for i in dataset.quiz_id.unique(): #run the list of possible quizzes to compute edges and weights #print("quiz_id =", i) #Get the subdataset for each quiz dataset_quiz_i = dataset.loc[dataset['quiz_id'] == i] #Update the dictionary with the rank of quizzes rank_quizzes[i] = rank #Get number of questions of this quiz(to compute edge's weights) - each quiz has its won factor, given the number of questions num_questions = len(dataset_quiz_i.quiz_question_id.unique()) #Store the number of questions on this quiz num_quest_by_quiz[str(i)] = num_questions #Sort dataset for quiz i by quiz_question_id and answer_date dataset_quiz_i = dataset_quiz_i.sort_values(by=['quiz_question_id', 'answer_date'], ascending = [True, True]) for question in dataset_quiz_i.quiz_question_id.unique(): #run the list of possible question_id #Initialize the empty dictionary for this question correct_question_per_student[question] = {} #print("question =", question) #Get the subdataset for each question_id dataset_qi = dataset_quiz_i.loc[dataset_quiz_i['quiz_question_id'] == question] #Get list 
of students which participated in this question participating_students = dataset_qi.username.unique() for participant in participating_students: if participant in num_question_participations.keys(): num_question_participations[participant] += 1 else: num_question_participations[participant] = 1 #Update the dictionary with the correspondence of -> question: quiz questions_by_quiz[question] = i #Initialize the percentage of correct answers for this question percent_correct_answers = 0 #Get the percentage for each option_id/correct answers in this question for user in range(len(dataset_qi)): #Get user name username = dataset_qi['username'].iloc[user] #Get the option_id chosen by this user option_chosen = dataset_qi['option_id'].iloc[user] #Check if the option chosen is correct or not is_correct = dataset_qi['correct'].iloc[user] #If the option chosen is correct, update the percentage of correct answers value if is_correct: percent_correct_answers += 1/len(dataset_qi) #save the information on this student's answer correct_question_per_student[question][username] = 1 else: #save the information on this student's answer correct_question_per_student[question][username] = 0 #if the option_id is not in the percent's dictionary add it if option_chosen not in percent_options: percent_options[option_chosen] = 1/len(dataset_qi) #else update its percentage else: percent_options[option_chosen] += 1/len(dataset_qi) if percent_options[option_chosen]>1: #Do not let this percentage to be greater than 1 percent_options[option_chosen] = 1 #Add to the dictionary the percentage of correct answers for this question percent_correct_questions[question] = percent_correct_answers #Evaluate which kind of connections we wish to analyse: only the True/False or All of them if isinstance(correct_answers, bool): for j in range(len(dataset_qi)): userj = dataset_qi['username'].iloc[j] #Get the option_id chosen by userj option_chosen_j = dataset_qi['option_id'].iloc[j] #if the answer is correct if dataset_qi['correct'].iloc[j]: value = 1 #if the answer is incorrect else: value = 0 #if the user is not in the performance's dictionary add it if userj not in students_performance: students_performance[userj] = value #else update its performance else: students_performance[userj] += value #if its response is in accordance with the value of correct_answers, study the following users if dataset_qi['correct'].iloc[j] == correct_answers: #create an edge between every student wich answered after the current one and chose the same option_id for k in range(j+1, len(dataset_qi)): userk = dataset_qi['username'].iloc[k] #Get the option_id chosen by userk option_chosen_k = dataset_qi['option_id'].iloc[k] #if both students chose the same option if option_chosen_j == option_chosen_k: #if the edge already exists, update its weight if connected_students.has_edge(userj, userk): if p_factor: connected_students[userj][userk]['weight'] += 1/num_questions * (1 - percent_options[option_chosen_j]) else: connected_students[userj][userk]['weight'] += 1/num_questions #if the edge does not exist, add it else: if p_factor: connected_students.add_weighted_edges_from([(userj, userk, 1/num_questions * (1 - percent_options[option_chosen_j]))]) else: connected_students.add_weighted_edges_from([(userj, userk, 1/num_questions)]) elif correct_answers == "All": #run then subdataset for question_id=i to create edges between students for j in range(len(dataset_qi)): userj = dataset_qi['username'].iloc[j] #Get the option_id chosen by userj option_chosen_j = 
dataset_qi['option_id'].iloc[j] #if the answer is correct if dataset_qi['correct'].iloc[j]: value = 1 #else the answer is incorrect else: value = 0 #if the user is not in the performance's dictionary add it if userj not in students_performance: students_performance[userj] = value #else update its performance else: students_performance[userj] += value #create an edge between every student wich answered after the current one and chose the same option_id for k in range(j+1, len(dataset_qi)): userk = dataset_qi['username'].iloc[k] #Get the option_id chosen by userk option_chosen_k = dataset_qi['option_id'].iloc[k] #if both students chose the same option if option_chosen_j == option_chosen_k: #if the edge already exists, update its weight if connected_students.has_edge(userj, userk): if p_factor: connected_students[userj][userk]['weight'] += 1/num_questions * (1 - percent_options[option_chosen_j]) else: connected_students[userj][userk]['weight'] += 1/num_questions #if the edge does not exist, add it else: if p_factor: connected_students.add_weighted_edges_from([(userj, userk, 1/num_questions * (1 - percent_options[option_chosen_j]) )]) else: connected_students.add_weighted_edges_from([(userj, userk, 1/num_questions)]) #Sort the dictionary for each question by student username # FIX: A Username may not be transformable into a float. Re # correct_question_per_student[question] = dict(sorted(correct_question_per_student[question].items(), key=lambda item: float(item[0]))) if realmodelQ: #Compute the solution frequency for each question sol_freq_per_question[question] = (1/len(correct_question_per_student[question])) * sum([value for value in correct_question_per_student[question].values()]) #Compute the solution frequency penalized for each question if sol_freq_per_question[question] != 1: sol_freq_per_question_penalized[question] = math.log(sol_freq_per_question[question] / (1 - sol_freq_per_question[question] )) if realmodelQ: #Sort questions by difficulty (easier - solution frequency higher - first) sol_freq_per_question = dict(sorted(sol_freq_per_question.items(), key=lambda item: item[1], reverse=True)) #Increment the value of the rank rank += 1 #Compute the cheating indicators statistics for each student score_U1 = {} score_U3 = {} score_CS = {} if realmodelQ: for alumn in students: #U1 Statistic numerator = 0 #get the sum score for this student sum_score = 0 num_ques = len(sol_freq_per_question) ordered_questions = [key for key in sol_freq_per_question.keys()] for q in ordered_questions: if alumn in correct_question_per_student[q].keys(): sum_score += correct_question_per_student[q][alumn] for qu in range(num_ques-1): for que in range(qu+1,num_ques): if alumn in correct_question_per_student[ordered_questions[qu]].keys() and alumn in correct_question_per_student[ordered_questions[que]].keys(): if correct_question_per_student[ordered_questions[qu]][alumn] < correct_question_per_student[ordered_questions[que]][alumn]: numerator += 1 if sum_score > 0 and sum_score < num_ques: score_U1[alumn] = numerator / (sum_score * (num_ques - sum_score)) else: score_U1[alumn] = 0 #Sort dictionary score_U1 = dict(sorted(score_U1.items(), key=lambda item: item[1], reverse=True)) #U3 Statistic & CS Statistic first_term = 0 first_term_CS = 0 for w in range(sum_score): if ordered_questions[w] in sol_freq_per_question_penalized.keys(): first_term += sol_freq_per_question_penalized[ordered_questions[w]] first_term_CS += sol_freq_per_question[ordered_questions[w]] second_term = 0 second_term_CS = 0 third_term_CS = 
0 for y in range(num_ques): if alumn in correct_question_per_student[ordered_questions[y]].keys(): if ordered_questions[y] in sol_freq_per_question_penalized.keys(): second_term += correct_question_per_student[ordered_questions[y]][alumn] * sol_freq_per_question_penalized[ordered_questions[y]] second_term_CS += correct_question_per_student[ordered_questions[y]][alumn] * sol_freq_per_question[ordered_questions[y]] third_term_CS += sol_freq_per_question[ordered_questions[y]] third_term = 0 for x in range(num_ques - sum_score + 1 - 1, num_ques): if ordered_questions[x] in sol_freq_per_question_penalized.keys(): third_term += sol_freq_per_question_penalized[ordered_questions[x]] if sum_score > 0 and sum_score < num_ques: score_U3[alumn] = (first_term - second_term) / (first_term - third_term) else: score_U3[alumn] = 0 #Sort dictionary score_U3 = dict(sorted(score_U3.items(), key=lambda item: item[1], reverse=True)) if sum_score > 0 and sum_score < num_ques: score_CS[alumn] = (num_ques * (first_term_CS - second_term_CS)) / (num_ques * first_term_CS - sum_score * third_term_CS) else: score_CS[alumn] = 0 #Sort dictionary score_CS = dict(sorted(score_CS.items(), key=lambda item: item[1], reverse=True)) num_questions_total = np.max([value for value in num_question_participations.values()]) #Get classification of correct answers (0-20) in the dictionary students_performance = {k: round(v/num_questions_total*20,2) for k, v in students_performance.items()} #Define node color based on the performance color_map = {} #Assign color to each node for key in students_performance: if students_performance[key] >= 19: color_map[key] = 'DarkGreen' elif students_performance[key] >= 17: color_map[key] = 'Green' elif students_performance[key] >= 15: color_map[key] = 'OliveDrab' elif students_performance[key] >= 13: color_map[key] = 'ForrestGreen' elif students_performance[key] >= 10: color_map[key] = 'YellowGreen' elif students_performance[key] >= 7: color_map[key] = 'GreenYellow' else: color_map[key] = 'PaleGreen' #Get list of graph's edges edges_data = list(connected_students.edges.data()) #Compute students' scores #Create dictionary with scores per student (in and out) students_score_in = {} students_score_out = {} for node in connected_students.nodes(): #List of ingoing weights for this node ingoing_edges_weights = [e[2]['weight'] for e in edges_data if e[1] == str(node)] #Sort list of weights ingoing_edges_weights = sorted(ingoing_edges_weights, reverse=True) #Ingoing score (consumption) #Get the three highest values of weight n_highest_in = ingoing_edges_weights[:n_edges_score] #If there are no ingoing edges the score is 0 if n_highest_in != []: students_score_in[node] = sum(n_highest_in) else: students_score_in[node] = 0 #List of ingoing weights for this node outgoing_edges_weights = [e[2]['weight'] for e in edges_data if e[0] == str(node)] #Sort list of weights outgoing_edges_weights = sorted(outgoing_edges_weights, reverse=True) #Outgoing score (sharing) #Get the three highest values of weight n_highest_out = outgoing_edges_weights[:n_edges_score] #If there are no ingoing edges the score is 0 if n_highest_out != []: students_score_out[node] = sum(n_highest_out) else: students_score_out[node] = 0 #Sort the dictionaries by values students_score_in = dict(sorted(students_score_in.items(), key=lambda item: item[1], reverse=True)) students_score_out = dict(sorted(students_score_out.items(), key=lambda item: item[1], reverse=True)) return [students_score_in, students_score_out]
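# --- Added illustration (not part of create_network) -------------------------
# The returned scores are simply the sum of each node's n_edges_score heaviest
# incoming (consumption) and outgoing (sharing) edge weights. The toy digraph
# below reproduces that final step in isolation, using networkx only.
import networkx as nx

toy = nx.DiGraph()
toy.add_weighted_edges_from([("a", "b", 0.5), ("a", "c", 0.3),
                             ("c", "b", 0.4), ("b", "a", 0.1)])

n_edges_score = 2
score_in, score_out = {}, {}
for node in toy.nodes():
    in_w = sorted((d["weight"] for _, _, d in toy.in_edges(node, data=True)), reverse=True)
    out_w = sorted((d["weight"] for _, _, d in toy.out_edges(node, data=True)), reverse=True)
    score_in[node] = sum(in_w[:n_edges_score])
    score_out[node] = sum(out_w[:n_edges_score])

print(score_in)    # {'a': 0.1, 'b': 0.9, 'c': 0.3}
print(score_out)   # {'a': 0.8, 'b': 0.1, 'c': 0.4}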
python
from django.contrib import admin

from .models import (
    EconomicAssessment,
    EconomicImpactAssessment,
    ResolvabilityAssessment,
    StrategicAssessment,
)


@admin.register(EconomicImpactAssessment)
class EconomicImpactAssessmentAdmin(admin.ModelAdmin):
    pass


@admin.register(EconomicAssessment)
class EconomicAssessmentAdmin(admin.ModelAdmin):
    pass


@admin.register(StrategicAssessment)
class StrategicAssessmentAdmin(admin.ModelAdmin):
    pass


@admin.register(ResolvabilityAssessment)
class ResolvabilityAssessmentAdmin(admin.ModelAdmin):
    pass
python
import textwrap from pathlib import Path import pyexasol import pytest from exasol_udf_mock_python.column import Column from exasol_udf_mock_python.connection import Connection from exasol_udf_mock_python.group import Group from exasol_udf_mock_python.mock_exa_environment import MockExaEnvironment from exasol_udf_mock_python.mock_meta_data import MockMetaData from exasol_udf_mock_python.udf_mock_executor import UDFMockExecutor from exasol_data_science_utils_python.preprocessing.sql.schema.schema_name import SchemaName from exasol_bucketfs_utils_python.bucketfs_factory import BucketFSFactory @pytest.fixture(scope="session") def db_connection(): db_connection = Connection(address=f"localhost:8888", user="sys", password="exasol") return db_connection @pytest.fixture(scope="session") def pyexasol_connection(db_connection): conn = pyexasol.connect(dsn=db_connection.address, user=db_connection.user, password=db_connection.password) return conn @pytest.fixture(scope="session") def upload_language_container(pyexasol_connection, language_container): container_connection = Connection(address=f"http://localhost:6583/default/container;bfsdefault", user="w", password="write") bucket_fs_factory = BucketFSFactory() container_bucketfs_location = \ bucket_fs_factory.create_bucketfs_location( url=container_connection.address, user=container_connection.user, pwd=container_connection.password, base_path=None) container_path = Path(language_container["container_path"]) alter_session = Path(language_container["alter_session"]) pyexasol_connection.execute(f"ALTER SYSTEM SET SCRIPT_LANGUAGES='{alter_session}'") pyexasol_connection.execute(f"ALTER SESSION SET SCRIPT_LANGUAGES='{alter_session}'") with open(container_path, "rb") as container_file: container_bucketfs_location.upload_fileobj_to_bucketfs(container_file, "ml.tar") @pytest.fixture(scope="session") def create_input_table(pyexasol_connection): pyexasol_connection.execute(""" CREATE OR REPLACE TABLE TEST.ABC( P1 INTEGER, P2 INTEGER, A FLOAT, B FLOAT, C FLOAT ) """) for i in range(1, 100): if i % 100 == 0: print(f"Insert {i}") values = ",".join([f"({j % 2},{i % 2},{j * 1.0 * i}, {j * 2.0 * i}, {j * 3.0 * i})" for j in range(1, 100)]) pyexasol_connection.execute(f"INSERT INTO TEST.ABC VALUES {values}") print("COUNT", pyexasol_connection.execute("SELECT count(*) FROM TEST.ABC").fetchall()) def drop_and_create_target_schema(pyexasol_connection): try: pyexasol_connection.execute(""" DROP SCHEMA TARGET_SCHEMA CASCADE; """) except: pass pyexasol_connection.execute("""CREATE SCHEMA TARGET_SCHEMA;""") def udf_wrapper(): from exasol_udf_mock_python.udf_context import UDFContext from sklearn.linear_model import SGDRegressor from numpy.random import RandomState from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_description_based_table_preprocessor_factory import \ ColumnDescriptionBasedTablePreprocessorFactory from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_preprocessor_description import \ ColumnPreprocessorDescription from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.exact_column_name_selector import \ ExactColumnNameSelector from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.normalization.min_max_scaler_factory import \ MinMaxScalerFactory from exasol_data_science_utils_python.model_utils.udfs.partial_fit_regression_train_udf import PartialFitRegressionTrainUDF train_udf = PartialFitRegressionTrainUDF() def run(ctx: UDFContext): model = 
SGDRegressor(random_state=RandomState(0), loss="squared_loss", verbose=False, fit_intercept=True, eta0=0.9, power_t=0.1, learning_rate='invscaling') table_preprocessor_factory = ColumnDescriptionBasedTablePreprocessorFactory( input_column_preprocessor_descriptions=[ ColumnPreprocessorDescription( column_selector=ExactColumnNameSelector("A"), column_preprocessor_factory=MinMaxScalerFactory() ), ColumnPreprocessorDescription( column_selector=ExactColumnNameSelector("B"), column_preprocessor_factory=MinMaxScalerFactory() ), ], target_column_preprocessor_descriptions=[ ColumnPreprocessorDescription( column_selector=ExactColumnNameSelector("C"), column_preprocessor_factory=MinMaxScalerFactory() ), ] ) train_udf.run(exa, ctx, model, table_preprocessor_factory) def test_train_udf_with_mock_random_partitions( upload_language_container, create_input_table, pyexasol_connection, db_connection): expected_number_of_base_models = 3 result, fitted_base_models, fitted_combined_models, unique_base_models = \ run_mock_test_valid( db_connection, pyexasol_connection, split_by_node=False, number_of_random_partitions=3, split_by_columns=None, ) assert len(fitted_base_models) == expected_number_of_base_models assert len(unique_base_models) == expected_number_of_base_models assert len(fitted_combined_models) == 1 assert len(result) == 1 for group in result: assert len(group.rows) == 1 def test_train_udf_with_mock_split_by_node( upload_language_container, create_input_table, pyexasol_connection, db_connection): expected_number_of_base_models = 1 result, fitted_base_models, fitted_combined_models, unique_base_models = \ run_mock_test_valid( db_connection, pyexasol_connection, split_by_node=True, number_of_random_partitions=None, split_by_columns=None, ) assert len(fitted_base_models) == expected_number_of_base_models assert len(unique_base_models) == expected_number_of_base_models assert len(fitted_combined_models) == 1 assert len(result) == 1 for group in result: assert len(group.rows) == 1 def test_train_udf_with_mock_split_by_columns( upload_language_container, create_input_table, pyexasol_connection, db_connection): expected_number_of_base_models = 4 result, fitted_base_models, fitted_combined_models, unique_base_models = \ run_mock_test_valid( db_connection, pyexasol_connection, split_by_node=False, number_of_random_partitions=None, split_by_columns="P1,P2", ) assert len(fitted_base_models) == expected_number_of_base_models assert len(unique_base_models) == expected_number_of_base_models assert len(fitted_combined_models) == 1 assert len(result) == 1 for group in result: assert len(group.rows) == 1 def test_train_udf_with_mock_random_partitions_and_split_by_columns( upload_language_container, create_input_table, pyexasol_connection, db_connection): expected_number_of_base_models = 6 result, fitted_base_models, fitted_combined_models, unique_base_models = \ run_mock_test_valid( db_connection, pyexasol_connection, split_by_node=False, number_of_random_partitions=3, split_by_columns="P1", ) assert len(fitted_base_models) == expected_number_of_base_models assert len(unique_base_models) == expected_number_of_base_models assert len(fitted_combined_models) == 1 assert len(result) == 1 for group in result: assert len(group.rows) == 1 def test_train_udf_with_mock_split_by_node_and_random_partitions( upload_language_container, create_input_table, pyexasol_connection, db_connection): expected_number_of_base_models = 2 result, fitted_base_models, fitted_combined_models, unique_base_models = \ run_mock_test_valid( 
db_connection, pyexasol_connection, split_by_node=True, number_of_random_partitions=2, split_by_columns=None ) assert len(fitted_base_models) == expected_number_of_base_models assert len(unique_base_models) == expected_number_of_base_models assert len(fitted_combined_models) == 1 assert len(result) == 1 for group in result: assert len(group.rows) == 1 def test_train_udf_with_mock_split_by_columns_empty_string( upload_language_container, create_input_table, pyexasol_connection, db_connection): expected_number_of_base_models = 2 result, fitted_base_models, fitted_combined_models, unique_base_models = \ run_mock_test_valid( db_connection, pyexasol_connection, split_by_node=False, number_of_random_partitions=2, split_by_columns="", ) assert len(fitted_base_models) == expected_number_of_base_models assert len(unique_base_models) == expected_number_of_base_models assert len(fitted_combined_models) == 1 assert len(result) == 1 for group in result: assert len(group.rows) == 1 def test_train_udf_with_mock_multiple_groups( upload_language_container, create_input_table, pyexasol_connection, db_connection): number_of_groups = 2 expected_number_of_base_models = 2 result, fitted_base_models, fitted_combined_models, unique_base_models = \ run_mock_test_valid( db_connection, pyexasol_connection, split_by_node=False, number_of_random_partitions=2, split_by_columns="", number_of_groups=number_of_groups ) unique_model_id_in_base_models = {row[1] for row in fitted_base_models} assert len(fitted_base_models) == expected_number_of_base_models * number_of_groups assert len(unique_model_id_in_base_models) == number_of_groups assert len(unique_base_models) == expected_number_of_base_models * number_of_groups assert len(fitted_combined_models) == 1 * number_of_groups assert len(result) == number_of_groups for group in result: assert len(group.rows) == 1 def run_mock_test_valid(db_connection, pyexasol_connection, split_by_node: bool, number_of_random_partitions: int, split_by_columns: str, number_of_groups: int = 1): result = run_mock_test(db_connection, pyexasol_connection, split_by_node, number_of_random_partitions, split_by_columns, number_of_groups) fitted_base_models, fitted_combined_models, unique_base_models = get_results(pyexasol_connection, result) return result, fitted_base_models, fitted_combined_models, unique_base_models def get_results(pyexasol_connection, result): fitted_base_models = pyexasol_connection.execute(""" SELECT * FROM TARGET_SCHEMA.FITTED_BASE_MODELS""").fetchall() print("fitted_base_models", fitted_base_models) fitted_combined_models = pyexasol_connection.execute(""" SELECT * FROM TARGET_SCHEMA.FITTED_COMBINED_MODEL""").fetchall() print("fitted_combined_models", fitted_combined_models) unique_base_models = {row[4] for row in fitted_base_models} print("result", result) return fitted_base_models, fitted_combined_models, unique_base_models def run_mock_test(db_connection, pyexasol_connection, split_by_node: bool, number_of_random_partitions: int, split_by_columns: str, number_of_groups: int = 1): executor = UDFMockExecutor() meta = MockMetaData( script_code_wrapper_function=udf_wrapper, input_type="SET", input_columns=[ Column("model_connection", str, "VARCHAR(2000000)"), Column("path_under_model_connection", str, "VARCHAR(2000000)"), Column("download_retry_seconds", int, "INTEGER"), Column("db_connection", str, "VARCHAR(2000000)"), Column("source_schema_name", str, "VARCHAR(2000000)"), Column("source_table_name", str, "VARCHAR(2000000)"), Column("columns", str, "VARCHAR(2000000)"), 
Column("target_schema_name", str, "VARCHAR(2000000)"), Column("experiment_name", str, "VARCHAR(2000000)"), Column("epochs", int, "INTEGER"), Column("batch_size", int, "INTEGER"), Column("shuffle_buffer_size", int, "INTEGER"), Column("split_per_node", bool, "BOOLEAN"), Column("number_of_random_partitions", int, "INTEGER"), Column("split_by_columns", str, "VARCHAR(2000000)"), ], output_type="EMIT", output_columns=[ Column("job_id", str, "VARCHAR(2000000)"), Column("model_id", str, "VARCHAR(2000000)"), Column("model_connection_name", str, "VARCHAR(2000000)"), Column("path_under_model_connection", str, "VARCHAR(2000000)"), Column("model_path", str, "VARCHAR(2000000)"), ] ) model_connection, model_connection_name = \ create_model_connection(pyexasol_connection) drop_and_create_target_schema(pyexasol_connection) exa = MockExaEnvironment(meta, connections={ "MODEL_CONNECTION": model_connection, "DB_CONNECTION": db_connection }) groups = [Group([( model_connection_name, "my_path_under_model_connection_" + str(i), 60, "DB_CONNECTION", "TEST", "ABC", "A,B,C", "TARGET_SCHEMA", "EXPERIMENT", 10, 100, 10000, split_by_node, number_of_random_partitions, split_by_columns )]) for i in range(number_of_groups)] result = list(executor.run(groups, exa)) return result def test_train_udf( upload_language_container, create_input_table, pyexasol_connection, db_connection): model_connection, model_connection_name = \ create_model_connection(pyexasol_connection) db_connection, db_connection_name = \ create_db_connection(pyexasol_connection, db_connection) target_schema = SchemaName("TARGET_SCHEMA") drop_and_create_target_schema(pyexasol_connection) udf_sql = textwrap.dedent(f""" CREATE OR REPLACE PYTHON3_DSUP SET SCRIPT {target_schema.fully_qualified()}."TRAIN_UDF"( model_connection VARCHAR(2000000), path_under_model_connection VARCHAR(2000000), download_retry_seconds INTEGER, db_connection VARCHAR(2000000), source_schema_name VARCHAR(2000000), source_table_name VARCHAR(2000000), columns VARCHAR(2000000), target_schema_name VARCHAR(2000000), experiment_name VARCHAR(2000000), epochs INTEGER, batch_size INTEGER, shuffle_buffer_size INTEGER, split_per_node BOOLEAN, number_of_random_partitions INTEGER, split_by_columns VARCHAR(2000000) ) EMITS ( job_id VARCHAR(2000000), model_id VARCHAR(2000000), model_connection_name VARCHAR(2000000), path_under_model_connection VARCHAR(2000000), model_path VARCHAR(2000000) ) AS from sklearn.linear_model import SGDRegressor from numpy.random import RandomState from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_description_based_table_preprocessor_factory import \ ColumnDescriptionBasedTablePreprocessorFactory from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.column_preprocessor_description import \ ColumnPreprocessorDescription from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.exact_column_name_selector import \ ExactColumnNameSelector from exasol_data_science_utils_python.preprocessing.sql_to_scikit_learn.normalization.min_max_scaler_factory import \ MinMaxScalerFactory from exasol_data_science_utils_python.model_utils.udfs.partial_fit_regression_train_udf import PartialFitRegressionTrainUDF train_udf = PartialFitRegressionTrainUDF() def run(ctx): model = SGDRegressor(random_state=RandomState(0), loss="squared_loss", verbose=False, fit_intercept=True, eta0=0.9, power_t=0.1, learning_rate='invscaling') table_preprocessor_factory = ColumnDescriptionBasedTablePreprocessorFactory( 
input_column_preprocessor_descriptions=[ ColumnPreprocessorDescription( column_selector=ExactColumnNameSelector("A"), column_preprocessor_factory=MinMaxScalerFactory() ), ColumnPreprocessorDescription( column_selector=ExactColumnNameSelector("B"), column_preprocessor_factory=MinMaxScalerFactory() ), ], target_column_preprocessor_descriptions=[ ColumnPreprocessorDescription( column_selector=ExactColumnNameSelector("C"), column_preprocessor_factory=MinMaxScalerFactory() ), ] ) train_udf.run(exa, ctx, model, table_preprocessor_factory) """) pyexasol_connection.execute(udf_sql) query_udf = f""" select {target_schema.fully_qualified()}."TRAIN_UDF"( '{model_connection_name}', 'my_path_under_model_connection', 60, '{db_connection_name}', 'TEST', 'ABC', 'A,B,C', 'TARGET_SCHEMA', 'EXPERIMENT', 10, 100, 10000, True, 4, null ) """ pyexasol_connection.execute(query_udf) fitted_base_models = pyexasol_connection.execute(""" SELECT * FROM TARGET_SCHEMA.FITTED_BASE_MODELS""").fetchall() print(fitted_base_models) assert len(fitted_base_models) == 4 fitted_combined_models = pyexasol_connection.execute(""" SELECT * FROM TARGET_SCHEMA.FITTED_COMBINED_MODEL""").fetchall() print(fitted_combined_models) assert len(fitted_combined_models) == 1 def create_model_connection(conn): model_connection = Connection(address=f"http://localhost:6583/default/model;bfsdefault", user="w", password="write") model_connection_name = "MODEL_CONNECTION" return drop_and_create_connection(conn, model_connection, model_connection_name) def create_db_connection(conn, db_connection): db_connection_name = "DB_CONNECTION" return drop_and_create_connection(conn, db_connection, db_connection_name) def drop_and_create_connection(conn, model_connection, model_connection_name): try: conn.execute(f"DROP CONNECTION {model_connection_name}") except: pass conn.execute( f"CREATE CONNECTION {model_connection_name} TO '{model_connection.address}' USER '{model_connection.user}' IDENTIFIED BY '{model_connection.password}';") return model_connection, model_connection_name
python
# -*- coding: utf-8 -*-
import ast

# This has to be a global due to `exec` shenanigans :-(
current_spec = {}

# SQL types
SQL_TYPES = [
    'TEXT',
    'DATE',
    'DATETIME',
    'INTEGER',
    'BIGINT',
    'UNSIGNED_BIGINT',
    'DOUBLE',
    'BLOB',
]

# Functions that we don't need
DUMMY_FUNCTIONS = [
    'ForeignKey',
    'attributes',
    'description',
    'examples',
    'implementation',
    'fuzz_paths',
    'WINDOWS',
    'POSIX',
    'LINUX',
    'DARWIN',
]

RESERVED_KEYWORDS = [
    'table',
    'set',
]


def table_name(name, aliases=None):
    current_spec['name'] = name
    current_spec['aliases'] = aliases


def Column(name, col_type, *args, **kwargs):
    if name in RESERVED_KEYWORDS:
        name = '"%s"' % name
    return (name, col_type)


def schema(schema):
    # Filter out 'None' entries (usually from ForeignKeys)
    real_schema = [x for x in schema if x is not None]
    current_spec['schema'] = real_schema


def extended_schema(macro, schema):
    # Filter out 'None' entries (usually from ForeignKeys)
    real_schema = [x for x in schema if x is not None]
    current_spec.setdefault('extended_schema', []).extend(real_schema)


def extract_schema(filename):
    namespace = {
        'Column': Column,
        'schema': schema,
        'table_name': table_name,
        'extended_schema': extended_schema,
        'current_spec': {},
    }
    for fn in DUMMY_FUNCTIONS:
        namespace[fn] = lambda *args, **kwargs: None
    for ty in SQL_TYPES:
        namespace[ty] = ty

    with open(filename, 'r') as f:
        tree = ast.parse(f.read())
        exec(compile(tree, '<string>', 'exec'), namespace)

    columns = ', '.join('%s %s' % (x[0], x[1]) for x in current_spec['schema'])

    statements = []
    statements.append('CREATE TABLE %s (%s);' % (current_spec['name'], columns))

    if 'extended_schema' in current_spec:
        statement = 'ALTER TABLE %s ADD %%s %%s;' % (current_spec['name'], )
        for column_name, column_definition in current_spec['extended_schema']:
            statements.append(statement % (column_name, column_definition))
        del current_spec['extended_schema']

    return '\n'.join(statements)


if __name__ == '__main__':
    import sys
    print(extract_schema(sys.argv[1]))
python
# -*- coding: utf-8 -*- # ---------------------------------------------------------------------------- # Copyright © 2021, Spyder Bot # # Licensed under the terms of the MIT license # ---------------------------------------------------------------------------- """ Status bar widgets. """ # Third-party imports from qtpy.QtCore import Signal, Slot from qtpy.QtWidgets import QComboBox # Spyder imports from spyder.api.config.decorators import on_conf_change from spyder.api.translations import get_translation from spyder.api.widgets.status import StatusBarWidget # Localization _ = get_translation("status_bar_widgets.spyder") # ---- Constants class StatusbarWidgets: ThemeStatus = 'theme-status' PlainFontSizeStatus = 'combobox-status' # ---- Theme widget class ThemeStatusWidget(StatusBarWidget): """ Widget to display the current syntax highlighting theme. Notes ----- * Status bar widgets need to inherit from StatusBarWidget or BaseTimerStatus. * See container.py to check how its label is updated and plugin.py to see how it's registered in the status bar. """ ID = StatusbarWidgets.ThemeStatus # ---- Font size widget class PlainFontSizeComboBox(QComboBox): def __init__(self, parent): super().__init__(parent) # Add some font sizes to choose from. self.addItems([str(i) for i in range(9, 16)]) class PlainFontSizeStatus(StatusBarWidget): ID = StatusbarWidgets.PlainFontSizeStatus CUSTOM_WIDGET_CLASS = PlainFontSizeComboBox sig_size_change_requested = Signal(int) """ This is signal is emitted to request for a plain text font size change in Spyder. Parameters ---------- font_size: int New font size (in pixels). """ def __init__(self, parent): super().__init__(parent) self.custom_widget.currentTextChanged.connect(self.set_size) def set_current_size(self, size): """Set current font size in combobox.""" # The value that comes from Spyder config system is an int, but # the combobox only accepts strings. size = str(size) # Add size to combobox in case it's not present among items if self.custom_widget.findText(size) == -1: self.custom_widget.addItem(size) # Set size as default value index = self.custom_widget.findText(size) self.custom_widget.setCurrentIndex(index) @Slot(str) def set_size(self, value): """ Set selected size in combobox in Spyder config system and request a change. """ # In Spyder this is an int, not a string. value = int(value) # *Note*: This should be as simple as setting the new font size and # seeing the changes happen in Spyder. Unfortunately, that's not the # way it's working right now, but it will be in Spyder 5.1.0. # For now we have to emit a signal and handle the update manually at # the plugin level. self.set_conf(section='appearance', option='font/size', value=value) self.sig_size_change_requested.emit(value)
python
from .. import Provider as CreditCardProvider


class Provider(CreditCardProvider):
    pass
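# A minimal usage sketch, assuming the standard `faker` package: since this
# locale provider only inherits the default credit-card behaviour, a bare
# Faker() instance exposes the same methods the subclass above would provide.
# The printed values are illustrative, not fixed outputs.
from faker import Faker

fake = Faker()
print(fake.credit_card_number())    # e.g. "4556737586899855"
print(fake.credit_card_provider())  # e.g. "VISA 16 digit"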
python
import collections import time import warnings from collections import namedtuple import numpy as np import torch from tianshou.data import Batch, ReplayBuffer from tianshou.env import BaseVectorEnv, VectorEnv Experience = namedtuple('Exp', ['hidden', 'obs', 'act', 'reward', 'obs_next', 'done']) HIDDEN_SIZE = 256 class Collector(object): """The :class:`~tianshou.data.Collector` enables the policy to interact with different types of environments conveniently. :param policy: an instance of the :class:`~tianshou.policy.BasePolicy` class. :param env: an environment or an instance of the :class:`~tianshou.env.BaseVectorEnv` class. :param buffer: an instance of the :class:`~tianshou.data.ReplayBuffer` class, or a list of :class:`~tianshou.data.ReplayBuffer`. If set to ``None``, it will automatically assign a small-size :class:`~tianshou.data.ReplayBuffer`. :param int stat_size: for the moving average of recording speed, defaults to 100. Example: :: policy = PGPolicy(...) # or other policies if you wish env = gym.make('CartPole-v0') replay_buffer = ReplayBuffer(size=10000) # here we set up a collector with a single environment collector = Collector(policy, env, buffer=replay_buffer) # the collector supports vectorized environments as well envs = VectorEnv([lambda: gym.make('CartPole-v0') for _ in range(3)]) buffers = [ReplayBuffer(size=5000) for _ in range(3)] # you can also pass a list of replay buffer to collector, for multi-env # collector = Collector(policy, envs, buffer=buffers) collector = Collector(policy, envs, buffer=replay_buffer) # collect at least 3 episodes collector.collect(n_episode=3) # collect 1 episode for the first env, 3 for the third env collector.collect(n_episode=[1, 0, 3]) # collect at least 2 steps collector.collect(n_step=2) # collect episodes with visual rendering (the render argument is the # sleep time between rendering consecutive frames) collector.collect(n_episode=1, render=0.03) # sample data with a given number of batch-size: batch_data = collector.sample(batch_size=64) # policy.learn(batch_data) # btw, vanilla policy gradient only # supports on-policy training, so here we pick all data in the buffer batch_data = collector.sample(batch_size=0) policy.learn(batch_data) # on-policy algorithms use the collected data only once, so here we # clear the buffer collector.reset_buffer() For the scenario of collecting data from multiple environments to a single buffer, the cache buffers will turn on automatically. It may return the data more than the given limitation. .. note:: Please make sure the given environment has a time limitation. 
""" def __init__(self, policy, env, buffer=None, episodic=False, stat_size=5, **kwargs): super().__init__() if not isinstance(env, BaseVectorEnv): self.env = VectorEnv([env]) else: self.env = env self._collect_step = 0 self._collect_episode = 0 self._collect_time = 0 self.buffer = buffer self.policy = policy self.process_fn = policy.process_fn self._episodic = episodic if self._episodic and buffer is not None: self._cached_buf = [ReplayBuffer(buffer._maxsize // self.env.env_num) for _ in range(self.env.env_num)] self.stat_size = stat_size self._step_speed = collections.deque([], self.stat_size) self._episode_speed = collections.deque([], self.stat_size) self._episode_length = collections.deque([], self.stat_size) self._episode_reward = collections.deque([], self.stat_size) self.reset() def reset(self): """Reset all related variables in the collector.""" self.reset_env() self.reset_buffer() # state over batch is either a list, an np.ndarray, or a torch.Tensor self._step_speed.clear() self._episode_speed.clear() self._episode_length.clear() self._episode_reward.clear() self._collect_step = 0 self._collect_episode = 0 self._collect_time = 0 def reset_buffer(self): """Reset the main data buffer.""" if self._episodic: [b.reset() for b in self._cached_buf] if self.buffer is not None: self.buffer.reset() def get_env_num(self): """Return the number of environments the collector has.""" return self.env.env_num def reset_env(self): """Reset all of the environment(s)' states and reset all of the cache buffers (if need). """ self._obs = self.env.reset() self._act = self._rew = self._done = None self._hidden_next = self._hidden = np.zeros((self.get_env_num(), HIDDEN_SIZE)) self.reward = np.zeros(self.env.env_num) self.length = np.zeros(self.env.env_num) def seed(self, seed=None): """Reset all the seed(s) of the given environment(s).""" return self.env.seed(seed) def render(self, **kwargs): """Render all the environment(s).""" return self.env.render(**kwargs) def close(self): """Close the environment(s).""" self.env.close() def _to_numpy(self, x): """Return an object without torch.Tensor.""" if isinstance(x, torch.Tensor): return x.cpu().numpy() elif isinstance(x, dict): for k in x: if isinstance(x[k], torch.Tensor): x[k] = x[k].cpu().numpy() return x elif isinstance(x, Batch): x.to_numpy() return x return x def collect(self, n_step=0, n_episode=0, sampling=False, render=None): """Collect a specified number of step or episode. :param int n_step: how many steps you want to collect. :param n_episode: how many episodes you want to collect (in each environment). :type n_episode: int or list :param float render: the sleep time between rendering consecutive frames, defaults to ``None`` (no rendering). .. note:: One and only one collection number specification is permitted, either ``n_step`` or ``n_episode``. :return: A dict including the following keys * ``n/ep`` the collected number of episodes. * ``n/st`` the collected number of steps. * ``v/st`` the speed of steps per second. * ``v/ep`` the speed of episode per second. * ``rew`` the mean reward over collected episodes. * ``len`` the mean length over collected episodes. """ warning_count = 0 start_time = time.time() assert not (n_step and n_episode), "One and only one collection number specification is permitted!" cur_step = 0 cur_episode = np.zeros(self.env.env_num) while True: if warning_count >= 100000: warnings.warn( 'There are already many steps in an episode. 
' 'You should add a time limitation to your environment!', Warning) batch_data = Batch(obs=self._obs, act=self._act, rew=self._rew, done=self._done) if sampling == True: self._act = self.env.sample() else: with torch.no_grad(): result = self.policy(batch_data, self._hidden) if hasattr(result, 'hidden') and result.hidden is not None: self._hidden_next = result.hidden if isinstance(result.act, torch.Tensor): self._act = self._to_numpy(result.act) elif not isinstance(self._act, np.ndarray): self._act = np.array(result.act) else: self._act = result.act obs_next, self._rew, self._done, _ = self.env.step(self._act) if render is not None: self.env.render() if render > 0: time.sleep(render) self.length += 1 self.reward += self._rew for i in range(self.env.env_num): warning_count += 1 collection = Experience( self._hidden[i], self._obs[i], self._act[i], self._rew[i], obs_next[i], self._done[i] ) if not self._episodic: cur_step += 1 if self.buffer is not None: self.buffer.add(collection) else: self._cached_buf[i].add(collection) if self._done[i]: if self._episodic: cur_step += len(self._cached_buf[i]) if self.buffer is not None: self.buffer.extend(self._cached_buf[i]) cur_episode[i] += 1 self._episode_reward.append(self.reward[i]) self._episode_length.append(self.length[i]) self.reward[i], self.length[i] = 0, 0 if sum(self._done): ids = np.where(self._done)[0] obs_next = self.env.reset(ids) self._hidden_next[self._done] = 0. self._obs = obs_next self._hidden = self._hidden_next if n_episode and np.sum(cur_episode) >= n_episode: break if n_step != 0 and cur_step >= n_step: break cur_episode = sum(cur_episode) duration = time.time() - start_time self._step_speed.append(cur_step / duration) self._episode_speed.append(cur_episode / duration) self._collect_step += cur_step self._collect_episode += cur_episode self._collect_time += duration return { 'n/ep': cur_episode, 'n/st': cur_step, 'n/buffer': len(self.buffer) if self.buffer else 0, 'v/st': np.nanmean(self._step_speed), 'v/ep': np.nanmean(self._episode_speed) if self._collect_episode else 0, 'ep/reward': np.nanmean(self._episode_reward) if self._collect_episode else 0, 'ep/len': np.nanmean(self._episode_length) if self._collect_episode else 0, } def sample(self, batch_size): """Sample a data batch from the internal replay buffer. It will call :meth:`~tianshou.policy.BasePolicy.process_fn` before returning the final batch data. :param int batch_size: ``0`` means it will extract all the data from the buffer, otherwise it will extract the data with the given batch_size. """ batch_data, indice = self.buffer.sample(batch_size) batch_data = self.process_fn(batch_data, self.buffer, indice) return batch_data
python
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from keystoneauth1 import loading as kaloading from oslo_utils import importutils from ironicclient.common.i18n import _ from ironicclient import exc LOG = logging.getLogger(__name__) # TODO(vdrok): remove in Stein def convert_keystoneauth_opts(kwargs): old_to_new_names = { ('os_auth_token',): 'token', ('os_username',): 'username', ('os_password',): 'password', ('os_auth_url',): 'auth_url', ('os_project_id',): 'project_id', ('os_project_name',): 'project_name', ('os_tenant_id',): 'tenant_id', ('os_tenant_name',): 'tenant_name', ('os_region_name',): 'region_name', ('os_user_domain_id',): 'user_domain_id', ('os_user_domain_name',): 'user_domain_name', ('os_project_domain_id',): 'project_domain_id', ('os_project_domain_name',): 'project_domain_name', ('os_service_type',): 'service_type', ('os_endpoint_type',): 'interface', ('ironic_url',): 'endpoint', ('os_cacert', 'ca_file'): 'cafile', ('os_cert', 'cert_file'): 'certfile', ('os_key', 'key_file'): 'keyfile' } for olds, new in old_to_new_names.items(): for old in olds: if kwargs.get(old): LOG.warning('The argument "%s" passed to get_client is ' 'deprecated and will be removed in Stein release, ' 'please use "%s" instead.', old, new) kwargs.setdefault(new, kwargs[old]) def get_client(api_version, auth_type=None, os_ironic_api_version=None, max_retries=None, retry_interval=None, **kwargs): """Get an authenticated client, based on the credentials. :param api_version: the API version to use. Valid value: '1'. :param auth_type: type of keystoneauth auth plugin loader to use. :param os_ironic_api_version: ironic API version to use. :param max_retries: Maximum number of retries in case of conflict error :param retry_interval: Amount of time (in seconds) between retries in case of conflict error. :param kwargs: all the other params that are passed to keystoneauth. """ # TODO(TheJulia): At some point, we should consider possibly noting # the "latest" flag for os_ironic_api_version to cause the client to # auto-negotiate to the greatest available version, however we do not # have the ability yet for a caller to cap the version, and will hold # off doing so until then. 
convert_keystoneauth_opts(kwargs) if auth_type is None: if 'endpoint' in kwargs: if 'token' in kwargs: auth_type = 'admin_token' else: auth_type = 'none' elif 'token' in kwargs and 'auth_url' in kwargs: auth_type = 'token' else: auth_type = 'password' session = kwargs.get('session') if not session: loader = kaloading.get_plugin_loader(auth_type) loader_options = loader.get_options() # option.name looks like 'project-name', while dest will be the actual # argument name to which the value will be passed to (project_name) auth_options = [o.dest for o in loader_options] # Include deprecated names as well auth_options.extend([d.dest for o in loader_options for d in o.deprecated]) auth_kwargs = {k: v for (k, v) in kwargs.items() if k in auth_options} auth_plugin = loader.load_from_options(**auth_kwargs) # Let keystoneauth do the necessary parameter conversions session_loader = kaloading.session.Session() session_opts = {k: v for (k, v) in kwargs.items() if k in [o.dest for o in session_loader.get_conf_options()]} session = session_loader.load_from_options(auth=auth_plugin, **session_opts) endpoint = kwargs.get('endpoint') if not endpoint: try: # endpoint will be used to get hostname # and port that will be used for API version caching. endpoint = session.get_endpoint( service_type=kwargs.get('service_type') or 'baremetal', interface=kwargs.get('interface') or 'publicURL', region_name=kwargs.get('region_name') ) except Exception as e: raise exc.AmbiguousAuthSystem( _('Must provide Keystone credentials or user-defined ' 'endpoint, error was: %s') % e) ironicclient_kwargs = { 'os_ironic_api_version': os_ironic_api_version, 'max_retries': max_retries, 'retry_interval': retry_interval, 'session': session, 'endpoint_override': endpoint } return Client(api_version, **ironicclient_kwargs) def Client(version, *args, **kwargs): module = importutils.import_versioned_module('ironicclient', version, 'client') client_class = getattr(module, 'Client') return client_class(*args, **kwargs)
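# A minimal sketch of calling get_client() from this module with password-based
# Keystone credentials. The endpoint URL, user names, and the microversion are
# placeholder assumptions, not values taken from this file; a real deployment
# supplies its own.
from ironicclient import client

kwargs = {
    'auth_url': 'http://keystone.example.com/identity/v3',  # hypothetical
    'username': 'admin',
    'password': 'secret',
    'project_name': 'admin',
    'user_domain_name': 'Default',
    'project_domain_name': 'Default',
}
ironic = client.get_client('1', os_ironic_api_version='1.31', **kwargs)
nodes = ironic.node.list()  # list bare-metal nodes via the v1 client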
python
# -*- coding: utf-8 -*- """ Created on Thu Jul 23 13:56:25 2020 Authors: Pavan Kota, Daniel LeJeune Reference: P. K. Kota, D. LeJeune, R. A. Drezek, and R. G. Baraniuk, "Extreme Compressed Sensing of Poisson Rates from Multiple Measurements," Mar. 2021. arXiv ID: """ # Multiple Measurement Vector Compressed Sensing from abc import ABC, abstractmethod import numpy as np import pickle class SignalGenerator(ABC): """Methods for generating X """ @abstractmethod def xgen(self, N, D, k): """Generate an N x D signal matrix X Parameters ---------- N: int Dimension of signals D: int Number of N-dimensional signals to generate k: int Sparsity level. Number of nonzero elements in lambda^* (true Poisson rates) Returns ------- X : (N, D) ndarray Samples of X for each column of Y. """ pass class MMVP(SignalGenerator): """ Multiple Measurement Vector with Poisson constraints (MMVP) signal generator """ def __init__(self, N, D, k, lamTot, initialSeed=None): """ New Parameters ---------- lamTot: float or int Sum(lambda^*). Corresponds with, for example, average analyte number per observation initialSeed: int, optional Seed for restoring RNG if X's are generated multiple times in same script and generating the initial X's again is desired. """ if k > N : raise ValueError("k must be less than N") self.N = N self.D = D self.k = k self.lamTot = lamTot self.initialSeed = initialSeed #np.random.seed(initialSeed) self._generator = np.random.default_rng(initialSeed) def set_lambda(self): lambdaStar = np.zeros(self.N) # Choose sparse rows randomly rowInds = np.random.choice(self.N, self.k, replace=False) # Set lambda randomly lambdaStar[rowInds] = self.get_mags() return lambdaStar def xgen(self): lambdaStar = self.set_lambda() # Generate X's X = self._generator.poisson(lambdaStar[:, None], (self.N, self.D)) return X, lambdaStar def gen_trials(self, numTrials, seed=None, savePath=None): """ Parameters ---------- numTrials : int Number of trials to generate sensing matrices for seed : int, optional Random seed initial state. The default is None. savePath: string or None Path including filename (.pickle file type) to store generated X's and lambda^*'s. If None, signals are not saved. """ # Which to use? 
Need consistent selection of k rows too if seed is None: np.random.seed(self.initialSeed) self._generator = np.random.default_rng(self.initialSeed) else: np.random.seed(seed) self._generator = np.random.default_rng(seed) allX = np.zeros((self.N, self.D, numTrials)) allLambdaStars = np.zeros((self.N, numTrials)) for i in range(numTrials): allX[:,:,i], allLambdaStars[:,i] = self.xgen() if savePath is not None: allSignals = {'signalModelUsed': self, 'allX': allX, 'allLambdaStars': allLambdaStars} with open(savePath, 'wb') as fileWrite: pickle.dump(allSignals, fileWrite) return allX, allLambdaStars def get_mags(self): mags = self._generator.uniform(size=self.k) return mags / np.sum(mags) * self.lamTot class MMVPConstantLambda(MMVP): def __init__(self, N, D, k, lambda_val, initialSeed=None): """ New Parameters ---------- lambda_val: float or int Value to set any nonzero value of lambda to """ if k > N : raise ValueError("k must be less than N") self.N = N self.D = D self.k = k self.lambda_val = lambda_val self.initialSeed = initialSeed self._generator = np.random.default_rng(initialSeed) def get_mags(self): return np.ones(self.k) * self.lambda_val class MMVPInputLambda(MMVP): def __init__(self, D, lambda_vec, initialSeed=None): """ New Parameters ---------- lambda_vec: numpy array, shape (N,) Fixed lambda vector """ self.lam = lambda_vec self.N = np.size(lambda_vec) self.D = D self.initialSeed = initialSeed self._generator = np.random.default_rng(initialSeed) def set_lambda(self): return self.lam def get_mags(self): pass
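# A small usage sketch of the MMVP generator defined above; the parameter
# values are illustrative only.
if __name__ == "__main__":
    # 10 trials of a 50-dimensional, 5-sparse rate vector whose nonzero
    # entries sum to 20, with D = 100 observation columns per trial.
    signal_model = MMVP(N=50, D=100, k=5, lamTot=20, initialSeed=0)
    allX, allLambdaStars = signal_model.gen_trials(numTrials=10)
    print(allX.shape)            # (50, 100, 10)
    print(allLambdaStars.shape)  # (50, 10)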
python
import torch
import torch.nn as nn
from lazytorch import (
    LazyConv2dInChannelModule,
    create_lazy_signature,
    NamedSequential,
)

from .depth_sep_conv import DepthwiseConv2d, PointwiseConv2d
from .squeeze_excitation import SqueezeExcitation

from typing import Optional


class InvertedBottleneck(nn.Module):
    """An inverted bottleneck block with optional squeeze-and-excitation layer.

    References:
        - MobileNetV2 (https://arxiv.org/abs/1801.04381)
        - MnasNet (https://arxiv.org/abs/1807.11626)"""

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        expansion_ratio: int = 1,
        use_se: bool = False,
        se_reduction_ratio: Optional[int] = None,
        norm_layer: nn.Module = nn.BatchNorm2d,
        activation: nn.Module = nn.ReLU,
    ):
        super().__init__()
        self.stride = stride
        self.out_channels = out_channels
        mid_channels = in_channels * expansion_ratio

        self.layers = NamedSequential(
            pw=PointwiseConv2d(
                in_channels,
                mid_channels,
                norm_layer=norm_layer,
                activation=activation,
            ),
            dw=DepthwiseConv2d(
                mid_channels,
                kernel_size=kernel_size,
                stride=stride,
                norm_layer=norm_layer,
                activation=activation,
            ),
            se=nn.Identity(),
            bottleneck=nn.Conv2d(mid_channels, out_channels, 1),
        )

        if use_se:
            self.layers.se = SqueezeExcitation(
                mid_channels, reduction_ratio=se_reduction_ratio
            )

    def forward(self, x: torch.Tensor):
        out = self.layers(x)
        if x.shape == out.shape:
            out += x
        return out


@create_lazy_signature(exclude=["in_channels"])
class LazyInvertedBottleneck(LazyConv2dInChannelModule, InvertedBottleneck):
    """Lazily-initialized InvertedBottleneck module"""
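# A quick shape-check sketch for the block defined above, assuming the local
# lazytorch / depthwise / pointwise helpers are importable and that
# DepthwiseConv2d pads to preserve spatial size (as is typical). Values are
# illustrative.
if __name__ == "__main__":
    # Expand 32 channels by 6x internally and project back to 32 output
    # channels so the residual connection in forward() is taken.
    block = InvertedBottleneck(
        in_channels=32,
        out_channels=32,
        expansion_ratio=6,
        use_se=True,
        se_reduction_ratio=4,
    )
    x = torch.randn(1, 32, 56, 56)
    out = block(x)
    print(out.shape)  # expected: torch.Size([1, 32, 56, 56])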
python
from rest_framework import generics, status from rest_framework import viewsets from rest_framework.exceptions import ( ValidationError ) from rest_framework.response import Response from rest_framework.permissions import AllowAny from .models import ( Category, Recipe ) from .serializers import ( CategorySerializer, RecipeSerializer, ) class CategoryViewSet(viewsets.ModelViewSet): permission_classes = (AllowAny,) serializer_class = CategorySerializer def get_queryset(self): # list categories queryset = Category.objects.all() return queryset def get_object(self): if self.kwargs.get("pk"): category = Category.objects.filter(pk=self.kwargs.get("pk")).first() if not category: msg='Category with that id does not exists' raise ValidationError(msg) return category def create(self, request): # check if category already exists category = Category.objects.filter( name=request.data.get('name'), ) if category: msg='Category with that name already exists' raise ValidationError(msg) return super().create(request) def destroy(self, request, *args, **kwargs): category = Category.objects.filter(pk=self.kwargs["pk"]).first() if not category: msg='Category with that id does not exists' raise ValidationError(msg) return super().destroy(request, *args, **kwargs) def update(self, request, *args, **kwargs): category = Category.objects.filter(pk=self.kwargs["pk"]).first() if not category: msg='Category with that id does not exists' raise ValidationError(msg) return super().update(request, *args, **kwargs) def perform_create(self, serializer): serializer.save() class CategoryRecipes(generics.ListCreateAPIView): permission_classes = (AllowAny,) serializer_class = RecipeSerializer def get_queryset(self): if self.kwargs.get("category_pk"): category = Category.objects.filter(pk=self.kwargs["category_pk"]).first() if not category: msg='Category with that id does not exists' raise ValidationError(msg) queryset = Recipe.objects.filter( category=category ) return queryset # def create(self, request, *args, **kwargs): # serializer = self.get_serializer(data=request.data) # if not serializer.is_valid(): # return Response( # serializer.errors, status=status.HTTP_400_BAD_REQUEST) # category = Category.objects.get(pk=self.kwargs["category_pk"]) # item = Recipe.objects.create( # name=serializer.data['name'], # description=serializer.data['description'], # ingredients=serializer.data['ingredients'], # image=serializer.data['image'], # directions=serializer.data['directions'], # is_public=serializer.data['is_public'], # category=category, # ) # result = self.serializer_class(item) # return Response(result.data, status=status.HTTP_201_CREATED) def perform_create(self, serializer): category = Category.objects.filter(pk=self.kwargs["category_pk"]).first() if not category: msg='Category with that id does not exists' raise ValidationError(msg) serializer.save(category=category) class SingleCategoryRecipe(generics.RetrieveUpdateDestroyAPIView): permission_classes = (AllowAny,) serializer_class = RecipeSerializer def get_queryset(self): if self.kwargs.get("category_pk") and self.kwargs.get("pk"): category = Category.objects.filter(pk=self.kwargs["category_pk"]).first() if not category: msg='Category with that id does not exists' raise ValidationError(msg) queryset = Recipe.objects.filter( pk=self.kwargs["pk"], category=category ) if len(queryset) == 0: msg=f'Recipe with that id does not exists' raise ValidationError(msg) return queryset class RecipesViewSet(viewsets.ModelViewSet): permission_classes = (AllowAny,) serializer_class = 
RecipeSerializer def get_queryset(self): queryset = Recipe.objects.all() return queryset # Only authenticated users can create recipes def create(self, request, *args, **kwargs): return super().create(request, *args, **kwargs) def destroy(self, request, *args, **kwargs): return super().destroy(request, *args, **kwargs) def update(self, request, *args, **kwargs): return super().update(request, *args, **kwargs) def perform_create(self, serializer): serializer.save() class PublicRecipes(generics.ListAPIView): permission_classes = (AllowAny,) serializer_class = RecipeSerializer def get_queryset(self): queryset = Recipe.objects.all().filter() return queryset class PublicRecipesDetail(generics.RetrieveAPIView): permission_classes = (AllowAny,) serializer_class = RecipeSerializer def get_queryset(self): queryset = Recipe.objects.all().filter(is_public=True) return queryset
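# The viewsets and generic views above would typically be wired up with a DRF
# router. This is a hypothetical urls.py sketch; the route prefixes and
# basenames are illustrative assumptions, not taken from this project.
from django.urls import include, path
from rest_framework.routers import DefaultRouter

from .views import (
    CategoryRecipes,
    CategoryViewSet,
    RecipesViewSet,
    SingleCategoryRecipe,
)

router = DefaultRouter()
router.register(r'categories', CategoryViewSet, basename='category')
router.register(r'recipes', RecipesViewSet, basename='recipe')

urlpatterns = [
    path('', include(router.urls)),
    # These URL kwargs (category_pk, pk) match the lookups used in the views.
    path('categories/<int:category_pk>/recipes/', CategoryRecipes.as_view()),
    path('categories/<int:category_pk>/recipes/<int:pk>/', SingleCategoryRecipe.as_view()),
]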
python
import shutil from tokenizers.normalizers import NFKC from autonmt.preprocessing import tokenizers from autonmt.bundle import utils from autonmt.bundle.utils import * def normalize_file(input_file, output_file, normalizer, force_overwrite, limit=None): if force_overwrite or not os.path.exists(output_file): lines = read_file_lines(input_file, autoclean=True) lines = lines if not limit else lines[:limit] lines = lines if not normalizer else [normalizer(line) for line in lines] write_file_lines(lines=lines, filename=output_file, insert_break_line=True, encoding="utf-8") assert os.path.exists(output_file) def pretokenize_file(input_file, output_file, lang, force_overwrite, **kwargs): # Tokenize if force_overwrite or not os.path.exists(output_file): tokenizers.moses_tokenizer(input_file=input_file, output_file=output_file, lang=lang) assert os.path.exists(output_file) def encode_file(ds, input_file, output_file, lang, merge_vocabs, truncate_at, force_overwrite, **kwargs): # Check if file exists if force_overwrite or not os.path.exists(output_file): # Apply preprocessing # Copy file if ds.subword_model in {None, "none"}: shutil.copyfile(input_file, output_file) elif ds.subword_model in {"bytes"}: # Save file as UTF8 and make sure everything uses NFKC lines = read_file_lines(input_file, autoclean=True) lines = [NFKC().normalize_str(line) for line in lines] lines = [" ".join([hex(x) for x in line.encode()]) for line in lines] write_file_lines(lines=lines, filename=output_file, insert_break_line=True) else: # Select model if merge_vocabs: model_path = ds.get_vocab_file() + ".model" else: model_path = ds.get_vocab_file(lang=lang) + ".model" # Encode files tokenizers.spm_encode(spm_model_path=model_path, input_file=input_file, output_file=output_file) # Truncate if needed if truncate_at: lines = read_file_lines(output_file, autoclean=True) lines = [" ".join(line.split(' ')[:truncate_at]).strip() for line in lines] write_file_lines(lines=lines, filename=output_file, insert_break_line=True) # Check that the output file exist assert os.path.exists(output_file) def decode_file(input_file, output_file, lang, subword_model, pretok_flag, model_vocab_path, force_overwrite, remove_unk_hyphen=False, **kwargs): if force_overwrite or not os.path.exists(output_file): # Detokenize if subword_model in {None, "none"}: # Rename or copy files (tok==txt) shutil.copyfile(input_file, output_file) elif subword_model in {"bytes"}: # Decode files lines = read_file_lines(input_file, autoclean=True) lines = [clean_file_line(bytes([int(x, base=16) for x in line.split(' ')])) for line in lines] # Write files write_file_lines(lines=lines, filename=output_file, insert_break_line=True) else: # Decode files tokenizers.spm_decode(model_vocab_path + ".model", input_file=input_file, output_file=output_file) # Remove the hyphen of unknown words when needed if remove_unk_hyphen: replace_in_file('▁', ' ', output_file) # Detokenize with moses if pretok_flag: tokenizers.moses_detokenizer(input_file=output_file, output_file=output_file, lang=lang) # Check that the output file exist assert os.path.exists(output_file) def decode_lines(lines, lang, subword_model, pretok_flag, model_vocab_path, remove_unk_hyphen=False): # Detokenize if subword_model in {None, "none"}: # Rename or copy files (tok==txt) lines = lines elif subword_model in {"bytes"}: # Decode files lines = [utils.clean_file_line(bytes([int(x, base=16) for x in line.split(' ')])) for line in lines] else: # Decode files lines = tokenizers._spm_decode(lines, model_vocab_path + 
".model") # Remove the hyphen of unknown words when needed if remove_unk_hyphen: lines = [line.replace('▁', ' ') for line in lines] # Detokenize with moses if pretok_flag: lines = tokenizers._moses_detokenizer(lines, lang=lang) return lines
python
"""PythonHere app.""" # pylint: disable=wrong-import-order,wrong-import-position from launcher_here import try_startup_script try: try_startup_script() # run script entrypoint, if it was passed except Exception as exc: startup_script_exception = exc # pylint: disable=invalid-name else: startup_script_exception = None # pylint: disable=invalid-name import asyncio import os from pathlib import Path import sys import threading from typing import Any, Dict from kivy.app import App from kivy.clock import Clock from kivy.config import Config, ConfigParser from kivy.logger import Logger from enum_here import ScreenName, ServerState from exception_manager_here import install_exception_handler, show_exception_popup from patches_here import monkeypatch_kivy from server_here import run_ssh_server from window_here import reset_window_environment monkeypatch_kivy() class PythonHereApp(App): """PythonHere main app.""" def __init__(self): super().__init__() self.server_task = None self.settings = None self.ssh_server_config_ready = asyncio.Event() self.ssh_server_started = asyncio.Event() self.ssh_server_connected = asyncio.Event() self.ssh_server_namespace = {} self.icon = "data/logo/logo-32.png" @property def upload_dir(self) -> str: """Path to the directory to use for uploaded data.""" root_dir = Path(self.user_data_dir or ".").resolve() upload_dir = Path(root_dir) / "upload" upload_dir.mkdir(exist_ok=True) return str(upload_dir) @property def config_path(self) -> str: """Path to the application config file.""" root_dir = Path(self.user_data_dir or ".").resolve() return str(root_dir / "config.ini") def load_config(self) -> ConfigParser: """Returning the application configuration.""" Config.read(self.config_path) # Override the configuration file location return super().load_config() def build(self): """Initialize application UI.""" super().build() install_exception_handler() self.settings = self.root.ids.settings self.ssh_server_namespace.update( { "app": self, "root": self.root, } ) self.update_server_config_status() if startup_script_exception: Clock.schedule_once( lambda _: show_exception_popup(startup_script_exception), 0 ) def run_app(self): """Run application and SSH server tasks.""" self.ssh_server_started = asyncio.Event() self.server_task = asyncio.ensure_future(run_ssh_server(self)) return asyncio.gather(self.async_run_app(), self.server_task) async def async_run_app(self): """Run app asynchronously.""" try: await self.async_run(async_lib="asyncio") Logger.info("PythonHere: async run completed") except asyncio.CancelledError: Logger.info("PythonHere: app main task canceled") except Exception as exc: Logger.exception(exc) if self.server_task: self.server_task.cancel() if self.get_running_app(): self.stop() await self.cancel_asyncio_tasks() async def cancel_asyncio_tasks(self): """Cancel all asyncio tasks.""" tasks = [ task for task in asyncio.all_tasks() if task is not asyncio.current_task() ] if tasks: for task in tasks: task.cancel() await asyncio.wait(tasks, timeout=1) def update_server_config_status(self): """Check and update value of the `ssh_server_config_ready`, update screen.""" def update(): if all(self.get_pythonhere_config().values()): self.ssh_server_config_ready.set() screen.update() screen = self.root.ids.here_screen_manager screen.current = ServerState.starting_server self.root.switch_screen(ScreenName.here) threading.Thread(name="update_server_config_status", target=update).start() def get_pythonhere_config(self): """Return user settings for SSH server.""" return 
self.settings.get_pythonhere_config() def update_ssh_server_namespace(self, namespace: Dict[str, Any]): """Update SSH server namespace.""" self.ssh_server_namespace.update(namespace) def on_start(self): """App start handler.""" Logger.info("PythonHere: app started") def on_stop(self): """App stop handler.""" Logger.info("PythonHere: app stopped") def on_pause(self): """Pause mode request handler.""" return True def on_ssh_connection_made(self): """New authenticated SSH client connected handler.""" Logger.info("PythonHere: new SSH client connected") if not self.ssh_server_connected.is_set(): self.ssh_server_connected.set() Logger.info("PythonHere: reset window environment") self.ssh_server_namespace["root"] = reset_window_environment() self.chdir(self.upload_dir) def chdir(self, path: str): """Changes the working directory.""" Logger.info("PythonHere: change working directory to %s", path) os.chdir(path) sys.path.insert(0, path) if __name__ == "__main__": loop = asyncio.get_event_loop() loop.run_until_complete(PythonHereApp().run_app()) loop.close()
python
#!/usr/bin/env python3

import random

# random.seed(1)  # comment-out this line to change sequence each time

# Write a program that stores random DNA sequence in a string
# The sequence should be 30 nt long
# On average, the sequence should be 60% AT
# Calculate the actual AT fraction while generating the sequence
# Report the length, AT fraction, and sequence

seq = ''
at_count = 0
for i in range(30):
    n = random.randint(1, 10)
    print(n, end=' ')
    if 1 <= n <= 3:
        seq += 'A'
        at_count += 1
    elif 4 <= n <= 6:
        seq += 'T'
        at_count += 1
    elif 7 <= n <= 8:
        seq += 'G'
    else:
        seq += 'C'
print('\n', len(seq), at_count / len(seq), seq)

"""
python3 at_seq.py
30 0.6666666666666666 ATTACCGTAATCTACTATTAAGTCACAACC
"""
python
import numpy as np import typing as tp import matplotlib.pyplot as plt import pickle import scipy.signal as signal import shapely.geometry import scipy.interpolate as interp from taylor import PointAccumulator from dataclasses import dataclass def find_datapoints(image, start=0): # _image = 255 - image _image = image window1 = signal.gaussian(50, 15) window1_sum = window1.sum() differentiator = PointAccumulator(num_lines=1) x = np.linspace(0, 1, _image.shape[0]) for i in range(start, _image.shape[1]): raw_signal = _image[:, i] filtered_signal = signal.fftconvolve(raw_signal, window1, mode='same')/window1_sum peaks = np.sort(signal.find_peaks( filtered_signal, prominence=5, distance=100 )[0]) # peaks = sorted(tmp_peaks, key=lambda x: filtered_signal[x], reverse=True)[:4] # yield i, filtered_signal[peaks] if len(peaks) == 0: continue new_points = differentiator.add_point(i, peaks, look_back=3) # Probably want to move away from generator. Use differentiator always yield i, new_points # TODO: Return any number of points, and use separate method to filter # yield i, peaks[:1] # TODO: Return any number of points, and use separate method to filter fig, (ax1, ax2) = plt.subplots(2) ax2.imshow(_image, cmap="gray") ax2.axvline(i, color="r") ax1.plot(raw_signal) ax1.plot(filtered_signal, "--") ax1.plot(peaks, filtered_signal[peaks], "x", linewidth=20) plt.show() plt.close(fig) if __name__ == "__main__": # contours = list(np.load("contours.npy", allow_pickle=True)) # take1(contours) # take2(contours) for contour_number in [3]: contour_image = np.load(f"tmp_contours/image_contour{contour_number}.npy") # plt.imshow(contour_image) # plt.show() # assert False # print(contour_image.shape) new_image = np.zeros(contour_image.shape) point_list = [] x_list = [] y_list = [] for i, new_y in find_datapoints(contour_image, start=7300): # point_list.append((i, new_y)) new_y = new_y[0] new_image[int(new_y), i] = 255 x_list.append(i) y_list.append(int(new_y)) fig, (ax1, ax2) = plt.subplots(2) ax1.imshow(new_image) x_arr = np.asarray(x_list, dtype=np.float_) y_arr = np.asarray(y_list, dtype=np.float_) y_arr -= y_arr.mean() # mean zero y_arr *= -1 # flip ax2.plot(x_arr, y_arr) out_array = np.zeros((x_arr.size, 2)) out_array[:, 0] = x_arr out_array[:, 1] = y_arr np.save(f"tmp_lines/out_array{contour_number}", out_array) plt.show() # from scipy.signal import welch # f, pxx = welch(y_arr, 1600e3) # plt.loglog(f, pxx) # plt.show() # for i in range(100, contour_image.shape[1]): # for i in range(100, 200): # print(np.median(contour_image[i, :]))
python
""" Tests for the GeniusZone class """ import unittest from unittest.mock import Mock from geniushubclient.const import IMODE_TO_MODE, ZONE_MODE, ZONE_TYPE from geniushubclient.zone import GeniusZone class GeniusZoneDataStateTests(unittest.TestCase): """ Test for the GeniusZone Class, state data. """ _device_id = "Device Id" _zone_name = "Zone Name" raw_json = { "iID": _device_id, "strName": _zone_name, "bIsActive": 0, "bInHeatEnabled": 0, "bOutRequestHeat": 0, "fBoostSP": 0, "fPV": 21.0, "fPV_offset": 0.0, "fSP": 14.0, "iBoostTimeRemaining": 0, "iFlagExpectedKit": 517, "iType": ZONE_TYPE.OnOffTimer, "iMode": ZONE_MODE.Off, "objFootprint": { "bIsNight": 0, "fFootprintAwaySP": 14.0, "iFootprintTmNightStart": 75600, "iProfile": 1, "lstSP": [{ "fSP": 16.0, "iDay": 0, "iTm": 0 }, { "fSP": 14.0, "iDay": 0, "iTm": 23400 }, { "fSP": 20.0, "iDay": 0, "iTm": 59700 }, { "fSP": 14.0, "iDay": 0, "iTm": 75000 }, { "fSP": 16.0, "iDay": 0, "iTm": 75600 } ], "objReactive": { "fActivityLevel": 0.0 } }, "objTimer": [{ "fSP": 14.0, "iDay": 0, "iTm": -1 }], "trigger": { "reactive": 0, "output": 0 }, "warmupDuration": { "bEnable": "true", "bEnableCalcs": "true", "fRiseRate": 0.5, "iLagTime": 2420, "iRiseTime": 300, "iTotalTime": 2720 }, "zoneReactive": { "fActivityLevel": 0 }, "zoneSubType": 1 } def setUp(self): hub = Mock() hub.api_version = 3 self.hub = hub def test_when_bIsActive_is_false_then_state_bIsActive_false(self): "Check that the bIsActive is correctly set to false" self.raw_json["bIsActive"] = 0 genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertFalse(genius_zone.data["_state"]["bIsActive"]) def test_when_bIsActive_is_true_then_state_bIsActive_true(self): "Check that the bIsActive is correctly set to true" self.raw_json["bIsActive"] = 1 genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertTrue(genius_zone.data["_state"]["bIsActive"]) def test_when_bOutRequestHeat_is_false_then_output_false(self): "Check that the bOutRequestHeat is correctly set to false" self.raw_json["bOutRequestHeat"] = 0 genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertEqual(genius_zone.data["output"], 0) def test_when_bOutRequestHeat_is_true_then_output_true(self): "Check that the bOutRequestHeat is correctly set to true" self.raw_json["bOutRequestHeat"] = 1 genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertEqual(genius_zone.data["output"], 1) def test_when_iMode_set_then_state_mode_is_set_correctly(self): "Check that the mode is set on the class" for zone_mode, zone_mode_text in IMODE_TO_MODE.items(): with self.subTest(zone_mode=zone_mode, zone_mode_text=zone_mode_text): self.raw_json["iMode"] = zone_mode self.raw_json["zoneSubType"] = 1 genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertEqual(genius_zone.data["mode"], zone_mode_text) def test_when_iType_should_set_temperature_state_temperature_set_correctly(self): "Check that the temperature is set for certain values of iType" temperature = 20.0 self.raw_json["fPV"] = temperature test_values = ( ZONE_TYPE.ControlSP, ZONE_TYPE.TPI, ZONE_TYPE.Manager ) for zone_type in test_values: with self.subTest(zone_type=zone_type): self.raw_json["iType"] = zone_type genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertEqual(genius_zone.data["temperature"], temperature) def test_when_iType_should_not_set_temperature_state_temperature_not_set(self): "Check that the temperature is not set for certain values of iType" 
self.raw_json["fPV"] = 20.0 test_values = ( ZONE_TYPE.OnOffTimer, ZONE_TYPE.ControlOnOffPID, ZONE_TYPE.Surrogate ) for zone_type in test_values: with self.subTest(zone_type=zone_type): self.raw_json["iType"] = zone_type genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertFalse("temperature" in genius_zone.data) def test_when_iType_should_set_setpoint_state_setpoint_set_correctly(self): "Check that the setpoint is set for certain values of iType" setpoint = 21.0 self.raw_json["fSP"] = setpoint test_values = ( ZONE_TYPE.ControlSP, ZONE_TYPE.TPI ) for zone_type in test_values: with self.subTest(zone_type=zone_type): self.raw_json["iType"] = zone_type genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertEqual(genius_zone.data["setpoint"], setpoint) def test_when_iType_should_not_set_setpoint_state_setpoint_not_set(self): "Check that the setpoint is not set for certain values of iType" self.raw_json["fSP"] = 21.0 test_values = ( ZONE_TYPE.Manager, ZONE_TYPE.ControlOnOffPID, ZONE_TYPE.Surrogate ) for zone_type in test_values: with self.subTest(zone_type=zone_type): self.raw_json["iType"] = zone_type genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertFalse("setpoint" in genius_zone.data) def test_when_iType_OnOffTimer_fSP_not_zero_setpoint_state_setpoint_set_true(self): """Check that the setpoint is set to true when iType is OnOffTimer and fSP is not zero""" self.raw_json["fSP"] = 1.0 self.raw_json["iType"] = ZONE_TYPE.OnOffTimer genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertTrue(genius_zone.data["setpoint"]) def test_when_iType_OnOffTimer_fSP_zero_setpoint_state_setpoint_set_false(self): """Check that the setpoint is set to false when iType is OnOffTimer and fSP is zero""" self.raw_json["fSP"] = 0.0 self.raw_json["iType"] = ZONE_TYPE.OnOffTimer genius_zone = GeniusZone(self._device_id, self.raw_json, self.hub) self.assertFalse(genius_zone.data["setpoint"])
python
from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('allcomment/', views.allcomment, name='allcomment'),
    path('allexpert/', views.allexpert, name='allexpert'),
    path('apply/', views.apply, name='apply'),
]
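# --- Hedged sketch (assumption, not project code) ---
# The URLconf above imports views.index, views.allcomment, views.allexpert and
# views.apply. Those callables live in views.py; a minimal shape they could take
# is sketched below purely for illustration, since the real templates and
# context are not shown here.
from django.http import HttpResponse


def index(request):
    # placeholder: the real view presumably renders a template
    return HttpResponse("index")


def allcomment(request):
    return HttpResponse("all comments")


def allexpert(request):
    return HttpResponse("all experts")


def apply(request):
    return HttpResponse("apply")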
python
# -*- coding: utf-8 -*- import wx import wx.xrc import time import pyperclip import os import sys import platform import data ########################################################################### ## Class MyFrame1 ########################################################################### class MyFrame1 ( wx.Frame ): def __init__( self, parent ): wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"PocLibrary", pos = wx.DefaultPosition, size = wx.Size( 300,150 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL ) self.SetSizeHints( wx.Size( 300,150 ), wx.Size( 300,150 ) ) bSizer1 = wx.BoxSizer( wx.VERTICAL ) self.m_staticText2 = wx.StaticText( self, wx.ID_ANY, u"请选择查询的模块:", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText2.Wrap( -1 ) bSizer1.Add( self.m_staticText2, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 ) m_comboBox1Choices = data.module_list self.m_comboBox1 = wx.ComboBox( self, wx.ID_ANY, u"请选择!", wx.DefaultPosition, wx.Size( 150,-1 ), m_comboBox1Choices, 0 ) bSizer1.Add( self.m_comboBox1, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 ) self.m_button1 = wx.Button( self, wx.ID_ANY, u"确定", wx.DefaultPosition, wx.DefaultSize, 0 ) bSizer1.Add( self.m_button1, 0, wx.ALL|wx.ALIGN_CENTER_HORIZONTAL, 5 ) self.m_button1.Bind(wx.EVT_BUTTON, self.select_module) self.SetSizer( bSizer1 ) self.Layout() self.Centre( wx.BOTH ) def select_module(self, event): global module module = self.m_comboBox1.GetValue() if module in data.module_list: win = MyFrame2(parent=None) win.Show() time.sleep(0.5) self.Destroy() else: temp_win = MyFrame3(parent=None) temp_win.Show() def __del__( self ): pass ########################################################################### ## Class MyFrame2 ########################################################################### class MyFrame2 ( wx.Frame ): def __init__( self, parent ): wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = u"PocLibrary - Produced by Coldsnap", pos = wx.DefaultPosition, size = wx.Size( 800,750 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL ) self.SetSizeHints( wx.Size( 800,750 ), wx.Size( 800,750 ) ) wSizer1 = wx.WrapSizer( wx.HORIZONTAL, wx.WRAPSIZER_DEFAULT_FLAGS ) self.m_staticText3 = wx.StaticText( self, wx.ID_ANY, u"请选择查询的POC/EXP:", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText3.Wrap( -1 ) wSizer1.Add( self.m_staticText3, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 ) m_comboBox2Choices = self.setchoices(module) self.m_comboBox2 = wx.ComboBox( self, wx.ID_ANY, u"请选择!", wx.DefaultPosition, wx.Size( 500,-1 ), m_comboBox2Choices, 0 ) wSizer1.Add( self.m_comboBox2, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 ) self.m_button2 = wx.Button( self, wx.ID_ANY, u"确定", wx.DefaultPosition, wx.DefaultSize, 0 ) wSizer1.Add( self.m_button2, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 ) self.m_button2.Bind(wx.EVT_BUTTON, self.selectPoc) self.m_staticText4 = wx.StaticText( self, wx.ID_ANY, u"漏洞信息:", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText4.Wrap( -1 ) wSizer1.Add( self.m_staticText4, 0, wx.ALL, 5 ) self.m_textCtrl1 = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(700, 200), 0 | wx.TE_READONLY | wx.TE_MULTILINE) self.m_textCtrl1.Enable(True) self.m_textCtrl1.SetMinSize(wx.Size(700, 200)) self.m_textCtrl1.SetMaxSize(wx.Size(700, 200)) wSizer1.Add(self.m_textCtrl1, 0, wx.ALL, 5) self.m_staticText5 = wx.StaticText( self, wx.ID_ANY, u"利用信息:", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText5.Wrap( -1 ) wSizer1.Add( self.m_staticText5, 0, wx.ALL, 5 ) self.m_textCtrl2 = wx.TextCtrl(self, 
wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(400, 200), 0 | wx.TE_READONLY | wx.TE_MULTILINE) self.m_textCtrl2.Enable(True) self.m_textCtrl2.SetMinSize(wx.Size(700, 200)) self.m_textCtrl2.SetMaxSize(wx.Size(700, 200)) wSizer1.Add(self.m_textCtrl2, 0, wx.ALL, 5) self.m_staticText71 = wx.StaticText( self, wx.ID_ANY, u"利用内容:", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText71.Wrap( -1 ) wSizer1.Add( self.m_staticText71, 0, wx.ALL, 5 ) self.m_textCtrl3 = wx.TextCtrl(self, wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.Size(700, 200), 0 | wx.TE_READONLY | wx.TE_MULTILINE) self.m_textCtrl3.Enable(True) self.m_textCtrl3.SetMinSize(wx.Size(700, 200)) self.m_textCtrl3.SetMaxSize(wx.Size(700, 200)) wSizer1.Add(self.m_textCtrl3, 0, wx.ALL, 5) self.m_staticText9 = wx.StaticText( self, wx.ID_ANY, u"复制利用内容", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText9.Wrap( -1 ) wSizer1.Add( self.m_staticText9, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 ) self.m_button7 = wx.Button( self, wx.ID_ANY, u"Copy", wx.DefaultPosition, wx.Size( 65,-1 ), 0 ) wSizer1.Add( self.m_button7, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 ) self.m_button7.Bind(wx.EVT_BUTTON, self.copyCode) self.m_staticText10 = wx.StaticText( self, wx.ID_ANY, u"重新选择模块", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText10.Wrap( -1 ) wSizer1.Add( self.m_staticText10, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 ) self.m_button8 = wx.Button( self, wx.ID_ANY, u"Return", wx.DefaultPosition, wx.Size( 65,-1 ), 0 ) wSizer1.Add( self.m_button8, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 ) self.m_button8.Bind(wx.EVT_BUTTON, self.back) self.m_staticText11 = wx.StaticText( self, wx.ID_ANY, u"退出程序", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText11.Wrap( -1 ) wSizer1.Add( self.m_staticText11, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 ) self.m_button9 = wx.Button( self, wx.ID_ANY, u"Exit", wx.DefaultPosition, wx.Size( 65,-1 ), 0 ) wSizer1.Add( self.m_button9, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5 ) self.m_button9.Bind(wx.EVT_BUTTON, self.exit) self.SetSizer( wSizer1 ) self.Layout() self.Centre( wx.BOTH ) # MyFrame1窗体模块参数送到MyFrame2创建对应窗体 def setchoices(self, module): if module == "Drupal": return data.drupalchoice elif module == "F5": return data.f5choice elif module == "Fastjson": return data.fastjsonchoice elif module == "Jboss": return data.jbosschoice elif module == "Nexus": return data.nexuschoice elif module == "Shiro": return data.shirochoice elif module == "Apache-Solr": return data.solrchoice elif module == "Spring": return data.springchoice elif module == "Struts2": return data.struts2choice elif module == "Tomcat": return data.tomcatchoice elif module == "Weblogic": return data.weblogicchoice elif module == "Linux-local": return data.linuxchoice elif module == "Webmin": return data.webminchoice elif module == "IIS": return data.iischoice elif module == "OA-System": return data.oachoice elif module == "IOT": return data.iotchoice elif module == "CMS": return data.cmschoice elif module == "Windows": return data.winchioce elif module == "WebFramework": return data.webframechoice elif module == "Others": return data.otherchoice # MyFrame2窗体选择POC/EXP后获取具体选项 def selectPoc(self, event): str = self.m_comboBox2.GetValue() if str in data.drupalchoice: self.readfile(str) elif str in data.f5choice: self.readfile(str) elif str in data.jbosschoice: self.readfile(str) elif str in data.nexuschoice: self.readfile(str) elif str in data.shirochoice: self.readfile(str) elif str in data.solrchoice: self.readfile(str) elif str in data.springchoice: 
self.readfile(str) elif str in data.struts2choice: self.readfile(str) elif str in data.tomcatchoice: self.readfile(str) elif str in data.weblogicchoice: self.readfile(str) elif str in data.fastjsonchoice: self.readfile(str) elif str in data.linuxchoice: self.readfile(str) elif str in data.webminchoice: self.readfile(str) elif str in data.iischoice: self.readfile(str) elif str in data.oachoice: self.readfile(str) elif str in data.iotchoice: self.readfile(str) elif str in data.cmschoice: self.readfile(str) elif str in data.winchioce: self.readfile(str) elif str in data.webframechoice: self.readfile(str) elif str in data.otherchoice: self.readfile(str) else: temp_win = MyFrame3(parent=None) temp_win.Show() # Windows下pyinstaller包含资源后在程序运行时产生临时文件夹,该函数返回资源临时文件夹地址 def source_path(self, relative_path): # 是否Bundle Resource if getattr(sys, 'frozen', False): base_path = sys._MEIPASS # IDE运行报错,仅生成exe可执行文件时生效 else: base_path = os.path.abspath(".") return os.path.join(base_path, relative_path) # 根据MyFrame2传回的具体POC/EXP读取对应文件 def readfile(self, str): os_name = platform.system() if os_name == 'Windows': vuln_file = open(self.source_path('Library/') + module + "/" + str + "_vul.txt", encoding="utf-8") info_file = open(self.source_path('Library/') + module + "/" + str + ".txt", encoding="utf-8") code_file = open(self.source_path('Library/') + module + "/" + str, encoding="utf-8") self.m_textCtrl1.SetValue(vuln_file.read()) vuln_file.close() self.m_textCtrl2.SetValue(info_file.read()) info_file.close() self.m_textCtrl3.SetValue(code_file.read()) code_file.close() elif os_name == 'Darwin': vuln_file = open(os.getcwd() + "/Library/" + module + "/" + str + "_vul.txt", encoding="utf-8") info_file = open(os.getcwd() + "/Library/" + module + "/" + str + ".txt", encoding="utf-8") code_file = open(os.getcwd() + "/Library/" + module + "/" + str, encoding="utf-8") self.m_textCtrl1.SetValue(vuln_file.read()) vuln_file.close() self.m_textCtrl2.SetValue(info_file.read()) info_file.close() self.m_textCtrl3.SetValue(code_file.read()) code_file.close() # Copy功能对应的事件处理函数 def copyCode(self, event): pyperclip.copy(self.m_textCtrl3.GetValue()) # Back功能对应的事件处理函数 def back(self, event): win = MyFrame1(parent=None) win.Show() time.sleep(0.5) self.Destroy() # Exit功能对应的事件处理函数 def exit(self, event): time.sleep(0.5) self.Destroy() def __del__( self ): pass ########################################################################### ## Class MyFrame3 ########################################################################### class MyFrame3 ( wx.Frame ): def __init__( self, parent ): wx.Frame.__init__ ( self, parent, id = wx.ID_ANY, title = wx.EmptyString, pos = wx.DefaultPosition, size = wx.Size( 200,100 ), style = wx.DEFAULT_FRAME_STYLE|wx.TAB_TRAVERSAL ) self.SetSizeHints( wx.Size( 200,100 ), wx.Size( 200,100 ) ) bSizer3 = wx.BoxSizer( wx.VERTICAL ) self.m_staticText19 = wx.StaticText( self, wx.ID_ANY, u"\n\n错误,请重新选择!", wx.DefaultPosition, wx.DefaultSize, 0 ) self.m_staticText19.Wrap( -1 ) self.m_staticText19.SetFont( wx.Font( wx.NORMAL_FONT.GetPointSize(), wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, wx.EmptyString ) ) bSizer3.Add( self.m_staticText19, 0, wx.ALIGN_CENTER_HORIZONTAL|wx.BOTTOM|wx.RIGHT, 5 ) self.SetSizer( bSizer3 ) self.Layout() self.Centre( wx.BOTH ) def __del__( self ): pass
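# --- Hedged launch sketch (assumption: no entry point is shown in this excerpt) ---
# A wxPython GUI needs a wx.App instance and a main loop before any frame can be
# shown; something along these lines is presumably how MyFrame1 gets started.
if __name__ == '__main__':
    app = wx.App(False)
    frame = MyFrame1(parent=None)
    frame.Show()
    app.MainLoop()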
python
'''Quick accuracy check of the trained DESI speculator emulator in provabgs.'''
import numpy as np

from provabgs import models as Models


def test_DESIspeculator():
    ''' script to test the trained speculator model for DESI
    '''
    # initiate desi model
    Mdesi = Models.DESIspeculator()

    # load test parameters and test spectra (hard-coded local paths from the original script)
    test_theta = np.load('/Users/chahah/data/gqp_mc/speculator/DESI_complexdust.theta_test.npy')
    test_logspec = np.load('/Users/chahah/data/gqp_mc/speculator/DESI_complexdust.logspectrum_fsps_test.npy')

    # print the per-wavelength fractional accuracy of the emulated spectrum
    # for the first 10 test points
    for i in range(10):
        print(1. - (Mdesi._emulator(test_theta[i]) - np.exp(test_logspec[i])) / np.exp(test_logspec[i]))
        print('')
    return None


if __name__ == "__main__":
    test_DESIspeculator()
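# --- Hedged variant (assumption, not part of provabgs) ---
# Instead of printing per-wavelength accuracy, the same comparison can be turned
# into an assertion. The 1% tolerance below is an arbitrary illustration, not a
# documented requirement of the emulator.
def check_emulator_accuracy(n_test=10, rtol=0.01):
    Mdesi = Models.DESIspeculator()
    test_theta = np.load('/Users/chahah/data/gqp_mc/speculator/DESI_complexdust.theta_test.npy')
    test_logspec = np.load('/Users/chahah/data/gqp_mc/speculator/DESI_complexdust.logspectrum_fsps_test.npy')
    for i in range(n_test):
        truth = np.exp(test_logspec[i])
        frac_err = np.abs(Mdesi._emulator(test_theta[i]) - truth) / truth
        assert frac_err.max() < rtol, 'emulator error %.4f exceeds %.4f' % (frac_err.max(), rtol)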
python
import datetime import difflib # import datefinder from dateparser.search import search_dates from dateutil.parser import parse from SMELT.validators.twitter.tweets import get_tweets from SMELT.Validation import Validator # from twitterscraper import import twint def fetch_closest_matching_tweet(username, message, time): tweets = [] tweet = None conf = 0 for tweet in get_tweets(username, pages=1): print(tweet['time'].date(), time.date()) if tweet['time'].date() == time.date(): tweets.append(tweet) # print(tweets) messages = list(map(lambda x: x['text'], tweets)) matches = difflib.get_close_matches(message, messages, cutoff=0.7) if matches: text = matches[0] tweet = list(filter(lambda x: x['text'] == text, tweets))[0] conf = difflib.SequenceMatcher(None, text, message).ratio() else: conf = 1 return tweet, conf class SimpleTwitterValidator(Validator): display_name = "" username = "" body = "" time = "" conf = 0 failed = False tweet = {} tc = None def __init__(self, image, **kwargs): super().__init__(image, confidence=0.9, **kwargs) if SimpleTwitterValidator.tc is None: SimpleTwitterValidator.setup() @staticmethod def setup(config=None, user_list=()): if config: SimpleTwitterValidator.tc = config else: SimpleTwitterValidator.tc = twint.Config() SimpleTwitterValidator.tc.Members_list = user_list SimpleTwitterValidator.tc.Database def get_tweet_date(self): matches = list(datefinder.find_dates(self.ocr.string)) for line in self.ocr.lines: matches2 = parse() print(matches2) # d = matches[0] # try: # date = '-'.join(dateline.split('-')[:2]).strip() # try: # time = datetime.datetime.strptime(date, '%I:%M %p - %m/%d/%y') # except ValueError: # time = datetime.datetime.strptime(date, '%I:%M %p - %b %d, %Y') return matches[0] def handle(self): print(self.ocr.lines) username = self.ocr.lines[1].split('@')[-1] message = ' '.join(self.ocr.chunks[1]) time = self.get_tweet_date() print(time, username) self.tweet, self.conf = fetch_closest_matching_tweet(username, message, time) if self.tweet is None: self.failed = True def confidence(self): return max(min(self.conf + 0.01, 1), 0) def __str__(self): return """ \rTWEET: %s \rCONFIDENCE: %f \rPASSING: %r """ % (self.tweet, self.confidence(), self.passing())
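# --- Hedged standalone sketch of the matching step used above ---
# fetch_closest_matching_tweet relies on difflib to pick the scraped tweet whose
# text is closest to the OCR'd message. The helper below shows that core step in
# isolation with fabricated tweet dicts and no network access; unlike the
# function above, it returns (None, 0.0) when nothing clears the cutoff.
import difflib


def closest_tweet(message, tweets, cutoff=0.7):
    texts = [t['text'] for t in tweets]
    matches = difflib.get_close_matches(message, texts, cutoff=cutoff)
    if not matches:
        return None, 0.0
    best = matches[0]
    tweet = next(t for t in tweets if t['text'] == best)
    confidence = difflib.SequenceMatcher(None, best, message).ratio()
    return tweet, confidence


if __name__ == '__main__':
    # purely illustrative data
    example_tweets = [{'text': 'just shipped the new release'}, {'text': 'lunch was great'}]
    print(closest_tweet('just shiped the new release!', example_tweets))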
python
# # This file is part of Brazil Data Cube Collection Builder. # Copyright (C) 2019-2020 INPE. # # Brazil Data Cube Collection Builder is free software; you can redistribute it and/or modify it # under the terms of the MIT License; see LICENSE file for more details. # """Define the Collection Builder utilities for Landsat data products.""" import logging import tarfile from datetime import datetime from pathlib import Path from bdc_core.decorators.utils import working_directory from ...config import Config class LandsatProduct: """Define base class for handling Landsat data products.""" def __init__(self, scene_id: str): """Build a Landsat class.""" self.scene_id = scene_id self._fragments = LandsatProduct.parse_scene_id(scene_id) @property def scene_fragments(self): if self._fragments is None: self._fragments = LandsatProduct.parse_scene_id(self.scene_id) return self._fragments @staticmethod def parse_scene_id(scene_id: str): """Parse a Landsat Scene Identifier.""" fragments = scene_id.split('_') if len(fragments) != 7: raise ValueError('Invalid scene id Landsat') return fragments @property def id(self) -> str: """Retrieve Landsat Collection ID on Brazil Data Cube.""" raise NotImplementedError() @property def level(self) -> int: """Retrieve Landsat Collection Level.""" raise NotImplementedError() def satellite(self) -> str: """Retrieve scene satellite.""" part = self._fragments[0] return part[-2:] def tile_id(self) -> str: """Retrieve Landsat scene Path row.""" return self._fragments[2] def source(self) -> str: """Retrieve Landsat source part from scene id.""" return self._fragments[0] def sensing_date(self) -> datetime: """Retrieve Landsat scene sensing date.""" return datetime.strptime(self._fragments[3], '%Y%m%d') def get_band_map(self) -> dict: raise NotImplementedError() def google_path(self) -> Path: """Retrieve a formal path for Landsat on Google Provider. Example: >>> scene = LandsatDigitalNumber08('LC08_L1GT_044034_20130330_20170310_01_T2') >>> print(str(scene.google_path())) ... 'LC08/01/044/034/LC08_L1GT_044034_20130330_20170310_01_T2' """ first_part = Path(self._fragments[0]) path = self._fragments[2][:3] row = self._fragments[2][-3:] path = first_part / '01' / path / row / self.scene_id return path def path(self, prefix=Config.DATA_DIR): """Retrieve relative path on Brazil Data Cube cluster. Example: >>> scene = LandsatDigitalNumber08('LC08_L1GT_044034_20130330_20170310_01_T2') >>> print(str(scene.path())) ... '/gfs/Repository/Archive/LC8DN/2013-03/044034' """ year_month = self.sensing_date().strftime('%Y-%m') scene_path = Path(prefix or '') / 'Repository/Archive' / self.id / year_month / self.tile_id() return scene_path def compressed_file(self): """Retrieve path to the compressed file (L1).""" year_month = self.sensing_date().strftime('%Y-%m') product_version = int(self._fragments[0][-2:]) if product_version == 8: collection = 'LC8' else: collection = '{}{}'.format(self._fragments[0][:2], product_version) scene_path = Path(Config.DATA_DIR) / 'Repository/Archive' / collection / year_month / self.tile_id() return scene_path / '{}.tar.gz'.format(self.scene_id) def compressed_file_bands(self): relative_path = self.compressed_file().parent files = [ relative_path / '{}_{}.TIF'.format(self.scene_id, band) for band in self.get_band_map().values() ] files.append(relative_path / '{}_ANG.txt'.format(self.scene_id)) files.append(relative_path / '{}_MTL.txt'.format(self.scene_id)) return files def get_files(self): """Try to find of file names from Brazil Data Cube Cluster. 
Note: The scene must be published in order to retrieve the file list. Example: >>> scene = LandsatDigitalNumber08('LC08_L1TP_220069_20180618_20180703_01_T1') >>> print(str(scene.path())) ... ['/gfs/Repository/Archive/LC8DN/2018-06/220069/LC08_L1TP_220069_20180618_20180703_01_T1_B1.TIF', ... '/gfs/Repository/Archive/LC8DN/2018-06/220069/LC08_L1TP_220069_20180618_20180703_01_T1_B2.TIF'] """ scene_path = self.path() scene_id_without_processing_date = '{}_*_{}*'.format( '_'.join(self._fragments[:4]), '_'.join(self._fragments[-2:]) ) logging.debug('Searching on {} with {}'.format(str(scene_path), scene_id_without_processing_date)) files = scene_path.glob(scene_id_without_processing_date) return list([f for f in files if f.suffix.lower() == '.tif']) class LandsatDigitalNumber08(LandsatProduct): """Landsat 8 Digital Number.""" id = 'LC8DN' level = 1 def get_band_map(self) -> dict: return dict( coastal='B1', blue='B2', green='B3', red='B4', nir='B5', swir1='B6', swir2='B7', quality='BQA', panchromatic='B8', cirrus='B9', tirs1='B10', tirs2='B11' ) class LandsatSurfaceReflectance08(LandsatProduct): """Landsat 8 Surface Reflectance.""" id = 'LC8SR' level = 2 def get_band_map(self) -> dict: return dict( coastal='sr_band1', blue='sr_band2', green='sr_band3', red='sr_band4', nir='sr_band5', swir1='sr_band6', swir2='sr_band7', evi='sr_evi', ndvi='sr_ndvi', quality='Fmask4' ) class LandsatNBAR08(LandsatProduct): """Landsat 8 Nadir BRDF Adjusted Reflectance.""" id = 'LC8NBAR' level = 3 def get_band_map(self) -> dict: return dict( blue='sr_band2', green='sr_band3', red='sr_band4', nir='sr_band5', swir1='sr_band6', swir2='sr_band7', quality='pixel_qa' ) class LandsatDigitalNumber07(LandsatProduct): """Landsat 7 Digital Number.""" id = 'L7DN' level = 1 def get_band_map(self) -> dict: return dict( blue='B1', green='B2', red='B3', nir='B4', swir1='B5', tirs1='B6_VCID_1', tirs2='B6_VCID_2', swir2='B7', panchromatic='B8', quality='BQA' ) class LandsatSurfaceReflectance07(LandsatProduct): """Landsat 7 Surface Reflectance.""" id = 'L7SR' level = 2 def get_band_map(self) -> dict: return dict( blue='sr_band1', green='sr_band2', red='sr_band3', nir='sr_band4', swir1='sr_band5', swir2='sr_band7', evi='sr_evi', ndvi='sr_ndvi', quality='Fmask4' ) class LandsatDigitalNumber05(LandsatProduct): """Landsat 5 Digital Number.""" id = 'L5DN' level = 1 def get_band_map(self) -> dict: return dict( blue='B1', green='B2', red='B3', nir='B4', swir1='B5', tirs='B6', swir2='B7', quality='BQA' ) class LandsatSurfaceReflectance05(LandsatProduct): """Landsat 5 Surface Reflectance.""" id = 'L5SR' level = 2 def get_band_map(self) -> dict: return dict( blue='sr_band1', green='sr_band2', red='sr_band3', nir='sr_band4', swir1='sr_band5', swir2='sr_band7', evi='sr_evi', ndvi='sr_ndvi', quality='Fmask4' ) class LandsatFactory: """Define a factory to identify a Landsat product based on scene identifier.""" map = dict( l1=dict(), l2=dict(), l3=dict() ) def register(self): """Initialize factory object.""" self.map['l1'][LandsatDigitalNumber05.id] = LandsatDigitalNumber05 self.map['l2'][LandsatSurfaceReflectance05.id] = LandsatSurfaceReflectance05 self.map['l1'][LandsatDigitalNumber07.id] = LandsatDigitalNumber07 self.map['l2'][LandsatSurfaceReflectance07.id] = LandsatSurfaceReflectance07 self.map['l1'][LandsatDigitalNumber08.id] = LandsatDigitalNumber08 self.map['l2'][LandsatSurfaceReflectance08.id] = LandsatSurfaceReflectance08 self.map['l3'][LandsatNBAR08.id] = LandsatNBAR08 def get_from_collection(self, collection: str): """Retrieve the 
respective Landsat driver from given collection.""" for drivers_by_level in self.map.values(): for driver_name in drivers_by_level: if collection == driver_name: return drivers_by_level[driver_name] raise ValueError('Not found a valid driver for {}.'.format(collection)) def get_from_sceneid(self, scene_id: str, level=1) -> LandsatProduct: """Retrieve the respective Landsat driver from given scene id.""" fragments = LandsatProduct.parse_scene_id(scene_id) drivers_by_level = self.map.get('l{}'.format(level)) or dict() scene_satellite = int(fragments[0][-2:]) for key in drivers_by_level: satellite = key[1] if not satellite.isdigit(): satellite = key[2] satellite = int(satellite) if scene_satellite == satellite: driver = drivers_by_level[key] if driver.level == level: return driver(scene_id) raise ValueError('Not found a valid driver for {}'.format(scene_id)) factory = LandsatFactory() def compress_landsat_scene(scene: LandsatProduct, data_dir: str): """Compress the Landsat files to tar.gz. Args: scene - Landsat Product data_dir - Path to search for files """ try: context_dir = Path(data_dir) if not context_dir.exists() or not context_dir.is_dir(): raise IOError('Invalid directory to compress Landsat. "{}"'.format(data_dir)) compressed_file_path = Path(data_dir) / scene.compressed_file().name files = scene.compressed_file_bands() logging.debug('Compressing {}'.format(str(compressed_file_path))) # Create compressed file and make available with tarfile.open(compressed_file_path, 'w:gz') as compressed_file: with working_directory(str(context_dir)): for f in files: compressed_file.add(f.name) except BaseException: logging.error('Could not compress {}.tar.gz'.format(scene.scene_id), exc_info=True) raise return compressed_file_path
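# --- Hedged usage sketch; the scene id below is the one used in the docstrings above ---
# The factory needs register() to be called before any lookup. This module relies on
# relative imports, so the helper is illustrative rather than a runnable script.
def _example_factory_usage():
    factory.register()
    scene = factory.get_from_sceneid('LC08_L1GT_044034_20130330_20170310_01_T2', level=1)
    # resolves to LandsatDigitalNumber08 for a Landsat-8 level-1 scene id
    print(scene.id)             # 'LC8DN'
    print(scene.google_path())  # LC08/01/044/034/LC08_L1GT_044034_20130330_20170310_01_T2
    print(scene.path())         # <Config.DATA_DIR>/Repository/Archive/LC8DN/2013-03/044034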
python
"""Custom CSV-related functionality.""" import csv import os def create_csv(): """Create new csv to store git-geo result Delete any existing csv and the create new csv. Args: None Returns: None """ # delete csv if it already exists filename = "git-geo-results.csv" if os.path.exists(filename): os.remove(filename) # Create new csv file with column names with open(filename, "w") as file: fieldnames = ["pkg", "username", "location"] writer = csv.DictWriter(file, fieldnames=fieldnames) writer.writeheader() def add_committer_to_csv(pkg, username, location): """Write committer info to existing csv file Use to create dataset of location data for analysis. Args: pkg - package name username - GitHub username location - Geographic info from GitHub profile Returns: null """ with open("git-geo-results.csv", "a") as file: fieldnames = ["pkg", "username", "location"] writer = csv.DictWriter(file, fieldnames=fieldnames) writer.writerow({"pkg": pkg, "username": username, "location": location})
python
from __future__ import absolute_import, division, print_function, with_statement from __future__ import unicode_literals from tornado import ioloop, web, websocket, httpserver, concurrent from collections import defaultdict import mock class DeepstreamHandler(websocket.WebSocketHandler): connections = defaultdict(set) received_messages = defaultdict(list) sent_messages = defaultdict(list) callbacks = defaultdict(mock.Mock) def open(self): self._path = self.request.path self._messages = [] DeepstreamHandler.connections[self._path].add(self) self._msg_future = None self._close_future = None def on_message(self, message): DeepstreamHandler.received_messages[self._path].append(message) if self._msg_future: self._msg_future.set_result(message) def write_message(self, message): DeepstreamHandler.sent_messages[self._path].append(message) return super(DeepstreamHandler, self).write_message(message) def on_close(self): DeepstreamHandler.connections[self._path].remove(self) if self._close_future: self._close_future.set_result(True) def message_future(self): self._msg_future = concurrent.Future() return self._msg_future def close_future(self): self._close_future = concurrent.Future() return self._close_future def _connections(request_path): return DeepstreamHandler.connections[request_path] def _sent_messages(request_path): return DeepstreamHandler.sent_messages[request_path] def _received_messages(request_path): return DeepstreamHandler.received_messages[request_path] def _num_connection(request_path): return len(_connections(request_path)) def _create_server(port, path): application = web.Application([ (path, DeepstreamHandler), ]) server = httpserver.HTTPServer(application) server.listen(port) return server def before_all(context): context.uid_patcher = mock.patch("deepstreampy.utils.get_uid", return_value="<UID>") context.uid_patcher.start() def after_all(context): context.uid_patcher.stop() def after_step(context, step): if "the server sends the message" in step.name: context.io_loop.call_later(0.03, context.io_loop.stop) context.io_loop.start() def before_scenario(context, scenario): if ioloop.IOLoop.initialized(): context.io_loop = ioloop.IOLoop.current() else: context.io_loop = ioloop.IOLoop(make_current=True) context.server = None context.other_server = None DeepstreamHandler.connections.clear() DeepstreamHandler.received_messages.clear() DeepstreamHandler.sent_messages.clear() DeepstreamHandler.callbacks.clear() context.create_server = _create_server context.num_connections = _num_connection context.connections = _connections context.sent_messages = _sent_messages context.received_messages = _received_messages context.client = None context.client_errors = [] context.event_callbacks = {} context.has_callbacks = {} context.snapshot_callbacks = {} context.subscribe_callback = None context.presence_callback = None context.presence_query_callback = None context.rpc_provide_callback = None context.rpc_request_callback = None context.listen_callback = None context.rpc_response = None context.records = {} context.write_acknowledge = mock.Mock() context.login_future = None def after_scenario(context, scenario): context.io_loop.clear_current() context.io_loop.close(all_fds=True)
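# --- Hedged illustration (assumption; the step wording is invented) ---
# The hooks above expose helpers such as create_server, connections and
# received_messages on the behave context. Step implementations, which would
# normally live under features/steps/ rather than in this file, could use them
# roughly like this:
from behave import given, then


@given('a deepstream server listening on port {port:d} at "{path}"')
def step_start_server(context, port, path):
    context.server = context.create_server(port, path)


@then('the server received {count:d} messages at "{path}"')
def step_check_received(context, count, path):
    assert len(context.received_messages(path)) == count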
python
# -*- coding: utf-8 -*- import json import logging from pathlib import Path from questionary import prompt from ... import constants as C from ...core import display from ...core.app import App from ...core.arguments import get_args from ...core.crawler import Crawler from .open_folder_prompt import display_open_folder logger = logging.getLogger(__name__) def resume_session(): args = get_args() output_path = args.resume or C.DEFAULT_OUTPUT_PATH resumable_meta_data = [] for meta_file in Path(output_path).glob('**/' + C.META_FILE_NAME): with open(meta_file, 'r', encoding="utf-8") as file: data = json.load(file) if 'session' in data and not data['session']['completed']: resumable_meta_data.append(data) # end if # end with # end for metadata = None if len(resumable_meta_data) == 1: metadata = resumable_meta_data[0] elif len(resumable_meta_data) > 1: answer = prompt([ { 'type': 'list', 'name': 'resume', 'message': 'Which one do you want to resume?', 'choices': display.format_resume_choices(resumable_meta_data), } ]) index = int(answer['resume'].split('.')[0]) metadata = resumable_meta_data[index - 1] # end if if not metadata: print('No unfinished download to resume\n') display.app_complete() return # end if app = load_session_from_metadata(metadata) assert isinstance(app.crawler, Crawler) print('Resuming', app.crawler.novel_title) print('Output path:', app.output_path) app.initialize() app.crawler.initialize() if app.can_do('login') and app.login_data: logger.debug('Login with %s', app.login_data) app.crawler.login(*list(app.login_data)) # end if app.start_download() app.bind_books() app.compress_books() app.destroy() display.app_complete() display_open_folder(app.output_path) # end def def load_session_from_metadata(data) -> App: app = App() session_data = data['session'] app.output_path = session_data['output_path'] app.user_input = session_data['user_input'] app.login_data = session_data['login_data'] app.pack_by_volume = session_data['pack_by_volume'] app.output_formats = session_data['output_formats'] app.good_file_name = session_data['good_file_name'] app.no_append_after_filename = session_data['no_append_after_filename'] logger.info('Novel Url: %s', data['url']) app.init_crawler(data['url']) if not isinstance(app.crawler, Crawler): raise Exception('No crawler found for ' + data['url']) app.crawler.novel_title = data['title'] app.crawler.novel_author = data['author'] app.crawler.novel_cover = data['cover'] app.crawler.volumes = data['volumes'] app.crawler.chapters = data['chapters'] app.crawler.is_rtl = data['rtl'] app.chapters = [ chap for chap in data['chapters'] if chap['id'] in session_data['download_chapters'] ] logger.info('Number of chapters to download: %d', len(app.chapters)) logger.debug(app.chapters) return app # end def
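# --- Hedged illustration of the metadata shape load_session_from_metadata expects ---
# The field names below are exactly the keys accessed above; the values are
# invented placeholders, and real meta files may carry additional fields.
EXAMPLE_METADATA = {
    'url': 'https://example.com/novel/some-title',
    'title': 'Some Title',
    'author': 'Unknown Author',
    'cover': 'https://example.com/cover.jpg',
    'volumes': [],
    'chapters': [],
    'rtl': False,
    'session': {
        'completed': False,
        'output_path': 'Lightnovels/some-title',
        'user_input': 'https://example.com/novel/some-title',
        'login_data': None,
        'pack_by_volume': False,
        'output_formats': {},
        'good_file_name': False,
        'no_append_after_filename': False,
        'download_chapters': [],
    },
}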
python