content (string, lengths 0–894k) · type (string, 2 classes)
import tkinter
import tkinter.filedialog
from PIL import Image, ImageTk
from torchvision import transforms
from test import main, model

# Create the UI
win = tkinter.Tk()
win.title("picture process")
win.geometry("1280x1080")

# Declare global variables
original = Image.new('RGB', (300, 400))
save_img = Image.new('RGB', (300, 400))
count = 0
e2 = None
e2 = str(e2)
file_name = None
img2 = tkinter.Label(win)


def choose_file():
    '''Select a picture.'''
    select_file = tkinter.filedialog.askopenfilename(title='select the picture')
    global file_name
    file_name = select_file
    e.set(select_file)
    load = Image.open(select_file)
    load = transforms.Resize((400, 400))(load)
    # declare global variable
    global original
    original = load
    render = ImageTk.PhotoImage(load)
    img = tkinter.Label(win, image=render)
    img.image = render
    img.place(x=100, y=100)


def coloring():
    '''Image generation (colorization).'''
    model()
    new_img = Image.open('generate.png')
    new_img = transforms.Resize((400, 400))(new_img)
    render = ImageTk.PhotoImage(new_img)
    global img2
    img2.destroy()
    img2 = tkinter.Label(win, image=render)
    img2.image = render
    img2.place(x=800, y=100)


def transfer():
    '''Style transfer: run main() on the selected file, then the generator.'''
    main(file_name)
    model()
    new_img = Image.open('generate.png')
    new_img = transforms.Resize((400, 400))(new_img)
    render = ImageTk.PhotoImage(new_img)
    global img2
    img2.destroy()
    img2 = tkinter.Label(win, image=render)
    img2.image = render
    img2.place(x=800, y=100)


def edge_detect():
    '''Edge detection.'''
    main(file_name)
    new_img = Image.open('canny&HED.jpg')
    new_img = transforms.Resize((400, 400))(new_img)
    render = ImageTk.PhotoImage(new_img)
    global img2
    img2.destroy()
    img2 = tkinter.Label(win, image=render)
    img2.image = render
    img2.place(x=800, y=100)


e = tkinter.StringVar()
e_entry = tkinter.Entry(win, width=68, textvariable=e)
e_entry.pack()

# File selection
button1 = tkinter.Button(win, text="Select", command=choose_file)
button1.pack()

button2 = tkinter.Button(win, text="edge detect", command=edge_detect, width=20, height=1)
button2.place(x=570, y=200)
button3 = tkinter.Button(win, text="coloring", command=coloring, width=20, height=1)
button3.place(x=570, y=300)
button4 = tkinter.Button(win, text="style transfer", command=transfer, width=20, height=1)
button4.place(x=570, y=400)

label1 = tkinter.Label(win, text="Original Picture")
label1.place(x=250, y=50)
label2 = tkinter.Label(win, text="style transfer!")
label2.place(x=950, y=50)

# Exit button
button0 = tkinter.Button(win, text="Exit", command=win.quit, width=20, height=1)
button0.place(x=570, y=650)

win.mainloop()
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Copyright 2015 RAPP

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Authors: Konstantinos Panayiotou, Manos Tsardoulias
# contact: [email protected], [email protected]


## @file RandStrGen/RandStrGen.py
#
#  @copyright Rapp Project EU 2015
#  @author Konstantinos Panayiotou, [[email protected]]
#

import random
import string


class RandStrGen:
    """ Random String Generator static class (Namespace).

    Generates random strings.
    """

    @staticmethod
    def create(size):
        """! Generate a new random string

        @param size int - Number of characters for the random string to generate
        """
        randStr = ''.join(
            random.SystemRandom().choice(
                string.ascii_uppercase + string.ascii_lowercase + string.digits)
            for _ in range(size))
        return randStr
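A quick usage sketch (hedged; the printed value is only illustrative):

token = RandStrGen.create(16)
print(token)  # e.g. 'aB3kZ9qLm0XyTr2W' -- 16 random alphanumeric characters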
python
# Train FSDKaggle2018 model
#
import sys
sys.path.append('../..')
from lib_train import *

conf.logdir = 'logs_mobilenetv2_small'
conf.best_weight_file = 'best_mobilenetv2_small_weight.h5'

# 1. Load meta data
DATAROOT = Path.home() / '.kaggle/competitions/freesound-audio-tagging'

# Data frame for training dataset
df_train = pd.read_csv(DATAROOT / 'train.csv')
# Plain y_train label
plain_y_train = np.array([conf.label2int[l] for l in df_train.label])

# 2. Preprocess data if it's not ready
def fsdkaggle2018_map_y_train(idx_train, plain_y_train):
    return np.array([plain_y_train[i] for i in idx_train])

def fsdkaggle2018_make_preprocessed_train_data():
    conf.folder.mkdir(parents=True, exist_ok=True)
    if not os.path.exists(conf.X_train):
        XX = mels_build_multiplexed_X(conf, [DATAROOT/'audio_train'/fname for fname in df_train.fname])
        X_train, y_train, X_test, y_test = \
            train_valid_split_multiplexed(conf, XX, plain_y_train, demux=True)
        np.save(conf.X_train, X_train)
        np.save(conf.y_train, y_train)
        np.save(conf.X_test, X_test)
        np.save(conf.y_test, y_test)

fsdkaggle2018_make_preprocessed_train_data()

# 3. Load all dataset & normalize
X_train, y_train = load_audio_datafiles(conf, conf.X_train, conf.y_train, normalize=True)
X_test, y_test = load_audio_datafiles(conf, conf.X_test, conf.y_test, normalize=True)
print('Loaded train:test = {}:{} samples.'.format(len(X_train), len(X_test)))

# 4. Train folds
history, model, plain_datagen = train_model(conf, fold=0,
                                            dataset=[X_train, y_train, X_test, y_test],
                                            model=None,
                                            init_weights=None,  # from scratch
                                            #init_weights='../../model/mobilenetv2_small_fsd2018_41cls.h5'
                                            )

# 5. Evaluate
evaluate_model(conf, model, X_test, y_test)

print('___ training finished ___')
python
""" GFS2FileSystemBlockSize - command ``stat -fc %s <mount_point_path>`` ==================================================================== The parser parse the output of ``stat -fc %s <mount_point_path>`` """ from insights import parser, CommandParser from insights.specs import Specs from insights.parsers import SkipException @parser(Specs.gfs2_file_system_block_size) class GFS2FileSystemBlockSize(CommandParser): """ Class for parsing ``stat -fc %s <mount_point_path>`` command output. The size is kept in the ``block_size`` property. Typical output of command ``stat -fc %s <mount_point_path>`` looks like:: 4096 Examples:: >>> type(gfs2_mp) <class 'insights.parsers.gfs2_file_system_block_size.GFS2FileSystemBlockSize'> >>> gfs2_mp.block_size 4096 Raise:: SkipException: When the content isn't in the expected format. Attributes:: block_size (int): The block size of the gfs2 file system. """ def parse_content(self, content): if len(content) == 1 and content[0].isdigit(): self.block_size = int(content[0]) else: raise SkipException('The output is invalid.')
python
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from .operator import Operator from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class Joiner(Operator): """ The information about a joiner object. """ #: A constant which can be used with the join_type property of a Joiner. #: This constant has a value of "INNER" JOIN_TYPE_INNER = "INNER" #: A constant which can be used with the join_type property of a Joiner. #: This constant has a value of "FULL" JOIN_TYPE_FULL = "FULL" #: A constant which can be used with the join_type property of a Joiner. #: This constant has a value of "LEFT" JOIN_TYPE_LEFT = "LEFT" #: A constant which can be used with the join_type property of a Joiner. #: This constant has a value of "RIGHT" JOIN_TYPE_RIGHT = "RIGHT" def __init__(self, **kwargs): """ Initializes a new Joiner object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.Joiner.model_type` attribute of this class is ``JOINER_OPERATOR`` and it should not be changed. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param model_type: The value to assign to the model_type property of this Joiner. Allowed values for this property are: "SOURCE_OPERATOR", "FILTER_OPERATOR", "JOINER_OPERATOR", "AGGREGATOR_OPERATOR", "PROJECTION_OPERATOR", "TARGET_OPERATOR", "DISTINCT_OPERATOR", "SORT_OPERATOR", "UNION_OPERATOR", "INTERSECT_OPERATOR", "MINUS_OPERATOR", "MERGE_OPERATOR", "START_OPERATOR", "END_OPERATOR", "PIPELINE_OPERATOR", "TASK_OPERATOR", "EXPRESSION_OPERATOR", "LOOKUP_OPERATOR", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type model_type: str :param key: The value to assign to the key property of this Joiner. :type key: str :param model_version: The value to assign to the model_version property of this Joiner. :type model_version: str :param parent_ref: The value to assign to the parent_ref property of this Joiner. :type parent_ref: oci.data_integration.models.ParentReference :param name: The value to assign to the name property of this Joiner. :type name: str :param description: The value to assign to the description property of this Joiner. :type description: str :param object_version: The value to assign to the object_version property of this Joiner. :type object_version: int :param input_ports: The value to assign to the input_ports property of this Joiner. :type input_ports: list[oci.data_integration.models.InputPort] :param output_ports: The value to assign to the output_ports property of this Joiner. :type output_ports: list[oci.data_integration.models.OutputPort] :param object_status: The value to assign to the object_status property of this Joiner. :type object_status: int :param identifier: The value to assign to the identifier property of this Joiner. :type identifier: str :param parameters: The value to assign to the parameters property of this Joiner. :type parameters: list[oci.data_integration.models.Parameter] :param op_config_values: The value to assign to the op_config_values property of this Joiner. 
:type op_config_values: oci.data_integration.models.ConfigValues :param join_type: The value to assign to the join_type property of this Joiner. Allowed values for this property are: "INNER", "FULL", "LEFT", "RIGHT", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type join_type: str :param join_condition: The value to assign to the join_condition property of this Joiner. :type join_condition: oci.data_integration.models.Expression """ self.swagger_types = { 'model_type': 'str', 'key': 'str', 'model_version': 'str', 'parent_ref': 'ParentReference', 'name': 'str', 'description': 'str', 'object_version': 'int', 'input_ports': 'list[InputPort]', 'output_ports': 'list[OutputPort]', 'object_status': 'int', 'identifier': 'str', 'parameters': 'list[Parameter]', 'op_config_values': 'ConfigValues', 'join_type': 'str', 'join_condition': 'Expression' } self.attribute_map = { 'model_type': 'modelType', 'key': 'key', 'model_version': 'modelVersion', 'parent_ref': 'parentRef', 'name': 'name', 'description': 'description', 'object_version': 'objectVersion', 'input_ports': 'inputPorts', 'output_ports': 'outputPorts', 'object_status': 'objectStatus', 'identifier': 'identifier', 'parameters': 'parameters', 'op_config_values': 'opConfigValues', 'join_type': 'joinType', 'join_condition': 'joinCondition' } self._model_type = None self._key = None self._model_version = None self._parent_ref = None self._name = None self._description = None self._object_version = None self._input_ports = None self._output_ports = None self._object_status = None self._identifier = None self._parameters = None self._op_config_values = None self._join_type = None self._join_condition = None self._model_type = 'JOINER_OPERATOR' @property def join_type(self): """ Gets the join_type of this Joiner. joinType Allowed values for this property are: "INNER", "FULL", "LEFT", "RIGHT", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :return: The join_type of this Joiner. :rtype: str """ return self._join_type @join_type.setter def join_type(self, join_type): """ Sets the join_type of this Joiner. joinType :param join_type: The join_type of this Joiner. :type: str """ allowed_values = ["INNER", "FULL", "LEFT", "RIGHT"] if not value_allowed_none_or_none_sentinel(join_type, allowed_values): join_type = 'UNKNOWN_ENUM_VALUE' self._join_type = join_type @property def join_condition(self): """ Gets the join_condition of this Joiner. :return: The join_condition of this Joiner. :rtype: oci.data_integration.models.Expression """ return self._join_condition @join_condition.setter def join_condition(self, join_condition): """ Sets the join_condition of this Joiner. :param join_condition: The join_condition of this Joiner. :type: oci.data_integration.models.Expression """ self._join_condition = join_condition def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
python
"""Methods specific to handling chess datasets. """ import torch import torchvision import typing import logging from enum import Enum import numpy as np import chess from recap import URI, CfgNode as CN from .transforms import build_transforms from .datasets import Datasets logger = logging.getLogger(__name__) def color_name(color: chess.Color) -> str: """Convert a chess color to a string. Args: color (chess.Color): the color Returns: str: the string representation """ return {chess.WHITE: "white", chess.BLACK: "black"}[color] def piece_name(piece: chess.Piece) -> str: """Convert a chess piece to a string. Args: piece (chess.Piece): the piece Returns: str: the corresponding string """ return f"{color_name(piece.color)}_{chess.piece_name(piece.piece_type)}" def name_to_piece(name: str) -> chess.Piece: """Convert the name of a piece to an instance of :class:`chess.Piece`. Args: name (str): the name of the piece Returns: chess.Piece: the instance of :class:`chess.Piece` """ color, piece_type = name.split("_") color = color == "white" piece_type = chess.PIECE_NAMES.index(piece_type) return chess.Piece(piece_type, color) def build_dataset(cfg: CN, mode: Datasets) -> torch.utils.data.Dataset: """Build a dataset from its configuration. Args: cfg (CN): the config object mode (Datasets): the split (important to figure out which transforms to apply) Returns: torch.utils.data.Dataset: the dataset """ transform = build_transforms(cfg, mode) dataset = torchvision.datasets.ImageFolder(root=URI(cfg.DATASET.PATH) / mode.value, transform=transform) return dataset def build_data_loader(cfg: CN, dataset: torch.utils.data.Dataset, mode: Datasets) -> torch.utils.data.DataLoader: """Build a data loader for a dataset. Args: cfg (CN): the config object dataset (torch.utils.data.Dataset): the dataset mode (Datasets): the split Returns: torch.utils.data.DataLoader: the data loader """ shuffle = mode in {Datasets.TRAIN, Datasets.VAL} return torch.utils.data.DataLoader(dataset, batch_size=cfg.DATASET.BATCH_SIZE, shuffle=shuffle, num_workers=cfg.DATASET.WORKERS)
python
option = 'Y'
print('\033[1;32m{:=^40}\033[m'.format(' ANNUAL STUDENT RESULT '))
while option == 'Y':
    nome = str(input('\033[1mType your name: '))
    n1 = float(input('\033[1;33m{}\033[m \033[1;32mType your first grade:\033[m '.format(nome.lower().capitalize())))
    n2 = float(input('\033[1;33m{}\033[m \033[1;32mEnter your second grade:\033[m '.format(nome.lower().capitalize())))
    n3 = float(input('\033[1;33m{}\033[m \033[1;32mEnter your third grade:\033[m '.format(nome.lower().capitalize())))
    n4 = float(input('\033[1;33m{}\033[m \033[1;32mEnter your fourth grade:\033[m '.format(nome.lower().capitalize())))
    média = (n1 + n2 + n3 + n4) / 4
    print('\033[1m{} Your average is\033[m \033[1;36m{:.1f}\033[m'.format(nome.lower().capitalize(), média))
    option = str(input('\033[1mDo you wish to continue? [Yes/No]\033[m ')).upper().strip()[0]
    print('\033[1;32m{:=^40}\033[m'.format(' RESULT '))
    # below 5: disapproved, 5 to 6.9: recovery, 7 and above: approved
    if média < 5:
        print('\033[1mYou are\033[m \033[1;31mDISAPPROVED\033[m')
    elif média < 7:
        print('\033[1mYou are in\033[m \033[1;33mRECOVERY\033[m')
    else:
        print('\033[1mYou are\033[m \033[1;36mAPPROVED\033[m')
print('\033[1;35mOperation completed\033[m')
python
from shared.numeric import is_permutation
from shared.generators import infinite_range


def is_max_permutation(number: int, multiple: int) -> bool:
    for i in range(2, multiple + 1):
        if not is_permutation(number, number * i):
            return False
    return True


def permutation_multiples(multiple: int) -> int:
    for i in infinite_range(1):
        if is_max_permutation(i, multiple):
            return i


def main() -> None:
    m = permutation_multiples(6)
    print(m)


if __name__ == "__main__":
    main()
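The shared helpers imported above are not part of this snippet; a minimal sketch of what they might look like (hypothetical implementations, not the project's actual code):

from itertools import count


def is_permutation(a: int, b: int) -> bool:
    # Two integers are permutations of each other if their digit multisets match.
    return sorted(str(a)) == sorted(str(b))


def infinite_range(start: int = 0):
    # Unbounded ascending counter, equivalent to itertools.count(start).
    return count(start)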
python
import pandas as pd

from estimators.FuzzyFlow import FuzzyFlow

fuzzy = FuzzyFlow()
dat = pd.read_csv('../sampling_617685_metric_10min_datetime.csv',
                  parse_dates=True, index_col=0)[:3000]
dat = pd.Series(dat['cpu_rate'].round(3))
fuzzy.fit_transform(dat)
python
input_str = input("Enter a list of elements: ")
list1 = [int(x) for x in input_str.split() if int(x) % 2 == 0]
print(list1)
python
""" URLconf for ``access_log`` app. """ # Prefix URL names with the app name. Avoid URL namespaces unless it is likely # this app will be installed multiple times in a single project. from django.conf.urls import include, patterns, url urlpatterns = patterns( 'access_log.views', url(r'^downloads/(?P<content_type>\d+)/$', 'downloads', name='access_log_downloads'), url(r'^downloads/(?P<content_type>\d+)/(?P<object_id>\d+)/$', 'downloads', name='access_log_downloads'), )
python
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=3, padding=0),
            nn.BatchNorm2d(64),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Dropout2d(0.5),
            nn.ReLU(inplace=True)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(in_channels=64, out_channels=256, kernel_size=5, stride=1, padding=0),
            nn.BatchNorm2d(256),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Dropout2d(0.5),
            nn.ReLU(inplace=True)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=400, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(400),
            nn.Dropout2d(0.5),
            nn.ReLU(inplace=True)
        )
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels=400, out_channels=576, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(576),
            nn.Dropout2d(0.5),
            nn.ReLU(inplace=True)
        )
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels=576, out_channels=1024, kernel_size=3, stride=1, padding=0),
            nn.BatchNorm2d(1024),
            nn.Dropout2d(0.5),
            nn.ReLU(inplace=True)
        )

        # self.fc1 = nn.Linear(12544, 3136)
        # self.fc2 = nn.Linear(3136, 392)
        # self.fc3 = nn.Linear(392, 1)
        self.fc1 = nn.Linear(14400, 1440)
        self.fc2 = nn.Linear(1440, 144)
        self.fc3 = nn.Linear(144, 30)

    def forward(self, x):
        x = x.cuda()
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x), inplace=True)
        x = F.relu(self.fc2(x), inplace=True)
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        size = x.size()[1:]  # all dimensions except the batch dimension
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
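A hedged usage sketch: the classifier flattens 14400 = 576·5·5 features after conv4 (conv5 is defined but not called in forward), which works out to roughly 100×100 RGB inputs; a CUDA device is assumed because forward() moves its input to the GPU:

import torch

net = Net().cuda()                    # parameters must live on the GPU since forward() calls x.cuda()
dummy = torch.randn(2, 3, 100, 100)   # 100x100 -> 32 -> 15 -> 11 -> 5 spatially, 576 channels = 14400 features
out = net(dummy)
print(out.shape)                      # torch.Size([2, 30])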
python
__all__ = [
    "AuthenticationViewDjangoMixin",
    "AuthenticationViewMixin",
    "AuthenticationViewRestMixin",
    "Authenticator",
]

from .authenticator import Authenticator
from .views import AuthenticationViewDjangoMixin, AuthenticationViewMixin, AuthenticationViewRestMixin
python
# Copyright (c) 2006-2012 Filip Wasilewski <http://en.ig.ma/>
# Copyright (c) 2012-2016 The PyWavelets Developers
#                         <https://github.com/PyWavelets/pywt>
# See COPYING for license details.

"""
The thresholding helper module implements the most popular signal
thresholding functions.
"""

from __future__ import division, print_function, absolute_import

__all__ = ['threshold']

import numpy as np


def soft(data, value, substitute=0):
    data = np.asarray(data)
    magnitude = np.absolute(data)

    with np.errstate(divide='ignore'):
        # divide by zero okay as np.inf values get clipped, so ignore warning.
        thresholded = (1 - value/magnitude)
        thresholded.clip(min=0, max=None, out=thresholded)
        thresholded = data * thresholded

    if substitute == 0:
        return thresholded
    else:
        cond = np.less(magnitude, value)
        return np.where(cond, substitute, thresholded)


def hard(data, value, substitute=0):
    data = np.asarray(data)
    cond = np.less(np.absolute(data), value)
    return np.where(cond, substitute, data)


def greater(data, value, substitute=0):
    data = np.asarray(data)
    if np.iscomplexobj(data):
        raise ValueError("greater thresholding only supports real data")
    return np.where(np.less(data, value), substitute, data)


def less(data, value, substitute=0):
    data = np.asarray(data)
    if np.iscomplexobj(data):
        raise ValueError("less thresholding only supports real data")
    return np.where(np.greater(data, value), substitute, data)


thresholding_options = {'soft': soft,
                        'hard': hard,
                        'greater': greater,
                        'less': less}


def threshold(data, value, mode='soft', substitute=0):
    """
    Thresholds the input data depending on the mode argument.

    In ``soft`` thresholding, data values with absolute value less than
    `param` are replaced with `substitute`. Data values with absolute value
    greater or equal to the thresholding value are shrunk toward zero
    by `value`.  In other words, the new value is
    ``data/np.abs(data) * np.maximum(np.abs(data) - value, 0)``.

    In ``hard`` thresholding, the data values where their absolute value is
    less than the value param are replaced with `substitute`. Data values with
    absolute value greater or equal to the thresholding value stay untouched.

    In ``greater`` thresholding, the data is replaced with `substitute` where
    data is below the thresholding value. Greater data values pass untouched.

    In ``less`` thresholding, the data is replaced with `substitute` where
    data is above the thresholding value. Lesser data values pass untouched.

    Both ``hard`` and ``soft`` thresholding also support complex-valued data.

    Parameters
    ----------
    data : array_like
        Numeric data.
    value : scalar
        Thresholding value.
    mode : {'soft', 'hard', 'greater', 'less'}
        Decides the type of thresholding to be applied on input data. Default
        is 'soft'.
    substitute : float, optional
        Substitute value (default: 0).

    Returns
    -------
    output : array
        Thresholded array.

    Examples
    --------
    >>> import numpy as np
    >>> import pywt
    >>> data = np.linspace(1, 4, 7)
    >>> data
    array([ 1. ,  1.5,  2. ,  2.5,  3. ,  3.5,  4. ])
    >>> pywt.threshold(data, 2, 'soft')
    array([ 0. ,  0. ,  0. ,  0.5,  1. ,  1.5,  2. ])
    >>> pywt.threshold(data, 2, 'hard')
    array([ 0. ,  0. ,  2. ,  2.5,  3. ,  3.5,  4. ])
    >>> pywt.threshold(data, 2, 'greater')
    array([ 0. ,  0. ,  2. ,  2.5,  3. ,  3.5,  4. ])
    >>> pywt.threshold(data, 2, 'less')
    array([ 1. ,  1.5,  2. ,  0. ,  0. ,  0. ,  0. ])

    """
    try:
        return thresholding_options[mode](data, value, substitute)
    except KeyError:
        # Make sure error is always identical by sorting keys
        keys = ("'{0}'".format(key) for key in
                sorted(thresholding_options.keys()))
        raise ValueError("The mode parameter only takes values from: {0}."
                         .format(', '.join(keys)))
python
# -*- coding: utf-8 -*-
from flask_mongoengine import Document
from mongoengine import CASCADE
from mongoengine.fields import LazyReferenceField, BooleanField, StringField

from mpcontribs.api.contributions.document import Contributions


class Cards(Document):
    contribution = LazyReferenceField(
        Contributions,
        passthrough=True,
        reverse_delete_rule=CASCADE,
        primary_key=True,
        help_text="contribution this card belongs to",
    )
    is_public = BooleanField(
        required=True, default=False, help_text="public or private card"
    )
    html = StringField(required=True, default="", help_text="embeddable html code")
    meta = {"collection": "cards", "indexes": ["is_public"]}
python
from collections import defaultdict from django.conf import settings from django.db import transaction, IntegrityError, models from django.db.models import Q, Sum from django.utils import timezone from article.models import ArticleType from money.models import Money, Decimal, Denomination, CurrencyData, Currency, MoneyField from sales.models import TransactionLine, Transaction from stock.models import StockChange, StockChangeSet from stock.stocklabel import StockLabeledLine from swipe.settings import CASH_PAYMENT_TYPE_NAME from tools.management.commands.consistencycheck import consistency_check, CRITICAL from tools.util import raiseif class PaymentType(models.Model): # Name of the payment type. "Cash" is always used when using cash registers. Should not be changed. name = models.CharField(max_length=255, unique=True) # Is used for invoicing. If enabled, the cost is to be used at a later date. Should not be changed. is_invoicing = models.BooleanField(default=False) def __str__(self): return "{}".format(self.name) class Register(models.Model): """ A register. This can be a cash register with denominations or a virtual register that accepts money in a general sense """ # Name of the register. Cosmetic class Meta: permissions = ( # Permission to allow linking customers to users via the swipe web interface. ("open_register", "Can open a register"), ("close_register", "Can close a register"), ) name = models.CharField(max_length=255, unique=True) # Currency used for this register. Unchangeable currency = models.ForeignKey(CurrencyData, on_delete=models.PROTECT) # Indicates if register accepts cash or otherwise is a digital register is_cash_register = models.BooleanField(default=False) # Do we use this register right now?(Non-active registers should be empty) is_active = models.BooleanField(default=True) # How do people pay in this register? payment_type = models.ForeignKey(PaymentType, on_delete=models.PROTECT) def get_denominations(self): # Gets denominations from register based on its currency if self.is_cash_register: return Denomination.objects.filter(currency=self.currency) else: return [] def is_open(self): # Checks if the register is in an opened state sales_period = SalesPeriod.objects.filter(endTime__isnull=True) if len(sales_period) > 1: raise IntegrityError("More than one salesperiod opened") elif len(sales_period) == 1: counts = RegisterCount.objects.filter(sales_period=sales_period[0], register=self) if len(counts) == 0 or len(counts) > 1: return False else: if counts[0].is_opening_count: return True else: raise IntegrityError("The only count for the opened sales period is a closing count") else: return False def get_prev_closing_count(self): # Get this registers previous count when it was closed. # This shouldn't be used for Brief Registers; they should start at zero instead. 
count_exists = RegisterCount.objects.filter(is_opening_count=False, register=self).exists() if not count_exists: # Dummy the count return Money(currency=Currency(self.currency.iso), amount=Decimal("0.00000")) last_count = RegisterCount.objects.filter(is_opening_count=False, register=self).order_by('sales_period__beginTime').last() denoms = DenominationCount.objects.filter(register_count=last_count) sum = None for denom in denoms: if not sum: sum = denom.get_money_value() else: sum += denom.get_money_value() return sum @property def denomination_counts(self): if RegisterCount.objects.filter(register=self).exists(): return DenominationCount.objects.filter(register_count=RegisterCount.objects.filter(register=self). latest('time_created')) else: return [] @transaction.atomic def open(self, counted_amount, memo="", denominations=None): # Opens a register, opens a registerperiod if neccessary if denominations is None: denominations = [] if memo == "": memo = None if self.is_active: if self.is_open(): raise AlreadyOpenError("Register is already open") else: # Calculate Cash Register Difference if self.is_cash_register: count = None for denomination_count in denominations: if count is None: count = denomination_count.get_money_value() else: count += denomination_count.get_money_value() # Without denominations, the value is equal to 0 # This prevents an error when denomination count is empty # Failure will occur however, if the opening count is non-zero as no counts means that # there is a difference between counted_amount and denomination counts if len(denominations) == 0: count = Money(amount=Decimal(0), currency=Currency(self.currency.iso)) diff = count - self.get_prev_closing_count() # Get or create SalesPeriod if RegisterMaster.sales_period_is_open(): open_sales_period = RegisterMaster.get_open_sales_period() else: open_sales_period = SalesPeriod() open_sales_period.save() # Create cash register if self.is_cash_register: reg_count = RegisterCount(is_opening_count=True, register=self, sales_period=open_sales_period, amount=counted_amount) used_denominations = set() for denomination_count in denominations: counted_amount -= denomination_count.number * denomination_count.denomination.amount used_denominations.add(denomination_count.denomination) raiseif(counted_amount != Decimal("0.00000"), RegisterCountError, "denominations amounts did not add up.") reg_count.save(denominations=denominations) for denomination_count in denominations: denomination_count.register_count = reg_count all_denominations = Denomination.objects.filter(currency__register=self) for den in all_denominations: if den not in used_denominations: denominations.append(DenominationCount(number=0, denomination=den, register_count=reg_count)) for denomination_count in denominations: denomination_count.save() else: # Create Brief Register # Optional: Disallow opening with no value reg_count = RegisterCount(is_opening_count=True, amount=counted_amount, register=self, sales_period=open_sales_period) reg_count.save() # Set diff to zero, may change later on if not self.is_cash_register: diff = Money(amount=counted_amount, currency=Currency(self.currency.iso)) # Save Register Count Difference # noinspection PyUnboundLocalVariable OpeningCountDifference.objects.create(register_count=reg_count, difference=diff) return reg_count else: raise InactiveError("The register is inactive and cannot be opened") def close(self, indirect=False, register_count=None, denomination_counts=None): """ :param indirect: :param register_count: :type 
register_count: RegisterCount :param denomination_counts: :type denomination_counts: List[DenominationCount] :return: """ # Closes a register, should always be called indirectly via registermaster if denomination_counts is None: denomination_counts = [] if not indirect: raise InvalidOperationError("You can only close a register when the entire sales period is closed") else: if not self.is_open(): raise AlreadyClosedError("Register is already closed") else: # Opened register means opened sales period opened_sales_period = SalesPeriod.get_opened_sales_period() reg_count = RegisterCount.objects.filter(register=self, sales_period=opened_sales_period) if len(reg_count) > 1: raise IntegrityError("Register is either opened twice or already closed.") elif len(reg_count) == 0: raise IntegrityError("Register is apparantly not opened but function indicated that it was.") else: register_count.sales_period = opened_sales_period if register_count.register_id != self.id: raise InvalidInputError("Registercount's register does not match register") if register_count.is_opening_count: raise InvalidInputError("Registercount should be closing and connected to salesperiod") if not self.is_cash_register: for denom in denomination_counts: raiseif(denom.denomination.currency_id != self.currency_id, InvalidInputError, "Denomination does not have correct currency") raiseif(denom.register_count.register_id != self.id, InvalidInputError, "Denominationcount and register don't match") register_count.save() for denom in denomination_counts: denom.register_count = register_count denom.save() def save(self, **kwargs): if self.is_cash_register: raiseif(self.payment_type.name != CASH_PAYMENT_TYPE_NAME, CurrencyTypeMismatchError, "Payment type name did not match the provided preset. Use {} instead".format( CASH_PAYMENT_TYPE_NAME)) super(Register, self).save() def __str__(self): return "Name: {}, Currency: {}, is_cash_register: {}, is_active: {}, Payment Method: {}".\ format(self.name, self.currency.name, self.is_cash_register, self.is_active, self.payment_type.name) class RegisterMaster: """ A helper class that can do the necessary checks to see the state of the registers. Also, some commands can be given """ @staticmethod def sales_period_is_open(): return RegisterMaster.get_open_sales_period() @staticmethod def get_open_sales_period(): try: a = SalesPeriod.objects.get(endTime__isnull=True) except SalesPeriod.DoesNotExist: return False return a @staticmethod def number_of_open_registers(): # Retrieves the number of open registers, 0 when period is closed and error when inconsistent return RegisterCount.objects.filter(sales_period__endTime__isnull=True, is_opening_count=True).count() @staticmethod def get_open_registers(): # Returns all open registers return Register.objects.filter(registercount__sales_period__endTime__isnull=True, registercount__is_opening_count=True).distinct() @staticmethod def get_payment_types_for_open_registers(): # Returns the set of payment types that are possible in the open register period return PaymentType.objects.filter(register__registercount__sales_period__endTime__isnull=True, register__registercount__is_opening_count=True).distinct() @staticmethod def get_last_closed_register_counts(): # Very inefficient. 
If you can do this better, please do is_open = RegisterMaster.sales_period_is_open() closed_register_counts = [] if not is_open: closed_registers = Register.objects.all() else: open_regs = RegisterMaster.get_open_registers() closed_registers = set(Register.objects.all()) for open in open_regs: closed_registers.remove(open) for register in closed_registers: counts = RegisterCount.objects.filter(register=register, is_opening_count=False) if len(counts) > 0: closed_register_counts.append(counts.latest('time_created')) closed_register_counts_ids = [] for reg in closed_register_counts: closed_register_counts_ids.append(reg.id) return RegisterCount.objects.filter(id__in=closed_register_counts_ids) @staticmethod # Gets the last register count for each register, dummied for registers without counts def get_last_register_counts(): registers = Register.objects.all() counts = [] for register in registers: count_exists = RegisterCount.objects.filter(register=register).exists() if count_exists: counts.append(RegisterCount.objects.filter(register=register).latest('time_created')) else: counts.append(RegisterCount(register=register, sales_period_id=-1, is_opening_count=False, amount=Decimal("0"), time_created=timezone.now())) return counts # type: List[RegisterCount] class ConsistencyChecker: """ Checks the consistency of the system. Will raise IntegrityErrors if the system is an inconsistent state. Fixes are required if any of these tests fail """ # This test runs the tests, but rather than raising an error it appends the errors to an array @staticmethod @consistency_check def non_crashing_full_check(): errors = [] try: ConsistencyChecker.check_open_sales_periods() except IntegrityError: errors.append({ "text": "More than one sales period is open", "location": "SalesPeriods", "line": -1, "severity": CRITICAL }) try: ConsistencyChecker.check_open_register_counts() except IntegrityError: errors.append({ "text": "Register has more register counts opened in an opened sales period than possible", "location": "SalesPeriods", "line": -1, "severity": CRITICAL }) try: ConsistencyChecker.check_payment_types() except IntegrityError: errors.append({ "text": "Cash register can only have cash as payment method", "location": "SalesPeriods", "line": -1, "severity": CRITICAL }) return errors @staticmethod def full_check(): ConsistencyChecker.check_open_sales_periods() ConsistencyChecker.check_open_register_counts() ConsistencyChecker.check_payment_types() @staticmethod def check_open_sales_periods(): # Checks if there is either one or zero open sales periods active_salesperiods = SalesPeriod.objects.filter(endTime__isnull=True) if len(active_salesperiods) > 1: raise IntegrityError("More than one sales period is open") @staticmethod def check_open_register_counts(): # Checks if register is opened at most once relevant_register_counts = RegisterCount.objects.filter(sales_period__endTime__isnull=True) a = set() for count in relevant_register_counts: if count.register_id in a: raise IntegrityError("Register is opened and closed while Sales period is still open") else: a.add(count.register_id) @staticmethod def check_payment_types(): # Checks for valid payment types. 
Currently it checks if cash register only hold cash registers = Register.objects.all() for register in registers: if register.is_cash_register and register.payment_type.name != settings.CASH_PAYMENT_TYPE_NAME: raise IntegrityError("Cash register can only have cash as payment method") class SalesPeriod(models.Model): """ A general period in which transactions on opened registers can take place """ # When does the sales period start? beginTime = models.DateTimeField(auto_now_add=True) # When does the sales period end?(null indicates not ended) endTime = models.DateTimeField(null=True) # Any relevant information a user wants to add? closing_memo = models.CharField(max_length=255, default=None, null=True) @classmethod def create(cls, *args, **kwargs): return cls(*args, **kwargs) def is_opened(self): return not self.endTime @staticmethod def get_opened_sales_period(): """ Gets the opened salesperiod. If there is none or there are multiple, Django will throw an exception. :return: """ return SalesPeriod.objects.get(endTime__isnull=True) @staticmethod @transaction.atomic def close( registercounts_denominationcounts, memo: str=None): """ Closes a sales period by closing all the opened registers. Requires the totals to be filled in. :param registercounts_denominationcounts: :type registercounts_denominationcounts: list[tuple[RegisterCount, list[DenominationCount]]] :param memo: :return: """ # early return when register is closed if not RegisterMaster.sales_period_is_open(): return [AlreadyClosedError("Salesperiod is already closed")] if not memo: memo = None # ensure memo is None when None or "" or otherwise empty string open_registers = set(RegisterMaster.get_open_registers()) unchecked = set(open_registers) errors = [] totals = defaultdict(lambda: Decimal(0)) for (registercount, denominationcounts) in registercounts_denominationcounts: registercount.is_opening_count = False amount = registercount.amount register = registercount.register # let's already add the counted amount to the currency so that we don't have to do that later on totals[register.currency.iso] += amount if register.is_cash_register: # check if denominations have valid amounts if not denominationcounts: errors.append(InvalidDenominationList( "Register {} should have denomination counts attached, but doesn't.".format(register.name) )) break denom_amount = Decimal(0) for denom_count in denominationcounts: if denom_count.number < 0: errors.append(NegativeCountError( "Register {} has an invalid denomination count for {}{}".format( register.name, denom_count.denomination.currency, denom_count.denomination.amount, ) )) break denom_count.register_count = registercount denom_amount += denom_count.get_money_value().amount if denom_amount != amount: errors.append(InvalidDenominationList("List not equal to expected count: {}, count: {}. " "Result: {}".format(denominationcounts, registercount, denom_amount))) break # now that we're done with checking the register's data, we can pop the register from the list. 
if register in unchecked: unchecked.remove(register) else: errors.append(InvalidOperationError("Register {} is not available in the list of " "unchecked registers.".format(register.name))) if errors: raise SalesPeriodCloseError(errors=errors) if len(unchecked) > 0: return [InvalidOperationError("There are some uncounted registers, please count them")] sales_period = RegisterMaster.get_open_sales_period() tlines = TransactionLine.objects.filter(transaction__salesperiod=sales_period) for tline in tlines: totals[tline.price.currency.iso] -= tline.price.amount in_outs = MoneyInOut.objects.filter(sales_period=sales_period).select_related('register__currency') for in_out in in_outs: totals[in_out.register.currency.iso] -= in_out.amount for (registercount, denom_counts) in registercounts_denominationcounts: register = registercount.register # type: Register register.close(indirect=True, register_count=registercount, denomination_counts=denom_counts) for diff in totals: close = ClosingCountDifference(sales_period=sales_period, difference=Money(currency=Currency(diff), amount=totals[diff])) close.save() sales_period.endTime = timezone.now() sales_period.save() return sales_period def __str__(self): return "Begin time: {}, End time: {}".format(self.beginTime, self.endTime) class RegisterCount(models.Model): """ The amount of currency and perhaps the denomination in the case of a cash register is stored here """ # A register period has one or two counts register = models.ForeignKey(Register, on_delete=models.PROTECT) # The salesperiod of the count sales_period = models.ForeignKey(SalesPeriod, on_delete=models.PROTECT) # Indicates if this the opening or the closing count is_opening_count = models.BooleanField() # How much money is there at the moment of counting? amount = models.DecimalField(max_digits=settings.MAX_DIGITS, decimal_places=settings.DECIMAL_PLACES, default=-1.0) # Time at which the registercount was created(otherwise it's really to hard to find the latest one) time_created = models.DateTimeField(auto_now_add=True, null=True) def save(self, *args, **kwargs): denominations = [] if 'denominations' in kwargs: denominations = kwargs['denominations'] if self.register.is_cash_register: # Put all denominations for currency in a hashmap denoms_for_register = Denomination.objects.filter(currency=self.register.currency) all_denoms = {} for denom in denoms_for_register: all_denoms[str(denom.amount)] = 1 # For all denominationcounts for denom_count in denominations: # Assert every denomination is available exactly once if all_denoms.pop(str(denom_count.denomination.amount), 0) == 0: raise InvalidDenominationList("Denominations invalid (Unexpected Denom): GOT {}, EXPECTED {}. 
" "Crashed at {} || {}".format(denominations, denoms_for_register, denom_count.denomination.amount, all_denoms)) else: raiseif(denominations, RegisterInconsistencyError, "non-cash registers should not have denominations") super().save() @classmethod def create(cls, *args, **kwargs): return cls(*args, **kwargs) def is_cash_register_count(self): return self.register.is_cash_register def get_amount_from_denominationcounts(self): # Distills an amount value from the denomination counts denom_counts = DenominationCount.objects.filter(register_count=self) if len(denom_counts) > 0: amount = Decimal(0) for count in denom_counts: amount += count.get_money_value() return amount else: return Decimal(0) def __str__(self): return "Register:{}, is_opening_count:{}, Amount:{}".\ format(self.register_id, self.is_opening_count, self.amount) class DenominationCount(models.Model): """ Counting of the denominations in a cash register """ # Every cash register count needs to count all of its denominations, amongst which is 'self' register_count = models.ForeignKey(RegisterCount, on_delete=models.PROTECT) # Denomination belonging to the currency of this register denomination = models.ForeignKey(Denomination, on_delete=models.PROTECT) # Number of pieces of denomination number = models.IntegerField() def get_money_value(self): return Money(self.denomination.amount, Currency(self.denomination.currency.iso)) * int(self.number) @classmethod def create(cls, *args, **kwargs): return cls(*args, **kwargs) def __str__(self): return "{} {} x {} @ RegCount {}".format(self.denomination.currency, self.denomination.amount, self.number, self.register_count_id) class MoneyInOut(models.Model): """ Adds money to a register during an open register period """ # Register to which register = models.ForeignKey(Register, on_delete=models.PROTECT) # Salesperiod where in/out took place sales_period = models.ForeignKey(SalesPeriod, on_delete=models.PROTECT) # Positive: ADD, negative: REMOVE moneys amount = models.DecimalField(max_digits=settings.MAX_DIGITS, decimal_places=settings.DECIMAL_PLACES, default=0.0) def __str__(self): return "Register:{}, Sales Period: {}, Amount:{}".format(self.register_id, self.sales_period_id, self.amount) def save(self, force_insert=False, force_update=False, using=None, update_fields=None): if not self.id: if not hasattr(self, 'sales_period') or not self.sales_period: self.sales_period = SalesPeriod.get_opened_sales_period() super(MoneyInOut, self).save() else: super(MoneyInOut, self).save() class SalesPeriodDifference(models.Model): """ Resolves differences between expected amounts of money in the combined opened registers and the actual amount of money. Count is per type of money """ # Period in which there is a difference sales_period = models.ForeignKey(SalesPeriod, on_delete=models.PROTECT) # Currency of the difference currency_data = models.ForeignKey(CurrencyData, on_delete=models.PROTECT) # Amount of difference amount = models.DecimalField(max_digits=settings.MAX_DIGITS, decimal_places=settings.DECIMAL_PLACES, default=0.0) class OpeningCountDifference(models.Model): # Difference that can occur when a register is opened. This indicated that money (dis)appeared between closing and # opening of the register. difference = MoneyField() register_count = models.OneToOneField("RegisterCount", on_delete=models.PROTECT) def __str__(self): return "[{}] : {}".format(self.register_count, self.difference) class ClosingCountDifference(models.Model): # Difference that can occur when a sales period closes. 
Since this could have any reason, it cannot be pointed to # a single register. This makes it different from an OpeningCountDifference difference = MoneyField() sales_period = models.ForeignKey("SalesPeriod", on_delete=models.PROTECT) class InactiveError(Exception): pass class AlreadyOpenError(Exception): pass class AlreadyClosedError(Exception): pass class InvalidOperationError(Exception): pass class InvalidDenominationList(Exception): pass class InvalidRegisterError(Exception): pass class CurrencyTypeMismatchError(Exception): pass class NegativeCountError(Exception): pass class RegisterCountError(Exception): pass class RegisterInconsistencyError(Exception): pass class InvalidInputError(Exception): pass class SalesPeriodCloseError(Exception): def __init__(self, errors): super(SalesPeriodCloseError, self).__init__() self.errors = errors def __str__(self): ret = "" for error in self.errors: ret += str(error) return ret
python
import threading
from functools import wraps


def delay(delay=0.):
    """
    Decorator delaying the execution of a function for a while.
    """
    def wrap(f):
        @wraps(f)
        def delayed(*args, **kwargs):
            timer = threading.Timer(delay, f, args=args, kwargs=kwargs)
            timer.start()
        return delayed
    return wrap
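A minimal usage sketch (the function name is illustrative; the decorated call returns immediately and the work runs on a background timer thread):

import time

@delay(1.0)
def greet(name):
    print("Hello,", name)  # fires roughly one second after the call, on a Timer thread

greet("world")   # returns immediately
time.sleep(1.5)  # keep the main thread around long enough to see the output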
python
train_imgs_path = "path_to_train_images"
test_imgs_path = "path_to_val/test images"
dnt_names = []

import os

with open("dont_include_to_train.txt", "r") as dnt:
    for name in dnt:
        name = name.strip("\n")
        # remove the ".json" extension (str.strip would drop individual characters, not the suffix)
        if name.endswith(".json"):
            name = name[:-len(".json")]
        dnt_names.append(name)
print(dnt_names)

with open("baseline_train.txt", "w") as btr:
    for file in os.listdir(train_imgs_path):
        if file not in dnt_names:
            btr.write(train_imgs_path + file + "\n")

with open("baseline_val.txt", "w") as bv:
    for file in os.listdir(test_imgs_path):
        bv.write(test_imgs_path + file + "\n")
python
import datetime
import time

import pandas as pd
from apscheduler.schedulers.background import BackgroundScheduler
from django_apscheduler.jobstores import DjangoJobStore, register_events, register_job
from django_apscheduler.models import DjangoJob, DjangoJobExecution
# from django_pandas.io import read_frame

from BiSheServer.settings import BASE_LOG_DIR, LOG_SUFFIX
from api import upload_log

# Start the scheduled daily task, executed at a fixed time.
scheduler_plan = BackgroundScheduler()  # instantiate the scheduler
try:
    # Clear existing jobs
    dje = DjangoJobExecution.objects.all()
    dj = DjangoJob.objects.all()
    # Check whether the job already exists
    dj_rs = dj.filter(id="task_time")
    if dj_rs.exists():
        dj_rs = dj_rs.first()
        # If the job's next run time has already passed at startup, start the upload immediately
        if int(time.mktime(dj_rs.next_run_time.timetuple())) < int(time.time()):
            upload_log.upload_hadoop_log_thread(suffix=(dj_rs.next_run_time + datetime.timedelta(days=-1))
                                                .strftime(LOG_SUFFIX))
    djePd = pd.DataFrame(list(dje.values()))
    djPd = pd.DataFrame(list(dj.values()))
    if not djePd.empty:
        # If there are execution records, append them to a log file before clearing the tables
        crontab_log_path = BASE_LOG_DIR + "/crontab.log"
        djPd.to_csv(crontab_log_path, mode='a', index=True, sep='\t', encoding='utf_8_sig')
        with open(crontab_log_path, "a") as f:
            f.write("\n")  # the with-statement closes the file, no explicit f.close() needed
        djePd.to_csv(crontab_log_path, mode='a', index=True, sep='\t', encoding='utf_8_sig')
        with open(crontab_log_path, "a") as f:
            f.write("\n\n")  # the with-statement closes the file, no explicit f.close() needed
    dje.delete()
    dj.delete()

    # After the job tables are cleared, register the job again.
    # The scheduler uses DjangoJobStore()
    scheduler_plan.add_jobstore(DjangoJobStore(), "default")

    # Scheduling alternatives, kept for reference: an "interval" trigger (e.g. every 15 minutes)
    # or a "cron" trigger (e.g. Monday to Friday at a fixed time, id marks the job):
    # @register_job(scheduler_plan, "interval", minutes=15)
    # @register_job(scheduler_plan, 'cron', day_of_week='mon-sun', hour='20', minute='3', second='1', id='task_time')
    # @register_job(scheduler_plan, "interval", minutes=1, replace_existing=True)
    @register_job(scheduler_plan, 'cron', day_of_week='mon-sun', hour='0', minute='1', second='1', id='task_time',
                  replace_existing=True)
    def my_job():
        # The task to execute
        upload_log.upload_hadoop_log_thread(suffix="")
        # pass

    register_events(scheduler_plan)
    scheduler_plan.start()
except Exception as e:
    print(e)
    # Stop the scheduler on any error
    scheduler_plan.shutdown()
python
#!/usr/bin/env python
"""monitorTasks"""

# usage: ./monitorTasks.py -v ve2 -u admin -j 54334 -k 'Starting directory differ' -t 120

# import pyhesity wrapper module
from pyhesity import *
from time import sleep
from datetime import datetime
import os
import smtplib
import email.message
import email.utils

# command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)           # cluster to connect to
parser.add_argument('-u', '--username', type=str, required=True)      # username
parser.add_argument('-d', '--domain', type=str, default='local')      # (optional) domain - defaults to local
parser.add_argument('-j', '--jobid', type=int, required=True)         # job ID to monitor
parser.add_argument('-n', '--jobname', type=str, required=True)       # job name (used in the alert email)
parser.add_argument('-k', '--keystring', type=str, required=True)     # string to find in pulse log
parser.add_argument('-o', '--timeoutsec', type=int, required=True)    # seconds until we alert and bail out
parser.add_argument('-c', '--callbackuser', type=str, required=True)  # user@target to run callback script
parser.add_argument('-b', '--callbackpath', type=str, required=True)  # path of the callback script
parser.add_argument('-s', '--mailserver', type=str)
parser.add_argument('-p', '--mailport', type=int, default=25)
parser.add_argument('-t', '--sendto', action='append', type=str)
parser.add_argument('-f', '--sendfrom', type=str)
args = parser.parse_args()

vip = args.vip
username = args.username
domain = args.domain
jobid = args.jobid
jobname = args.jobname
keystring = args.keystring
timeoutsec = args.timeoutsec
callbackuser = args.callbackuser
callbackpath = args.callbackpath
mailserver = args.mailserver
mailport = args.mailport
sendto = args.sendto
sendfrom = args.sendfrom

# authenticate
apiauth(vip, username, domain)

# track seconds passed
s = 0
# count tasks where preprocess is finished
x = 0
preprocessFinished = True

# new job run startTime should be in the last 60 seconds
now = datetime.now()
nowUsecs = dateToUsecs(now.strftime("%Y-%m-%d %H:%M:%S"))
startTimeUsecs = nowUsecs - 60000000

# get latest job run
run = None
print("waiting for new run...")
while run is None and s < timeoutsec:
    try:
        run = api('get', 'protectionRuns?jobId=%s&numRuns=1&startTimeUsecs=%s' % (jobid, startTimeUsecs))[0]
        runStartTimeUsecs = run['backupRun']['stats']['startTimeUsecs']
        # create a flag file for this run so we only run once
        if not os.path.exists(str(runStartTimeUsecs)):
            f = open(str(runStartTimeUsecs), 'w')
            f.write(str(runStartTimeUsecs))
            f.close()
        else:
            exit()
        stats = run['backupRun']['sourceBackupStatus']
        if run:
            print("found new run")
    except Exception as e:
        run = None
    sleep(1)
    s += 1

# wait until all tasks are finished preprocessing
print("monitoring tasks...")
while x < len(run['backupRun']['sourceBackupStatus']) and s < timeoutsec:
    sleep(1)
    s += 1
    if s > timeoutsec:
        break
    x = 0
    for source in run['backupRun']['sourceBackupStatus']:
        # get task monitor per source
        task = api('get', '/progressMonitors?taskPathVec=%s' % source['progressMonitorTaskPath'])
        try:
            # get pulse log messages
            eventmsgs = task['resultGroupVec'][0]['taskVec'][0]['progress']['eventVec']
            foundkeystring = False
            # check for key string in event messages
            for eventmsg in eventmsgs:
                if keystring in eventmsg['eventMsg']:
                    foundkeystring = True
            if foundkeystring is True:
                x += 1
            else:
                preprocessFinished = False
        except Exception as e:
            pass

if x >= len(run['backupRun']['sourceBackupStatus']):
    # we're good
    print('preprocessing complete')
else:
    # we timed out - send an alert email
    print('we timed out')
    print('Sending report to %s...' % ', '.join(sendto))
    msg = email.message.Message()
    msg['Subject'] = "thaw timeout %s" % jobname
    msg['From'] = sendfrom
    msg['To'] = ','.join(sendto)
    msg.add_header('Content-Type', 'text')
    msg.set_payload("thaw timeout %s" % jobname)
    smtpserver = smtplib.SMTP(mailserver, mailport)
    smtpserver.sendmail(sendfrom, sendto, msg.as_string())
    smtpserver.quit()

# regardless - call the thaw script
os.system("ssh -t %s %s" % (callbackuser, callbackpath))
python
from graphite_feeder.handler.appliance.socket import energy_guard, presence
python
# https://atcoder.jp/contests/abc077/tasks/arc084_a
N = int(input())
a_arr = list(map(int, input().split()))
a_arr.sort()
b_arr = list(map(int, input().split()))
c_arr = list(map(int, input().split()))
c_arr.sort()


def find_least_idx(num: int, lst: list) -> int:
    # Smallest index whose value is strictly greater than num
    # (lst is sorted ascending; assumes such an element exists).
    n = len(lst)
    left = 0
    right = n - 1
    while left < right:
        mid = (left + right) // 2
        if lst[mid] > num:
            right = mid
            continue
        left = mid + 1
    return right


def find_most_idx(num: int, lst: list) -> int:
    # Largest index whose value is strictly less than num
    # (lst is sorted ascending; assumes such an element exists).
    n = len(lst)
    left = 0
    right = n - 1
    while left < right:
        mid = (left + right) // 2 + 1
        if lst[mid] < num:
            left = mid
            continue
        right = mid - 1
    return left


total = 0
for b in b_arr:
    if a_arr[0] >= b:
        continue
    if c_arr[N - 1] <= b:
        continue
    a_most = find_most_idx(b, a_arr)
    c_least = find_least_idx(b, c_arr)
    total += (a_most + 1) * (N - c_least)
print(total)
python
from refiner.generic.refiner import Refiner
from topology.communication import Communication
from topology.node import Node, Direction
from topology.microToscaTypes import NodeType, RelationshipProperty
from topology.protocols import IP
import ipaddress
import copy


class DynamicDiscoveryRecognizer(Refiner):

    def __init__(self):
        pass

    @classmethod
    def recognize(cls, nodes: dict, args: dict):
        for nodeName, node in nodes.items():
            if node.getType() is NodeType.MICROTOSCA_NODES_MESSAGE_ROUTER:
                continue
            edges = node.getEdges(Direction.OUTGOING)
            for adjacentName in edges.keys():
                if nodes[adjacentName].getType() is NodeType.MICROTOSCA_NODES_MESSAGE_ROUTER or not node.getIsMicroToscaEdge(adjacentName):
                    continue
                communications = node.getCommunications(adjacentName)
                ipAddress = ''
                for communication in communications:
                    protocol = communication.getNetworkLayer()
                    actualIP = ''
                    if 'ip' in protocol and nodeName == protocol['ip'].getSenderHost():
                        assert adjacentName == protocol['ip'].getReceiverHost()
                        actualIP = copy.deepcopy(str(protocol['ip'].getReceiverIP()))
                    elif 'ip' in protocol and nodeName == protocol['ip'].getReceiverHost():
                        assert adjacentName == protocol['ip'].getSenderHost()
                        actualIP = copy.deepcopy(str(protocol['ip'].getSenderIP()))
                    if ipAddress == '':
                        ipAddress = actualIP
                    elif actualIP and actualIP != ipAddress:
                        node.addRelationshipProperty(adjacentName, RelationshipProperty.MICROTOSCA_RELATIONSHIPS_INTERACT_WITH_DYNAMIC_DISCOVERY_PROPERTY)
                        break
python
import __init__

from rider.utils.commands import main

main()
python
#!/usr/bin/env python3
# Paulo Cezar, Maratona 2016, huaauhahhuahau
s = ''.join(c for c in input() if c in "aeiou")
print("S" if s == s[::-1] else "N")
python
# import PIL and numpy
from PIL import Image
import numpy as np

# open the images by providing their paths
img1 = Image.open("")
img2 = Image.open("")

# create arrays of the above images
img1_array = np.array(img1)
img2_array = np.array(img2)

# collage of 2 images:
# arrange the arrays of the two images in a single row
imgg = np.hstack([img1_array, img2_array])

# create an image from the imgg array
final_img = Image.fromarray(imgg)

# provide the path and file name deciding where you want to save it
final_img.save("")
print("Image saved")
python
import argparse import cv2 from glob import glob from itertools import product import numpy as np import os from tqdm import tqdm from scipy.special import erf import torch import torch.nn as nn from model.model import CompModel import arithmetic_coding as ac MAX_N = 65536 TINY = 1e-10 def load_img(path): img = cv2.imread(path).astype(np.float32)[..., ::-1] img = ((img / 255.) - 0.5) * 2. img = torch.from_numpy(img.transpose((2, 0, 1))).unsqueeze(0) _, _, h, w = img.size() h_, w_ = h, w if h % 16 != 0: h_ = (h // 16 + 1) * 16 if w % 16 != 0: w_ = (w // 16 + 1) * 16 img_ = torch.zeros((1, 3, h_, w_)) img_[:, :, :h, :w] = img return img_, h_ - h, w_ - w def load_model(args): args.device = 'cpu' comp_model = CompModel(args) state_dict = torch.load(args.model_path, map_location='cpu') comp_model.load_state_dict(state_dict['comp_model']) comp_model.eval() return comp_model def compress(args): comp_model = load_model(args) os.makedirs('outputs/binary', exist_ok=True) if os.path.isdir(args.image_path): pathes = glob(os.path.join(args.image_path, '*')) else: pathes = [args.image_path] for path in pathes: bitpath = "outputs/binary/{}.pth".format(os.path.basename(path).split('.')[0]) img, pad_h, pad_w = load_img(path) _, _, H, W = img.size() with torch.no_grad(): y_hat, p = comp_model.compress(img) _, yC, yH, yW = y_hat.size() min_val = int(torch.max(torch.abs(y_hat))) p = p.detach().numpy() p = np.reshape(p, (1, args.gmm_K, args.bottleneck*3, yH, yW)) y_mu = p[:, :, :args.bottleneck, :, :] + min_val y_std = np.abs(p[:, :, args.bottleneck:2*args.bottleneck, :, :]) y_w = p[:, :, 2*args.bottleneck:, :, :] y_w = np.exp(y_w) / np.sum(np.exp(y_w), axis=1) #softmax # store side information fileobj = open(bitpath, mode='wb') img_size = np.array([W, H], dtype=np.uint16) img_size.tofile(fileobj) pad_size = np.array([pad_w, pad_h], dtype=np.uint8) pad_size.tofile(fileobj) min_value = np.array([min_val], dtype=np.uint8) min_value.tofile(fileobj) fileobj.close() print('=============================================================') print(os.path.basename(path)) with open(bitpath, 'ab+') as fout: bit_out = ac.CountingBitOutputStream( bit_out=ac.BitOutputStream(fout)) enc = ac.ArithmeticEncoder(bit_out) samples = np.arange(0, min_val*2+1).reshape(-1, 1) with tqdm(product(range(yH), range(yW)), ncols=60, total=yH*yW) as qbar: for h, w in qbar: for ch in range(yC): weight = y_w[:, :, ch, h, w] mean = y_mu[:, :, ch, h, w] std = y_std[:, :, ch, h, w] high = weight * 0.5 * (1 + erf((samples + 0.5 - mean) / ((std + TINY) * 2 ** 0.5))) low = weight * 0.5 * (1 + erf((samples - 0.5 - mean) / ((std + TINY) * 2 ** 0.5))) pmf = np.sum(high - low, axis=1) pmf_clip = np.clip(pmf, 1.0/MAX_N, 1.0) pmf_clip = np.round(pmf_clip / np.sum(pmf_clip) * MAX_N).astype(np.uint32) symbol = np.int(y_hat[0, ch, h, w].item() + min_val) freq = ac.SimpleFrequencyTable(pmf_clip) enc.write(freq, symbol) enc.finish() bit_out.close() real_bpp = os.path.getsize(bitpath) * 8 print('bitrate : {0:.4}bpp'.format(real_bpp / H / W)) print('=============================================================\n') if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('model_path') parser.add_argument('image_path') parser.add_argument('--bottleneck', type=int, default=32) parser.add_argument('--main_channel', type=int, default=192) parser.add_argument('--gmm_K', type=int, default=3) args = parser.parse_args() compress(args)
python
# This code is generated by [Atcoder_base64](https://github.com/kyomukyomupurin/AtCoder_base64) from base64 import b85decode import subprocess from pathlib import Path from zlib import decompress binary = "c%0=~Z){uD6~DIg$7xf?EiD9ERs&&bS7PF}DJfls?ZoXfIB96o(v?4#7u$)w`A=p)mxLi^skEwB(nV7@`hiJB>;pr5fN4VefV!Xz#-wcuQ>P)pq>fQ7>Bh)3sImd^&VBEm=l8DPqZ`_{-N-rb{N6d|-h1xfbKbWivAs@*gJ5xyuMoyP+@kS8j-T78ivZR`THybUWD{uuzQG_T(q)I4y7hWAy&k8=i*yq)Q-^^z68lnaHB&--x*lt5YK?}b*7!{HIbQFQ%dF?*dS$Lx=JH4h*F%c^Yv8{Tj*GOPm}vv2Lzk2Ud;V~h#)~vY%oO$R1-&NnpBFEu2Y7vUemTqAixkJ<_Dp(o*UsBBiOx(qSDNgc?Ag`1Yp1`M_itC*<mIya1`n$8%&q7u_I^Fb*!!i(*iFwXr(dqV``Wh19Vh2MKKc9!ChH-!aU-M)o*Pt*mBR7Z#tmm%`|SF64u0z_@E91p*#iHyjr?5>Yy52+`4?>PlQ#G@Hug6YoPp+Yzm2@tMt+YCK4H@yHd;4>mxuU#!{x9I>Rn6PNzkiV^WsE+c~Ddm@on4K*zY2gj%V{ZT2$nMLMfrqkpY@W7Lw!XqLM6(4D@C4x#WmEnn`N!tlu0Nyk}VHjw@3W$!I*%lgQIY(urg)5@)Z5m7a9INJr&ja%VJ`R{8^xqks=9fnAg;sY3n{c$c7Yxu_gf<?}lxN{XgyG#l7K2Q<4h4Q3)-xO*&9DyF!}Ofd>}1M!qxph`hbD@9O@8~|(w)Sr&id{HSR<!m%QLL<Z7bm$(-CiY<ycgFK2h50Ow?ut*!R92FcX@xUm>71NNKMG!;Opd!UE2nd0Uo0B#quc%4{oUI0;lp%?f2ThXFtUWLG#CFhz|X0VqZ8JUwhA5a*X^jyo6-&G7ABSD<LXdlYx{(}{FIZe+$pZ3Z2hjA**nSQalgR*FUaT5gn)PPb7fM%372<!%{cP&Z5nMNwS9z@w+wiT0l#R#R~hgH1HRgTFB$Mw1HNp)*BEeI2kb0rGvH^rd^4%-m8_gI;4TCHoB?kz;4d0*<9YW}18&^!XAO9>0e{1QyA8N>?5}R=RO6*)ACZpFD$eS>bnNHuIa2K|093tIf&XorgYd-o6jNEeScP)y*BH;bwm7fyql{-mvN)^qM;On#xcH*VA7DJ|+TuBtznk%_ON%oqA7(u3%3@jN?*x8q2HP1Lk*2%%fO%=UcLv@ZWDQ^VJ_t&c*Q8TKIu%@qo%&(p637l5e@{6aDnHiILlj@A?Yz>_CsjUJ*sTixVj;vRbTTmeVCaFlSmn<P6p-kthK}=WhDg&J*k%s~yQJOo%4;eJQq}pk^Gknuc_9qKV>7DXrhBh-`^Z9&89dedQWN+P?1F)p+2eHgGBkAY-4!Muct2Ws^C7A7u5|3ZrJ<47bmI|L$@H2(sa5uFVtw9l2=qQ|+uW~0Q>u~10wcSZl?^aV{w8ghTB;Y@HkX-S<~RlUs8(s1k*zCIWl4JVZ+A(rF1sYh>(ZMm%6c%+$PKuw7su3kP`}(8g~pQ7?cmtn^NsK*RW2#5(5=hBEM%b*3n^&g>y0;ooa4bcq5j2hGPn5KHb0>b*Gy$;uEw=(CZYR6hoY6=hYp6I@3&rYvJsr_Tmd>9s(c!)T#0SHr26#LPhAV+pMpb=zpwZLZ=;D=<zKPNmHx^Hq3Zf~q+@dqY4;yWmzlfohfxeY7<wo~=VoeE{$q~MZFM$l`<Rr>Wb(K9?#mZ4iJKU<(X~h7>tPdAc?RgyKwkm+0?-AZUxtpahmpzvO#uG@bn^nx4A9#Ee+Q`I(C#s;I37Jf9Ftzh#@1%{jHB7haP{sA`r8ca&RU+34lZB*q*_%S>+$aOwBOyf`Vn`T+;zjAFYV~K8THwn@hy-uu4lF<Kfv^%KHOin5bd0Q1!PY%Anfrz>D=#WKjG^4_#R*7Y47uR!yb2NwPyEe`1zp!ekR-R@!scgt9AVZem!6}$!&bc8TPcFc11kC>4vap%ae_gr|U$M<moxqJm3iyJUt;#SIDy^?D2tISgk^B-zV^U80;AL@Bf#A=OLbdcqkd;8YAuI4|o<W^GzMkpbK38dd}ndi(wj`+c$8E=PkxlICnl;spc8Q`#qjDL4FwF88hoNHJhB_?ZY#)ozo4(TyQRy`8kAR{~V{d9`MY%j?<xLGcGlm+wJD{qdT$wFA%fb$5pL=4i2qwewlZ)%6V+p66Y`Q_y6R)@ofHI7Cnv>$`^SDc5%9w(?gt&ae9=~6UNQpa}>O1?d$8i)3@c|Xep<Ze1R_i4u4l?pro>a#{)h7uAPquw(=KOv*`k__gE};b}t>V#9c(O#2fVaX~rA%d1=O*^!aPXoAq%u<8FOEnDG{U-kI@L`usHGtMzqi##@QkVtib%nrAF=4>@Ovx9RKNjC;vNOMI=KH!$PtNN}|kevQ75nDO=c{$a*9Sk7M;xmI8AW*k<x*9x!2m1g|<I*cv*2J-i+h_ijb34X)<)z9%8Rh@Qn?^>U(&&q8!cJAPIW?IbnZjR3ic#Pvj@bf6IcSxu=r#{#F^LyM*P_XliDqlY?cI}^4<=2tNTE#ck^1o64*Uytbs&?wf_p<u`n!5IX%yF@u9tX2iKOZ`5@)*5pe0d%53iIJfCyP_-=TE<ym#EEuT=v5@c^-C7tTU{y(;Uw;JI?F$eGtoIHtk$-GQ560KIO2^pZwUy&WuxQ59V9(uJ|h(`QNJY_4~|Pr`CSaZ#O!v?F`!BMW=P1=i5$gTtxq0VKN|pc&!<K%BkUEd!Dta_YL#|_gj+VZ#k{=S08fuc`mQ#7G2i%Z*f`M*=~c!Z0r!0i1Ekcgzk&&4~Jru7E7Zvs^w=`h9}lTX*$mmN+e!Tib`p0%pWJU6c|;qG|p08MM7yJPscO)Q8_~sO1@B}a%qyp^Vx|^Qb{J@fVGrh8KN|m3k7+KCUZ(*ii{QHY?3BQ+3Xak7&tYtWfG-y?}5-jghmGYS$dA{8$3uO5(lOJ1BCW}bucs#?SpqF<%Ahfvr@Vf_V3+092ub_p>QlhjXAeq(3qH_$%L%Pghuy+ej=TtrD8H6q`kNX&vJnSig}una|taiSWC|dmOmq7Xh^XM#y?+zPpFxW4D_KpuaaOiX|k-HP1aKE8_yex6*FhzK+w@*5udH|kD|7kT#Oe>YTG_LQCpK`Q?EWTDUMlv3fL~qYn972H<b8`Q&~kG1*#M@n!=}a4oprEe=e^i{h@HQQ<29xH=Zl`M@#8UqBET!DwmRrDdJB|<-ndsm4f!>XtGdD=W`~8!n;B;BQrt%G?7tQmEbJ)=pWC+1I#M;uX@E_$g8v4pG@&3m`W6CoTjR+9!&$Ep$0jdj>Chh4g*R2unA;g!CGxV|6O76Alt%mf3*9~hQ%T9w-7A`Lw)f(mQRouuiXJbAMf`_JFcS7@())a219-Ed(;+
=<Mmy<HV+alMn!#$7m${DhuJmXVbI5T@(?k9yTZ5v=^4RcqW>NuT8xSM7=Ix33GEj?Cz7udEoMa?;}oPrq}F~Xsqg=zMDr=?V?3kB(++d}V*j7u`eDvv+=CS3c&2}?g+9Aq)xP1?@)$@l-p3-2e?`#8I0|W5koOAojG&M47E;k)Zh`(l(8qft((OY0={2+DSwSD~i%5lVqOSixBIfmr_gtjn9YxfCNzk|Zo@}T8GeIBYMx6>1Ui9ZLiS`{A@4>b2%Nj5GOZ5Nmi1vL2^)c?4Tw`6IsL$e<CWC(M`#lGE$C?B^Y}Z+$^<T_mmH9`X@>S};Ea>a^XRo;)Q6I|^$53CtmvhB-!T&))|07<%n5;qlP64;dM_T1;gIs@!J8mPlC9eM#L)(p|i!c0d2rtF`bFGd3Gd!`mBH*I_zX5t8jN<" Path("077b5b43ed6ae0f2ad2b26d4f6fb1be45713b723ae814448a294f8d77118b1e9.bin").write_bytes(decompress(b85decode(binary))) Path("077b5b43ed6ae0f2ad2b26d4f6fb1be45713b723ae814448a294f8d77118b1e9.bin").chmod(0o755) subprocess.run("./077b5b43ed6ae0f2ad2b26d4f6fb1be45713b723ae814448a294f8d77118b1e9.bin") # Original source code: """ #include <iostream> using namespace std; int main() { cout << "Hello, World!" << endl; return 0; } """
python
from django.db import models
from django.contrib.auth.models import User


class Duck(models.Model):
    color = models.CharField(max_length=30, default='yellow')
    model = models.CharField(max_length=30)
    price = models.FloatField(default=0)
    # on_delete is required on ForeignKey from Django 2.0 onwards;
    # SET_NULL is assumed here to match null=True.
    owner = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
python
from polyphony import testbench def if29(p0, p1, p2): x = 0 if p0 == 0: pass elif p0 == 1: if p1 == 0: if p2 == 0: x = 10 elif p1 == 1: pass elif p1 == 2: pass else: return -1 #x = -1 return x @testbench def test(): assert 0 == if29(0, 0, 0) assert 10 == if29(1, 0, 0) assert 0 == if29(1, 0, 1) assert 0 == if29(1, 1, 0) assert 0 == if29(1, 2, 0) assert -1 == if29(1, 3, 0) assert 0 == if29(2, 3, 0) test()
python
import json

from django.core.management.base import BaseCommand

from ...models import Item


# Command class that extends BaseCommand
class Command(BaseCommand):
    # Message shown by `python manage.py help import_item`
    help = 'Create Item from json file'

    def remove_null(self, value, default):
        if value is None:
            return default
        return value

    # Method called when the command is executed
    def handle(self, *args, **options):
        # Open the file
        with open('web.json', 'r') as file:
            # Load the JSON
            data = json.load(file)

            count = 0
            # Process the fetched records one at a time
            for item_obj in data:
                if not item_obj['number']:
                    continue

                # Save the Item
                item = Item()
                item.set_number = item_obj['number']
                item.name = self.remove_null(item_obj['name'], '')
                item.image_url = self.remove_null(item_obj['image'], '')
                item.rate = self.remove_null(item_obj['rating'], 0.0)
                item.piece_count = self.remove_null(item_obj['pieces'], 0)
                item.minifig_count = self.remove_null(item_obj['minifigs'], 0)
                item.us_price = self.remove_null(item_obj['us_price'], 0.0)
                item.want_it_count = self.remove_null(item_obj['want_it'], 0)
                item.owner_count = self.remove_null(item_obj['owner'], 0)
                item.save()

                count += 1
                print('Create Item: {0}: {1}'.format(item.id, item.name))

            print('{} items have been created.'.format(count))
python
#!/usr/bin/python # encoding: utf-8 from helper import * import cv2 import numpy as np import os import pickle # https://klassenresearch.orbs.com/Plotting+with+Python #import matplotlib.rc # Make use of TeX #rc('text',usetex=True) # Change all fonts to 'Computer Modern' #rc('font',**{'family':'serif','serif':['Computer Modern']}) fileName = "1" cap = cv2.VideoCapture("danu1.mp4") # dataLog = pickle.load( open( "cb.p", "rb" ) ) # dataLog2 = pickle.load( open( "cb.p", "rb" ) ) dataLog = { 'videoTimestamp' : '', 'pos' : '' } def nothing(x): pass cv2.namedWindow('Trackbar') #cap.set(3,320); #cap.set(4,240); # ilowH = 20 # ilowS = 110 # ilowV = 130 # ihighH = 48 # ihighS = 176 # ihighV = 255 H_bawah = 20 H_atas = 48 S_bawah = 110 S_atas = 176 V_bawah = 130 V_atas = 255 ukuran = 0 cv2.createTrackbar('H_bawah','Trackbar',H_bawah,255,nothing) cv2.createTrackbar('H_atas','Trackbar',H_atas,255,nothing) cv2.createTrackbar('S_bawah','Trackbar',S_bawah,255,nothing) cv2.createTrackbar('S_atas','Trackbar',S_atas,255,nothing) cv2.createTrackbar('V_bawah','Trackbar',V_bawah,255,nothing) cv2.createTrackbar('V_atas','Trackbar',V_atas,255,nothing) cv2.createTrackbar('ukuran','Trackbar',ukuran,255,nothing) def my_mouse_callback(event,x,y,flags,param): global hsv if event == cv2.EVENT_LBUTTONUP: print("warna:") print(hsv[y,x]) cv2.setTrackbarPos('H_bawah', 'Trackbar', hsv[y,x][0]-25) cv2.setTrackbarPos('H_atas', 'Trackbar', hsv[y,x][0]+25) cv2.setTrackbarPos('S_bawah', 'Trackbar', hsv[y,x][1]) cv2.setTrackbarPos('V_bawah', 'Trackbar', hsv[y,x][2]) if event == cv2.EVENT_RBUTTONUP: cv2.waitKey(2000) cv2.namedWindow("frame") cv2.setMouseCallback("frame",my_mouse_callback) tr2 = 0 dataLog['videoTimestamp'] = [] dataLog['pos'] = [] first = True while True: elapsedTime = cap.get(cv2.CAP_PROP_POS_MSEC)/1000. _, frame = cap.read() _, frame2 = cap.read() try : hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) except cv2.error: break H_bawah = cv2.getTrackbarPos('H_bawah','Trackbar') S_bawah = cv2.getTrackbarPos('S_bawah','Trackbar') V_bawah = cv2.getTrackbarPos('V_bawah','Trackbar') H_atas = cv2.getTrackbarPos('H_atas','Trackbar') S_atas = cv2.getTrackbarPos('S_atas','Trackbar') V_atas = cv2.getTrackbarPos('V_atas','Trackbar') ukuran = cv2.getTrackbarPos('ukuran','Trackbar') batas_atas = np.array([H_atas,S_atas,V_atas]) batas_bawah = np.array([H_bawah,S_bawah,V_bawah]) mask = cv2.inRange(hsv, batas_bawah, batas_atas) kernel = np.ones((10,10), np.uint8) hasil_dilasi = cv2.erode(mask, kernel) kernel2 = np.ones((10,10), np.uint8) hasil_erosi = cv2.erode(hasil_dilasi, kernel2) x, y, w, h = cv2.boundingRect(hasil_erosi) #print(x,y) if w*h>ukuran: cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 1) try : res = cv2.bitwise_and(frame2,frame2, mask= hasil_dilasi) except cv2.error: break frame = cv2.resize(frame, (940,640)) cv2.imshow('frame',frame) mask = cv2.resize(mask, (940,640)) cv2.imshow('mask',mask) res = cv2.resize(res, (940,640)) cv2.imshow('res',res) dataLog['videoTimestamp'].append(elapsedTime) titik_lantai = 1308 skala_jarak = 7 #hasil hitung dari jarak asli terukur/jarak pixel terukur hh = (y)/skala_jarak hh = int(hh) hi = (x)/skala_jarak hi= int(hi) dataLog['pos'].append(( hi, hh )) k = cv2.waitKey(1) & 0xFF if k == 27: break cv2.destroyAllWindows() cap.release() pickle.dump( dataLog, open( "cbjd.p", "wb" ) ) #pickle.dump( dataLog2, open( "jadi2.p", "wb" ) )
python
""" Developed by ThaumicMekanism [Stephan K.] - all credit goes to him! """ import contextlib import sys from typing import Callable, List from tqdm.contrib import DummyTqdmFile import examtool.api.download from examtool.api.gradescope_upload import APIClient from examtool.api.extract_questions import ( extract_groups, extract_questions, extract_public, ) from fullGSapi.api.client import GradescopeClient from fullGSapi.api.assignment_grader import ( GS_Crop_info, GS_Outline, GS_assignment_Grader, GS_Outline_Question, GS_Question, GroupTypes, RubricItem, QuestionRubric, ) import os import time from tqdm import tqdm def_tqdm_args = {"dynamic_ncols": True} @contextlib.contextmanager def std_out_err_redirect_tqdm(): orig_out_err = sys.stdout, sys.stderr try: sys.stdout, sys.stderr = map(DummyTqdmFile, orig_out_err) yield orig_out_err[0] # Relay exceptions except Exception as exc: raise exc # Always restore sys.stdout/err if necessary finally: sys.stdout, sys.stderr = orig_out_err class GradescopeGrader: def __init__( self, email: str = None, password: str = None, gs_client: GradescopeClient = None, gs_api_client: APIClient = None, ): print(f"Setting up the Gradescope Grader...") if gs_client is None: gs_client = GradescopeClient() if gs_api_client is None: gs_api_client = APIClient() if (not email or not password) and ( not gs_client.is_logged_in() or not gs_api_client.is_logged_in() ): raise ValueError( "You must supply the username and password if you are not already logged into the passed in clients!" ) self.gs_client = gs_client self.gs_api_client = gs_api_client if email and password: if not gs_client.is_logged_in(): print(f"Logging into the normal Gradescope API...") self.gs_client.log_in(email, password) if not self.gs_api_client.is_logged_in(): print(f"Logging into the full Gradescope API...") self.gs_api_client.log_in(email, password) print(f"Finished setting up the Gradescope Grader") def main( self, exams: [str], out: str, name_question_id: str, sid_question_id: str, gs_class_id: str, gs_assignment_id: str = None, # If none, we will create a class. gs_assignment_title: str = "Examtool Exam", emails: [str] = None, blacklist_emails: [str] = None, email_mutation_list: {str: str} = {}, question_numbers: [str] = None, blacklist_question_numbers: [str] = None, custom_grouper_map: { str: Callable[[str, GS_Question, dict, dict], "QuestionGrouper"] } = None, ): if gs_assignment_title is None: gs_assignment_title = "Examtool Exam" if not exams: raise ValueError( "You must specify at least one exam you would like to upload!" ) out = out or "out/export/" + exams[0] exam_json, email_to_data_map = self.fetch_and_export_examtool_exam_data( exams, out, name_question_id, sid_question_id, emails=emails, email_mutation_list=email_mutation_list, ) # Remove blacklisted emails if blacklist_emails is not None: for bemail in blacklist_emails: email_to_data_map.pop(bemail, None) # Create assignment if one is not already created. if gs_assignment_id is None: print("Creating the gradescope assignment...") outline_path = f"{out}/OUTLINE.pdf" gs_assignment_id = self.create_assignment( gs_class_id, gs_assignment_title, outline_path ) if not gs_assignment_id: raise ValueError( "Did not receive a valid assignment id. Did assignment creation fail?" 
) print(f"Created gradescope assignment with id {gs_assignment_id}!") else: print(f"Using assignment ({gs_assignment_id}) which was already created!") # Lets now get the assignment grader grader: GS_assignment_Grader = self.get_assignment_grader( gs_class_id, gs_assignment_id ) # Now that we have the assignment and outline pdf, lets generate the outline. print("Generating the examtool outline...") examtool_outline = ExamtoolOutline( grader, exam_json, [name_question_id, sid_question_id] ) # Finally we need to upload and sync the outline. print("Uploading the generated outline...") self.upload_outline(grader, examtool_outline) # We can now upload the student submission since we have an outline print("Uploading student submissions...") failed_uploads = self.upload_student_submissions( out, gs_class_id, gs_assignment_id, emails=email_to_data_map.keys() ) # Removing emails which failed to upload if failed_uploads: print( f"Removing emails which failed to upload. Note: These will NOT be graded! {failed_uploads}" ) for email in tqdm(failed_uploads, **def_tqdm_args): email_to_data_map.pop(email) # For each question, group, add rubric and grade print("Setting the grade type for grouping for each question...") gs_outline = examtool_outline.get_gs_outline() self.set_group_types(gs_outline) # Fetch the student email to question id map print("Fetching the student email to submission id's mapping...") email_to_question_sub_id = grader.email_to_qids() # Check to see which emails may not be in the Gradescope roster and attempt to correct self.attempt_fix_unknown_gs_email( email_to_question_sub_id, email_to_data_map, name_question_id=name_question_id, sid_question_id=sid_question_id, ) # Finally we can process each question print("Grouping and grading questions...") for qid, question in tqdm( list(gs_outline.questions_iterator()), desc="Questions Graded", unit="Question", **def_tqdm_args, ): if ( question_numbers is not None and qid not in question_numbers or blacklist_question_numbers is not None and qid in blacklist_question_numbers ): tqdm.write(f"[{qid}]: Skipping!") continue tqdm.write(f"[{qid}]: Processing question...") try: self.process_question( qid, question.get_gs_question(), email_to_data_map, email_to_question_sub_id, name_question_id, sid_question_id, custom_grouper_map, ) except Exception as e: import traceback traceback.print_exc(file=tqdm) tqdm.write(str(e)) def add_additional_exams( self, exams: [str], out: str, name_question_id: str, sid_question_id: str, gs_class_id: str, gs_assignment_id: str, emails: [str] = None, blacklist_emails: [str] = None, email_mutation_list: {str: str} = {}, question_numbers: [str] = None, blacklist_question_numbers: [str] = None, custom_grouper_map: { str: Callable[[str, GS_Question, dict, dict], "QuestionGrouper"] } = None, ): """ If emails is None, we will import the entire exam, if it has emails in it, it will only upload submissions from the students in the emails list contained in the exams list. If the student has submissions in multiple exams, the tool will warn you and ask which exam you would like to use as the student submission. """ if not exams: raise ValueError( "You must specify at least one exam you would like to upload!" 
) if email_mutation_list is None: email_mutation_list = {} out = out or "out/export/" + exams[0] exam_json, email_to_data_map = self.fetch_and_export_examtool_exam_data( exams, out, name_question_id, sid_question_id, emails=emails, email_mutation_list=email_mutation_list, ) # Remove blacklisted emails if blacklist_emails is not None: for bemail in blacklist_emails: email_to_data_map.pop(bemail, None) # Lets now get the assignment grader grader: GS_assignment_Grader = self.get_assignment_grader( gs_class_id, gs_assignment_id ) # Now that we have the assignment and outline pdf, lets generate the outline. print("Generating the examtool outline...") examtool_outline = ExamtoolOutline( grader, exam_json, [name_question_id, sid_question_id] ) # Merge the outline with the existing one outline = grader.get_outline() if not outline: raise ValueError("Failed to fetch the existing outline") examtool_outline.merge_gs_outline_ids(outline) # We can now upload the student submission since we have an outline print("Uploading student submissions...") failed_uploads = self.upload_student_submissions( out, gs_class_id, gs_assignment_id, emails=email_to_data_map.keys() ) # Removing emails which failed to upload if failed_uploads: print( f"Removing emails which failed to upload. Note: These will NOT be graded! {failed_uploads}" ) for email in failed_uploads: email_to_data_map.pop(email) # Fetch the student email to question id map print("Fetching the student email to submission id's mapping...") email_to_question_sub_id = grader.email_to_qids() # Check to see which emails may not be in the Gradescope roster and attempt to correct self.attempt_fix_unknown_gs_email( email_to_question_sub_id, email_to_data_map, name_question_id=name_question_id, sid_question_id=sid_question_id, ) # Finally we can process each question print("Grouping and grading questions...") gs_outline = examtool_outline.get_gs_outline() for qid, question in tqdm( list(gs_outline.questions_iterator()), desc="Questions Graded", unit="Question", **def_tqdm_args, ): if ( question_numbers is not None and qid not in question_numbers or blacklist_question_numbers is not None and qid in blacklist_question_numbers ): tqdm.write(f"[{qid}]: Skipping!") continue tqdm.write(f"[{qid}]: Processing question...") try: self.process_question( qid, question.get_gs_question(), email_to_data_map, email_to_question_sub_id, name_question_id, sid_question_id, custom_grouper_map, ) except Exception as e: import traceback traceback.print_exc(file=tqdm) tqdm.write(str(e)) def fetch_and_export_examtool_exam_data( self, exams: [str], out: str, name_question_id: str, sid_question_id: str, emails: [str] = None, email_mutation_list: {str: str} = {}, ): """ Fetches the submissions from the exams in the exams list. If the emails list is None, it will fetch all emails, if it has emails in it, it will only return data for those emails. The mutation step occurres after the specific emails selection stage if applicable. The mutation list comes in the form of current email to new email. Returns: exam_json - The json of the exam email_to_data_map - the mapping of emails to their data. """ if not exams: raise ValueError( "You must specify at least one exam you would like to upload!" 
) if email_mutation_list is None: email_mutation_list = {} print("Downloading exams data...") exam_json = None email_to_data_map = {} email_to_exam_map = {} first_exam = True for exam in exams: tmp_exam_json, tmp_template_questions, tmp_email_to_data_map, tmp_total = examtool.api.download.download( exam ) # Choose only the emails we want to keep. if emails: for email in list(tmp_email_to_data_map.keys()): if email not in emails: tmp_email_to_data_map.pop(email, None) # Next, we want to mutate any emails for orig_email, new_email in email_mutation_list.items(): if orig_email not in tmp_email_to_data_map: print( f"WARNING: Could not perform mutation on email {orig_email} (to {new_email}) because it does not exist in the data map!" ) continue if new_email in tmp_email_to_data_map: print( f"Could not mutate email {new_email} (from {orig_email}) as the original email is already in the data map!" ) continue tmp_email_to_data_map[new_email] = tmp_email_to_data_map.pop(orig_email) # Finally, we should merge together the student responses. for email, data in tmp_email_to_data_map.items(): if email in email_to_data_map: print( f"WARNING: Student with email {email} submitted to multiple exams!" ) def prompt_q(): input_data = None while not input_data: print( f"Student's current responses are from {email_to_exam_map[email]}, would you like to use {exam} instead?" ) input_data = input("[y/n]> ") if input_data.lower() in ["y", "yes"]: return True if input_data.lower() in ["n", "no"]: return False print("Please type yes or no!") if not prompt_q(): continue email_to_exam_map[email] = exam email_to_data_map[email] = data print(f"[{exam}]: Exporting exam pdfs...") self.export_exam( tmp_template_questions, tmp_email_to_data_map, tmp_total, exam, out, name_question_id, sid_question_id, include_outline=first_exam, ) # Set global data for the examtool if first_exam: first_exam = False exam_json = tmp_exam_json # Lets finally clean up the student responses self.cleanse_student_response_data(email_to_data_map) return exam_json, email_to_data_map def attempt_fix_unknown_gs_email( self, email_to_question_sub_id, email_to_data_map, name_question_id, sid_question_id, ): def prompt_fix(old_email, name, sid): input_data = None while not input_data: print( f"Could not find {old_email} (name: {name}; sid: {sid}) in Gradescope! Please enter the Gradescope email of the student or `skip` to remove this student from autograding." ) input_data = input("> ") if "@" in input_data.lower(): return input_data if input_data.lower() in ["n", "no", "skip"]: return False print( "The input is not a valid email (you are missing the `@`)! If you would like to skip, type `skip` or `no`." ) remove_email = ["DUMMY"] map_email = {} while remove_email or map_email: remove_email = [] map_email = {} for email, data in email_to_data_map.items(): if email not in email_to_question_sub_id: responses = data["responses"] name = responses.get(name_question_id, None) sid = responses.get(sid_question_id, None) new_email = prompt_fix(email, name, sid) if new_email: map_email[email] = new_email else: print( f"Skipping {email}! This will remove the email from the data map." 
) remove_email.append(email) for email, new_email in map_email.items(): email_to_data_map[new_email] = email_to_data_map.pop(email) for email in remove_email: email_to_data_map.pop(email) def cleanse_student_response_data(self, email_to_data_map: dict): for email, data in email_to_data_map.items(): std_questions = data["student_questions"] std_responses = data["responses"] for question in std_questions: qid = question["id"] if qid not in std_responses: std_responses[qid] = ( [] if question["type"] in ["multiple_choice", "select_all"] else "" ) def export_exam( self, template_questions, email_to_data_map, total, exam, out, name_question_id, sid_question_id, include_outline=True, ): examtool.api.download.export( template_questions, email_to_data_map, total, exam, out, name_question_id, sid_question_id, include_outline=include_outline, ) def create_assignment(self, gs_class_id: str, gs_title: str, outline_path: str): assignment_id = self.gs_client.create_exam(gs_class_id, gs_title, outline_path) if not assignment_id: print("Failed to create the exam! Make sure it has a unique title.") return return assignment_id def get_assignment_grader( self, gs_class_id: str, assignment_id: str ) -> GS_assignment_Grader: return self.gs_client.get_assignment_grader(gs_class_id, assignment_id) def upload_outline( self, grader: GS_assignment_Grader, examtool_outline: "ExamtoolOutline" ): outline = grader.update_outline(examtool_outline.get_gs_outline()) if not outline: raise ValueError("Failed to upload or get the outline") examtool_outline.merge_gs_outline_ids(outline) def upload_student_submissions( self, out: str, gs_class_id: str, assignment_id: str, emails: [str] = None ): failed_emails = [] email_files = [] for file_name in os.listdir(out): if "@" not in file_name: continue student_email = file_name[:-4] if emails and student_email not in emails: continue email_files.append((file_name, student_email)) with std_out_err_redirect_tqdm() as orig_stdout: for file_name, student_email in tqdm( email_files, file=orig_stdout, unit="Submission", **def_tqdm_args ): if not self.gs_api_client.upload_submission( gs_class_id, assignment_id, student_email, os.path.join(out, file_name), ): failed_emails.append(student_email) return failed_emails def set_group_types(self, outline: GS_Outline, debug=True): questions = list(outline.questions_iterator()) with std_out_err_redirect_tqdm() as orig_stdout: for qid, question in tqdm( questions, file=orig_stdout, unit="Question", **def_tqdm_args ): self.set_group_type(question) def set_group_type(self, o_question: GS_Outline_Question): question_type = o_question.data.get("type") q = o_question.get_gs_question() q_type = GroupTypes.complex if question_type in ["select_all", "multiple_choice"]: q_type = GroupTypes.mc # if question_type in ["long_answer", "long_code_answer"]: # q_type = GroupTypes.non_grouped return q.set_group_type(q_type) def process_question( self, qid: str, question: GS_Question, email_to_data_map: dict, email_to_question_sub_id_map: dict, name_question_id: str, sid_question_id: str, custom_grouper_map: { str: Callable[[str, GS_Question, dict, dict], "QuestionGrouper"] }, ): # Group questions if question.data.get("id") in [name_question_id, sid_question_id]: tqdm.write("Skipping grouping of an id question!") return tqdm.write(f"[{qid}]: Grouping...") groups = self.group_question( qid, question, email_to_data_map, email_to_question_sub_id_map, custom_grouper_map, ) if groups: # Group answers tqdm.write(f"[{qid}]: Syncing groups on gradescope...") 
self.sync_groups_on_gradescope(qid, question, groups) tqdm.write(f"[{qid}]: Syncing rubric items...") rubric = self.sync_rubric(qid, question, groups) # in here, add check to see if qid is equal to either name or sid q id so we do not group those. tqdm.write(f"[{qid}]: Applying grades for each group...") self.grade_question(qid, question, rubric, groups) else: tqdm.write(f"[{qid}]: Failed to group question {qid}!") def group_question( self, qid: str, question: GS_Question, email_to_data_map: dict, email_to_question_sub_id_map: dict, custom_grouper_map: { str: Callable[[str, GS_Question, dict, dict], "QuestionGrouper"] }, ): if custom_grouper_map is not None: examtool_qid = question.data.get("id") if examtool_qid: return custom_grouper_map[qid]( qid, question, email_to_data_map, email_to_question_sub_id_map ) if qid in custom_grouper_map: return custom_grouper_map[qid]( qid, question, email_to_data_map, email_to_question_sub_id_map ) # Default handler qtype = question.data.get("type") if qtype in ["multiple_choice", "select_all"]: return self.group_mc_question( qid, question, email_to_data_map, email_to_question_sub_id_map ) elif qtype in ["short_answer", "short_code_answer"]: return self.group_short_ans_question( qid, question, email_to_data_map, email_to_question_sub_id_map ) elif qtype in ["long_answer", "long_code_answer"]: return self.group_long_ans_question( qid, question, email_to_data_map, email_to_question_sub_id_map ) else: tqdm.write( f"Unsupported question type {qtype} for question {question.data}!" ) return None def group_mc_question( self, qid: str, question: GS_Question, email_to_data_map: dict, email_to_question_sub_id_map: dict, custom_rubric_weights_fn: Callable[ [GS_Question, List[str], List[bool]], List[float] ] = None, ): data = question.data # This is a list of correct options from left (top) to right (bottom) correct_seq = [] seq_name = [] solution_options = data.get("solution", {}) if solution_options is not None: solution_options = solution_options.get("options", []) if solution_options is None: solution_options = [] all_options = [option.get("text") for option in data.get("options", [])] for option in all_options: correct_seq.append(option in solution_options) seq_name.append(option) # Add blank option correct_seq.append(None) seq_name.append("Blank") # Add student did not receive this question correct_seq.append(None) seq_name.append("Student did not receive this question") rubric_weights = ( self.get_basic_rubric_scores(question, seq_name, correct_seq) if custom_rubric_weights_fn is None else custom_rubric_weights_fn(question, seq_name, correct_seq) ) groups = QuestionGrouper( question, rubric=[ RubricItem(description=item[0], weight=item[1]) for item in zip(seq_name, rubric_weights) ], ) def list_to_str(l): s = "" for item in l: s += str(int(item)) return s eqid = question.data["id"] for email, data in email_to_data_map.items(): responses = data.get("responses", {}) response = responses.get(eqid) selection = [False] * len(correct_seq) if response is None: selection[-1] = True elif response == []: selection[-2] = True else: if not isinstance(response, list): response = [response] for i, option in enumerate(all_options): selection[i] = option in response s = list_to_str(selection) sid = email_to_question_sub_id_map[email][qid] if s not in groups: groups.add_group(QuestionGroup(s, selection)) groups.get_group(s).add_sid(sid) return groups def group_short_ans_question( self, qid: str, question: GS_Question, email_to_data_map: dict, email_to_question_sub_id_map: 
dict, lower_check: bool = True, custom_rubric_weights_fn: Callable[ [GS_Question, List[str], List[bool]], List[float] ] = None, strip_md_from_sol: bool = True, ): data = question.data # This is a list of correct options from left (top) to right (bottom) solution = data.get("solution", {}) if solution is not None: solution = solution.get("solution", {}) if solution is not None: solution = solution.get("text") if not solution: tqdm.write( f"[{qid}]: No solution defined for this question! Only grouping blank and std did not receive." ) solution = "Correct" correct_seq = [True] seq_name = [solution] # Add a wrong option correct_seq.append(None) seq_name.append("Incorrect") # Add blank option correct_seq.append(None) seq_name.append("Blank") # Add student did not receive this question correct_seq.append(None) seq_name.append("Student did not receive this question") rubric_weights = ( self.get_basic_rubric_scores(question, seq_name, correct_seq) if custom_rubric_weights_fn is None else custom_rubric_weights_fn(question, seq_name, correct_seq) ) groups = QuestionGrouper( question, rubric=[ RubricItem(description=item[0], weight=item[1]) for item in zip(seq_name, rubric_weights) ], ) # Process solution if lower_check: sol = solution.strip().lower() else: sol = solution.strip() if strip_md_from_sol: def strip_part(text, boundary): if text.startswith(boundary) and text.endswith(boundary): blen = len(boundary) return (text[blen:-blen], True) else: return (text, False) sol, replaced = strip_part(sol, "$") if not replaced: sol, replaced = strip_part(sol, "```") if not replaced: sol, replaced = strip_part(sol, "`") eqid = question.data["id"] for email, data in email_to_data_map.items(): responses = data.get("responses", {}) response = responses.get(eqid) selection = [False] * len(correct_seq) if response is None: selection[-1] = True response = "Student did not receive this question" elif response == "": selection[-2] = True response = "Blank" else: if solution is not None: same = None if lower_check: same = response.lower().strip() == sol else: same = response.strip() == sol if same: selection[0] = True else: selection[1] = True sid = email_to_question_sub_id_map[email][qid] if response not in groups: groups.add_group(QuestionGroup(response, selection)) groups.get_group(response).add_sid(sid) return groups def group_long_ans_question( self, qid: str, question: GS_Question, email_to_data_map: dict, email_to_question_sub_id_map: dict, ): """ We will only be grouping students who did not get the question or left it blank. 
""" data = question.data # This is a list of correct options from left (top) to right (bottom) correct_seq = [True] seq_name = ["Correct"] # Add blank option correct_seq.append(None) seq_name.append("Blank") # Add student did not receive this question correct_seq.append(None) seq_name.append("Student did not receive this question") rubric_weights = self.get_long_ans_rubric_scores( question, seq_name, correct_seq ) groups = QuestionGrouper( question, rubric=[ RubricItem(description=item[0], weight=item[1]) for item in zip(seq_name, rubric_weights) ], ) group_blank = QuestionGroup("Blank", [False, True, False]) groups.add_group(group_blank) group_sdnrtq = QuestionGroup( "Student did not receive this question", [False, False, True] ) groups.add_group(group_sdnrtq) eqid = question.data["id"] for email, data in email_to_data_map.items(): responses = data.get("responses", {}) response = responses.get(eqid) if not response: sid = email_to_question_sub_id_map[email][qid] if response is None: group_sdnrtq.add_sid(sid) elif response == "": group_blank.add_sid(sid) return groups def sync_groups_on_gradescope( self, qid: str, question: GS_Question, groups: "QuestionGrouper" ): """ Groups is a list of name, submission_id, selected answers """ failed_groups_names = [] i = 1 failed = False while not question.is_grouping_ready(): timeout = 5 tqdm.write( f"[{qid}]: Question grouping not ready! Retrying in {timeout} seconds!" ) time.sleep(timeout) # print(f"[{qid}]: Question grouping not ready! Retrying in {timeout} seconds" + (" " * timeout), end="\r") # for i in range (timeout): # print(f"[{qid}]: Question grouping not ready! Retrying in {timeout} seconds" + ("." * (1 + i)), end="\r") # time.sleep(1) # failed = True # if failed: # print("") gradescope_groups = question.get_groups() def all_zeros(s: str): return s and all(v == "0" for v in s) def set_group(group, gs_group): group.set_id(gs_group.get("id")) for group in groups.get_groups(): g_name = group.get_name() for gs_group in gradescope_groups: if gs_group["question_type"] == "mc": # The question type is mc so lets group by the internal mc if g_name == "Blank": # This is the blank group, lets use the internal label to group if all_zeros(gs_group["internal_title"]): set_group(group, gs_group) else: flip_g_name = g_name[:-2][::-1] if gs_group["internal_title"] is not None: if ( flip_g_name == gs_group["internal_title"] and g_name[len(g_name) - 1] != "1" ): set_group(group, gs_group) else: if g_name == gs_group["title"]: set_group(group, gs_group) else: # The question type is not mc so we should group on title and internal title for blank. # The internal title should only say Blank for default blank grouped submissions. # We then check the normal title if this is not true if ( g_name == gs_group["internal_title"] or g_name == gs_group["title"] ): set_group(group, gs_group) max_attempts = 5 attempt = 1 for group in tqdm( groups.get_groups(), desc=f"[{qid}]: Syncing Groups", unit="Group", **def_tqdm_args, ): g_name = group.get_name() sids = group.get_sids() if not sids: # We do not want to create groups which no questions exist. continue group_id = group.get_id() while attempt < max_attempts: if not group_id: group_id = question.add_group(g_name) if group_id is None: attempt += 1 time.sleep(1) continue if not question.group_submissions(group_id, sids): tqdm.write( f"[{qid}]: Failed to group submissions to {group_id}. SIDS: {sids}" ) failed_groups_names.append(g_name) break else: tqdm.write(f"[{qid}]: Failed to create group for {g_name}! 
({groups})") failed_groups_names.append(g_name) # This is to decrease down stream errors for failed_group_name in failed_groups_names: groups.remove(failed_group_name) @classmethod def get_basic_rubric_scores(cls, question: GS_Question, group_names, correct_seq): scores = [] num_correct = sum([1 for correct in correct_seq if correct]) num_choices = sum([1 for correct in correct_seq if correct is not None]) points = question.data.get("points", 1) if points is None: points = 1 rubric_weight = 0 if num_correct != 0: rubric_weight = (1 / num_correct) * points for correct in correct_seq: if correct is None: scores.append(0) else: if correct: scores.append(rubric_weight) else: scores.append(-rubric_weight) return scores @classmethod def get_long_ans_rubric_scores( cls, question: GS_Question, group_names, correct_seq ): return [0] * len(correct_seq) def sync_rubric( self, qid: str, question: GS_Question, groups: "QuestionGrouper" ) -> QuestionRubric: rubric = QuestionRubric(question) if len(groups) == 0: return rubric qrubric: [RubricItem] = groups.get_rubric() if len(rubric) == 1: default_rubric_item = rubric.get_rubric_items()[0] if default_rubric_item.description == "Correct": first_item = qrubric[0] if not rubric.update_rubric_item( default_rubric_item, description=first_item.description, weight=first_item.weight, ): tqdm.write( f'[{qid}]: Failed to update default "Correct" rubric item!' ) # qrubric.remove(first_item) existing_rubric_items = rubric.get_rubric_items() existing_rubric_items_desc = [ item.description for item in existing_rubric_items ] for rubric_item in tqdm( qrubric, desc=f"[{qid}]: Syncing Rubric", unit="Rubric", **def_tqdm_args ): if rubric_item.description not in existing_rubric_items_desc: rubric.add_rubric_item(rubric_item) return rubric def grade_question( self, qid: str, question: GS_Question, rubric: QuestionRubric, groups: dict ): question_data = question.get_question_info() sub_id_mapping = {str(sub["id"]): sub for sub in question_data["submissions"]} for group in tqdm( groups.get_groups(), desc=f"[{qid}]: Grading", unit="Group", **def_tqdm_args ): group_sel = group.get_selected_items() group_sids = group.get_sids() if len(group_sids) > 0: sid = group_sids[0] if not sub_id_mapping[str(sid)]["graded"]: if not rubric.grade(sid, group_sel, save_group=True): tqdm.write(f"[{qid}]: Failed to grade group {group_name}!") class ExamtoolOutline: name_region = GS_Crop_info(1, 2.4, 11.4, 99, 18.8) sid_region = GS_Crop_info(1, 2.4, 18.9, 99, 28.7) def __init__( self, grader: GS_assignment_Grader, exam_json: dict, id_question_ids: [str] ): self.exam_json = exam_json self.gs_number_to_exam_q, self.gs_outline = self.generate_gs_outline( grader, exam_json, id_question_ids ) def get_gs_crop_info(self, page, question=None): return GS_Crop_info(page, 2, 2, 98, 98) def question_to_gso_question( self, grader: GS_assignment_Grader, page, question: dict ) -> GS_Outline_Question: weight = question.get("points") if not weight: weight = 0 return GS_Outline_Question( grader, None, [self.get_gs_crop_info(page, question=question)], title=question.get("name", ""), weight=weight, ) def generate_gs_outline( self, grader: GS_assignment_Grader, exam_json: dict, id_question_ids: [str] ): gs_number_to_exam_q = {} questions = [] page = 2 # Page 1 is an info page qid = 1 if exam_json.get("public"): prev_page = 1 pg = GS_Outline_Question( grader, None, [self.get_gs_crop_info(page, exam_json.get("public"))], title="Public", weight=0, ) sqid = 1 for question in extract_public(exam_json): question_id = 
question.get("id") if question_id in id_question_ids: print(f"Skipping {question_id} as it is an id question.") page += ( 1 ) # Still need to increment this as it is still on the exam pdf. continue pg.add_child(self.question_to_gso_question(grader, page, question)) gs_number_to_exam_q[f"{qid}.{sqid}"] = question sqid += 1 page += 1 if page != prev_page and len(pg.children) > 0: questions.append(pg) qid += 1 for group in extract_groups(exam_json): prev_page = page weight = group.get("points", "0") if not weight: weight = 0 g = GS_Outline_Question( grader, None, [self.get_gs_crop_info(page, group)], title=group.get("name", ""), weight=weight, ) sqid = 1 for question in extract_questions( group, extract_public_bool=False, top_level=False ): g.add_child(self.question_to_gso_question(grader, page, question)) gs_number_to_exam_q[f"{qid}.{sqid}"] = question sqid += 1 page += 1 if page != prev_page: questions.append(g) qid += 1 outline = GS_Outline(self.name_region, self.sid_region, questions) return (gs_number_to_exam_q, outline) def get_gs_outline(self): return self.gs_outline def merge_gs_outline_ids(self, outline: GS_Outline): self.gs_outline = outline for qnum, q in outline.questions_iterator(): q.data = self.gs_number_to_exam_q[qnum] def questions_iterator(self): yield from self.gs_outline.questions_iterator() class QuestionGroup: def __init__(self, name: str, selected_rubric_items: [bool], gid: str = None): self.name = name self.selected_rubric_items = ( selected_rubric_items ) # Bool array of selected items. self.gid = gid self.sids = set() def get_name(self): return self.name def get_id(self): return self.gid def set_id(self, gid: str): self.gid = gid def get_sids(self): return list(self.sids) def add_sid(self, sid: str): self.sids.add(sid) def add_sids(self, sids: [str]): self.sids = self.sids.union(sids) def get_selected_items(self): return self.selected_rubric_items class QuestionGrouper: def __init__( self, question: GS_Question, rubric: [RubricItem], # This is a list of rubric items. groups: {str: QuestionGroup} = None, ): self.groups = groups if not self.groups: self.groups = {} self.question = question self.rubric = rubric def get_groups(self): return self.groups.values() def get_group(self, name): return self.groups.get(name) def add_group(self, group: QuestionGroup): self.groups[group.get_name()] = group def remove(self, group_name): for g in self.groups: if g.get_name() == group_name: self.groups.remove(g) return def __len__(self): return len(self.groups) def get_rubric(self) -> [RubricItem]: return self.rubric def __contains__(self, key): return key in self.groups
python
from flask_wtf import FlaskForm from wtforms.validators import InputRequired from dmutils.forms.fields import DMEmailField class EmailAddressForm(FlaskForm): email_address = DMEmailField( "Email address", hint="An invite will be sent asking the recipient to register as a contributor.", validators=[ InputRequired(message="Email address must be provided") ] )
python
import RPi.GPIO as GPIO GPIO.setmode(GPIO.BCM) GPIO.setup(14, GPIO.OUT) GPIO.setup(15, GPIO.IN) try: while True: GPIO.output(14, GPIO.input(15)) finally: GPIO.output(14, 0) GPIO.cleanup()
python
# -*- coding: utf-8 -*- """ Sony .spimtx LUT Format Input / Output Utilities ================================================ Defines *Sony* *.spimtx* *LUT* Format related input / output utilities objects. - :func:`colour.io.read_LUT_SonySPImtx` - :func:`colour.io.write_LUT_SonySPImtx` """ from __future__ import division, unicode_literals import numpy as np import os import re from colour.io.luts import Matrix __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013-2018 - Colour Developers' __license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = '[email protected]' __status__ = 'Production' __all__ = ['read_LUT_SonySPImtx', 'write_LUT_SonySPImtx'] def read_LUT_SonySPImtx(path): array = np.loadtxt(path) array = array.reshape(3, 4) # TODO: Update with "develop" generic function. title = re.sub('_|-|\\.', ' ', os.path.splitext(os.path.basename(path))[0]) return Matrix(array, title) def write_LUT_SonySPImtx(matrix, path, decimals=6): if matrix.array.shape == (3, 4): array = matrix.array else: array = np.hstack([matrix.array, np.zeros((3, 1))]) np.savetxt(path, array, fmt='%.{0}f'.format(decimals).encode('utf-8'))
python
import jwt from os import environ from datetime import datetime, timedelta from flask_restful import (Resource, request) from models.user import UserModel, UserSchema from webargs.flaskparser import use_args from webargs import fields from flask import flash, redirect, url_for user_args ={'username': fields.Str(required=True), 'password': fields.Str(required=True), 'name': fields.Str(), 'email': fields.Str()} class UserRegister(Resource): @use_args(user_args, locations=('json', 'form')) def post(self, user_args): user_schema=UserSchema() user_data = user_args error = user_schema.validate(user_data) if error: if request.content_type == 'application/x-www-form-urlencoded': for message, message_value in error.items(): error_message = ''.join(message_value) return redirect(url_for('signup', error=error_message)) else: return {'status': 'fail', 'message': error}, 400 username_exist = UserModel.filter_and_find_first(username=user_data['username']) if username_exist: if request.content_type == 'application/x-www-form-urlencoded': return redirect(url_for('signup', error='username already exist')) else: return {'status': 'fail', 'message': 'username already exist'}, 409 email_exist = UserModel.filter_and_find_first(email=user_data['email']) if email_exist: if request.content_type == 'application/x-www-form-urlencoded': return redirect(url_for('signup', error='email already exist')) else: return {'status': 'fail', 'message': 'email already exist'}, 409 new_user = UserModel(**user_data) new_user.save_to_db() new_user_json = user_schema.dump(new_user).data if request.content_type == 'application/x-www-form-urlencoded': return redirect(url_for('index', error=request.args.get('error'))) else: return {'status': 'success', 'data': new_user_json}, 201 @staticmethod def put(): user_schema=UserSchema(partial={'password'}) user_data = request.get_json() error = user_schema.validate(user_data) if error: return {'status': 'fail','message': error}, 400 user_result = UserModel.filter_and_find_first(username=user_data['username']) if user_result: user_result.email = user_data['email'] user_result.name = user_data['name'] user_result.save_to_db() new_user_json = user_schema.dump(user_result).data return {'status': 'success', 'data': new_user_json}, 200 return {'status': 'fail', 'message': 'user does not exist'}, 404 class UserLogin(Resource): @use_args(user_args, locations=('json', 'form')) def post(self, args): user_schema=UserSchema(partial=('name', 'email')) user_data = args error = user_schema.validate(user_data) if error: if request.content_type == 'application/x-www-form-urlencoded': return redirect(url_for('index', error=error.get('username'))) else: return {'status': 'fail', 'message': error}, 400 user_exist = UserModel.filter_and_find_first(username=user_data['username'].lower(), password=user_data['password'].lower()) if not user_exist: if request.content_type == 'application/x-www-form-urlencoded': return redirect(url_for('index', error='username and password does not exist')) else: return {'status': 'fail', 'message': 'username and password does not exist'}, 409 user_data_json = UserSchema(exclude=('password',)).dump(user_exist).data key = environ.get('SECRET') payload = {'user': user_data_json, 'exp': datetime.utcnow() + timedelta(minutes=30)} token = jwt.encode(payload, key=key, algorithm='HS256').decode('utf-8') if request.content_type == 'application/x-www-form-urlencoded': return redirect(url_for('books_page')) else: return {'status': 'success', 'data': {'token': str(token), 'user': user_data_json}}
python
"""bsp methods""" import base64 import json import pprint import pyodata import sap.cli.core import sap.cli.helpers from sap import get_logger from sap.errors import ResourceAlreadyExistsError class CommandGroup(sap.cli.core.CommandGroup): """Management for BSP Applications""" def __init__(self): super().__init__('bsp') @CommandGroup.argument('--bsp', type=str, required=True, help='BSP ID') @CommandGroup.argument('--package', type=str, required=True, help='ABAP Package') @CommandGroup.argument('--app', type=str, required=True, help='Path to application packed in zip archive') @CommandGroup.argument('--corrnr', type=str, required=True, help='Transport Request to be used for application upload') @CommandGroup.command() def create(connection, args): """Creates the requested BSP application. Important: Target ABAP system needs following setup * update trnspace set editflag = 'X' role = 'P' license = '' sscrflag = 'X' where namespace = '/0CUST/' or namespace = '/0SAP/'. * table /IWFND/C_CONFIG je 'GATEWAY_VIRUSCAN_PROFILE'='-' """ # load zipped application from filesystem with open(args.app, 'rb') as file: app_data_archive = file.read() # convert raw zipped data to base54 encoding app_data_b64 = base64.b64encode(app_data_archive) # check if application exists try: connection.client.entity_sets.Repositories.get_entity(Name=args.bsp).execute() raise ResourceAlreadyExistsError except pyodata.exceptions.HttpError as ex: if ex.response.status_code != 404: raise ex app_data = { 'Name': args.bsp, 'Package': args.package, 'ZipArchive': app_data_b64.decode("utf-8"), } create_request = connection.client \ .entity_sets \ .Repositories \ .create_entity() \ .custom('CodePage', 'UTF8') \ .custom('TransportRequest', args.corrnr) \ .custom('client', args.client) create_request.set(**app_data) try: create_request.execute() except pyodata.exceptions.HttpError as ex: res = json.loads(ex.response.text) get_logger().info(pprint.pformat(res)) raise ex get_logger().info('BSP application successfully created and uploaded')
python
import pandas as pd from datetime import datetime from log import Log class Asset: def __init__(self): self.data = None self.name = None self.symbol = None self.exchange = None self.header_lines = None def read_header(self, filename): self.header_lines = 0 with open(filename) as file: head = [next(file) for n in range(3)] for line, nr in zip(head, range(1, 4)): parts = line.strip().split(":") if len(parts) != 2: break self.header_lines = nr key, value = [part.strip() for part in parts] if key == "Symbol": self.symbol = value elif key == "Name": self.name = value elif key == "Exchange": self.exchange = value def read_csv(self, filename): self.read_header(filename) self.data = pd.read_csv(filename, skiprows=self.header_lines, sep=";", converters={0: lambda x: datetime.strptime(x, "%Y-%m-%d")}) self.data = self.data.set_index('Date') def write_csv(self, filename): outfile = open(filename, "w") if self.symbol is not None: outfile.write("Symbol: %s\n" % self.symbol) if self.name is not None: outfile.write("Name: %s\n" % self.name) if self.exchange is not None: outfile.write("Exchange: %s\n" % self.exchange) self.data.to_csv(outfile, sep=";", line_terminator='\n') def append(self, col, series: pd.Series): self.data[col] = series
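# Illustrative usage sketch (the file name and header values below are hypothetical,
# not from the source): read_csv() expects up to three optional "Key: Value" header
# lines (Symbol / Name / Exchange) followed by a ';'-separated table whose first
# column is an ISO-formatted "Date", e.g.
#
#   Symbol: ACME
#   Name: Acme Corp
#   Exchange: NYSE
#   Date;Close
#   2020-01-02;123.4
#
# asset = Asset()
# asset.read_csv("acme.csv")
# asset.write_csv("acme_copy.csv")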
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`ranges`
==================

.. module:: ranges
   :platform: Unix, Windows
   :synopsis:

.. moduleauthor:: hbldh <[email protected]>

Created on 2015-06-04, 15:12

"""

from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import

from operator import itemgetter
from itertools import groupby

import numpy as np


def int_array_to_ranges(array):
    """Converts a monotonically increasing (or decreasing) array of integers
    into a list of index ranges with identical value.

    :param array: The array to segment.
    :type array: :py:class:`numpy.ndarray` or list
    :return: The list of ranges as index tuples.
    :rtype: list

    """
    diffs = np.where(np.diff(array))[0]
    if len(diffs) == 0:
        ranges = [(0, len(array))]
    elif len(diffs) == 1:
        ranges = [(0, diffs[0] + 1), (diffs[0] + 1, len(array))]
    else:
        ranges = [(x + 1, y + 1) for x, y in zip(diffs[:-1], diffs[1:])]
        ranges.insert(0, (0, ranges[0][0]))
        ranges.append((ranges[-1][1], len(array)))
    return ranges


def bool_array_to_ranges(array):
    """Converts a boolean array into a list of segments where it is ``True``

    :param array: A boolean array to segment.
    :type array: :py:class:`numpy.ndarray`
    :return: A list of tuples with start and stop index of the ranges.
    :rtype: list

    """
    ranges = []
    # Consecutive indices share the same value of i - x, so groupby splits the
    # True positions into runs of consecutive indices.
    for k, g in groupby(enumerate(np.where(array > 0)[0]), lambda ix: ix[0] - ix[1]):
        group = list(map(itemgetter(1), g))
        ranges.append((group[0], group[-1]))
    return ranges
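# A minimal usage sketch (the sample arrays are illustrative, not from the original
# module): int_array_to_ranges returns half-open index ranges of equal-valued runs,
# bool_array_to_ranges returns inclusive start/stop indices of True runs.
if __name__ == "__main__":
    print(int_array_to_ranges([1, 1, 2, 2, 2, 3]))          # runs [0, 2), [2, 5), [5, 6)
    print(bool_array_to_ranges(np.array([0, 1, 1, 0, 1])))  # True runs at indices 1-2 and 4-4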
python
from setuptools import setup setup( name="sup", author="Richard Liaw" )
python
#!/usr/bin/env python """ Monitor system calls with dockit. """ import os import sys import atexit import logging import traceback import lib_uris import lib_util import lib_common from lib_properties import pc os.environ["PYTHONUNBUFFERED"] = "1" if True: # TODO: Make this cleaner. # FIXME: This does not work yet because scripts/cim_objects_definitions.py needs survol/lib_event.py # FIXME: ... which cannot be imported due to path issues. if ".." not in sys.path: sys.path.append("..") if "../.." not in sys.path: sys.path.append("../..") try: from scripts import dockit except Exception as exc: logging.error("exc=%s" % exc) raise else: dockit = None def Snapshot(): logging.info("Snapshot mode") cgiEnv = lib_common.ScriptEnvironment() process_id = cgiEnv.GetId() logging.debug("Snapshot process_id=%s" % process_id) # This just returns one triple. grph = cgiEnv.GetGraph() process_node = lib_uris.gUriGen.PidUri(process_id) grph.add((process_node, pc.property_pid, lib_util.NodeLiteral(process_id))) cgiEnv.OutCgiRdf() # FIXME: Must finish this. if dockit: dockit_dirname = lib_util.standardized_file_path(os.path.dirname(dockit.__file__)) logging.debug("File=" + __file__ + " dockit_dirname=" + dockit_dirname) def _atexit_handler_detach(process_id): """This is called when this CGI script leaves for any reason. Its purpose is to detach from the target process.""" logging.info("_atexit_handler process_id=%d" % process_id) def SendEvents(): """This is called in a subprocess started by the Python module supervisor.""" logging.info("SendEvents") # FIXME: if not dockit: logging.error("dockit not available") return logging.info("dockit available") cgiEnv = lib_common.ScriptEnvironment() process_id = cgiEnv.GetId() logging.info("process_id=%s" % process_id) atexit.register(_atexit_handler_detach, process_id) logging.info("atexit handler set") # This is called by dockit with one of event to be inserted in the global events graph. def dockit_events_callback(rdf_triple): grph = cgiEnv.ReinitGraph() logging.info("dockit_events_callback rdf_triple=%s" % rdf_triple) grph.add(rdf_triple) cgiEnv.OutCgiRdf() class DockitParameters: """ We want to monitor all system calls of the target process. This class and its static values passed all parameters of the procvess to the module "dockit" which monitors the calls by attaching to the process given its pid. """ verbose = 1 with_warning = 1 map_params_summary = dockit.full_map_params_summary with_dockerfile = True input_process_id = int(process_id) command_line = [] output_format = "TXT" summary_format = None input_log_file = None output_files_prefix = "dockit_output" tracer = dockit.default_tracer(input_log_file, None) G_UpdateServer = dockit_events_callback aggregator = None duplicate_input_log = False output_makefile = None logging.debug("SendEvents process_id=%s DockitParameters (s) created" % process_id) # TODO: How to release the target process when this leaves ? try: dockit.start_processing(DockitParameters) except Exception as exc: logging.error("SendEvents caught (stderr): %s" % exc) logging.info("SendEvents after processing") def Main(): if lib_util.is_snapshot_behaviour(): logging.debug("system calls snapshot") Snapshot() else: logging.debug("system calls events") try: SendEvents() except Exception as err: logging.error("Caught:%s" % err) raise if __name__ == '__main__': Main()
python
from .video_resnet_triplet_attention import encoder as encoder_attention from .video_resnet_triplet_bilinear import encoder as encoder_bilinear from .video_resnet_triplet_gap import encoder as encoder_gap from .video_resnet_triplet_mxp import encoder as encoder_mxp from .video_resnet_triplet_frame_wise import encoder as encoder_frame_wise __all__ = [ 'encoder_attention', 'encoder_bilinear', 'encoder_gap', 'encoder_mxp', 'encoder_frame_wise' ]
python
# -*- coding: utf-8 -*- # Copyright 2018 The Chromium OS Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Module for creating clang-tidy builds.""" from __future__ import print_function from chromite.cbuildbot import manifest_version from chromite.cbuildbot.builders import generic_builders from chromite.cbuildbot.stages import artifact_stages from chromite.cbuildbot.stages import build_stages from chromite.cbuildbot.stages import chrome_stages from chromite.cbuildbot.stages import sync_stages class ClangTidyBuilder(generic_builders.Builder): """Builder that creates builds for clang-tidy warnings in Chrome OS.""" def GetVersionInfo(self): """Returns the CrOS version info from the chromiumos-overlay.""" return manifest_version.VersionInfo.from_repo(self._run.buildroot) def GetSyncInstance(self): """Returns an instance of a SyncStage that should be run.""" return self._GetStageInstance(sync_stages.ManifestVersionedSyncStage) def RunStages(self): """Run stages for clang-tidy builder.""" assert len(self._run.config.boards) == 1 board = self._run.config.boards[0] self._RunStage(build_stages.UprevStage) self._RunStage(build_stages.InitSDKStage) self._RunStage(build_stages.SetupBoardStage, board) self._RunStage(chrome_stages.SyncChromeStage) self._RunStage(build_stages.BuildPackagesStage, board) self._RunStage(artifact_stages.GenerateTidyWarningsStage, board)
python
from django.shortcuts import render
from rest_framework import status, generics, viewsets, views
from rest_framework.decorators import api_view
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend

from .models import People, Companies
from . import serializers


class FruitsAndVegetablesViewset(viewsets.ReadOnlyModelViewSet):
    """
    Given a person index (id, name or guid) returns a list of fruits and vegetables they like.
    """
    queryset = People.objects.all()
    serializer_class = serializers.FruitsVegetablesSerializer
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('_id', 'name', 'guid', 'index')


class CompanyEmployeesViewset(viewsets.ReadOnlyModelViewSet):
    """
    Given a company index (or name) returns all its employees.
    """
    queryset = Companies.objects.all()
    serializer_class = serializers.CompaniesEmployeesSerializer
    filter_backends = (DjangoFilterBackend,)
    filter_fields = ('index', 'company')


class TwoPeopleView(views.APIView):
    """
    Given 2 people, provides their information and the list of their friends
    in common which have brown eyes and are still alive.
    """
    def get(self, request, pk1, pk2, format=None):
        people = People.objects.filter(index__in=(pk1, pk2))
        if people.count() != 2:
            return Response({})
        common_friends = people[0].friends.all().intersection(people[1].friends.all())
        # QuerySet.filter() returns a new queryset, so the result must be
        # reassigned for the brown-eyes / still-alive restriction to take effect.
        common_friends = common_friends.filter(eyeColor='brown', has_died=False)
        twopeople = {
            'person1': people[0],
            'person2': people[1],
            'common_friends': common_friends
        }
        serializer = serializers.TwoPeopleSerializer(twopeople)
        return Response(serializer.data)
python
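A sketch of how these views could be wired into a urls.py; the route prefixes and URL names below are illustrative assumptions, not taken from the project.

# urls.py sketch (illustrative wiring using DRF's DefaultRouter; the real project may route these differently)
from django.urls import path
from rest_framework.routers import DefaultRouter

from . import views

router = DefaultRouter()
router.register(r'favourite-food', views.FruitsAndVegetablesViewset)
router.register(r'employees', views.CompanyEmployeesViewset)

urlpatterns = router.urls + [
    # TwoPeopleView.get() expects the two people's indexes as URL parameters pk1 and pk2.
    path('common-friends/<int:pk1>/<int:pk2>/', views.TwoPeopleView.as_view()),
]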
from django.db import models # Create your models here. class Locker(models.Model): is_using = models.BooleanField(default=False)
python
# coding=utf-8 # Copyright 2014 Janusz Skonieczny """ Gra w kółko i krzyżyk """ import pygame import pygame.locals import logging # Konfiguracja modułu logowania, element dla zaawansowanych logging_format = '%(asctime)s %(levelname)-7s | %(module)s.%(funcName)s - %(message)s' logging.basicConfig(level=logging.DEBUG, format=logging_format, datefmt='%H:%M:%S') logging.getLogger().setLevel(logging.INFO) class Board(object): """ Plansza do gry. Odpowiada za rysowanie okna gry. """ def __init__(self, width): """ Konstruktor planszy do gry. Przygotowuje okienko gry. :param width: szerokość w pikselach """ self.surface = pygame.display.set_mode((width, width), 0, 32) pygame.display.set_caption('Tic-tac-toe') # Przed pisaniem tekstów, musimy zainicjować mechanizmy wyboru fontów PyGame pygame.font.init() font_path = pygame.font.match_font('arial') self.font = pygame.font.Font(font_path, 48) # tablica znaczników 3x3 w formie listy self.markers = [None] * 9 def draw(self, *args): """ Rysuje okno gry :param args: lista obiektów do narysowania """ background = (0, 0, 0) self.surface.fill(background) self.draw_net() self.draw_markers() self.draw_score() for drawable in args: drawable.draw_on(self.surface) # dopiero w tym miejscu następuje fatyczne rysowanie # w oknie gry, wcześniej tylko ustalaliśmy co i jak ma zostać narysowane pygame.display.update() def draw_net(self): """ Rysuje siatkę linii na planszy """ color = (255, 255, 255) width = self.surface.get_width() for i in range(1, 3): pos = width / 3 * i # linia pozioma pygame.draw.line(self.surface, color, (0, pos), (width, pos), 1) # linia pionowa pygame.draw.line(self.surface, color, (pos, 0), (pos, width), 1) def player_move(self, x, y): """ Ustawia na planszy znacznik gracza X na podstawie współrzędnych w pikselach """ cell_size = self.surface.get_width() / 3 x /= cell_size y /= cell_size self.markers[int(x) + int(y) * 3] = player_marker(True) def draw_markers(self): """ Rysuje znaczniki graczy """ box_side = self.surface.get_width() / 3 for x in range(3): for y in range(3): marker = self.markers[x + y * 3] if not marker: continue # zmieniamy współrzędne znacznika # na współrzędne w pikselach dla centrum pola center_x = x * box_side + box_side / 2 center_y = y * box_side + box_side / 2 self.draw_text(self.surface, marker, (center_x, center_y)) def draw_text(self, surface, text, center, color=(180, 180, 180)): """ Rysuje wskazany tekst we wskazanym miejscu """ text = self.font.render(text, True, color) rect = text.get_rect() rect.center = center surface.blit(text, rect) def draw_score(self): """ Sprawdza czy gra została skończona i rysuje właściwy komunikat """ if check_win(self.markers, True): score = u"Wygrałeś(aś)" elif check_win(self.markers, True): score = u"Przegrałeś(aś)" elif None not in self.markers: score = u"Remis!" else: return i = self.surface.get_width() / 2 self.draw_text(self.surface, score, center=(i, i), color=(255, 26, 26)) class TicTacToeGame(object): """ Łączy wszystkie elementy gry w całość. 
""" def __init__(self, width, ai_turn=False): """ Przygotowanie ustawień gry :param width: szerokość planszy mierzona w pikselach """ pygame.init() # zegar którego użyjemy do kontrolowania szybkości rysowania # kolejnych klatek gry self.fps_clock = pygame.time.Clock() self.board = Board(width) self.ai = Ai(self.board) self.ai_turn = ai_turn def run(self): """ Główna pętla gry """ while not self.handle_events(): # działaj w pętli do momentu otrzymania sygnału do wyjścia self.board.draw() if self.ai_turn: self.ai.make_turn() self.ai_turn = False self.fps_clock.tick(15) def handle_events(self): """ Obsługa zdarzeń systemowych, tutaj zinterpretujemy np. ruchy myszką :return True jeżeli pygame przekazał zdarzenie wyjścia z gry """ for event in pygame.event.get(): if event.type == pygame.locals.QUIT: pygame.quit() return True if event.type == pygame.locals.MOUSEBUTTONDOWN: if self.ai_turn: # jeśli jeszcze trwa ruch komputera to ignorujemy zdarzenia continue # pobierz aktualną pozycję kursora na planszy mierzoną w pikselach x, y = pygame.mouse.get_pos() self.board.player_move(x, y) self.ai_turn = True class Ai(object): """ Kieruje ruchami komputera na podstawie analizy położenia znaczników """ def __init__(self, board): self.board = board def make_turn(self): """ Wykonuje ruch komputera """ if not None in self.board.markers: # brak dostępnych ruchów return logging.debug("Plansza: %s" % self.board.markers) move = self.next_move(self.board.markers) self.board.markers[move] = player_marker(False) @classmethod def next_move(cls, markers): """ Wybierz następny ruch komputera na podstawie wskazanej planszy :param markers: plansza gry :return: index tablicy jednowymiarowe w której należy ustawić znacznik kółka """ # pobierz dostępne ruchy wraz z oceną moves = cls.score_moves(markers, False) # wybierz najlepiej oceniony ruch score, move = max(moves, key=lambda m: m[0]) logging.info("Dostępne ruchy: %s", moves) logging.info("Wybrany ruch: %s %s", move, score) return move @classmethod def score_moves(cls, markers, x_player): """ Ocenia rekurencyjne możliwe ruchy Jeśli ruch jest zwycięstwem otrzymuje +1, jeśli przegraną -1 lub 0 jeśli nie nie ma zwycięscy. Dla ruchów bez zwycięscy rekreacyjnie analizowane są kolejne ruchy a suma ich punktów jest wynikiem aktualnego ruchu. 
:param markers: plansza na podstawie której analizowane są następne ruchy :param x_player: True jeśli ruch dotyczy gracza X, False dla gracza O """ # wybieramy wszystkie możliwe ruchy na podstawie wolnych pól available_moves = (i for i, m in enumerate(markers) if m is None) for move in available_moves: from copy import copy # tworzymy kopię planszy która na której testowo zostanie # wykonany ruch w celu jego późniejszej oceny proposal = copy(markers) proposal[move] = player_marker(x_player) # sprawdzamy czy ktoś wygrywa gracz którego ruch testujemy if check_win(proposal, x_player): # dodajemy punkty jeśli to my wygrywamy # czyli nie x_player score = -1 if x_player else 1 yield score, move continue # ruch jest neutralny, # sprawdzamy rekurencyjne kolejne ruchy zmieniając gracza next_moves = list(cls.score_moves(proposal, not x_player)) if not next_moves: yield 0, move continue # rozdzielamy wyniki od ruchów scores, moves = zip(*next_moves) # sumujemy wyniki możliwych ruchów, to będzie nasz wynik yield sum(scores), move def player_marker(x_player): """ Funkcja pomocnicza zwracająca znaczniki graczy :param x_player: True dla gracza X False dla gracza O :return: odpowiedni znak gracza """ return "X" if x_player else "O" def check_win(markers, x_player): """ Sprawdza czy przekazany zestaw znaczników gry oznacza zwycięstwo wskazanego gracza :param markers: jednowymiarowa sekwencja znaczników w :param x_player: True dla gracza X False dla gracza O """ win = [player_marker(x_player)] * 3 seq = range(3) # definiujemy funkcję pomocniczą pobierającą znacznik # na podstawie współrzędnych x i y def marker(xx, yy): return markers[xx + yy * 3] # sprawdzamy każdy rząd for x in seq: row = [marker(x, y) for y in seq] if row == win: return True # sprawdzamy każdą kolumnę for y in seq: col = [marker(x, y) for x in seq] if col == win: return True # sprawdzamy przekątne diagonal1 = [marker(i, i) for i in seq] diagonal2 = [marker(i, abs(i-2)) for i in seq] if diagonal1 == win or diagonal2 == win: return True # Ta część powinna być zawsze na końcu modułu (ten plik jest modułem) # chcemy uruchomić naszą grę dopiero po tym jak wszystkie klasy zostaną zadeklarowane if __name__ == "__main__": game = TicTacToeGame(300) game.run()
python
from django.db import models

# Create your models here.
# This file defines the models that will later be used to create a database in Postgres.
# The __str__ methods are what Django will display on its pages.

class Domicilio(models.Model):
    calle = models.CharField(max_length=255)
    no_calle = models.IntegerField()
    pais = models.CharField(max_length=255)

    def __str__(self):
        cadena = f'Domicilio {self.id}: {self.calle} {self.no_calle} {self.pais}'
        return cadena

class Persona(models.Model):
    nombre = models.CharField(max_length=255)
    apellido = models.CharField(max_length=255)
    email = models.CharField(max_length=255)
    domicilio = models.ForeignKey(Domicilio, on_delete=models.SET_NULL, null=True)
    #domicilio = models.ForeignKey(Domicilio, on_delete=models.CASCADE, null=True)  # Use CASCADE instead if deleting a Domicilio should also delete the related rows.

    def __str__(self):
        cadena = f'Persona {self.id}: {self.nombre} {self.apellido} {self.email}'
        return cadena
python
from PyObjCTools.TestSupport import * from Foundation import * try: unicode except NameError: unicode = str class TestNSLinguisticTagger (TestCase): @min_os_level('10.7') def testConstants(self): self.assertIsInstance(NSLinguisticTagSchemeTokenType, unicode) self.assertIsInstance(NSLinguisticTagSchemeLexicalClass, unicode) self.assertIsInstance(NSLinguisticTagSchemeNameType, unicode) self.assertIsInstance(NSLinguisticTagSchemeNameTypeOrLexicalClass, unicode) self.assertIsInstance(NSLinguisticTagSchemeLemma, unicode) self.assertIsInstance(NSLinguisticTagSchemeLanguage, unicode) self.assertIsInstance(NSLinguisticTagSchemeScript, unicode) self.assertIsInstance(NSLinguisticTagWord, unicode) self.assertIsInstance(NSLinguisticTagPunctuation, unicode) self.assertIsInstance(NSLinguisticTagWhitespace, unicode) self.assertIsInstance(NSLinguisticTagOther, unicode) self.assertIsInstance(NSLinguisticTagNoun, unicode) self.assertIsInstance(NSLinguisticTagVerb, unicode) self.assertIsInstance(NSLinguisticTagAdjective, unicode) self.assertIsInstance(NSLinguisticTagAdverb, unicode) self.assertIsInstance(NSLinguisticTagPronoun, unicode) self.assertIsInstance(NSLinguisticTagDeterminer, unicode) self.assertIsInstance(NSLinguisticTagParticle, unicode) self.assertIsInstance(NSLinguisticTagPreposition, unicode) self.assertIsInstance(NSLinguisticTagNumber, unicode) self.assertIsInstance(NSLinguisticTagConjunction, unicode) self.assertIsInstance(NSLinguisticTagInterjection, unicode) self.assertIsInstance(NSLinguisticTagClassifier, unicode) self.assertIsInstance(NSLinguisticTagIdiom, unicode) self.assertIsInstance(NSLinguisticTagOtherWord, unicode) self.assertIsInstance(NSLinguisticTagSentenceTerminator, unicode) self.assertIsInstance(NSLinguisticTagOpenQuote, unicode) self.assertIsInstance(NSLinguisticTagCloseQuote, unicode) self.assertIsInstance(NSLinguisticTagOpenParenthesis, unicode) self.assertIsInstance(NSLinguisticTagCloseParenthesis, unicode) self.assertIsInstance(NSLinguisticTagWordJoiner, unicode) self.assertIsInstance(NSLinguisticTagDash, unicode) self.assertIsInstance(NSLinguisticTagOtherPunctuation, unicode) self.assertIsInstance(NSLinguisticTagParagraphBreak, unicode) self.assertIsInstance(NSLinguisticTagOtherWhitespace, unicode) self.assertIsInstance(NSLinguisticTagPersonalName, unicode) self.assertIsInstance(NSLinguisticTagPlaceName, unicode) self.assertIsInstance(NSLinguisticTagOrganizationName, unicode) self.assertEqual(NSLinguisticTaggerOmitWords, 1 << 0) self.assertEqual(NSLinguisticTaggerOmitPunctuation, 1 << 1) self.assertEqual(NSLinguisticTaggerOmitWhitespace, 1 << 2) self.assertEqual(NSLinguisticTaggerOmitOther, 1 << 3) self.assertEqual(NSLinguisticTaggerJoinNames, 1 << 4) @min_os_level('10.7') def testMethods(self): self.assertArgHasType(NSLinguisticTagger.orthographyAtIndex_effectiveRange_, 1, b'o^' + NSRange.__typestr__) self.assertArgIsBlock(NSLinguisticTagger.enumerateTagsInRange_scheme_options_usingBlock_, 3, b'v@' + NSRange.__typestr__ + NSRange.__typestr__ + b'o^' + objc._C_NSBOOL) self.assertArgHasType(NSLinguisticTagger.tagAtIndex_scheme_tokenRange_sentenceRange_, 2, b'o^' + NSRange.__typestr__) self.assertArgHasType(NSLinguisticTagger.tagAtIndex_scheme_tokenRange_sentenceRange_, 3, b'o^' + NSRange.__typestr__) self.assertArgHasType(NSLinguisticTagger.tagsInRange_scheme_options_tokenRanges_, 3, b'o^@') self.assertArgHasType(NSLinguisticTagger.possibleTagsAtIndex_scheme_tokenRange_sentenceRange_scores_, 2, b'o^' + NSRange.__typestr__) 
self.assertArgHasType(NSLinguisticTagger.possibleTagsAtIndex_scheme_tokenRange_sentenceRange_scores_, 3, b'o^' + NSRange.__typestr__) self.assertArgHasType(NSLinguisticTagger.possibleTagsAtIndex_scheme_tokenRange_sentenceRange_scores_, 4, b'o^@') self.assertArgIsOut(NSString.linguisticTagsInRange_scheme_options_orthography_tokenRanges_, 4) self.assertArgIsBlock(NSString.enumerateLinguisticTagsInRange_scheme_options_orthography_usingBlock_, 4, b'v@' + NSRange.__typestr__ + NSRange.__typestr__ + b'o^' + objc._C_NSBOOL) if __name__ == "__main__": main()
python
# Written by Jeremy Lee, 2020-10-30 from django.conf import settings from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('umap', '0007_auto_20190416_1757'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.AlterField( model_name='map', name='share_status', field=models.SmallIntegerField(choices=[(1, 'everyone (public)'), (2, 'anyone with link'), (3, 'editors only'), (4, 'viewers and editors'), (5, 'authenticated'), (9, 'blocked')], default=1, verbose_name='share status'), ), migrations.AddField( model_name='map', name='viewers', field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL, verbose_name='viewers', related_name='map_viewers'), ), migrations.AlterField( model_name='map', name='editors', field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL, verbose_name='editors', related_name='map_editors'), ), ]
python
from pyomo.core import * class Model: model = AbstractModel() model.T = Set() # Index Set for time steps of optimization horizon model.S = Set() # Index Set for time steps of optimization horizon ################################## PARAMETERS ################################# ################################################################################################ # definition of the PV model.P_PV = Param(model.S, model.T, within=NonNegativeReals) # PV PMPP forecast model.P_Load = Param(model.T, within=NonNegativeReals)
python
""" This module contains event handlers related to the restart options after a Codenames game ends. """ from flask_socketio import emit, leave_room from flask_login import current_user from app import socketio, db from app.models.user_data import UserData from app.games.codenames.models import CodenamesTeams from app.utils import is_admin from .constants import NAMESPACE, STATE_KEYS, TEAMS, STATES, SPYMASTER from .utils import is_codenames_player, player_distribution_is_valid, create_word_list ######################################################################### # # # EVENT HANDLERS # # ================== # # # # EVENTS EXPLANATION # # # # restart Fired when the room admin chooses the # # "Start a new game" option. # # # # restart_with_same_teams Fired when the room admin chooses the # # restart with same teams option. # # # ######################################################################### @socketio.on("restart", namespace=NAMESPACE) @is_codenames_player @is_admin def on_restart(): """ Handles the restart_with_same_teams event. This is fired when the room admin choses to start a new game. """ if not current_user.room.codenames_room.state == STATES.GAME_OVER: return emit("set_state", {STATE_KEYS.GAME_STATE: STATES.JOIN}, room=current_user.room_id) team = CodenamesTeams.query.filter_by( room_id=current_user.room_id, team_name=TEAMS.NEUTRAL ).first() current_user.room.codenames_room.state = STATES.JOIN current_user.room.codenames_room.state_details = None team_list = dict() team_list["players"] = [] users = UserData.query.filter_by(room_id=current_user.room_id) for user in users: leave_room(user.room_id + user.codenames_player.team.team_name) user.codenames_player.team = team team_list["players"].append( {"id": user.id, "user": user.username, "team": TEAMS.NEUTRAL} ) if user.codenames_player.spymaster_of is not None: user.codenames_player.spymaster_of.spymaster = None db.session.commit() team_list["currentTeam"] = TEAMS.NEUTRAL team_list["state"] = STATES.JOIN team_list[TEAMS.BLUE + SPYMASTER] = None team_list[TEAMS.RED + SPYMASTER] = None emit("team_list", team_list, room=current_user.room_id) @socketio.on("restart_with_same_teams", namespace=NAMESPACE) @is_codenames_player @is_admin def on_restart_with_same_teams(): """ Handles the restart_with_same_teams event. This is fired when the room admin choses to start a new game with the same teams. """ if not current_user.room.codenames_room.state == STATES.GAME_OVER: return if ( current_user.room.codenames_room.state_details == TEAMS.BLUE + SPYMASTER or current_user.room.codenames_room.state_details == TEAMS.RED + SPYMASTER ): return if not player_distribution_is_valid: return current_user.room.codenames_room.state = STATES.STARTED current_user.room.codenames_room.turns_left = 0 db.session.commit() emit( "set_state", {STATE_KEYS.GAME_STATE: STATES.STARTED}, room=current_user.room_id ) create_word_list()
python
from pyg_base import is_num, is_ts, df_concat import pandas as pd import numpy as np import numba @numba.njit def _p(x, y, vol = 0): if vol == 0: return 1. if x<y else -1. if x>y else 0.0 else: one_sided_tail = 0.5 * np.exp(-abs(y-x)/vol) return 1-one_sided_tail if x<y else one_sided_tail @numba.njit def _xrank(a, w, b, vol, scale = 0 , reweight = False): """ performs a cross-sectional rank a = np.random.normal(0,1,20) a[np.random.normal(0,1,20) > 1] = np.nan w = np.full(20, 1.) b = np.full(20, 1.) scale = 0; vol = -1; reweight = False a _xrank(a, w, b, vol) ranks a from -1 to +1 such that: i) a[i] < a[j] ==> rank[i] < rank[j] ii) rank[i] in (-1, 1) iii) \sum w[i] rank[i] = 0 Parameters ---------- a : TYPE DESCRIPTION. w : TYPE DESCRIPTION. b : TYPE DESCRIPTION. vol : TYPE DESCRIPTION. scale : TYPE, optional DESCRIPTION. The default is 0. reweight : TYPE, optional DESCRIPTION. The default is False. Returns ------- None. """ not_ok = np.isnan(a) ok = ~not_ok if np.max(not_ok): a = a.copy(); w = w.copy(); b = b.copy() a[not_ok] = 0.0 b[not_ok] = 0.0 w[not_ok] = 0.0 wb = w * b total_wb = np.sum(wb) if total_wb == 0: return np.full_like(a, np.nan) else: r = np.zeros_like(a) wb = wb / total_wb if vol < 0: wba = wb * a m1 = np.sum(wba) m2 = np.sum(wba * a) vol = (m2 - m1**2) ** 0.5 for i in range(a.shape[0]): if ok[i] and w[i]!=0: for j in range(i): if ok[j] and w[j]!=0: qq = _p(a[i], a[j], vol) pp = 1-qq r[i] += (2*pp-1) * wb[j] r[j] += (2*qq-1) * wb[i] if scale == 0: std = 1 elif scale == 1: # scale weightes so that total weight = 1 total_w = np.sum(w) w = w / total_w std = np.sum((w*r)**2*(1-b**2)) ** 0.5 r = r/std elif scale == 2: std = (np.sum(r**2) - np.sum(r)**2) ** 0.5 r = r/std elif scale == 3: total_w = np.sum(w) w = w / total_w std = np.sum(w*(r**2)) ** 0.5 r = r/std r[not_ok] = np.nan if reweight: r = r * w return r @numba.njit def _xrank_2d(a, w, b, vol, scale, reweight): res = np.empty_like(a) for i in range(a.shape[0]): res[i] = _xrank(a = a[i], w = w[i], b = b[i], vol = vol[i], scale = scale , reweight = reweight) return res def xrank(a, weight = None, beta = None, vol = True, scale = 0 , reweight = False, columns = None): """ performs a cross-sectional rank a = np.random.normal(0,1,20) a[np.random.normal(0,1,20) > 1] = np.nan w = np.full(20, 1.) b = np.full(20, 1.) scale = 0; vol = -1; reweight = False a _xrank(a, w, b, vol) ranks a from -1 to +1 such that: i) a[i] < a[j] ==> rank[i] < rank[j] ii) rank[i] in (-1, 1) iii) \sum w[i] rank[i] = 0 Parameters ---------- a : TYPE DESCRIPTION. w : TYPE DESCRIPTION. b : TYPE DESCRIPTION. vol : TYPE DESCRIPTION. scale : TYPE, optional DESCRIPTION. The default is 0. reweight : TYPE, optional DESCRIPTION. The default is False. Returns ------- None. :Example: --------- >>> a = pd.DataFrame(np.random.normal(0,1,(1000,20)), drange(-999)) >>> aa = cumsum(a) >>> aa.plot() >>> beta = weight = None >>> vol = True; scale = 0; columns = None >>> res = xrank(aa) >>> res.plot() """ a = df_concat(a, columns).ffill() index = a.index cols = a.columns a_ = a.values if weight is None: w = np.full_like(a_, 1.) elif is_num(weight): w = np.full_like(a_, weight) else: w = df_concat(weight, columns).reindex(index, method = 'ffill') if beta is None: b = np.full_like(a_, 1.) 
    elif is_num(beta):
        b = np.full_like(a_, beta)
    else:
        b = df_concat(beta, columns).reindex(index, method = 'ffill')
    if vol is True:
        vol = -1
    if is_ts(vol):
        # reindex() returns a new object, so the result must be reassigned
        vol = vol.reindex(index, method = 'ffill')
        if isinstance(vol, pd.DataFrame) and vol.shape[1] == 1:
            vol = vol.iloc[:,0]
    else:
        vol = np.full(a_.shape[0], vol)
    b, w, vol = [df.values if is_ts(df) else df for df in (b,w,vol)]
    res = _xrank_2d(a_, w, b, vol, scale, reweight)
    return pd.DataFrame(res, index, cols)
python
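A small illustrative call of xrank, complementing the docstring example above. The column names, sizes and date index are made up; the assumption, consistent with the module's own docstring, is that pyg_base's df_concat accepts a plain DataFrame.

import numpy as np
import pandas as pd

# Five synthetic price series; each row's cross-section is ranked into (-1, 1).
prices = pd.DataFrame(np.random.normal(0, 1, (500, 5)).cumsum(axis=0),
                      index=pd.date_range('2020-01-01', periods=500),
                      columns=list('abcde'))

ranks = xrank(prices)            # defaults: unit weights, beta of 1, per-row realised vol
scaled = xrank(prices, scale=2)  # ranks divided by a cross-sectional dispersion term (scale==2 branch of _xrank)
print(ranks.tail())
print(scaled.tail())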
from .data import * from .selector import * from .utils import * from .dataset import *
python
# Search for lines that start 'X' followed by any non whitespace # characters and ':' then output the first group of non whitespace # characters that follows import re hand = open('mbox-short.txt') for line in hand: line = line.rstrip() x = re.findall('^X\S*: (\S+)', line) if not x: continue print(x)
python
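A quick illustration of what the pattern captures, run on a single typical mail-header line rather than the whole mbox-short.txt file.

import re

sample = 'X-DSPAM-Confidence: 0.8475'
# The group captures the first non-whitespace token after the colon: ['0.8475']
print(re.findall(r'^X\S*: (\S+)', sample))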
from datetime import datetime import numpy as np from multiprocessing import Pool, cpu_count import os import sys import torch import torch.nn as nn import torch.nn.functional as F from torch.utils.tensorboard import SummaryWriter from .utils.preprocess_conllu import * from .utils.helpers import * from .utils.tools import * from .models import * # TODO put this in a config file fcodebook = "/home/leo/projects/minibrain/predictors/sequence/text/utf8-codes/utf8_codebook_overfit_matrix_2seg_dim64.npy" utf8codematrix = "/home/leo/projects/minibrain/predictors/sequence/text/utf8-codes/utf8_code_matrix_2seg.npy" dataset_train = "/home/leo/projects/Datasets/text/UniversalDependencies/ud-treebanks-v2.5/traindev_np_batches_779000x3x1024_uint16.npy" BASE_DATA_DIR_UD_TREEBANK = "/home/leo/projects/Datasets/text/UniversalDependencies/ud-treebanks-v2.5" # cuda seems to reverse the GPU ids with CUDA id so ... mess # Cuda maps cuda:0 to my RTX 2080ti (GPU#1) and # Cuda maps cuda:1 to my GTX 1080 (GPU#0) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu") def train_test(model, checkpoint_path, base_name, max_seq_len=384, test_loss=True, test_accuracy=False, max_data=45): model = model.to(device) data_train = np.load(dataset_train) # optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4) optimizer = torch.optim.AdamW(model.parameters()) # loss_function = F.nll_loss loss_function = pos_loss_function epoch_size = 10000 batch_size = 50 # TODO tis is for testing purposes data = data_train # data = data_train[-1000 * batch_size:, :, :] # just for the trials, use the last 1000 batches only test_data = None if test_loss: test_data = load_test_data(BASE_DATA_DIR_UD_TREEBANK, max_data) epochs = chunks(data, epoch_size, dim=0) epoch_count = 1 for e in epochs: batches = chunks(e, batch_size, dim=0) train(model, optimizer, loss_function, batches, epoch_count, epoch_size, device, max_seq_len) torch.cuda.empty_cache() # checkpoint cid = f"{epoch_count:04}" # cid = str(epoch_count).zfill(4) model.save_checkpoint(checkpoint_path, base_name, cid) # TODO test loss and accuracy to be measured in CPU (or another GPU) # with batches bigger than 50 my GPU is out of memory if test_loss: test(model, loss_function, test_data, epoch_count, device, max_data, max_seq_len) torch.cuda.empty_cache() if test_accuracy: test_accuracy(model, test_data, epoch_count, device, max_data) torch.cuda.empty_cache() epoch_count += 1 # model.network.save_model("./trained_models/conv1dcol", "conv1dcol_nll-loss_epoch-{}".format(epoch_count)) def test_async(checkpoint_path, test_data_path, epoch_count, device, max_data, test_acc=False): # load checkpoint # model is hardcoded for the moment utf8codes = np.load(fcodebook) utf8codes = utf8codes.reshape(1987, 64) model = GatedConv1DPoS(utf8codes).to(device) model.load_checkpoint(checkpoint_path) test_data = load_test_data(test_data_path) print("launching test in CPU") test(model, pos_loss_function, test_data, epoch_count, device, max_data) if test_acc: print("launching Accuracy test in CPU") test_accuracy(model, test_data, epoch_count, device, max_data) def test_acc_async(checkpoint_path, test_data_path, epoch_count, device, max_data): # load checkpoint # model is hardcoded for the moment utf8codes = np.load(fcodebook) utf8codes = utf8codes.reshape(1987, 64) model = GatedConv1DPoS(utf8codes).to(device) model.load_checkpoint(checkpoint_path) test_data = load_test_data(test_data_path) 
print("launching Accuracy test in CPU") test_accuracy(model, test_data, epoch_count, device, max_data) def err_ckb(err): print("error with the subprocess ", err) # Note this is TOO slow, GPU test is 30-50 times faster than in CPU, so CPU not useful for practical purposes def train_cputest(model, checkpoint_path, base_name, test_accuracy=True, max_data=45): pool = Pool(cpu_count() - 2) model = model.to(device) data_train = np.load(dataset_train) # optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4) optimizer = torch.optim.AdamW(model.parameters()) # loss_function = F.nll_loss loss_function = pos_loss_function epoch_size = 10000 batch_size = 50 # TODO this is for testing purposes data = data_train # data = data_train[-1000*batch_size:, :, :] # just for the trials, use the last 1000 batches only epochs = chunks(data, epoch_size, dim=0) epoch_count = 1 for e in epochs: batches = chunks(e, batch_size, dim=0) train(model, optimizer, loss_function, batches, epoch_count, epoch_size, device) torch.cuda.empty_cache() # checkpoint cid = f"{epoch_count:04}" # cid = str(epoch_count).zfill(4) fchkpoint = model.save_checkpoint(checkpoint_path, base_name, cid) # test loss and accuracy to be measured in CPU (or another GPU) # with batches bigger than less than 50 my GPU is out of memory res_test = pool.apply_async(test_async, [fchkpoint, BASE_DATA_DIR_UD_TREEBANK, epoch_count, device, max_data], error_callback=err_ckb) if test_accuracy: res_acc = pool.apply_async(test_acc_async, [fchkpoint, BASE_DATA_DIR_UD_TREEBANK, epoch_count, device, max_data], error_callback=err_ckb) torch.cuda.empty_cache() epoch_count += 1 # model.network.save_model("./trained_models/conv1dcol", "conv1dcol_nll-loss_epoch-{}".format(epoch_count)) def old_main_conv1d(): utf8codes = np.load(fcodebook) utf8codes = utf8codes.reshape(1987, 64) model = OLD_Conv1DPoS(utf8codes) path = "./trained_models/conv1dcol" base_name = "conv1dcol_nll-loss" train_test(model, path, base_name) def old_main_gatedconv1d(): utf8codes = np.load(fcodebook) utf8codes = utf8codes.reshape(1987, 64) model = GatedConv1DPoS(utf8codes) path = "./trained_models/GatedConv1DCol" base_name = "GatedConv1DPoS_nll-loss" train_test(model, path, base_name) def main_conv1dcolnet(): utf8codes = np.load(utf8codematrix) # utf8codes = utf8codes.reshape(1987, 324) encoder = Conv1DColNet(transpose_output=True) # use default parameters decoder = LinearUposDeprelDecoder(transpose_input=False) model = NetContainer(utf8codes, encoder, decoder) path = "./trained_models/Conv1dColNet_try3" base_name = "Conv1dColNet_nll-loss" train_test(model, path, base_name) CONV1D_PRETRAIN_FILE = "/home/leo/projects/minibrain/predictors/sequence/text/trained_models/Conv1dColNet/Conv1dColNet_nll-loss_0078.state_dict.pth" def main_convattnet(conv1d_pretrain_file=CONV1D_PRETRAIN_FILE): utf8codes = np.load(utf8codematrix) # utf8codes = utf8codes.reshape(1987, 324) # the convolutional encoder must NOT be retrained (that is what I'm trying to test) with torch.no_grad(): conv1d_encoder = Conv1DColNet(transpose_output=False) # use default parameters conv1d_decoder = LinearUposDeprelDecoder(transpose_input=False) conv1d_model = NetContainer(utf8codes, conv1d_encoder, conv1d_decoder) # load pre-trained conv1dcolnet # conv1d_model.load_checkpoint(conv1d_pretrain_file) # cleanup things that we'll not use, we just need the encoder del conv1d_model del conv1d_decoder torch.cuda.empty_cache() # conv1d_encoder = Conv1DColNet(transpose_output=False) # use default parameters encoder = 
ConvAttColNet(conv1d_encoder, transpose_output=False) decoder = LinearUposDeprelDecoder(transpose_input=False) model = NetContainer(utf8codes, encoder, decoder) print("Starting training for model with column type ConvAttNetCol and pretrained Conv1dColNet") print("Parameter model details: ") print("conv1d_encoder parameters {} from which {} are trainable ". format(count_parameters(conv1d_encoder), count_parameters(conv1d_encoder))) print("ConvAttColNet parameters {} from which {} are trainable ". format(count_parameters(encoder), count_parameters(encoder))) print("decoder parameters {} from which {} are trainable ". format(count_parameters(decoder), count_parameters(decoder))) print("Total model parameters {} from which {} are trainable ". format(count_parameters(model), count_parameters(model))) path = "./trained_models/ConvAttNet" base_name = "ConvAttNet_nll-loss" train_test(model, path, base_name, max_seq_len=384, max_data=60)
python
import sys, re from mk_yaml_ontology import ont_node, dump_yaml def replace_token(pattern, replacement, s): replacement = f' {replacement} ' s = re.sub(f' ?{pattern} ', replacement, s) s = re.sub(f' {pattern} ?', replacement, s) return s def mk_ont_node(line_string): fields = line_string.split("\t") assert(len(fields) >= 4) var_name = fields[0].strip() description = fields[3].strip() description = replace_token("C", "carbon", description) description = replace_token("CO2", "carbon dioxide", description) description = replace_token("CH2O", "formaldehyde", description) description = replace_token("N", "nitrogen", description) description = replace_token("NH3", "ammonia", description) description = replace_token("NH4", "ammonium", description) description = replace_token("NO3", "nitrate", description) description = replace_token("P", "phosphorus", description) return ont_node(var_name, [description], None, add_name = False) # the name isn't in a format we can use def main(): flat_file = sys.argv[1] ont_file = sys.argv[2] ont_name = sys.argv[3] with open(flat_file, "r") as f: _ = f.readline() # read header lines = [line.rstrip() for line in f.readlines()] nodes = [mk_ont_node(line) for line in lines] dump_yaml(nodes, ont_file, ont_name) main()
python
import os import pytest from aztk.models.plugins import PluginConfiguration from aztk.models.plugins.internal import PluginManager from aztk.error import InvalidPluginReferenceError dir_path = os.path.dirname(os.path.realpath(__file__)) fake_plugin_dir = os.path.join(dir_path, "fake_plugins") def RequiredArgPlugin(req_arg): return PluginConfiguration(name="required-arg") def test_missing_plugin(): plugin_manager = PluginManager() message = "Cannot find a plugin with name .*" with pytest.raises(InvalidPluginReferenceError, match=message): plugin_manager.get_plugin("non-existing-plugin") def test_extra_args_plugin(): plugin_manager = PluginManager() message = "Plugin JupyterPlugin doesn't have an argument called 'invalid'" with pytest.raises(InvalidPluginReferenceError, match=message): plugin_manager.get_plugin("jupyter", args=dict(invalid="foo")) def test_missing_required_arg(): plugin_manager = PluginManager() plugin_manager.plugins["required-arg"] = RequiredArgPlugin message = "Missing a required argument req_arg for plugin RequiredArgPlugin" with pytest.raises(InvalidPluginReferenceError, match=message): plugin_manager.get_plugin("required-arg")
python
from abt.cli import main
python
import asyncio import logging from rsp1570serial.discovery import discover_source_aliases if __name__ == "__main__": logging.basicConfig( level=logging.INFO, format="%(asctime)s %(levelname)s:%(message)s" ) # asyncio.run(discover_source_aliases("socket://192.168.50.211:50002")) asyncio.run(discover_source_aliases())
python
from copy import deepcopy class ensemble: def __init__(self, obj): self.vals = {None: (None,obj)} def get(self): if self.vals is None: return self.val if len(self.vals)==1: for nm in self.vals.values(): return nm[1] return self def add(self, guard, obj): if id(obj) in self.vals: (g,obj) = self.vals[id(obj)] self.vals[id(obj)] = (g|guard,obj) else: self.vals[id(obj)] = (guard,obj) def __call__(self): if self.vals is not None: self.val = self.vals[None][1] for (g,obj) in self.vals.values(): if g is None: continue if isinstance(obj,ensemble): obj=obj() self.val = g.if_else(obj,self.val) self.vals = None return self.val class values: def __init__(self): self.dic = {} def __getitem__(self, var): if not var in self.dic: raise NameError("name '" + var + "' is not always set") if isinstance(self.dic[var], ensemble): # print("ifthenelsing", var) # if self.dic[var].guard is not None: print("* ifthenelse", var, self.dic[var].guard, "?", self.dic[var].ifval, ":", self.dic[var].elseval) self.dic[var]=self.dic[var]() return self.dic[var] def get(self, var): if isinstance(self.dic[var], ensemble): return self.dic[var].get() else: return self.dic[var] def __setitem__(self, var, val): self.dic[var] = val def __delitem__(self, var): del self.dic[var] def __iter__(self): return self.dic.__iter__() def clear(self): self.dic = {} def copy(self): ret = values() ret.dic = dict(self.dic) return ret def __repr__(self): return repr(self.dic) def apply_to_label(vals, orig): if orig is None: return vals ifguard = vals["__guard"] ret = values() for nm in orig: if nm in vals: if (vif:=vals.get(nm)) is (velse:=orig.get(nm)): ret[nm] = vif elif isinstance(velse, ensemble): velse.add(ifguard, vif) ret[nm] = velse else: ret[nm] = ensemble(velse) ret.dic[nm].add(ifguard, vif) return ret def apply_to_labels(vals, orig1, orig2, cond): if cond is True: return [apply_to_label(vals, orig1), orig2] elif cond is False: return [orig1, apply_to_label(vals, orig2)] guard = vals["__guard"] guard1 = guard&cond guard2 = guard&(1-cond) vals["__guard"] = guard1 ret1 = apply_to_label(vals, orig1) if orig1 is None and orig2 is None: vals = vals.copy() vals["__guard"] = guard2 ret2 = apply_to_label(vals, orig2) ret1["__guard"] = guard1 # because may be overwritten to guard2 if we do not copy vals return [ret1,ret2] def values_new(): return values()
python
''' Created on May 2, 2016 @author: damianpa '''
python
import yaml import os import git import logging from .i_repository_parser import IRepositoryParser class RosdistroRepositoryParser(IRepositoryParser): """ Pulls the rosdistro-package and gets all urls from the rosdistro files. """ def __init__(self, settings: dict): """ Creates a new instance of the RosdistroRepositoryParser class :param settings: Settings containing information about rosdistro_workspace and rosdistro_url """ self.__settings = settings def __get_rosdistro_repository(self) -> None: """ Clones the repository from rosdistro_url into rosdistro_workspace (defined in settings) :return: None """ if not os.path.exists(self.__settings["rosdistro_workspace"]): os.makedirs(self.__settings["rosdistro_workspace"]) try: logging.info("[RosdistroRepositoryParser]: Cloning rosdistro repository...") git.Repo.clone_from(self.__settings["rosdistro_url"], self.__settings["rosdistro_workspace"]) except git.exc.GitCommandError: logging.warning("[RosdistroRepositoryParser]: Repository already exists, pulling changes...") repo = git.Repo(self.__settings["rosdistro_workspace"]) repo.remotes.origin.pull() logging.info("[RosdistroRepositoryParser]: Rosdistro up-to-date...") def __get_urls_from_file(self, file_path: str, repository_dict: dict) -> None: """ Gets the URLs from a distribution.yaml that adheres to rosdistro-specs. :param file_path: path to a distribution.yaml file :param repository_dict: dictionary with repository-type (git, svn, hg, ...) as key and the repo-url as value :return: None """ # Load file. file = open(file_path, 'r') rosdistro = yaml.load(file) # Iterate repositories and add them to the repository_dict. for repository in rosdistro["repositories"]: try: vcs_type = str(rosdistro["repositories"][repository]["doc"]["type"]) url = str(rosdistro["repositories"][repository]["doc"]["url"]) repository_dict[vcs_type].add(url) except KeyError: pass try: vcs_type = str(rosdistro["repositories"][repository]["doc"]["type"]) url = str(rosdistro["repositories"][repository]["source"]["url"]) repository_dict[vcs_type].add(url) except KeyError: pass try: # This has to be a git repository (required by bloom) repository_dict["git"].add(rosdistro["repositories"][repository]["release"]["url"]) except KeyError: pass def parse_repositories(self, repository_dict: dict) -> None: # Actually get the repository self.__get_rosdistro_repository() # Parse index.yaml index_file = open(self.__settings["rosdistro_workspace"] + "index.yaml", "r") index_yaml = yaml.load(index_file) # Get all urls from all distribution.yaml files for distribution in index_yaml["distributions"]: logging.info("Parsing distribution " + index_yaml["distributions"][distribution]["distribution"][0]) self.__get_urls_from_file(self.__settings["rosdistro_workspace"] + index_yaml["distributions"][distribution]["distribution"][0], repository_dict)
python
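An illustrative driver for the parser above. The settings keys follow what the class reads; the URL and workspace path are placeholders, and the dictionary values must be sets because parse_repositories() calls repository_dict[vcs_type].add(url).

# Illustrative usage; URL and workspace path are placeholders.
settings = {
    "rosdistro_url": "https://github.com/ros/rosdistro.git",
    "rosdistro_workspace": "/tmp/rosdistro/",
}

# Keys are VCS types, values are sets that the parser fills with repository URLs.
repositories = {"git": set(), "svn": set(), "hg": set()}

parser = RosdistroRepositoryParser(settings)
parser.parse_repositories(repositories)
print(len(repositories["git"]), "git repositories found")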
import sklearn as sk
import numpy as np
import matplotlib.pyplot as plt

from sklearn.datasets import fetch_olivetti_faces

faces = fetch_olivetti_faces()

print("DESCR")
print(faces.DESCR)
print("images.shape")
print(faces.images.shape)
print("data.shape")
print(faces.data.shape)
print("target.shape")
print(faces.target.shape)
python
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--principal", action="store", type=float, help="Principal", default=10000, required=True)
    parser.add_argument("-r", "--rate", action="store", type=float, help="rate of interest", default=10, required=True)
    parser.add_argument("-t", "--time", action="store", type=int, help="time in years", default=1, required=True)
    args = parser.parse_args()
    principal = float(args.principal)
    rate = float(args.rate)
    time = int(args.time)
    si = principal * time * rate / 100
    # Compound interest is the accrued amount minus the principal,
    # so the principal is subtracted from principal * (1 + r/100)**t.
    ci = principal * ((1 + rate / 100) ** time - 1)
    print(f"Simple Interest = {si} \nCompound Interest = {ci}")
python
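A worked check of the two formulas with illustrative numbers (not taken from the script's defaults).

# principal=1000, rate=10%, time=2 years
principal, rate, time = 1000.0, 10.0, 2
si = principal * time * rate / 100               # 1000 * 2 * 10 / 100 = 200.0
ci = principal * ((1 + rate / 100) ** time - 1)  # 1000 * (1.1**2 - 1) = 210.0 (up to float rounding)
print(si, round(ci, 2))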
from typing import List, Tuple

from ASTModels import Node

# Classic Brainfuck machine parameters: 30000 cells of one byte each.
memory_size = 30000


def execute(ast: List[Node]) -> Tuple[List[int], int]:
    memory = [0] * memory_size
    mp = 0
    for node in ast:
        memory, mp = _evaluate(node, memory, mp)
    print()
    return memory, mp


def _evaluate(node: Node, memory: List[int], mp: int) -> Tuple[List[int], int]:
    if node.node_type == "INCREMENT":
        # Cells hold a single byte, so increments and decrements wrap modulo 256.
        memory[mp] = (memory[mp] + node.val) % 256
    elif node.node_type == "DECREMENT":
        memory[mp] = (memory[mp] - node.val) % 256
    elif node.node_type == "INCREMENT_POINTER":
        # The tape is circular: moving past either end wraps around.
        mp = (mp + node.val) % memory_size
    elif node.node_type == "DECREMENT_POINTER":
        mp = (mp - node.val) % memory_size
    elif node.node_type == "OUTPUT":
        print(chr(memory[mp]), end='')
    elif node.node_type == "INPUT":
        i = ''
        while i == '':
            i = input()
        memory[mp] = ord(i[0])
    elif node.node_type == "LOOP":
        while memory[mp] != 0:
            for block_node in node.nodes:
                memory, mp = _evaluate(block_node, memory, mp)
    return memory, mp
python
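A minimal smoke test for the interpreter above. The real Node constructor lives in ASTModels and its signature is not shown here, so a tiny stand-in with the attributes _evaluate() reads (node_type, val, nodes) is used instead.

# Hypothetical stand-in for ASTModels.Node, used only for this example.
class FakeNode:
    def __init__(self, node_type, val=0, nodes=None):
        self.node_type = node_type
        self.val = val
        self.nodes = nodes or []

# Increment cell 0 to 72 ('H') and print it.
program = [FakeNode("INCREMENT", 72), FakeNode("OUTPUT")]
memory, mp = execute(program)
assert memory[0] == 72 and mp == 0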
from __future__ import print_function from __future__ import absolute_import from __future__ import division import os import sys import tensorflow as tf import numpy as np from tensorflow.python.client import timeline with tf.device("/cpu:0"): a = tf.Variable([1],) with tf.device("/cpu:1"): b = tf.Variable([2],) with tf.device("/cpu:2"): c = tf.Variable([3],) with tf.device("/cpu:3"): d = tf.Variable([4],) with tf.device("/cpu:0"): total_a = tf.add_n([a, b]) with tf.device("/cpu:1"): total_b = tf.add_n([a, b, c]) with tf.device("/cpu:2"): total_c = tf.add_n([b, c, d]) with tf.device("/cpu:3"): total_d = tf.add_n([c, d]) graph = tf.add_n([total_a, total_b, total_c, total_d]) config = tf.ConfigProto(device_count={"CPU": 4}) with tf.Session(config=config) as sess: sess.run(tf.global_variables_initializer()) options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE) run_metadata = tf.RunMetadata() sess.run([total_a, total_b, total_c, total_d, graph], options=options, run_metadata=run_metadata) fetched_timeline = timeline.Timeline(run_metadata.step_stats) chrome_trace = fetched_timeline.generate_chrome_trace_format() with open('timeline_01.json', 'w') as f: f.write(chrome_trace)
python
class Vehicle: def __init__(self, vin): self.vin=vin def GetVin(self): return self.vin class Car(Vehicle): def Accelerate(self): print("Car accelerating...") class Truck(Vehicle): def Accelerate(self): print("Truck accelerating...") def main(): cars=[Car("A123456890"), Car("B123456890"), Truck("C123456890"), Truck("D123456890"), Car("E123456890")] for car in cars: car.Accelerate() # polymorphic site if __name__ == "__main__": main()
python
# -*- coding: utf-8 -*- import webapp2 from webapp2_extras import routes import json from api import routes as apiRoutes from fetch import routes as fetchRoutes class MainPage(webapp2.RequestHandler): def get(self): self.response.headers['Content-Type'] = 'text/plain' self.response.write('QCurrency is working.') app = webapp2.WSGIApplication([ routes.PathPrefixRoute('/api', apiRoutes), routes.PathPrefixRoute('/fetch', fetchRoutes), ('/', MainPage), ], debug=True)
python
# -*- coding:utf-8 -*- import unittest from simple_ml.classify_data import DataCollector, get_iris import numpy as np class TestDataCollector(unittest.TestCase): def test_get_iris(self): dc = DataCollector() x = dc.fetch_handled_data("iris") self.assertIsInstance(x, np.ndarray) self.assertEqual(x.shape[0], 150) self.assertEqual(x.shape[1], 6) def test_build_in_get_iris(self): x, y = get_iris() self.assertEqual(len(x.shape), 2) self.assertEqual(len(y.shape), 1) if __name__ == '__main__': unittest.main()
python
#!/usr/bin/env python # -*- coding: utf-8 -*- ################# # Module-Import # ################# #eegpy-modules try: import eegpy from eegpy.events import EventTable from eegpy.misc import FATALERROR from eegpy.ui.widgets.windowwidgets import EegpyBaseWin from eegpy.ui.icon import image_from_eegpy_stock, eegpy_logo except ImportError: raise FATALERROR('Your installation of EegPy seems to be incomplete.\nMaybe you need to set the PYTHONPATH environment-variable adequatly.') #from eegpy.filter.filt_misc import filterRecursively #Third-party try: import numpy from scipy.signal import lfilter, butter except ImportError: raise FATALERROR('SciPy or NumPy not found!\nPlease visit www.scipy.org or numeric.scipy.org for more information.') try: import pygtk pygtk.require('2.0') import gobject import gtk except ImportError: raise FATALERROR('GTK cannot be imported.') #try: # from matplotlib.axes import Subplot # # uncomment to select /GTK/GTKAgg/GTKCairo # from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas # from matplotlib.backends.backend_gtk import NavigationToolbar2GTK as NavigationToolbar # import matplotlib # #from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg, NavigationToolbar # from matplotlib.figure import Figure, SubplotParams # from matplotlib.axis import Axis # import matplotlib.cm #except ImportError: # raise FATALERROR('Error while importing matplotib. Please visit http://matplotlib.sf.net for more information.') #native python import sys import os import pickle class EventManager(gtk.Frame): _et = None _fn = None _keylist = None def __init__(self, label=""): gtk.Frame.__init__(self,label) self.vbox=gtk.VBox() self.tb_box = gtk.HBox() self.add(self.vbox) self.vbox.pack_start(self.tb_box,expand=False) self.tb = gtk.Toolbar() self.tooltips = gtk.Tooltips() self.tb.set_style(gtk.TOOLBAR_ICONS) self.add_toolbutton_from_stock(gtk.STOCK_OPEN, 'Load', 'Load an EventTable from a file', 'Private', self.load_et) self.add_toolbutton_from_stock(gtk.STOCK_SAVE, 'Save', 'Save the EventTable back to the original file', 'Private', self.save_et, False) self.add_toolbutton_from_stock(gtk.STOCK_SAVE_AS, 'Save to', 'Save the EventTable to a file, choose new file', 'Private', self.save_et, True) self.tb.insert(gtk.SeparatorToolItem(),-1) self.add_toolbutton_eegpy("add_trigger_type", "Add type", "Add a new trigger type", 'Private', self.cb_add_trigger_type, None) self.add_toolbutton_eegpy("add_trigger", "Add trigger", "Add a new trigger", 'Private', self.cb_add_trigger, None) self.tb_box.pack_start(self.tb,expand=True) self.lb_fn = gtk.Label("New EventTable...") self.lb_fn.set_max_width_chars(50) self.lb_fn.set_justify(gtk.JUSTIFY_RIGHT) self.tb_box.pack_end(self.lb_fn, expand=False) #HBox für _keylist/triggerlist self.pane_kl = gtk.HPaned() self.vbox.pack_end(self.pane_kl) self.setup_trees() self._et = EventTable() def setup_trees(self): #First: Keys self.tvsw_keys = gtk.ScrolledWindow() self.tvsw_keys.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC) self.tree_keys = gtk.TreeStore(gobject.TYPE_STRING) #self.treeS = gtk.TreeModelSort(self.tree) self.tv_keys = gtk.TreeView(self.tree_keys) self.tv_keys.get_selection().connect("changed",self.key_selected) #self.tv_keys.get_selection().set_mode(gtk.SELECTION_MULTIPLE) #renderer = gtk.CellRendererText() #self.col1 = gtk.TreeViewColumn("File ...", renderer,text=0) self.tv_keys.append_column(gtk.TreeViewColumn("Key", gtk.CellRendererText(),text=0)) #self.tv_keys.show() 
self.tvsw_keys.add(self.tv_keys) self.pane_kl.add1(self.tvsw_keys) #Second: Triggers self.tvsw_tr = gtk.ScrolledWindow() self.tvsw_tr.set_policy(gtk.POLICY_AUTOMATIC,gtk.POLICY_AUTOMATIC) self.tree_tr = gtk.TreeStore(gobject.TYPE_INT) #self.treeS = gtk.TreeModelSort(self.tree) self.tv_tr = gtk.TreeView(self.tree_tr) self.tv_tr.get_selection().set_mode(gtk.SELECTION_MULTIPLE) #renderer = gtk.CellRendererText() #self.col1 = gtk.TreeViewColumn("File ...", renderer,text=0) self.tv_tr.append_column(gtk.TreeViewColumn("Timepoint", gtk.CellRendererText(),text=0)) #self.tv_keys.show() #Setting up drag'n'drop self.tv_tr.enable_model_drag_source( gtk.gdk.BUTTON1_MASK, [('INT',0,0)], gtk.gdk.ACTION_DEFAULT| gtk.gdk.ACTION_MOVE) self.tv_tr.enable_model_drag_dest([('INT',0,0)], gtk.gdk.ACTION_DEFAULT) self.tv_tr.connect("drag_data_get", self.tr_drag_get) self.tv_tr.connect("drag_data_received", self.tr_drag_received) self.tv_keys.connect("key_press_event", self.cb_key_pressed) self.tv_tr.connect("key_press_event", self.cb_key_pressed) self.tvsw_tr.add(self.tv_tr) self.pane_kl.add2(self.tvsw_tr) def add_toolbutton_eegpy(self, icon_name, text, tip_text, tip_private, clicked_function, clicked_param1=None): iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR iconw = eegpy.ui.icon.image_from_eegpy_stock(icon_name) toolitem = gtk.ToolButton(iconw, text) #toolitem = gtk.ToolButton(iconw) toolitem.set_icon_widget(iconw) toolitem.show_all() toolitem.set_tooltip(self.tooltips, tip_text, tip_private) toolitem.connect("clicked", clicked_function, clicked_param1) #toolitem.connect("scroll_event", clicked_function) self.tb.insert(toolitem, -1) def add_toolbutton_from_stock(self, icon_name, text, tip_text, tip_private, clicked_function, clicked_param1=None): iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR iconw = gtk.Image() iconw.set_from_stock(icon_name, iconSize) toolitem = gtk.ToolButton(iconw, text) #toolitem = gtk.ToolButton(iconw) toolitem.set_icon_widget(iconw) toolitem.show_all() toolitem.set_tooltip(self.tooltips, tip_text, tip_private) toolitem.connect("clicked", clicked_function, clicked_param1) #toolitem.connect("scroll_event", clicked_function) self.tb.insert(toolitem, -1) def load_et(self,event,data): dialog = gtk.FileChooserDialog("Open EventTable from file..", None, gtk.FILE_CHOOSER_ACTION_OPEN, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN, gtk.RESPONSE_OK)) dialog.set_default_response(gtk.RESPONSE_OK) filter = gtk.FileFilter() filter.set_name("eegpy EventTable or similar") filter.add_pattern("*.evt") filter.add_pattern("*.vmrk") dialog.add_filter(filter) filter = gtk.FileFilter() filter.set_name("All files") filter.add_pattern("*") dialog.add_filter(filter) response = dialog.run() if response == gtk.RESPONSE_OK: self.set_filename(dialog.get_filename()) #print dialog.get_filename(), 'selected' elif response == gtk.RESPONSE_CANCEL: print 'Closed, no files selected' dialog.destroy() def save_et(self, event, do_save_as = True): if do_save_as == False: self._et.save(self._fn) else: dialog = gtk.FileChooserDialog("Save EventTable to file...", None, gtk.FILE_CHOOSER_ACTION_SAVE, (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_SAVE, gtk.RESPONSE_OK)) dialog.set_default_response(gtk.RESPONSE_OK) filter = gtk.FileFilter() filter.set_name("eegpy EventTable") filter.add_pattern("*.evt") dialog.add_filter(filter) filter = gtk.FileFilter() filter.set_name("All files") filter.add_pattern("*") dialog.add_filter(filter) response = dialog.run() if response == gtk.RESPONSE_OK: fn = dialog.get_filename() print fn, 'selected' 
dialog.destroy() self._fn = fn #Now save... self._et.save(self._fn) lbtext = "" if len(fn)>40: lbtext = "..."+fn[-37:] self.lb_fn.set_text(lbtext) #fh.close() else:# response == gtk.RESPONSE_CANCEL: dialog.destroy() print 'Closed, no files selected' pass def set_filename(self,fn): print fn, "selected for opening" #success = False try: if not os.path.exists(fn): raise ValueError("File doesn't exist") self._et = EventTable(fn) if len(self._et.keys())==0: print self._et.keys() raise ValueError("EventTable empty!") self._fn = fn except ValueError, e: print "Error opening EventTable", e self._et=None self._fn=None return False lbtext = "" if len(fn)>40: lbtext = "..."+fn[-37:] self.lb_fn.set_text(lbtext) self.setup_keylist() def setup_keylist(self): #if self._tv!=None: # try: # self._keylist.hide() # self._keylist.destroy() # except Exception,e: # print "Cannot destroy keylist" #TODO: Real functionalityself.tvsw_keys = gtk.ScrolledWindow() keys = self._et.keys() keys.sort() self.tree_keys.clear() for k in keys: iter = self.tree_keys.append(None) self.tree_keys.set(iter, 0, k) self.tree_keys.set_sort_column_id(0,gtk.SORT_ASCENDING) self.show_all() def setup_triggerlist(self, key): self.tree_tr.clear() for tr in self._et[key]: #print tr iter = self.tree_tr.append(None) self.tree_tr.set(iter, 0, int(tr)) self.tree_tr.set_sort_column_id(0,gtk.SORT_ASCENDING) def key_selected(self,treeselection,*args): #print tv, path, col, args, self.tree_keys.get(self.tree_keys.get_iter(path),0)[0] self.tv_tr.get_selection().unselect_all() #self.tree_tr.clear() paths = treeselection.get_selected_rows()[1] if len(paths)>0: iter = self.tree_keys.get_iter(paths[0]) key = self.tree_keys.get(iter,0)[0] self.setup_triggerlist(key) def cb_add_trigger_type(self,event,data): dialog_label = gtk.Dialog("Choose name...", None, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK, gtk.RESPONSE_OK)) entry1 = gtk.Entry() entry1.set_text("Trigger") dialog_label.vbox.pack_start(entry1) entry1.show() response = dialog_label.run() print response if response == gtk.RESPONSE_OK: trig_name = entry1.get_text() print trig_name else: print "Adding trigger-type aborted by user." dialog_label.destroy() return False dialog_label.destroy() self.add_trigger_type(trig_name, []) def cb_add_trigger(self,event,data): dialog_label = gtk.Dialog("Add trigger...", None, gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT, gtk.STOCK_OK, gtk.RESPONSE_OK)) dialog_label.vbox.pack_start(gtk.Label("Timepoint:")) sb_time = gtk.SpinButton(gtk.Adjustment(0,0,100000000,1,1000)) dialog_label.vbox.pack_start(sb_time) dialog_label.vbox.show_all() response = dialog_label.run() print response if response == gtk.RESPONSE_OK: time = sb_time.get_value() print time else: print "Adding trigger aborted by user." 
dialog_label.destroy() return False dialog_label.destroy() self.add_trigger(time) def add_trigger_type(self,key,ts=[]): if not self._et.has_key(key): self._et.add_trigger_type(key, ts) self.setup_keylist() self.tree_tr.clear() def add_trigger(self,time): #find out key path = self.tv_keys.get_selection().get_selected_rows()[1][0] iter = self.tree_keys.get_iter(path) k = self.tree_keys.get(iter,0)[0] if self._et.has_key(k): self._et.add_trigger(k, time) self.setup_triggerlist(k) def tr_drag_get(self, treeview, context, selection, target_id, etime): pathlist = treeview.get_selection().get_selected_rows()[1] model = treeview.get_model() iterlist = [model.get_iter(row) for row in pathlist] datalist = [model.get(iter,0)[0] for iter in iterlist] #print datalist selection.set(selection.target,8,pickle.dumps(datalist)) #print "Drag_get: ", treeview, context, selection, target_id, etime def tr_drag_received(self, treeview, context, x, y, selection, info, etime): #print pickle.loads(selection.data) datalist = pickle.loads(selection.data) self.add_trigger(datalist[0]) #print "Drag_received:", treeview, context, x, y, selection, info, etime def cb_key_pressed(self, widget, event, data=None): keyname = gtk.gdk.keyval_name(event.keyval) #print "Key %s (%d) was pressed in widget %s" % (keyname, event.keyval, str(widget)) if keyname == "Delete": #find out key path = self.tv_keys.get_selection().get_selected_rows()[1][0] iter = self.tree_keys.get_iter(path) k = self.tree_keys.get(iter,0)[0] if widget==self.tv_keys: self._et.remove(k) self.setup_keylist() self.tv_keys.get_selection().unselect_all() self.tree_tr.clear() if widget==self.tv_tr: pathlist = self.tv_tr.get_selection().get_selected_rows()[1] iterlist = [self.tree_tr.get_iter(row) for row in pathlist] datalist = [self.tree_tr.get(iter,0)[0] for iter in iterlist] for tr in datalist: self._et.remove(k,tr) self.setup_triggerlist(k) class EventTableEditorWin(EegpyBaseWin): programName = "eegpy: Frequency-Filtering" # Konstruktor def __init__(self): EegpyBaseWin.__init__(self) self.inner_pane.set_position(300) self.em1 = EventManager("EventTable 1") self.em1.tv_tr.get_selection().connect("changed",self.cb_plot_marks)#, "blue") self.em2 = EventManager("EventTable 2") self.em2.tv_tr.get_selection().connect("changed",self.cb_plot_marks)#, "red") self.pane_edit = gtk.HPaned() self.upper_hbox.pack_start(self.pane_edit) self.pane_edit.add1(self.em1) self.pane_edit.pack2(self.em2,False) self.pane_edit.set_position(self.get_size()[0]/2) #self.setupOptions() self.show_all() #self.setupGUI() def setupGUI(self): EegpyBaseWin.setupGUI(self) def cb_plot_marks(self, treeselection, *args): #print "Color", color self.a.cla() pathlist = self.em1.tv_tr.get_selection().get_selected_rows()[1] iterlist = [self.em1.tree_tr.get_iter(row) for row in pathlist] datalist1 = [self.em1.tree_tr.get(iter,0)[0] for iter in iterlist] pathlist = self.em2.tv_tr.get_selection().get_selected_rows()[1] iterlist = [self.em2.tree_tr.get_iter(row) for row in pathlist] datalist2 = [self.em2.tree_tr.get(iter,0)[0] for iter in iterlist] #print datalist1, datalist2 for i in datalist1: # print i, self.a.axvline(i, lw=1, color="blue", ymin=0.5, ymax=1) #self.a.plot(datalist1,numpy.zeros(len(datalist1)),"bD") #self.a.plot(datalist2,numpy.ones(len(datalist2)),"rD") #print "" for i in datalist2: # print i, self.a.axvline(i, lw=1, color="red", ymin=0, ymax=0.5) #print "" # if len(datalist1) == 1: # self.a.set_xlim(datalist1[0]-1000,datalist1[0]+1000) # elif len(datalist2)==1: # 
self.a.set_xlim(datalist2[0]-1000,datalist2[0]+1000) # else: # self.a.autoscale_view() # elif: # xlim0 = max(min(datalist1),min(datalist2))-500 # xlim1 = min(max(datalist1),max(datalist2))+500 # if xlim1<xlim0: # xlim0 = min(min(datalist1),min(datalist2))-500 # xlim1 = max(max(datalist1),max(datalist2))+500 # self.a.set_xlim(xlim0,xlim1) #self.a.set_xlim(numpy.array(datalist1+datalist2).min()-1000,numpy.array(datalist1+datalist2).max()+1000) self.a.set_ylim(0,1) self.a.set_yticks([]) self.canvas.draw() def main(): gtk.main() return 0 if __name__ == "__main__": etew = EventTableEditorWin() main()
python
#!/usr/bin/env python3 """ Prepares the test environment prior to starting hyperglass. """ import os import glob import shutil from logzero import logger working_directory = os.path.dirname(os.path.abspath(__file__)) parent_directory = os.path.dirname(working_directory) def ci_copy_config(): """Copies test configuration files to usable config files""" logger.info("Migrating test config files...") config_dir = os.path.join(parent_directory, "hyperglass/configuration/") test_files = glob.iglob(os.path.join(working_directory, "*.toml")) config_files = glob.iglob(os.path.join(config_dir, "*.toml")) logger.debug(config_dir) logger.debug(working_directory) logger.debug(parent_directory) status = False for file in config_files: if os.path.exists(file): logger.debug(f"{file} already exists") os.remove(file) logger.info(f"Deleted {file}") for file in test_files: try: shutil.copy(file, config_dir) logger.debug(f"Copied {file}") logger.debug(os.listdir(config_dir)) logger.info("Successfully migrated test config files") status = True except: logger.error(f"Failed to migrate {file}") raise return status if __name__ == "__main__": ci_copy_config()
python
# Version: @VERSIONEER-VERSION@ """The Versioneer - like a rocketeer, but for versions. @README@ """ # pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring # pylint:disable=missing-class-docstring,too-many-branches,too-many-statements # pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error # pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with # pylint:disable=attribute-defined-outside-init,too-many-arguments import configparser import errno import json import os import re import subprocess import sys from typing import Callable, Dict class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_root(): """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . """ root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. my_path = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(my_path)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(my_path), versioneer_py)) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise OSError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . 
setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.ConfigParser() with open(setup_cfg, "r") as cfg_file: parser.read_file(cfg_file) VCS = parser.get("versioneer", "VCS") # mandatory # Dict-like interface for non-mandatory entries section = parser["versioneer"] cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = section.get("style", "") cfg.versionfile_source = section.get("versionfile_source") cfg.versionfile_build = section.get("versionfile_build") cfg.tag_prefix = section.get("tag_prefix") if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = section.get("parentdir_prefix") cfg.verbose = section.get("verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY: Dict[str, str] = {} HANDLERS: Dict[str, Dict[str, Callable]] = {} def register_vcs_handler(vcs, method): # decorator """Create decorator to mark a method as the handler of a VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" HANDLERS.setdefault(vcs, {})[method] = f return f return decorate
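# --- Illustrative sketch (not part of the original file) ---
# register_vcs_handler above fills the HANDLERS dict, keyed first by VCS name
# and then by method name. The stub below only demonstrates the registration
# mechanism; real versioneer handlers parse version information instead of
# returning an empty dict.
@register_vcs_handler("git", "get_keywords")
def git_get_keywords_stub(versionfile_abs):
    """Hypothetical handler: a real one reads keywords from versionfile_abs."""
    return {}


# After the decorator runs, the handler can be looked up by VCS and method.
assert HANDLERS["git"]["get_keywords"] is git_get_keywords_stub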
python
from dj_rest_auth.serializers import PasswordResetSerializer from django.conf import settings class PasswordResetSerializerFrontendHost(PasswordResetSerializer): """ Serializer for requesting a password reset e-mail. """ def save(self): if "allauth" in settings.INSTALLED_APPS: from allauth.account.forms import default_token_generator else: from django.contrib.auth.tokens import default_token_generator request = self.context.get("request") # Set some values to trigger the send_email method. opts = { "use_https": request.is_secure(), "from_email": getattr(settings, "DEFAULT_FROM_EMAIL"), "request": None, # None triggers to use the host from site object "token_generator": default_token_generator, } opts.update(self.get_email_options()) self.reset_form.save(**opts)
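# --- Illustrative sketch (not part of the original file) ---
# dj-rest-auth picks up a custom password-reset serializer through a settings
# entry; older releases read it from REST_AUTH_SERIALIZERS as sketched below.
# The dotted path is hypothetical and should point at wherever this class
# actually lives in the project.
#
# settings.py
# REST_AUTH_SERIALIZERS = {
#     "PASSWORD_RESET_SERIALIZER": "myproject.serializers.PasswordResetSerializerFrontendHost",
# }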
python
#!/usr/bin/env python # -*- coding: utf-8 -*- #2018-05-29 08-49 # Standard Modules import logging # Extra Modules dependencies_missing = False try: import teradata except ImportError: dependencies_missing = True from metasploit import module, login_scanner # Metasploit Metadata metadata = { 'name': 'Teradata ODBC Login Scanner Module', 'description': ''' Login scanner module for ODBC connections to Teradata databases. Port specification (TCP 1025 by default) is not necessary for ODBC connections. Blank passwords are not supported by ODBC connections. Requires ODBC driver and Python Teradata module. ''', 'authors': [ 'Ted Raffle (actuated)' ], 'date': '2018-03-30', 'license': 'MSF_LICENSE', 'references': [ {'type': 'url', 'ref': 'https://developer.teradata.com/tools/reference/teradata-python-module'}, {'type': 'url', 'ref': 'https://downloads.teradata.com/download/connectivity/odbc-driver/linux'} ], 'type': 'single_host_login_scanner', 'options': { 'rhost': {'type': 'address', 'description': 'Host to target', 'required': True}, 'rport': {'type': 'port', 'description': 'Port to target, ignored by the ODBC driver', 'required': True, 'default': 1025}, 'userpass': {'type': 'string', 'description': 'A list of username/password combinations to try', 'required': False}, 'sleep_interval': {'type': 'float', 'description': 'Time in seconds to wait between login attempts', 'required': False} }, 'service_name': 'teradata', 'notes': { 'AKA': ['Teradata ODBC Login Scanner'] } } def valid_login(udaExec, host, user, password): try: udaExec.connect(method="odbc", system=host, username=user, password=password) except teradata.api.Error as e: return False else: return True def run(args): if dependencies_missing: module.log('Python Teradata module missing, cannot continue', level=error) return # Define UdaExec ODBC connection "application" globally, must be before LogHandler udaExec = teradata.UdaExec(appName="Auth", version="1.0", logConsole=False, configureLogging=False) module.LogHandler.setup(msg_prefix='{}:{} - '.format(args['rhost'], 1025)) scanner = login_scanner.make_scanner(lambda host, port, username, password: valid_login(udaExec, host, username, password)) scanner(args) if __name__ == '__main__': module.run(metadata, run)
python
import itertools import discord from discord.ext import commands from bot.constants import Colours with open('bot/resources/evergreen/python_facts.txt') as file: FACTS = itertools.cycle(list(file)) COLORS = itertools.cycle([Colours.python_blue, Colours.python_yellow]) class PythonFacts(commands.Cog): """Sends a random fun fact about Python.""" def __init__(self, bot: commands.Bot) -> None: self.bot = bot @commands.command(name='pythonfact', aliases=['pyfact']) async def get_python_fact(self, ctx: commands.Context) -> None: """Sends a Random fun fact about Python.""" embed = discord.Embed(title='Python Facts', description=next(FACTS), colour=next(COLORS)) embed.add_field(name='Suggestions', value="Suggest more facts [here!](https://github.com/python-discord/meta/discussions/93)") await ctx.send(embed=embed) def setup(bot: commands.Bot) -> None: """Load PythonFacts Cog.""" bot.add_cog(PythonFacts(bot))
python
from django.core.management.base import BaseCommand, CommandError from ghu_main.email import EmailAPI class Command(BaseCommand): """This command refers to the API in email.py for sending emails in-app""" def __init__(self): super(Command, self).__init__() def add_arguments(self, parser): parser.add_argument('subject', type=str) parser.add_argument('body', type=str) parser.add_argument('recipients', type=str) def handle(self, *args, **options): EmailAPI.send_email(options['subject'], options['body'], options['recipients'].split(','))
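# --- Illustrative usage (not part of the original file) ---
# Django names the command after the file under ghu_main/management/commands/;
# assuming this module is saved as send_email.py, it would be invoked as:
#
#   python manage.py send_email "Subject" "Body text" "[email protected],[email protected]"
#
# The recipients argument is a single comma-separated string, split in handle().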
python
#!/usr/bin/python # # Copyright (c) 2016 Juniper Networks, Inc. All rights reserved. # import os import sys import argparse import ConfigParser import requests from netaddr.ip import IPNetwork from vnc_api.vnc_api import * class ProvisionVgwInterface(object): def __init__(self, args_str=None): self._args = None if not args_str: args_str = ' '.join(sys.argv[1:]) self._parse_args(args_str) headers = {'content-type': 'application/json'} url = "http://localhost:9091/gateway" if self._args.oper == "create": print "Creating virtual-gateway ..." with open("/proc/sys/net/ipv4/ip_forward", "w") as file: file.write("1") vif_command = '/usr/bin/vif --create ' + self._args.interface vif_command += ' --mac 00:00:5e:00:01:00' self.execute_command(vif_command) ifconfig_command = 'ifconfig ' + self._args.interface + ' up' self.execute_command(ifconfig_command) for subnet in self._args.subnets: route_command = 'route add -net ' + subnet route_command += ' dev ' + self._args.interface self.execute_command(route_command) subnet_list = [] first = True subnets_str = "\"subnets\":[" for subnet in self._args.subnets: net = IPNetwork(subnet) if not first: subnets_str += "," first = False subnets_str += "{\"ip-address\":\"%s\", \"prefix-len\":%d}" % (str(net.ip), net.prefixlen) subnets_str += "]" route_list = [] first = True routes_str = "\"routes\":[" for subnet in self._args.routes: net = IPNetwork(subnet) if not first: routes_str += "," first = False routes_str += "{\"ip-address\":\"%s\", \"prefix-len\":%d}" % (str(net.ip), net.prefixlen) routes_str += "]" gw_str = "[{\"interface\":\"%s\", \"routing-instance\":\"%s\", %s, %s}]" %(self._args.interface, self._args.vrf, subnets_str, routes_str) try: r = requests.post(url, data=gw_str, headers=headers) except ConnectionError: print "Error: Error adding VGW interface" return if r.status_code != 200: print "Failed to Add VGW interface" return print "Done creating virtual-gateway..." else: print "Deleting virtual-gateway ..." gw_str = "[{\"interface\":\"%s\"}]" % (self._args.interface) try: r = requests.delete(url, data=gw_str, headers=headers) except ConnectionError: print "Error: Error deleting VGW interface" return if r.status_code != 200: print "Failed to Delete VGW interface" return for subnet in self._args.subnets: route_command = 'route del -net ' + subnet route_command += ' dev ' + self._args.interface self.execute_command(route_command) ifconfig_command = 'ifconfig ' + self._args.interface + ' down' self.execute_command(ifconfig_command) interface_index = self.get_interface_index(self._args.interface) if interface_index != -1: vif_command = '/usr/bin/vif --delete ' + interface_index self.execute_command(vif_command) del_cmd = 'ip link del ' + self._args.interface self.execute_command(del_cmd) print "Done deleting virtual-gateway..." # end __init__ def execute_command(self, cmd): print cmd out = os.system(cmd) if out != 0: print "Error executing : " + cmd #end execute_command def get_interface_index(self, interface): import subprocess proc = subprocess.Popen(["/usr/bin/vif", "--list"], stdout=subprocess.PIPE) vif_list, err = proc.communicate() vif_match = 'OS: ' + interface lines = [line for line in vif_list.split('\n') if line.endswith(vif_match)] for line in lines: lineitems = line.split(' ') first = lineitems[0] index = first.split('/') return index[1] return -1 #end get_interface_index def _parse_args(self, args_str): ''' Eg. 
python provision_vgw_interface.py --oper <create | delete> --interface vgw1 --subnets 1.2.3.0/24 7.8.9.0/24 --routes 8.8.8.0/24 9.9.9.0/24 --vrf default-domain:admin:vn1:vn1 ''' # Source any specified config/ini file # Turn off help, so we print all options in response to -h conf_parser = argparse.ArgumentParser(add_help=False) args, remaining_argv = conf_parser.parse_known_args(args_str.split()) defaults = { 'oper': 'create', 'interface': '', 'subnets': [], 'routes': [], 'vrf': '', } # Don't surpress add_help here so it will handle -h parser = argparse.ArgumentParser( # Inherit options from config_parser parents=[conf_parser], # print script description with -h/--help description=__doc__, # Don't mess with format of description formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.set_defaults(**defaults) parser.add_argument( "--oper", help="Operation : create / delete") parser.add_argument( "--interface", help="Name of the gateway interface") parser.add_argument( "--subnets", nargs='+', help="List of subnets in virtual-network configured for gateway (Ex: 1.1.1.0/24 2.2.2.0/24)") parser.add_argument( "--routes", nargs='+', help="List of public routes injected into virtual-network routing-instance (Ex: 8.8.8.0/24 9.9.9.0/24)") parser.add_argument( "--vrf", help="Routing instance for virtual-network configured for gateway (as FQDN)") self._args = parser.parse_args(remaining_argv) if not self._args.interface: parser.error('Missing argument interface') if not self._args.subnets: parser.error('Missing argument subnets') if self._args.oper == "create": if not self._args.routes: parser.error('Missing argument routes') if not self._args.vrf: parser.error('Missing argument vrf') # end _parse_args # end class ProvisionVgwInterface def main(args_str=None): ProvisionVgwInterface(args_str) # end main if __name__ == "__main__": main()
python
#!/usr/bin/env python def plain_merge(array_a: list, array_b: list) -> list: pointer_a, pointer_b = 0, 0 length_a, length_b = len(array_a), len(array_b) result = [] while pointer_a < length_a and pointer_b < length_b: if array_a[pointer_a] <= array_b[pointer_b]: result.append(array_a[pointer_a]) pointer_a += 1 else: result.append(array_b[pointer_b]) pointer_b += 1 if pointer_a != length_a: result += array_a[pointer_a:] elif pointer_b != length_b: result += array_b[pointer_b:] return result
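# --- Illustrative usage (not part of the original file) ---
if __name__ == '__main__':
    # Merging two already-sorted lists yields a single sorted list.
    print(plain_merge([1, 3, 5], [2, 4, 6]))  # [1, 2, 3, 4, 5, 6]
    print(plain_merge([], [7, 8]))            # [7, 8]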
python
class RuleWriterMount(type): def __init__(cls, name, bases, attrs): if not hasattr(cls, 'rule_writers'): cls.rule_writers = {} else: cls.register_rule_writer(cls) def register_rule_writer(cls, rule_writer): instance = rule_writer() cls.rule_writers[instance.rule_name] = instance class RuleWriter(metaclass=RuleWriterMount): pass
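# --- Illustrative sketch (not part of the original file) ---
# Any subclass of RuleWriter is instantiated and registered by the metaclass
# under its rule_name. The writer below is hypothetical and exists only to
# show the plugin-registration mechanism.
class ExampleRuleWriter(RuleWriter):
    rule_name = 'example'

    def write(self, rule):
        return '# rule: {}'.format(rule)


# Defining the class is enough; no explicit registration call is needed.
assert 'example' in RuleWriter.rule_writers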
python
#!/usr/bin/env python import os import re import shutil import subprocess import sys toplevel = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) re_setup = re.compile(r'setup\(') re_version = re.compile(r'(?<=\bversion=[\'"])([0-9a-zA-Z._+-]+)') def update_version(gitversion, foundversion): """Chooses version string to write to setup.py. """ return gitversion def make_pkg(): # Get version from git describe version = subprocess.check_output(['git', 'describe', '--always', '--tags'], cwd=toplevel).strip() dest = os.path.join(toplevel, 'dist') if not os.path.exists(dest): os.mkdir(dest) #for project in ('reprozip', 'reprounzip', 'reprounzip-docker', # 'reprounzip-vagrant', 'reprounzip-vistrails'): project = 'reprozip' pdir = os.path.join(toplevel, project) setup_py = os.path.join(pdir, 'setup.py') # Update setup.py file with open(setup_py, 'rb') as fp: lines = fp.readlines() i = 0 setup_found = False while i < len(lines): line = lines[i] if not setup_found and re_setup.search(line): setup_found = True if setup_found: m = re_version.search(line) if m is not None: version = update_version(version, m.group(1)) lines[i] = re_version.sub(version, line) break i += 1 with open(setup_py, 'wb') as fp: for line in lines: fp.write(line) # Run sdist subprocess.check_call([sys.executable, setup_py, 'sdist']) # Run bdist_wheel try: __import__('wheel') except ImportError: pass else: subprocess.check_call([sys.executable, setup_py, 'bdist_wheel']) # Move output to top-level dist/ for f in os.listdir(os.path.join(pdir, 'dist')): shutil.copyfile(os.path.join(pdir, 'dist', f), os.path.join(dest, f)) if __name__ == '__main__': make_pkg()
python
""" Merge the tools Consider the following: A string, s, of length n. An integer, k, where k is a factor of n. We can split s into n/k subsegments where each subsegment, t(i), consists of a contiguous block of k characters in s. Then, use each t(i) to create string u(i) such that: The characters in u(i) are a subsequence of the characters in t(i). Any repeat occurrence of a character is removed from the string such that each character in u(i) occurs exactly once. In other words, if the character at some index j in t(i) occurs at a previous index < j in t(i), then do not include the character in string u(i). Given s and k, print n/k lines where each line i denotes string u(i). Input Format The first line contains a single string denoting s. The second line contains an integer, k, denoting the length of each subsegment. Output Format Print n/k lines where each line i contains string u(i). Sample Input AABCAAADA 3 Sample Output AB CA AD """ import textwrap def merge_the_tools(string, k): for i in textwrap.wrap(string, k): d = dict() print(''.join([ d.setdefault(c, c) for c in i if c not in d ])) if __name__ == '__main__': string, k = input(), int(input()) merge_the_tools(string, k)
python
import string def is_pangram(sentence: str) -> bool: """ Determine if a given string contains all the characters from a to z. sentence -- Any string. returns -- true/false for if string contains all letters from a to z. """ letters = set(string.ascii_lowercase) return letters.issubset(sentence.lower())
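# --- Illustrative usage (not part of the original file) ---
if __name__ == '__main__':
    print(is_pangram("The quick brown fox jumps over the lazy dog"))  # True
    print(is_pangram("Hello, world"))  # False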
python
import numpy as np from scipy.linalg import solve from js.geometry.quaternion import Quaternion for path in [ "../data/gazebo_winter/", "../data/mountain_plain/", "../data/gazebo_summer/" ]: #for path in [ "../data/stairs/", "../data/apartment/", "../data/wood_summer/" ]: with open(path+"pose_scanner_leica.csv") as f: f.readline() x = np.loadtxt(f,delimiter=",") for i in range(x.shape[0]): T_wc = np.reshape(x[i,2:],(4,4)) R_wc = T_wc[:3,:3] q_wc = Quaternion() q_wc.fromRot3(R_wc) t_wc = T_wc[:3,3] print t_wc, q_wc with open(path+"pose_{}.csv".format(i),"w") as fout: fout.write("q_w q_x q_y q_z t_x t_y t_z\n") fout.write("{} {} {} {} {} {} {}".format(q_wc.q[0],\ q_wc.q[1],q_wc.q[2],q_wc.q[3],t_wc[0],t_wc[1],t_wc[2]))
python
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.template.defaultfilters import title from django.core.urlresolvers import reverse from django.utils.translation import ugettext_lazy as _ from horizon import tables from horizon.utils.filters import replace_underscores from trove_dashboard import api from django.core import urlresolvers LOG = logging.getLogger(__name__) ACTIVE_STATES = ("COMPLETED", "FAILED") def date(string): """Strip off the T from the datetime string""" return string.replace('T', ' ') class LaunchLink(tables.LinkAction): name = "create" verbose_name = _("Create Backup") url = "horizon:project:database_backups:create" classes = ("btn-launch", "ajax-modal") def allowed(self, request, datum): return True # The action should always be displayed class RestoreLink(tables.LinkAction): name = "restore" verbose_name = _("Restore Backup") url = "horizon:project:databases:launch" classes = ("btn-launch", "ajax-modal") def get_link_url(self, datam): url = urlresolvers.reverse(self.url) return url + '?backup=%s' % datam.id class DeleteBackup(tables.BatchAction): name = "delete" action_present = _("Delete") action_past = _("Scheduled deletion of") data_type_singular = _("Backup") data_type_plural = _("Backups") classes = ('btn-danger', 'btn-terminate') def allowed(self, request, instance=None): return True def action(self, request, obj_id): api.trove.backup_delete(request, obj_id) class UpdateRow(tables.Row): ajax = True def get_data(self, request, backup_id): backup = api.trove.backup_get(request, backup_id) try: backup.instance = api.trove.instance_get(request, backup.instance_id) except: pass return backup def db_link(obj): if not hasattr(obj, 'instance'): return if hasattr(obj.instance, 'name'): return reverse( 'horizon:project:databases:detail', kwargs={'instance_id': obj.instance_id}) def db_name(obj): if hasattr(obj.instance, 'name'): return obj.instance.name return obj.instance_id class BackupsTable(tables.DataTable): STATUS_CHOICES = ( ("BUILDING", None), ("COMPLETED", True), ("DELETE_FAILED", False), ("FAILED", False), ("NEW", None), ("SAVING", None), ) name = tables.Column("name", link=("horizon:project:database_backups:detail"), verbose_name=_("Name")) created = tables.Column("created", verbose_name=_("Created At"), filters=[date]) location = tables.Column(lambda obj: _("Download"), link=lambda obj: obj.locationRef, verbose_name=_("Backup File")) instance = tables.Column(db_name, link=db_link, verbose_name=_("Database")) status = tables.Column("status", filters=(title, replace_underscores), verbose_name=_("Status"), status=True, status_choices=STATUS_CHOICES) class Meta: name = "backups" verbose_name = _("Backups") status_columns = ["status"] row_class = UpdateRow table_actions = (LaunchLink, DeleteBackup) row_actions = (RestoreLink, DeleteBackup)
python
nome = str(input('Digite seu nome: ')).strip().upper()
print(f'Seu nome tem SILVA: {"SILVA" in nome}')
python
from .clip import * from .esresnet import * from .audioclip import AudioCLIP from .audioclip_finetune import AudioCLIPFinetune
python
loan_amount = float(input('Loan Amount: '))
annual_rate = float(input('Annual Interest Rate: '))
n = float(input('Loan Duration in Months: '))

# The amortization formula needs the rate per payment period; with monthly
# payments, the annual rate (given as a decimal, e.g. 0.06) is divided by 12.
r = annual_rate / 12
payment = (r * loan_amount) / (1 - ((1 + r) ** -n))
print('$', payment)
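# --- Worked example (not part of the original prompts) ---
# For a 10,000 loan at a 6% annual rate (0.005 per month) over 24 months:
#   payment = 0.005 * 10000 / (1 - 1.005 ** -24) ≈ 443.21 per month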
python
from .settings import * INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'myapp', ] # Uncomment if you want to use a mysql/mariadb database. Don't forget to change docker-compose.yml! # DATABASES = { # 'default': { # 'NAME': 'mydjango', # 'ENGINE': 'django.db.backends.mysql', # 'USER': 'root', # 'PASSWORD': 'root', # 'HOST': 'db', # 'PORT': 3306, # 'OPTIONS': { # 'init_command': "SET sql_mode='STRICT_TRANS_TABLES'", # }, # 'CONN_MAX_AGE': 550, # } # } # ... add more settings, will override default settings.py
python
# coding:utf-8 """ 以tushare为源的市场数据表 """ import loguru import random import time import datetime import tushare as ts import pandas as pd import matplotlib.pyplot as plt from .sqldata import SqlDayManager, SqlBaseManager import sys,os sys.path.append(os.path.abspath("../timedata")) import settings from loguru import logger class StockBasics(SqlBaseManager): """ 获取此刻有意义的股票列表 依据 tushare的get_stock_basic 同时去掉 暂停上市的,终止上市的,风险警示 """ def stock_basic_fun(self): pro = ts.pro_api() data = pro.stock_basic() if type(data) != pd.DataFrame: logger.info('从tushare获取stock_basic数据更新失败') return None if data.empty: logger.info('数据为空,从tushare获取stock_basic数据更新失败') return None return data def __init__(self): SqlBaseManager.__init__(self) self.table_name = 'stock_basics' self.data_fun = self.stock_basic_fun # class StockMeaning(SqlBaseManager): # """日常有用的stock,运行正常的stock # """ # def stock_meaning_fun(self, THRESHOLD=50): # sb = StockBasics() # sb_data = sb.read() # filter_stock = [] # # 过滤规则 # # 近2个月有交易,最后一个交易日价格在50以下 # start_day = datetime.datetime.now() - datetime.timedelta(days=14) # start_day_str = start_day.strftime('%Y-%m-%d') # hd = HistData() # for code in sb_data.code: # temp = hd.read(code, start=start_day_str) # if not temp.empty: # if 5 < temp.iloc[0]['high'] < THRESHOLD: # filter_stock.append(code) # print code # result = sb_data[sb_data.code.isin(filter_stock)] # return result # def __init__(self): # SqlBaseManager.__init__(self) # self.table_name = 'stock_meaning' # self.data_fun = self.stock_meaning_fun class HistData(SqlDayManager): """ 以tushare为数据源的历史天的数据 数据源是Hist_DATA """ def __init__(self): SqlDayManager.__init__(self) self.table_name = 'hist_data' pro = ts.pro_api() self.get_data_fun = pro.daily def add_all(self): """遍历所有code,把所有数据新增 """ sb = StockBasics() AllStocks = sb.read() no_data_code = [] # 没有数据,或者没有更新数据的code for code in AllStocks.ts_code: logger.debug(u"add %s" % code) is_success = self.add(code) if not is_success: no_data_code.append(code) sleeptime=random.randint(0, 15) time.sleep(sleeptime) return no_data_code # def plot_code_box(self, code, start='2015-11-01',end=None,): # """画出code的时间蜡烛图 # Args: # code: str| 代码code # flag: str or list of str| code返回数据中指定的列名 # start_day: str|样式'2017-01-01'|开始时间 # end_day: str|样式'2017-01-01'|结束时间 # eg: # dm = DataManager() # dm.plot_code_line('300254') # """ # data = self.read(code, start, end) # data.get(['open','high','close','low']).T.plot.box() # plt.show() # class IndustryClassified( SqlBaseManager): # """工业分类的类 # """ # def __init__(self): # SqlBaseManager.__init__(self) # self.table_name = 'industry_classified' # self.data_fun = ts.get_industry_classified
python
from localstack.dashboard import infra
from localstack.config import USE_SSL


def test_infra_graph_generation():
    try:
        graph = infra.get_graph()
    except Exception:
        if USE_SSL:
            print('TODO: the Web UI in combination with USE_SSL=true is currently broken.')
            return
        # Re-raise so a real failure is not masked by a NameError on `graph` below.
        raise
    assert 'nodes' in graph
    assert 'edges' in graph
    # TODO add more tests/assertions
python
from typing import Optional from ..helpers.const import * class ConfigData: name: str host: str port: int username: Optional[str] password: Optional[str] password_clear_text: Optional[str] unit: int update_entities_interval: int update_api_interval: int monitored_devices: list monitored_interfaces: list device_trackers: list log_level: str log_incoming_messages: bool consider_away_interval: int def __init__(self): self.name = DEFAULT_NAME self.host = "" self.port = 0 self.username = None self.password = None self.password_clear_text = None self.unit = ATTR_BYTE self.update_entities_interval = DEFAULT_UPDATE_ENTITIES_INTERVAL self.update_api_interval = DEFAULT_UPDATE_API_INTERVAL self.monitored_devices = [] self.monitored_interfaces = [] self.device_trackers = [] self.log_level = "" self.log_incoming_messages = False self.store_debug_files = False self.consider_away_interval = DEFAULT_CONSIDER_AWAY_INTERVAL @property def unit_size(self): return ALLOWED_UNITS[self.unit] @property def has_credentials(self): has_username = self.username and len(self.username) > 0 has_password = self.password_clear_text and len(self.password_clear_text) > 0 has_credentials = has_username or has_password return has_credentials @property def url(self): url = API_URL_TEMPLATE.format(self.host) return url def __repr__(self): obj = { CONF_NAME: self.name, CONF_HOST: self.host, CONF_USERNAME: self.username, CONF_PASSWORD: self.password, CONF_UNIT: self.unit, CONF_UPDATE_API_INTERVAL: self.update_api_interval, CONF_UPDATE_ENTITIES_INTERVAL: self.update_entities_interval, CONF_MONITORED_DEVICES: self.monitored_devices, CONF_MONITORED_INTERFACES: self.monitored_interfaces, CONF_TRACK_DEVICES: self.device_trackers, CONF_LOG_LEVEL: self.log_level, CONF_LOG_INCOMING_MESSAGES: self.log_incoming_messages, CONF_CONSIDER_AWAY_INTERVAL: self.consider_away_interval, } to_string = f"{obj}" return to_string
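# --- Illustrative sketch (not part of the original file) ---
# ConfigData starts out with the defaults pulled in from const.py; callers
# assign connection details and then read the derived properties. The host
# and credentials below are hypothetical.
if __name__ == "__main__":
    config = ConfigData()
    config.host = "192.168.1.1"
    config.username = "admin"
    config.password_clear_text = "secret"
    print(config.has_credentials)  # truthy once a username or password is set
    print(config.url)              # built from API_URL_TEMPLATE in const.py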
python
# Generated by Django 3.2.8 on 2021-11-30 17:55 from django.conf import settings import django.core.validators from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Address', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('active', models.CharField(choices=[(0, 'Active'), (1, 'Inactive'), (2, 'Archived')], max_length=1, verbose_name='Activate Account')), ('role', models.CharField(choices=[('shipping', 'Shipping Address'), ('billing', 'Billing Address'), ('both', 'Billing/Shipping')], max_length=10, verbose_name='Role')), ('created_on', models.DateField(auto_now_add=True, verbose_name='Created Date')), ('last_modified', models.DateTimeField(auto_now_add=True, verbose_name='Last Modified Date')), ('address_1', models.CharField(max_length=50, verbose_name='Address 1')), ('address_2', models.CharField(max_length=50, verbose_name='Address 2')), ('city', models.CharField(max_length=50, verbose_name='City')), ('state', models.CharField(choices=[('AL', 'Alabama'), ('AK', 'Alaska'), ('AS', 'American Samoa'), ('AZ', 'Arizona'), ('AR', 'Arkansas'), ('CA', 'California'), ('CO', 'Colorado'), ('CT', 'Connecticut'), ('DE', 'Delaware'), ('DC', 'District of Columbia'), ('FL', 'Florida'), ('GA', 'Georgia'), ('GU', 'Guam'), ('HI', 'Hawaii'), ('ID', 'Idaho'), ('IL', 'Illinois'), ('IN', 'Indiana'), ('IA', 'Iowa'), ('KS', 'Kansas'), ('KY', 'Kentucky'), ('LA', 'Louisiana'), ('ME', 'Maine'), ('MD', 'Maryland'), ('MA', 'Massachusetts'), ('MI', 'Michigan'), ('MN', 'Minnesota'), ('MS', 'Mississippi'), ('MO', 'Missouri'), ('MT', 'Montana'), ('NE', 'Nebraska'), ('NV', 'Nevada'), ('NH', 'New Hampshire'), ('NJ', 'New Jersey'), ('NM', 'New Mexico'), ('NY', 'New York'), ('NC', 'North Carolina'), ('ND', 'North Dakota'), ('MP', 'Northern Mariana Islands'), ('OH', 'Ohio'), ('OK', 'Oklahoma'), ('OR', 'Oregon'), ('PA', 'Pennsylvania'), ('PR', 'Puerto Rico'), ('RI', 'Rhode Island'), ('SC', 'South Carolina'), ('SD', 'South Dakota'), ('TN', 'Tennessee'), ('TX', 'Texas'), ('UT', 'Utah'), ('VT', 'Vermont'), ('VI', 'Virgin Islands'), ('VA', 'Virginia'), ('WA', 'Washington'), ('WV', 'West Virginia'), ('WI', 'Wisconsin'), ('WY', 'Wyoming')], max_length=50, verbose_name='State')), ('zip_code', models.CharField(max_length=50, verbose_name='Zip Code')), ('phone', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^(\\d{10}$)', 'Please use numerical format without any spaces or special characters')], verbose_name='Phone')), ('country', models.CharField(max_length=2, verbose_name='Country')), ], options={ 'verbose_name_plural': 'Addresses', }, ), migrations.CreateModel( name='Contact', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(max_length=50, verbose_name='Name')), ('position', models.CharField(max_length=50, verbose_name='Position or Role')), ('description', models.TextField(verbose_name='Contact Notes')), ('phone_1', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^(\\d{10}$)', 'Please use numerical format without any spaces or special characters')], verbose_name='Phone 1')), ('phone_2', models.CharField(max_length=15, validators=[django.core.validators.RegexValidator('^(\\d{10}$)', 'Please use numerical format without any 
spaces or special characters')], verbose_name='Phone 2')), ('email_1', models.EmailField(max_length=254, verbose_name='')), ('email_2', models.EmailField(max_length=254, verbose_name='')), ], ), migrations.CreateModel( name='Customer', fields=[ ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('role', models.CharField(choices=[('customer', 'Customer'), ('client', 'Client'), ('vendor', 'Vendor'), ('employee', 'Employee')], max_length=50, verbose_name='Role')), ('dba', models.CharField(max_length=50, verbose_name='dba')), ('name', models.CharField(max_length=50, verbose_name='Legal Business Entity')), ('start_date', models.DateField(verbose_name='Start Date')), ('end_date', models.DateField(blank=True, null=True, verbose_name='End Date')), ('active', models.CharField(choices=[(0, 'Active'), (1, 'Inactive'), (2, 'Archived')], max_length=1, verbose_name='Active')), ('created_date', models.DateTimeField(auto_now_add=True, verbose_name='Created Date')), ('ein', models.CharField(max_length=50, verbose_name='EIN')), ('industry', models.CharField(choices=[('agriculture', 'Agriculture'), ('arts entertainment', 'Arts & Entertainment'), ('construction', 'Construction'), ('education', 'Education'), ('energy', 'Energy'), ('food', 'Food & Hospitality'), ('finance', 'Finance and Insurance'), ('healthcare', 'Healthcare'), ('manufacturing', 'Manufacturing'), ('mining', 'Mining'), ('other', 'Other Services'), ('services', 'Professional, Scientific, and Tech Services'), ('real estate', 'Real Estate'), ('retail', 'Retail'), ('transportation', 'Transportation & Logistics'), ('utilities', 'Utilities'), ('wholesale', 'Wholesale')], max_length=100, verbose_name='Industry')), ('website', models.URLField(verbose_name='Webiste')), ('account_manager', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='customer_Account', to=settings.AUTH_USER_MODEL, verbose_name='Account Manager')), ('billing_address', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='customer_billing', to='customer.address', verbose_name='Address')), ('contact', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='customer_employee', to='customer.contact', verbose_name='Contact')), ('created_by', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='created_by_customer', to=settings.AUTH_USER_MODEL, verbose_name='Created by')), ('shipping_address', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='customer_location', to='customer.address', verbose_name='Address')), ], options={ 'verbose_name_plural': 'Customers', }, ), migrations.AddField( model_name='contact', name='employer', field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='contact_employer', to='customer.customer', verbose_name='Employer'), ), ]
python
from user.models import CourseRegistration # TODO: # send mail with formatted relevant student test results to course adviser # generate a list of courses registered buy a student for the current semester and session def get_registered_courses(student, session, semester): reg = CourseRegistration.objects.filter(student=student, session=session, semester=semester) return reg def get_current_registered_courses(student, semester): reg = CourseRegistration.objects.filter(student=student, session__is_current=True, semester=semester) return reg
python
from output.models.ms_data.regex.re_l32_xsd.re_l32 import ( Regex, Doc, ) __all__ = [ "Regex", "Doc", ]
python
from app import app

# Set debug=False below when deploying to prod
app.run(host='0.0.0.0', debug=True)
python
#!/usr/bin/python3 # -*- mode: python -*- """ s3_gateway: bottle/boto3 interface to view an s3 bucket in a web browser. 2021-02-15 slg - updated to use anonymous s3 requests, per https://stackoverflow.com/questions/34865927/can-i-use-boto3-anonymously 2021-02-20 slg - add support for database queries to augment what's in S3 """ import json import logging import mimetypes import os import sys import urllib.parse from os.path import dirname import boto3 import botocore import botocore.exceptions from botocore import UNSIGNED from botocore.client import Config import bottle #from botocore.exceptions import ClientError from bottle import request, response, redirect import db_lookup DESCRIPTION=""" This is the testing program for the gateway that allows S3 files to be accessed from the website. """ DEFAULT_BUCKET = 'digitalcorpora' BYPASS_URL = 'https://digitalcorpora.s3.amazonaws.com/' USE_BYPASS = True IGNORE_FILES = ['.DS_Store', 'Icon'] # Specify files in the runtime environment S3_TEMPLATE_FILENAME = os.path.join(dirname(__file__), "templates/s3_index.tpl") S3_ERROR_404_FILENAME = os.path.join(dirname(__file__), "templates/error_404.tpl") # Create the S3_INDEX bottle SimpleTemplate here, outside of the # s3_list_prefix_v1, so that it gets read when s3_gateway.py is imported. # This causes bottle to compile it ONCE and repeatedly serve it out S3_INDEX = bottle.SimpleTemplate( open( S3_TEMPLATE_FILENAME ).read()) ERROR_404 = bottle.SimpleTemplate( open( S3_TEMPLATE_FILENAME ).read()) def s3_get_dirs_files(bucket_name, prefix): """ Returns a tuple of the s3 objects of the 'dirs' and the 'files' Makes an unauthenticated call :param bucket_name: bucket to read :param prefix: prefix to examine :return: (prefixes,keys) - a list of prefixes under `prefix`, and keys under `prefix`. """ s3client = boto3.client('s3', config=Config(signature_version=UNSIGNED)) paginator = s3client.get_paginator('list_objects_v2') pages = paginator.paginate( Bucket=bucket_name, Prefix=prefix, Delimiter='/') dirs = [] files = [] for page in pages: for obj in page.get('CommonPrefixes', []): dirs.append(obj) for obj in page.get('Contents', []): files.append(obj) if (not dirs) and (not files): raise FileNotFoundError(prefix) return (dirs, files) def s3_to_link(obj): """Given a s3 object, return a link to it""" # pylint: disable=R1705 if 'Prefix' in obj: name = obj['Prefix'].split("/")[-2]+"/" return request.url + urllib.parse.quote(name) elif 'Key' in obj: return BYPASS_URL + urllib.parse.quote(obj['Key']) else: raise RuntimeError("obj: "+json.dumps(obj, default=str)) def s3_list_prefix(bucket_name, prefix, auth=None): """The revised s3_list_prefix implementation: uses the Bottle template system to generate HTML. Get a list of the sub-prefixes (dirs) and the objects with this prefix (files), and then construct the dirs[] and files[] arrays. Elements of dirs are strings (one for each prefix). Elements of files[] are (url,name, size,sha256,sha3) tuples. 
:param bucket_name: the bucket to list :param path: the path within the bucket (no leading /) """ path = '/' paths = [] for part in prefix.split('/')[:-1]: part += '/' path += part paths.append((path, part)) (s3_dirs, s3_files) = s3_get_dirs_files(bucket_name, prefix) dirs = [obj['Prefix'].split('/')[-2]+'/' for obj in s3_dirs] if auth is not None and s3_files: db_lookup.annotate_s3files(auth, s3_files) files = [{'a': s3_to_link(obj), 'basename': os.path.basename(obj['Key']), 'size': "{:,}".format(obj['Size']), 'ETag': obj['ETag'], 'sha2_256': obj.get('sha2_256','n/a'), 'sha3_256': obj.get('sha3_256','n/a') } for obj in s3_files] return S3_INDEX.render(prefix=prefix, paths=paths, files=files, dirs=dirs, sys_version=sys.version) def s3_app(*, bucket, quoted_prefix, auth=None): """ Fetching a file. Called from bottle. :param bucket: - the bucket that we are serving from :param quoted_prefix: - the path to display. :param auth: - Database authenticator """ prefix = urllib.parse.unquote(quoted_prefix) logging.warning("bucket=%s quoted_prefix=%s prefix=%s", bucket, quoted_prefix, prefix) if prefix.endswith("/"): try: return s3_list_prefix(bucket, prefix, auth=auth) except FileNotFoundError as e: logging.warning("e:%s", e) response.status = 404 return ERROR_404.render(bucket=bucket,prefix=prefix) # If the prefix does not end with a '/' and there is object there, see if it is a prefix try: obj = boto3.client('s3', config=Config( signature_version=UNSIGNED)).get_object(Bucket=bucket, Key=prefix) except botocore.exceptions.ClientError as e: try: return s3_list_prefix(bucket, prefix+"/", auth=auth) except FileNotFoundError as e: # No object and not a prefix response.status = 404 return ERROR_404.render(bucket=bucket,prefix=prefix) # If we are using the bypass, redirect if USE_BYPASS: logging.info("redirect to %s", BYPASS_URL + prefix) redirect(BYPASS_URL + prefix) # Otherwise download directly try: response.content_type = mimetypes.guess_type(prefix)[0] except (TypeError,ValueError,KeyError) as e: response.content_type = 'application/octet-stream' return obj['Body'] if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=DESCRIPTION) parser.add_argument("--bucket", default=DEFAULT_BUCKET, help='which bucket to use.') parser.add_argument('--prefix', help='specify prefix') args = parser.parse_args() if args.prefix: print(s3_app(bucket=args.bucket, quoted_prefix=args.prefix))
python
from torch import Tensor, _VF # noqa: F401 from torch.nn.utils.rnn import PackedSequence import torch import warnings from typing import List, Optional, Tuple class QuantizedLinear(torch.jit.ScriptModule): __constants__ = ['scale', 'zero_point'] def __init__(self, other): super(QuantizedLinear, self).__init__() self.in_features = other.in_features self.out_features = other.out_features # Quantize weight and discard the original self.weight, self.col_offsets, self.scale, self.zero_point = torch.fbgemm_linear_quantize_weight( other.weight.clone(memory_format=torch.contiguous_format).float()) self.weight = torch.nn.Parameter(self.weight, requires_grad=False) self.col_offsets = torch.nn.Parameter(self.col_offsets, requires_grad=False) assert other.bias is not None, 'QuantizedLinear requires a bias' self.bias = torch.nn.Parameter(other.bias.clone(memory_format=torch.contiguous_format).float(), requires_grad=False) self.register_buffer( 'packed_tensor_ptr', torch.fbgemm_pack_quantized_matrix(self.weight.clone(memory_format=torch.contiguous_format))) @torch.jit.script_method def _unpack(self): self.packed_tensor_ptr.set_( torch.fbgemm_pack_quantized_matrix(self.weight)) @torch.jit.script_method def _pack(self): self.packed_tensor_ptr.set_( torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach()) @torch.jit.script_method def forward(self, input): out = torch.fbgemm_linear_int8_weight_fp32_activation( input.float(), self.weight, self.packed_tensor_ptr, self.col_offsets, self.scale, self.zero_point, self.bias) return out.to(input.dtype) def extra_repr(self): repr = 'in_features={in_features}, out_features={out_features}, ' \ 'scale={scale}, zero_point={zero_point}'.format(**self.__dict__) return repr # FP16 weights class QuantizedLinearFP16(torch.jit.ScriptModule): def __init__(self, other): super(QuantizedLinearFP16, self).__init__() self.in_features = other.in_features self.out_features = other.out_features self.original_weight = other.weight self.weight = torch.fbgemm_pack_gemm_matrix_fp16( other.weight.clone(memory_format=torch.contiguous_format).float()) assert other.bias is not None, 'QuantizedLinearFP16 requires a bias' self.bias = torch.nn.Parameter(other.bias.clone(memory_format=torch.contiguous_format).float(), requires_grad=False) self.register_buffer('packed_weight', self.weight) @torch.jit.script_method def _unpack(self): self.packed_weight.set_( torch.fbgemm_pack_gemm_matrix_fp16( self.original_weight)) @torch.jit.script_method def _pack(self): self.packed_weight.set_( torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach()) @torch.jit.script_method def forward(self, input): out = torch.fbgemm_linear_fp16_weight_fp32_activation( input.float(), self.packed_weight, self.bias) return out def extra_repr(self): repr = 'in_features={in_features}, out_features={out_features}, '.format(**self.__dict__) return repr # Quantized RNN cell implementations class QuantizedRNNCellBase(torch.jit.ScriptModule): __constants__ = ['input_size', 'hidden_size', 'bias', 'scale_hh', 'scale_ih', 'zero_point_ih', 'zero_point_hh'] def __init__(self, other): super(QuantizedRNNCellBase, self).__init__() self.input_size = other.input_size self.hidden_size = other.hidden_size self.bias = other.bias if not self.bias: raise ValueError("Quantized RNN cells require bias terms") weight_ih, col_offsets_ih, self.scale_ih, self.zero_point_ih = \ torch.fbgemm_linear_quantize_weight(other.weight_ih.clone(memory_format=torch.contiguous_format).float()) self.register_buffer('weight_ih', 
weight_ih) self.register_buffer('col_offsets_ih', col_offsets_ih) weight_hh, col_offsets_hh, self.scale_hh, self.zero_point_hh = \ torch.fbgemm_linear_quantize_weight(other.weight_hh.clone(memory_format=torch.contiguous_format).float()) self.register_buffer('weight_hh', weight_hh) self.register_buffer('col_offsets_hh', col_offsets_hh) packed_ih = torch.fbgemm_pack_quantized_matrix(self.weight_ih) self.register_buffer('packed_ih', packed_ih) packed_hh = torch.fbgemm_pack_quantized_matrix(self.weight_hh) self.register_buffer('packed_hh', packed_hh) self.bias_ih = torch.nn.Parameter(other.bias_ih.clone(memory_format=torch.contiguous_format).float(), requires_grad=False) self.bias_hh = torch.nn.Parameter(other.bias_hh.clone(memory_format=torch.contiguous_format).float(), requires_grad=False) def extra_repr(self): s = '{input_size}, {hidden_size}' if 'bias' in self.__dict__ and self.bias is not True: s += ', bias={bias}' if 'nonlinearity' in self.__dict__ and self.nonlinearity != "tanh": s += ', nonlinearity={nonlinearity}' return s.format(**self.__dict__) @torch.jit.script_method def check_forward_input(self, input): if input.size(1) != self.input_size: raise RuntimeError( "input has inconsistent input_size: got {}, expected {}".format( input.size(1), self.input_size)) @torch.jit.script_method def check_forward_hidden(self, input: Tensor, hx: Tensor, hidden_label: str = '') -> None: if input.size(0) != hx.size(0): raise RuntimeError( "Input batch size {} doesn't match hidden{} batch size {}".format( input.size(0), hidden_label, hx.size(0))) if hx.size(1) != self.hidden_size: raise RuntimeError( "hidden{} has inconsistent hidden_size: got {}, expected {}".format( hidden_label, hx.size(1), self.hidden_size)) # TODO: for some reason weak_script_method causes a destruction of the # module to occur, which in turn frees the packed_ih object via its DataPtr # deleter. This is bizarre and should probably get fixed. 
# @torch._jit_internal.weak_script_method @torch.jit.script_method def _unpack(self): self.packed_ih.set_(torch.fbgemm_pack_quantized_matrix(self.weight_ih)) self.packed_hh.set_(torch.fbgemm_pack_quantized_matrix(self.weight_hh)) # @torch._jit_internal.weak_script_method @torch.jit.script_method def _pack(self): self.packed_ih.set_( torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach()) self.packed_hh.set_( torch.zeros(torch.jit.annotate(List[int], []), dtype=torch.uint8).detach()) class QuantizedRNNCell(QuantizedRNNCellBase): __constants__ = ['input_size', 'hidden_size', 'bias', 'scale_hh', 'scale_ih', 'zero_point_ih', 'zero_point_hh', 'nonlinearity'] def __init__(self, other): super(QuantizedRNNCell, self).__init__(other) self.nonlinearity = other.nonlinearity @torch.jit.script_method def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: self.check_forward_input(input) if hx is None: hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) self.check_forward_hidden(input, hx, '') if self.nonlinearity == "tanh": ret = _VF.quantized_rnn_tanh_cell( input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih, self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih, self.zero_point_hh ) elif self.nonlinearity == "relu": ret = _VF.quantized_rnn_relu_cell( input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih, self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih, self.zero_point_hh ) else: ret = input # TODO: remove when jit supports exception flow raise RuntimeError( "Unknown nonlinearity: {}".format(self.nonlinearity)) return ret class QuantizedLSTMCell(QuantizedRNNCellBase): def __init__(self, other): super(QuantizedLSTMCell, self).__init__(other) @torch.jit.script_method def forward(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tensor]: self.check_forward_input(input) if hx is None: zeros = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) hx = (zeros, zeros) self.check_forward_hidden(input, hx[0], '[0]') self.check_forward_hidden(input, hx[1], '[1]') return _VF.quantized_lstm_cell( input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih, self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih, self.zero_point_hh ) class QuantizedGRUCell(QuantizedRNNCellBase): def __init__(self, other): super(QuantizedGRUCell, self).__init__(other) @torch.jit.script_method def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor: self.check_forward_input(input) if hx is None: hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device) self.check_forward_hidden(input, hx, '') return _VF.quantized_gru_cell( input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh, self.packed_ih, self.packed_hh, self.col_offsets_ih, self.col_offsets_hh, self.scale_ih, self.scale_hh, self.zero_point_ih, self.zero_point_hh ) def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor: return tensor.index_select(dim, permutation) class QuantizedRNNBase(torch.jit.ScriptModule): __constants__ = ['mode', 'input_size', 'hidden_size', 'num_layers', 'bias', 'batch_first', 'dropout', 'bidirectional', 'dtype'] def __init__(self, other, dtype=torch.int8): 
super(QuantizedRNNBase, self).__init__() self.mode = other.mode self.input_size = other.input_size self.hidden_size = other.hidden_size self.num_layers = other.num_layers self.bias = other.bias self.batch_first = other.batch_first if self.mode != 'GRU': assert not self.batch_first self.dropout = other.dropout self.bidirectional = other.bidirectional num_directions = 2 if self.bidirectional else 1 self.dtype = dtype assert self.bias # TODO: support more than just LSTM if self.mode != 'LSTM' and self.mode != 'GRU': raise RuntimeError('Only LSTM or GRU is supported for QuantizedRNN') if dtype != torch.int8 and dtype != torch.float16: raise RuntimeError('Unsupported dtype: {}'.format(dtype)) self.all_weights = [] # type: ignore for layer in range(self.num_layers): for direction in range(num_directions): layer_input_size = self.input_size if layer == 0 else self.hidden_size * num_directions suffix = '_reverse' if direction == 1 else '' def get_weight_bias(ihhh): weight_name = 'weight_{}_l{}{}'.format(ihhh, layer, suffix) bias_name = 'bias_{}_l{}{}'.format(ihhh, layer, suffix) weight = getattr(other, weight_name) bias = getattr(other, bias_name) return weight, bias weight_ih, bias_ih = get_weight_bias('ih') weight_hh, bias_hh = get_weight_bias('hh') if dtype == torch.int8: cell_params = torch.ops.quantized.make_quantized_cell_params( weight_ih, weight_hh, bias_ih, bias_hh) else: packed_ih = torch.ops.quantized.linear_prepack_fp16( weight_ih.float(), bias_ih) packed_hh = torch.ops.quantized.linear_prepack_fp16( weight_hh.float(), bias_hh) cell_params = torch.ops.quantized.make_quantized_cell_params_fp16( packed_ih, packed_hh) setattr(self, 'cell_params_{}_{}'.format(layer, suffix), cell_params) self.all_weights.append(cell_params) @torch.jit.script_method def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None: expected_input_dim = 2 if batch_sizes is not None else 3 if input.dim() != expected_input_dim: raise RuntimeError( 'input must have {} dimensions, got {}'.format( expected_input_dim, input.dim())) if self.input_size != input.size(-1): raise RuntimeError( 'input.size(-1) must be equal to input_size. 
Expected {}, got {}'.format(
                    self.input_size, input.size(-1)))

    @torch.jit.script_method
    def get_expected_hidden_size(self, input: Tensor, batch_sizes: Optional[Tensor]) -> Tuple[int, int, int]:
        if batch_sizes is not None:
            mini_batch = int(batch_sizes[0])
        else:
            mini_batch = input.size(0) if self.batch_first else input.size(1)
        num_directions = 2 if self.bidirectional else 1
        expected_hidden_size = (self.num_layers * num_directions,
                                mini_batch, self.hidden_size)
        return expected_hidden_size

    @torch.jit.script_method
    def check_hidden_size(self, hx: Tensor, expected_hidden_size: Tuple[int, int, int],
                          msg: str = 'Expected hidden size {}, got {}') -> None:
        if hx.size() != expected_hidden_size:
            raise RuntimeError(msg.format(expected_hidden_size, list(hx.size())))

    @torch.jit.script_method
    def check_forward_args(self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]) -> None:
        self.check_input(input, batch_sizes)
        expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
        self.check_hidden_size(hidden, expected_hidden_size,
                               msg='Expected hidden size {}, got {}')

    @torch.jit.script_method
    def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]) -> Tensor:
        if permutation is None:
            return hx
        return apply_permutation(hx, permutation)


class QuantizedLSTM(QuantizedRNNBase):
    __overloads__ = {'forward': ['forward_packed', 'forward_tensor']}

    def __init__(self, other, dtype):
        super(QuantizedLSTM, self).__init__(other, dtype)

    @torch.jit.script_method
    def forward_impl(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]], batch_sizes: Optional[Tensor],
                     max_batch_size: int, sorted_indices: Optional[Tensor]) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:  # noqa
        if hx is None:
            num_directions = 2 if self.bidirectional else 1
            zeros = torch.zeros(self.num_layers * num_directions,
                                max_batch_size, self.hidden_size,
                                dtype=input.dtype, device=input.device)
            hx = (zeros, zeros)
        else:
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)

        self.check_forward_args(input, hx, batch_sizes)
        assert batch_sizes is None
        result = torch.quantized_lstm(input, hx, self.all_weights, self.bias, self.num_layers,
                                      float(self.dropout), self.training, self.bidirectional,
                                      self.batch_first, dtype=self.dtype, use_dynamic=False)
        output = result[0]
        hidden = result[1:]

        return output, hidden

    @torch.jit.script_method
    def forward_tensor(self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None) -> Tuple[Tensor, Tuple[Tensor, Tensor]]:
        batch_sizes = None
        max_batch_size = input.size(0) if self.batch_first else input.size(1)
        sorted_indices = None
        unsorted_indices = None

        output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)

        return output, self.permute_hidden(hidden, unsorted_indices)

    @torch.jit.script_method
    def forward_packed(self, input: PackedSequence, hx: Optional[Tuple[Tensor, Tensor]] = None
                       ) -> Tuple[PackedSequence, Tuple[Tensor, Tensor]]:
        input, batch_sizes, sorted_indices, unsorted_indices = input
        max_batch_size = batch_sizes[0]
        max_batch_size = int(max_batch_size)

        output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)

        output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
        return output, self.permute_hidden(hidden, unsorted_indices)

    @torch.jit.script_method
    def permute_hidden(self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor]) -> Tuple[Tensor, Tensor]:
        if permutation is None:
            return hx
        return apply_permutation(hx[0], permutation), apply_permutation(hx[1], permutation)

    @torch.jit.script_method
    def check_forward_args(self, input: Tensor, hidden: Tuple[Tensor, Tensor], batch_sizes: Optional[Tensor]) -> None:
        self.check_input(input, batch_sizes)
        expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)

        self.check_hidden_size(hidden[0], expected_hidden_size,
                               'Expected hidden[0] size {}, got {}')
        self.check_hidden_size(hidden[1], expected_hidden_size,
                               'Expected hidden[1] size {}, got {}')

    def forward(self, input, hx=None):
        if isinstance(input, PackedSequence):
            return self.forward_packed(input, hx)
        else:
            return self.forward_tensor(input, hx)


class QuantizedGRU(QuantizedRNNBase):
    __overloads__ = {'forward': ['forward_packed', 'forward_tensor']}

    @torch.jit.script_method
    def forward_impl(self, input: Tensor, hx: Optional[Tensor], batch_sizes: Optional[Tensor],
                     max_batch_size: int, sorted_indices: Optional[Tensor]) -> Tuple[Tensor, Tensor]:  # noqa
        if hx is None:
            num_directions = 2 if self.bidirectional else 1
            hx = torch.zeros(self.num_layers * num_directions,
                             max_batch_size, self.hidden_size,
                             dtype=input.dtype, device=input.device)
        else:
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)

        self.check_forward_args(input, hx, batch_sizes)
        if batch_sizes is None:
            result = torch.quantized_gru(input, hx, self.all_weights, self.bias, self.num_layers,
                                         float(self.dropout), self.training, self.bidirectional,
                                         self.batch_first)
        else:
            result = torch.quantized_gru(input, batch_sizes, hx, self.all_weights, self.bias, self.num_layers,
                                         float(self.dropout), self.training, self.bidirectional)

        output = result[0]
        hidden = result[1]

        return output, hidden

    @torch.jit.script_method
    def forward_tensor(self, input: Tensor, hx: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]:
        batch_sizes = None
        max_batch_size = input.size(0) if self.batch_first else input.size(1)
        sorted_indices = None
        unsorted_indices = None

        output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)

        return output, self.permute_hidden(hidden, unsorted_indices)

    @torch.jit.script_method
    def forward_packed(self, input: PackedSequence, hx: Optional[Tensor] = None) -> Tuple[PackedSequence, Tensor]:
        input, batch_sizes, sorted_indices, unsorted_indices = input
        max_batch_size = batch_sizes[0]
        max_batch_size = int(max_batch_size)

        output, hidden = self.forward_impl(input, hx, batch_sizes, max_batch_size, sorted_indices)

        output = PackedSequence(output, batch_sizes, sorted_indices, unsorted_indices)
        return output, self.permute_hidden(hidden, unsorted_indices)

    def forward(self, input, hx=None):
        if isinstance(input, PackedSequence):
            return self.forward_packed(input, hx)
        else:
            return self.forward_tensor(input, hx)


def quantize_rnn_cell_modules(module):
    warnings.warn("quantize_rnn_cell_modules function has been deprecated. "
                  "Please use torch.quantization.quantize_dynamic API instead.")
    reassign = {}
    for name, mod in module.named_modules():
        if mod is module:
            continue
        new_mod = quantize_rnn_cell_modules(mod)
        if new_mod is not mod:
            reassign[name] = new_mod
    for name, mod in reassign.items():
        setattr(module, name, mod)
    if isinstance(module, torch.nn.LSTMCell):
        return QuantizedLSTMCell(module)
    if isinstance(module, torch.nn.GRUCell):
        return QuantizedGRUCell(module)
    if isinstance(module, torch.nn.RNNCell):
        return QuantizedRNNCell(module)
    return module


def quantize_linear_modules(module, dtype=torch.int8):
    warnings.warn("quantize_linear_modules function has been deprecated. "
                  "Please use torch.quantization.quantize_dynamic API instead.")
    reassign = {}
    for name, mod in module.named_modules():
        if mod is module:
            continue
        new_mod = quantize_linear_modules(mod, dtype)
        if new_mod is not mod:
            reassign[name] = new_mod
    for name, mod in reassign.items():
        setattr(module, name, mod)
    if isinstance(module, torch.nn.Linear):
        if dtype == torch.int8:
            return QuantizedLinear(module)
        elif dtype == torch.float16:
            return QuantizedLinearFP16(module)
        else:
            raise RuntimeError(
                "Unsupported dtype: {}".format(dtype))
    return module


def quantize_rnn_modules(module, dtype=torch.int8):
    warnings.warn("quantize_rnn_modules function has been deprecated. "
                  "Please use torch.quantization.quantize_dynamic API instead.")
    reassign = {}
    for name, mod in module.named_modules():
        if mod is module:
            continue
        new_mod = quantize_rnn_modules(mod, dtype)
        if new_mod is not mod:
            reassign[name] = new_mod
    for name, mod in reassign.items():
        setattr(module, name, mod)
    if isinstance(module, torch.nn.LSTM):
        if dtype != torch.int8 and dtype != torch.float16:
            raise RuntimeError("Unsupported dtype: {}".format(dtype))
        return QuantizedLSTM(module, dtype)
    if isinstance(module, torch.nn.GRU):
        return QuantizedGRU(module)
    return module
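# --- Added usage sketch (not part of the original module) -------------------
# A minimal, hypothetical example of how the helpers above might be used to
# dynamically quantize a float LSTM.  The deprecated quantize_rnn_modules()
# path is shown next to torch.quantization.quantize_dynamic, which the
# deprecation warnings above recommend.  The layer sizes and example input
# are invented for illustration; `torch` is already imported by this module.
if __name__ == '__main__':
    float_lstm = torch.nn.LSTM(input_size=32, hidden_size=64, num_layers=1)

    # Deprecated path: wraps the float LSTM in QuantizedLSTM with int8 weights
    # (constructed here but not exercised further).
    legacy_qlstm = quantize_rnn_modules(float_lstm, dtype=torch.int8)

    # Recommended path: eager-mode dynamic quantization.
    dynamic_qlstm = torch.quantization.quantize_dynamic(
        float_lstm, {torch.nn.LSTM}, dtype=torch.qint8)

    x = torch.randn(5, 3, 32)  # (seq_len, batch, input_size)
    out, (h_n, c_n) = dynamic_qlstm(x)
    print(out.shape, h_n.shape, c_n.shape)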
python
import argparse
import os
import json
import xml.etree.cElementTree as ET
import logging

import numpy as np

import sys
sys.path.insert(0, 'common')

from transforms3dbatch import *
from utils.quaternion import *


def parse_motions(path):
    xml_tree = ET.parse(path)
    xml_root = xml_tree.getroot()
    xml_motions = xml_root.findall('Motion')
    motions = []

    if len(xml_motions) > 1:
        logging.warning('more than one <Motion> tag in file "%s", only parsing the first one', path)
    motions.append(_parse_motion(xml_motions[0], path))
    return motions


def _parse_motion(xml_motion, path):
    xml_joint_order = xml_motion.find('JointOrder')
    if xml_joint_order is None:
        raise RuntimeError('<JointOrder> not found')

    joint_names = []
    joint_indexes = []
    for idx, xml_joint in enumerate(xml_joint_order.findall('Joint')):
        name = xml_joint.get('name')
        if name is None:
            raise RuntimeError('<Joint> has no name')
        joint_indexes.append(idx)
        joint_names.append(name)

    frames = {'root_pos': [], 'root_rot': [], 'joint_pos': []}
    xml_frames = xml_motion.find('MotionFrames')
    if xml_frames is None:
        raise RuntimeError('<MotionFrames> not found')
    for xml_frame in xml_frames.findall('MotionFrame'):
        root_pos, root_rot, joint_pos = _parse_frame(xml_frame, joint_indexes)
        frames['root_pos'].append(root_pos)
        frames['root_rot'].append(root_rot)
        frames['joint_pos'].append(joint_pos)

    return joint_names, frames


def _parse_frame(xml_frame, joint_indexes):
    xml_root_pos = xml_frame.find('RootPosition')
    xml_root_rot = xml_frame.find('RootRotation')
    n_joints = len(joint_indexes)
    xml_joint_pos = xml_frame.find('JointPosition')
    if xml_joint_pos is None:
        raise RuntimeError('<JointPosition> not found')

    root_pos = _parse_list(xml_root_pos, 3)
    root_rot = _parse_list(xml_root_rot, 3)
    joint_pos = _parse_list(xml_joint_pos, n_joints, joint_indexes)

    return root_pos, root_rot, joint_pos


def _parse_list(xml_elem, length, indexes=None):
    if indexes is None:
        indexes = range(length)
    elems = [float(x) for idx, x in enumerate(xml_elem.text.rstrip().split(' ')) if idx in indexes]
    if len(elems) != length:
        raise RuntimeError('invalid number of elements')
    return elems


def mmm2csv(src):
    joint_names, mmm_dict = parse_motions(src.as_posix())[0]
    root_pos = np.array(mmm_dict['root_pos'], dtype=np.float64) * 0.001 / 0.056444
    root_rot = np.array(mmm_dict['root_rot'], dtype=np.float64)
    joint_pos = np.array(mmm_dict['joint_pos'], dtype=np.float64)

    joint_dict = {}
    for idx, name in enumerate(joint_names):
        if name.split('_')[0][-1] != 't':
            xyz = name.split('_')[0][-1]
            joint = name.split('_')[0][:-1]
        else:
            xyz = 'y'
            joint = name.split('_')[0]
        if joint not in joint_dict:
            joint_dict[joint] = dict()
        joint_dict[joint][xyz] = joint_pos[:, idx]

    joints = []
    values = []
    for cnt, joint in enumerate(joint_dict):
        joint_vals = []
        joints.append(joint)
        for axes in ['x', 'y', 'z']:
            if axes in joint_dict[joint]:
                joint_vals.append(joint_dict[joint][axes])
            else:
                joint_vals.append(np.zeros_like(root_pos[:, 0]))
        values.append(np.stack(joint_vals, axis=1))
    values = np.stack(values, axis=0)

    return joints, root_pos, root_rot, values, joint_dict


def mmm2amc(src, dest):
    joints, root_pos, root_rot, values, joint_dict = mmm2csv(src)

    axesMap = {'x': 'x', 'y': 'y', 'z': 'z'}
    root_pos = root_pos[..., [0, 2, 1]]
    ## convert to quaternion and back by changing the axes order
    root_rot = quat2eulerbatch(qinv_np(euler2quatbatch(root_rot, 'sxyz')[..., [0, 1, 3, 2]]), 'sxyz') * 180 / np.pi
    values = quat2eulerbatch(qinv_np(euler2quatbatch(values, 'sxyz')[..., [0, 1, 3, 2]]), 'sxyz') * 180 / np.pi
    joint_pos = []
    for cnt, joint in enumerate(joints):
        for axes_num, axes in enumerate(['x', 'y', 'z']):
            if axesMap[axes] in joint_dict[joint]:
                joint_dict[joint][axesMap[axes]] = values[cnt, :, axes_num]

    lines = ["#!OML:ASF H:", ":FULLY-SPECIFIED", ":DEGREES"]
    for idx in range(root_pos.shape[0]):
        lines.append('{}'.format(idx + 1))
        lines.append('root' + (' {}' * 6).format(root_pos[idx, 0], root_pos[idx, 1], root_pos[idx, 2],
                                                 root_rot[idx, 0], root_rot[idx, 1], root_rot[idx, 2]))
        for cnt, joint in enumerate(joint_dict):
            format_str = '{} ' * (len(joint_dict[joint]) + 1)
            format_str = format_str[:-1]
            joint_vals = []
            for axes in ['x', 'y', 'z']:
                if axes in joint_dict[joint]:
                    joint_vals.append(joint_dict[joint][axes][idx])
            lines.append(format_str.format(*([joint] + joint_vals)))
    lines = '\n'.join(lines) + '\n'

    os.makedirs(dest.parent, exist_ok=True)
    with open(dest, 'w') as fp:
        fp.writelines(lines)
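# --- Added usage sketch (not part of the original script) -------------------
# A hypothetical example of converting one MMM motion file to AMC with the
# functions above.  The paths are invented; mmm2amc() expects pathlib.Path
# objects because it calls src.as_posix() and uses dest.parent.
if __name__ == '__main__':
    from pathlib import Path

    src = Path('data/mmm/walking_01.xml')    # assumed input MMM file
    dest = Path('data/amc/walking_01.amc')   # assumed output AMC file

    joints, root_pos, root_rot, values, joint_dict = mmm2csv(src)
    print('parsed {} joints over {} frames'.format(len(joints), root_pos.shape[0]))

    mmm2amc(src, dest)  # writes the AMC text, creating dest.parent if needed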
python
from django.apps import apps as django_apps

default_app_config = 'scrapyd_dash.apps.ScrapydDashConfig'
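# --- Added illustration (not part of the original file) ---------------------
# default_app_config points at 'scrapyd_dash.apps.ScrapydDashConfig'.  That
# apps.py is not shown here; a minimal version would follow the standard
# Django AppConfig pattern, roughly as sketched below (the verbose_name is an
# assumption).
#
#     # scrapyd_dash/apps.py
#     from django.apps import AppConfig
#
#     class ScrapydDashConfig(AppConfig):
#         name = 'scrapyd_dash'
#         verbose_name = 'Scrapyd Dashboard'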
python