Dataset columns: content (string, lengths 0 to 894k) and type (string, 2 classes).
from lxml import etree
from io import StringIO
from django.urls import path
from django.http import HttpResponse
from django.template import Template, Context, Engine, engines


def a(request):
    xslt_root = etree.XML('''\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
    <xsl:template match="/">
        <foo><xsl:value-of select="/a/b/text()" /></foo>
    </xsl:template>
</xsl:stylesheet>''')
    transform = etree.XSLT(xslt_root)


def b(request):
    xslt_root = etree.XML('''\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
    <xsl:template match="/">
        <foo><xsl:value-of select="/a/b/text()" /></foo>
    </xsl:template>
</xsl:stylesheet>''')
    f = StringIO('<foo><bar></bar></foo>')
    tree = etree.parse(f)
    result_tree = tree.xslt(xslt_root)


def c(request):
    xslt_root = etree.XML('''\
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
    <xsl:template match="/">
        <foo><xsl:value-of select="/a/b/text()" /></foo>
    </xsl:template>
</xsl:stylesheet>''')
    f = StringIO('<foo><bar></bar></foo>')
    tree = etree.parse(f)
    result = tree.xslt(xslt_root, a="'A'")


urlpatterns = [
    path('a', a),
    path('b', b),
    path('c', c)
]

if __name__ == "__main__":
    a(None)
    b(None)
    c(None)
python
from VisualisationPlugin import VisualisationPlugin

import pygame
import math
import logging

from DDRPi import FloorCanvas


class SineWaveVisualisationPlugin(VisualisationPlugin):

    logger = logging.getLogger(__name__)

    def __init__(self):
        self.clock = pygame.time.Clock()

    def configure(self, config):
        self.config = config
        self.logger.info("Config: %s" % config)

    def draw_frame(self, canvas):
        # Limit the frame rate.
        # This sleeps so that at least 25ms has passed since tick()
        # was last called. It is a no-op if the loop is running slow
        self.clock.tick(25)
        # Draw whatever this plugin does
        return self.draw_surface(canvas, pygame.time.get_ticks())

    def draw_splash(self, canvas):
        return self.draw_surface(canvas, 0)

    def draw_surface(self, canvas, ticks):
        # Get the background colour
        background_colour = FloorCanvas.GREEN
        wave_colour = FloorCanvas.WHITE
        amplitude = (canvas.get_height() / 2) - 1
        period = 18.0
        if self.config is not None:
            try:
                background_colour = getattr(FloorCanvas, self.config["background_colour"].upper())
            except (AttributeError, KeyError):
                pass
            # Get the wave colour
            try:
                wave_colour = getattr(FloorCanvas, self.config["colour"].upper())
            except (AttributeError, KeyError):
                pass
            # Get the amplitude
            try:
                amplitude = float(self.config["amplitude"])
            except (AttributeError, ValueError, KeyError):
                pass
            # Get the period
            try:
                period = float(self.config["period"])
            except (AttributeError, ValueError, KeyError):
                pass

        # Set the background colour
        canvas.set_colour(background_colour)

        phase_offset = 0.0
        frequency = 1.0
        phase_offset = 2 * math.pi * frequency * ticks / 1000
        # phase_offset = 0

        w = canvas.get_width()
        h = canvas.get_height()

        previous_x = None
        previous_y = None
        for x in range(w):
            phase = math.pi * 2 * x / period
            y = h / 2.0 + amplitude * math.sin(phase_offset + phase)
            if previous_y is not None and previous_x is not None:
                # Draw line between previous point and this one
                # self.surface.draw_line(int(round(previous_x)), int(round(previous_y)), int(round(x)), int(round(y)), FloorCanvas.WHITE)
                canvas.draw_line(int(previous_x), int(previous_y), int(x), int(y), wave_colour)
            # self.surface.set_pixel(int(x), int(y), FloorCanvas.WHITE)
            previous_x = x
            previous_y = y

        return canvas

    def get_valid_arguments(self):
        args = ["background_colour",  # The background colour of the wave
                "colour",             # The colour of the wave
                "speed",              # The speed of the wave
                "amplitude",          # The amplitude of the wave
                ]
        return args
python
import socket

sock = socket.socket()
address = "agps.u-blox.com"
port = 46434

print "Connecting to u-blox"
sock.connect((address, port))
print "Connection established"

print "Sending the request"
sock.send("cmd=full;[email protected];token=4HWt1EvhQUKJ2InFyaaZDw;lat=30.0;lon=30.0;pacc=10000;")
print "Sending the request - done"

data = ""
buffer = True
while buffer:
    print(".")
    buffer = sock.recv(1024)
    if buffer:
        data += buffer

print("\n")
print(data)
python
import os.path as osp
from pathlib import Path

import pandas as pd

from jitenshea.stats import find_cluster

_here = Path(osp.dirname(osp.abspath(__file__)))
DATADIR = _here / 'data'
CENTROIDS_CSV = DATADIR / 'centroids.csv'


def test_find_cluster():
    df = pd.read_csv(CENTROIDS_CSV)
    df = df.set_index('cluster_id')
    cluster = find_cluster(df)
    expected = {3: 'evening', 1: 'high', 0: 'morning', 2: 'noon'}
    assert expected == cluster
python
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 23 22:40:34 2018

@author: boele
"""

# 03 read csv and find unique survey vessels...

# open csv file
f = open('fartoey_maaleoppdrag.csv', 'r')
data = f.read()
surveys_and_vessels = data.split('\n')

# print number of rows and show first 5 rows
print(len(surveys_and_vessels))
print(surveys_and_vessels[0:5])
print()

# remove header
surveys_and_vessels = surveys_and_vessels[1:]

# create empty vessels list
vessels = []

# for each row extract second column and add to vessel list
for row in surveys_and_vessels:
    col = row.split(';')
    if len(col) > 1:
        vessels.append(col[1])

# print first 5 new rows
print(vessels[0:5])
print()

# create vessel_counts dictionary with vessel name as key and count as value
vessel_counts = {}
for item in vessels:
    if item in vessel_counts:
        vessel_counts[item] = vessel_counts[item] + 1
    else:
        vessel_counts[item] = 1

print(vessel_counts)
print('number of unique vessels: ' + str(len(vessel_counts)))
python
# -*- coding: utf-8 -*- """ Created on Wed Oct 11 13:59:21 2017 @author: tuur """ from __future__ import print_function from dateutil import parser as dparser from lib.evaluation import get_selective_rel_metrics, get_acc_from_confusion_matrix,save_confusion_matrix_from_metrics, viz_docs_rel_difference, save_entity_error_analysis import random, re, os, shutil, time, datetime, pickle import numpy as np import torch.nn as nn import torch.autograd as autograd import torch.optim as optim import plotly as py import plotly.figure_factory as ff import plotly.graph_objs as go import torch from lib.data import reverse_dict_list from lib.timeml import write_timebank_folder, get_dur_from_value from lib.transformer.SubLayers import MultiHeadAttention import itertools from copy import copy from collections import Counter, OrderedDict import subprocess from gensim.models.keyedvectors import KeyedVectors from lib.yellowfin import YFOptimizer random.seed(0) torch.backends.cudnn.enabled=True class TimelineModel(object): def setup_vocabularies(self, data, unk_threshold, special_conflation=False, entity_sequence=False): # Sets up indices for characters, POS, and words if entity_sequence: self.word_frequencies = Counter([token if not special_conflation else self.conflate_digits(token) for text in data for token in text.entity_tokens]) else: self.word_frequencies = Counter([token if not special_conflation else self.conflate_digits(token) for text in data for token in text.tokens]) if unk_threshold: self.word_frequencies = Counter({token for token in self.word_frequencies if self.word_frequencies[token] > unk_threshold}) all_features = set([f for doc in data for tok_index in range(len(doc.tokens)) for f in self.get_features(tok_index, doc)]) cindex = {c:autograd.Variable(torch.from_numpy(np.array([i]))) for i,c in enumerate(set([c for w in self.word_frequencies for c in w]).union([self.unk_token]).union([str(n) for n in range(10)]))} pindex = {p:autograd.Variable(torch.from_numpy(np.array([i]))) for i,p in enumerate(set([p for text in data for p in text.pos] + [self.unk_token]))} windex = {w:autograd.Variable(torch.from_numpy(np.array([i]))) for i,w in enumerate(list(self.word_frequencies.keys()) + [self.unk_token])} findex = {f:i for i,f in enumerate(list(all_features))} return windex, cindex, pindex, findex def get_params_from_nn_dict(self, nn_dict): params = [] for name, component in nn_dict.items(): params += self.get_component_params(name, nn_dict) return params def get_component_params(self, name, component_dict): if name in component_dict: component = component_dict[name] if hasattr(component, 'parameters'): return list(component.parameters()) else: return [component] def fix_component_by_name(self, name): component_names = [name] if name in self.nn else self.nn_by_subtask[name] for component_name in component_names: for par in self.get_component_params(component_name, self.nn): par.requires_grad=False self.tied_components.add(component_name) def free_component_by_name(self, name): component_names = [name] if name in self.nn else self.nn_by_subtask[name] for component_name in component_names: for par in self.get_component_params(component_name, self.nn): par.requires_grad=True if component_name in self.tied_components: self.tied_components.remove(component_name) def print_gradient_by_name(self, name=None): if name is None: components = self.nn.keys() else: components = [name] if name in self.nn else self.nn_by_subtask[name] for component in components: params = self.get_component_params(component, 
self.nn) summed = 0 n_params = 0 for p in params: if not p.grad is None: n_params += np.prod(list(p.size())) summ = sum(torch.abs(p.grad)) if summ.size()[0] > 1: summ = sum(summ) summed += summ summed_grad = summed.data[0] if not type(summed)==int else summed print(component, round(summed_grad,2), '/',round(n_params,2),'=',round(float(summed_grad)/(n_params+1),2)) def get_trainable_params(self): pars = set() for task in self.active_subtasks: component_names = self.nn_by_subtask[task] for comp in component_names: if comp in self.tied_components: continue for par in self.get_component_params(comp, self.nn): if par is not None and par.requires_grad: pars.add(par) return pars def reset_optimizer(self): trainable_params = self.get_trainable_params() if self.optimizer_type == 'adam': self.optimizer = optim.Adam(trainable_params, lr=self.lr) if self.optimizer_type == 'adaml2': self.optimizer = optim.Adam(trainable_params, lr=self.lr, weight_decay=0.0001) if self.optimizer_type == 'amsgrad': self.optimizer = optim.Adam(trainable_params, lr=self.lr, amsgrad=True) if self.optimizer_type == 'amsgrad0.01': self.optimizer = optim.Adam(trainable_params, lr=self.lr, amsgrad=True, eps=0.01) if self.optimizer_type == 'amsgrad0.001': self.optimizer = optim.Adam(trainable_params, lr=self.lr, amsgrad=True, eps=0.001) elif self.optimizer_type== 'adadelta': self.optimizer = optim.Adadelta(trainable_params, lr=self.lr) elif self.optimizer_type == 'rmsprop': self.optimizer = optim.RMSprop(trainable_params, lr=self.lr) elif self.optimizer_type == 'sgd': self.optimizer = optim.SGD(trainable_params, lr=self.lr, momentum=0.9, weight_decay=0.001) elif self.optimizer_type == 'nesterov': self.optimizer = optim.SGD(trainable_params, lr=self.lr, momentum=0.9, weight_decay=0.001, nesterov=True) elif self.optimizer_type == 'asgd': self.optimizer = optim.ASGD(trainable_params, lr=self.lr) elif self.optimizer_type == 'yf': self.optimizer = YFOptimizer(trainable_params) def move_to_gpu(self): for cname, component in self.nn.items(): if hasattr(component, 'data'): component.data = component.data.cuda() else: component = component.cuda() for cname, constant in self.constants.items(): constant.data = constant.data.cuda() for indices in [self.windex, self.pindex, self.cindex]: for w,i in indices.items(): indices[w] = indices[w].cuda() def get_features(self, w_index, doc): w_span = doc.spans[w_index] annotations = doc.reverse_span_annotations[w_span] if w_span in doc.reverse_span_annotations else [] features = [] if len(annotations) > 0 and self.feature_keys: for feat_key in self.feature_keys: for ann in annotations: if feat_key in ann: features.append(ann) return features def get_feature_vec(self, w_index, doc): features = self.get_features(w_index, doc) vec = torch.zeros(len(self.findex)) for f in features: if f in self.findex: findex = self.findex[f] vec[findex] = 1.0 if self.gpu: vec = vec.cuda() return autograd.Variable(vec, requires_grad=False) def get_tif_vec(self, w_index, doc): span = doc.spans[w_index] if span in doc.reverse_span_annotations: k = [tif for tif in doc.reverse_span_annotations[span] if tif[:3]=='TIF'] #print(k) if len(k) >0: return self.tif_vecs[k[0]] return self.tif_vecs['TIF-UNKNOWN'] def set_train_mode(self): for component in self.nn.values(): if hasattr(component, 'train'): component.train() def set_eval_mode(self): for component in self.nn.values(): if hasattr(component, 'eval'): component.eval() def __init__(self, model_dir='tml_model', data=[], margin=0.01, dmin=0.1, pemb_size=20, wemb_size=25, 
cemb_size=10, rnn_size=50, crnn_size=20, lr=0.001, gpu=True, relations=['BEFORE', 'AFTER', 'INCLUDES', 'IS_INCLUDED','SIMULTANEOUS'], dropout=0.5, depth=1, unk_threshold=0, special_conflation=False, rnn_unit='LSTM', pos=False, optimizer='adam', loss_func='Ldce', subtasks=['sc','dc','sa','da'], word_vectors=None, fix_wembs=False, dct_start_fixed=True, dct_duration_fixed=False, rnn_bias=True, linear_bias=True, use_character_level_encoding=True,doc_normalization=True,blinding=False, feature_keys = None, deep_word_modeling=False, entity_sequence=False, absolute=False, pointwise_loss='hinge'): self.model_dir = model_dir if not os.path.exists(self.model_dir): os.makedirs(self.model_dir) self.unk_token = '_unk_' self.feature_keys = feature_keys.split(',') if feature_keys else None self.windex, self.cindex, self.pindex, self.findex = self.setup_vocabularies(data, unk_threshold, special_conflation=special_conflation, entity_sequence=entity_sequence) print ('wvocab:', len(self.windex), 'cvocab:', len(self.cindex), 'pvocab:', len(self.pindex), 'fvocab:', len(self.findex), '( using pos:', bool(pos),', features:', self.feature_keys, ')') print('features:', self.findex.keys()) self.train_margin, self.pred_margin = margin, margin self.dmin, self.rels_train, self.loss_func, self.pointwise_loss = dmin, relations, loss_func, pointwise_loss self.gpu, self.optimizer_type, self.lr = gpu, optimizer, lr self.special_conflation=special_conflation self.entity_sequence=entity_sequence self.absolute = absolute self.doc_normalization=doc_normalization # Some stats about layer sizes (for easy usage later on) self.pemb_size, self.wemb_size, self.crnn_size, self.cemb_size, self.rnn_size = (pemb_size if pos else 0), wemb_size,(crnn_size if use_character_level_encoding else 0), (cemb_size if use_character_level_encoding else 0), rnn_size self.pos, self.use_character_level_encoding, self.blinding, self.dropout, self.rnn_unit, self.deep_word_modeling = pos, use_character_level_encoding, blinding, dropout, rnn_unit, deep_word_modeling # --- Constructing Network Components self.nn, self.constants = OrderedDict(), OrderedDict() self.contextual_subtasks, self.word_level_subtasks = ['sc','dc'], ['sa','da'] # Set which subtasks should be used for prediction self.active_subtasks = subtasks print('Active subtasks',self.active_subtasks) # optional dropout if self.dropout: self.nn['dropout*'] = nn.Dropout(self.dropout) # Single parameters (or constants) self.nn['s_dct*'] = autograd.Variable(torch.zeros(1), requires_grad=True) self.nn['d_dct*'] = autograd.Variable(torch.ones(1), requires_grad=True) self.constants['ZERO'] = autograd.Variable(torch.FloatTensor([0]),requires_grad=False) # Word representation modules if word_vectors: wv = read_word_vectors(word_vectors) for subtask in self.contextual_subtasks + self.word_level_subtasks: if word_vectors: self.windex, self.nn['wembs_'+subtask], self.wemb_size = self.set_word_embeddings(wv) else: self.nn['wembs_'+subtask] = nn.Embedding(len(self.windex), self.wemb_size) if pos: self.nn['pembs_'+subtask] = nn.Embedding(len(self.pindex), self.pemb_size) if use_character_level_encoding: self.nn['cembs_'+subtask] = nn.Embedding(len(self.cindex), self.cemb_size) self.nn['crnn_'+subtask] = nn.LSTM(self.cemb_size, self.crnn_size, bidirectional=False, num_layers=depth, bias=rnn_bias) self.word_repr_size = self.pemb_size + self.wemb_size + self.crnn_size + (len(self.findex) if self.feature_keys else 0) if deep_word_modeling: for subtask in self.contextual_subtasks + self.word_level_subtasks: 
self.nn['wff_'+subtask] = nn.Linear(self.word_repr_size, deep_word_modeling) self.word_repr_size = deep_word_modeling # Contextual modules for subtask in self.contextual_subtasks: if self.rnn_unit == 'LSTM': self.nn['wrnn_'+subtask] = nn.LSTM(self.word_repr_size, self.rnn_size, bidirectional=True, num_layers=depth, bias=rnn_bias) elif self.rnn_unit == 'Att': self.nn['wrnn_'+subtask] = MultiHeadAttention(n_head=2, d_model=self.word_repr_size, d_k=10, d_v=10) self.nn['out_'+subtask] = nn.Linear(self.word_repr_size, 1, bias=linear_bias) # Non-contextual modules: self.out_repr_size_d = 0 + (1 if 'dp' in self.active_subtasks else 0) + (1 if 'sp' in self.active_subtasks else 0) + (2*self.rnn_size if 'dc' in self.active_subtasks else 0) + (self.word_repr_size if 'da' in self.active_subtasks else 0) self.out_repr_size_s = 0 + (1 if 'dp' in self.active_subtasks else 0) + (1 if 'sp' in self.active_subtasks else 0) + (2*self.rnn_size if 'sc' in self.active_subtasks else 0) + (self.word_repr_size if 'sa' in self.active_subtasks else 0) self.nn['out_s'] = nn.Linear(self.out_repr_size_d, 1, bias=linear_bias) self.nn['out_d'] = nn.Linear(self.out_repr_size_s, 1, bias=linear_bias) # Easy access to subparts of the net by subtask, to easily free or fix parameters self.nn_by_subtask = {subtask:{name:component for (name,component) in self.nn.items() if subtask in name or '*' in name} for subtask in self.contextual_subtasks+self.word_level_subtasks} for subtask in self.nn_by_subtask: self.nn_by_subtask[subtask]['out_s']=self.nn['out_s'] self.nn_by_subtask[subtask]['out_d']=self.nn['out_d'] # Set all components to trainable by default except checking the DCT start and duration self.tied_components = set() if dct_start_fixed: self.fix_component_by_name('s_dct*') if dct_duration_fixed: self.fix_component_by_name('d_dct*') self.reset_optimizer() print('Full model parameters:', sum([np.prod(list(par.size())) for par in self.get_trainable_params()])) print('Word representation size:',self.word_repr_size) print ('Dims - wemb:',self.wemb_size, '- pemb:',self.pemb_size, '- cemb:',self.cemb_size, '- wrnn:', self.rnn_size, '- crnn:', self.crnn_size) print ('Relations:', relations) if self.gpu: self.move_to_gpu() def index_w(self, w): return self.windex[w] if w in self.windex else self.windex[self.unk_token] def index_p(self, p): return self.pindex[p] if p in self.pindex else self.pindex[self.unk_token] def index_c(self, c): return self.cindex[c] if c in self.cindex else self.cindex[self.unk_token] def get_e_vec(self, e): return self.e_vecs[e] if e in self.e_vecs else self.e_vecs[self.unk_token] def encode_char(self,c, subtask): return self.nn['cembs_'+subtask](self.index_c(c)) def conflate_digits(self, w): return re.sub('\d', '5', w) def set_word_embeddings(self, wv): print('setting word embeddings') wv_vocab = [w for w in wv.vocab.keys() if (not ('_' in w) or w=='_') and w in self.windex] # ! only words that overlap are initialized (so no bigger vocab)! 
new_windex, wemb_size = {w:i for i,w in enumerate(wv_vocab + [self.unk_token])}, wv.vector_size wembs = nn.Embedding(len(new_windex), wemb_size) emb_matrix = np.zeros([len(new_windex), wemb_size], dtype=float) for w in new_windex: if w in wv: emb_matrix[new_windex[w]] = wv[w] emb_tensor = torch.from_numpy(emb_matrix).float() wembs.weight.data = emb_tensor.view(len(new_windex), wemb_size) new_windex = {w:autograd.Variable(torch.from_numpy(np.array([i]))) for w,i in new_windex.items()} print ('vocab size:', len(wv_vocab)) return new_windex, wembs, wemb_size def encode_word_for_subtask(self, w_index, doc, subtask): if self.entity_sequence: token_str = doc.span_to_tokens(doc.entity_spans[w_index])[-1] else: token_str = doc.tokens[w_index] if self.blinding == 1 and subtask in ['dc','sc' ] and doc.entities[w_index]!='O': token_str = self.unk_token elif self.blinding == 2 and subtask in ['dc','sc']: token_str = self.unk_token # Getting the word embedding if self.special_conflation: word_encoding = self.nn['wembs_'+subtask](self.index_w(self.conflate_digits(token_str))) else: word_encoding = self.nn['wembs_'+subtask](self.index_w(token_str)) # Adding Character RNN encoding if self.use_character_level_encoding: # Constructing sequence of char-embeddings cembs_lr = torch.stack([self.encode_char(c, subtask) for c in token_str]) # Running Char-RNN cencoding_lr, _ = self.nn['crnn_'+subtask](cembs_lr) # Concatenating the word embedding and last Char-RNN output word_encoding = torch.cat([word_encoding,cencoding_lr[-1]], dim=1) # Adding POS if self.pos: pemb = self.nn['pembs_'+subtask](self.index_p(doc.pos[w_index])) word_encoding = torch.cat([word_encoding, pemb], dim=1) # Adding Entity encoding (EVENT, TIMEX3, or NONE) if self.feature_keys: feat_vec = self.get_feature_vec(w_index, doc).view(1,-1) word_encoding = torch.cat([word_encoding, feat_vec], dim=1) if self.deep_word_modeling: word_encoding = torch.tanh(word_encoding) word_encoding = self.nn['wff_'+subtask](word_encoding) # Add dropout if self.dropout: word_encoding = self.nn['dropout*'](word_encoding) return word_encoding def encode_tokens_for_subtask(self, doc, subtask): # construct word representations if self.entity_sequence: word_encoding = torch.stack([self.encode_word_for_subtask(e_index,doc, subtask) for e_index in range(len(doc.entity_spans))]) else: word_encoding = torch.stack([self.encode_word_for_subtask(w_index,doc, subtask) for w_index in range(len(doc.tokens))]) # For contextual subtasks apply the corresponding word-level RNN if subtask in self.contextual_subtasks: if self.rnn_unit in ['LSTM', 'GRU','RNN']: word_encoding, _ = self.nn['wrnn_'+subtask](word_encoding) if self.rnn_unit in ['Att']: word_encoding, enc_slf_attn = self.nn['wrnn_'+subtask](word_encoding, word_encoding, word_encoding) # Add dropout (dropout is already appliedon word representation level as well) if self.dropout: word_encoding = self.nn['dropout*'](word_encoding) return word_encoding def pred_subtask(self, token_index, doc, encoded_text, subtask): token_representation = encoded_text[subtask][token_index] return self.nn['out_'+subtask](token_representation) def encode_tokens(self, doc, entity_spans=None, measure_speed=False): if measure_speed: t0 = time.time() entity_spans = entity_spans if entity_spans else doc.entity_spans encodings = {} sp,dp = 0,0 for subtask in self.active_subtasks: if not subtask in set(['dp','sp']): encodings[subtask] = self.encode_tokens_for_subtask(doc, subtask) encodings['s'], encodings['d'] = {},{} # span (0,0) corresponds to the 
document-creation-time s, d = self.nn['s_dct*'].view(1,1), self.clamp(self.nn['d_dct*'].view(1,1), self.dmin)#.clamp(self.dmin) encodings['s'][(0,0)], encodings['d'][(0,0)] = s, d sp,dp = s, d for span in entity_spans: # Get the token index corresponding to the span token_ix = doc.entity_indices[span] if self.entity_sequence else doc.span_to_tokens(span,token_index=True)[-1] tok_rs, tok_rd = None,None if 'sa' in self.active_subtasks: tok_rs = encodings['sa'][token_ix] if 'da' in self.active_subtasks: tok_rd = encodings['da'][token_ix] if 'sc' in self.active_subtasks: tok_rs = torch.cat([tok_rs, encodings['sc'][token_ix]], dim=1) if tok_rs is not None else encodings['sc'][token_ix] if 'dc' in self.active_subtasks: tok_rd = torch.cat([tok_rd, encodings['dc'][token_ix]], dim=1) if tok_rd is not None else encodings['dc'][token_ix] if 'sp' in self.active_subtasks: tok_rs = torch.cat([tok_rs, sp], dim=1) tok_rd = torch.cat([tok_rd, sp], dim=1) if 'dp' in self.active_subtasks: tok_rs = torch.cat([tok_rs, dp], dim=1) tok_rd = torch.cat([tok_rd, dp], dim=1) s, d = self.nn['out_s'](tok_rs), self.clamp(self.nn['out_d'](tok_rd), self.dmin) encodings['s'][span] = s encodings['d'][span] = d sp,dp = s, d if measure_speed: print(doc.id, 'enc t:',time.time()-t0,'s', 'words:', len(doc.tokens),'w/s:', float(len(doc.tokens)) / (time.time()-t0)) return encodings def clamp(self, tensor, min_value): return torch.log(1.0 + torch.exp(tensor)) + min_value def pred_starttime(self, span, doc, encoded_text): return encoded_text['s'][span] def pred_duration(self, span, doc, encoded_text): return encoded_text['d'][span] def pointwise_loss_before(self, x, y, train_mode=False): # X < Y, interpreted as: max(X + m - Y, 0) margin_t = self.train_margin if train_mode else self.pred_margin if self.pointwise_loss == 'hinge': loss = torch.max(torch.stack([x[0] + margin_t - y[0], self.constants['ZERO']])) elif self.pointwise_loss == 'log': loss = torch.log(1 + torch.exp(x[0] - y[0] + margin_t)) elif self.pointwise_loss == 'exp': loss = torch.exp(x[0] - y[0] + margin_t) return loss.view(1) def pointwise_loss_equal(self, x, y, train_mode=False): # |x-y| < margin --> max(|x-y| - self.loss_margin , 0) margin_t = self.train_margin if train_mode else self.pred_margin if self.pointwise_loss == 'hinge': loss = torch.max(torch.stack([torch.abs(x[0] - y[0]) - margin_t, self.constants['ZERO']])) elif self.pointwise_loss == 'log': loss = torch.log(1 + torch.exp(torch.abs(x[0] - y[0]) - margin_t)) elif self.pointwise_loss == 'exp': loss = torch.exp(torch.abs(x[0] - y[0]) - margin_t) return loss.view(1) def get_Lt(self, rel, s1, d1, s2, d2, train_mode=False): e1 = s1 + d1 e2 = s2 + d2 if rel == 'IS_INCLUDED': loss = self.pointwise_loss_before(s2, s1, train_mode) + self.pointwise_loss_before(e1, e2, train_mode) # + self.pointwise_loss_before(d1,d2) elif rel =='INCLUDES': loss = self.pointwise_loss_before(s1, s2, train_mode) + self.pointwise_loss_before(e2, e1, train_mode) # + self.pointwise_loss_before(d2,d1) elif rel == 'BEFORE': loss = self.pointwise_loss_before(e1, s2, train_mode) elif rel == 'AFTER': loss = self.pointwise_loss_before(e2, s1, train_mode) elif rel == 'SIMULTANEOUS': loss = self.pointwise_loss_equal(s1, s2, train_mode) + self.pointwise_loss_equal(e1, e2, train_mode) # + self.pointwise_loss_equal(d1,d2) elif rel == 'BEGINS': loss = self.pointwise_loss_equal(s1, s2, train_mode) + self.pointwise_loss_before(e1, e2, train_mode) elif rel == 'BEGUN_BY': loss = self.pointwise_loss_equal(s2, s1, train_mode) + 
self.pointwise_loss_before(e2, e1, train_mode) elif rel == 'ENDS': loss = self.pointwise_loss_before(s2, s1, train_mode) + self.pointwise_loss_equal(e1, e2, train_mode) elif rel == 'ENDED_BY': loss = self.pointwise_loss_before(s1, s2, train_mode) + self.pointwise_loss_equal(e2, e1, train_mode) elif rel == 'IBEFORE': loss = self.pointwise_loss_equal(e1, s2, train_mode) elif rel == 'IAFTER': loss = self.pointwise_loss_equal(e2, s1, train_mode) else: print('ERROR: no loss for relation:', rel) #print(rel, loss, s1, e1, s2, e2) return loss def get_Lr(self, rel, s1, d1, s2, d2, all_relations, train_mode=False): if self.loss_func == 'Lt': return self.get_Lt(rel, s1, d1, s2, d2, train_mode) elif self.loss_func == 'Ldh': # the timeline loss of the true label should be lower than that of all false/other labels gt_loss = self.get_Lt(rel, s1, d1, s2, d2, train_mode) loss = 0.0 for other_rel in all_relations: if other_rel != rel: loss += torch.max(torch.stack([gt_loss - self.get_Lt(other_rel, s1, d1, s2, d2, train_mode) + self.dmin, self.constants['ZERO']])) return loss elif self.loss_func == 'Ldcem': # Uses standard normalization instead of softmax f = lambda x: -x score_per_relation = torch.stack([f(self.get_Lt(rel, s1, d1, s2, d2, train_mode))] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel]) lifted_scores = score_per_relation + (0 - torch.min(score_per_relation)) minmaxnorm = lambda x: x / torch.sum(x) mm1 = minmaxnorm(lifted_scores) return 1 - mm1[0] elif self.loss_func == 'Ldcemt': # Uses standard normalization instead of softmax and use tanh to flatten low scores (and prevent forever pushing away from unlikely relations, causing the time-line to move always during learning) f = lambda x: torch.tanh(-x) score_per_relation = torch.stack([f(self.get_Lt(rel, s1, d1, s2, d2, train_mode))] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel]) lifted_scores = score_per_relation + (0 - torch.min(score_per_relation)) minmaxnorm = lambda x: x / torch.sum(x) mm1 = minmaxnorm(lifted_scores) return 1 - mm1[0] elif self.loss_func == 'Ldce': f = lambda x: -x new_score = torch.stack([f(self.get_Lt(rel, s1, d1, s2, d2, train_mode))] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel]) score_per_relation = new_score ref_vector = autograd.Variable(torch.LongTensor([0]), requires_grad=False) if self.gpu: ref_vector = ref_vector.cuda() cross_entropy = torch.nn.CrossEntropyLoss() return cross_entropy(score_per_relation.t(), ref_vector) elif self.loss_func in ['Lt+Ldh','Ldh+Lt']: gt_loss = self.get_Lt(rel, s1, d1, s2, d2, train_mode) loss = 0.0 for other_rel in all_relations: if other_rel != rel: loss += torch.max(torch.stack([gt_loss - self.get_Lt(other_rel, s1, d1, s2, d2, train_mode) + self.dmin, self.constants['ZERO']])) return loss + gt_loss elif self.loss_func in ['Lt+Ldce','Ldce+Lt']: f = lambda x: -x gt_loss = self.get_Lt(rel, s1, d1, s2, d2, train_mode) new_score = torch.stack([f(gt_loss)] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel]) score_per_relation = new_score ref_vector = autograd.Variable(torch.LongTensor([0]), requires_grad=False) if self.gpu: ref_vector = ref_vector.cuda() cross_entropy = torch.nn.CrossEntropyLoss() return cross_entropy(score_per_relation.t(), ref_vector) + gt_loss elif self.loss_func in ['Ldh+Ldce','Ldce+Ldh']: gt_loss = self.get_Lt(rel, s1, d1, s2, d2, train_mode) f = lambda x: -x loss = 0.0 for other_rel in all_relations: if 
other_rel != rel: loss += torch.max(torch.stack([gt_loss - self.get_Lt(other_rel, s1, d1, s2, d2, train_mode) + self.dmin, self.constants['ZERO']])) new_score = torch.stack([f(gt_loss)] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel]) score_per_relation = new_score ref_vector = autograd.Variable(torch.LongTensor([0]), requires_grad=False) if self.gpu: ref_vector = ref_vector.cuda() cross_entropy = torch.nn.CrossEntropyLoss() loss += cross_entropy(score_per_relation.t(), ref_vector) return loss elif self.loss_func == 'L*': gt_loss = self.get_Lt(rel, s1, d1, s2, d2, train_mode) f = lambda x: -x loss = 0.0 for other_rel in all_relations: if other_rel != rel: loss += torch.max(torch.stack([gt_loss - self.get_Lt(other_rel, s1, d1, s2, d2, train_mode) + self.dmin, self.constants['ZERO']])) new_score = torch.stack([f(gt_loss)] + [f(self.get_Lt(r, s1, d1, s2, d2, train_mode)) for r in all_relations if not r==rel]) score_per_relation = new_score ref_vector = autograd.Variable(torch.LongTensor([0]), requires_grad=False) if self.gpu: ref_vector = ref_vector.cuda() cross_entropy = torch.nn.CrossEntropyLoss() loss += cross_entropy(score_per_relation.t(), ref_vector) loss += self.get_Lt(rel, s1, d1, s2, d2, train_mode)[0] return loss def train(self, data, num_epochs=5, max_docs=None, viz_inbetween=False, verbose=0,save_checkpoints=None, eval_on=None, batch_size=32, temporal_awareness_ref_dir=None, clip=1.0, pred_relations=None, patience=100, loss_func=None, pointwise_loss=None,tune_margin=1, checkpoint_interval=1000,timex3_dur_loss=False, reset_optimizer=None): training_start_time = time.time() print('Fixed components:', self.tied_components) print('Trainable parameters:', sum([np.prod(list(par.size())) for par in self.get_trainable_params()])) print ('epochs:', num_epochs, 'dropout:', self.dropout, 'batch_size:', batch_size) print('checkpoints:', save_checkpoints) torch.backends.cudnn.benchmark = True self.reset_optimizer() if loss_func: self.loss_func = loss_func if pointwise_loss: self.pointwise_loss=pointwise_loss print('Lr loss func:', self.loss_func) print('Lp loss func:',self.pointwise_loss) if max_docs: data = data[:max_docs] # Taking subsection from training to calculate training accuracy train_err_subset = data[:max(int(len(data)*0.05),5)] pred_relations = pred_relations if pred_relations else self.rels_train if save_checkpoints: checkpoint_dir = self.model_dir + '/checkpoints/' os.makedirs(checkpoint_dir) if eval_on: error_dir_conf = self.model_dir + '/errors/confusion/' error_dir_entities = self.model_dir + '/errors/entities/' os.makedirs(error_dir_conf) os.makedirs(error_dir_entities) dev_metrics, F1_TA, P_TA, R_TA = evaluate_timelinemodel(self, eval_on, pred_relations,temporal_awareness_ref_dir=temporal_awareness_ref_dir,all_pairs=True) train_metrics, _, _, _ = evaluate_timelinemodel(self, train_err_subset, pred_relations, all_pairs=True, entity_error_analysis_file_path=error_dir_entities+'/train_0.txt') save_confusion_matrix_from_metrics(train_metrics, error_dir_conf + '/train_0.html') save_confusion_matrix_from_metrics(dev_metrics, error_dir_conf + '/dev_0.html') # saving initial evaluation (before training) best_eval_acc = get_acc_from_confusion_matrix(dev_metrics) epoch_stats = {'loss':[None], 'eval_acc':[get_acc_from_confusion_matrix(dev_metrics)], 'train_acc':[get_acc_from_confusion_matrix(train_metrics)]} if temporal_awareness_ref_dir: epoch_stats['F1_TA'], epoch_stats['P_TA'], epoch_stats['R_TA'] = [F1_TA], [P_TA], [R_TA] else: best_eval_acc = 
0,0 if viz_inbetween: viz_dir = self.model_dir + '/viz/' os.makedirs(viz_dir) viz_doc = data[0] self.pred_viz(viz_doc, path=viz_dir + '/timeline0.html') num_examples_seen, num_examples_seen_prev_chkpt = 0, 0 batch_id = 0 e = 0 chkpt_id,best_chkpt = 0,0 while (e < num_epochs + 1) and (chkpt_id - best_chkpt <= patience): e+=1 # ------------------------------------- start of epoch ------------------------ # set network to training mode (for dropout) streaming_avg_loss = [] start_time = time.time() batches = [] num_batches_per_doc = {} for doc_id,doc in enumerate(data): c_rels = [(r, p) for (r,ps) in doc.span_pair_annotations.items() for p in ps if r in self.rels_train] random.shuffle(c_rels) num_batches = int(len(c_rels)/batch_size) + 1 num_batches_per_doc[doc_id] = num_batches batch_indices = range(num_batches) for batch_i in batch_indices: batch = c_rels[batch_i*batch_size:(batch_i+1)*batch_size] batches.append((doc_id,batch)) random.shuffle(batches) print ('\n===== Epoch', e, '(',(len(data)),' docs,',len(batches),'batches ) =====\n') self.set_train_mode() for doc_id, batch in batches: if chkpt_id - best_chkpt > patience: print('no more patience...') break if reset_optimizer and len(streaming_avg_loss) % reset_optimizer: # reset optimizer every X iterations self.reset_optimizer() doc, batch_start_time, batch_id, num_examples_seen = data[doc_id], time.time(), batch_id + 1, num_examples_seen + len(batch) loss, predicted_spans = 0.0, {} self.optimizer.zero_grad() encoded_text = self.encode_tokens(doc) # Make span predictions for rel, (span_a1, span_a2) in batch: if not span_a1 in predicted_spans: predicted_spans[span_a1] = self.pred_span(doc, span_a1, encoded_text, convert_to_floats=False) if not span_a2 in predicted_spans: predicted_spans[span_a2] = self.pred_span(doc, span_a2, encoded_text, convert_to_floats=False) # Calculate TLink Loss for rel, (span_a1, span_a2) in batch: s1, d1 = predicted_spans[span_a1] s2, d2 = predicted_spans[span_a2] Lr = self.get_Lr(rel, s1, d1, s2, d2, pred_relations, train_mode=True).view(1) loss += Lr if self.absolute: # Calculate Span Loss for span in predicted_spans: #print('--------------') #print(doc.span_to_string(span)) anns = doc.reverse_span_annotations[span] if span in doc.reverse_span_annotations else [] vs = [ann.split(':')[1] for ann in anns if ann.split(':')[0] == 'value'] value = vs[0] if len(vs) > 0 else None if value: num_seconds = get_dur_from_value(value) if num_seconds: gt_duration = float(num_seconds) / 86400 # to number of days s, d = predicted_spans[span] #print('gt',num_seconds, gt_duration, d) Ldur = torch.abs(d - gt_duration).view(1) #print('Ldur>>', Ldur) loss += Ldur if self.doc_normalization: loss = loss / num_batches_per_doc[doc_id] loss_end_time = time.time() batch_loss = loss.cpu().data.numpy()[0] / len(batch) if type(loss) != float else 0 if batch_loss > 0: loss.backward() #self.print_gradient_by_name() if clip: for params in self.get_trainable_params(): nn.utils.clip_grad_norm(params,clip) self.optimizer.step() streaming_avg_loss.append(batch_loss) print (batch_id, '/',len(batches), doc.id, '\tbatch_loss:', round(batch_loss,5), 'streaming_avg_loss:',round(np.mean(streaming_avg_loss[-100:]),5),'\t t:', round(loss_end_time - batch_start_time,2),'backprop t:',round(time.time()-loss_end_time,2)) if num_examples_seen - num_examples_seen_prev_chkpt > checkpoint_interval : # After every 10.000 examples evaluate the status quo chkpt_id += 1 num_examples_seen_prev_chkpt = num_examples_seen self.set_eval_mode() if viz_inbetween: 
viz_start_time = time.time() self.pred_viz(viz_doc, path=viz_dir + '/timeline'+str(chkpt_id)+'.html') print ('viz t:',round(time.time() - viz_start_time, 2)) avg_loss = np.mean(streaming_avg_loss[-100:]) epoch_stats['loss'].append(avg_loss) print('\n-- checkpoint', chkpt_id, '--') print('> avg loss: [', avg_loss, '] examples seen:', num_examples_seen,'chkpt t:', round(time.time() - start_time,2)) print('DCT\ts:', self.nn['s_dct*'].data.cpu().numpy(),'\td:',self.clamp(self.nn['d_dct*'], self.dmin).data.cpu().numpy()) if eval_on: start_time_eval = time.time() print('eval rels:', pred_relations) original_margin = self.pred_margin m_range = set([max(original_margin+d,0) for d in np.arange(-0.15, 0.2, 0.05)]) if tune_margin == 2 else [original_margin] best_m_acc, best_m = 0, original_margin for test_margin in m_range: self.pred_margin = test_margin dev_metrics, F1_TA, P_TA, R_TA = evaluate_timelinemodel(self, eval_on, pred_relations,temporal_awareness_ref_dir=temporal_awareness_ref_dir, all_pairs=True, entity_error_analysis_file_path=error_dir_entities + '/dev_' +str(chkpt_id) + '.txt') eval_acc=get_acc_from_confusion_matrix(dev_metrics) if tune_margin == 2: print('m:', round(test_margin, 3), 'eval_acc', round(eval_acc, 3)) if eval_acc > best_m_acc: best_m, best_m_acc, best_eval_metric = test_margin, eval_acc, dev_metrics if temporal_awareness_ref_dir: best_F1_TA, best_P1_TA, best_R_TA = F1_TA, P_TA, R_TA self.pred_margin = best_m train_metrics, _, _, _ = evaluate_timelinemodel(self, train_err_subset, pred_relations, all_pairs=True, entity_error_analysis_file_path=error_dir_entities + '/train_' +str(chkpt_id) + '.txt') train_acc=get_acc_from_confusion_matrix(train_metrics) save_confusion_matrix_from_metrics(train_metrics, error_dir_conf + '/train_' + str(chkpt_id) + '-m'+ str(self.pred_margin) + '.html') save_confusion_matrix_from_metrics(best_eval_metric, error_dir_conf + '/dev_' + str(chkpt_id) + '-m'+ str(self.pred_margin) + '.html') epoch_stats['eval_acc'].append(eval_acc) epoch_stats['train_acc'].append(train_acc) if temporal_awareness_ref_dir: epoch_stats['F1_TA'].append(F1_TA) epoch_stats['P_TA'].append(P_TA) epoch_stats['R_TA'].append(R_TA) print ('M:',round(self.pred_margin,3), 'f1_ta', best_F1_TA,'p_ta', best_P1_TA, 'r_ta', best_R_TA, 'eval_acc:', round(best_m_acc, 3), 'train_acc:',round(train_acc, 3), 't:', round(time.time()-start_time_eval, 2)) else: print ('M:',round(self.pred_margin,3), '\teval_acc:', round(best_m_acc, 3), 'train_acc:',round(train_acc, 3), 't:', round(time.time()-start_time_eval, 2)) if epoch_stats['eval_acc'][-1] >= best_eval_acc: print(epoch_stats['eval_acc'][-1],'>=', best_eval_acc) best_chkpt, best_eval_acc = chkpt_id, epoch_stats['eval_acc'][-1] if save_checkpoints: self.save_timelinemodel(checkpoint_dir + '/checkpoint_' + str(chkpt_id) + '.p') plot_data = [go.Scatter(x=np.array(range(num_epochs)), y=np.array(values), mode='lines+markers', name=key) for key,values in epoch_stats.items()] py.offline.plot(plot_data, filename=self.model_dir + '/train_stats.html', auto_open=False) print() self.set_train_mode() self.set_eval_mode() if save_checkpoints: best_checkpoint, best_score = best_chkpt, best_eval_acc print('>>> using best checkpoint:', best_checkpoint, 'with dev score', best_score) if best_checkpoint > 0: best_checkpoint_model = load_timelinemodel(checkpoint_dir + '/checkpoint_' + str(best_checkpoint) + '.p') print('setting checkpoint') self.__dict__.update(best_checkpoint_model.__dict__) if tune_margin: self.tune_pred_margin(data, pred_relations) 
self.save_timelinemodel(self.model_dir + '/model.p') print ('finished training t:',round(time.time()-training_start_time, 2)) def pred_span(self, doc, span, encoded_text, convert_to_floats=True): start, duration = self.pred_starttime(span, doc, encoded_text), self.pred_duration(span, doc, encoded_text) if convert_to_floats: start, duration = float(start.cpu().data.numpy()[0,0]), float(duration.cpu().data.numpy()[0,0]) return start, duration def start_duration_pair_to_relation(self, s1, d1, s2, d2, rels): # Returns the relation from rels that has the lowest Lt loss rel_losses = [(rel, self.get_Lt(rel, s1, d1, s2, d2).cpu().data.numpy()[0]) for rel in rels] return min(rel_losses, key=lambda x:x[1])[0] def pred_viz(self, doc, path='timeline.path'): # https://plot.ly/python/gantt/ encoded_text = self.encode_tokens(doc) events = {} dct_str = [label[6:] for label in doc.reverse_span_annotations[(0,0)] if 'value:' in label][0] dct_date_str = re.findall(r'\d\d\d\d-\d\d-\d\d', dct_str)[0] dct= datetime.datetime.strptime(dct_date_str, '%Y-%m-%d') for event_span in doc.span_annotations['EType:EVENT']: event_str = doc.text[event_span[0]:event_span[1]] start, duration = self.pred_span(doc, event_span, encoded_text) events[event_str] = {'start_date':self.num_to_date(float(start),dct_date=dct), 'end_date':self.num_to_date(float(start + duration),dct_date=dct)} df_events = [dict(Task=event, Start=events[event]['start_date'], Finish=events[event]['end_date'], Resource='EVENT') for event in events] timex3s = {'DCT': {'start_date':self.num_to_date(float(0),dct_date=dct), 'end_date':self.num_to_date(float(0 + 1),dct_date=dct)}} for timex_span in doc.span_annotations['EType:TIMEX3']: timex3_str = doc.text[timex_span[0]:timex_span[1]] start, duration = self.pred_span(doc, timex_span, encoded_text) timex3s[timex3_str] = {'start_date':self.num_to_date(float(start),dct_date=dct), 'end_date':self.num_to_date(float(start + duration),dct_date=dct)} df_timex3 = [dict(Task=timex3, Start=timex3s[timex3]['start_date'], Finish=timex3s[timex3]['end_date'], Resource='TIMEX3') for timex3 in timex3s] colors = {'EVENT': 'rgb(0, 0, 255)', 'TIMEX3': 'rgb(0, 255, 100)' } fig = ff.create_gantt(sorted(df_events+df_timex3, key=lambda x: self.date_to_num(x['Start'])), title=doc.id, colors=colors, index_col='Resource',show_colorbar=True, group_tasks=True) py.offline.plot(fig, filename=path,auto_open=False) def predict_doc(self, doc, span_labels): self.set_eval_mode() encoded_text = self.encode_tokens(doc) for label in span_labels: for span in doc.span_annotations[label] + [(0,0)]: start, duration = self.pred_span(doc, span, encoded_text) st_lab, dur_lab = 'start:' + str(start), 'duration:' + str(duration) if not st_lab in doc.span_annotations: doc.span_annotations[st_lab] = [] if not dur_lab in doc.span_annotations: doc.span_annotations[dur_lab] = [] doc.span_annotations[st_lab].append(span) doc.span_annotations[dur_lab].append(span) doc.reverse_span_annotations = reverse_dict_list(doc.span_annotations) return doc def classify_rels_in_doc(self, doc, rels, all_pairs=False): if all_pairs: pairs = set([pair for pair in doc.reverse_span_pair_annotations]) else: pairs = set([pair for rel in rels if rel in doc.span_pair_annotations for pair in doc.span_pair_annotations[rel]]) encoded_text = self.encode_tokens(doc) span_predictions = {} span_pair_predictions = {r:[] for r in rels} for a1,a2 in pairs: if not a1 in span_predictions: span_predictions[a1] = self.pred_span(doc, a1, encoded_text, convert_to_floats=False) if not a2 in 
span_predictions: span_predictions[a2] = self.pred_span(doc, a2, encoded_text, convert_to_floats=False) s1, d1 = span_predictions[a1] s2, d2 = span_predictions[a2] pred_rel = self.start_duration_pair_to_relation(s1, d1, s2, d2, rels) span_pair_predictions[pred_rel].append((a1, a2)) return span_pair_predictions,span_predictions def save_timelinemodel(self, path): print ('saving model', path) init_time = time.time() with open(path, 'wb') as f: pickle.dump(self, f, pickle.HIGHEST_PROTOCOL) print('saved t:',round(time.time()-init_time,2),'s') def parse_date(self, date): return dparser.parse(date) def date_to_num(self, date, dct_date=None): if not dct_date: dct_date = datetime.datetime(2017,10,12) return (date - dct_date).total_seconds() def num_to_date(self, num, dct_date=None): if not dct_date: dct_date = datetime.datetime(2017,10,12) return dct_date + datetime.timedelta(0, num) def tune_pred_margin(self, dataset, relations, margins=np.arange(0,1,0.1/3), max_docs=10): print('Tuning prediction margin') print('Training margin:', self.train_margin) tuning_dir = self.model_dir + '/tuning_m/' os.mkdir(tuning_dir) random.shuffle(dataset) max_acc, max_margin = 0, 0 for m in margins: self.pred_margin = m metrics, F1, P, R = evaluate_timelinemodel(self, dataset[:max_docs], relations, all_pairs=True) acc = get_acc_from_confusion_matrix(metrics) save_confusion_matrix_from_metrics(metrics, tuning_dir + '/m'+ str(self.pred_margin) + '.html') if acc > max_acc: max_acc = acc max_margin = m print('m:',round(m,3),'\tacc:', acc) print('best margin:', max_margin) self.pred_margin = max_margin def load_timelinemodel(path): print ('loading model', path) with open(path, 'rb') as f: return pickle.load(f) def read_word_vectors(path): print('reading word vectors:', path) try: wv = KeyedVectors.load_word2vec_format(path, binary=True) except: wv = KeyedVectors.load_word2vec_format(path, binary=False) return wv def write_average_durs_and_starts(model, preds, file_path): word_to_s, word_to_d = {}, {} pred_dir = '/'.join(file_path.split('/')[:-1]) if not os.path.exists(pred_dir): os.makedirs(pred_dir) for doc in preds: encoded_text = model.encode_tokens(doc) for espan in doc.span_annotations['EType:EVENT']: s, d = model.pred_span(doc, espan, encoded_text, convert_to_floats=True) tok_str = doc.span_to_string(espan) if not tok_str in word_to_s: word_to_s[tok_str],word_to_d[tok_str] = [],[] word_to_s[tok_str].append(s) word_to_d[tok_str].append(d) word_to_avg_s = sorted([(w,np.mean(values),np.var(values)) for w,values in word_to_s.items()], key=lambda x: x[1]) word_to_avg_d = sorted([(w,np.mean(values),np.var(values)) for w,values in word_to_d.items()], key=lambda x: x[1], reverse=True) with open(file_path, 'w') as f: f.write('--- Start Times Ascending --- (WORD, START, FREQ)\n\n') for w,avg_s,var_s in word_to_avg_s: f.write(w + '\t' + str(round(avg_s,4)) + '\t' + str(round(var_s,4)) + '\t' + str(model.word_frequencies[w] if w in model.word_frequencies else '<UNK>') + '\n') f.write('\n\n--- Durations Descending --- (WORD, DURATION, FREQ)\n\n') for w,avg_d,var_d in word_to_avg_d: f.write(w + '\t' + str(round(avg_d,4)) + '\t' + str(round(var_d,4)) + '\t' + str(model.word_frequencies[w] if w in model.word_frequencies else '<UNK>') + '\n') def evaluate_timelinemodel(model, docs, rel_labels, temporal_awareness_ref_dir=None, all_pairs=False, error_viz_dir=None, entity_error_analysis_file_path=None, write_average_durations_and_starts=False,print_sd_preds=False): preds, entity_errors_per_doc = [], [] for doc in docs: #remove 
relations that you don't want to evaluate on for rel in doc.span_pair_annotations: if not rel in rel_labels: doc.span_pair_annotations[rel] = [] # copy ref doc text etc pred = copy(doc) # remove relation annotations pred.span_pair_annotations = {} # classify relations using the model pairwise_labels, pointwise_preds = model.classify_rels_in_doc(doc, rel_labels,all_pairs=all_pairs) pred.update_annotations(span_pair_update=pairwise_labels) preds.append(pred) if print_sd_preds: if not os.path.exists(print_sd_preds): os.mkdir(print_sd_preds) with open(print_sd_preds + '/' + doc.id + '.txt', 'w') as f: preds_string = '\n'.join([str(s[0][0].cpu().data.numpy()) + '\t'+str(d[0][0].cpu().data.numpy()) + '\t' + str(span) +'\t'+ doc.span_to_string(span) for (span, (s,d)) in sorted(pointwise_preds.items(), key=lambda x: x[0][0])]) preds_string = 'start\tduration\tspan\ttext\n' + preds_string f.write(preds_string) if error_viz_dir: viz_docs_rel_difference(docs, preds, error_viz_dir) # evaluate predictions metrics, entity_errors_per_doc = get_eval_metrics_docs(docs, preds, rel_labels, entity_error_analysis_file_path, error_viz_dir) if entity_error_analysis_file_path: save_entity_error_analysis(docs, entity_errors_per_doc, entity_error_analysis_file_path) if write_average_durations_and_starts: write_average_durs_and_starts(model, preds, write_average_durations_and_starts) if temporal_awareness_ref_dir: #print('[temporal awareness evaluation subscripts]') # write preds to tmp folder tmp_pred_dir = model.model_dir + '/tmp_preds_'+str(len(docs))+'/' if not os.path.exists(tmp_pred_dir): os.mkdir(tmp_pred_dir) else: shutil.rmtree(tmp_pred_dir) os.mkdir(tmp_pred_dir) if not temporal_awareness_ref_dir[-1]=='/': temporal_awareness_ref_dir = temporal_awareness_ref_dir + '/' write_timebank_folder(preds, tmp_pred_dir, verbose=0) # 1. normalize temporal graphs norm_cmd = 'java -jar ./tempeval-3-tools/TimeML-Normalizer/TimeML-Normalizer.jar -a "'+temporal_awareness_ref_dir+';'+tmp_pred_dir+'"' norm_out_str = subprocess.check_output(norm_cmd, shell=True,stderr=subprocess.STDOUT) # 2. 
eval eval_cmd = 'python2.7 ./tempeval-3-tools/evaluation-relations/temporal_evaluation.py '+temporal_awareness_ref_dir[:-1]+'-normalized/'+' '+tmp_pred_dir[:-1]+'-normalized/ '+str(0) eval_out_str = subprocess.check_output(eval_cmd, shell=True).decode("utf-8") F1, P, R = [float(x) for x in eval_out_str.split('\n')[3].split()] return metrics, F1, P, R else: return metrics, None, None, None def get_eval_metrics_docs(docs, preds, rel_labels, entity_error_analysis_file_path, error_viz_dir): entity_errors_per_doc = [] metrics = {rel:{rel:0 for rel in rel_labels} for rel in rel_labels} for i in range(len(preds)): # evaluate prediction if error_viz_dir: pred_metrics, metrics_per_span = get_selective_rel_metrics(docs[i], preds[i], rels=rel_labels, print_pairwise_errors=error_viz_dir + '/pairwise_errors_viz/') else: pred_metrics, metrics_per_span = get_selective_rel_metrics(docs[i], preds[i], rels=rel_labels) if entity_error_analysis_file_path: entity_errors_per_doc.append(metrics_per_span) # summing results for all documents for ref_rel in metrics: for pred_rel in metrics[ref_rel]: metrics[ref_rel][pred_rel] += pred_metrics[ref_rel][pred_rel] return metrics, entity_errors_per_doc class TimelineFinder(TimelineModel): # TL2RTL Model def __init__(self, timeml_docs, dmin=0.025, rels_train=['BEFORE','AFTER','INCLUDES','IS_INCLUDED','SIMULTANEOUS'], rels_pred=['BEFORE','AFTER','INCLUDES','IS_INCLUDED','SIMULTANEOUS']): # Builds timelines from TimeML files self.dmin=dmin self.constants = {} self.constants['ZERO'] = autograd.Variable(torch.FloatTensor([0]),requires_grad=False) self.entity_starts = {doc.id:{eid:autograd.Variable(torch.FloatTensor([[0]]),requires_grad=True) for eid in doc.get_span_labels_by_regex('ei\d+').union(doc.get_span_labels_by_regex('t\d+')) }for doc in timeml_docs} self.entity_durations = {doc.id:{eid:autograd.Variable(torch.FloatTensor([[self.dmin]]),requires_grad=True) for eid in doc.get_span_labels_by_regex('ei\d+').union(doc.get_span_labels_by_regex('t\d+')) }for doc in timeml_docs} self.rels_pred = rels_pred self.rels_train = rels_train self.gpu=False self.unk_token = '__unk__' self.feature_keys = None self.windex, self.cindex, self.pindex, self.findex = self.setup_vocabularies(timeml_docs, 0, special_conflation=0, entity_sequence=0) return def encode_tokens(self, doc, entity_spans=None): if not doc.id in self.entity_starts: print('ERROR:', doc.id, 'not found in timeline encoded documents') exit() encodings = {'s':{}, 'd':{}} for eid in self.entity_starts[doc.id]: if not eid in doc.span_annotations: print('ERROR: eid not in document annotations:', eid, doc.get_span_labels_by_regex(eid[:2]+'.*')) exit() spans = doc.span_annotations[eid] if len(spans) > 1: print('!!!!!!!', doc.id, eid) span = spans[0] s, d = s, d = self.entity_starts[doc.id][eid], self.clamp(self.entity_durations[doc.id][eid], self.dmin) encodings['s'][span] = s encodings['d'][span] = d return encodings def train(self, timeml_docs, num_epochs): print('\n===== Building Timeline for each Document =====') # Starting to construct timelines for doc in timeml_docs: params = list(self.entity_starts[doc.id].values()) + list(self.entity_durations[doc.id].values()) optimizer = torch.optim.Adam(params, lr=0.001) print(doc.id) for i in range(0,num_epochs): optimizer.zero_grad() loss = 0.0 num_rels = 0 for rel_type in self.rels_train: if rel_type in doc.span_pair_annotations: for sp_a1, sp_a2 in doc.span_pair_annotations[rel_type]: eid_a1 = [label for label in doc.reverse_span_annotations[sp_a1] if label in 
self.entity_starts[doc.id]][0] eid_a2 = [label for label in doc.reverse_span_annotations[sp_a2] if label in self.entity_starts[doc.id]][0] s1, d1 = self.entity_starts[doc.id][eid_a1], self.clamp(self.entity_durations[doc.id][eid_a1], min_value=self.dmin) s2, d2 = self.entity_starts[doc.id][eid_a2], self.clamp(self.entity_durations[doc.id][eid_a2], min_value=self.dmin) loss += self.get_Lr(rel_type, s1, d1, s2, d2, self.rels_pred, train_mode=True).view(1) num_rels += 1 loss.backward() optimizer.step() if loss == 0.0: break print('loss', loss, 'after',i+1,'steps')
python
import pygame
from cell_class import *
import copy

vec = pygame.math.Vector2

CELL_SIZE = 20


class GameWindow:
    def __init__(self, screen, x, y):
        self.screen = screen
        self.position = vec(x, y)
        self.width, self.height = 600, 600
        self.image = pygame.Surface((self.width, self.height))
        self.rect = self.image.get_rect()
        self.init_grids()

    def init_grids(self):
        # self.num_cols = 33
        # self.num_rows = 33
        self.num_cols = int(self.width / CELL_SIZE)
        self.num_rows = int(self.height / CELL_SIZE)

        self.grid = [[Cell(self.image, x, y) for x in range(self.num_cols)] for y in range(self.num_rows)]
        for row in self.grid:
            for cell in row:
                cell.get_neighbors(self.grid)

    def update(self):
        # inspect the current active gen
        # update the inactive grid to store next gen
        # swap out the active grid
        self.rect.topleft = self.position
        for row in self.grid:
            for cell in row:
                cell.update()

    def draw(self):
        self.image.fill((255, 255, 255))
        for row in self.grid:
            for cell in row:
                cell.draw()
        self.screen.blit(self.image, (self.position.x, self.position.y))
        pygame.display.flip()

    def reset_grid(self):
        self.grid = [[Cell(self.image, x, y) for x in range(self.num_cols)] for y in range(self.num_rows)]

    def evaluate(self):
        new_grid = copy.copy(self.grid)
        for row in self.grid:
            for cell in row:
                cell.live_neighbors()
        for yidx, row in enumerate(self.grid):
            for xidx, cell in enumerate(row):
                if cell.alive:
                    if cell.alive_neighbors == 2 or cell.alive_neighbors == 3:
                        new_grid[yidx][xidx].alive = True
                    if cell.alive_neighbors < 2:
                        new_grid[yidx][xidx].alive = False
                    if cell.alive_neighbors > 3:
                        new_grid[yidx][xidx].alive = False
                else:
                    if cell.alive_neighbors == 3:
                        new_grid[yidx][xidx].alive = True
        for yidx, row in enumerate(self.grid):
            for xidx, cell in enumerate(row):
                if cell.alive:
                    new_grid[yidx][xidx].set_color()
        self.grid = new_grid
python
'''
Created on Jul 28, 2013

@author: akittredge
'''

import pandas as pd
import pymongo


class MongoDataStore(object):
    def __init__(self, collection):
        self._collection = collection

    def __repr__(self):
        return '{}(collection={})'.format(self.__class__.__name__,
                                          self._collection.full_name)

    @classmethod
    def _ensure_indexes(cls, collection):
        collection.ensure_index([('index_val', pymongo.ASCENDING),
                                 ('identifier', pymongo.ASCENDING)])

    def get(self, metric, df):
        '''Populate a DataFrame.

        '''
        identifiers = list(df.columns)
        start, stop = df.index[0], df.index[-1]
        index = 'date'
        metric = self.sanitize_key(metric)
        query = {'identifier': {'$in': identifiers},
                 metric: {'$exists': True},
                 index: {'$gte': start, '$lte': stop},
                 }
        store_data = read_frame(qry=query,
                                index=index,
                                values=metric,
                                collection=self._collection)
        df.update(store_data)
        return df

    def set(self, metric, df):
        metric = self.sanitize_key(metric)
        write_frame(metric=metric, df=df, collection=self._collection)

    @classmethod
    def sanitize_key(cls, key):
        '''Can't have . or $ in mongo field names.'''
        key = key.replace('.', unichr(0xFF0E))
        key = key.replace('$', unichr(0xFF04))
        return key


# after pandas.io.sql
def read_frame(qry, index, values, collection):
    documents = collection.find(qry)
    result = pd.DataFrame.from_records(documents)
    if not result.empty:
        result = result.pivot(index=index, columns='identifier', values=values)
    return result


def write_frame(metric, df, collection):
    docs = []
    index_name = 'date'
    for column in df:
        for index_value, value in df[column].iteritems():
            docs.append({'identifier': column,
                         index_name: index_value,
                         metric: value})
    collection.insert(docs)
python
# -*- coding: utf-8 -*- # Copyright 2018 Whitestack, LLC # ************************************************************* # This file is part of OSM Monitoring module # All Rights Reserved to Whitestack, LLC # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # For those usages not covered by the Apache License, Version 2.0 please # contact: [email protected] or [email protected] ## import asyncio import json import logging import os import sys import unittest from aiokafka import AIOKafkaProducer, AIOKafkaConsumer from kafka.errors import KafkaError from osm_policy_module.core.config import Config log = logging.getLogger() log.level = logging.INFO stream_handler = logging.StreamHandler(sys.stdout) log.addHandler(stream_handler) class KafkaMessagesTest(unittest.TestCase): def setUp(self): super() cfg = Config() self.kafka_server = '{}:{}'.format(cfg.get('message', 'host'), cfg.get('message', 'port')) self.loop = asyncio.new_event_loop() def tearDown(self): super() def test_send_instantiated_msg(self): async def test_send_instantiated_msg(): producer = AIOKafkaProducer(loop=self.loop, bootstrap_servers=self.kafka_server, key_serializer=str.encode, value_serializer=str.encode) await producer.start() consumer = AIOKafkaConsumer( "ns", loop=self.loop, bootstrap_servers=self.kafka_server, consumer_timeout_ms=10000, auto_offset_reset='earliest', value_deserializer=bytes.decode, key_deserializer=bytes.decode) await consumer.start() try: with open( os.path.join(os.path.dirname(__file__), '../examples/instantiated.json')) as file: payload = json.load(file) await producer.send_and_wait("ns", key="instantiated", value=json.dumps(payload)) finally: await producer.stop() try: async for message in consumer: if message.key == 'instantiated': self.assertIsNotNone(message.value) return finally: await consumer.stop() try: self.loop.run_until_complete(test_send_instantiated_msg()) except KafkaError: self.skipTest('Kafka server not present.') if __name__ == '__main__': unittest.main()
python
import json from .miioservice import MiIOService def twins_split(string, sep, default=None): pos = string.find(sep) return (string, default) if pos == -1 else (string[0:pos], string[pos+1:]) def string_to_value(string): if string == 'null' or string == 'none': return None elif string == 'false': return False elif string == 'true': return True else: return int(string) def string_or_value(string): return string_to_value(string[1:]) if string[0] == '#' else string def miio_command_help(did=None, prefix='?'): quote = '' if prefix == '?' else "'" return f'\ Get Props: {prefix}<siid[-piid]>[,...]\n\ {prefix}1,1-2,1-3,1-4,2-1,2-2,3\n\ Set Props: {prefix}<siid[-piid]=[#]value>[,...]\n\ {prefix}2=#60,2-2=#false,3=test\n\ Do Action: {prefix}<siid[-piid]> <arg1|#NA> [...] \n\ {prefix}2 #NA\n\ {prefix}5 Hello\n\ {prefix}5-4 Hello #1\n\n\ Call MIoT: {prefix}<cmd=prop/get|/prop/set|action> <params>\n\ {prefix}action {quote}{{"did":"{did or "267090026"}","siid":5,"aiid":1,"in":["Hello"]}}{quote}\n\n\ Call MiIO: {prefix}/<uri> <data>\n\ {prefix}/home/device_list {quote}{{"getVirtualModel":false,"getHuamiDevices":1}}{quote}\n\n\ Devs List: {prefix}list [name=full|name_keyword] [getVirtualModel=false|true] [getHuamiDevices=0|1]\n\ {prefix}list Light true 0\n\n\ MiIO Spec: {prefix}spec [model_keyword|type_urn] [format=text|python|json]\n\ {prefix}spec\n\ {prefix}spec speaker\n\ {prefix}spec xiaomi.wifispeaker.lx04\n\ {prefix}spec urn:miot-spec-v2:device:speaker:0000A015:xiaomi-lx04:1\n\ ' async def miio_command(service: MiIOService, did, text, prefix='?'): cmd, arg = twins_split(text, ' ') if cmd.startswith('/'): return await service.miio_request(cmd, arg) if cmd.startswith('prop') or cmd == 'action': return await service.miot_request(cmd, json.loads(arg) if arg else None) argv = arg.split(' ') if arg else [] argc = len(argv) if cmd == 'list': return await service.device_list(argc > 0 and argv[0], argc > 1 and string_to_value(argv[1]), argc > 2 and argv[2]) if cmd == 'spec': return await service.miot_spec(argc > 0 and argv[0], argc > 1 and argv[1]) if not did or not cmd or cmd == '?' or cmd == '?' or cmd == 'help' or cmd == '-h' or cmd == '--help': return miio_command_help(did, prefix) props = [] isget = False for item in cmd.split(','): iid, value = twins_split(item, '=') siid, apiid = twins_split(iid, '-', '1') if not siid.isdigit() or not apiid.isdigit(): return 'ERROR: siid/piid/aiid must be integer' prop = [int(siid), int(apiid)] if not isget: if value is None: isget = True else: prop.append(string_or_value(value)) props.append(prop) if argc > 0: args = [string_or_value(a) for a in argv] if arg != '#NA' else [] return await service.miot_action(did, props[0][0], props[0][1], args) return await (service.miot_get_props if isget else service.miot_set_props)(did, props)
python
import socket
import logging

logger = logging.getLogger(__name__)


class P2PSocket:
    def __init__(self):
        self.s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)

    def bind(self, host, port):
        logger.debug("Binding P2P socket to (%s, %i)", host, port)
        self.s.bind((host, port))
        self.s.setblocking(False)

    def close(self):
        self.s.close()

    def recv(self, num=4096):
        try:
            return self.s.recvfrom(num)
        except BlockingIOError:
            pass
        except OSError:
            return b""

    def send(self, data, addr):
        self.s.sendto(data, addr)

    def client_address(self):
        return self.s.getsockname()
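# A minimal usage sketch (not part of the original module): the addresses and
# ports below are hypothetical. Because the socket is non-blocking, recv() is
# polled and may return None (no datagram yet) or b"" (socket error).
if __name__ == "__main__":
    sock = P2PSocket()
    sock.bind("0.0.0.0", 9000)
    sock.send(b"hello", ("127.0.0.1", 9001))  # hypothetical peer
    packet = sock.recv()
    if packet:
        data, addr = packet
        print(addr, data)
    sock.close()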
python
from django.shortcuts import render, get_object_or_404
from blog_posts.models import Post
from blog_posts.forms import PostForm


def index(request):
    posts = Post.objects.all()
    return render(request, 'administracao/index-admin.html',
                  context={"index": "Index", "posts": posts})


def post_detalhes(request, id):
    post = get_object_or_404(Post, id=id)
    if request.method == "POST":
        form = PostForm(request.POST, instance=post)
    else:
        # Build an unbound form on GET so the view always returns a response.
        form = PostForm(instance=post)
    return render(request, "blog_posts/post_detalhes.html", context={"form": form})


def erro401(request):
    return render(request, 'administracao/erro401.html')


def erro403(request):
    return render(request, 'administracao/erro403.html')


def erro404(request):
    return render(request, 'administracao/erro404.html')


def erro500(request):
    return render(request, 'administracao/erro500.html')


def esqueceu(request):
    return render(request, 'administracao/esqueceu.html')


def login(request):
    return render(request, 'administracao/login.html')
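# A hypothetical urls.py sketch (not part of the original file) showing how
# these views could be wired up; the route strings and names are assumptions.
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('post/<int:id>/', views.post_detalhes, name='post_detalhes'),
#     path('login/', views.login, name='login'),
# ]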
python
"""Algorithm for simulating a 2048 game using Monte-Carlo method.""" import random, _2048 SIMULATE_TIMES = 100000 DIRECTIONS = ('UP', 'DOWN', 'LEFT', 'RIGHT') def simulate_to_end(game): while game.get_state(): dircts = list(DIRECTIONS) for i in xrange(3): c = random.choice(dircts) if game.move(c): break dircts.remove(c) return game.get_score() def score_sum(game,direction): score = 0 temp = game.clone() temp.move(direction) for i in xrange(SIMULATE_TIMES): score += simulate_to_end(temp) return score def monte_carlo(game): scores = {} biggest = 0 best = None directions = list(DIRECTIONS) for d in DIRECTIONS: test = game.clone() if not test.move(d): directions.remove(d) for direction in directions: temp = game.clone() score = score_sum(temp, direction) if score > biggest: biggest = score best = direction scores[direction] = score print scores if len(set(scores)) == 1: return False else: return best if __name__ == '__main__': a_game = _2048.Gameplay() print monte_carlo(a_game)
python
# Define a procedure is_palindrome, that takes as input a string, and returns a
# Boolean indicating if the input string is a palindrome.

# Base Case: '' => True
# Recursive Case: if first and last characters don't match => False
#                 if they do match, is the middle a palindrome?

def is_palindrome(s):
    if s == '':
        return True
    if s[0] != s[-1]:
        return False
    return is_palindrome(s[1:-1])

#print is_palindrome('')
#>>> True
#print is_palindrome('abab')
#>>> False
#print is_palindrome('abba')
#>>> True
python
# Copyright (c) OpenMMLab. All rights reserved. import copy import warnings import mmcv import numpy as np import torch from mmdet.core.visualization.image import imshow_det_bboxes from ..builder import DETECTORS, build_backbone, build_head, build_neck from .base import BaseDetector INF = 1e8 @DETECTORS.register_module() class SingleStageInstanceSegmentor(BaseDetector): """Base class for single-stage instance segmentors.""" def __init__(self, backbone, neck=None, bbox_head=None, mask_head=None, train_cfg=None, test_cfg=None, pretrained=None, init_cfg=None): if pretrained: warnings.warn('DeprecationWarning: pretrained is deprecated, ' 'please use "init_cfg" instead') backbone.pretrained = pretrained super(SingleStageInstanceSegmentor, self).__init__(init_cfg=init_cfg) self.backbone = build_backbone(backbone) if neck is not None: self.neck = build_neck(neck) else: self.neck = None if bbox_head is not None: bbox_head.update(train_cfg=copy.deepcopy(train_cfg)) bbox_head.update(test_cfg=copy.deepcopy(test_cfg)) self.bbox_head = build_head(bbox_head) else: self.bbox_head = None assert mask_head, f'`mask_head` must ' \ f'be implemented in {self.__class__.__name__}' mask_head.update(train_cfg=copy.deepcopy(train_cfg)) mask_head.update(test_cfg=copy.deepcopy(test_cfg)) self.mask_head = build_head(mask_head) self.train_cfg = train_cfg self.test_cfg = test_cfg def extract_feat(self, img): """Directly extract features from the backbone and neck.""" x = self.backbone(img) if self.with_neck: x = self.neck(x) return x def forward_dummy(self, img): """Used for computing network flops. See `mmdetection/tools/analysis_tools/get_flops.py` """ raise NotImplementedError( f'`forward_dummy` is not implemented in {self.__class__.__name__}') def forward_train(self, img, img_metas, gt_masks, gt_labels, gt_bboxes=None, gt_bboxes_ignore=None, **kwargs): """ Args: img (Tensor): Input images of shape (B, C, H, W). Typically these should be mean centered and std scaled. img_metas (list[dict]): A List of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see :class:`mmdet.datasets.pipelines.Collect`. gt_masks (list[:obj:`BitmapMasks`] | None) : The segmentation masks for each box. gt_labels (list[Tensor]): Class indices corresponding to each box gt_bboxes (list[Tensor]): Each item is the truth boxes of each image in [tl_x, tl_y, br_x, br_y] format. Default: None. gt_bboxes_ignore (list[Tensor] | None): Specify which bounding boxes can be ignored when computing the loss. Returns: dict[str, Tensor]: A dictionary of loss components. 
""" gt_masks = [ gt_mask.to_tensor(dtype=torch.bool, device=img.device) for gt_mask in gt_masks ] x = self.extract_feat(img) losses = dict() # CondInst and YOLACT have bbox_head if self.bbox_head: # bbox_head_preds is a tuple bbox_head_preds = self.bbox_head(x) # positive_infos is a list of obj:`InstanceData` # It contains the information about the positive samples # CondInst, YOLACT det_losses, positive_infos = self.bbox_head.loss( *bbox_head_preds, gt_bboxes=gt_bboxes, gt_labels=gt_labels, gt_masks=gt_masks, img_metas=img_metas, gt_bboxes_ignore=gt_bboxes_ignore, **kwargs) losses.update(det_losses) else: positive_infos = None mask_loss = self.mask_head.forward_train( x, gt_labels, gt_masks, img_metas, positive_infos=positive_infos, gt_bboxes=gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore, **kwargs) # avoid loss override assert not set(mask_loss.keys()) & set(losses.keys()) losses.update(mask_loss) return losses def simple_test(self, img, img_metas, rescale=False): """Test function without test-time augmentation. Args: img (torch.Tensor): Images with shape (B, C, H, W). img_metas (list[dict]): List of image information. rescale (bool, optional): Whether to rescale the results. Defaults to False. Returns: list(tuple): Formatted bbox and mask results of multiple \ images. The outer list corresponds to each image. \ Each tuple contains two type of results of single image: - bbox_results (list[np.ndarray]): BBox results of single image. The list corresponds to each class. each ndarray has a shape (N, 5), N is the number of bboxes with this category, and last dimension 5 arrange as (x1, y1, x2, y2, scores). - mask_results (list[np.ndarray]): Mask results of single image. The list corresponds to each class. each ndarray has a shape (N, img_h, img_w), N is the number of masks with this category. """ feat = self.extract_feat(img) if self.bbox_head: outs = self.bbox_head(feat) # results_list is list[obj:`InstanceData`] results_list = self.bbox_head.get_results( *outs, img_metas=img_metas, cfg=self.test_cfg, rescale=rescale) else: results_list = None results_list = self.mask_head.simple_test( feat, img_metas, rescale=rescale, instances_list=results_list) format_results_list = [] for results in results_list: format_results_list.append(self.format_results(results)) return format_results_list def format_results(self, results): """Format the model predictions according to the interface with dataset. Args: results (:obj:`InstanceData`): Processed results of single images. Usually contains following keys. - scores (Tensor): Classification scores, has shape (num_instance,) - labels (Tensor): Has shape (num_instances,). - masks (Tensor): Processed mask results, has shape (num_instances, h, w). Returns: tuple: Formatted bbox and mask results.. It contains two items: - bbox_results (list[np.ndarray]): BBox results of single image. The list corresponds to each class. each ndarray has a shape (N, 5), N is the number of bboxes with this category, and last dimension 5 arrange as (x1, y1, x2, y2, scores). - mask_results (list[np.ndarray]): Mask results of single image. The list corresponds to each class. each ndarray has shape (N, img_h, img_w), N is the number of masks with this category. 
""" data_keys = results.keys() assert 'scores' in data_keys assert 'labels' in data_keys assert 'masks' in data_keys, \ 'results should contain ' \ 'masks when format the results ' mask_results = [[] for _ in range(self.mask_head.num_classes)] num_masks = len(results) if num_masks == 0: bbox_results = [ np.zeros((0, 5), dtype=np.float32) for _ in range(self.mask_head.num_classes) ] return bbox_results, mask_results labels = results.labels.detach().cpu().numpy() if 'bboxes' not in results: # create dummy bbox results to store the scores results.bboxes = results.scores.new_zeros(len(results), 4) det_bboxes = torch.cat([results.bboxes, results.scores[:, None]], dim=-1) det_bboxes = det_bboxes.detach().cpu().numpy() bbox_results = [ det_bboxes[labels == i, :] for i in range(self.mask_head.num_classes) ] masks = results.masks.detach().cpu().numpy() for idx in range(num_masks): mask = masks[idx] mask_results[labels[idx]].append(mask) return bbox_results, mask_results def aug_test(self, imgs, img_metas, rescale=False): raise NotImplementedError def show_result(self, img, result, score_thr=0.3, bbox_color=(72, 101, 241), text_color=(72, 101, 241), mask_color=None, thickness=2, font_size=13, win_name='', show=False, wait_time=0, out_file=None): """Draw `result` over `img`. Args: img (str or Tensor): The image to be displayed. result (tuple): Format bbox and mask results. It contains two items: - bbox_results (list[np.ndarray]): BBox results of single image. The list corresponds to each class. each ndarray has a shape (N, 5), N is the number of bboxes with this category, and last dimension 5 arrange as (x1, y1, x2, y2, scores). - mask_results (list[np.ndarray]): Mask results of single image. The list corresponds to each class. each ndarray has shape (N, img_h, img_w), N is the number of masks with this category. score_thr (float, optional): Minimum score of bboxes to be shown. Default: 0.3. bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines. The tuple of color should be in BGR order. Default: 'green' text_color (str or tuple(int) or :obj:`Color`):Color of texts. The tuple of color should be in BGR order. Default: 'green' mask_color (None or str or tuple(int) or :obj:`Color`): Color of masks. The tuple of color should be in BGR order. Default: None thickness (int): Thickness of lines. Default: 2 font_size (int): Font size of texts. Default: 13 win_name (str): The window name. Default: '' wait_time (float): Value of waitKey param. Default: 0. show (bool): Whether to show the image. Default: False. out_file (str or None): The filename to write the image. Default: None. 
Returns: img (Tensor): Only if not `show` or `out_file` """ assert isinstance(result, tuple) bbox_result, mask_result = result bboxes = np.vstack(bbox_result) img = mmcv.imread(img) img = img.copy() labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) if len(labels) == 0: bboxes = np.zeros([0, 5]) masks = np.zeros([0, 0, 0]) # draw segmentation masks else: masks = mmcv.concat_list(mask_result) if isinstance(masks[0], torch.Tensor): masks = torch.stack(masks, dim=0).detach().cpu().numpy() else: masks = np.stack(masks, axis=0) # dummy bboxes if bboxes[:, :4].sum() == 0: num_masks = len(bboxes) x_any = masks.any(axis=1) y_any = masks.any(axis=2) for idx in range(num_masks): x = np.where(x_any[idx, :])[0] y = np.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: bboxes[idx, :4] = np.array( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32) # if out_file specified, do not show image in window if out_file is not None: show = False # draw bounding boxes img = imshow_det_bboxes( img, bboxes, labels, masks, class_names=self.CLASSES, score_thr=score_thr, bbox_color=bbox_color, text_color=text_color, mask_color=mask_color, thickness=thickness, font_size=font_size, win_name=win_name, show=show, wait_time=wait_time, out_file=out_file) if not (show or out_file): return img
python
#LordLynx
#Part of PygameLord
import pygame, os
from pygame.locals import *
pygame.init()

#Loading Objects
'''
Parse_Locations(file)
file: Your text file, use a .txt
# Like in Python will be ignored, thusly follow this example
#Comment
./File/File
./File/Other File
...
'''
def Parse_Locations(file):
    file = open(file, 'r')  # read the file
    lines = []
    folders = []
    for text_line in file:
        lines.append(text_line)  # pull the files info
    file.close()  # close it
    moding = []
    for i in lines:
        s = i.strip('\n')  # split the lines up
        moding.append(s)
    for i in moding:
        if i and i[0] != '#':  # ignore blank lines or comments '#'
            folders.append(i)
    return folders

'''
Lord_Loader(paths, files)
paths: The folders returned in the Parse_Locations function
files: The file extensions which you wish to use
Modified versions of this are in Sounds and Images
If the opportunity arises copy and paste this code into your program and
change the files like the Image and Sound loaders
'''
def Lord_Loader(paths, files):
    Files = []
    File_Set = {}
    for path in paths:
        file = os.listdir(path)
        for Object in file:  # loops through the parts
            for fileEnd in files:
                if Object.endswith(fileEnd):
                    Files.append(os.path.join(path, Object))
    for file in Files:  # appends them
        text = os.path.split(file)[-1]
        text = text.split('.')
        text = text[0]
        File_Set[text] = file
    return File_Set
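# A minimal usage sketch (hypothetical file name and extension, not part of the
# original module): parse a locations file, then build a name -> path map of
# every .png found in those folders.
if __name__ == '__main__':
    folders = Parse_Locations('resources.txt')   # hypothetical locations file
    images = Lord_Loader(folders, ['.png'])
    for name, path in images.items():
        print(name, path)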
python
from results_saver import LogWriter from .ModelType import ModelType from .lda_lsa_model_tester import LModelTester from .naive_bayes_model_tester import NBModelTester from .lsa_tester import LSAModelTester from .svm_model_tester import SVMModelTester from ..methods.Lda import Lda from ..methods.Lsa import Lsa from ..methods.Lda_sklearn import LdaSklearn from ..methods.Naive_bayes import NaiveBayes from ..methods.SVM import SupportVectorMachines from ..methods.Decision_tree import DecisionTree from ..methods.Random_forest import RandomForest from results_saver import plot_confusion_matrix import numpy as np class GeneralTester: def __init__(self, log_writer, start_time): self.testing_docs = None self.training_docs = None self.num_of_topics = None self.log_writer:LogWriter = log_writer self.start_time = start_time self.topic_names = None self.model_results = [] self.preprocess_style = "" self.preproces_results = {} self.num_of_tests = 1 def set_new_dataset(self, num_of_topics, topic_names): """ Notifies that new dataset has been set and updates num_of_topics and topic_names atribtes :param num_of_topics: :param topic_names: """ self.num_of_topics = num_of_topics self.topic_names = topic_names def set_new_preprocess_docs(self, training_docs, testing_docs): """ Sets new dataset documents to be tested :param training_docs: :param testing_docs: :param preprocess_style: """ self.testing_docs = testing_docs self.training_docs = training_docs def do_test(self, model_type, num_of_tests, statistics, params, test_params, stable=False): """ Do test on provided model type. Also sets things up before the test. :param model_type: ModelType enum for model that should be tested :param num_of_tests: number of tests to be performed on this model :param statistics: list to which accuracy and other information will be written :param params: Parameters for tested model :param test_params: Parameters for test :param stable: Indicates whether algorithm is deterministic. If True only one test will be commited and the rest of results will be padded with same result (for charts comparisons). 
""" self.num_of_tests = num_of_tests accuracies = [] statistics.append([]) statistics.append([model_type.name]) statistics.append([x for x in range(num_of_tests)]) statistics[len(statistics) - 1].append("Average") statistics.append([]) for i in range(num_of_tests): accuracy = self.test_model(model_type, test_params.get("dataset_name", "none"), params,test_params) accuracies.append(accuracy) statistics[len(statistics) - 1].append(accuracy) self.log_writer.add_log("Testing {} model done with {}% accuracy".format(model_type, accuracy * 100)) self.log_writer.add_log("\n\n") if stable: for j in range(num_of_tests - 1): accuracies.append(accuracy) statistics[len(statistics) - 1].append(accuracy) break total_accuracy = sum(accuracies) / len(accuracies) self.log_writer.add_to_plot(model_type.name, accuracies) self.log_writer.draw_plot(model_type.name + " " + test_params.get("dataset_name", "none"), '{}_model_accuracy'.format(test_params.get("dataset_name", "none")), num_of_tests) self.model_results.append((model_type.name, accuracies)) if model_type in self.preproces_results: self.preproces_results[model_type].append((self.preprocess_style, accuracies)) else: self.preproces_results[model_type] = [(self.preprocess_style, accuracies)] statistics[len(statistics) - 1].append(total_accuracy) self.log_writer.add_log("Total accuracy is: {}".format(total_accuracy)) def test_model(self, model_type, test_name, params, test_params): """ Runs actual test on a model :param model_type: ModelType enum for model that should be tested :param test_name: name that will be used for creating output folder :param params: Parameters for tested model :return: Accuracy of provided model """ model = None tester = None if model_type == ModelType.LDA: model = Lda(self.num_of_topics, params=params) elif model_type == ModelType.LDA_Sklearn: model = LdaSklearn(self.num_of_topics, params=params) if model is not None: self.log_writer.add_log("Starting training {} model".format(model_type)) model.train(self.training_docs) # TODO watch out for rewrites self.log_writer.add_log("Starting testing {} model".format(model_type)) tester = LModelTester(self.training_docs, self.testing_docs, self.num_of_topics, self.log_writer, self.topic_names) if model_type == ModelType.LSA: model = Lsa(self.num_of_topics, params=params) self.log_writer.add_log("Starting training {} model".format(model_type)) model.train(self.training_docs) # TODO watch out for rewrites self.log_writer.add_log("Starting testing {} model".format(model_type)) tester = LSAModelTester(self.training_docs, self.testing_docs, self.num_of_topics, self.log_writer, self.topic_names) if model_type == ModelType.NB: model = NaiveBayes(params) self.log_writer.add_log("Starting training {} model".format(model_type)) model.train(self.training_docs, self.testing_docs) self.log_writer.add_log("Starting testing {} model".format(model_type)) tester = NBModelTester(self.training_docs, self.testing_docs, self.num_of_topics, self.log_writer, self.topic_names) if model_type == ModelType.SVM or model_type == ModelType.DT or model_type == ModelType.RF: if model_type == ModelType.SVM: model = SupportVectorMachines(params) elif model_type == ModelType.DT: model = DecisionTree(params) elif model_type == ModelType.RF: model = RandomForest(params) self.log_writer.add_log("Starting training {} model".format(model_type)) model.train(self.training_docs) self.log_writer.add_log("Starting testing {} model".format(model_type)) tester = SVMModelTester(self.training_docs, self.testing_docs, self.num_of_topics, 
self.log_writer, self.topic_names) accuracy = tester.test_model(model,test_name) cm:np.ndarray = np.array(tester.confusion_matrix) cm = cm[1:,1:] cm = cm.transpose() cm = cm.astype(np.uint32) dataset_helper = test_params.get('dataset_helper',None) plot_confusion_matrix(cm,dataset_helper.get_num_of_topics(),dataset_helper.get_dataset_name(),self.log_writer) return accuracy def create_test_name(self, dataset_name, start_time, model_name, preprocess_index, test_num): """ Helper function to create path to a current test folder :param dataset_name: name of a tested dataset :param start_time: can be any unique number. (if number was already used in past test results will rewrite those past test results) :param model_name: name of a tested model :param preprocess_index: Index of a preprocess settings :param test_num: number of a test (if multiple tests are conducted on a single model) :return: path to test folder """ return "\\results\\results{}{}\\{}\\preprocess{}\\test_num{}".format(dataset_name, start_time, model_name, preprocess_index, test_num)
python
#!/usr/bin/env python3

import pathlib
import sys

sys.path += ['/opt/py', str(pathlib.Path.home() / 'py')]

import basedir
import shlex
import subprocess

def info_beamer_invocation():
    custom_cmd = pathlib.Path.home() / '.config' / 'fenhl' / 'info-beamer'
    if custom_cmd.exists():
        return [str(custom_cmd)]
    #TODO support info-beamer-open-source (see ~/.config/fenhl/info-beamer @ familiepc)
    return ['sudo', '-E', str(pathlib.Path.home() / 'info-beamer-pi' / 'info-beamer')]

def run_node(node, *args, check=True, **kwargs):
    configured_nodes = basedir.config_dirs('fenhl/syncbin.json').json(base={}).get('info-beamer', {}).get('nodes', {})
    if node in configured_nodes:
        return subprocess.run(configured_nodes[node] + list(args), check=check, **kwargs)
    else:
        node_path = pathlib.Path(node).expanduser().resolve()
        return subprocess.run(info_beamer_invocation() + [str(node_path)] + list(args), check=check, **kwargs)

if __name__ == '__main__':
    if sys.argv[1] == '--list':
        for node_name, invocation in basedir.config_dirs('fenhl/syncbin.json').json(base={}).get('info-beamer', {}).get('nodes', {}).items():
            print('{}: {}'.format(node_name, ' '.join(shlex.quote(arg) for arg in invocation)))
    else:
        sys.exit(run_node(*sys.argv[1:], check=False).returncode)
python
import random

from app.core.utils import get_random_date


def build_demo_data():
    """
    Helper method, just to demo the app
    :return: a list of demo docs sorted by ranking
    """
    samples = ["Messier 81", "StarBurst", "Black Eye", "Cosmos Redshift",
               "Sombrero", "Hoags Object", "Andromeda", "Pinwheel",
               "Cartwheel", "Mayall's Object", "Milky Way", "IC 1101",
               "Messier 87", "Ring Nebular", "Centarus A", "Whirlpool",
               "Canis Major Overdensity", "Virgo Stellar Stream"]

    res = []
    for index, item in enumerate(samples):
        res.append(DocumentInfo(item, (item + " ") * 5, get_random_date(),
                                "doc_details?id={}&param1=1&param2=2".format(index),
                                random.random()))

    # simulate sort by ranking
    res.sort(key=lambda doc: doc.ranking, reverse=True)
    return res


class SearchEngine:
    """educational search engine"""

    i = 12345

    def search(self, search_query):
        print("Search query:", search_query)

        results = []
        ##### your code here #####
        results = build_demo_data()  # replace with call to search algorithm
        ##### your code here #####

        return results


class DocumentInfo:
    def __init__(self, title, description, doc_date, url, ranking):
        self.title = title
        self.description = description
        self.doc_date = doc_date
        self.url = url
        self.ranking = ranking
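# A minimal usage sketch (not part of the original module): run a query against
# the demo engine and print each hit; it relies on app.core.utils being
# importable, as the module above already assumes.
if __name__ == "__main__":
    engine = SearchEngine()
    for doc in engine.search("andromeda"):
        print(doc.ranking, doc.title, doc.url)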
python
#!/usr/bin/env python3

"""
Project Icarus
creator: derilion
date: 01.07.2019
version: 0.1a
"""

"""
TODO:
- Installer
- Database Structure
- Special Characters in *.ini
- Setup of skills
- Configuration of Clients
- multi language support
"""

# imports
from icarus.icarus import Icarus

# thread safe init
if __name__ == "__main__":
    Icarus().start()
python
import requests
import json

remote_url = ""
device_id = ""
bearer = ""
api_key = ""
app_id = ""


def url(endpoint):
    return "{0}{1}".format(remote_url, endpoint)


def headers_with_headers(headers):
    new_headers = {}
    new_headers["Content-Type"] = "application/json"
    new_headers["X-BLGREQ-UDID"] = device_id
    new_headers["X-BLGREQ-SIGN"] = api_key
    new_headers["X-BLGREQ-APPID"] = app_id

    if bearer:
        new_headers["Authorization"] = "Bearer {0}".format(bearer)

    if headers:
        return dict(list(new_headers.items()) + list(headers.items()))
    else:
        return new_headers


def get(endpoint, parameters, headers):
    return requests.get(url(endpoint), params=parameters, headers=headers_with_headers(headers))


def post(endpoint, parameters, headers):
    return requests.post(url(endpoint), data=json.dumps(parameters), headers=headers_with_headers(headers))
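# A minimal usage sketch (not part of the original module): the base URL,
# endpoint and payload below are hypothetical placeholders; the module-level
# credentials are normally filled in before calling get()/post().
if __name__ == "__main__":
    remote_url = "https://api.example.com"   # hypothetical base URL
    response = post("/v1/register", {"device": "test"}, None)
    print(response.status_code, response.text)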
python
from .iotDualMotor import IotDualMotor


class IotEncodedMotor(IotDualMotor):
    """ the base class for motor with encoder
    The speed range from -100 to 100 with zero (less than minMovingSpeed)
    to stop the motor.
    """

    def __init__(self, name, parent, minMovingSpeed=5):
        """ construct a PiIotNode
        name: the name of the node
        parent: parent IotNode object. None for root node.
        minMovingSpeed: the minimum valid moving absolute speed
        """
        super(IotEncodedMotor, self).__init__(name, parent, minMovingSpeed)

    def runAngle(self, angle, speed, speed2=None):
        """ move the motor by specified angle for either single or dual motor
        angle range from 0 to 360 degree
        speed controls the direction ranges from -100 to 100
        """
        pass

    def goToPosition(self, position, position2=None, speed=100):
        """ run the motor to specified positions for either single or dual motor
        position range from int.min to int.max
        speed controls the direction ranges from -100 to 100
        """
        pass
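# A hedged, self-contained sketch (not part of the original module) of the
# angle-to-encoder-tick arithmetic a concrete runAngle() implementation would
# need; ticks_per_rev is an assumed encoder resolution, not a library value.
def _angle_to_ticks(angle, ticks_per_rev=360):
    """Convert a 0-360 degree request into encoder ticks."""
    return int(round(angle * ticks_per_rev / 360.0))

# e.g. a quarter turn on a hypothetical 1440-tick encoder:
# _angle_to_ticks(90, ticks_per_rev=1440) -> 360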
python
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class TaskProperties(Model): """Task properties of the software update configuration. :param parameters: Gets or sets the parameters of the task. :type parameters: dict[str, str] :param source: Gets or sets the name of the runbook. :type source: str """ _attribute_map = { 'parameters': {'key': 'parameters', 'type': '{str}'}, 'source': {'key': 'source', 'type': 'str'}, } def __init__(self, *, parameters=None, source: str=None, **kwargs) -> None: super(TaskProperties, self).__init__(**kwargs) self.parameters = parameters self.source = source
python
import sys import argparse from absynthe.graph_builder import TreeBuilder def treeGeneration(numRoots: int = 2, numLeaves: int = 4, branching: int = 2, numInnerNodes: int = 16): loggerNodeTypes: str = "SimpleLoggerNode" tree_kwargs = {TreeBuilder.KW_NUM_ROOTS: str(numRoots), TreeBuilder.KW_NUM_LEAVES: str(numLeaves), TreeBuilder.KW_BRANCHING_DEGREE: str(branching), TreeBuilder.KW_NUM_INNER_NODES: str(numInnerNodes), TreeBuilder.KW_SUPPORTED_NODE_TYPES: loggerNodeTypes} simpleTreeBuilder = TreeBuilder(**tree_kwargs) simpleTreeBuilder.generateNewGraph().dumpDotFile(sys.stdout) return if "__main__" == __name__: """ Dumps a simple, tree-like control flow graph on standard output. This output can be redirected to a file and converted to an image using graphviz's 'dot' utility. The graph is generated with fair amount of randomness, so repeated invocations with the same set of parameters will yield different graphs. """ argParser = argparse.ArgumentParser(description="Dumps a simple control flow graph on standard" + " output. This output can be redirected to a file and" + " converted to an image using graphviz's 'dot' utility." + " The graph is generated with fair amount of randomness," + " so repeated invocations with identical parameters will" + " yield different graphs.") argParser.add_argument("-r", "--num_roots", required=True, type=int, help="Number of roots in the graph.") argParser.add_argument("-l", "--num_leaves", type=int, required=True, help="Number of leaves in the graph.") argParser.add_argument("-n", "--num_nodes", type=int, required=True, help="Approximate number of inner nodes that this graph should contain." + " The actual number is usually larger" + " than what is specified here.") argParser.add_argument("-b", "--branching", type=int, required=True, help="Approximate avg. branching degree of nodes in this graph.") args = argParser.parse_args() r: int = args.num_roots l: int = args.num_leaves n: int = args.num_nodes b: int = args.branching treeGeneration(r, l, b, n)
python
import sys import time dy_import_module_symbols("shimstackinterface") SERVER_IP = getmyip() SERVER_PORT = 34829 UPLOAD_RATE = 1024 * 1024 * 15 # 15MB/s DOWNLOAD_RATE = 1024 * 1024 * 128 # 15MB/s DATA_TO_SEND = "HelloWorld" * 1024 * 1024 RECV_SIZE = 2**14 # 16384 bytes. MSG_RECEIVED = '' END_TAG = "@@END" def launchserver(): """ <Purpose> Launch a server that receives and echos the message back. <Arguments> None <Side Effects> None <Exceptions> None <Return> None """ shim_object = ShimStackInterface("(NoopShim)") tcpserver_socket = shim_object.listenforconnection(SERVER_IP, SERVER_PORT) while True: try: rip, rport, sockobj = tcpserver_socket.getconnection() break except SocketWouldBlockError: pass except (SocketClosedLocal, SocketClosedRemote): break msg_received = '' recv_closed = False send_closed = False # Echo back all the message that we receive. Exit out of the # loop once we get socket closed error. while True: try: msg_received += sockobj.recv(RECV_SIZE) except SocketWouldBlockError: pass except (SocketClosedLocal, SocketClosedRemote): break try: if len(msg_received) > 0: data_sent = sockobj.send(msg_received) msg_received = msg_received[data_sent : ] except SocketWouldBlockError: pass except (SocketClosedLocal, SocketClosedRemote): break def launch_test(): log("\nSetting upload rate to %dbytes/s. \nSetting download rate to %dbytes/s" % (UPLOAD_RATE, DOWNLOAD_RATE)) # Launch the server and sleep for couple of seconds. createthread(launchserver) sleep(3) shim_obj = ShimStackInterface("(RateLimitShim,%s,%s)" % (UPLOAD_RATE, DOWNLOAD_RATE)) try: sockobj = shim_obj.openconnection(SERVER_IP, SERVER_PORT, SERVER_IP, SERVER_PORT + 1, 10) except Exception, err: print "Found error: " + str(err) exitall() msg_to_send = DATA_TO_SEND + END_TAG # --------------------- Testing Upload -------------------------------- cur_data_sent = 0 log("\nStarting to send msg.") starttime = getruntime() while msg_to_send: try: data_sent = sockobj.send(msg_to_send) except SocketWouldBlockError, err: pass else: msg_to_send = msg_to_send[data_sent:] cur_data_sent += data_sent elapsed_time = getruntime() - starttime log("\nTime to upload: %fs. Upload rate: %fbytes/s" % (elapsed_time, len(DATA_TO_SEND + END_TAG)*1.0 / elapsed_time)) log("\nTesting upload rate with 10% error") rate_over_percent = ((len(DATA_TO_SEND + END_TAG)*1.0 / elapsed_time) - UPLOAD_RATE) / UPLOAD_RATE if rate_over_percent > 0.10: log("[ FAIL ]") sys.stdout.flush() exitall() else: log("[ PASS ]") # -------------------------- Testing Download ------------------------------ msg_received = '' log("\nStarting to recv echo msg.") starttime = getruntime() while True: try: data_received = sockobj.recv(RECV_SIZE) except SocketWouldBlockError, err: pass else: msg_received += data_received if END_TAG in data_received: break elapsed_time = getruntime() - starttime sockobj.close() log("\nTime to download: %fs. Download rate: %fbytes/s" % (elapsed_time, len(msg_received)*1.0 / elapsed_time)) log("\nTesting download rate with 10% error") rate_over_percent = ((len(msg_received)*1.0 / elapsed_time) - DOWNLOAD_RATE) / DOWNLOAD_RATE if rate_over_percent > 0.10: log("[ FAIL ]") sys.stdout.flush() exitall() else: log("[ PASS ]") log("\nChecking message received len: ") try: assert(len(msg_received) == len(DATA_TO_SEND + END_TAG)) except AssertionError: log("[ FAIL ]") sys.stdout.flush() exitall() else: log("[ PASS ]")
python
#%%
from pssr import pssr
from speech_recognition import UnknownValueError, RequestError, Recognizer

print('oi')

r = Recognizer()  # recognizes audio, outputs transcript
ps = pssr.PSRecognizer()  # PSRecognizer instance to listen and generate the audio
psmic = pssr.PSMic(nChannels=3)  # ps eye mic array

with psmic as source:
    print('*recording')
    audio = ps.listen(source)
    print('*done recording')

try:
    # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
    print("Google Speech Recognition thinks you said ")
    print(r.recognize_google(audio, language='de-DE', show_all=True))
except UnknownValueError:
    print("Google Speech Recognition could not understand audio")
except RequestError as e:
    print("Could not request results from Google Speech Recognition service; {0}".format(e))
python
from connect_four.envs import TwoPlayerGameEnvVariables
from connect_four.problem.connecting_group_manager import ConnectingGroupManager


class ConnectFourGroupManager(ConnectingGroupManager):
    def __init__(self, env_variables: TwoPlayerGameEnvVariables):
        super().__init__(env_variables, num_to_connect=4)
python
__author__ = 'Felix Simkovic'
__date__ = '2019-05-11'
__license__ = 'MIT License'

import os
import sys

APPLICATION_NAME = 'Pomodoro TaskWarrior'

if sys.platform.startswith('darwin'):
    try:
        from Foundation import NSBundle
        bundle = NSBundle.mainBundle()
        if bundle:
            app_info = bundle.localizedInfoDictionary() or bundle.infoDictionary()
            if app_info:
                app_info['CFBundleName'] = APPLICATION_NAME
    except ImportError:
        pass
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # pylint: disable=C0103,C0111 import argparse import sys from snake.game import PureGame, GameConf from snake.utils import dotdict from snake.rl.coach import Coach from snake.rl.nnet_wrapper import NNetWrapper import logging logging.basicConfig(level=logging.INFO) sys.setrecursionlimit(5001) args = dotdict({ 'lr': 0.001, 'dropout': 0.3, 'epochs': 10, 'batch_size': 64, 'cuda': False, 'num_channels': 128, 'checkpoint': './temp/', 'load_model': False, 'load_folder_file': ('/dev/models/8x100x50','best.pth.tar'), 'numItersForTrainExamplesHistory': 20, 'numIters': 20, 'numEps': 100, # Number of complete self-play games to simulate during a new iteration. 'tempThreshold': 15, # 'updateThreshold': 0.6, # During arena playoff, new neural net will be accepted if threshold or more of games are won. 'maxlenOfQueue': 20000, # Number of game examples to train the neural networks. 'numMCTSSims': 25, # Number of games moves for MCTS to simulate. 'cpuct': 1, }) def main(): logging.info('Loading %s...', PureGame.__name__) game = PureGame(GameConf()) logging.info('Loading %s...', NNetWrapper.__name__) nnet = NNetWrapper(game, args) if args.load_model: logging.info('Loading checkpoint "%s/%s"...', args.load_folder_file) nnet.load_checkpoint(args.load_folder_file[0], args.load_folder_file[1]) else: logging.warning('Not loading a checkpoint!') logging.info('Loading the Coach...') coach = Coach(game, nnet, args) if args.load_model: logging.info("Loading 'trainExamples' from file...") coach.loadTrainExamples() logging.info('Starting the learning process 🎉') coach.learn() if __name__ == "__main__": main()
python
# flake8: noqa # This file is autogenerated by /metadata-ingestion/scripts/avro_codegen.py # Do not modify manually! # fmt: off from ......schema_classes import ChartKeyClass from ......schema_classes import CorpGroupKeyClass from ......schema_classes import CorpUserKeyClass from ......schema_classes import DashboardKeyClass from ......schema_classes import DataFlowKeyClass from ......schema_classes import DataHubPolicyKeyClass from ......schema_classes import DataJobKeyClass from ......schema_classes import DataPlatformKeyClass from ......schema_classes import DataProcessKeyClass from ......schema_classes import DatasetKeyClass from ......schema_classes import GlossaryNodeKeyClass from ......schema_classes import GlossaryTermKeyClass from ......schema_classes import MLFeatureKeyClass from ......schema_classes import MLFeatureTableKeyClass from ......schema_classes import MLModelDeploymentKeyClass from ......schema_classes import MLModelGroupKeyClass from ......schema_classes import MLModelKeyClass from ......schema_classes import MLPrimaryKeyKeyClass from ......schema_classes import SchemaFieldKeyClass from ......schema_classes import TagKeyClass ChartKey = ChartKeyClass CorpGroupKey = CorpGroupKeyClass CorpUserKey = CorpUserKeyClass DashboardKey = DashboardKeyClass DataFlowKey = DataFlowKeyClass DataHubPolicyKey = DataHubPolicyKeyClass DataJobKey = DataJobKeyClass DataPlatformKey = DataPlatformKeyClass DataProcessKey = DataProcessKeyClass DatasetKey = DatasetKeyClass GlossaryNodeKey = GlossaryNodeKeyClass GlossaryTermKey = GlossaryTermKeyClass MLFeatureKey = MLFeatureKeyClass MLFeatureTableKey = MLFeatureTableKeyClass MLModelDeploymentKey = MLModelDeploymentKeyClass MLModelGroupKey = MLModelGroupKeyClass MLModelKey = MLModelKeyClass MLPrimaryKeyKey = MLPrimaryKeyKeyClass SchemaFieldKey = SchemaFieldKeyClass TagKey = TagKeyClass # fmt: on
python
# // Copyright 2016 The go-vgo Project Developers. See the COPYRIGHT # // file at the top-level directory of this distribution and at # // https://github.com/go-vgo/robotgo/blob/master/LICENSE # // # // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or # // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license # // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your # // option. This file may not be copied, modified, or distributed # // except according to those terms. from __future__ import print_function import sys import os from cffi import FFI is_64b = sys.maxsize > 2**32 ffi = FFI() if is_64b: ffi.cdef("typedef long GoInt;\n") else: ffi.cdef("typedef int GoInt;\n") ffi.cdef(""" typedef struct { GoInt x; GoInt y; } GoRInt; typedef struct { char* arr; char* err; } GoStr; char* GetVersion(); void Sleep(GoInt tm); void MSleep(double tm); char* GetPixelColor(GoInt x, GoInt y); char* GetMouseColor(); GoRInt GetScreenSize(); GoRInt GetScaleSize(); void MoveMose(GoInt x, GoInt y); void DargMose(GoInt x, GoInt y, char* btn); void MoveSmooth(GoInt x, GoInt y, double low, double high); GoRInt GetMousePos(); void Click(char* btn, bool double_c); void MoseToggle(char* key, char* btn); void Scroll(GoInt x, GoInt y); char* KeyTap(char* key, char* vals); char* KeyToggle(char* key, char* vals); void TypeStr(char* str, double args); GoStr ReadAll(); char* WriteAll(char* str); void PasteStr(char* str); bool AddEvent(char* p0); void StopEvent(); bool AddEvents(char* p0, char* p1); void End(); bool AddMouse(char* p0, GoInt p1, GoInt p2); bool AddMousePos(GoInt p0, GoInt p1); char* GetTitle(GoInt pid); GoStr FindIds(char* name); GoStr FindName(GoInt pid); GoStr FindNames(); char* ActivePID(GoInt pid); char* ActiveName(char* name); char* Kill(GoInt pid); """) dir = os.path.dirname(__file__) bin = os.path.join(dir, "../robotgo") lib = ffi.dlopen(bin) def ch(s): return s.encode('utf-8') def f_str(cs): return ffi.string(cs) def getVersion(): ver = lib.GetVersion() return f_str(ver) def sleep(tm): lib.Sleep(tm) def MSleep(tm): lib.MSleep(tm) # /* # _______. ______ .______ _______ _______ .__ __. # / | / || _ \ | ____|| ____|| \ | | # | (----`| ,----'| |_) | | |__ | |__ | \| | # \ \ | | | / | __| | __| | . ` | # .----) | | `----.| |\ \----.| |____ | |____ | |\ | # |_______/ \______|| _| `._____||_______||_______||__| \__| # */ def getPixelColor(x, y): color = lib.GetPixelColor(x, y) return f_str(color) def getMouseColor(): color = lib.GetMouseColor() return f_str(color) def getScreenSize(): s = lib.GetScreenSize() return s.x, s.y def getScaleSize(): s = lib.GetScaleSize() return s.x, s.y # /* # .___ ___. ______ __ __ _______. _______ # | \/ | / __ \ | | | | / || ____| # | \ / | | | | | | | | | | (----`| |__ # | |\/| | | | | | | | | | \ \ | __| # | | | | | `--' | | `--' | .----) | | |____ # |__| |__| \______/ \______/ |_______/ |_______| # */ def moveMose(x, y): lib.MoveMose(x, y) def dargMose(x, y, btn="left"): lib.dargMose(x, y, ch(btn)) def moveSmooth(x, y, low=1.0, high=3.0): lib.MoveSmooth(x, y, low, high) def click(btn="left", double_c=False): lib.Click(ch(btn), double_c) def moseToggle(key, btn): lib.moseToggle(ch(key), ch(btn)) def scroll(x, y): lib.Scroll(x, y) # /* # __ ___ ___________ ____ .______ ______ ___ .______ _______ # | |/ / | ____\ \ / / | _ \ / __ \ / \ | _ \ | \ # | ' / | |__ \ \/ / | |_) | | | | | / ^ \ | |_) | | .--. | # | < | __| \_ _/ | _ < | | | | / /_\ \ | / | | | | # | . 
\ | |____ | | | |_) | | `--' | / _____ \ | |\ \----.| '--' | # |__|\__\ |_______| |__| |______/ \______/ /__/ \__\ | _| `._____||_______/ # */ def arr_add(args): arr = "" for i in range(len(args)): if i < len(args)-1: arr += args[i] + "," else: arr += args[i] return arr def keyTap(key, *vals): arr = arr_add(vals) s = lib.KeyTap(ch(key), ch(arr)) return f_str(s) def KeyToggle(key, *vals): arr = arr_add(vals) s = lib.KeyToggle(ch(key), ch(arr)) return f_str(s) def typeStr(s, args=3.0): lib.TypeStr(ch(s), args) def errStr(s): err = str(f_str(s.err)) if err == "b''": return arr(s.arr) return err def readAll(): s = lib.ReadAll() return errStr(s) def writeAll(s): return lib.WriteAll(ch(s)) def pasteStr(s): lib.pasteStr(ch(s)) # /* # .______ __ .___________..___ ___. ___ .______ # | _ \ | | | || \/ | / \ | _ \ # | |_) | | | `---| |----`| \ / | / ^ \ | |_) | # | _ < | | | | | |\/| | / /_\ \ | ___/ # | |_) | | | | | | | | | / _____ \ | | # |______/ |__| |__| |__| |__| /__/ \__\ | _| # */ # /* # ___________ ____ _______ .__ __. .___________. # | ____\ \ / / | ____|| \ | | | | # | |__ \ \/ / | |__ | \| | `---| |----` # | __| \ / | __| | . ` | | | # | |____ \ / | |____ | |\ | | | # |_______| \__/ |_______||__| \__| |__| # */ def addEvent(key): return lib.AddEvent(ch(key)) def end(): lib.End() def addEvents(key, *vals): arr = arr_add(vals) return lib.AddEvents(ch(key), ch(arr)) def end(): lib.End() def addMouse(btn, x=-1, y=-1): return lib.AddMouse(ch(btn), x, y) def addMousePos(x, y): return lib.AddMousePos(x, y) # /* # ____ __ ____ __ .__ __. _______ ______ ____ __ ____ # \ \ / \ / / | | | \ | | | \ / __ \ \ \ / \ / / # \ \/ \/ / | | | \| | | .--. | | | | \ \/ \/ / # \ / | | | . ` | | | | | | | | \ / # \ /\ / | | | |\ | | '--' | `--' | \ /\ / # \__/ \__/ |__| |__| \__| |_______/ \______/ \__/ \__/ # */ def arr(s): st = bytes.decode(f_str(s)) return st.split(' ') def getTitle(pid=-1): s = lib.GetTitle(pid) return f_str(s) def findIds(name): s = lib.FindIds(ch(name)) return errStr(s) def findName(pid): s = lib.FindName(pid) return f_str(s) def findNames(): s = lib.FindNames() return errStr(s) def activePID(pid): err = lib.ActivePID(pid) return f_str(err) def activeName(name): err = lib.ActiveName(ch(name)) return f_str(err) def kill(pid): lib.Kill(pid)
python
from typing import List


class Solution:
    def arrayNesting(self, nums: List[int]) -> int:
        max_length = -1
        visited = [False] * len(nums)

        for i in range(0, len(nums)):
            if visited[i]:
                continue

            start, count = nums[i], 0
            visited[i] = True

            # form the cycle
            while True:
                start = nums[start]
                visited[start] = True
                count += 1
                if start == nums[i]:
                    break

            max_length = max(max_length, count)

        return max_length
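# A quick usage check (not part of the original solution): for the classic
# example below, the longest cycle is {5, 6, 2, 0}, so the expected answer is 4.
if __name__ == "__main__":
    print(Solution().arrayNesting([5, 4, 0, 3, 1, 6, 2]))  # expected: 4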
python
from typing import Dict, List from elasticsearch_dsl.query import Q from elasticsearch_dsl.response import Response from elasticsearch_dsl.response.hit import Hit from elasticsearch_dsl.search import Search from flask_restful import Resource, reqparse from meetup_search.models.group import Group from .argument_validator import date_validator, positive_int_validator class MeetupSearchApi(Resource): def __init__(self): super().__init__() self.parser = reqparse.RequestParser() # query self.parser.add_argument( "query", type=str, required=True, help="Bad query: {error_msg}" ) # pagination self.parser.add_argument( "page", type=positive_int_validator, help="Bad pagination page number: {error_msg}", default=0, ) self.parser.add_argument( "limit", type=int, help="Bad pagination limit: {error_msg}", choices=(5, 10, 25, 100), default=10, ) # sort self.parser.add_argument( "sort", type=str, help="Bad sorting: {error_msg}", ) # load events self.parser.add_argument( "load_events", type=bool, help="Bad sorting: {error_msg}", default=False, ) # event time filter self.parser.add_argument( "event_time_gte", type=date_validator, help="Bad date: {error_msg}", ) self.parser.add_argument( "event_time_lte", type=date_validator, help="Bad date: {error_msg}", ) # geo_distance self.parser.add_argument( "geo_lat", type=float, help="Bad geo latitute: {error_msg}", ) self.parser.add_argument( "geo_lon", type=float, help="Bad geo longitute: {error_msg}", ) self.parser.add_argument( "geo_distance", type=str, help="Bad distance (example: 100km): {error_msg}", ) def put(self) -> dict: """ search for a group in Elasticsearch Returns: dict -- search results """ args = self.parser.parse_args() # init search search: Search = Group.search() search_query: dict = { "bool": { "should": [ {"query_string": {"query": args["query"], "fields": ["*"]}}, { "nested": { "path": "topics", "score_mode": "avg", "query": { "bool": { "must": [ { "query_string": { "query": args["query"], "fields": ["*"], } } ] } }, } }, { "nested": { "path": "events", "score_mode": "avg", "query": { "bool": { "must": [ { "query_string": { "query": args["query"], "fields": ["*"], } } ] } }, } }, ], "must": [], } } # set event time filter if args["event_time_gte"] or args["event_time_lte"]: range_query: dict = {} if args["event_time_gte"]: range_query["gte"] = args["event_time_gte"] if args["event_time_lte"]: range_query["lte"] = args["event_time_lte"] search_query["bool"]["must"].append( { "nested": { "path": "events", "score_mode": "avg", "query": { "bool": {"must": [{"range": {"events.time": range_query}}]} }, } } ) # set geo_distance filter if args["geo_distance"] and args["geo_lat"] and args["geo_lon"]: search_query["bool"]["must"].append( { "nested": { "path": "events", "score_mode": "avg", "query": { "bool": { "must": [ { "geo_distance": { "distance": args["geo_distance"], "events.venue_location": { "lat": args["geo_lat"], "lon": args["geo_lon"], }, } } ] } }, } } ) # pagination strat_entry: int = args["page"] * args["limit"] end_entry: int = strat_entry + args["limit"] search = search[strat_entry:end_entry] # sort if args["sort"]: search = Search().sort(args["sort"]) # execute search search = search.query(Q(search_query)) # set highlight score search.highlight_options(order="score") # load response from elasticsearch results: Response = search.execute() # get response found_groups: List[dict] = [] map_center_lat: float = 0 map_center_lon: float = 0 for group in results.hits: group_dict: dict = {} if isinstance(group, Hit): group_object = 
Group.get_group(urlname=group.to_dict()["urlname"]) group_dict = group_object.to_json_dict(load_events=args["load_events"]) else: group_dict = group.to_json_dict(load_events=args["load_events"]) if "venue_location_average" in group_dict: map_center_lat = ( map_center_lat + group_dict["venue_location_average"]["lat"] ) map_center_lon = ( map_center_lon + group_dict["venue_location_average"]["lon"] ) else: map_center_lat = map_center_lat + group_dict["location"]["lat"] map_center_lon = map_center_lon + group_dict["location"]["lon"] # add group dict to array found_groups.append( {**group_dict,} ) if len(found_groups) > 0: map_center_lat = map_center_lat / len(found_groups) map_center_lon = map_center_lon / len(found_groups) return { "results": found_groups, "hits": results.hits.total["value"], "map_center": {"lat": map_center_lat, "lon": map_center_lon}, } class MeetupSearchSuggestApi(Resource): def __init__(self): super().__init__() self.parser = reqparse.RequestParser() # query self.parser.add_argument( "query", type=str, required=True, help="Bad query: {error_msg}" ) def put(self) -> Dict[str, List[str]]: """ Get Suggestion for query term in Group name Returns: Dict[str, List[str]] -- a list to 5 suggestions """ args = self.parser.parse_args() # run suggest query search: Search = Group.search() search = search.suggest( "suggestion", args["query"], completion={"field": "name_suggest"}, ) response: Response = search.execute() # get suggestion suggestion: List[str] = [] for result in response.suggest.suggestion: for option in result.options: suggestion.append(option.text) return {"suggestions": suggestion}
python
from a10sdk.common.A10BaseClass import A10BaseClass class Crl(A10BaseClass): """This class does not support CRUD Operations please use parent. :param crl_sec: {"minLength": 1, "maxLength": 255, "type": "string", "description": "Secondary CRL File Name or URL (http://www.example.com/ocsp) (only .der filetypes)", "format": "string-rlx"} :param crl_pri: {"minLength": 1, "maxLength": 255, "type": "string", "description": "Primary CRL File Name or URL (http://www.example.com/ocsp) (only .der filetypes)", "format": "string-rlx"} :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py` """ def __init__(self, **kwargs): self.ERROR_MSG = "" self.b_key = "crl" self.DeviceProxy = "" self.crl_sec = "" self.crl_pri = "" for keys, value in kwargs.items(): setattr(self,keys, value) class Ocsp(A10BaseClass): """This class does not support CRUD Operations please use parent. :param ocsp_pri: {"minLength": 1, "maxLength": 31, "type": "string", "description": "Primary OCSP Authentication Server", "format": "string"} :param ocsp_sec: {"minLength": 1, "maxLength": 31, "type": "string", "description": "Secondary OCSP Authentication Server", "format": "string"} :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py` """ def __init__(self, **kwargs): self.ERROR_MSG = "" self.b_key = "ocsp" self.DeviceProxy = "" self.ocsp_pri = "" self.ocsp_sec = "" for keys, value in kwargs.items(): setattr(self,keys, value) class Revocation(A10BaseClass): """Class Description:: IPsec VPN revocation settings. Class revocation supports CRUD Operations and inherits from `common/A10BaseClass`. This class is the `"PARENT"` class for this module.` :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"} :param ca: {"description": "Certificate Authority file name", "format": "string", "minLength": 1, "optional": true, "maxLength": 31, "type": "string"} :param name: {"description": "Revocation name", "format": "string", "minLength": 1, "optional": false, "maxLength": 31, "type": "string"} :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py` URL for this object:: `https://<Hostname|Ip address>//axapi/v3/vpn/revocation/{name}`. """ def __init__(self, **kwargs): self.ERROR_MSG = "" self.required = [ "name"] self.b_key = "revocation" self.a10_url="/axapi/v3/vpn/revocation/{name}" self.DeviceProxy = "" self.uuid = "" self.ca = "" self.name = "" self.crl = {} self.ocsp = {} for keys, value in kwargs.items(): setattr(self,keys, value)
python
""" Noop migration to test rollback """ from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('oauth_dispatch', '0010_noop_migration_to_test_rollback'), ] operations = [ migrations.RunSQL(migrations.RunSQL.noop, reverse_sql=migrations.RunSQL.noop) ]
python
from senscritiquescraper.utils import survey_utils


def test_get_category_from_survey(survey_movie):
    if survey_utils.get_category_from_survey(survey_movie) != "films":
        raise AssertionError()


def test_get_rows_from_survey(survey_movie):
    rows = survey_utils.get_rows_from_survey(survey_movie)
    if len(rows) != 15:
        print(len(rows))
        raise AssertionError()


def test_get_infos_from_survey(survey_movie):
    category = survey_utils.get_category_from_survey(survey_movie)
    infos = survey_utils.get_survey_infos(survey_movie, category)
    if len(infos) != 15:
        raise AssertionError()
    if infos[0]["Title"] != "La Haine":
        raise AssertionError()
python
from jira.exceptions import JIRAError
from tests.conftest import JiraTestCase


class VersionTests(JiraTestCase):
    def test_create_version(self):
        name = "new version " + self.project_b
        desc = "test version of " + self.project_b
        release_date = "2015-03-11"
        version = self.jira.create_version(
            name, self.project_b, releaseDate=release_date, description=desc
        )
        self.assertEqual(version.name, name)
        self.assertEqual(version.description, desc)
        self.assertEqual(version.releaseDate, release_date)
        version.delete()

    def test_create_version_with_project_obj(self):
        project = self.jira.project(self.project_b)
        version = self.jira.create_version(
            "new version 2",
            project,
            releaseDate="2015-03-11",
            description="test version!",
        )
        self.assertEqual(version.name, "new version 2")
        self.assertEqual(version.description, "test version!")
        self.assertEqual(version.releaseDate, "2015-03-11")
        version.delete()

    def test_update_version(self):
        version = self.jira.create_version(
            "new updated version 1",
            self.project_b,
            releaseDate="2015-03-11",
            description="new to be updated!",
        )
        version.update(name="new updated version name 1", description="new updated!")
        self.assertEqual(version.name, "new updated version name 1")
        self.assertEqual(version.description, "new updated!")

        v = self.jira.version(version.id)
        self.assertEqual(v, version)
        self.assertEqual(v.id, version.id)
        version.delete()

    def test_delete_version(self):
        version_str = "test_delete_version:" + self.test_manager.jid
        version = self.jira.create_version(
            version_str,
            self.project_b,
            releaseDate="2015-03-11",
            description="not long for this world",
        )
        version.delete()
        self.assertRaises(JIRAError, self.jira.version, version.id)
python
# -*- coding: utf-8 -*-
import logging

from _pytest.main import EXIT_OK, EXIT_NOTESTSCOLLECTED, EXIT_INTERRUPTED  # NOQA


def assert_fnmatch_lines(output, matches):
    if isinstance(output, str):
        output = output.split('\n')
    missing = []
    for match in matches:
        if match not in output:
            missing.append(match)
    assert len(missing) == 0, "The following matches were not found:\n - %s" % '\n - '.join(missing)


def test_debug_logging(testdir, capsys):
    '''verifies that pytest-github emits DEBUG log messages from its plugin hooks'''

    # setup logging
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    # create stderr StreamHandler
    sh = logging.StreamHandler()
    sh.setLevel(logging.DEBUG)

    # create formatter and add it to the handlers
    formatter = logging.Formatter('%(levelname)s - %(message)s')
    sh.setFormatter(formatter)

    # add handler to logger
    logger.addHandler(sh)

    src = """\
def test_foo():
    pass
"""
    result = testdir.inline_runsource(src)

    # Assert py.test exit code
    assert result.ret == EXIT_OK

    (stdout, stderr) = capsys.readouterr()
    fnmatch_lines = [
        'DEBUG - pytest_cmdline_main() called',
        'DEBUG - pytest_configure() called',
        'DEBUG - GitHubPytestPlugin initialized',
        'DEBUG - pytest_runtest_setup() called',
    ]

    # Assert stderr logging
    assert_fnmatch_lines(stderr, fnmatch_lines)
python
#!/usr/bin/env python # -*- coding:utf-8 -*- # Author: Deformable ConvNets v2: More Deformable, Better Results # Modified by: RainbowSecret([email protected]) # Select Seg Model for img segmentation. import pdb import torch import torch.nn as nn import torch.utils.checkpoint as cp from collections import OrderedDict from lib.models.tools.module_helper import ModuleHelper from lib.extensions.dcn import ( ModulatedDeformConv, ModulatedDeformRoIPoolingPack, DeformConv, ) def conv3x3(in_planes, out_planes, stride=1, dilation=1): "3x3 convolution with padding" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False, ) class BasicBlock(nn.Module): expansion = 1 def __init__( self, inplanes, planes, stride=1, dilation=1, downsample=None, style="pytorch", with_cp=False, bn_type=None, ): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride, dilation) self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes) self.relu = nn.ReLU(inplace=False) self.relu_in = nn.ReLU(inplace=True) self.conv2 = conv3x3(planes, planes) self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes) self.downsample = downsample self.stride = stride self.dilation = dilation assert not with_cp def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) out = self.conv2(out) out = self.bn2(out) if self.downsample is not None: residual = self.downsample(x) out = out + residual out = self.relu_in(out) return out class Bottleneck(nn.Module): expansion = 4 def __init__( self, inplanes, planes, stride=1, dilation=1, downsample=None, style="pytorch", with_cp=False, with_dcn=False, num_deformable_groups=1, dcn_offset_lr_mult=0.1, use_regular_conv_on_stride=False, use_modulated_dcn=False, bn_type=None, ): """Bottleneck block. If style is "pytorch", the stride-two layer is the 3x3 conv layer, if it is "caffe", the stride-two layer is the first 1x1 conv layer. 
""" super(Bottleneck, self).__init__() conv1_stride = 1 conv2_stride = stride self.conv1 = nn.Conv2d( inplanes, planes, kernel_size=1, stride=conv1_stride, bias=False ) self.with_dcn = with_dcn self.use_modulated_dcn = use_modulated_dcn if use_regular_conv_on_stride and stride > 1: self.with_dcn = False if self.with_dcn: print( "--->> use {}dcn in block where c_in={} and c_out={}".format( "modulated " if self.use_modulated_dcn else "", planes, inplanes ) ) if use_modulated_dcn: self.conv_offset_mask = nn.Conv2d( planes, num_deformable_groups * 27, kernel_size=3, stride=conv2_stride, padding=dilation, dilation=dilation, ) self.conv_offset_mask.lr_mult = dcn_offset_lr_mult self.conv_offset_mask.zero_init = True self.conv2 = ModulatedDeformConv( planes, planes, 3, stride=conv2_stride, padding=dilation, dilation=dilation, deformable_groups=num_deformable_groups, no_bias=True, ) else: self.conv2_offset = nn.Conv2d( planes, num_deformable_groups * 18, kernel_size=3, stride=conv2_stride, padding=dilation, dilation=dilation, ) self.conv2_offset.lr_mult = dcn_offset_lr_mult self.conv2_offset.zero_init = True self.conv2 = DeformConv( planes, planes, (3, 3), stride=conv2_stride, padding=dilation, dilation=dilation, num_deformable_groups=num_deformable_groups, ) else: self.conv2 = nn.Conv2d( planes, planes, kernel_size=3, stride=conv2_stride, padding=dilation, dilation=dilation, bias=False, ) self.bn1 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes) self.bn2 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes) self.conv3 = nn.Conv2d( planes, planes * self.expansion, kernel_size=1, bias=False ) self.bn3 = ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes * self.expansion) self.relu = nn.ReLU(inplace=False) self.relu_in = nn.ReLU(inplace=True) self.downsample = downsample self.stride = stride self.dilation = dilation self.with_cp = with_cp def forward(self, x): def _inner_forward(x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.relu(out) if self.with_dcn: if self.use_modulated_dcn: offset_mask = self.conv_offset_mask(out) offset1, offset2, mask_raw = torch.chunk(offset_mask, 3, dim=1) offset = torch.cat((offset1, offset2), dim=1) mask = torch.sigmoid(mask_raw) out = self.conv2(out, offset, mask) else: offset = self.conv2_offset(out) # add bias to the offset to solve the bug of dilation rates within dcn. 
dilation = self.conv2.dilation[0] bias_w = torch.cuda.FloatTensor( [[-1, 0, 1], [-1, 0, 1], [-1, 0, 1]] ) * (dilation - 1) bias_h = bias_w.permute(1, 0) bias_w.requires_grad = False bias_h.requires_grad = False offset += torch.cat([bias_h.reshape(-1), bias_w.reshape(-1)]).view( 1, -1, 1, 1 ) out = self.conv2(out, offset) else: out = self.conv2(out) out = self.bn2(out) out = self.relu(out) out = self.conv3(out) out = self.bn3(out) if self.downsample is not None: residual = self.downsample(x) out = out + residual return out if self.with_cp and x.requires_grad: out = cp.checkpoint(_inner_forward, x) else: out = _inner_forward(x) out = self.relu_in(out) return out def make_res_layer( block, inplanes, planes, blocks, stride=1, dilation=1, style="pytorch", with_cp=False, with_dcn=False, dcn_offset_lr_mult=0.1, use_regular_conv_on_stride=False, use_modulated_dcn=False, bn_type=None, ): downsample = None if stride != 1 or inplanes != planes * block.expansion: downsample = nn.Sequential( nn.Conv2d( inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False, ), ModuleHelper.BatchNorm2d(bn_type=bn_type)(planes * block.expansion), ) layers = [] layers.append( block( inplanes, planes, stride, dilation, downsample, style=style, with_cp=with_cp, with_dcn=with_dcn, dcn_offset_lr_mult=dcn_offset_lr_mult, use_regular_conv_on_stride=use_regular_conv_on_stride, use_modulated_dcn=use_modulated_dcn, bn_type=bn_type, ) ) inplanes = planes * block.expansion for i in range(1, blocks): layers.append( block( inplanes, planes, 1, dilation, style=style, with_cp=with_cp, with_dcn=with_dcn, dcn_offset_lr_mult=dcn_offset_lr_mult, use_regular_conv_on_stride=use_regular_conv_on_stride, use_modulated_dcn=use_modulated_dcn, bn_type=bn_type, ) ) return nn.Sequential(*layers) class DCNResNet(nn.Module): """ResNet backbone. Args: depth (int): Depth of resnet, from {18, 34, 50, 101, 152}. num_stages (int): Resnet stages, normally 4. strides (Sequence[int]): Strides of the first block of each stage. dilations (Sequence[int]): Dilation of each stage. out_indices (Sequence[int]): Output from which stages. style (str): `pytorch` or `caffe`. If set to "pytorch", the stride-two layer is the 3x3 conv layer, otherwise the stride-two layer is the first 1x1 conv layer. frozen_stages (int): Stages to be frozen (all param fixed). -1 means not freezing any parameters. bn_eval (bool): Whether to set BN layers to eval mode, namely, freeze running stats (mean and var). bn_frozen (bool): Whether to freeze weight and bias of BN layers. with_cp (bool): Use checkpoint or not. Using checkpoint will save some memory while slowing down the training speed. 
""" def __init__(self, block, layers, deep_base=True, bn_type=None): super(DCNResNet, self).__init__() # if depth not in self.arch_settings: # raise KeyError('invalid depth {} for resnet'.format(depth)) # assert num_stages >= 1 and num_stages <= 4 # block, stage_blocks = self.arch_settings[depth] # stage_blocks = stage_blocks[:num_stages] # assert len(strides) == len(dilations) == num_stages # assert max(out_indices) < num_stages self.style = "pytorch" self.inplanes = 128 if deep_base else 64 if deep_base: self.resinit = nn.Sequential( OrderedDict( [ ( "conv1", nn.Conv2d( 3, 64, kernel_size=3, stride=2, padding=1, bias=False ), ), ("bn1", ModuleHelper.BatchNorm2d(bn_type=bn_type)(64)), ("relu1", nn.ReLU(inplace=False)), ( "conv2", nn.Conv2d( 64, 64, kernel_size=3, stride=1, padding=1, bias=False ), ), ("bn2", ModuleHelper.BatchNorm2d(bn_type=bn_type)(64)), ("relu2", nn.ReLU(inplace=False)), ( "conv3", nn.Conv2d( 64, 128, kernel_size=3, stride=1, padding=1, bias=False ), ), ( "bn3", ModuleHelper.BatchNorm2d(bn_type=bn_type)(self.inplanes), ), ("relu3", nn.ReLU(inplace=False)), ] ) ) else: self.resinit = nn.Sequential( OrderedDict( [ ( "conv1", nn.Conv2d( 3, 64, kernel_size=7, stride=2, padding=3, bias=False ), ), ( "bn1", ModuleHelper.BatchNorm2d(bn_type=bn_type)(self.inplanes), ), ("relu1", nn.ReLU(inplace=False)), ] ) ) self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = make_res_layer( block, self.inplanes, 64, layers[0], style=self.style, with_dcn=False, use_modulated_dcn=False, bn_type=bn_type, ) self.layer2 = make_res_layer( block, 256, 128, layers[1], stride=2, style=self.style, with_dcn=False, use_modulated_dcn=False, bn_type=bn_type, ) self.layer3 = make_res_layer( block, 512, 256, layers[2], stride=2, style=self.style, with_dcn=True, use_modulated_dcn=False, bn_type=bn_type, ) self.layer4 = make_res_layer( block, 1024, 512, layers[3], stride=2, style=self.style, with_dcn=True, use_modulated_dcn=False, bn_type=bn_type, ) def forward(self, x): x = self.resinit(x) x = self.maxpool(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) return x class DCNResNetModels(object): def __init__(self, configer): self.configer = configer def deepbase_dcn_resnet50(self, **kwargs): """Constructs a ResNet-50 model. Args: pretrained (bool): If True, returns a model pre-trained on Places """ model = DCNResNet( Bottleneck, [3, 4, 6, 3], deep_base=True, bn_type=self.configer.get("network", "bn_type"), **kwargs ) model = ModuleHelper.load_model( model, all_match=False, pretrained=self.configer.get("network", "pretrained"), network="dcnet", ) return model def deepbase_dcn_resnet101(self, **kwargs): """Constructs a ResNet-101 model. Args: pretrained (bool): If True, returns a model pre-trained on Places """ model = DCNResNet( Bottleneck, [3, 4, 23, 3], deep_base=True, bn_type=self.configer.get("network", "bn_type"), **kwargs ) model = ModuleHelper.load_model( model, all_match=False, pretrained=self.configer.get("network", "pretrained"), network="dcnet", ) return model
python
class LoggerError(Exception):
    """
    Base class for all logger error classes.
    All exceptions raised by the benchmark runner library should inherit from this class.
    """
    pass


class MethodError(LoggerError):
    """
    This class is for method errors
    """
    def __init__(self, method_name, exception):
        self.message = f'method error: {method_name}, exception: {exception}'
        super(MethodError, self).__init__(self.message)
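# --- Usage sketch (editor's addition) ---
# Wrapping an arbitrary failure into a MethodError so callers only need to catch
# LoggerError subclasses; the method name used here is made up for illustration.
if __name__ == "__main__":
    try:
        raise ValueError("bad input")
    except Exception as err:
        wrapped = MethodError("run_benchmark", err)
        print(wrapped)  # -> method error: run_benchmark, exception: bad input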
python
# Generated by Django 3.1.7 on 2021-12-24 18:34

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('tracker', '0005_movie_poster'),
    ]

    operations = [
        migrations.AddField(
            model_name='movie',
            name='cast',
            field=models.CharField(default='Not Specified', max_length=64),
        ),
    ]
python
"""Coding Quiz: Check for Prime Numbers Prime numbers are whole numbers that have only two factors: 1 and the number itself. The first few prime numbers are 2, 3, 5, 7. For instance, 6 has four factors: 1, 2, 3, 6. 1 X 6 = 6 2 X 3 = 6 So we know 6 is not a prime number. In the following coding environment, write code to check if the numbers provided in the list check_prime are prime numbers. If the numbers are prime, the code should print "[number] is a prime number." If the number is NOT a prime number, it should print "[number] is not a prime number", and a factor of that number, other than 1 and the number itself: "[factor] is a factor of [number]". Example output: 7 IS a prime number 26 is NOT a prime number, because 2 is a factor of 26 """ check_prime = [26, 37, 39, 51, 53, 57, 73, 79, 85] # iterate through the check_prime list for num in check_prime: # search for factors, iterating through numbers ranging from 2 to the number itself for i in range(2, num): # number is not prime if module is 0 if (num % i) == 0: print('{} is not a prime number, because {} is a factor of {}'.format(num, i, num)) break # otherwise keep checking until we've searched all possible factors, and then declare it prime if i == num -1: print('{} is a prime number'.format(num)) """ Logic for our solution: We loop through each number in the check_prime list. Create a "search-for-factors" loop beginning at 2, and continuing up to the (number-1) Use a conditional statement with the modulo operator to check if our number when divided by the possible factor yields any remainder besides 0. If we ever find one factor, we can declare that the number is not prime, and state the factor we found. Then we can break out of the loop for that number. If we get up to the (number - 1) and haven't broken out of the loop, then we can declare that the number is prime. """
python
import timm import torchvision.models as models """" timm_models = [ 'adv_inception_v3', 'cait_m36_384', 'cait_m48_448', 'cait_s24_224', 'cait_s24_384', 'cait_s36_384', 'cait_xs24_384', 'cait_xxs24_224', 'cait_xxs24_384', 'cait_xxs36_224', 'cait_xxs36_384', 'coat_lite_mini', 'coat_lite_small', 'coat_lite_tiny', 'coat_mini', 'coat_tiny', 'convit_base', 'convit_small', 'convit_tiny', 'cspdarknet53', 'cspresnet50', 'cspresnext50', 'deit_base_distilled_patch16_224', 'deit_base_distilled_patch16_384', 'deit_base_patch16_224', 'deit_base_patch16_384', 'deit_small_distilled_patch16_224', 'deit_small_patch16_224', 'deit_tiny_distilled_patch16_224', 'deit_tiny_patch16_224', 'densenet121', 'densenet161', 'densenet169', 'densenet201', 'densenetblur121d', 'dla34', 'dla46_c', 'dla46x_c', 'dla60', 'dla60_res2net', 'dla60_res2next', 'dla60x', 'dla60x_c', 'dla102', 'dla102x', 'dla102x2', 'dla169', 'dm_nfnet_f0', 'dm_nfnet_f1', 'dm_nfnet_f2', 'dm_nfnet_f3', 'dm_nfnet_f4', 'dm_nfnet_f5', 'dm_nfnet_f6', 'dpn68', 'dpn68b', 'dpn92', 'dpn98', 'dpn107', 'dpn131', 'eca_nfnet_l0', 'eca_nfnet_l1', 'eca_nfnet_l2', 'ecaresnet26t', 'ecaresnet50d', 'ecaresnet50d_pruned', 'ecaresnet50t', 'ecaresnet101d', 'ecaresnet101d_pruned', 'ecaresnet269d', 'ecaresnetlight', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b1_pruned', 'efficientnet_b2', 'efficientnet_b2_pruned', 'efficientnet_b3', 'efficientnet_b3_pruned', 'efficientnet_b4', 'efficientnet_el', 'efficientnet_el_pruned', 'efficientnet_em', 'efficientnet_es', 'efficientnet_es_pruned', 'efficientnet_lite0', 'efficientnetv2_rw_m', 'efficientnetv2_rw_s', 'ens_adv_inception_resnet_v2', 'ese_vovnet19b_dw', 'ese_vovnet39b', 'fbnetc_100', 'gernet_l', 'gernet_m', 'gernet_s', 'ghostnet_100', 'gluon_inception_v3', 'gluon_resnet18_v1b', 'gluon_resnet34_v1b', 'gluon_resnet50_v1b', 'gluon_resnet50_v1c', 'gluon_resnet50_v1d', 'gluon_resnet50_v1s', 'gluon_resnet101_v1b', 'gluon_resnet101_v1c', 'gluon_resnet101_v1d', 'gluon_resnet101_v1s', 'gluon_resnet152_v1b', 'gluon_resnet152_v1c', 'gluon_resnet152_v1d', 'gluon_resnet152_v1s', 'gluon_resnext50_32x4d', 'gluon_resnext101_32x4d', 'gluon_resnext101_64x4d', 'gluon_senet154', 'gluon_seresnext50_32x4d', 'gluon_seresnext101_32x4d', 'gluon_seresnext101_64x4d', 'gluon_xception65', 'gmixer_24_224', 'hardcorenas_a', 'hardcorenas_b', 'hardcorenas_c', 'hardcorenas_d', 'hardcorenas_e', 'hardcorenas_f', 'hrnet_w18', 'hrnet_w18_small', 'hrnet_w18_small_v2', 'hrnet_w30', 'hrnet_w32', 'hrnet_w40', 'hrnet_w44', 'hrnet_w48', 'hrnet_w64', 'ig_resnext101_32x8d', 'ig_resnext101_32x16d', 'ig_resnext101_32x32d', 'ig_resnext101_32x48d', 'inception_resnet_v2', 'inception_v3', 'inception_v4', 'legacy_senet154', 'legacy_seresnet18', 'legacy_seresnet34', 'legacy_seresnet50', 'legacy_seresnet101', 'legacy_seresnet152', 'legacy_seresnext26_32x4d', 'legacy_seresnext50_32x4d', 'legacy_seresnext101_32x4d', 'levit_128', 'levit_128s', 'levit_192', 'levit_256', 'levit_384', 'mixer_b16_224', 'mixer_b16_224_in21k', 'mixer_b16_224_miil', 'mixer_b16_224_miil_in21k', 'mixer_l16_224', 'mixer_l16_224_in21k', 'mixnet_l', 'mixnet_m', 'mixnet_s', 'mixnet_xl', 'mnasnet_100', 'mobilenetv2_100', 'mobilenetv2_110d', 'mobilenetv2_120d', 'mobilenetv2_140', 'mobilenetv3_large_100', 'mobilenetv3_large_100_miil', 'mobilenetv3_large_100_miil_in21k', 'mobilenetv3_rw', 'nasnetalarge', 'nf_regnet_b1', 'nf_resnet50', 'nfnet_l0', 'pit_b_224', 'pit_b_distilled_224', 'pit_s_224', 'pit_s_distilled_224', 'pit_ti_224', 'pit_ti_distilled_224', 'pit_xs_224', 'pit_xs_distilled_224', 'pnasnet5large', 
'regnetx_002', 'regnetx_004', 'regnetx_006', 'regnetx_008', 'regnetx_016', 'regnetx_032', 'regnetx_040', 'regnetx_064', 'regnetx_080', 'regnetx_120', 'regnetx_160', 'regnetx_320', 'regnety_002', 'regnety_004', 'regnety_006', 'regnety_008', 'regnety_016', 'regnety_032', 'regnety_040', 'regnety_064', 'regnety_080', 'regnety_120', 'regnety_160', 'regnety_320', 'repvgg_a2', 'repvgg_b0', 'repvgg_b1', 'repvgg_b1g4', 'repvgg_b2', 'repvgg_b2g4', 'repvgg_b3', 'repvgg_b3g4', 'res2net50_14w_8s', 'res2net50_26w_4s', 'res2net50_26w_6s', 'res2net50_26w_8s', 'res2net50_48w_2s', 'res2net101_26w_4s', 'res2next50', 'resmlp_12_224', 'resmlp_12_distilled_224', 'resmlp_24_224', 'resmlp_24_distilled_224', 'resmlp_36_224', 'resmlp_36_distilled_224', 'resmlp_big_24_224', 'resmlp_big_24_224_in22ft1k', 'resmlp_big_24_distilled_224', 'resnest14d', 'resnest26d', 'resnest50d', 'resnest50d_1s4x24d', 'resnest50d_4s2x40d', 'resnest101e', 'resnest200e', 'resnest269e', 'resnet18', 'resnet18d', 'resnet26', 'resnet26d', 'resnet34', 'resnet34d', 'resnet50', 'resnet50d', 'resnet51q', 'resnet101d', 'resnet152d', 'resnet200d', 'resnetblur50', 'resnetrs50', 'resnetrs101', 'resnetrs152', 'resnetrs200', 'resnetrs270', 'resnetrs350', 'resnetrs420', 'resnetv2_50x1_bit_distilled', 'resnetv2_50x1_bitm', 'resnetv2_50x1_bitm_in21k', 'resnetv2_50x3_bitm', 'resnetv2_50x3_bitm_in21k', 'resnetv2_101x1_bitm', 'resnetv2_101x1_bitm_in21k', 'resnetv2_101x3_bitm', 'resnetv2_101x3_bitm_in21k', 'resnetv2_152x2_bit_teacher', 'resnetv2_152x2_bit_teacher_384', 'resnetv2_152x2_bitm', 'resnetv2_152x2_bitm_in21k', 'resnetv2_152x4_bitm', 'resnetv2_152x4_bitm_in21k', 'resnext50_32x4d', 'resnext50d_32x4d', 'resnext101_32x8d', 'rexnet_100', 'rexnet_130', 'rexnet_150', 'rexnet_200', 'selecsls42b', 'selecsls60', 'selecsls60b', 'semnasnet_100', 'seresnet50', 'seresnet152d', 'seresnext26d_32x4d', 'seresnext26t_32x4d', 'seresnext50_32x4d', 'skresnet18', 'skresnet34', 'skresnext50_32x4d', 'spnasnet_100', 'ssl_resnet18', 'ssl_resnet50', 'ssl_resnext50_32x4d', 'ssl_resnext101_32x4d', 'ssl_resnext101_32x8d', 'ssl_resnext101_32x16d', 'swin_base_patch4_window7_224', 'swin_base_patch4_window7_224_in22k', 'swin_base_patch4_window12_384', 'swin_base_patch4_window12_384_in22k', 'swin_large_patch4_window7_224', 'swin_large_patch4_window7_224_in22k', 'swin_large_patch4_window12_384', 'swin_large_patch4_window12_384_in22k', 'swin_small_patch4_window7_224', 'swin_tiny_patch4_window7_224', 'swsl_resnet18', 'swsl_resnet50', 'swsl_resnext50_32x4d', 'swsl_resnext101_32x4d', 'swsl_resnext101_32x8d', 'swsl_resnext101_32x16d', 'tf_efficientnet_b0', 'tf_efficientnet_b0_ap', 'tf_efficientnet_b0_ns', 'tf_efficientnet_b1', 'tf_efficientnet_b1_ap', 'tf_efficientnet_b1_ns', 'tf_efficientnet_b2', 'tf_efficientnet_b2_ap', 'tf_efficientnet_b2_ns', 'tf_efficientnet_b3', 'tf_efficientnet_b3_ap', 'tf_efficientnet_b3_ns', 'tf_efficientnet_b4', 'tf_efficientnet_b4_ap', 'tf_efficientnet_b4_ns', 'tf_efficientnet_b5', 'tf_efficientnet_b5_ap', 'tf_efficientnet_b5_ns', 'tf_efficientnet_b6', 'tf_efficientnet_b6_ap', 'tf_efficientnet_b6_ns', 'tf_efficientnet_b7', 'tf_efficientnet_b7_ap', 'tf_efficientnet_b7_ns', 'tf_efficientnet_b8', 'tf_efficientnet_b8_ap', 'tf_efficientnet_cc_b0_4e', 'tf_efficientnet_cc_b0_8e', 'tf_efficientnet_cc_b1_8e', 'tf_efficientnet_el', 'tf_efficientnet_em', 'tf_efficientnet_es', 'tf_efficientnet_l2_ns', 'tf_efficientnet_l2_ns_475', 'tf_efficientnet_lite0', 'tf_efficientnet_lite1', 'tf_efficientnet_lite2', 'tf_efficientnet_lite3', 'tf_efficientnet_lite4', 'tf_efficientnetv2_b0', 
'tf_efficientnetv2_b1', 'tf_efficientnetv2_b2', 'tf_efficientnetv2_b3', 'tf_efficientnetv2_l', 'tf_efficientnetv2_l_in21ft1k', 'tf_efficientnetv2_l_in21k', 'tf_efficientnetv2_m', 'tf_efficientnetv2_m_in21ft1k', 'tf_efficientnetv2_m_in21k', 'tf_efficientnetv2_s', 'tf_efficientnetv2_s_in21ft1k', 'tf_efficientnetv2_s_in21k', 'tf_inception_v3', 'tf_mixnet_l', 'tf_mixnet_m', 'tf_mixnet_s', 'tf_mobilenetv3_large_075', 'tf_mobilenetv3_large_100', 'tf_mobilenetv3_large_minimal_100', 'tf_mobilenetv3_small_075', 'tf_mobilenetv3_small_100', 'tf_mobilenetv3_small_minimal_100', 'tnt_s_patch16_224', 'tresnet_l', 'tresnet_l_448', 'tresnet_m', 'tresnet_m_448', 'tresnet_m_miil_in21k', 'tresnet_xl', 'tresnet_xl_448', 'tv_densenet121', 'tv_resnet34', 'tv_resnet50', 'tv_resnet101', 'tv_resnet152', 'tv_resnext50_32x4d', 'twins_pcpvt_base', 'twins_pcpvt_large', 'twins_pcpvt_small', 'twins_svt_base', 'twins_svt_large', 'twins_svt_small', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn', 'visformer_small', 'vit_base_patch16_224', 'vit_base_patch16_224_in21k', 'vit_base_patch16_224_miil', 'vit_base_patch16_224_miil_in21k', 'vit_base_patch16_384', 'vit_base_patch32_224', 'vit_base_patch32_224_in21k', 'vit_base_patch32_384', 'vit_base_r50_s16_224_in21k', 'vit_base_r50_s16_384', 'vit_huge_patch14_224_in21k', 'vit_large_patch16_224', 'vit_large_patch16_224_in21k', 'vit_large_patch16_384', 'vit_large_patch32_224_in21k', 'vit_large_patch32_384', 'vit_large_r50_s32_224', 'vit_large_r50_s32_224_in21k', 'vit_large_r50_s32_384', 'vit_small_patch16_224', 'vit_small_patch16_224_in21k', 'vit_small_patch16_384', 'vit_small_patch32_224', 'vit_small_patch32_224_in21k', 'vit_small_patch32_384', 'vit_small_r26_s32_224', 'vit_small_r26_s32_224_in21k', 'vit_small_r26_s32_384', 'vit_tiny_patch16_224', 'vit_tiny_patch16_224_in21k', 'vit_tiny_patch16_384', 'vit_tiny_r_s16_p8_224', 'vit_tiny_r_s16_p8_224_in21k', 'vit_tiny_r_s16_p8_384', 'wide_resnet50_2', 'wide_resnet101_2', 'xception', 'xception41', 'xception65', 'xception71'] """ timm_models = timm.list_models(pretrained=True) torchvison_models = sorted(name for name in models.__dict__ if name.islower() and not name.startswith("__") and callable(models.__dict__[name])) _all__ = ['get_model', 'get_model_list'] def get_model(name, **kwargs): """Returns a pre-defined model by name Parameters ---------- name : str Name of the model. pretrained : bool Whether to load the pretrained weights for model. root : str, default '~/.encoding/models' Location for keeping the model parameters. Returns ------- Module: The model. """ name = name.lower() if name in timm_models: net = timm.create_model(name, **kwargs) # elif name in torchvison_models: # net = models.__dict__[name](**kwargs) else: raise ValueError('%s\n\t%s' % (str(name), '\n\t'.join(sorted(timm_models)))) return net def get_model_list(): """Get the entire list of model names in model_zoo. Returns ------- list of str Entire list of model names in model_zoo. """ return list(timm_models) # + list(torchvison_models) if __name__ == '__main__': # models = get_model_list() # print(models) net = get_model("efficientnet_b1", pretrained=False) print(net)
python
# # Copyright (c) 2013 Juniper Networks, Inc. All rights reserved. # """ Service monitor to instantiate/scale/monitor services like firewall, LB, ... """ import sys reload(sys) sys.setdefaultencoding('UTF8') import gevent from gevent import monkey monkey.patch_all(thread=not 'unittest' in sys.modules) from cfgm_common.zkclient import ZookeeperClient import requests import ConfigParser import cStringIO import argparse import signal import random import hashlib import os import logging import logging.handlers import cfgm_common from cfgm_common import importutils from cfgm_common import svc_info from cfgm_common import vnc_cgitb from cfgm_common.utils import cgitb_hook from cfgm_common.vnc_amqp import VncAmqpHandle from cfgm_common.exceptions import ResourceExhaustionError from vnc_api.utils import AAA_MODE_VALID_VALUES from config_db import * from pysandesh.sandesh_base import Sandesh, SandeshSystem, SandeshConfig from pysandesh.gen_py.sandesh.ttypes import SandeshLevel from pysandesh.gen_py.process_info.ttypes import ConnectionStatus from sandesh_common.vns.ttypes import Module from vnc_api.vnc_api import * from agent_manager import AgentManager from db import ServiceMonitorDB from logger import ServiceMonitorLogger from module_logger import ServiceMonitorModuleLogger from loadbalancer_agent import LoadbalancerAgent from port_tuple import PortTupleAgent from snat_agent import SNATAgent from reaction_map import REACTION_MAP try: from novaclient import exceptions as nc_exc except ImportError: pass # zookeeper client connection _zookeeper_client = None class SvcMonitor(object): def __init__(self, sm_logger=None, args=None): self._args = args # initialize logger if sm_logger is not None: self.logger = sm_logger else: # Initialize logger self.logger = ServiceMonitorLogger(args) # init object_db self._object_db = ServiceMonitorDB(self._args, self.logger) DBBaseSM.init(self, self.logger, self._object_db) # init rabbit connection rabbitmq_cfg = get_rabbitmq_cfg(args) self.rabbit = VncAmqpHandle(self.logger._sandesh, self.logger, DBBaseSM, REACTION_MAP, 'svc_monitor', rabbitmq_cfg, self._args.trace_file) self.rabbit.establish() def post_init(self, vnc_lib, args=None): # api server self._vnc_lib = vnc_lib try: self._nova_client = importutils.import_object( 'svc_monitor.nova_client.ServiceMonitorNovaClient', self._args, self.logger) except Exception as e: self._nova_client = None # agent manager self._agent_manager = AgentManager() # load vrouter scheduler self.vrouter_scheduler = importutils.import_object( self._args.si_netns_scheduler_driver, self._vnc_lib, self._nova_client, None, self.logger, self._args) # load virtual machine instance manager self.vm_manager = importutils.import_object( 'svc_monitor.virtual_machine_manager.VirtualMachineManager', self._vnc_lib, self._object_db, self.logger, self.vrouter_scheduler, self._nova_client, self._agent_manager, self._args) # load network namespace instance manager self.netns_manager = importutils.import_object( 'svc_monitor.instance_manager.NetworkNamespaceManager', self._vnc_lib, self._object_db, self.logger, self.vrouter_scheduler, self._nova_client, self._agent_manager, self._args) # load a vrouter instance manager self.vrouter_manager = importutils.import_object( 'svc_monitor.vrouter_instance_manager.VRouterInstanceManager', self._vnc_lib, self._object_db, self.logger, self.vrouter_scheduler, self._nova_client, self._agent_manager, self._args) # load PNF instance manager self.ps_manager = importutils.import_object( 
'svc_monitor.physical_service_manager.PhysicalServiceManager', self._vnc_lib, self._object_db, self.logger, self.vrouter_scheduler, self._nova_client, self._agent_manager, self._args) # load a loadbalancer agent self.loadbalancer_agent = LoadbalancerAgent( self, self._vnc_lib, self._object_db, self._args) self._agent_manager.register_agent(self.loadbalancer_agent) # load a snat agent self.snat_agent = SNATAgent(self, self._vnc_lib, self._object_db, self._args, ServiceMonitorModuleLogger(self.logger)) self._agent_manager.register_agent(self.snat_agent) # load port tuple agent self.port_tuple_agent = PortTupleAgent(self, self._vnc_lib, self._object_db, self._args, ServiceMonitorModuleLogger(self.logger)) self._agent_manager.register_agent(self.port_tuple_agent) # Read the object_db and populate the entry in ServiceMonitor DB self.sync_sm() # create default analyzer template self._create_default_template('analyzer-template', 'analyzer', flavor='m1.medium', image_name='analyzer') # create default NAT template self._create_default_template('nat-template', 'firewall', svc_mode='in-network-nat', image_name='analyzer', flavor='m1.medium') # create default netns SNAT template self._create_default_template('netns-snat-template', 'source-nat', svc_mode='in-network-nat', hypervisor_type='network-namespace', scaling=True) # create default loadbalancer template self._create_default_template('haproxy-loadbalancer-template', 'loadbalancer', svc_mode='in-network-nat', hypervisor_type='network-namespace', scaling=True) self._create_default_template('docker-template', 'firewall', svc_mode='transparent', image_name="ubuntu", hypervisor_type='vrouter-instance', vrouter_instance_type='docker', instance_data={ "command": "/bin/bash" }) # upgrade handling self.upgrade() # check services self.vrouter_scheduler.vrouters_running() self.launch_services() self.rabbit._db_resync_done.set() def _upgrade_instance_ip(self, vm): for vmi_id in vm.virtual_machine_interfaces: vmi = VirtualMachineInterfaceSM.get(vmi_id) if not vmi: continue for iip_id in vmi.instance_ips: iip = InstanceIpSM.get(iip_id) if not iip or iip.service_instance_ip: continue iip_obj = InstanceIp() iip_obj.name = iip.name iip_obj.uuid = iip.uuid iip_obj.set_service_instance_ip(True) try: self._vnc_lib.instance_ip_update(iip_obj) except NoIdError: self.logger.error("upgrade instance ip to service ip failed %s" % (iip.name)) continue def _upgrade_auto_policy(self, si, st): if st.name != 'netns-snat-template': return if not si.params['auto_policy']: return si_obj = ServiceInstance() si_obj.uuid = si.uuid si_obj.fq_name = si.fq_name si_props = ServiceInstanceType(**si.params) si_props.set_auto_policy(False) si_obj.set_service_instance_properties(si_props) try: self._vnc_lib.service_instance_update(si_obj) self.logger.notice("snat policy upgraded for %s" % (si.name)) except NoIdError: self.logger.error("snat policy upgrade failed for %s" % (si.name)) return def upgrade(self): for lr in LogicalRouterSM.values(): self.snat_agent.upgrade(lr) for si in ServiceInstanceSM.values(): st = ServiceTemplateSM.get(si.service_template) if not st: continue self._upgrade_auto_policy(si, st) vm_id_list = list(si.virtual_machines) for vm_id in vm_id_list: vm = VirtualMachineSM.get(vm_id) self._upgrade_instance_ip(vm) if vm.virtualization_type: continue try: nova_vm = self._nova_client.oper('servers', 'get', si.proj_name, id=vm_id) except nc_exc.NotFound: nova_vm = None if nova_vm: vm_name = nova_vm.name vm.proj_fq_name = nova_vm.name.split('__')[0:2] else: vm_name = vm.name 
if not vm_name.split('__')[-1].isdigit(): continue vm.virtualization_type = st.virtualization_type self.delete_service_instance(vm) def launch_services(self): for si in ServiceInstanceSM.values(): self.create_service_instance(si) def sync_sm(self): # Read and Sync all DBase for cls in DBBaseSM.get_obj_type_map().values(): for obj in cls.list_obj(): cls.locate(obj['uuid'], obj) # Link SI and VM for vm in VirtualMachineSM.values(): if vm.service_instance: continue for vmi_id in vm.virtual_machine_interfaces: vmi = VirtualMachineInterfaceSM.get(vmi_id) if not vmi: continue self.port_delete_or_si_link(vm, vmi) # invoke port tuple handling try: self.port_tuple_agent.update_port_tuples() except Exception: cgitb_error_log(self) # Load the loadbalancer driver self.loadbalancer_agent.load_drivers() # Invoke the health monitors for hm in HealthMonitorSM.values(): hm.sync() # Invoke the loadbalancers for lb in LoadbalancerSM.values(): lb.sync() # Invoke the loadbalancer listeners for lb_listener in LoadbalancerListenerSM.values(): lb_listener.sync() # Invoke the loadbalancer pools for lb_pool in LoadbalancerPoolSM.values(): lb_pool.sync() # Audit the lb pools self.loadbalancer_agent.audit_lb_pools() # Audit the SNAT instances self.snat_agent.audit_snat_instances() # end sync_sm # create service template def _create_default_template(self, st_name, svc_type, svc_mode=None, hypervisor_type='virtual-machine', image_name=None, flavor=None, scaling=False, vrouter_instance_type=None, instance_data=None): domain_name = 'default-domain' domain_fq_name = [domain_name] st_fq_name = [domain_name, st_name] self.logger.info("Creating %s %s hypervisor %s" % (domain_name, st_name, hypervisor_type)) domain_obj = None for domain in DomainSM.values(): if domain.fq_name == domain_fq_name: domain_obj = Domain() domain_obj.uuid = domain.uuid domain_obj.fq_name = domain_fq_name break if not domain_obj: self.logger.error("%s domain not found" % (domain_name)) return for st in ServiceTemplateSM.values(): if st.fq_name == st_fq_name: self.logger.info("%s exists uuid %s" % (st.name, str(st.uuid))) return svc_properties = ServiceTemplateType() svc_properties.set_service_type(svc_type) svc_properties.set_service_mode(svc_mode) svc_properties.set_service_virtualization_type(hypervisor_type) svc_properties.set_image_name(image_name) svc_properties.set_flavor(flavor) svc_properties.set_ordered_interfaces(True) svc_properties.set_service_scaling(scaling) # set interface list if svc_type == 'analyzer': if_list = [['left', False]] elif hypervisor_type == 'network-namespace': if_list = [['right', True], ['left', True]] else: if_list = [ ['management', False], ['left', False], ['right', False]] for itf in if_list: if_type = ServiceTemplateInterfaceType(shared_ip=itf[1]) if_type.set_service_interface_type(itf[0]) svc_properties.add_interface_type(if_type) if vrouter_instance_type is not None: svc_properties.set_vrouter_instance_type(vrouter_instance_type) if instance_data is not None: svc_properties.set_instance_data( json.dumps(instance_data, separators=(',', ':'))) st_obj = ServiceTemplate(name=st_name, domain_obj=domain) st_obj.set_service_template_properties(svc_properties) try: st_uuid = self._vnc_lib.service_template_create(st_obj) except Exception as e: self.logger.error("%s create failed with error %s" % (st_name, str(e))) return # Create the service template in local db ServiceTemplateSM.locate(st_uuid) self.logger.info("%s created with uuid %s" % (st_name, str(st_uuid))) #_create_default_analyzer_template def 
port_delete_or_si_link(self, vm, vmi): if vmi.port_tuples: return if (vmi.service_instances and vmi.virtual_machine == None): self.vm_manager.cleanup_svc_vm_ports([vmi.uuid]) return if not vm or vm.service_instance: return if not vmi.if_type: return if len(vmi.name.split('__')) < 4: return si_fq_name = vmi.name.split('__')[0:3] index = int(vmi.name.split('__')[3]) - 1 for si in ServiceInstanceSM.values(): if si.fq_name != si_fq_name: continue st = ServiceTemplateSM.get(si.service_template) self.vm_manager.link_si_to_vm(si, st, index, vm.uuid) return def create_service_instance(self, si): if si.state == 'active': return st = ServiceTemplateSM.get(si.service_template) if not st: self.logger.error("template not found for %s" % ((':').join(si.fq_name))) return if st.params and st.params.get('version', 1) == 2: return self.logger.info("Creating SI %s (%s)" % ((':').join(si.fq_name), st.virtualization_type)) try: if st.virtualization_type == 'virtual-machine': self.vm_manager.create_service(st, si) elif st.virtualization_type == 'network-namespace': self.netns_manager.create_service(st, si) elif st.virtualization_type == 'vrouter-instance': self.vrouter_manager.create_service(st, si) elif st.virtualization_type == 'physical-device': self.ps_manager.create_service(st, si) else: self.logger.error("Unknown virt type: %s" % st.virtualization_type) except Exception: cgitb_error_log(self) si.launch_count += 1 self.logger.info("SI %s creation success" % (':').join(si.fq_name)) def delete_service_instance(self, vm): self.logger.info("Deleting VM %s %s for SI %s" % ((':').join(vm.fq_name), vm.uuid, vm.service_id)) try: if vm.virtualization_type == svc_info.get_vm_instance_type(): self.vm_manager.delete_service(vm) elif vm.virtualization_type == svc_info.get_netns_instance_type(): self.netns_manager.delete_service(vm) elif vm.virtualization_type == 'vrouter-instance': self.vrouter_manager.delete_service(vm) elif vm.virtualization_type == 'physical-device': self.ps_manager.delete_service(vm) self.logger.info("Deleted VM %s %s for SI %s" % ((':').join(vm.fq_name), vm.uuid, vm.service_id)) except Exception: cgitb_error_log(self) # generate UVE si_fq_name = vm.display_name.split('__')[:-2] si_fq_str = (':').join(si_fq_name) self.logger.uve_svc_instance(si_fq_str, status='DELETE', vms=[{'uuid': vm.uuid}]) return True def _relaunch_service_instance(self, si): si.state = 'relaunch' self.create_service_instance(si) def _check_service_running(self, si): st = ServiceTemplateSM.get(si.service_template) if st.params and st.params.get('version', 1) == 2: return if st.virtualization_type == 'virtual-machine': status = self.vm_manager.check_service(si) elif st.virtualization_type == 'network-namespace': status = self.netns_manager.check_service(si) elif st.virtualization_type == 'vrouter-instance': status = self.vrouter_manager.check_service(si) elif st.virtualization_type == 'physical-device': status = self.ps_manager.check_service(si) return status def delete_interface_route_table(self, irt_uuid): try: self._vnc_lib.interface_route_table_delete(id=irt_uuid) InterfaceRouteTableSM.delete(irt_uuid) except (NoIdError, RefsExistError): return def _delete_shared_vn(self, vn_uuid): try: self.logger.info("Deleting vn %s" % (vn_uuid)) self._vnc_lib.virtual_network_delete(id=vn_uuid) VirtualNetworkSM.delete(vn_uuid) except (NoIdError, RefsExistError): pass @staticmethod def reset(): for cls in DBBaseSM.get_obj_type_map().values(): cls.reset() def sighup_handler(self): if self._conf_file: config = ConfigParser.SafeConfigParser() 
config.read(self._conf_file) if 'DEFAULTS' in config.sections(): try: collectors = config.get('DEFAULTS', 'collectors') if type(collectors) is str: collectors = collectors.split() new_chksum = hashlib.md5("".join(collectors)).hexdigest() if new_chksum != self._chksum: self._chksum = new_chksum config.random_collectors = random.sample(collectors, len(collectors)) # Reconnect to achieve load-balance irrespective of list self.logger.sandesh_reconfig_collectors(config) except ConfigParser.NoOptionError as e: pass # end sighup_handler def skip_check_service(si): # wait for first launch if not si.launch_count: return True # back off going on if si.back_off > 0: si.back_off -= 1 return True # back off done if si.back_off == 0: si.back_off = -1 return False # set back off if not si.launch_count % 10: si.back_off = 10 return True return False def timer_callback(monitor): # delete orphan shared iips iip_delete_list = [] for iip in InstanceIpSM.values(): if not iip.instance_ip_secondary or not iip.service_instance_ip: continue if iip.service_instance: continue if len(iip.virtual_machine_interfaces): continue iip_delete_list.append(iip) for iip in iip_delete_list: monitor.port_tuple_agent.delete_shared_iip(iip) # delete vms without si vm_delete_list = [] for vm in VirtualMachineSM.values(): si = ServiceInstanceSM.get(vm.service_instance) if not si and vm.virtualization_type: vm_delete_list.append(vm) for vm in vm_delete_list: monitor.delete_service_instance(vm) # delete vmis with si but no vms vmi_delete_list = [] for vmi in VirtualMachineInterfaceSM.values(): for si_uuid in vmi.service_instances: si = ServiceInstanceSM.get(si_uuid) if si and not vmi.virtual_machine: vmi_delete_list.append(vmi.uuid) if len(vmi_delete_list): monitor.vm_manager.cleanup_svc_vm_ports(vmi_delete_list) # check vrouter agent status monitor.vrouter_scheduler.vrouters_running() # check status of service si_list = list(ServiceInstanceSM.values()) for si in si_list: if skip_check_service(si): continue if not monitor._check_service_running(si): monitor._relaunch_service_instance(si) if si.max_instances != len(si.virtual_machines): monitor._relaunch_service_instance(si) # check vns to be deleted for project in ProjectSM.values(): if project.service_instances: continue vn_id_list = list(project.virtual_networks) for vn_id in vn_id_list: vn = VirtualNetworkSM.get(vn_id) if not vn or vn.virtual_machine_interfaces: continue if vn.name in svc_info.get_shared_vn_list(): monitor._delete_shared_vn(vn.uuid) def launch_timer(monitor): if not monitor._args.check_service_interval.isdigit(): monitor.logger.emergency("set seconds for check_service_interval " "in contrail-svc-monitor.conf. \ example: check_service_interval=60") sys.exit() monitor.logger.notice("check_service_interval set to %s seconds" % monitor._args.check_service_interval) while True: gevent.sleep(int(monitor._args.check_service_interval)) try: timer_callback(monitor) except Exception: cgitb_error_log(monitor) def cgitb_error_log(monitor): string_buf = cStringIO.StringIO() cgitb_hook(file=string_buf, format="text") monitor.logger.log(string_buf.getvalue(), level=SandeshLevel.SYS_ERR) def parse_args(args_str): ''' Eg. 
python svc_monitor.py --rabbit_server localhost --rabbit_port 5672 --rabbit_user guest --rabbit_password guest --cassandra_server_list 10.1.2.3:9160 --api_server_ip 10.1.2.3 --api_server_port 8082 --api_server_use_ssl False --zk_server_ip 10.1.2.3 --zk_server_port 2181 --collectors 127.0.0.1:8086 --http_server_port 8090 --log_local --log_level SYS_DEBUG --log_category test --log_file <stdout> --trace_file /var/log/contrail/svc-monitor.err --use_syslog --syslog_facility LOG_USER --cluster_id <testbed-name> --check_service_interval 60 [--region_name <name>] [--reset_config] ''' # Source any specified config/ini file # Turn off help, so we show all options in response to -h conf_parser = argparse.ArgumentParser(add_help=False) conf_parser.add_argument("-c", "--conf_file", action='append', help="Specify config file", metavar="FILE") args, remaining_argv = conf_parser.parse_known_args(args_str.split()) defaults = { 'rabbit_server': 'localhost', 'rabbit_port': '5672', 'rabbit_user': 'guest', 'rabbit_password': 'guest', 'rabbit_vhost': None, 'rabbit_ha_mode': False, 'cassandra_server_list': '127.0.0.1:9160', 'api_server_ip': '127.0.0.1', 'api_server_port': '8082', 'api_server_use_ssl': False, 'zk_server_ip': '127.0.0.1', 'zk_server_port': '2181', 'collectors': None, 'http_server_port': '8088', 'log_local': False, 'log_level': SandeshLevel.SYS_DEBUG, 'log_category': '', 'log_file': Sandesh._DEFAULT_LOG_FILE, 'trace_file': '/var/log/contrail/svc-monitor.err', 'use_syslog': False, 'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY, 'region_name': None, 'cluster_id': '', 'logging_conf': '', 'logger_class': None, 'check_service_interval': '60', 'nova_endpoint_type': 'internalURL', 'rabbit_use_ssl': False, 'kombu_ssl_version': '', 'kombu_ssl_keyfile': '', 'kombu_ssl_certfile': '', 'kombu_ssl_ca_certs': '', } defaults.update(SandeshConfig.get_default_options(['DEFAULTS'])) secopts = { 'use_certs': False, 'keyfile': '', 'certfile': '', 'ca_certs': '', } ksopts = { 'auth_host': '127.0.0.1', 'auth_protocol': 'http', 'auth_port': '5000', 'auth_version': 'v2.0', 'auth_insecure': True, 'admin_user': 'user1', 'admin_password': 'password1', 'admin_tenant_name': 'admin' } schedops = { 'si_netns_scheduler_driver': 'svc_monitor.scheduler.vrouter_scheduler.RandomScheduler', 'analytics_server_list': '127.0.0.1:8081', 'availability_zone': None, 'netns_availability_zone': None, 'aaa_mode': cfgm_common.AAA_MODE_DEFAULT_VALUE, } cassandraopts = { 'cassandra_user': None, 'cassandra_password': None, } sandeshopts = SandeshConfig.get_default_options() saved_conf_file = args.conf_file config = ConfigParser.SafeConfigParser() if args.conf_file: config.read(args.conf_file) defaults.update(dict(config.items("DEFAULTS"))) if ('SECURITY' in config.sections() and 'use_certs' in config.options('SECURITY')): if config.getboolean('SECURITY', 'use_certs'): secopts.update(dict(config.items("SECURITY"))) if 'KEYSTONE' in config.sections(): ksopts.update(dict(config.items("KEYSTONE"))) if 'SCHEDULER' in config.sections(): schedops.update(dict(config.items("SCHEDULER"))) if 'CASSANDRA' in config.sections(): cassandraopts.update(dict(config.items('CASSANDRA'))) SandeshConfig.update_options(sandeshopts, config) # Override with CLI options # Don't surpress add_help here so it will handle -h parser = argparse.ArgumentParser( # Inherit options from config_parser parents=[conf_parser], # script description with -h/--help description=__doc__, # Don't mess with format of description formatter_class=argparse.RawDescriptionHelpFormatter, ) 
defaults.update(secopts) defaults.update(ksopts) defaults.update(schedops) defaults.update(cassandraopts) defaults.update(sandeshopts) parser.set_defaults(**defaults) parser.add_argument( "--cassandra_server_list", help="List of cassandra servers in IP Address:Port format", nargs='+') parser.add_argument( "--cassandra_use_ssl", action="store_true", help="Enable TLS for cassandra communication") parser.add_argument( "--cassandra_ca_certs", help="Cassandra CA certs") parser.add_argument( "--reset_config", action="store_true", help="Warning! Destroy previous configuration and start clean") parser.add_argument("--api_server_ip", help="IP address of API server") parser.add_argument("--api_server_port", help="Port of API server") parser.add_argument("--api_server_use_ssl", help="Use SSL to connect with API server") parser.add_argument("--collectors", help="List of VNC collectors in ip:port format", nargs="+") parser.add_argument("--http_server_port", help="Port of local HTTP server") parser.add_argument( "--log_local", action="store_true", help="Enable local logging of sandesh messages") parser.add_argument( "--log_level", help="Severity level for local logging of sandesh messages") parser.add_argument( "--log_category", help="Category filter for local logging of sandesh messages") parser.add_argument("--log_file", help="Filename for the logs to be written to") parser.add_argument("--trace_file", help="Filename for the error " "backtraces to be written to") parser.add_argument("--use_syslog", action="store_true", help="Use syslog for logging") parser.add_argument("--syslog_facility", help="Syslog facility to receive log lines") parser.add_argument("--aaa_mode", choices=AAA_MODE_VALID_VALUES, help="AAA mode") parser.add_argument("--admin_user", help="Name of keystone admin user") parser.add_argument("--admin_password", help="Password of keystone admin user") parser.add_argument("--admin_tenant_name", help="Tenant name for keystone admin user") parser.add_argument("--region_name", help="Region name for openstack API") parser.add_argument("--cluster_id", help="Used for database keyspace separation") parser.add_argument( "--logging_conf", help=("Optional logging configuration file, default: None")) parser.add_argument( "--logger_class", help=("Optional external logger class, default: None")) parser.add_argument("--cassandra_user", help="Cassandra user name") parser.add_argument("--cassandra_password", help="Cassandra password") parser.add_argument("--check_service_interval", help="Check service interval") SandeshConfig.add_parser_arguments(parser) args = parser.parse_args(remaining_argv) args._conf_file = saved_conf_file args.config_sections = config if type(args.cassandra_server_list) is str: args.cassandra_server_list = args.cassandra_server_list.split() if type(args.collectors) is str: args.collectors = args.collectors.split() if args.region_name and args.region_name.lower() == 'none': args.region_name = None if args.availability_zone and args.availability_zone.lower() == 'none': args.availability_zone = None if args.netns_availability_zone and \ args.netns_availability_zone.lower() == 'none': args.netns_availability_zone = None args.sandesh_config = SandeshConfig.from_parser_arguments(args) args.cassandra_use_ssl = (str(args.cassandra_use_ssl).lower() == 'true') return args def get_rabbitmq_cfg(args): return { 'servers': args.rabbit_server, 'port': args.rabbit_port, 'user': args.rabbit_user, 'password': args.rabbit_password, 'vhost': args.rabbit_vhost, 'ha_mode': args.rabbit_ha_mode, 'use_ssl': 
args.rabbit_use_ssl, 'ssl_version': args.kombu_ssl_version, 'ssl_keyfile': args.kombu_ssl_keyfile, 'ssl_certfile': args.kombu_ssl_certfile, 'ssl_ca_certs': args.kombu_ssl_ca_certs } def run_svc_monitor(sm_logger, args=None): sm_logger.notice("Elected master SVC Monitor node. Initializing... ") sm_logger.introspect_init() monitor = SvcMonitor(sm_logger, args) monitor._zookeeper_client = _zookeeper_client monitor._conf_file = args._conf_file monitor._chksum = "" if args.collectors: monitor._chksum = hashlib.md5("".join(args.collectors)).hexdigest() """ @sighup SIGHUP handler to indicate configuration changes """ gevent.signal(signal.SIGHUP, monitor.sighup_handler) # Retry till API server is up connected = False monitor.logger.api_conn_status_update(ConnectionStatus.INIT) api_server_list = args.api_server_ip.split(',') while not connected: try: vnc_api = VncApi( args.admin_user, args.admin_password, args.admin_tenant_name, api_server_list, args.api_server_port, api_server_use_ssl=args.api_server_use_ssl) connected = True monitor.logger.api_conn_status_update(ConnectionStatus.UP) except requests.exceptions.ConnectionError as e: monitor.logger.api_conn_status_update( ConnectionStatus.DOWN, str(e)) time.sleep(3) except (RuntimeError, ResourceExhaustionError): # auth failure or haproxy throws 503 time.sleep(3) try: monitor.post_init(vnc_api, args) timer_task = gevent.spawn(launch_timer, monitor) gevent.joinall([timer_task]) except KeyboardInterrupt: monitor.rabbit.close() raise def main(args_str=None): global _zookeeper_client if not args_str: args_str = ' '.join(sys.argv[1:]) args = parse_args(args_str) if args.cluster_id: client_pfx = args.cluster_id + '-' zk_path_pfx = args.cluster_id + '/' else: client_pfx = '' zk_path_pfx = '' # randomize collector list args.random_collectors = args.collectors if args.collectors: args.random_collectors = random.sample(args.collectors, len(args.collectors)) # Initialize logger without introspect thread sm_logger = ServiceMonitorLogger(args, http_server_port=-1) # Initialize AMQP handler then close it to be sure remain queue of a # precedent run is cleaned rabbitmq_cfg = get_rabbitmq_cfg(args) vnc_amqp = VncAmqpHandle(sm_logger._sandesh, sm_logger, DBBaseSM, REACTION_MAP, 'svc_monitor', rabbitmq_cfg, args.trace_file) vnc_amqp.establish() vnc_amqp.close() sm_logger.debug("Removed remained AMQP queue") # Waiting to be elected as master node _zookeeper_client = ZookeeperClient( client_pfx+"svc-monitor", args.zk_server_ip) sm_logger.notice("Waiting to be elected as master...") _zookeeper_client.master_election(zk_path_pfx+"/svc-monitor", os.getpid(), run_svc_monitor, sm_logger, args) # end main def server_main(): vnc_cgitb.enable(format='text') main() # end server_main if __name__ == '__main__': server_main()
python
from .db.models import ModelWorker
from .db.connection import DbEngine

ModelWorker.metadata.create_all(DbEngine)
python
import string


def encotel(frase):
    """Convert a phrase into phone-keypad digits; non-letter characters pass through unchanged."""
    teclado = {
        'abc': '2',
        'def': '3',
        'ghi': '4',
        'jkl': '5',
        'mno': '6',
        'pqrs': '7',
        'tuv': '8',
        'wxyz': '9',
    }
    numeros = []
    for letra in frase:
        # string.letters only exists on Python 2; ascii_letters works on both 2 and 3.
        if letra not in string.ascii_letters:
            numeros.append(letra)
            continue
        # keypad keys are lowercase, so match case-insensitively
        numeros.extend([teclado[chave] for chave in teclado.keys() if letra.lower() in chave])
    return "".join(numeros)
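# --- Usage sketch (editor's addition) ---
# Letters map to their keypad digit; spaces, digits and punctuation are kept as-is.
if __name__ == "__main__":
    print(encotel("call me"))  # -> 2255 63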
python
import itertools import beatbox import pandas as pd def query_salesforce(line, query=''): """Runs SQL statement against a salesforce, using specified user,password and security token and beatbox. If no user,password and security token has been given, an error will be raised Examples:: %%salesforce user,password,security_token SELECT id FROM task """ assert len(line.split(',')) == 3, 'You should specify 3 arguments:\nuser_id, password, security_token' user, password, security_token = line.split(',') sf = Salesforce(user, password, security_token) df = sf.query(query, deleted_included=True) return df class Salesforce(object): def __init__(self, user_name, password, security_token): """Constructor for salesforce api which open session with salesforce with given credentials Args: * user_name: salesforce user * password: salesforce password * security_token: salesforcesecurity_token """ self.sf = beatbox._tPartnerNS self.svc = beatbox.Client() self.svc.login(user_name, password + security_token) def __get_query_results(self, is_actual_query, rest_of_query, deleted_included=False): """ Function to call the salesforce API given the calculated query Args: * is_actual_query: query to be sent to the api * rest_of_query: if is_actual_query=true its the query string else its the continuation of the query given in iteration before * deleted_included: should the query bring records from recycle bin (http://spanning.com/blog/what-you-need-to-know-about-salesforces-recycle-bin/) Returns: * res_[self.sf.records:] which represent list of the salesforce results and columns * res_.done[0] which indicates if there are more records which wasnt fetched for this specific query * res_.queryLocator[0]= the query locator to be sent to this function in the next page""" if is_actual_query: res_ = self.svc.query(rest_of_query) if deleted_included else self.svc.queryAll(rest_of_query) else: res_ = self.svc.queryMore(rest_of_query) return res_[self.sf.records:], \ res_.done[0] if hasattr(res_, 'done') else True, \ res_.queryLocator[0] if res_.queryLocator else None @staticmethod def get_columns_names(row): return [str(col._name[1].lower()) for col in row[2:]] @staticmethod def get_columns_values(row): return [str(col) for col in row[2:]] def query(self, query, deleted_included=False): """ Function to call the salesforce API given the calculated query Args: * query: a given query for salesforce (https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select.htm)d * deleted_included: should the query bring records from recycle bin (http://spanning.com/blog/what-you-need-to-know-about-salesforces-recycle-bin/) Returns: Dataframe with results from the given query""" res, done, header = [], 'false', [] rest_of_query = query for i in itertools.takewhile(lambda c: done == 'false', itertools.count()): first_iteration = i == 0 sf_results, done, rest_of_query = self.__get_query_results(first_iteration, \ rest_of_query, \ deleted_included) normalized_sf_results = [self.get_columns_values(row) for row in sf_results] res.extend(normalized_sf_results) if first_iteration and sf_results: header = self.get_columns_names(sf_results[0]) return pd.DataFrame(res, columns=header) def load_ipython_extension(ipython): ipython.register_magic_function(query_salesforce, 'cell', 'salesforce')
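# --- Usage sketch (editor's addition) ---
# Running a SOQL query outside of IPython. The credentials are placeholders; beatbox
# expects the security token appended to the password, which the constructor handles.
if __name__ == "__main__":
    sf = Salesforce("user@example.com", "password", "security_token")
    tasks = sf.query("SELECT Id, Subject FROM Task LIMIT 10", deleted_included=False)
    print(tasks.head())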
python
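For context, a minimal usage sketch of the wrapper above outside IPython; the credentials and the SOQL statement are placeholders. Inside IPython the same code is exposed as the %%salesforce cell magic once the module is loaded with %load_ext.

# Placeholder credentials and query, for illustration only.
sf = Salesforce("my_user", "my_password", "my_security_token")
tasks = sf.query("SELECT Id, Subject FROM Task", deleted_included=False)
print(tasks.head())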
#!/usr/bin/env python3

import functools
import logging
import queue
import threading


class AsyncCaller:
    '''Singleton class which executes function calls in a separate thread'''

    class _Caller:

        class Thread(threading.Thread):
            def __init__(self, queue, error_handler):
                self.queue = queue
                self.error_handler = error_handler
                self.logger = logging.getLogger('AsyncCaller')
                super().__init__(daemon=True)

            def run(self):
                while True:
                    async_job = self.queue.get()
                    if async_job is None:
                        break
                    try:
                        async_job()
                    except Exception as e:
                        self.error_handler(str(e))

        def __init__(self, error_handler):
            self.queue = queue.Queue()
            self.thread = self.Thread(self.queue, error_handler)
            self.thread.start()

        def call(self, target):
            self.queue.put(target)

    _instance = None

    def __new__(cls, error_handler=None):
        if AsyncCaller._instance is None:
            AsyncCaller._instance = AsyncCaller._Caller(error_handler)
        return AsyncCaller._instance


def asynchronous(f):
    '''Decorator which allows any function to be called asynchronously'''
    @functools.wraps(f)
    def _async_call(*args, **kwargs):
        AsyncCaller().call(lambda: f(*args, **kwargs))
    return _async_call
python
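A small usage sketch of the decorator above; the error handler and the sleep times are made up for the example.

import time

def log_error(message):
    print("async job failed:", message)

AsyncCaller(log_error)   # first call creates the singleton worker thread

@asynchronous
def slow_add(a, b):
    time.sleep(0.1)
    print("result:", a + b)

slow_add(1, 2)           # returns immediately; the work runs in the worker thread
time.sleep(0.5)          # the worker is a daemon thread, so give it time before exiting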
from pyson0.json0diff import diff
from pyson0.json0 import TypeJSON
python
import uuid import json import os import pytest import postgraas_server.backends.docker.postgres_instance_driver as pid import postgraas_server.backends.postgres_cluster.postgres_cluster_driver as pgcd import postgraas_server.configuration as configuration from postgraas_server.backends.exceptions import PostgraasApiException from postgraas_server.create_app import create_app from postgraas_server.management_resources import DBInstance DOCKER_CONFIG = { "metadb": { "db_name": "postgraas", "db_username": "postgraas", "db_pwd": "postgraas12", "host": "localhost", "port": "54321" }, "backend": { "type": "docker" } } CLUSTER_CONFIG = { "metadb": { "db_name": "postgraas", "db_username": "postgraas", "db_pwd": "postgraas12", "host": "localhost", "port": "54321" }, "backend": { "type": "pg_cluster", "host": os.environ.get('PGHOST', 'localhost'), "port": os.environ.get('PGPORT', '5432'), "database": os.environ.get('PGDATABASE', 'postgres'), "username": os.environ.get('PGUSER', 'postgres'), "password": os.environ.get('PGPASSWORD', 'postgres'), } } CONFIGS = { 'docker': DOCKER_CONFIG, 'pg_cluster': CLUSTER_CONFIG, } def remove_digits(s): return ''.join(c for c in s if not c.isdigit()) def delete_all_test_postgraas_container(): c = pid._docker_client() for container in c.containers.list(): if container.name.startswith("tests_postgraas_"): container.remove(force=True) def delete_all_test_database_and_user(config): con = pgcd._create_pg_connection(config) cur = con.cursor() cur.execute( '''SELECT d.datname, u.usename FROM pg_database d JOIN pg_user u ON (d.datdba = u.usesysid);''') for db in cur: if db[0].startswith("tests_postgraas_"): delete_test_database_and_user(db[0], db[1], config) cur.execute( '''SELECT u.usename FROM pg_user u;''') for db in cur: if db[0].startswith("tests_postgraas_"): pgcd.delete_user(db[0], config) def delete_test_database_and_user(db_name, username, config): pgcd.delete_database(db_name, config) pgcd.delete_user(username, config) @pytest.fixture(params=['docker', 'pg_cluster']) def parametrized_setup(request, tmpdir): from postgraas_server.management_resources import db cfg = tmpdir.join('config') with open(cfg.strpath, "w") as fp: json.dump(CONFIGS[request.param], fp) config = configuration.get_config(cfg.strpath) this_app = create_app(config) this_app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite://" this_app.use_reloader = False this_app.config['TESTING'] = True ctx = this_app.app_context() ctx.push() db.create_all() username, db_name = str(uuid.uuid4()).replace('-', '_'), str(uuid.uuid4()).replace('-', '_') request.cls.this_app = this_app request.cls.app_client = this_app.test_client() request.cls.db_name = remove_digits(db_name) request.cls.username = remove_digits(username) request.cls.backend = request.param try: yield except Exception: pass if request.param == 'docker': delete_all_test_postgraas_container() elif request.param == 'pg_cluster': delete_all_test_database_and_user(config['backend']) db.drop_all() ctx.pop() @pytest.mark.usefixtures('parametrized_setup') class TestPostgraasApi(): def test_create_and_delete_postgres_instance(self): db_credentials = { "db_name": 'tests_postgraas_instance_name', "db_username": 'tests_postgraas_db_username', "db_pwd": 'test_db_pwd', "host": pid.get_hostname(), "port": pid.get_open_port() } db_entry = DBInstance( postgraas_instance_name=db_credentials['db_name'], db_name=db_credentials['db_name'], username=db_credentials['db_username'], password="", hostname=db_credentials['host'], port=db_credentials['port'] ) 
db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials) self.this_app.postgraas_backend.delete(db_entry) assert True def test_create_postgraas_twice(self): db_credentials = { "db_name": 'tests_postgraas_instance_name', "db_username": 'tests_postgraas_db_username', "db_pwd": 'test_db_pwd', "host": pid.get_hostname(), "port": pid.get_open_port() } db_entry = DBInstance( postgraas_instance_name=db_credentials['db_name'], db_name=db_credentials['db_name'], username=db_credentials['db_username'], password="", hostname=db_credentials['host'], port=db_credentials['port'] ) db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials) with pytest.raises(PostgraasApiException) as excinfo: db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials) if self.backend == "pg_cluster": assert excinfo.value.message == 'db or user already exists' elif self.backend == "docker": assert excinfo.value.message == 'Container exists already' self.this_app.postgraas_backend.delete(db_entry) assert True @pytest.mark.xfail(reason='Username now valid due to hardening against SQL injections.') def test_create_postgraas_bad_username(self): db_credentials = { "db_name": 'tests_postgraas_instance_name', "db_username": 'tests_postgraas_db-bad username', "db_pwd": 'test_db_pwd', "host": pid.get_hostname(), "port": pid.get_open_port() } db_entry = DBInstance( postgraas_instance_name=db_credentials['db_name'], db_name=db_credentials['db_name'], username=db_credentials['db_username'], password="", hostname=db_credentials['host'], port=db_credentials['port'] ) if self.backend == "pg_cluster": with pytest.raises(PostgraasApiException) as excinfo: db_entry.container_id = self.this_app.postgraas_backend.create(db_entry, db_credentials) self.this_app.postgraas_backend.delete(db_entry) assert 'syntax error at or near "-"' in excinfo.value.message def test_delete_nonexisting_db(self): db_credentials = { "db_name": 'tests_postgraas_instance_name', "db_username": 'tests_postgraas_db-bad username', "db_pwd": 'test_db_pwd', "host": pid.get_hostname(), "port": pid.get_open_port() } db_entry = DBInstance( postgraas_instance_name=db_credentials['db_name'], db_name=db_credentials['db_name'], username=db_credentials['db_username'], password="", hostname=db_credentials['host'], port=db_credentials['port'], container_id="4n8nz48az49prdmdmprmr4doesnotexit" ) with pytest.raises(PostgraasApiException) as excinfo: db_entry.container_id = self.this_app.postgraas_backend.delete(db_entry) assert 'does not exist' in excinfo.value.message
python
import argparse
import ibapi
from ib_tws_server.codegen.asyncio_client_generator import AsyncioWrapperGenerator
from ib_tws_server.codegen import *
from ib_tws_server.api_definition import *
import logging
import os
import shutil
import sys

logging.basicConfig(stream=sys.stdout, level=logging.ERROR)


def generate(output_dir: str):
    response_class_fname = os.path.join(output_dir, "client_responses.py")
    asyncio_client_fname = os.path.join(output_dir, "asyncio_client.py")
    asyncio_wrapper_fname = os.path.join(output_dir, "asyncio_wrapper.py")
    graphql_schema_fname = os.path.join(output_dir, "schema.graphql")
    graphql_resolver_fname = os.path.join(output_dir, "graphql_resolver.py")

    shutil.rmtree(output_dir, ignore_errors=True)
    os.mkdir(output_dir)

    print(f"Generating code for TWS API Version {ibapi.get_version_string()}")
    d = ApiDefinition.verify()
    ResponseTypesGenerator.generate(response_class_fname)
    AsyncioClientGenerator.generate(asyncio_client_fname)
    AsyncioWrapperGenerator.generate(asyncio_wrapper_fname)
    GraphQLSchemaGenerator.generate(graphql_schema_fname)
    GraphQLResolverGenerator.generate(graphql_resolver_fname)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Generate wrapper classes from the request definitions")
    parser.add_argument('--output-dir', '-o', dest="output_dir", required=True, help='The output directory')
    args = parser.parse_args()
    generate(args.output_dir)
python
import unittest
from cornflow_client.airflow import dag_utilities as du
from unittest.mock import Mock, patch


class DagUtilities(unittest.TestCase):
    @patch("cornflow_client.airflow.dag_utilities.CornFlow")
    def test_env_connection_vars(self, CornFlow):
        secrets = Mock()
        conn_uris = [
            (
                "cornflow://some_test_user:[email protected]",
                ("some_test_user", "very_classified_password"),
                "http://devsm.cornflow.baobabsoluciones.app",
            ),
            (
                "https://some_test_user:[email protected]",
                ("some_test_user", "very_classified_password"),
                "https://devsm.cornflow.baobabsoluciones.app",
            ),
            (
                "https://some_test_user:[email protected]/some_dir",
                ("some_test_user", "very_classified_password"),
                "https://devsm.cornflow.baobabsoluciones.app/some_dir",
            ),
            (
                "http://airflow:airflow_test_password@localhost:5000",
                ("airflow", "airflow_test_password"),
                "http://localhost:5000",
            ),
        ]
        client_instance = CornFlow.return_value
        client_instance.login.return_value = ""
        for (conn_str, user_info, url) in conn_uris:
            secrets.get_conn_uri.return_value = conn_str
            du.connect_to_cornflow(secrets)
            client_instance.login.assert_called_with(
                username=user_info[0], pwd=user_info[1]
            )
            CornFlow.assert_called_with(url=url)
python
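The tests above only assert how the username, password and base URL are recovered from a connection URI. A rough sketch of that kind of split with urllib.parse is shown below; it is not the actual cornflow_client implementation, just the shape of parsing the assertions imply.

from urllib.parse import urlparse

def split_conn_uri(uri):
    # Illustrative only: the real parsing lives inside cornflow_client.
    parsed = urlparse(uri)
    scheme = "http" if parsed.scheme == "cornflow" else parsed.scheme
    netloc = parsed.hostname or ""
    if parsed.port:
        netloc = "{}:{}".format(netloc, parsed.port)
    url = "{}://{}{}".format(scheme, netloc, parsed.path)
    return parsed.username, parsed.password, url

print(split_conn_uri("http://airflow:airflow_test_password@localhost:5000"))
# ('airflow', 'airflow_test_password', 'http://localhost:5000')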
import http
import json
from unittest import mock

import pytest
from sqlalchemy import orm

from todos import crud, db, serializers
from todos.db import models


@pytest.fixture()
def exemplary_event_path_parameters(exemplary_task_model: models.Task) -> dict:
    return {"task_id": exemplary_task_model.id}


@pytest.fixture()
def exemplary_event(exemplary_headers_with_access_token: dict, exemplary_event_path_parameters: dict) -> dict:
    return {"headers": exemplary_headers_with_access_token, "pathParameters": exemplary_event_path_parameters}


@pytest.mark.usefixtures("exemplary_access_token")
def test_should_return_unauthorized_when_access_token_is_missing() -> None:
    response = crud.get_task_details({}, {})

    assert response["statusCode"] == http.HTTPStatus.UNAUTHORIZED
    assert response["body"] is None


def test_should_successfully_return_task_details(
    dbsession: orm.Session, exemplary_event: dict, exemplary_task_model: models.Task
) -> None:
    with mock.patch.object(db, "get_session", return_value=dbsession):
        response = crud.get_task_details(exemplary_event, {})

    assert response["statusCode"] == http.HTTPStatus.OK
    assert response["body"] == json.dumps(serializers.serialize_task(exemplary_task_model))


def test_should_return_bad_request_when_task_not_found(
    dbsession: orm.Session, exemplary_headers_with_access_token: dict
) -> None:
    event = {"headers": exemplary_headers_with_access_token, "pathParameters": {"task_id": 999}}

    with mock.patch.object(db, "get_session", return_value=dbsession):
        response = crud.get_task_details(event, {})

    assert response["statusCode"] == http.HTTPStatus.BAD_REQUEST


def test_should_return_service_unavailable_when_unexpected_error_occurs(exemplary_event: dict) -> None:
    with mock.patch.object(db, "get_session", side_effect=Exception()):
        response = crud.get_task_details(exemplary_event, {})

    assert response["statusCode"] == http.HTTPStatus.SERVICE_UNAVAILABLE
    assert response["body"] is None
python
'''Standard Simple feedforward model feedforward takes in a single image Model-specific config.py options: (inherits from models.base_net): 'batch_size': An int. The number of input bundle to use in a batch 'hidden_size': An int. The size of representation size before FC layer In metric network: 'output_size': For discriminative task, the size of output. Encoder: 'encoder': A function that will build take 'input_placeholder', 'is_training', 'hidden_size', and returns a representation. -'encoder_kwargs': A Dict of all args to pass to 'encoder'. ''' from __future__ import absolute_import, division, print_function from functools import partial from models.base_net import BaseNet import losses.all as losses_lib import tensorflow as tf import tensorflow.contrib.slim as slim from models.sample_models import * from models.resnet_v1 import * import optimizers.train_steps as train_steps import optimizers.ops as optimize import pdb class StandardFeedforward(BaseNet): ''' ''' def __init__(self, global_step, cfg): ''' Args: cfg: Configuration. ''' super(StandardFeedforward, self).__init__(global_step, cfg) self.cfg = cfg if 'hidden_size' not in cfg: raise ValueError("config.py for Feedforward Network must specify 'hidden_size'") if 'encoder' not in cfg: raise ValueError("config.py for Feedforward Network must specify 'encoder'") if 'metric_net' not in cfg: raise ValueError("config.py for Feedforward Network must specify 'metric_net'") if 'loss_threshold' in cfg: self.threshold = tf.constant(cfg['loss_threshold']) else: self.threshold = None self.is_l1 = 'is_l1' in cfg and cfg['is_l1'] def build_encoder(self, input_imgs, is_training): '''Builds encoder. Args: input_img: input image to encode after scaling to [-1, 1] is_training: flag for whether the model is in training mode. Returns: encoder_output: tensor representing the ouptut of the encoder ''' encoder_kwargs = {} if 'encoder_kwargs' in self.cfg: encoder_kwargs = self.cfg['encoder_kwargs'] else: print("Not using 'kwargs' arguments for encoder.") with tf.variable_scope("feedforward") as scope: encoder_output, end_points = self.cfg['encoder']( input_imgs, is_training, reuse=None, hidden_size=self.cfg['hidden_size'], scope=scope, **encoder_kwargs) encoder_output = tf.reshape(encoder_output, [-1,16,16,8]) self.encoder_endpoints = end_points return encoder_output def build_postprocess(self, encoder_output, is_training): '''Build the post-process on feedforward network structure output. The default approach will be a three layer fully connected networks Args: encoder_output: a tensor output representations of input image is_training: flag for wheter the model is in training mode. Returns: final_output: final output for the whole model ''' metric_kwargs = {} if 'metric_kwargs' in self.cfg: metric_kwargs = self.cfg['metric_kwargs'] else: raise ValueError("config.py for Feedforward Network must specify 'metric_kwargs'") encoder_output = tf.contrib.layers.flatten(encoder_output) final_output, end_points = self.cfg['metric_net']( encoder_output, is_training, **metric_kwargs) self.metric_endpoints = end_points return final_output def build_model(self, input_imgs, is_training, targets, masks=None, privileged_input=None): '''Builds the model. Assumes that the input is from range [0, 1]. Args: input_imgs: batch of input images (scaled between -1 and 1) with the dimensions specified in the cfg is_training: flag for whether the model is in training mode or not mask: mask used for computing sum of squares loss. If None, we assume it is np.ones. 
''' print('building model') cfg = self.cfg self.is_training= is_training self.masks = masks if self.decoder_only: encoder_output = input_imgs else: encoder_output = self.build_encoder(input_imgs, is_training) final_output = self.build_postprocess(encoder_output, is_training) losses = self.get_losses(final_output, targets, is_softmax='l2_loss' not in cfg) # use weight regularization if 'omit_weight_reg' in cfg and cfg['omit_weight_reg']: add_reg = False else: add_reg = True # get losses regularization_loss = tf.add_n( slim.losses.get_regularization_losses(), name='losses/regularization_loss' ) total_loss = slim.losses.get_total_loss( add_regularization_losses=add_reg, name='losses/total_loss') self.input_images = input_imgs self.targets = targets self.masks = masks self.encoder_output = encoder_output self.decoder_output = final_output self.losses = losses self.total_loss = total_loss # add summaries if self.extended_summaries: slim.summarize_variables() slim.summarize_weights() slim.summarize_biases() slim.summarize_activations() slim.summarize_collection(tf.GraphKeys.LOSSES) slim.summarize_tensor( regularization_loss ) slim.summarize_tensor( total_loss ) self.model_built = True def get_losses(self, final_output, target, is_softmax=True): '''Returns the loss for a Siamese Network. Args: final_output: tensor that represent the final output of the image bundle. target: Tensor of target to be output by the siamese network. Returns: losses: list of tensors representing each loss component ''' print('setting up losses...') self.target = target self.final_output = final_output self.predicted = slim.softmax(final_output) with tf.variable_scope('losses'): if is_softmax: if len(target.shape) == len(final_output.shape): correct_prediction = tf.equal(tf.argmax(final_output,1), tf.argmax(target, 1)) if len(self.masks.shape) == 2: self.masks = tf.squeeze(self.masks) siamese_loss = tf.reduce_mean( losses_lib.get_softmax_loss( final_output, target, self.masks, scope='softmax_loss')) else: correct_prediction = tf.equal(tf.argmax(final_output,1), target) siamese_loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits( logits=final_output, labels=target, name='softmax_loss')) self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) self.siamese_loss = siamese_loss else: # If it's not softmax, it's l2 norm loss. 
self.accuracy = 0 # self.l2_loss = tf.losses.mean_squared_error( # final_output, # target, # scope='d1', # loss_collection=tf.GraphKeys, # reduction="none") target = tf.to_float(target) final_output = tf.to_float(final_output) # self.l2_loss = tf.norm(target - final_output, axis=1) #self.l2_loss_sum = tf.reduce_sum(self.l2_loss, 1) # print(self.l2_loss) if self.is_l1: self.l_loss = losses_lib.get_l1_loss( final_output, target, scope='d1') print('Using L1 loss.....') else: self.l_loss = losses_lib.get_l2_loss( final_output, target, scope='d1') self.siamese_loss = self.l_loss self.robust_l_loss = self.l_loss # siamese_loss = self.l2_loss # if self.threshold is not None: # ind = tf.unstack(siamese_loss) # siamese_loss = [ tf.cond(tf.greater(x, self.threshold), # lambda: self.threshold + self.threshold * tf.log(x / self.threshold), # lambda: x) for x in ind ] # self.robust_l2_loss = siamese_loss # siamese_loss = tf.stack(siamese_loss) # self.siamese_loss = tf.reduce_sum(siamese_loss) / self.cfg['batch_size'] tf.add_to_collection(tf.GraphKeys.LOSSES, self.siamese_loss) losses = [self.siamese_loss] return losses def get_train_step_fn( self ): ''' Returns: A train_step funciton which takes args: (sess, train_ops, global_stepf) ''' return partial( train_steps.discriminative_train_step_fn, return_accuracy=self.cfg['return_accuracy'] ) def build_train_op( self, global_step ): ''' Builds train ops for discriminative task Args: global_step: A Tensor to be incremented Returns: [ loss_op, accuracy ] ''' if not self.model_built or self.total_loss is None : raise RuntimeError( "Cannot build optimizers until 'build_model' ({0}) and 'get_losses' {1} are run".format( self.model_built, self.losses_built ) ) self.global_step = global_step t_vars = tf.trainable_variables() # Create the optimizer train_op for the generator self.optimizer = optimize.build_optimizer( global_step=self.global_step, cfg=self.cfg ) if 'clip_norm' in self.cfg: self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, update_global_step=True, clip_gradient_norm=self.cfg['clip_norm']) else: if self.is_training: self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, update_global_step=True ) else: self.loss_op = optimize.create_train_op( self.total_loss, self.optimizer, is_training=False, update_global_step=True ) # Create a train_op for the discriminator self.train_op = [ self.loss_op, self.accuracy ] self.train_op_built = True return self.train_op
python
""" """ PROMPT_COLORS = { "purple": '\033[95m', "blue": '\033[94m', "green": '\033[92m', "yellow": '\033[93m', "red": '\033[91m', "bold": '\033[1m', "underline": '\033[4m'} PROMPT_TAILER = '\033[0m' class ColoredPrinter(object): def __init__(self, color): if not color in PROMPT_COLORS.keys(): raise ValueError('unknown color {}'.format(color)) self.print_fmt = PROMPT_COLORS[color] + '{string}' + PROMPT_TAILER def __str__(self): """return a colored version of the representation string""" return self.format(self.__repr__()) def format(self, *strings): """add coloration items to a list of strings """ string = " ".join([self.print_fmt.format(string=string) for string in strings]) return string def __call__(self, *strings, **kwargs): string = self.format(*strings) print(string, **kwargs) printpurple = ColoredPrinter('purple') printblue = ColoredPrinter('blue') printgreen = ColoredPrinter('green') printyellow = ColoredPrinter('yellow') printred = ColoredPrinter('red') printbold = ColoredPrinter('bold') printunderline = ColoredPrinter('underline') PRINTERS = {color: eval("print{}".format(color)) for color in PROMPT_COLORS} if __name__ == '__main__': for color, printer in PRINTERS.items(): print("{:<20s} {} ======> ".format(color, printer), end=" ") printer('hello world')
python
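The module's own __main__ block already demos every printer; two further usage notes with arbitrary messages: calling a printer prints directly, while format() only builds the colored string.

printgreen("ok:", "all checks passed")        # prints directly, joining the arguments
warning = printyellow.format("careful here")  # build the colored string without printing
print("status ->", warning)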
import math
import os
import random
import re
import sys

n = int(input())
arr = list(map(int, input().rstrip().split()))

numSwaps = 0
i = 0
while i < len(arr) - 1:
    if arr[i] != i + 1:
        tmp = arr[i]
        arr[i], arr[tmp - 1] = arr[tmp - 1], arr[i]
        numSwaps += 1
    else:
        i += 1

print(numSwaps)
python
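The loop above repeatedly places arr[i] into its target slot, counting one swap per misplaced element in a cycle. The same idea wrapped in a function so it can be checked without stdin; the sample arrays are arbitrary.

def minimum_swaps(values):
    a = list(values)
    swaps = 0
    i = 0
    while i < len(a) - 1:
        if a[i] != i + 1:
            target = a[i] - 1          # index where a[i] belongs
            a[i], a[target] = a[target], a[i]
            swaps += 1
        else:
            i += 1
    return swaps

assert minimum_swaps([4, 3, 1, 2]) == 3
assert minimum_swaps([1, 3, 5, 2, 4, 6, 7]) == 3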
""" This is a reST markup explaining the following code, compatible with `Sphinx Gallery <https://sphinx-gallery.github.io/>`_. """ # You can convert the file to a Jupyter notebook using the # sphx_glr_python_to_jupyter.py utility from Sphinx Gallery. import math sin = math.sin(0.13587) print(sin) #%% # And a sum with itself turns it into two sins, because the following holds: # # .. math:: # # 2 a = a + a # two_sins = sin + sin if two_sins != 2*sin: print("Assumptions broken. Restart the universe.")
python
import os.path
from os import listdir
import re
from numpy.distutils.core import setup


def find_version(*paths):
    fname = os.path.join(os.path.dirname(__file__), *paths)
    with open(fname) as fp:
        code = fp.read()
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", code, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Unable to find version string.")


scripts = ['Scripts/' + i for i in listdir('Scripts/')]

setup(
    name='obstools',
    version=find_version('obstools', '__init__.py'),
    description='Python tools for ocean bottom seismic instruments',
    author='Pascal Audet, Helen Janiszewski',
    author_email='[email protected]',
    maintainer='Pascal Audet, Helen Janiszewski',
    maintainer_email='[email protected], [email protected]',
    url='https://github.com/paudetseis/OBStools',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'],
    install_requires=['numpy', 'obspy', 'stdb'],
    python_requires='>=3.6',
    packages=['obstools', 'obstools.atacr'],
    scripts=scripts)
python
# Simulate a Thomas cluster process on a rectangle.
# Author: H. Paul Keeler, 2018.
# Website: hpaulkeeler.com
# Repository: github.com/hpaulkeeler/posts
# For more details, see the post:
# hpaulkeeler.com/simulating-a-thomas-cluster-point-process/

import numpy as np  # NumPy package for arrays, random number generation, etc
import matplotlib.pyplot as plt  # For plotting

plt.close("all")  # close all figures

# Simulation window parameters
xMin = -.5
xMax = .5
yMin = -.5
yMax = .5

# Parameters for the parent and daughter point processes
lambdaParent = 10  # density of parent Poisson point process
lambdaDaughter = 100  # mean number of points in each cluster
sigma = 0.05  # sigma for normal variables (ie random locations) of daughters

# Extended simulation window parameters
rExt = 6 * sigma  # extension parameter
# for rExt, use a factor of the deviation sigma, eg 5 or 6
xMinExt = xMin - rExt
xMaxExt = xMax + rExt
yMinExt = yMin - rExt
yMaxExt = yMax + rExt
# rectangle dimensions
xDeltaExt = xMaxExt - xMinExt
yDeltaExt = yMaxExt - yMinExt
areaTotalExt = xDeltaExt * yDeltaExt  # area of extended rectangle

# Simulate Poisson point process for the parents
numbPointsParent = np.random.poisson(areaTotalExt * lambdaParent)  # Poisson number of points

# x and y coordinates of Poisson points for the parents
xxParent = xMinExt + xDeltaExt * np.random.uniform(0, 1, numbPointsParent)
yyParent = yMinExt + yDeltaExt * np.random.uniform(0, 1, numbPointsParent)

# Simulate Poisson point process for the daughters (ie the final point process)
numbPointsDaughter = np.random.poisson(lambdaDaughter, numbPointsParent)
numbPoints = sum(numbPointsDaughter)  # total number of points

# Generate the (relative) locations in Cartesian coordinates by
# simulating independent normal variables
xx0 = np.random.normal(0, sigma, numbPoints)  # (relative) x coordinates
yy0 = np.random.normal(0, sigma, numbPoints)  # (relative) y coordinates

# replicate parent points (ie centres of disks/clusters)
xx = np.repeat(xxParent, numbPointsDaughter)
yy = np.repeat(yyParent, numbPointsDaughter)

# translate points (ie parent points are the centres of cluster disks)
xx = xx + xx0
yy = yy + yy0

# thin points if outside the simulation window
booleInside = ((xx >= xMin) & (xx <= xMax) & (yy >= yMin) & (yy <= yMax))
# retain points inside simulation window
xx = xx[booleInside]
yy = yy[booleInside]

# Plotting
plt.scatter(xx, yy, edgecolor='b', facecolor='none', alpha=0.5)
plt.xlabel("x")
plt.ylabel("y")
plt.axis('equal')
python
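One optional sanity check: for a stationary Thomas process the mean number of points in the window is lambdaParent * lambdaDaughter * window area (edge effects are already handled by simulating parents on the extended window), so the retained count can be compared against that value.

areaWindow = (xMax - xMin) * (yMax - yMin)
print("points retained:", xx.size)
print("expected on average:", lambdaParent * lambdaDaughter * areaWindow)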
# # (c) 2019, Ansible by Red Hat, inc # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # from __future__ import absolute_import, division, print_function __metaclass__ = type from ansible_collections.cisco.ios.tests.unit.compat.mock import patch from ansible_collections.cisco.ios.plugins.modules import ios_ospf_interfaces from ansible_collections.cisco.ios.tests.unit.modules.utils import ( set_module_args, ) from .ios_module import TestIosModule, load_fixture class TestIosOspfInterfacesModule(TestIosModule): module = ios_ospf_interfaces def setUp(self): super(TestIosOspfInterfacesModule, self).setUp() self.mock_get_config = patch( "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.get_config" ) self.get_config = self.mock_get_config.start() self.mock_load_config = patch( "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.network.Config.load_config" ) self.load_config = self.mock_load_config.start() self.mock_get_resource_connection_config = patch( "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base." "get_resource_connection" ) self.get_resource_connection_config = ( self.mock_get_resource_connection_config.start() ) self.mock_get_resource_connection_facts = patch( "ansible_collections.ansible.netcommon.plugins.module_utils.network.common.rm_base.resource_module_base." "get_resource_connection" ) self.get_resource_connection_facts = ( self.mock_get_resource_connection_facts.start() ) self.mock_edit_config = patch( "ansible_collections.cisco.ios.plugins.module_utils.network.ios.providers.providers.CliProvider.edit_config" ) self.edit_config = self.mock_edit_config.start() self.mock_execute_show_command = patch( "ansible_collections.cisco.ios.plugins.module_utils.network.ios.facts.ospf_interfaces.ospf_interfaces." 
"Ospf_InterfacesFacts.get_ospf_interfaces_data" ) self.execute_show_command = self.mock_execute_show_command.start() def tearDown(self): super(TestIosOspfInterfacesModule, self).tearDown() self.mock_get_resource_connection_config.stop() self.mock_get_resource_connection_facts.stop() self.mock_edit_config.stop() self.mock_get_config.stop() self.mock_load_config.stop() self.mock_execute_show_command.stop() def load_fixtures(self, commands=None): def load_from_file(*args, **kwargs): return load_fixture("ios_ospf_interfaces.cfg") self.execute_show_command.side_effect = load_from_file def test_ios_ospf_interfaces_merged(self): set_module_args( dict( config=[ dict( name="GigabitEthernet0/2", address_family=[ dict( afi="ipv4", bfd=True, cost=dict(interface_cost=30), network=dict(broadcast=True), priority=60, resync_timeout=90, ttl_security=dict(hops=120), authentication=dict(key_chain="test_key"), ), dict( afi="ipv6", bfd=True, dead_interval=dict(time=100), network=dict(manet=True), priority=50, ), ], ), dict( name="GigabitEthernet0/3", address_family=[ dict( afi="ipv4", bfd=True, cost=dict(interface_cost=50), priority=50, ttl_security=dict(hops=150), ) ], ), ], state="merged", ) ) commands = [ "interface GigabitEthernet0/3", "ip ospf bfd", "ip ospf cost 50", "ip ospf priority 50", "ip ospf ttl-security hops 150", "interface GigabitEthernet0/2", "ip ospf authentication key-chain test_key", "ip ospf bfd", "ip ospf cost 30", "ip ospf network broadcast", "ip ospf priority 60", "ip ospf resync-timeout 90", "ip ospf ttl-security hops 120", "ipv6 ospf bfd", "ipv6 ospf dead-interval 100", "ipv6 ospf network manet", "ipv6 ospf priority 50", ] result = self.execute_module(changed=True) self.assertEqual(sorted(result["commands"]), sorted(commands)) def test_ios_ospf_interfaces_merged_idempotent(self): set_module_args( dict( config=[ dict( address_family=[ dict( afi="ipv4", adjacency=True, cost=dict(interface_cost=30), priority=40, process=dict(id=10, area_id="20"), ttl_security=dict(hops=50), ) ], name="GigabitEthernet0/2", ), dict( address_family=[ dict( afi="ipv6", adjacency=True, priority=20, process=dict(id=55, area_id="105"), transmit_delay=30, ) ], name="GigabitEthernet0/3", ), ], state="merged", ) ) self.execute_module(changed=False, commands=[]) def test_ios_ospf_interfaces_replaced(self): set_module_args( dict( config=[ dict( name="GigabitEthernet0/3", address_family=[ dict( afi="ipv4", bfd=True, cost=dict(interface_cost=50), priority=50, ttl_security=dict(hops=150), ) ], ) ], state="replaced", ) ) commands = [ "interface GigabitEthernet0/3", "ip ospf bfd", "ip ospf cost 50", "ip ospf priority 50", "ip ospf ttl-security hops 150", ] result = self.execute_module(changed=True) self.assertEqual(sorted(result["commands"]), sorted(commands)) def test_ios_ospf_interfaces_replaced_idempotent(self): set_module_args( dict( config=[ dict( address_family=[ dict( afi="ipv4", adjacency=True, cost=dict(interface_cost=30), priority=40, process=dict(id=10, area_id="20"), ttl_security=dict(hops=50), ) ], name="GigabitEthernet0/2", ), dict( address_family=[ dict( afi="ipv6", adjacency=True, priority=20, process=dict(id=55, area_id="105"), transmit_delay=30, ) ], name="GigabitEthernet0/3", ), ], state="replaced", ) ) self.execute_module(changed=False, commands=[]) def test_ios_ospf_interfaces_overridden(self): set_module_args( dict( config=[ dict( address_family=[ dict( afi="ipv6", manet=dict(cost=dict(percent=10)), priority=40, process=dict(id=10, area_id="20"), transmit_delay=50, ) ], name="GigabitEthernet0/3", ) 
], state="overridden", ) ) commands = [ "interface GigabitEthernet0/2", "no ip ospf 10 area 20", "no ip ospf adjacency stagger disable", "no ip ospf cost 30", "no ip ospf priority 40", "no ip ospf ttl-security hops 50", "interface GigabitEthernet0/3", "ipv6 ospf 10 area 20", "no ipv6 ospf adjacency stagger disable", "ipv6 ospf manet peering cost percent 10", "ipv6 ospf priority 40", "ipv6 ospf transmit-delay 50" "", ] result = self.execute_module(changed=True) self.assertEqual(sorted(result["commands"]), sorted(commands)) def test_ios_ospf_interfaces_overridden_idempotent(self): set_module_args( dict( config=[ dict( address_family=[ dict( afi="ipv4", adjacency=True, cost=dict(interface_cost=30), priority=40, process=dict(id=10, area_id="20"), ttl_security=dict(hops=50), ) ], name="GigabitEthernet0/2", ), dict( address_family=[ dict( afi="ipv6", adjacency=True, priority=20, process=dict(id=55, area_id="105"), transmit_delay=30, ) ], name="GigabitEthernet0/3", ), ], state="overridden", ) ) self.execute_module(changed=False, commands=[]) def test_ios_ospf_interfaces_deleted_interface(self): set_module_args( dict(config=[dict(name="GigabitEthernet0/2")], state="deleted") ) commands = [ "interface GigabitEthernet0/2", "no ip ospf priority 40", "no ip ospf adjacency stagger disable", "no ip ospf ttl-security hops 50", "no ip ospf 10 area 20", "no ip ospf cost 30", ] result = self.execute_module(changed=True) self.assertEqual(sorted(result["commands"]), sorted(commands)) def test_ios_ospf_interfaces_deleted_all(self): set_module_args(dict(config=[], state="deleted")) commands = [ "interface GigabitEthernet0/3", "no ipv6 ospf 55 area 105", "no ipv6 ospf adjacency stagger disable", "no ipv6 ospf priority 20", "no ipv6 ospf transmit-delay 30", "interface GigabitEthernet0/2", "no ip ospf 10 area 20", "no ip ospf adjacency stagger disable", "no ip ospf cost 30", "no ip ospf priority 40", "no ip ospf ttl-security hops 50", ] result = self.execute_module(changed=True) self.assertEqual(sorted(result["commands"]), sorted(commands)) def test_ios_ospf_interfaces_rendered(self): set_module_args( dict( config=[ dict( name="GigabitEthernet0/2", address_family=[ dict( afi="ipv4", bfd=True, cost=dict(interface_cost=30), network=dict(broadcast=True), priority=60, resync_timeout=90, ttl_security=dict(hops=120), ), dict( afi="ipv6", bfd=True, dead_interval=dict(time=100), network=dict(manet=True), priority=50, ), ], ), dict( name="GigabitEthernet0/3", address_family=[ dict( afi="ipv4", bfd=True, cost=dict(interface_cost=50), priority=50, ttl_security=dict(hops=150), ) ], ), ], state="rendered", ) ) commands = [ "interface GigabitEthernet0/3", "ip ospf bfd", "ip ospf cost 50", "ip ospf priority 50", "ip ospf ttl-security hops 150", "interface GigabitEthernet0/2", "ip ospf bfd", "ip ospf cost 30", "ip ospf network broadcast", "ip ospf priority 60", "ip ospf resync-timeout 90", "ip ospf ttl-security hops 120", "ipv6 ospf bfd", "ipv6 ospf dead-interval 100", "ipv6 ospf network manet", "ipv6 ospf priority 50", ] result = self.execute_module(changed=False) self.assertEqual(sorted(result["rendered"]), sorted(commands))
python
# Generated by Django 4.0 on 2021-12-29 18:47

from django.db import migrations, models
import django.db.models.deletion
import uuid


class Migration(migrations.Migration):

    dependencies = [
        ('games', '0001_initial'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='game',
            name='genre',
        ),
        migrations.RemoveField(
            model_name='game',
            name='plataform',
        ),
        migrations.CreateModel(
            name='GamePlataform',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.game')),
                ('plataform', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.plataform')),
            ],
        ),
        migrations.CreateModel(
            name='GameGenre',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.game')),
                ('genre', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='games.genre')),
            ],
        ),
    ]
python
#! /usr/bin/env python3

import sys

f = sys.stdin
s = f.read()
words = s.split()
n = len(words)

d = {}
for w in words:
    if w in d:
        d[w] += 1
    else:
        d[w] = 1


def foo(s):
    return d[s]


#sorted_keys = sorted(d.keys(), key=foo, reverse=True)
sorted_keys = sorted(d.keys(), key=lambda x: d[x], reverse=True)

i = 0
for k in sorted_keys:
    if i == 20:
        break
    print("{}: {}".format(k, d[k]))
    i += 1

print(d, file=sys.stdout, end='')
python
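The manual dictionary and sort above can also be written with collections.Counter; most_common(20) produces the same top-20 listing (ties may come out in a different order).

from collections import Counter

counts = Counter(words)
for word, freq in counts.most_common(20):
    print("{}: {}".format(word, freq))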
# -*- coding: utf-8 -*-

"""
Generic tests for all animations.

These tests run against all animation classes found in
earthstar.effects.animations.*.
"""

import glob
import os

import pytest

import earthstar.effects.animations as animations
from earthstar.effects.engine import EffectEngine
from earthstar.frame_utils import FrameConstants


def find_animations():
    pkg_folder = os.path.dirname(animations.__file__)
    pkg_modules = [
        os.path.splitext(os.path.basename(x))[0]
        for x in glob.glob(pkg_folder + "/*.py")
        if not x.endswith('/__init__.py')
    ]
    return [
        animations.import_animation(x) for x in pkg_modules
    ]


ANIMATIONS = find_animations()


@pytest.mark.parametrize("animation_cls", ANIMATIONS)
@pytest.mark.timeout(2.5)  # at least 40 frames per second
def test_generates_one_hundred_frames(animation_cls):
    """ Tests that each animation can generate one hundred frames correctly
        in a reasonable amount of time.
    """
    fc = FrameConstants()
    engine = EffectEngine(fc=fc, tick=1. / 10, transition=60)
    engine.add_animation_type(animation_cls)
    for i in range(100):
        frame = engine.next_frame()
        assert frame.shape == fc.frame_shape
        assert frame.dtype == fc.frame_dtype
python
import pandas as pd
import os
import sys

in_dir = sys.argv[1]
types = ['Right', 'Left']
out_df_base = 'russian_combined_{}'

files = [os.path.join(in_dir, f) for f in os.listdir(in_dir) if f.lower().endswith('.csv')]
# dfs = [pd.read_csv(f) for f in files]

for account_type in types:  # renamed from `type` to avoid shadowing the builtin
    outdir = account_type.lower()
    if not os.path.isdir(outdir):
        os.makedirs(outdir)
    for i, f in enumerate(files):
        df = pd.read_csv(f, encoding='utf-8')
        sub = df.loc[df.account_type == account_type]
        sub.to_csv(os.path.join(outdir, account_type + '_' + os.path.basename(f)))
python
""" Contains all the models that can be used to impute missing data. """ from .daema import Daema from .holoclean import Holoclean from .mida import MIDA from .miss_forest import MissForestImpute from .baseline_imputations import MeanImputation, Identity MODELS = { "DAEMA": Daema, "Holoclean": Holoclean, "MIDA": MIDA, "MissForest": MissForestImpute, "Mean": MeanImputation, "Real": Identity, # Not a proper imputation algorithm, handled separately in the run.py file }
python
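A hypothetical way the registry above could be consumed; the constructor arguments are left open because each model class defines its own signature.

def build_model(name, *args, **kwargs):
    try:
        model_cls = MODELS[name]
    except KeyError:
        raise ValueError("unknown model {!r}; choose from {}".format(name, sorted(MODELS)))
    return model_cls(*args, **kwargs)

# model = build_model("Mean")  # e.g. the mean-imputation baseline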
from django.contrib import admin

from .models import AdminlteLog, AdminlteLogType

admin.site.register(AdminlteLog)
admin.site.register(AdminlteLogType)
python
from libsvm.python.svmutil import *
from libsvm.python.svm import *
import os
import struct
import numpy

dic = {}


# Data-loading function; `kind` selects which MNIST file pair to read.
def loadforSVM(path, kind='train'):
    labels_path = os.path.join(path, '%s-labels.idx1-ubyte' % kind)
    images_path = os.path.join(path, '%s-images.idx3-ubyte' % kind)
    with open(labels_path, 'rb') as lbpath:
        magic, n = struct.unpack('>II', lbpath.read(8))
        labels = numpy.fromfile(lbpath, dtype=numpy.uint8)
    with open(images_path, 'rb') as imgpath:
        magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))
        images = numpy.fromfile(imgpath, dtype=numpy.uint8).reshape(len(labels), 784)
    # Some raw values are large enough to overflow the activation function, so
    # the whole dataset is scaled down; since every pixel value lies in 0-255,
    # dividing by 255 normalizes it.
    if kind == 'train':
        f = open('trainforSVM.txt', 'w')
    if kind == 't10k':
        f = open('testforSVM.txt', 'w')
    count = 0
    for i in range(10):
        for j in range(len(images)):
            index = 1
            if labels[j] == i:
                string = str(i) + ' '
                for k in images[j]:
                    string = string + str(index) + ':' + str(k / 255) + ' '
                    index += 1
                f.writelines(string + '\n')
                dic[count] = j
                count += 1
    f.close()


if __name__ == '__main__':
    loadforSVM("C:\\Users\\Anonymous\\Documents\\机器学习\\作业四赵虎201600301325", kind='train')
    loadforSVM("C:\\Users\\Anonymous\\Documents\\机器学习\\作业四赵虎201600301325", kind='t10k')
    y, x = svm_read_problem('trainforSVM.txt')
    yt, xt = svm_read_problem('testforSVM.txt')
    model = svm_train(y, x, '-t 0 -m 600')
    # print('test:')
    p_label, p_acc, p_val = svm_predict(yt, xt, model)
    f = open('classificationforSVM.txt', 'w')
    for i in range(len(p_label)):
        # f.write(str(int(p_label[dic[i]])) + ' ')
        f.write(str(int(p_label[i])) + ' ')
    f1 = open("classificationforSVM.txt")
    s = f1.read().split()
    dic1 = {}
    for i in range(10000):
        dic1[dic[i]] = i
    f2 = open("classificationforlinearSVM.txt", 'w')
    for i in range(10000):
        f2.write(s[dic1[i]] + ' ')
python
from abc import abstractmethod, ABC
from typing import Callable, TypeVar

T = TypeVar("T")


class Policy(ABC):
    @abstractmethod
    def execute(self, function: Callable[[], T]) -> T:
        """
        Accepts a lambda function and executes it with pre-defined policy parameters

        Example: p.execute(lambda: api.call(1, 2))

        :param function: lambda function to be executed
        :return: function result
        """
        raise NotImplementedError
python
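For illustration, one possible concrete policy built on the ABC above: a simple fixed-delay retry. It is not part of the original module, and the attempt count and delay are arbitrary.

import time
from typing import Callable, Optional


class RetryPolicy(Policy):
    def __init__(self, attempts: int = 3, delay: float = 0.5) -> None:
        self.attempts = attempts
        self.delay = delay

    def execute(self, function: Callable[[], T]) -> T:
        last_error: Optional[Exception] = None
        for _ in range(self.attempts):
            try:
                return function()
            except Exception as error:  # illustrative: real code would be more selective
                last_error = error
                time.sleep(self.delay)
        raise last_error


# p = RetryPolicy(attempts=5)
# result = p.execute(lambda: api.call(1, 2))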
# Generated by Django 4.0.2 on 2022-03-06 06:44

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('polls', '0002_challenges_game_delete_choice_delete_question_and_more'),
    ]

    operations = [
        migrations.AddField(
            model_name='game',
            name='console',
            field=models.CharField(default='N/A', max_length=100),
        ),
    ]
python
import os, sys, time sys.path.append(os.getcwd()) import torch import torchvision from torch import nn from torch import autograd from torch import optim import torch.nn.functional as F import time import tflib as lib import tflib.save_images import tflib.mnist import tflib.cifar10 import tflib.plot #import tflib.inception_score import numpy as np from tqdm import tqdm # Download CIFAR-10 (Python version) at # https://www.cs.toronto.edu/~kriz/cifar.html and fill in the path to the # extracted files here! DATA_DIR = '/mnt/7FC1A7CD7234342C/cifar-10-batches-py/' OUTPUT_BASE_DIR = '/mnt/7FC1A7CD7234342C/cifar10-results/' RUN_PATH = '{}{}/'.format(OUTPUT_BASE_DIR, time.strftime('%Y_%m_%d_%H_%M_%S')) #TODO: generate by settings if not os.path.exists(RUN_PATH): os.mkdir(RUN_PATH) #TODO:hack tflib.plot.log_dir = RUN_PATH if len(DATA_DIR) == 0: raise Exception('Please specify path to data directory in gan_cifar.py!') DIM = 64 # This overfits substantially; you're probably better off with 64 CRITIC_DIM = 64 # ambition INPUT_DIM = 128 # generator input dimension (latent variable dimension) LAMBDA = 10 # Gradient penalty lambda hyperparameter CRITIC_ITERS = 5 # How many critic iterations per generator iteration BATCH_SIZE = 64 # Batch size ITERS = 100000 # How many generator iterations to train for OUTPUT_DIM = 3072 # Number of pixels in CIFAR10 (3*32*32) KERNEL_SIZE = 4 CONSTANCY_LOSS = False CONSTANCY_LAMBDA = 8 LR = 1e-4 GENERATOR_INSTANCE_NORM = nn.BatchNorm2d ENCODER_INSTANCE_NORM = False # TODO DISCRIMINATOR_RECONSTRUCTION_LOSS = False DISCRIMINATOR_RECONSTRUCTION_LAMBDA = 8 GENERATOR_AUTOENCODER_LOSS = False GENERATOR_AUTOENCODER_LAMBDA = 1 GENERATOR_SCORE_LOSS = False GENERATOR_SCORE_LAMBDA = 8 AUTOENCODER_GP = False ONE_SIDED = False params = dict( MODE = 'cramer', # Valid options are dcgan, wgan, or wgan-gp DIM = DIM, # This overfits substantially; you're probably better off with 64 INPUT_DIM = INPUT_DIM, # generator input dimension (latent variable dimension) LAMBDA = LAMBDA, # Gradient penalty lambda hyperparameter CRITIC_ITERS = CRITIC_ITERS, # How many critic iterations per generator iteration BATCH_SIZE = BATCH_SIZE, # Batch size ITERS = ITERS, # How many generator iterations to train for OUTPUT_DIM = OUTPUT_DIM, # Number of pixels in CIFAR10 (3*32*32) KERNEL_SIZE = KERNEL_SIZE, GENERATOR_INSTANCE_NORM = GENERATOR_INSTANCE_NORM.__name__, ENCODER_INSTANCE_NORM = ENCODER_INSTANCE_NORM, DISCRIMINATOR_RECONSTRUCTION_LOSS = DISCRIMINATOR_RECONSTRUCTION_LOSS, LR=LR, AUTOENCODER_GP = AUTOENCODER_GP, ONE_SIDED=ONE_SIDED, CONSTANCY_LOSS = CONSTANCY_LOSS, CONSTANCY_LAMBDA = CONSTANCY_LAMBDA, GENERATOR_SCORE_LOSS = GENERATOR_SCORE_LOSS, GENERATOR_SCORE_LAMBDA = GENERATOR_SCORE_LAMBDA, GENERATOR_AUTOENCODER_LOSS = GENERATOR_AUTOENCODER_LOSS, GENERATOR_AUTOENCODER_LAMBDA = GENERATOR_AUTOENCODER_LAMBDA, CRITIC_DIM=CRITIC_DIM, ) with open(RUN_PATH + '/algo_params.txt','w') as f: import json json.dump(params, f, indent=2) def _upscale_resize(in_dim, out_dim, kernel_size): return nn.Sequential( nn.InstanceNorm2d(in_dim, affine=True), nn.Upsample(scale_factor=2, mode='nearest'), nn.ReflectionPad2d((1,2,1,2)), nn.Conv2d(in_dim, out_dim, kernel_size, bias=False) ) def _upblock(in_dim, out_dim, kernel_size, padding, norm=nn.InstanceNorm2d, non_linearity=lambda: nn.ReLU(True)): blocks = [] bias_conv = not norm # if no norm them add bias parameter if norm is not None: blocks.append(norm(in_dim)) blocks.append(nn.ConvTranspose2d(in_dim, out_dim, kernel_size, stride=2, padding=padding, bias=bias_conv)) 
blocks.append(non_linearity()) return nn.Sequential(*blocks) class Generator(nn.Module): def __init__(self, norm=GENERATOR_INSTANCE_NORM): super(Generator, self).__init__() preprocess = nn.Sequential( #nn.InstanceNorm2d(4 * 4 * 4 * DIM), nn.Linear(INPUT_DIM, 4 * 4 * 4 * DIM), nn.ReLU(True), ) non_linearity = nn.ReLU #block1 = _upscale_resize(4 * DIM, 2 * DIM, KERNEL_SIZE) #block2 = _upscale_resize(2 * DIM, DIM, KERNEL_SIZE) #self.last_norm = nn.InstanceNorm2d(DIM, affine=True) #deconv_out = nn.ConvTranspose2d(DIM, 3, KERNEL_SIZE, stride=2, padding=1, bias=False) #self.out_norm = nn.InstanceNorm2d(3, affine=True) self.preprocess = preprocess self.block1 = _upblock(4 * DIM, 2 * DIM, KERNEL_SIZE, 1, norm=norm, non_linearity=non_linearity) self.block2 = _upblock(2 * DIM, DIM, KERNEL_SIZE, 1, norm=norm, non_linearity=non_linearity) self.block_out = _upblock(DIM, 3, KERNEL_SIZE, 1, norm=norm, non_linearity=nn.Tanh) #self.deconv_out = deconv_out #self.tanh = nn.Tanh() def forward(self, input): output = self.preprocess(input) output = output.view(-1, 4 * DIM, 4, 4) #print(output.size()) output = self.block1(output) #print(output.size()) output = self.block2(output) #print(output.size()) output = self.block_out(output) #output = self.deconv_out(self.last_norm(output)) #output = self.deconv_out(output) #output = self.tanh(output) #output = self.out_norm(output) return output.view(-1, 3, 32, 32) class Encoder(nn.Module): def __init__(self, dim): super().__init__() if ENCODER_INSTANCE_NORM: main = nn.Sequential( nn.Conv2d(3, dim, KERNEL_SIZE, 2, padding=1, bias=False), nn.InstanceNorm2d(dim), nn.LeakyReLU(0.2, True), nn.Conv2d(dim, 2 * dim, KERNEL_SIZE, 2, padding=1, bias=False), nn.InstanceNorm2d(2 * dim), nn.LeakyReLU(0.2, True), nn.Conv2d(2 * dim, 4 * dim, KERNEL_SIZE, 2, padding=1, bias=False), nn.InstanceNorm2d(4 * dim), nn.LeakyReLU(0.2, True), ) else: main = nn.Sequential( nn.Conv2d(3, dim, KERNEL_SIZE, 2, padding=1, bias=True), nn.LeakyReLU(0.2, True), nn.Conv2d(dim, 2 * dim, KERNEL_SIZE, 2, padding=1, bias=True), nn.LeakyReLU(0.2, True), nn.Conv2d(2 * dim, 4 * dim, KERNEL_SIZE, 2, padding=1, bias=True), nn.LeakyReLU(0.2, True), ) self.dim = dim self.main = main self.linear = nn.Linear(4*4*4*dim, INPUT_DIM) def forward(self, input): output = self.main(input) before_linear = output.view(-1, 4 * 4 * 4 * self.dim) output = self.linear(before_linear) return output def cramer_loss(net_real, independent_encoded): "f from cramer gan paper" return torch.norm(net_real - independent_encoded, p=2, dim=-1) - \ torch.norm(net_real, p=2, dim=-1) def critic_schedule(): for i in range(10): yield 100 while True: yield CRITIC_ITERS def gen_schedule(): for i in range(10): yield 1 for i in range(100): yield 1 for i in range(7000): yield 1 while True: yield 1 # custom weights initialization called on netG and netD def weights_init(m): classname = m.__class__.__name__ if classname.find('Conv') != -1: m.weight.data.normal_(0.0, 0.02) if m.bias is not None: m.bias.data.fill_(0) elif classname.find('Norm') != -1: if m.weight is not None: m.weight.data.normal_(1.0, 0.02) m.bias.data.fill_(0) elif classname.find('Linear') != -1: m.weight.data.normal_(0.0, 0.01) m.bias.data.fill_(0) def print_weights(m): if isinstance(m, (nn.Conv2d, nn.Linear)): print(m.weight) if m.bias is not None: print(m.bias) def print_grads(m): if isinstance(m, (nn.Conv2d, nn.Linear)): print(m.weight.grad) if m.bias is not None: print(m.bias.grad) netG = Generator() netD = Encoder(CRITIC_DIM) netG.apply(weights_init) netD.apply(weights_init) 
print(netG) print(netD) use_cuda = torch.cuda.is_available() mse_loss = torch.nn.MSELoss() if use_cuda: gpu = 0 # makes things slower?! torch.backends.cudnn.benchmark = True if use_cuda: netD = netD.cuda(gpu) netG = netG.cuda(gpu) mse_loss = mse_loss.cuda(gpu) one = torch.FloatTensor([1]) mone = one * -1 if use_cuda: one = one.cuda(gpu) mone = mone.cuda(gpu) optimizerD = optim.Adam(netD.parameters(), lr=LR, betas=(0.5, 0.9)) optimizerG = optim.Adam(netG.parameters(), lr=LR, betas=(0.5, 0.9)) netG.train() netD.train() def calc_gradient_penalty(netD, netG, real_data, fake_data, encoded): if AUTOENCODER_GP: fake_data = netG(encoded) #TODO:investigate # print "real_data: ", real_data.size(), fake_data.size() alpha = torch.rand(BATCH_SIZE, 1) alpha = alpha.expand(BATCH_SIZE, real_data.nelement()//BATCH_SIZE).contiguous().view(BATCH_SIZE, 3, 32, 32) alpha = alpha.cuda(gpu) if use_cuda else alpha interpolates = alpha * real_data + ((1 - alpha) * fake_data.data) if use_cuda: interpolates = interpolates.cuda(gpu) interpolates = autograd.Variable(interpolates, requires_grad=True) # TODO: clashes with autoencoder_gp? disc_interpolates = cramer_loss(netD(interpolates), encoded) gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates, grad_outputs=torch.ones(disc_interpolates.size()).cuda(gpu) if use_cuda else torch.ones( disc_interpolates.size()), create_graph=True, retain_graph=True, only_inputs=True)[0] gradients = gradients.view(gradients.size(0), -1) if ONE_SIDED: gradient_penalty = (F.relu(gradients.norm(2, dim=1) - 1, inplace=True) ** 2).mean() * LAMBDA else: gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA return gradient_penalty # For generating samples def generate_image(frame, netG, input): noisev = autograd.Variable(input, volatile=True) netG.eval() samples = netG(noisev) netG.train() save_images(samples, RUN_PATH + 'samples_{}.jpg'.format(frame)) def save_images(images_tensor, output_path): samples = images_tensor.view(-1, 3, 32, 32) samples = samples.mul(0.5).add(0.5) samples = samples.cpu().data.numpy() lib.save_images.save_images(samples, output_path) # For calculating inception score def get_inception_score(G, ): all_samples = [] for i in xrange(10): samples_100 = torch.randn(100, INPUT_DIM) if use_cuda: samples_100 = samples_100.cuda(gpu) samples_100 = autograd.Variable(samples_100, volatile=True) all_samples.append(G(samples_100).cpu().data.numpy()) all_samples = np.concatenate(all_samples, axis=0) all_samples = np.multiply(np.add(np.multiply(all_samples, 0.5), 0.5), 255).astype('int32') all_samples = all_samples.reshape((-1, 3, 32, 32)).transpose(0, 2, 3, 1) return lib.inception_score.get_inception_score(list(all_samples)) # Dataset iterator train_gen, dev_gen = lib.cifar10.load(BATCH_SIZE, data_dir=DATA_DIR, cuda=use_cuda) def inf_train_gen(): while True: for images in train_gen(): # yield images.astype('float32').reshape(BATCH_SIZE, 3, 32, 32).transpose(0, 2, 3, 1) yield images gen = inf_train_gen() #preprocess = torchvision.transforms.Compose([ # torchvision.transforms.ToTensor(), # torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), # ]) preprocess = torchvision.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) CRITIC_GEN = critic_schedule() GEN_ITERS = gen_schedule() noise = torch.randn(BATCH_SIZE, INPUT_DIM) noise_independent = torch.randn(BATCH_SIZE, INPUT_DIM) if use_cuda: noise = noise.cuda(gpu) noise_independent = noise_independent.cuda(gpu) for iteration in tqdm(range(ITERS)): start_time = time.time() 
############################ # (1) Update D network ########################### for p in netD.parameters(): # reset requires_grad p.requires_grad = True # they are set to False below in netG update for p in netG.parameters(): # reset requires_grad p.requires_grad = False # they are set to False below in netG update #for i in range(CRITIC_ITERS): netG.eval() netD.train() for i in range(next(CRITIC_GEN)): _data = next(gen) netD.zero_grad() noise.normal_(0, 1) noise_independent.normal_(0, 1) noisev = autograd.Variable(noise, volatile=True) noisev_independent = autograd.Variable(noise_independent, volatile=True) # Generate two independent fake batches fake = autograd.Variable(netG(noisev).data) fake_independent = autograd.Variable(netG(noisev_independent).data) # train with real _data = _data.view((BATCH_SIZE, 3, 32, 32)) real_data = _data # preprocess(_data)#torch.stack([preprocess(item) for item in _data]) #if use_cuda: # real_data = real_data.cuda(gpu) real_data_v = autograd.Variable(real_data) # import torchvision # filename = os.path.join("test_train_data", str(iteration) + str(i) + ".jpg") # torchvision.utils.save_image(real_data, filename) encoded_independent = netD(fake_independent) encoded_real = netD(real_data_v) D_real = cramer_loss(encoded_real, encoded_independent) encoded_fake = netD(fake) D_fake = cramer_loss(encoded_fake, encoded_independent) #print(D_real, D_fake) loss = (D_fake - D_real).mean() #netD.apply(print_weights) #print(fake) if CONSTANCY_LOSS: c_loss = CONSTANCY_LAMBDA * mse_loss(encoded_fake, autograd.Variable(noise)) loss += c_loss # train with gradient penalty gradient_penalty = calc_gradient_penalty(netD, netG, real_data_v.data, fake, encoded_real) loss += gradient_penalty loss.backward() # print "gradien_penalty: ", gradient_penalty D_cost = loss.data # TODO: D_cost = loss.data[0] Wasserstein_D = (D_real - D_fake).data.mean() optimizerD.step() ############################ # (2) Update G network ########################### netG.train() #netD.eval() # screws up cuda? 
for p in netD.parameters(): p.requires_grad = False # to avoid computation for p in netG.parameters(): # reset requires_grad p.requires_grad = True # they are set to False below in netG update for i in range(next(GEN_ITERS)): netG.zero_grad() _data = next(gen) real = autograd.Variable(_data.view((BATCH_SIZE, 3, 32, 32))) #if use_cuda: # real = real.cuda() noise.normal_(0, 1) noise_independent.normal_(0, 1) noisev1 = autograd.Variable(noise) noisev2 = autograd.Variable(noise_independent) fake1 = netG(noisev1) fake2 = netG(noisev2) real_encoded = netD(real) fake1_encoded = netD(fake1) fake2_encoded = netD(fake2) G = (torch.norm(real_encoded - fake1_encoded, p=2, dim=-1) + torch.norm(real_encoded - fake2_encoded, p=2, dim=-1) - torch.norm(fake1_encoded - fake2_encoded, p=2, dim=-1)).mean() if GENERATOR_SCORE_LOSS or GENERATOR_AUTOENCODER_LOSS: real_data_v = autograd.Variable(next(gen).view((BATCH_SIZE, 3, 32, 32)), volatile=True) #if use_cuda: # real_data_v = real_data_v.cuda() real_latent = netD(real_data_v) real_latent = autograd.Variable(real_latent.data) reconstructed = netG(autograd.Variable(real_latent.data)) if GENERATOR_AUTOENCODER_LOSS: gen_ae_loss = mse_loss(reconstructed, real_data_v) G += GENERATOR_AUTOENCODER_LAMBDA * gen_ae_loss if GENERATOR_SCORE_LOSS: gen_rec_loss = ((real_latent - netD(reconstructed))**2).mean() G += GENERATOR_SCORE_LAMBDA * gen_rec_loss G.backward() G_cost = G.data optimizerG.step() # Write logs and save samples lib.plot.plot(RUN_PATH + 'train disc cost', D_cost.cpu().numpy()) lib.plot.plot(RUN_PATH + 'time', time.time() - start_time) lib.plot.plot(RUN_PATH + 'train gen cost', G_cost.cpu().numpy()) lib.plot.plot(RUN_PATH + 'wasserstein distance', Wasserstein_D) # Calculate inception score every 1K iters if False and iteration % 1000 == 999: inception_score = get_inception_score(netG) lib.plot.plot(RUN_PATH + 'inception score', inception_score[0]) # Calculate dev loss and generate samples every 200 iters if iteration % 200 == 199: dev_disc_costs = [] #TODO: netD.eval() for images in dev_gen(): images = images.view((BATCH_SIZE, 3, 32, 32)) imgs = images#preprocess(images) #imgs = preprocess(images) #if use_cuda: # imgs = imgs.cuda(gpu) imgs_v = autograd.Variable(imgs, volatile=True) D = netD(imgs_v) _dev_disc_cost = -D.mean().cpu().data.numpy() dev_disc_costs.append(_dev_disc_cost) netD.train() lib.plot.plot(RUN_PATH + 'dev disc cost', np.mean(dev_disc_costs)) fixed_noise_128 = torch.randn(128, INPUT_DIM) if use_cuda: fixed_noise_128 = fixed_noise_128.cuda(gpu) generate_image(iteration, netG, fixed_noise_128) generate_image("{}_reconstruct".format(iteration), netG, D.data) save_images(imgs_v, RUN_PATH + 'samples_{}_original.jpg'.format(iteration)) #print(encoded) #print(fixed_noise_128) # Save logs every 200 iters if (iteration < 5) or (iteration % 100 == 99): lib.plot.flush() lib.plot.tick() state_dict = { 'iters': iteration + 1, 'algo_params': params, 'gen_state_dict': netG.state_dict(), 'critic_state_dict': netD.state_dict(), 'optimizerG' : optimizerG.state_dict(), 'optimizerD' : optimizerD.state_dict(), } torch.save(state_dict, RUN_PATH + 'final.pth.tar')
python
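To make the critic objective above easier to see in isolation, here is a toy-tensor sketch of the Cramér-GAN surrogate used in the script: f scores a batch of critic embeddings against an independently generated fake batch, and the critic is trained to maximise E[f(real)] - E[f(fake)] (the training loop minimises the negative of this plus the gradient penalty). The shapes below are arbitrary and no networks are involved.

import torch

def cramer_f(h, h_independent):
    # same role as cramer_loss() in the script above
    return torch.norm(h - h_independent, p=2, dim=-1) - torch.norm(h, p=2, dim=-1)

h_real = torch.randn(8, 128)         # critic embeddings of a real batch
h_fake = torch.randn(8, 128)         # critic embeddings of one fake batch
h_fake_indep = torch.randn(8, 128)   # embeddings of an independent fake batch

critic_objective = (cramer_f(h_real, h_fake_indep) - cramer_f(h_fake, h_fake_indep)).mean()
print(float(critic_objective))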
# @Author: Anas Mazouni <Stormix> # @Date: 2017-05-17T23:59:31+01:00 # @Email: [email protected] # @Project: PluralSight Scraper V1.0 # @Last modified by: Stormix # @Last modified time: 2017-05-18T17:08:22+01:00 import selenium as sl import os,time,inspect from sys import platform from selenium import webdriver from selenium.common.exceptions import NoSuchElementException from selenium.webdriver.common.keys import Keys import config from slugify import slugify from clint.textui import progress import requests class PluralCourse: """ Course Class. """ link = "" title = "" browser = "" delay = 3 Username = config.Username Password = config.Password output = "Download" #output folder def __init__(self,link): self.link = link def launchBrowser(self): assert not self.browser, "Browser already set !" # Initiate the Browser webdriver currentfolder = os.path.dirname(os.path.abspath(inspect.stack()[0][1])) # Check which operating system is being used ! if platform == "linux" or platform == "linux2": # linux chrome_driver = currentfolder+"/chromedriver" elif platform == "win32": # Windows chrome_driver = currentfolder+"/chromedriver.exe" self.browser = webdriver.Chrome(chrome_driver) Browser = self.browser Website = self.link # Open Pronote Browser.get(Website) print("Browser Initiated !") print("Loading .. " + Website, end =' ') time.sleep(self.delay) print(u'\u2713') def checkLoginAlert(self): try: self.browser.find_element_by_css_selector(".ps-button-primary-md.mr-lg") except NoSuchElementException: return False return True def pausePlayback(self): body = self.browser.find_element_by_css_selector("body"); body.send_keys(Keys.SPACE); def login(self): assert self.checkLoginAlert(), "Already logged in !" loginButton = self.browser.find_element_by_css_selector(".ps-button-primary-md.mr-lg") # Go to login page loginButton.click() # Define the login form Browser = self.browser usernameInput = "Username" passwordInput = "Password" LoginButtonClass = ".button.primary" # Fill in the login form username_log = Browser.find_element_by_id(usernameInput) password_log = Browser.find_element_by_id(passwordInput) username_log.send_keys(self.Username) password_log.send_keys(self.Password) # Click the connect buttun print("Logging in ...",end=" ") Browser.find_element_by_css_selector(LoginButtonClass).click() time.sleep(self.delay) self.pausePlayback() print(u'\u2713') def downloadEpisodes(self): #Create output folder self.createDir(self.output) titlesClass = ".m-0.p-0.ps-color-white.ps-type-sm.ps-type-weight-medium" moduleClass = ".module" episodesListClass = ".clips.m-0.p-0" modules = {} modulesSections = [elt.click() for elt in self.browser.find_elements_by_css_selector(moduleClass)] # Click all sections ModuleTitles = [element.text for element in self.browser.find_elements_by_css_selector(titlesClass)] # Looping through each title #Fetching the modules episodes lists Modules = self.browser.find_elements_by_css_selector(episodesListClass) for i in range(len(ModuleTitles)): #Create output folder self.createDir(self.output+"/"+slugify(ModuleTitles[i])) #For each list items(li) in the each list(ul) ,Get the titles (h3) ModuleEpisodesList = [elt.find_element_by_tag_name('h3').text for elt in [elt for elt in Modules[i].find_elements_by_tag_name('li')]] for j in range(len(ModuleEpisodesList)): self.createDir(self.output+"/"+slugify(ModuleTitles[i])+"/"+slugify(ModuleEpisodesList[j])) # Get the episode elemnt self.browser.find_element_by_xpath("//*[contains(text(), '"+ModuleEpisodesList[j]+"')]").click() 
time.sleep(self.delay*1.5) self.pausePlayback() print("Downloading : ",slugify(ModuleEpisodesList[j])+".mp4") path =self.output+"/"+slugify(ModuleTitles[i])+"/"+slugify(ModuleEpisodesList[j])+"/"+slugify(ModuleEpisodesList[j])+".mp4" if not os.path.exists(path): self.download(self.getVideoLink(),path) else: print("Already downloaded ... skipping \n") # Store the module title and episodes list modules[ModuleTitles[i].replace(" ", "_")] = ModuleEpisodesList return modules def getVideoLink(self): video_elt = self.browser.find_element_by_tag_name('video') link = video_elt.get_attribute("src") return link def createDir(self,Dir): if not os.path.exists(Dir): os.makedirs(Dir) print("<"+Dir+"> folder created !") def download(self,url,path): r = requests.get(url, stream=True) with open(path, 'wb') as f: total_length = int(r.headers.get('content-length')) for chunk in progress.bar(r.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1): if chunk: f.write(chunk) f.flush()
python
''' Learning rate schedulers. ''' import json import torch import torch.optim.lr_scheduler as lr_sched from typing import Any from cosine_scheduler import CosineLRWithRestarts def step(optimizer, last_epoch, step_size=10, gamma=0.1, **_) -> Any: return lr_sched.StepLR(optimizer, step_size=step_size, gamma=gamma, last_epoch=last_epoch) def multi_step(optimizer, last_epoch, milestones=[500, 5000], gamma=0.1, **_) -> Any: if isinstance(milestones, str): milestones = json.loads(milestones) return lr_sched.MultiStepLR(optimizer, milestones=milestones, gamma=gamma, last_epoch=last_epoch) def exponential(optimizer, last_epoch, gamma=0.995, **_) -> Any: return lr_sched.ExponentialLR(optimizer, gamma=gamma, last_epoch=last_epoch) def none(optimizer, last_epoch, **_) -> Any: return lr_sched.StepLR(optimizer, step_size=10000000, last_epoch=last_epoch) def reduce_lr_on_plateau(optimizer, last_epoch, mode='max', factor=0.1, patience=10, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, **_) -> Any: return lr_sched.ReduceLROnPlateau(optimizer, mode=mode, factor=factor, patience=patience, threshold=threshold, threshold_mode=threshold_mode, cooldown=cooldown, min_lr=min_lr) def cyclic_lr(optimizer, last_epoch, base_lr=0.001, max_lr=0.01, step_size_up=2000, step_size_down=None, mode='triangular', gamma=1.0, scale_fn=None, scale_mode='cycle', cycle_momentum=False, base_momentum=0.8, max_momentum=0.9, coeff=1, **_) -> Any: def exp_range_scale_fn(x): res = gamma ** (x - 1) return res return lr_sched.CyclicLR(optimizer, base_lr=base_lr*coeff, max_lr=max_lr*coeff, step_size_up=step_size_up, step_size_down= step_size_down, mode=mode, scale_fn=exp_range_scale_fn, scale_mode=scale_mode, cycle_momentum= cycle_momentum, base_momentum=base_momentum, max_momentum=max_momentum, last_epoch=last_epoch) def get_scheduler(config, optimizer, last_epoch=-1, coeff=1): func = globals().get(config.name) return func(optimizer, last_epoch, coeff=coeff, **config.params) def is_scheduler_continuous(scheduler) -> bool: if tuple(torch.__version__.split('.')) >= tuple(['1', '1', '0']): return type(scheduler) in [lr_sched.ExponentialLR, lr_sched.CosineAnnealingLR, lr_sched.CyclicLR, CosineLRWithRestarts] else: return type(scheduler) in [lr_sched.ExponentialLR, lr_sched.CosineAnnealingLR, CosineLRWithRestarts] def get_warmup_scheduler(config, optimizer) -> Any: return lr_sched.CyclicLR(optimizer, base_lr=0, max_lr=config.train.warmup.max_lr, step_size_up=config.train.warmup.steps, step_size_down=0, cycle_momentum=False, mode='triangular')
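# A minimal usage sketch, not part of the original module: it assumes a config
# object exposing `.name` and `.params` (faked here with SimpleNamespace) and a
# toy model/optimizer, and shows how get_scheduler() above could be driven.
if __name__ == '__main__':
    from types import SimpleNamespace

    import torch.nn as nn
    import torch.optim as optim

    model = nn.Linear(10, 2)
    optimizer = optim.SGD(model.parameters(), lr=0.01)
    config = SimpleNamespace(name='multi_step',
                             params={'milestones': [10, 20], 'gamma': 0.5})

    scheduler = get_scheduler(config, optimizer, last_epoch=-1)
    for epoch in range(30):
        optimizer.step()   # a real training step (forward/backward) would go here
        scheduler.step()   # advance the learning-rate schedule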
python
#!/usr/bin/python
# encoding: utf-8

"""
@author: Ian
@file: serializers.py.py
@time: 2019-04-30 12:23
"""
from rest_framework import serializers
from snippets.models import Snippet
from dicproj.models import Dic, CsvFile


class SnippetSerializer(serializers.ModelSerializer):
    class Meta:
        model = Snippet
        fields = ('id', 'title', 'code', 'linenos', 'language', 'style')


class DicSerializer(serializers.ModelSerializer):
    class Meta:
        model = Dic
        fields = ('code', 'name')


class CsvFileSerializer(serializers.ModelSerializer):
    class Meta:
        model = CsvFile
        fields = '__all__'
python
from django.db import models from django.utils import timezone from django.contrib.auth.models import User from django.db.models.signals import post_save from django.dispatch import receiver import app.core.patch # La solución planteada tiene ventajas y desventajas. Como ventaja, se usa el # sistema de autenticación de django, y no hay que hacer muchas cosas pues ya # vienen hechas. Cada entidad que es logueable, actua a modo de "perfil" de # usuario, conteniendo información adicional a los datos básicos que sirven para # loguear al usuario, etc. # Además, cada vez que se crea un usuario, sea desde el registro o desde el admin, # se le crean perfiles asociados (Acá viene la desventaja, si creo un usuario, # se le crean dos perfiles, uno de desocupado y uno de empresa, a lo cual, siempre # tengo un perfil que no uso, porq un desocupado no es una empresa, asi que me # quedan elementos vacíos por varios lados, pero bue) # Por otro lado, a un usuario se le puede preguntar si es o no un desocupado, o # si es o no una empresa, y pedir el "perfil" que devuelve o bien una empresa o # bien un desocupado, dependiendo de lo que se haya cargado. class Desocupado(models.Model): # Las cosas logueables tienen que tener este campo adicional. # Estas entidad actuan entonces como perfil de un usuario, y guardan # datos adicionales a los que se guarda en un usuario tradicional de Django user = models.OneToOneField(User, on_delete=models.CASCADE) # El resto de los campos son los que yo quiero tener el perfil. Notece que # algunos campos como el nombre, el apellido, o el email, ya están incluidos # en el usuario de django, pero se pueden clonar tranquilamente acá. nombre = models.CharField(max_length=20) apellido = models.CharField(max_length=20) fecha_nacimiento = models.DateField(null=True) localidad = models.CharField(max_length=20,null=True) estado_ocupacion = models.BooleanField(default=False) experiencia_laboral = models.TextField(null=True) formacion = models.TextField(null=True) habilidades = models.TextField(null=True) trabajo_realizable = models.CharField(max_length=50, null=True) dni = models.CharField(max_length=10, null=True) # Como se representa como texto, o sea, como se ve en el admin def __str__(self): return "Desocupado: " + str(self.nombre) + " " + str(self.apellido) + " de " + str(self.user.username) # Si se crea un usuario, se crea automáticamente un Desocupado @receiver(post_save, sender=User) def update_user_desocupado(sender, instance, created, **kwargs): if created: Desocupado.objects.create(user=instance, nombre=instance.first_name, apellido=instance.last_name) instance.desocupado.save() class Empresa(models.Model): # La empresa también es logueable, idem desocupado user = models.OneToOneField(User, on_delete=models.CASCADE) # El resto de los campos cuit = models.IntegerField(default=0) razon_social = models.CharField(max_length=50, null=True) rubro = models.CharField(max_length=30, null=True) # oferta_laboral = models.ForeignKey('OfertaLaboral') # Como se representa como texto, o sea, como se ve en el admin def __str__(self): return "Empresa" + str(self.razon_social) + " de " + str(self.user.username) #class EliminarUsuario(models.Model): # username = models.CharField(max_length=50) # Si se crea un usuario, se crea automáticamente una Empresa @receiver(post_save, sender=User) def update_user_empresa(sender, instance, created, **kwargs): if created: Empresa.objects.create(user=instance) instance.empresa.save() class Oferta(models.Model): cargo = models.CharField(max_length=200) 
    trabajo = models.CharField(max_length=200)
    horarios = models.CharField(max_length=200)
    profesion = models.CharField(max_length=200)
    empresa = models.ForeignKey('core.Empresa')

    def __str__(self):
        # The model defines no "nombre" field, so use "cargo" for the string representation.
        return self.cargo
python
#!/usr/bin/env python # -*- coding: utf-8 -*- """ run_file2db is a tool to migrate a labeled dataset in a pickle file to a mongo db. It must be invoked using python run_file2db.py <project_folder> Created on Dec, 2016 @autor: Jesus Cid. """ import ast import time import sys import os import ipdb # Local imports from labelfactory.ConfigCfg import ConfigCfg as Cfg from labelfactory.Log import Log from labelfactory.labeling.datamanager import DataManager CF_FNAME = "config.cf" CF_DEFAULT_PATH = "./config.cf.default" def main(): # To complete the migration to python 3, I should replace all "raw_input" # by "input". Transitorily, to preserve compatibility with python 2, I # simply rename inut to raw_input if sys.version_info.major == 3: raw_input2 = input else: raw_input2 = raw_input ####### # Start # Check if project folder exists. Otherwise exit. if len(sys.argv) > 1: project_path = sys.argv[1] else: project_path = raw_input2("Select the (absolute or relative) path to" + " the labeling project folder: ") if not project_path.endswith('/'): project_path = project_path + '/' # Check if project folder exists. This is necessary to follow if not os.path.isdir(project_path): sys.exit("Project folder does not exist") ######################### # Read configuration data # Check if configuration file existe config_path = project_path + CF_FNAME if not os.path.isfile(config_path): sys.exit("Configuration file does not exist") # Read data from the configuation file cf = Cfg(config_path) # Data source and destination (options: file, mongodb) source_type = 'file' dest_type = 'mongodb' # Mongo DB settings db_info = {'name': cf.get('DataPaths', 'db_name'), 'hostname': cf.get('DataPaths', 'db_hostname'), 'user': cf.get('DataPaths', 'db_user'), 'pwd': cf.get('DataPaths', 'db_pwd'), 'label_coll_name': cf.get('DataPaths', 'db_label_coll_name'), 'history_coll_name': cf.get('DataPaths', 'db_history_coll_name'), 'port': cf.get('DataPaths', 'db_port'), 'mode': cf.get('DataPaths', 'db_mode'), 'file2db_mode': cf.get('DataPaths', 'db_file2db_mode'), 'db2file_mode': cf.get('DataPaths', 'db_db2file_mode'), } # Folder containing the urls to label file_info = {'project_path': project_path, 'input_folder': cf.get('DataPaths', 'input_folder'), 'output_folder': cf.get('DataPaths', 'output_folder'), 'used_folder': cf.get('DataPaths', 'used_folder'), 'dataset_fname': cf.get('DataPaths', 'dataset_fname'), 'labelhistory_fname': cf.get( 'DataPaths', 'labelhistory_fname'), 'labels_endname': cf.get('DataPaths', 'labels_endname'), 'preds_endname': cf.get('DataPaths', 'preds_endname'), 'urls_fname': cf.get('DataPaths', 'urls_fname')} # Type of wid: if 'yes', the wid is computed as a transformed url. # if 'no', the wid is taken equal to the url. compute_wid = cf.get('Labeler', 'compute_wid') # List of categories to label. categories = ast.literal_eval(cf.get('Labeler', 'categories')) parentcat = ast.literal_eval(cf.get('Labeler', 'parentcat')) # Possible labels for each category yes_label = cf.get('Labeler', 'yes_label') no_label = cf.get('Labeler', 'no_label') unknown_label = cf.get('Labeler', 'unknown_label') error_label = cf.get('Labeler', 'error_label') alphabet = {'yes': yes_label, 'no': no_label, 'unknown': unknown_label, 'error': error_label} # In multiclass cases, the reference class is the class used by the active # learning algorithm to compute the sample scores. 
ref_class = cf.get('ActiveLearning', 'ref_class') ########## # Log file # Create the log object log = Log(project_path + 'log') log.info('*****************************') log.info('****** WEB LABELER: *********') ##################### # Create main objects # Data manager object data_mgr = DataManager(source_type, dest_type, file_info, db_info, categories, parentcat, ref_class, alphabet, compute_wid) ############## # Read dataset # Load data from the standard dataset. log.info('Carga de datos') df_labels, df_preds, labelhistory = data_mgr.loadData(source_type) ############### # Migrate to DB # Save data and label history into db log.info("-- Saving data in mongodb") start = time.clock() data_mgr.migrate2DB(df_labels) log.info(str(time.clock() - start) + ' seconds') if __name__ == "__main__": main()
python
from django.contrib.auth.models import User from django.db import models import datetime as dt from tinymce.models import HTMLField from django.db.models.signals import post_save from django.dispatch import receiver @receiver(post_save, sender=User) def create_user_profile(sender, instance, created, **kwargs): if created: Profile.objects.create(user=instance) @receiver(post_save, sender=User) def save_user_profile(sender, instance, **kwargs): instance.profile.save() @receiver(post_save, sender=User) def create_user_profile(sender, instance, created, **kwargs): if created: Business.objects.create(user=instance) @receiver(post_save, sender=User) def save_user_profile(sender, instance, **kwargs): instance.business.save() class NeighbourHood(models.Model): neighbourhood_name = models.CharField(max_length =60) neighbourhood_location = models.CharField(max_length =250) population_count = models.IntegerField(null=True) admin = models.ForeignKey(User) def __str__(self): return self.neighbourhood_name def save_neighbourhood(self): self.save() def delete_neighborhood(self): self.delete() @classmethod def search_neighbourhood(cls,search_term): neighbourhood = cls.objects.filter(name__icontains = search_term) return neighbourhood class Profile(models.Model): profile_photo = models.ImageField(upload_to='images/') bio = models.CharField(max_length=300) user = models.OneToOneField(User) location = models.ForeignKey(NeighbourHood, null=True) email = models.EmailField(null = True) def __str__(self): return self.email def save_profile(self): self.save() def delete_profile(self): self.delete() class Business(models.Model): business_logo = models.ImageField(upload_to='images/') business_moto = models.CharField(max_length=300) user = models.OneToOneField(User) hood = models.ForeignKey(NeighbourHood, null=True) email = models.EmailField(null = True) def __str__(self): return self.email def save_business(self): self.save() def delete_business(self): self.delete() @classmethod def search_business(cls,search_term): business = cls.objects.filter(name__icontains = search_term) return business class JoinHood(models.Model): user_id = models.OneToOneField(User) hood_id = models.ForeignKey(NeighbourHood) def __str__(self): return self.user_id class Allert(models.Model): title = models.CharField(max_length=300) body = models.TextField() user = models.ForeignKey(User) hood = models.ForeignKey(NeighbourHood) def __str__(self): return self.title def save_allert(self): self.save() def delete_allert(self): self.delete() class Comment(models.Model): comment = models.CharField(max_length=500) user = models.ForeignKey(User) post = models.ForeignKey(Allert) def __str__(self): return self.comment def save_comment(self): self.save() def delete_comment(self): self.delete()
python
""" This code is based on these codebases associated with Yuta Saito's research. - Unbiased Recommender Learning from Missing-Not-At-Random Implicit Feedback: https://github.com/usaito/unbiased-implicit-rec-real - Unbiased Pairwise Learning from Biased Implicit Feedback: https://github.com/usaito/unbiased-pairwise-rec - Asymmetric Tri-training for Debiasing Missing-Not-At-Random Explicit Feedback: https://github.com/usaito/asymmetric-tri-rec-real """ from typing import Optional import numpy as np # Set a lower bound of a propensity score eps = 1e-3 def dcg_at_k(y_true: np.ndarray, y_score: np.ndarray, k: int, pscore: Optional[np.ndarray] = None) -> float: """Calculate a DCG score for a given user""" y_true_sorted_by_score = y_true[y_score.argsort()[::-1]] # If propensity score is provided, put high weight on records whose propensity score is low for unbiased evaluation # Otherwise, we evaluate each record evenly by setting all propensity scores as 1 if pscore is not None: pscore_sorted_by_score = np.maximum(pscore[y_score.argsort()[::-1]], eps) else: pscore_sorted_by_score = np.ones_like(y_true_sorted_by_score) dcg_score = 0.0 final_score = 0.0 k = k if y_true.shape[0] >= k else y_true.shape[0] if not np.sum(y_true_sorted_by_score) == 0: dcg_score += y_true_sorted_by_score[0] / pscore_sorted_by_score[0] for i in np.arange(1, k): dcg_score += y_true_sorted_by_score[i] / (pscore_sorted_by_score[i] * np.log2(i + 1)) final_score = dcg_score / np.sum(y_true_sorted_by_score) if pscore is None \ else dcg_score / np.sum(1. / pscore_sorted_by_score[y_true_sorted_by_score > 0]) return final_score def recall_at_k(y_true: np.ndarray, y_score: np.ndarray, k: int, pscore: Optional[np.ndarray] = None) -> float: """Calculate a recall score for a given user""" # Sort records in ascending order by prediction score y_true_sorted_by_score = y_true[y_score.argsort()[::-1]] # If propensity score is provided, put high weight on records whose propensity score is low for unbiased evaluation # Otherwise, we evaluate each record evenly by setting all propensity scores as 1 if pscore is not None: pscore_sorted_by_score = np.maximum(pscore[y_score.argsort()[::-1]], eps) else: pscore_sorted_by_score = np.ones_like(y_true_sorted_by_score) final_score = 0. k = k if y_true.shape[0] >= k else y_true.shape[0] if not np.sum(y_true_sorted_by_score) == 0: recall_score = np.sum(y_true_sorted_by_score[:k] / pscore_sorted_by_score[:k]) final_score = recall_score / np.sum(y_true_sorted_by_score) if pscore is None \ else recall_score / np.sum(1. 
/ pscore_sorted_by_score[y_true_sorted_by_score > 0]) return final_score def average_precision_at_k(y_true: np.ndarray, y_score: np.ndarray, k: int, pscore: Optional[np.ndarray] = None) -> float: """Calculate a average precision for a given user""" y_true_sorted_by_score = y_true[y_score.argsort()[::-1]] # If propensity score is provided, put high weight on records whose propensity score is low for unbiased evaluation # Otherwise, we evaluate each record evenly by setting all propensity scores as 1 if pscore is not None: pscore_sorted_by_score = np.maximum(pscore[y_score.argsort()[::-1]], eps) else: pscore_sorted_by_score = np.ones_like(y_true_sorted_by_score) average_precision_score = 0.0 final_score = 0.0 k = k if y_true.shape[0] >= k else y_true.shape[0] if not np.sum(y_true_sorted_by_score) == 0: for i in np.arange(k): if y_true_sorted_by_score[i] > 0: score_ = np.sum(y_true_sorted_by_score[:i + 1] / pscore_sorted_by_score[:i + 1]) / (i + 1) average_precision_score += score_ final_score = average_precision_score / np.sum(y_true_sorted_by_score) if pscore is None \ else average_precision_score / np.sum(1. / pscore_sorted_by_score[y_true_sorted_by_score > 0]) return final_score
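# A small illustrative check of the metrics above on toy arrays; the numbers are
# made up and not part of the original evaluation code. With pscore=None the
# metrics reduce to their usual unweighted forms, while passing pscore yields the
# inverse-propensity-weighted (unbiased) estimates.
if __name__ == '__main__':
    y_true = np.array([1, 0, 1, 0, 0])
    y_score = np.array([0.9, 0.8, 0.3, 0.2, 0.1])
    pscore = np.array([0.9, 0.5, 0.1, 0.7, 0.3])

    print('DCG@3 (naive):', dcg_at_k(y_true, y_score, k=3))
    print('DCG@3 (IPS):  ', dcg_at_k(y_true, y_score, k=3, pscore=pscore))
    print('Recall@3:     ', recall_at_k(y_true, y_score, k=3))
    print('MAP@3:        ', average_precision_at_k(y_true, y_score, k=3))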
python
def is_super(connection):
    with connection.cursor() as cursor:
        cursor.execute('show grants for current_user()')
        query_result = cursor.fetchone()
        return 'SUPER' in query_result
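# A hypothetical usage sketch for is_super() above, assuming a PyMySQL connection;
# the host/user/password values are placeholders and are not part of the original.
if __name__ == '__main__':
    import pymysql

    connection = pymysql.connect(host='localhost', user='root', password='secret')
    try:
        print('SUPER grant present:', is_super(connection))
    finally:
        connection.close()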
python
from pixiedust.display.app import *


@PixieApp
class TestEntity():
    @route()
    def main_screen(self):
        return """
        <h1><center>Simple PixieApp with dynamically computed dataframe</center></h1>
        <div pd_entity="compute_pdf('prefix')" pd_options="handlerId=dataframe" pd_render_onload></div>
        """

test = TestEntity()
test.run()
python
# --coding:utf-8-- # # Copyright (c) 2020 vesoft inc. All rights reserved. # # This source code is licensed under Apache 2.0 License, # attached with Common Clause Condition 1.0, found in the LICENSES directory. import pytest from nebula2.graph import ttypes from tests.common.nebula_test_suite import NebulaTestSuite class TestSetQuery(NebulaTestSuite): @classmethod def prepare(self): self.use_nba() def test_union_all(self): stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION ALL GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Tim Duncan", 1997, "Spurs"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"]] self.check_out_of_order_result(resp, expected_data) stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION ALL GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION ALL GO FROM "Manu Ginobili" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) colums = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, colums) expected_data = [["Tim Duncan", 1997, "Spurs"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"], ["Manu Ginobili", 2002, "Spurs"]] self.check_out_of_order_result(resp, expected_data) stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst AS id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \ UNION ALL GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Manu Ginobili", 2002, "Spurs"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"]] self.check_out_of_order_result(resp, expected_data) stmt = '''GO FROM "Tim Duncan" OVER like YIELD like._dst AS id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION ALL GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Manu Ginobili", 2002, "Spurs"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"]] self.check_out_of_order_result(resp, expected_data) stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION ALL (GO FROM "Tony Parker" OVER like YIELD like._dst AS id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name)''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Tim Duncan", 1997, "Spurs"], ["LaMarcus Aldridge", 2015, "Spurs"], ["LaMarcus Aldridge", 2006, "Trail Blazers"], ["Manu Ginobili", 2002, "Spurs"], ["Tim 
Duncan", 1997, "Spurs"]] self.check_out_of_order_result(resp, expected_data) stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION ALL GO FROM "Tony Parker" OVER like YIELD like._dst AS id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Tim Duncan", 1997, "Spurs"], ["LaMarcus Aldridge", 2015, "Spurs"], ["LaMarcus Aldridge", 2006, "Trail Blazers"], ["Manu Ginobili", 2002, "Spurs"], ["Tim Duncan", 1997, "Spurs"]] self.check_out_of_order_result(resp, expected_data) stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst AS id \ UNION ALL GO FROM "Tony Parker" OVER like YIELD like._dst AS id) \ | GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Manu Ginobili", 2002, "Spurs"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"], ["LaMarcus Aldridge", 2015, "Spurs"], ["LaMarcus Aldridge", 2006, "Trail Blazers"], ["Manu Ginobili", 2002, "Spurs"], ["Tim Duncan", 1997, "Spurs"]] # self.check_out_of_order_result(resp, expected_data) stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name as name, $$.team.name as player \ UNION ALL \ GO FROM "Tony Parker" OVER serve \ YIELD $^.player.name as name, serve.start_year as player''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["name", "player"] self.check_column_names(resp, column_names) expected_data = [["Tim Duncan", "Spurs"], ["Tony Parker", 1999], ["Tony Parker", 2018]] self.check_out_of_order_result(resp, expected_data) stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name as name, $$.team.name as player \ UNION ALL \ GO FROM "Tony Parker" OVER serve \ YIELD $^.player.name as name, serve.start_year''' resp = self.execute_query(stmt) self.check_resp_failed(resp) # column_names = ["name", "player"] # self.check_column_names(resp, column_names) # expected_data = [["Tim Duncan", "Spurs"], ["Tony Parker", "1999"], # ["Tony Parker", "2018"]] # self.check_out_of_order_result(resp, expected_data) stmt = '''GO FROM "Nobody" OVER serve YIELD $^.player.name AS player, serve.start_year AS start \ UNION ALL \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name AS player, serve.start_year AS start''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["player", "start"] self.check_column_names(resp, column_names) expected_data = [["Tony Parker", 1999], ["Tony Parker", 2018]] self.check_out_of_order_result(resp, expected_data) stmt = '''GO FROM "Nobody" OVER serve YIELD $^.player.name AS player, serve.start_year AS start \ UNION ALL \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year''' resp = self.execute_query(stmt) self.check_resp_failed(resp) # column_names = ["player", "start"] # self.check_column_names(resp, column_names) # expected_data = [["Tony Parker", 1999], ["Tony Parker", 2018]] # self.check_out_of_order_result(resp, expected_data) def test_union_distinct(self): stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \ UNION \ GO FROM "Tony 
Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION \ GO FROM "Manu Ginobili" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Manu Ginobili", 2002, "Spurs"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"]] self.check_out_of_order_result(resp, expected_data) stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \ UNION DISTINCT \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Manu Ginobili", 2002, "Spurs"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"]] self.check_out_of_order_result(resp, expected_data) def test_minus(self): stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \ MINUS \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Manu Ginobili", 2002, "Spurs"]] self.check_result(resp, expected_data) def test_intersect(self): stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \ INTERSECT \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"]] self.check_out_of_order_result(resp, expected_data) def test_mix(self): stmt = '''(GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \ MINUS \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION \ GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ INTERSECT \ GO FROM "Manu Ginobili" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$^.player.name", "serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Manu Ginobili", 2002, "Spurs"]] self.check_result(resp, expected_data) def test_assign(self): stmt = '''$var = GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION ALL \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name; \ YIELD $var.*''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$var.$^.player.name", "$var.serve.start_year", "$var.$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Tim Duncan", 1997, "Spurs"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"]] 
self.check_out_of_order_result(resp, expected_data) stmt = '''$var = (GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION ALL \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name); \ YIELD $var.*''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$var.$^.player.name", "$var.serve.start_year", "$var.$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Tim Duncan", 1997, "Spurs"], ["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"]] self.check_out_of_order_result(resp, expected_data) stmt = '''$var = (GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \ MINUS \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name; \ YIELD $var.*''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$var.$^.player.name", "$var.serve.start_year", "$var.$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Manu Ginobili", 2002, "Spurs"]] self.check_result(resp, expected_data) stmt = '''$var = (GO FROM "Tim Duncan" OVER like YIELD like._dst as id | \ GO FROM $-.id OVER serve YIELD $^.player.name, serve.start_year, $$.team.name) \ INTERSECT \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name; \ YIELD $var.*''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$var.$^.player.name", "$var.serve.start_year", "$var.$$.team.name"] self.check_column_names(resp, column_names) expected_data = [["Tony Parker", 1999, "Spurs"], ["Tony Parker", 2018, "Hornets"]] self.check_out_of_order_result(resp, expected_data) def test_empty_input(self): stmt = '''GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \ UNION \ GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \ MINUS \ GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \ INTERSECT \ GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["serve.start_year", "$$.team.name"] self.check_column_names(resp, column_names) expected_data = [] self.check_result(resp, expected_data) stmt = '''$var = GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \ UNION \ GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \ MINUS \ GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name \ INTERSECT \ GO FROM "NON EXIST VERTEX ID" OVER serve YIELD serve.start_year, $$.team.name; \ YIELD $var.*''' resp = self.execute_query(stmt) self.check_resp_succeeded(resp) column_names = ["$var.serve.start_year", "$var.$$.team.name"] self.check_column_names(resp, column_names) expected_data = [] self.check_result(resp, expected_data) def test_syntax_error(self): stmt = '''GO FROM "123" OVER like \ YIELD like._src as src, like._dst as dst \ | (GO FROM $-.src OVER serve \ UNION GO FROM $-.dst OVER serve)''' resp = self.execute_query(stmt) self.check_resp_failed(resp, ttypes.ErrorCode.E_SEMANTIC_ERROR) def test_execution_error(self): stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name \ UNION \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name1, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) 
self.check_resp_failed(resp, ttypes.ErrorCode.E_SEMANTIC_ERROR) stmt = '''GO FROM "Tim Duncan" OVER serve YIELD $^.player.name, serve.start_year \ UNION \ GO FROM "Tony Parker" OVER serve YIELD $^.player.name, serve.start_year, $$.team.name''' resp = self.execute_query(stmt) self.check_resp_failed(resp, ttypes.ErrorCode.E_SEMANTIC_ERROR)
python
from os import environ

from .app_settings import *

SECRET_KEY = environ.get('SECRET_KEY')
STATIC_ROOT = environ.get('STATIC_ROOT')
ALLOWED_HOSTS = list(environ.get('ALLOWED_HOSTS', default='').split(','))

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': environ.get('DB_NAME'),
        'HOST': '',
    }
}

DEBUG = False

SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 63072000
python
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-23 08:01
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('cms', '0020_old_tree_cleanup'),
        ('articles', '0002_category_placeholder'),
    ]

    operations = [
        migrations.CreateModel(
            name='CategoryPluginModel',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='articles_categorypluginmodel', serialize=False, to='cms.CMSPlugin')),
                ('number_to_show', models.IntegerField(choices=[(3, '3'), (6, '6'), (9, '9'), (12, '12')], default=6)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
        migrations.AlterModelOptions(
            name='article',
            options={'verbose_name': 'Artikel', 'verbose_name_plural': 'Artikel'},
        ),
    ]
python
class Solution:
    # Solution using Manacher's Algorithm
    @staticmethod
    def longest_palindromic(s: str) -> str:
        if type(s) != str:
            raise ValueError(f"{type(s)} not allowed only string type is allowed")

        def adjust_string(s: str) -> str:
            # method to adjust the string
            list_from_s = list(s.strip())        # Create List From {s}
            modified_s = "#".join(list_from_s)   # Modified {s} By adding Hash After every Char in list
            return "#" + modified_s + "#"        # return new {s} like : #a#b#b#a#

        if len(s) <= 1:                 # Check is {s} Empty or has length equal 1
            return s

        s = adjust_string(s)            # Get new {s} adjusted from {adjust_string} method
        max_length = 0                  # Variable indicate to maximum palindromic length in the string
        index = 0                       # Variable indicate to the index of CENTER of the palindromic
        P = [0] * len(s)                # Create Array with length equal to new {s} length and fill it zeros
        center = right_boundary = 0     # center and right_boundary variables that indicates to first index

        for i in range(0, len(s)):      # start the functionality by looping around the {s} from zero to the last element
            mirror = 2 * center - i     # mirror Variable indicate to the mirror index of current string ex: aczbzca the mirror of z is z
            if i < right_boundary:      # check if i lower than right_boundary
                P[i] = min(right_boundary - i, P[mirror])  # fill the location P[i] minimum value of { right_boundary - i } or value of the P[mirror]
            right = i + (P[i] + 1)      # right Variable is expanding to the right side
            left = i - (P[i] + 1)       # left Variable is expanding to the left side
            while left >= 0 and right < len(s) and s[right] == s[left]:  # check how many expansions are equal in left and right side and increase element of P[i]
                left -= 1
                right += 1
                P[i] += 1
            if i + P[i] > right_boundary:       # check if value of { i + P[i] > right_boundary }
                center = i                      # set {center} equal to {i}
                right_boundary = i + P[i]       # set {right_boundary} equal to last index in right expansion
            if P[i] > max_length:               # set max_length and index
                max_length = P[i]
                index = i

        start_position = index - max_length + 1
        end_position = index + max_length
        s = "".join(s[start_position:end_position].split("#"))
        return s                        # return the result after deleting hashes


list_of_examples = ["babad", "cbbd", "a", "ac"]
for example in list_of_examples:
    print(f"Input : {example} , Output : {Solution.longest_palindromic(example)}")
python
#!/usr/bin/env python # coding=utf-8 # ==================================================== # # File Name : pc_nd_conv_plot.py # Creation Date : 17-04-2018 # Created By : Min-Ye Zhang # Contact : [email protected] # # ==================================================== from __future__ import print_function import sys import pandas as pd import numpy as np import matplotlib.pyplot as plt from argparse import ArgumentParser def __check_column_and_target(df, xtarget_column, ytarget_column): n_columns = len(df.columns) # Get the column names and the maximum value for each column # Here the fact that the calculation is more accurate with larger parameter is assumed. # Not recommended to use for n_columns >= 7 if n_columns >= 7: raise ValueError(" data columns >= 7 will be crowded and NOT implemented YET. Remove some data.") if ytarget_column == 0: i_ytarget = n_columns - 1 else: try: assert ytarget_column <= n_columns assert ytarget_column > 0 except AssertionError: raise ValueError("Invalid ytarget") else: i_ytarget = ytarget_column - 1 if xtarget_column == 0: i_xtarget = n_columns - 2 else: try: assert xtarget_column <= n_columns assert xtarget_column > 0 except AssertionError: raise ValueError("Invalid xtarget") else: i_xtarget = xtarget_column - 1 para_names = [] for i in range(n_columns): if i == i_xtarget or i == i_ytarget: continue para_names.append(df.columns[i]) para_max = [] for col in para_names: para_max.append(df[col].max()) x_name = df.columns[i_xtarget] y_name = df.columns[i_ytarget] return n_columns, x_name, y_name, para_names, para_max # ==================================================== def __set_ax_linewidth(subplot_ax, linewidth=4): for axis in ['top','bottom','left','right']: subplot_ax.spines[axis].set_linewidth(linewidth) subplot_ax.tick_params(axis='both', which='major', length=linewidth*2, \ width=linewidth/2, direction='in') subplot_ax.tick_params(axis='both', which='minor', length=linewidth, \ width=linewidth/2, direction='in') # ==================================================== def __init_fig_axs(n_columns, para_names, x_name, y_name): # N-1 graphs are required for N (n>=2) convergence parameters, # with the left one as the x-axis if n_columns == 3: fig, axs = plt.subplots(figsize=(8,8)) axs.set_xlabel(x_name, size=12) axs.set_ylabel(y_name,size=12) __set_ax_linewidth(axs, 4) else: if n_columns == 4: fig, axs = plt.subplots(1,2, figsize=(12,8)) axs[0].set_xlabel(x_name, size=12) axs[1].set_xlabel(x_name, size=12) axs[0].set_ylabel(y_name, size=12) if n_columns == 5: fig, axs = plt.subplots(1,3, figsize=(16,8)) axs[0].set_xlabel(x_name, size=12) axs[1].set_xlabel(x_name, size=12) axs[2].set_xlabel(x_name, size=12) axs[0].set_ylabel(y_name, size=12) if n_columns == 6: fig, axs = plt.subplots(2,2, figsize=(12,12)) #axs[:,:].set_xlabel(x_name, size=12) #axs[].set_xlabel(x_name, size=12) axs[0,0].set_ylabel(y_name, size=12) axs[1,0].set_ylabel(y_name, size=12) axs[1,0].set_xlabel(x_name, size=12) axs[1,1].set_xlabel(x_name, size=12) for ax in axs.flatten(): __set_ax_linewidth(ax, 4) return fig, axs # ==================================================== def __init_fig_3d_axs(n_columns, para_names, x_name, y_name): from mpl_toolkits.mplot3d import Axes3D fig = plt.figure(figsize=(12,9)) if n_columns == 3: axs = fig.add_subplot(111, projection='3d') axs.set_xlabel(para_names[0], size=12) axs.set_ylabel(x_name, size=12) axs.set_zlabel(y_name, size=12) else: raise ValueError("plot3d has not been implemented yet for n_columns >3. 
Delete some columns") return fig, axs # ==================================================== def common_nd_conv_plot(df_all, xtarget_column=0, ytarget_column=0, f_plot3d=False, \ figname='', preview=False, imgres=2): n_columns, x_name, y_name, para_names, para_max = \ __check_column_and_target(df_all, xtarget_column, ytarget_column) # TODO: # if 3D plot is required, import necessary 3D plotting modules first if f_plot3d: from matplotlib import cm fig, axs = __init_fig_3d_axs(n_columns, para_names, x_name, y_name) if n_columns == 3: p3d = axs.scatter(xs=df_all[para_names[0]], ys=df_all[x_name], zs=df_all[y_name], \ s=100, c=df_all[y_name], cmap=cm.coolwarm, marker='o', \ depthshade=False) else: raise ValueError("--plot3d has not been implemented for n_columns !=3. Sorry :(") else: # Group the DataFrame by groupby method df_all_gpb = df_all.groupby(para_names) fig, axs = __init_fig_axs(n_columns, para_names, x_name, y_name) if n_columns == 3: for group in sorted(df_all_gpb.groups.iterkeys()): gp_data = df_all_gpb.get_group(group) x = gp_data.sort_values(by=x_name)[x_name] y = gp_data.sort_values(by=x_name)[y_name] axs.plot(x, y, 'o-', linewidth=2, \ label="%s=%s" % (para_names[0], group)) axs.legend(loc="upper left", shadow=True, fancybox=True) if n_columns >= 4: #print(df_all_gpb.groups) for i in range(len(para_names)): for group in sorted(df_all_gpb.groups.keys(), key=lambda x: x[i]): # check the convergence of parameter para_names[i] # with the other parameters at the best, i.e. max flag_best_other = True for j in range(len(para_names)): if j != i and group[j] != para_max[j]: flag_best_other = False break if not flag_best_other: continue gp_data = df_all_gpb.get_group(group) x = gp_data.sort_values(by=x_name)[x_name] y = gp_data.sort_values(by=x_name)[y_name] axs.flatten()[i].plot(x, y, 'o-', linewidth=2, \ label="%s=%s" % (para_names[i], group[i])) # Generate the title string as the fixed parameters for i in range(len(para_names)): title_str_list = ['convergence w.r.t', para_names[i],'\n@ ('] for j in range(len(para_names)): if j != i: title_str_list.append("%s = %s" % (para_names[j], para_max[j])) title_str_list.append(')') title_str = ' '.join(title_str_list) axs.flatten()[i].set_title(title_str) for ax in axs.flatten(): ax.legend(loc="upper left", shadow=True, fancybox=True) if preview: if f_plot3d: fig.colorbar(p3d) plt.show() if figname is not '': print("- Saving to %s" % figname) fig.savefig(figname, dpi=int(imgres)*150) return # ==================================================== def Main(ArgList): description = '''Visualize the data for an N-parameter convergence test. In general N is equal to 2 or 3. Support up to 5.''' parser = ArgumentParser(description=description) parser.add_argument(dest="datafile", metavar='file', type=str, nargs=1, help="The name of file storing the data. Better in CSV/Excel format and index is not necessary.") parser.add_argument("--xt", dest="xtarget_column", metavar="X", type=int, default=0, help="the index of column (>0) which contains the direct test parameter (x). Default is the second to last column.") parser.add_argument("--yt", dest="ytarget_column", metavar="Y", type=int, default=0, help="the index of column (>0) which contains the quantity to converge (y). Default is the last column.") parser.add_argument("--plot3d", dest="f_plot3d", action="store_true", help="Flag to use 3D plots. Support 2-parameter test only.") parser.add_argument("--save", dest="figname", type=str, default='', help="File name (e.g. conv.png) to save the figure. 
The figure will not be saved unless this option is set other than ''.") parser.add_argument("--res", dest="resolution", metavar='RES', type=int, default=2, help="Resolution of image, dpi = 150*RES. Default 2 (300 dpi).") # initialize options as 'opts' opts = parser.parse_args() datafile = opts.datafile[0] df_all = pd.read_table(datafile, delim_whitespace=True) common_nd_conv_plot(df_all, opts.xtarget_column, opts.ytarget_column, opts.f_plot3d, opts.figname, \ True, opts.resolution) # ============================== if __name__ == "__main__": Main(sys.argv)
python
sandwich_orders = ['pastrami', 'fish', 'pastrami', 'cabbage', 'pastrami', 'sala', 'pig', 'chicken']
finished_sandwich_orders = []

print(sandwich_orders)
print("'pastrami' soled out!")
while 'pastrami' in sandwich_orders:
    sandwich_orders.remove('pastrami')
print(sandwich_orders)

while sandwich_orders:
    finished = sandwich_orders.pop()
    print("I made your " + finished + ' sandwich.')
    finished_sandwich_orders.append(finished)

print(sandwich_orders)
print(finished_sandwich_orders)
python
import tensorflow as tf import src.lib as tl class DNN: def __init__(self,conf_data): n_classes = len(conf_data["classes_list"]) data_size = conf_data["size"] self.name = "selector" self.show_kernel_map = [] with tf.name_scope('Input'): self.input = tf.placeholder(tf.float32, shape=[None, data_size[0] * data_size[1] ], name="x-input") with tf.name_scope('Labels'): self.labels = tf.placeholder(tf.float32, shape=[None, n_classes], name="y-input") with tf.name_scope('DropOut'): self.keep_prob = tf.placeholder(tf.float32) with tf.name_scope('model'): net = tf.reshape(self.input, shape=[-1, data_size[0], data_size[1], 1]) with tf.variable_scope("CONV_1"): [conv1, W, b] = tl.conv2d(net, 121, 20) R1 = tf.nn.l2_loss(W) self.show_kernel_map.append(W) # Create the feature map with tf.variable_scope("POOL_1"): pool1 = tl.max_pool_2x2(conv1) with tf.variable_scope("CONV_2"): [conv2, W, b] = tl.conv2d(pool1, 16, 10) R2 = tf.nn.l2_loss(W) self.show_kernel_map.append(W) # Create the feature map with tf.variable_scope("POOL_2"): pool2 = tl.max_pool_2x2(conv2) with tf.variable_scope("FC_1"): flat1 = tl.fc_flat(pool2) h, W, b = tl.fc(flat1, 1024) R3 = tf.nn.l2_loss(W) fc1 = tf.nn.relu(h) with tf.variable_scope("DROPOUT_1"): drop1 = tf.nn.dropout(fc1, self.keep_prob) with tf.variable_scope("FC_2"): h, W, b = tl.fc(drop1, 1024) R4 = tf.nn.l2_loss(W) fc2 = tf.nn.relu( h ) with tf.variable_scope("DROPOUT_2"): drop2 = tf.nn.dropout(fc2, self.keep_prob) with tf.variable_scope("OUT"): self.out, W, b = tl.fc(drop2, n_classes) with tf.name_scope('Cost'): self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2( labels=self.labels, logits=self.out) ) self.cost = self.cost + 0.01 * (R1 + R2 + R3 + R4) self.output = tf.nn.softmax (self.out)
python
""" Wrap Google Prediction API into something that looks kind of like the standard scikit-learn interface to learning models. Derived from Google API example code examples found here: https://github.com/google/google-api-python-client @author: Jed Ludlow """ from __future__ import print_function import argparse import pprint import time import numpy as np from apiclient import sample_tools from oauth2client import client # Time to wait (in seconds) between successive checks of training status. TRAIN_SLEEP_TIME = 10 # Time to wait (in seconds) between successive prediction calls. PREDICT_SLEEP_TIME = 0.8 # String to display if OAuth fails. REAUTH = ("The credentials have been revoked or expired. " "Please re-instantiate the predictor to re-authorize.") def print_header(line): """ Format and print header block sized to length of line """ header_str = '=' header_line = header_str * len(line) print('\n' + header_line) print(line) print(header_line) class GooglePredictor(object): """ Prediction engine from the Google Prediction API wrapped loosely in the style of sckit-learn. """ def __init__(self, project_id, object_name, model_id, client_secrets): # Take advantage of the Google API example tools for # credential management which make use of command line # argument parsing. argparser = argparse.ArgumentParser(add_help=False) argparser.add_argument( 'object_name', help="Full Google Storage path of csv data (ex bucket/object)") argparser.add_argument( 'model_id', help="Model Id of your choosing to name trained model") argparser.add_argument( 'project_id', help="Project Id as shown in Developer Console") service, self.flags = sample_tools.init( ['GooglePredictor', object_name, model_id, project_id], 'prediction', 'v1.6', __doc__, client_secrets, parents=[argparser], scope=( 'https://www.googleapis.com/auth/prediction', 'https://www.googleapis.com/auth/devstorage.read_only')) self.papi = service.trainedmodels() def list(self): """ List available models in the current project. """ try: # List models. print_header("Fetching list of first ten models") result = self.papi.list( maxResults=10, project=self.flags.project_id).execute() print("List results:") pprint.pprint(result) except client.AccessTokenRefreshError: print(REAUTH) def get_params(self): """ Get description of current model. """ try: # Describe model. print_header("Fetching model description") result = self.papi.analyze( id=self.flags.model_id, project=self.flags.project_id).execute() print("Analyze results:") pprint.pprint(result) except client.AccessTokenRefreshError: print(REAUTH) def fit(self, model_type='CLASSIFICATION'): """ Fit a model to training data in the current bucket object. """ try: # Start training request on a data set. print_header("Submitting model training request") body = { 'id': self.flags.model_id, 'storageDataLocation': self.flags.object_name, 'modelType': model_type} start = self.papi.insert( body=body, project=self.flags.project_id).execute() print("Training results:") pprint.pprint(start) # Wait for the training to complete. print_header("Waiting for training to complete") while True: status = self.papi.get( id=self.flags.model_id, project=self.flags.project_id).execute() state = status['trainingStatus'] print("Training state: " + state) if state == 'DONE': break elif state == 'RUNNING': time.sleep(TRAIN_SLEEP_TIME) continue else: raise Exception("Training Error: " + state) # Job has completed. 
print("Training completed:") pprint.pprint(status) break except client.AccessTokenRefreshError: print(REAUTH) def predict(self, X): """ Get model predictions for the samples in X. X is a numpy array where each column is a feature, and each row is an observation sample. """ try: # Make some predictions using the newly trained model. print_header("Making some predictions") out = [] for sample in X: body = {'input': {'csvInstance': sample.tolist()}} result = self.papi.predict( body=body, id=self.flags.model_id, project=self.flags.project_id).execute() if 'outputLabel' in result: out.append(result['outputLabel']) elif 'outputValue' in result: out.append(float(result['outputValue'])) time.sleep(PREDICT_SLEEP_TIME) return np.array(out) except client.AccessTokenRefreshError: print(REAUTH) def delete(self): """ Delete the current model. """ try: # Delete model. print_header("Deleting model") result = self.papi.delete( id=self.flags.model_id, project=self.flags.project_id).execute() print("Model deleted.") return result except client.AccessTokenRefreshError: print(REAUTH)
python
def findDecision(obj):  # obj[0]: Coupon, obj[1]: Education, obj[2]: Occupation
    # {"feature": "Coupon", "instances": 8147, "metric_value": 0.4744, "depth": 1}
    if obj[0] > 1:
        # {"feature": "Education", "instances": 5889, "metric_value": 0.4676, "depth": 2}
        if obj[1] > 1:
            # {"feature": "Occupation", "instances": 3337, "metric_value": 0.4747, "depth": 3}
            if obj[2] <= 13.339599828993485:
                return 'True'
            elif obj[2] > 13.339599828993485:
                return 'True'
            else:
                return 'True'
        elif obj[1] <= 1:
            # {"feature": "Occupation", "instances": 2552, "metric_value": 0.4568, "depth": 3}
            if obj[2] <= 19.03559777229008:
                return 'True'
            elif obj[2] > 19.03559777229008:
                return 'True'
            else:
                return 'True'
        else:
            return 'True'
    elif obj[0] <= 1:
        # {"feature": "Occupation", "instances": 2258, "metric_value": 0.4882, "depth": 2}
        if obj[2] > 2.015213346063521:
            # {"feature": "Education", "instances": 1795, "metric_value": 0.4911, "depth": 3}
            if obj[1] > 0:
                return 'False'
            elif obj[1] <= 0:
                return 'True'
            else:
                return 'True'
        elif obj[2] <= 2.015213346063521:
            # {"feature": "Education", "instances": 463, "metric_value": 0.4395, "depth": 3}
            if obj[1] <= 3:
                return 'False'
            elif obj[1] > 3:
                return 'True'
            else:
                return 'True'
        else:
            return 'False'
    else:
        return 'False'
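# Quick illustrative call with a made-up feature vector [Coupon, Education, Occupation]:
# Coupon=2 and Education=1 route this sample into the obj[0] > 1 / obj[1] <= 1 branch.
print(findDecision([2, 1, 15.0]))  # -> 'True'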
python
from typing import Callable, Dict, Optional import torch import torch.nn as nn from torch.utils.data import DataLoader from kornia.metrics import accuracy, mean_average_precision, mean_iou from .trainer import Trainer from .utils import Configuration class ImageClassifierTrainer(Trainer): """Module to be used for image classification purposes. The module subclasses :py:class:`~kornia.x.Trainer` and overrides the :py:func:`~kornia.x.Trainer.evaluate` function implementing a standard :py:func:`~kornia.metrics.accuracy` topk@[1, 5]. .. seealso:: Learn how to use this class in the following `example <https://github.com/kornia/kornia/blob/master/examples/train/image_classifier/>`__. """ def compute_metrics(self, *args: torch.Tensor) -> Dict[str, float]: if len(args) != 2: raise AssertionError out, target = args acc1, acc5 = accuracy(out, target, topk=(1, 5)) return dict(top1=acc1.item(), top5=acc5.item()) class SemanticSegmentationTrainer(Trainer): """Module to be used for semantic segmentation purposes. The module subclasses :py:class:`~kornia.x.Trainer` and overrides the :py:func:`~kornia.x.Trainer.evaluate` function implementing IoU :py:func:`~kornia.metrics.mean_iou`. .. seealso:: Learn how to use this class in the following `example <https://github.com/kornia/kornia/blob/master/examples/train/semantic_segmentation/>`__. """ def compute_metrics(self, *args: torch.Tensor) -> Dict[str, float]: if len(args) != 2: raise AssertionError out, target = args iou = mean_iou(out.argmax(1), target, out.shape[1]).mean() return dict(iou=iou.item()) class ObjectDetectionTrainer(Trainer): """Module to be used for object detection purposes. The module subclasses :py:class:`~kornia.x.Trainer` and overrides the :py:func:`~kornia.x.Trainer.evaluate` function implementing IoU :py:func:`~kornia.metrics.mean_iou`. .. seealso:: Learn how to use this class in the following `example <https://github.com/kornia/kornia/blob/master/examples/train/object_detection/>`__. 
""" def __init__( self, model: nn.Module, train_dataloader: DataLoader, valid_dataloader: DataLoader, criterion: Optional[nn.Module], optimizer: torch.optim.Optimizer, scheduler: torch.optim.lr_scheduler.CosineAnnealingLR, config: Configuration, num_classes: int, callbacks: Dict[str, Callable] = None, loss_computed_by_model: Optional[bool] = None, ) -> None: if callbacks is None: callbacks = {} super().__init__( model, train_dataloader, valid_dataloader, criterion, optimizer, scheduler, config, callbacks ) # TODO: auto-detect if the model is from TorchVision self.loss_computed_by_model = loss_computed_by_model self.num_classes = num_classes def on_model(self, model: nn.Module, sample: dict): if self.loss_computed_by_model and model.training: return model(sample["input"], sample["target"]) return model(sample["input"]) def compute_loss(self, *args: torch.Tensor) -> torch.Tensor: if self.loss_computed_by_model: return torch.stack(list(args[0])).sum() if self.criterion is None: raise RuntimeError("`criterion` should not be None if `loss_computed_by_model` is False.") return self.criterion(*args) def compute_metrics(self, *args: torch.Tensor) -> Dict[str, float]: if ( isinstance(args[0], dict) and "boxes" in args[0] and "labels" in args[0] and "scores" in args[0] and isinstance(args[1], dict) and "boxes" in args[1] and "labels" in args[1] ): mAP, _ = mean_average_precision( [a['boxes'] for a in args[0]], [a['labels'] for a in args[0]], [a['scores'] for a in args[0]], [a['boxes'] for a in args[1]], [a['labels'] for a in args[1]], n_classes=self.num_classes, threshold=0.000001 ) return {'mAP': mAP.item()} return super().compute_metrics(*args)
python
# Created on Mar 07, 2021
# author: Hosein Hadipour
# contact: [email protected]

import os

output_dir = os.path.curdir

str_feedback1 = lambda a24, b15, b0, b1, b2: a24 + ' + ' + b15 + ' + ' + b0 + ' + ' + b1 + '*' + b2
str_feedback2 = lambda b6, a27, a0, a1, a2: b6 + ' + ' + a27 + ' + ' + a0 + ' + ' + a1 + '*' + a2
str_f = lambda b0, b15: b0 + ' + ' + b15

def biviumb(T=177):
    cipher_name = 'biviumb'
    # 177 clock cycles
    recommended_mg = 32
    recommended_ms = 65
    eqs = '#%s %d clock cycles\n' % (cipher_name, T)
    eqs += 'connection relations\n'
    for t in range(T):
        eqs += 'b_%d, b_%d => bm_%d\n' % (t + 1, t + 2, t)
        eqs += 'a_%d, a_%d => am_%d\n' % (t + 1, t + 2, t)
    eqs += 'algebraic relations\n'
    for t in range(T):
        eqs += 'a_%d + a_%d + b_%d + b_%d + bm_%d\n' % (t + 93, t + 24, t, t + 15, t)
        eqs += 'b_%d + b_%d + a_%d + a_%d + am_%d\n' % (t + 84, t + 6, t, t + 27, t)
        eqs += 'b_%d + b_%d + a_%d + a_%d + z_%d\n' % (t, t + 15, t, t + 27, t)
    eqs += 'known\n' + '\n'.join(['z_%d' % i for i in range(T)]) + '\nend'
    eqsfile_path = os.path.join(output_dir, 'relationfile_%s_%dclk_mg%d_ms%d.txt' % (
        cipher_name, T, recommended_mg, recommended_ms))
    with open(eqsfile_path, 'w') as relation_file:
        relation_file.write(eqs)

def main():
    biviumb(T=177)

if __name__ == '__main__':
    main()
python
from django.utils.translation import ugettext_lazy as _ from django.contrib.comments.models import CommentFlag from django.contrib.comments.admin import CommentsAdmin from django.contrib import admin from scipy_central.comments.models import SpcComment class SpcCommentAdmin(CommentsAdmin): """ Custom admin interface for comments defined on the top of built-in admin interface """ list_display = CommentsAdmin.list_display fieldsets = ( (None, {'fields': ('content_type', 'object_pk', 'site')} ), (_('Content'), {'fields': ('user', 'user_name', 'user_email', 'user_url', 'comment', 'rest_comment')} ), (_('Metadata'), {'fields': ('submit_date', 'ip_address', 'is_public', 'is_removed')} ), ) class SpcCommentFlagAdmin(admin.ModelAdmin): """ Admin interface for comment flags """ list_display = ('flag', 'user', 'comment', 'flag_date') search_fields = ['user__username', 'comment__user__username', 'flag_date'] list_filter = ['flag_date'] ordering = ['-flag_date'] admin.site.register(SpcComment, SpcCommentAdmin) admin.site.register(CommentFlag, SpcCommentFlagAdmin)
python
# 3.11 随机选择 import random values = [1,2,3,4,5,6] for i in range(0, 4): print(random.choice(values)) for i in range(0, 4): print(random.sample(values, 2)) random.shuffle(values) print(values) for i in range(0, 10): print(random.randint(0, 10)) for i in range(0, 3): print(random.random()) print(random.getrandbits(200)) random.seed() # Seed based on system time or os.urandom() random.seed(12345) # Seed based on integer given random.seed(b'bytedata') # Seed based on byte data
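
# A short extension of the recipe (not in the original text): reseeding with the
# same value reproduces the same draws, and random.uniform() returns floats.
random.seed(12345)
first = random.randint(0, 10)
random.seed(12345)
assert first == random.randint(0, 10)  # identical because the seed was reset
print(random.uniform(0, 10))           # uniform float between 0 and 10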
python
import json from pytorch_pretrained_bert import cached_path from pytorch_pretrained_bert import OpenAIGPTTokenizer from keras_gpt_2 import load_trained_model_from_checkpoint, get_bpe_from_files, generate tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt') url = "s3://datasets.huggingface.co/personachat/personachat_self_original.json" # Download and load JSON dataset personachat_file = cached_path(url) with open(personachat_file, "r", encoding="utf-8") as f: dataset = json.loads(f.read()) # with open('dataset.json', "w", encoding="utf-8") as f: # f.write(json.dumps(dataset)) dataset = dataset['train'] dataset = dataset[:1] print('\n') print(dataset[0]['utterances'][1]) print('\n') print(dataset[0]['utterances'][2]) # Tokenize and encode the dataset using our loaded GPT tokenizer def tokenize(obj): if isinstance(obj, str): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj)) if isinstance(obj, dict): return dict((n, tokenize(o)) for n, o in obj.items()) return list(tokenize(o) for o in obj) dataset = tokenize(dataset)
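
# Hedged sanity check (not in the original snippet): convert_ids_to_tokens is the
# inverse mapping of convert_tokens_to_ids, so it can be used to spot-check that
# the nested structure survived the recursive tokenize() above. The indexing
# below assumes the standard PERSONA-CHAT layout of 'utterances' and 'history'.
first_history_line = dataset[0]['utterances'][1]['history'][0]
print(tokenizer.convert_ids_to_tokens(first_history_line))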
python
import unittest import pandas as pd import os from requests import Response from computerMetricCollector.metricsCollector.StorageAPI import store_to_database from computerMetricCollector.crypto import encrypt_data from computerMetricCollector.test.crypto import read_key, decrypt_data from computerMetricCollector.config import import_config from computerMetricCollector.metricsCollector.memoryMetrics import MemoryMetrics from computerMetricCollector.metricsCollector.computerMetrics import get_computer_id from computerMetricCollector.test.TestCase.LoggerTest import set_logger class MemoryTest(unittest.TestCase): def setUp(self): self.logger = set_logger("DEBUG") self.root_dir = os.path.dirname(os.path.dirname(__file__)) self.settings = import_config(self.root_dir) self.date_format = self.settings.get("date_time_format") self.meta = self.settings.get("collectors").get("MemoryMetrics") self.collector = MemoryMetrics(self.logger, get_computer_id(self.logger), self.meta.get("metrics"), self.meta.get("metrics_to_encrypt"), self.date_format, self.meta.get("url")) self.collector.fetch_metrics() self.metrics_df = self.collector.get_metrics_df() self.sample_df = pd.read_csv(self.root_dir + "/sample_data/MemoryMetrics.csv", names=self.meta.get("metrics")) def test_memory_metrics(self): if len(self.meta.get("metrics_to_match")) > 0: match_metrics_df = self.metrics_df.filter(items=self.meta.get("metrics_to_match"), axis=1) match_sample_df = self.sample_df.filter(items=self.meta.get("metrics_to_match"), axis=1) pd.testing.assert_frame_equal(match_metrics_df, match_sample_df, check_dtype=False) def test_metrics_type(self): for idx, rec in self.metrics_df.iterrows(): self.assertLess(int(rec["memory_available"]), int(rec["memory_total"])) self.assertLess(int(rec["memory_used"]), int(rec["memory_total"])) self.assertLess(int(rec["swap_used"]), int(rec["swap_total"])) self.assertLess(int(rec["swap_free"]), int(rec["swap_total"])) self.assertGreaterEqual(int(rec["swap_byte_in"]), 0) self.assertGreaterEqual(int(rec["swap_byte_out"]), 0) self.assertIsInstance(rec["memory_used_percent"], float) self.assertIsInstance(rec["swap_percent"], float) def test_encryption(self): raw_metrics_df = self.metrics_df encrypt_key = read_key(self.root_dir + self.settings.get("encryption_key_file")) encrypt_data(self.collector, encrypt_key) encrypted_metrics_df = self.collector.get_metrics_df() decrypt_key = read_key(self.root_dir + self.settings.get("decryption_key_file")) decrypted_metrics_df = decrypt_data(encrypted_metrics_df, self.meta.get("metrics_to_encrypt"), decrypt_key) pd.testing.assert_frame_equal(raw_metrics_df, decrypted_metrics_df) def test_store(self): url = self.meta.get("url") reg_id = self.settings.get("registration_id") encrypt_key = read_key(self.root_dir + self.settings.get("encryption_key_file")) if (url is not None and url != "") and (reg_id is not None and reg_id != ""): response = store_to_database(self.collector, reg_id, encrypt_key) self.assertIsInstance(response, Response) self.assertEqual(response.status_code, 200)
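
# Not in the original test module: a standard entry point so the tests can also
# be run directly with the Python interpreter instead of only via a test runner.
if __name__ == "__main__":
    unittest.main()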
python
import unittest
from util.bean import deepNaviReqToNaviModel
from model import DeepNaviReq
import time


def generateReq():
    req = DeepNaviReq()
    req.time = int(time.time() * 1000)
    print()
    # magnetic = req.magneticList.add()
    # magnetic.x = 1
    # magnetic.y = 2
    # magnetic.z = 3
    accelerometer = req.accelerometerList.add()
    accelerometer.x = 1
    accelerometer.y = 2
    accelerometer.z = 3
    orientation = req.orientationList.add()
    orientation.x = 1
    orientation.y = 2
    orientation.z = 3
    gyroscope = req.gyroscopeList.add()
    gyroscope.x = 1
    gyroscope.y = 2
    gyroscope.z = 3
    gravity = req.gravityList.add()
    gravity.x = 1
    gravity.y = 2
    gravity.z = 3
    linearAcceleration = req.linearAccelerationList.add()
    linearAcceleration.x = 1
    linearAcceleration.y = 2
    linearAcceleration.z = 3
    ambientTemperature = req.ambientTemperatureList.add()
    ambientTemperature.value = 20
    light = req.lightList.add()
    light.value = 20
    pressure = req.pressureList.add()
    pressure.value = 20
    proximity = req.proximityList.add()
    proximity.value = 20
    return req


class TestTo(unittest.TestCase):
    def testA(self):
        print(deepNaviReqToNaviModel(generateReq()))
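
# Not in the original module: allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()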
python
# Generated by Django 2.1.11 on 2019-12-03 21:08 from django.db import migrations from qatrack.qatrack_core.dates import ( format_as_date, format_datetime, parse_date, parse_datetime, ) def datestrings_to_dates(apps, schema): TestInstance = apps.get_model("qa", "TestInstance") for ti in TestInstance.objects.filter(unit_test_info__test__type="date"): ti.date_value = parse_date(ti.string_value) ti.string_value = "" ti.save() for ti in TestInstance.objects.filter(unit_test_info__test__type="datetime"): ti.datetime_value = parse_datetime(ti.string_value) ti.string_value = "" ti.save() def date_to_datestrings(apps, schema): TestInstance = apps.get_model("qa", "TestInstance") for ti in TestInstance.objects.filter(unit_test_info__test__type="date"): ti.string_value = format_as_date(ti.date_value) ti.save() for ti in TestInstance.objects.filter(unit_test_info__test__type="datetime"): ti.string_value = format_datetime(ti.datetime_value) ti.save() class Migration(migrations.Migration): dependencies = [ ('qa', '0045_auto_20191203_1409'), ] operations = [ migrations.RunPython(datestrings_to_dates, date_to_datestrings), ]
python
#!/usr/bin/env python

"""Software Carpentry Windows Installer

Helps mimic a *nix environment on Windows with as little work as possible.

The script:
* Installs nano and makes it accessible from msysgit
* Provides standard nosetests behavior for msysgit

To use:

1. Install Python, IPython, and Nose. An easy way to do this is with the
   Anaconda CE Python distribution
   http://continuum.io/anacondace.html
2. Install msysgit
   http://code.google.com/p/msysgit/downloads/list?q=full+installer+official+git
3. Run swc_windows_installer.py
   You should be able to simply double click the file in Windows
"""

import hashlib
try:  # Python 3
    from io import BytesIO as _BytesIO
except ImportError:  # Python 2
    from StringIO import StringIO as _BytesIO
import os
import re
try:  # Python 3
    from urllib.request import urlopen as _urlopen
except ImportError:  # Python 2
    from urllib2 import urlopen as _urlopen
import zipfile


def zip_install(url, sha1, install_directory):
    """Download and install a zipped bundle of compiled software"""
    r = _urlopen(url)
    zip_bytes = r.read()
    download_sha1 = hashlib.sha1(zip_bytes).hexdigest()
    if download_sha1 != sha1:
        raise ValueError(
            'downloaded {!r} has the wrong SHA1 hash: {} != {}'.format(
                url, download_sha1, sha1))
    zip_io = _BytesIO(zip_bytes)
    zip_file = zipfile.ZipFile(zip_io)
    if not os.path.isdir(install_directory):
        os.makedirs(install_directory)
    zip_file.extractall(install_directory)


def install_nano(install_directory):
    """Download and install the nano text editor"""
    zip_install(
        url='http://www.nano-editor.org/dist/v2.2/NT/nano-2.2.6.zip',
        sha1='f5348208158157060de0a4df339401f36250fe5b',
        install_directory=install_directory)


def create_nosetests_entry_point(python_scripts_directory):
    """Creates a terminal-based nosetests entry point for msysgit"""
    contents = '\n'.join([
        '#!/usr/bin/env python',
        'import sys',
        'import nose',
        "if __name__ == '__main__':",
        '    sys.exit(nose.core.main())',
        '',
        ])
    if not os.path.isdir(python_scripts_directory):
        os.makedirs(python_scripts_directory)
    with open(os.path.join(python_scripts_directory, 'nosetests'), 'w') as f:
        f.write(contents)


def update_bash_profile(extra_paths=()):
    """Create or append to a .bash_profile for Software Carpentry

    Adds nano to the path, sets the default editor to nano, and adds
    additional paths for other executables.
    """
    lines = [
        '',
        '# Add paths for Software-Carpentry-installed scripts and executables',
        'export PATH=\"$PATH:{}\"'.format(':'.join(
            make_posix_path(path) for path in extra_paths),),
        '',
        '# Make nano the default editor',
        'export EDITOR=nano',
        '',
        ]
    config_path = os.path.join(os.path.expanduser('~'), '.bash_profile')
    with open(config_path, 'a') as f:
        f.write('\n'.join(lines))


def make_posix_path(windows_path):
    """Convert a Windows path to a posix path"""
    for regex, sub in [
            (re.compile(r'\\'), '/'),
            (re.compile('^[Cc]:'), '/c'),
            ]:
        windows_path = regex.sub(sub, windows_path)
    return windows_path


def main():
    swc_dir = os.path.join(os.path.expanduser('~'), '.swc')
    bin_dir = os.path.join(swc_dir, 'bin')
    create_nosetests_entry_point(python_scripts_directory=bin_dir)
    nano_dir = os.path.join(swc_dir, 'lib', 'nano')
    install_nano(install_directory=nano_dir)
    update_bash_profile(extra_paths=(nano_dir, bin_dir))


if __name__ == '__main__':
    main()
python
import sqlalchemy as sa from sqlalchemy import orm from data.db_session import BaseModel import datetime class Post(BaseModel): __tablename__ = 'posts' __repr_attrs__ = ["title", "tournament"] serialize_only = ( "id", "title", "content", "status", "now", "tournament.id", "tournament.title", "author.id", "author.email", "author.fullname", "created_info" ) secure_serialize_only = ( "id", "title", "content", "status", "now", "tournament.id", "tournament.title", "author.id", "author.fullname", "created_info" ) title = sa.Column(sa.String, nullable=False) content = sa.Column(sa.Text, nullable=False) status = sa.Column(sa.Integer, nullable=False, default=1) now = sa.Column(sa.Boolean, nullable=False, default=False) author_id = sa.Column(sa.Integer, sa.ForeignKey('users.id')) tournament_id = sa.Column(sa.Integer, sa.ForeignKey('tournaments.id')) author = orm.relationship('User', backref="posts") tournament = orm.relationship('Tournament', backref="posts") @property def created_info(self): created_date = datetime.datetime.fromisoformat(str(self.created_at)) return created_date.strftime('%d %B %Y') def __str__(self): return self.title def have_permission(self, user): return user == self.author or self.tournament.have_permission(user)
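
# --- Hedged usage sketch (not part of the original model file) ---
# `create_session` is an assumption about data.db_session; the session factory
# in this project may be named differently.
#
#   from data.db_session import create_session
#   session = create_session()
#   post = Post(title="Results", content="The bracket is out", status=1,
#               author_id=1, tournament_id=1)
#   session.add(post)
#   session.commit()
#   print(post.created_info)  # e.g. "03 December 2019"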
python
from geniusweb.issuevalue.Bid import Bid from geniusweb.issuevalue.Domain import Domain from geniusweb.issuevalue.Value import Value from geniusweb.profile.utilityspace.LinearAdditive import LinearAdditive from tudelft.utilities.immutablelist.AbstractImmutableList import AbstractImmutableList from tudelft.utilities.immutablelist.FixedList import FixedList from tudelft.utilities.immutablelist.ImmutableList import ImmutableList from tudelft.utilities.immutablelist.JoinedList import JoinedList from tudelft.utilities.immutablelist.MapList import MapList from tudelft.utilities.immutablelist.Tuple import Tuple from typing import List, Dict from geniusweb.bidspace.IssueInfo import IssueInfo from geniusweb.bidspace.Interval import Interval from geniusweb.utils import val from decimal import Decimal class BidsWithUtility : ''' WARNING DO NOT USE, NOT YET WORKING CORRECTLY Tool class containing functions dealing with utilities of all bids in a given {@link LinearAdditive}. This class caches previously computed values to accelerate the calls and subsequent calls. Re-use the object to keep/reuse the cache. <h2>Rounding</h2> Internally, utilities of bids are rounded to the given precision. This may cause inclusion/exclusion of some bids in the results. See {@link #BidsWithUtility(LinearAdditive, int)} for more details Immutable. ''' def __init__(self, issuesInfo:List[IssueInfo] , precision:int ) : ''' @param issuesInfo List of the relevant issues (in order of relevance) and all info of each issue. @param precision the number of digits to use for computations. In practice, 6 seems a good default value. <p> All utilities * weight are rounded to this number of digits. This value should match the max number of (digits used in the weight of an issue + number of digits used in the issue utility). To determine the optimal value, one may consider the step size of the issues, and the range of interest. For instance if the utility function has values 1/3 and 2/3, then these have an 'infinite' number of relevant digits. But if the goal is to search bids between utility 0.1 and 0.2, then computing in 2 digits might already be sufficient. <p> This algorithm has memory and space complexity O( |nissues| 10^precision ). For spaces up to 7 issues, 7 digits should be feasible; for 9 issues, 6 digits may be the maximum. ''' if issuesInfo == None or len(issuesInfo)==0: raise ValueError("sortedissues list must contain at least 1 element") self._issueInfo = issuesInfo; self._precision = precision; # cache. Key = call arguments for {@link #get(int, Interval)}. Value=return # value of that call. self._cache:Dict[Tuple[int, Interval], ImmutableList[Bid]] = {} @staticmethod def create(space:LinearAdditive, precision:int=6) -> "BidsWithUtility": ''' Support constructor, uses default precision 6. This value seems practical for the common range of issues, utilities and weights. See {@link #BidsWithUtility(LinearAdditive, int)} for more details on the precision. @param space the {@link LinearAdditive} to analyze @param space the {@link LinearAdditive} to analyze. Optional, defaults to 6 ''' return BidsWithUtility(BidsWithUtility._getInfo(space, precision), precision); def getRange(self) ->Interval : ''' @return the (rounded) utility {@link Interval} of this space: minimum and maximum achievable utility. ''' return self._getRange(len(self._issueInfo) - 1) def getBids(self, range: Interval) -> ImmutableList[Bid] : ''' @param range the minimum and maximum utility required of the bids. to be included (both ends inclusive). 
@return a list with bids that have a (rounded) utility inside range. possibly empty. ''' return self._get(len(self._issueInfo) - 1, range.round(self._precision)); def getInfo(self) -> List[IssueInfo] : return self._issueInfo.copy() def getExtremeBid(self, isMax:bool) ->Bid : ''' @param isMax the extreme bid required @return the extreme bid, either the minimum if isMax=false or maximum if isMax=true ''' map:Dict[str, Value] = {} for info in self._issueInfo: map[info.getName()] = info.getExtreme(isMax) return Bid(map) def _get(self, n:int , goal:Interval) -> ImmutableList[Bid] : ''' Create partial BidsWithUtil list considering only issues 0..n, with utilities in given range. @param n the number of issueRanges to consider, we consider 0..n here. The recursion decreases n until n=0 @param goal the minimum and maximum utility required of the bids. to be included (both ends inclusive) @return BidsWithUtil list, possibly empty. ''' if goal == None: raise ValueError("Interval=null") # clamp goal into what is reachable. Avoid caching empty goal = goal.intersect(self._getRange(n)) if (goal.isEmpty()): return FixedList([]) cachetuple = Tuple(n, goal) if (cachetuple in self._cache): return self._cache[cachetuple] result = self._checkedGet(n, goal) self._cache[cachetuple]=result return result @staticmethod def _getInfo(space2:LinearAdditive , precision:int) -> List[IssueInfo] : dom = space2.getDomain() return [IssueInfo(issue, dom.getValues(issue), \ val(space2.getUtilities().get(issue)), \ space2.getWeight(issue), precision) \ for issue in dom.getIssues()] def _checkedGet(self, n:int, goal:Interval ) -> ImmutableList[Bid] : info = self._issueInfo[n] # issue is the first issuesWithRange. issue = info.getName() if n == 0: return OneIssueSubset(info, goal) # make new list, joining all sub-lists fulllist:ImmutableList[Bid] = FixedList([]) for val in info.getValues(): weightedutil = info.getWeightedUtil(val) subgoal = goal.subtract(weightedutil) # recurse: get list of bids for the subspace partialbids = self._get(n - 1, subgoal) bid = Bid({issue: val}) fullbids = BidsWithUtility.maplist(bid, partialbids) if fullbids.size() != 0: fulllist = JoinedList[Bid]([fullbids, fulllist]) return fulllist @staticmethod def maplist(bid: Bid, partialbids: ImmutableList[Bid]) -> ImmutableList[Bid]: ''' this is just to force a scope onto bid ''' return MapList[Bid, Bid](lambda pbid: pbid.merge(bid), partialbids) def _getRange(self, n:int) ->Interval : ''' @param n the maximum issuevalue utility to include. Use n=index of last issue s= (#issues in the domain - 1) for the full range of this domain. @return Interval (min, max) of the total weighted utility Interval of issues 0..n. All weighted utilities have been rounded to the set {@link #precision} ''' value = Interval(Decimal(0),Decimal(0)) for i in range(0,n+1): # include end point value = value.add(self._issueInfo[i].getInterval()) return value class OneIssueSubset (AbstractImmutableList[Bid]): ''' List of all one-issue bids that have utility inside given interval. ''' def __init__(self, info:IssueInfo , interval:Interval ) : ''' @param info the {@link IssueInfo} @param interval a utility interval (weighted) ''' self._info = info; self._interval = interval; self._size = info._subsetSize(interval) #Override def get(self, index:int) ->Bid : return Bid({self._info.getName(): self._info._subset(self._interval)[index]}) #Override def size(self) ->int: return self._size
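
# --- Hedged usage sketch (not part of the original file) ---
# Given a LinearAdditive profile `space`, the intended use is roughly:
#
#   bidspace = BidsWithUtility.create(space, precision=6)
#   full_range = bidspace.getRange()
#   good_bids = bidspace.getBids(Interval(Decimal("0.8"), full_range.getMax()))
#   best_bid = bidspace.getExtremeBid(True)
#
# Note the class-level warning above: the implementation is marked as not yet
# working correctly, so any results should be treated accordingly.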
python
import discord from discord.ext import commands from WhiteFox.core.config.config import Config class WhiteFox(commands.Bot): def __init__(self, token=None, client_id=None, prefixes=None): self.configs = None self._init_configs() if token is not None: self.configs.discord.token = token if client_id is not None: self.configs.discord.client_id = client_id if prefixes is not None: self.configs.discord.prefixes = prefixes super().__init__(command_prefix=commands.when_mentioned_or(*self.configs.fox.prefixes)) def _init_configs(self): self.configs = Config() def run(self): try: super().run(self.configs.discord.token) except discord.LoginFailure: print("Invalid token provided.") async def on_ready(self): print(f"{self.user.name}#{self.user.discriminator} Ready!") print(f"User Id: {self.user.id}") print("-------")
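
# Hedged usage sketch (not in the original module): a launcher script might look
# like this; the token and prefix values are placeholders.
#
#   if __name__ == "__main__":
#       bot = WhiteFox(token="YOUR_DISCORD_TOKEN", prefixes=["!"])
#       bot.run()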
python