from util import start_game, tapToStart, check_game_state
# Opened from the emulator; if the game is not running, start it
if __name__ == '__main__':
start_game()
tapToStart()
check_game_state()
# main()
# ===================== [python] =====================
import json
from datetime import date, datetime, timedelta
import pytest
from jitenshea.webapi import ISO_DATE, ISO_DATETIME, api
from jitenshea.webapp import app
app.config['TESTING'] = True
api.init_app(app)
def yesterday():
return date.today() - timedelta(1)
@pytest.fixture
def client():
client = app.test_client()
return client
def test_app_index(client):
resp = client.get('/')
assert resp.status_code == 200
def test_api_city_list(client):
resp = client.get('/api/city')
assert resp.status_code == 200
content = json.loads(resp.data)
expected = [{"city": "lyon",
"country": "france",
"stations": 348},
{"city": "bordeaux",
"country": "france",
"stations": 174}]
assert expected == content['data']
def test_api_city_info_stations(client):
resp = client.get('/api/bordeaux/infostation', query_string={'limit': 10})
assert resp.status_code == 200
data = json.loads(resp.data)
assert 10 == len(data['data'])
resp = client.get('/api/lyon/infostation', query_string={'limit': 5})
assert resp.status_code == 200
data = json.loads(resp.data)
assert 5 == len(data['data'])
assert ("address", "city", "id", "name", "nb_stands", "x", "y") == tuple(data['data'][0].keys())
def test_api_city_stations(client):
resp = client.get('/api/bordeaux/station', query_string={'limit': 10})
assert resp.status_code == 200
data = json.loads(resp.data)
assert 10 == len(data['data'])
assert 'date' in data
station = data['data'][0]
assert ("id", "name", "nb_bikes", "nb_stands", "timestamp", "x", "y") == tuple(station.keys())
resp = client.get('/api/lyon/station', query_string={'limit': 5})
assert resp.status_code == 200
data = json.loads(resp.data)
assert 5 == len(data['data'])
assert 'date' in data
station = data['data'][0]
assert ("id", "name", "nb_bikes", "nb_stands", "timestamp", "x", "y") == tuple(station.keys())
def test_api_city_map_stations(client):
"""Data in GeoJSON
"""
resp = client.get('/api/bordeaux/station', query_string={'limit': 10,
'geojson': True})
assert resp.status_code == 200
data = json.loads(resp.data)
assert data['type'] == 'FeatureCollection'
assert 10 == len(data['features'])
station = data['features'][0]
assert station['geometry']['type'] == 'Point'
assert tuple(station['properties'].keys()) == ("id", "name", "nb_bikes", "nb_stands", "timestamp")
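# For reference, the assertions above imply each GeoJSON feature has this
# shape (values are illustrative placeholders, not real API output):
# {"type": "Feature",
#  "geometry": {"type": "Point", "coordinates": [-0.57, 44.85]},
#  "properties": {"id": "93", "name": "...", "nb_bikes": 5,
#                 "nb_stands": 20, "timestamp": "2018-01-01T12:00:00"}}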
def test_api_specific_stations(client):
resp = client.get('/api/bordeaux/station/93,35')
assert resp.status_code == 200
data = json.loads(resp.data)
assert len(data['data']) == 2
assert ['35', '93'] == [x['id'] for x in data['data']]
def test_api_daily_transaction(client):
date = yesterday().strftime(ISO_DATE)
resp = client.get('/api/bordeaux/daily/station',
query_string={"limit": 10, "date": date, "by": "value"})
assert resp.status_code == 200
data = json.loads(resp.data)['data']
# ordering by value must put the station with the higher transaction
# value first
assert data[0]['value'][0] > data[1]['value'][0]
def test_api_timeseries(client):
start = yesterday().strftime(ISO_DATE)
stop = date.today().strftime(ISO_DATE)
resp = client.get('/api/bordeaux/timeseries/station/93,33',
query_string={"start": start, "stop": stop})
assert resp.status_code == 200
def test_api_timeseries_features(client):
start = yesterday().strftime(ISO_DATE)
stop = date.today().strftime(ISO_DATE)
resp = client.get('/api/bordeaux/timeseries/station/102,58',
query_string={"start": start, "stop": stop})
data = resp.json["data"]
assert len(data) > 0
features = data[0].keys()
assert "id" in features
assert "name" in features
assert "status" in features
assert "nb_stands" in features
assert "ts" in features
assert "available_bikes" in features
assert "available_stands" in features
def test_api_hourly_profile(client):
date = yesterday().strftime(ISO_DATE)
resp = client.get('/api/bordeaux/profile/hourly/station/93,33',
query_string={'date': date,
'window': 2})
assert resp.status_code == 200
resp = client.get('/api/lyon/profile/hourly/station/1002',
query_string={"date": date})
assert resp.status_code == 200
def test_api_daily_profile(client):
date = yesterday().strftime(ISO_DATE)
resp = client.get('/api/bordeaux/profile/daily/station/93,33',
query_string={"date": date})
assert resp.status_code == 200
def test_api_clustering_stations(client):
resp = client.get('/api/bordeaux/clustering/stations')
assert resp.status_code == 200
data = json.loads(resp.data)['data']
# there are just 4 clusters
assert {'0', '1', '2', '3'} == set(x['cluster_id'] for x in data)
resp = client.get('/api/bordeaux/clustering/stations',
query_string={"geojson": True})
assert resp.status_code == 200
def test_api_clustering_centroids(client):
resp = client.get('/api/bordeaux/clustering/centroids')
assert resp.status_code == 200
data = json.loads(resp.data)['data']
assert {'0', '1', '2', '3'} == set(x['cluster_id'] for x in data)
def test_api_prediction(client):
stop = datetime.today() + timedelta(hours=1)
start = stop - timedelta(hours=2)
params = {'start': start.strftime(ISO_DATETIME),
'stop': stop.strftime(ISO_DATETIME)}
print(params)
resp = client.get('/api/bordeaux/predict/station/22',
query_string=params)
assert resp.status_code == 200
data = resp.get_json()
# expect 5 predicted values for this start/stop window
assert len(data) == 5
assert 'nb_bikes' in data[0]
assert data[0]['at'] == '1H'
def test_api_prediction_with_current_values(client):
stop = datetime.today() + timedelta(hours=1)
start = stop - timedelta(hours=2)
params = {'start': start.strftime(ISO_DATETIME),
'stop': stop.strftime(ISO_DATETIME),
'current': True}
print(params)
resp = client.get('/api/bordeaux/predict/station/22',
query_string=params)
assert resp.status_code == 200
data = resp.get_json()
assert len(data) > 3
assert 'nb_bikes' in data[0]
def test_api_latest_prediction(client):
"""Latest predictions for all stations.
"""
resp = client.get('/api/bordeaux/predict/station')
assert resp.status_code == 200
data = resp.get_json()['data']
date = resp.get_json()['date']
assert len(data) >= 100
# in GeoJSON
resp = client.get('/api/bordeaux/predict/station',
query_string={'limit': 5, 'geojson': True})
assert resp.status_code == 200
data = resp.get_json()
assert len(data['features']) == 5
assert data['features'][0]['geometry']['type'] == 'Point'
# ===================== [python] =====================
# -*- coding:utf-8 -*-
import os
rootdir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
class Config(object):
SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
SSL_DISABLE = False
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_RECORD_QUERIES = True
MAIL_SERVER = 'smtp.163.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
MISSOURI_ADMIN = os.environ.get('MISSOURI_ADMIN')
MISSOURI_MAIL_SENDER = os.environ.get('MAIL_USERNAME')
POSTS_PER_PAGE = int(os.environ.get('POSTS_PER_PAGE') or 20) # cast to int; 20 is an assumed fallback
BABEL_DEFAULT_LOCALE = 'zh'
MISSOURI_SLOW_DB_QUERY_TIME = 0.5
@staticmethod
def init_app(app):
pass
class TestingConfig(Config):
"""
Testing environment
"""
TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
'sqlite:///' + os.path.join(rootdir, 'data-test.sqlite')
WTF_CSRF_ENABLED = False
class DevelopmentConfig(Config):
"""
Debugging environment
"""
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
'sqlite:///' + os.path.join(rootdir, 'data-dev.sqlite')
class ProductionConfig(Config):
"""
Production environment
"""
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
'sqlite:///' + os.path.join(rootdir, 'data.sqlite')
class HerokuConfig(ProductionConfig):
SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
@classmethod
def init_app(cls, app):
ProductionConfig.init_app(app)
# note: werkzeug.contrib was removed in Werkzeug 1.0; on newer versions
# import ProxyFix from werkzeug.middleware.proxy_fix instead
from werkzeug.contrib.fixers import ProxyFix
app.wsgi_app = ProxyFix(app.wsgi_app)
# log warnings and above to stderr
import logging
from logging import StreamHandler
stream_handler = StreamHandler()
stream_handler.setLevel(logging.WARNING)
app.logger.addHandler(stream_handler)
conf = {
'testing': TestingConfig,
'production': ProductionConfig,
'default': DevelopmentConfig,
'heroku': HerokuConfig
}
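# A minimal sketch of how an application factory might consume the `conf`
# mapping above (create_app and its location are assumptions, not part of
# this module):
#
# from flask import Flask
# from config import conf
#
# def create_app(config_name='default'):
#     app = Flask(__name__)
#     app.config.from_object(conf[config_name])
#     conf[config_name].init_app(app)
#     return app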
# ===================== [python] =====================
## LOX-IPA sim
# Author: Juha Nieminen
#import sys
#sys.path.insert(0, '/Users/juhanieminen/Documents/adamrocket')
import RocketComponents as rc
from physical_constants import poise, inches, Runiv, gallons, lbm, \
gearth, atm, psi, lbf
from numpy import pi, linspace, cos, radians, sqrt, exp, log, array, full, ceil, round
from scipy import optimize as opt
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import Flows1D as flows
#DESIGN VARIABLES____________________________________________________________________________________
# nominal parameters
Preg_ox_start = 470*psi # regulated pressurant outlet pressure [Pa]
Preg_fu_start = 550*psi # regulated pressurant outlet pressure [Pa]
mdot_fuel_nom = 0.2 # This is only for cooling jacket pressure drop purposes [kg/s]
Pdrop_jacket_nom = 10*psi # Cooling jacket pressure drop at mdot_nominal [Pa]
OF_nom = 1.2 # Oxidizer-to-fuel ratio. This has only effect on initial guesses during solving
# Propellant and pressurant tanks dimensions
Vfueltank = 0.15 # fuel tank volume [m3]
Voxtank = 0.168 # ox tank volume [m3]
Vfuelprestank = 0.142 # fuel pressurant tank volume [m3]
Voxprestank = 0.142 # ox pressurant tank volume [m3]
# Vent orifices
d_oxtank_vent = 0.00*inches # [m]
d_fueltank_vent = 0.00*inches # [m]
fuel_vent_cd = 0.7 # fuel vent discharge coefficient
ox_vent_cd = 0.7 # ox vent discharge coefficient
# Tubing
d_presox_tube = 1.0*inches # pressurant tank -> ox tank tube diameter [m]
L_presox_tube = 0.6 # pressurant tank -> ox tank tube length [m]
d_presfuel_tube = 1.0*inches # pressurant tank -> fuel tank tube diameter [m]
L_presfuel_tube = 0.6 # pressurant tank -> fuel tank tube length [m]
d_oxtube = 1.0*inches # ox tank -> manifold tube diameter [m]
L_oxtube = 2.4 # ox tank -> manifold tube length [m]
d_fueltube = 1.0*inches # fuel tank -> manifold tube diameter [m]
L_fueltube = 2.4 # fuel tank -> manifold tube length [m]
roughness = 0.005 # epsilon/diameter, dimensionless
# Valves
Cv_NC1_F = 9 # fuel solenoid valve flow coefficient, dimensionless
Cv_NC1_O = 9 # oxidizer solenoid valve flow coefficient, dimensionless
Cv_NC1_FP = 9 # fuel pressurant solenoid valve flow coefficient, dimensionless
Cv_NC1_OP = 9 # ox pressurant solenoid valve flow coefficient, dimensionless
Cv_CV1_FP = 1.7 # fuel pressurant check valve no.1 flow coefficient, dimensionless
Cv_CV2_FP = 1.7 # fuel pressurant check valve no.2 flow coefficient, dimensionless
Cv_CV1_OP = 1.7 # ox pressurant check valve no.1 flow coefficient, dimensionless
Cv_CV2_OP = 1.7 # ox pressurant check valve no.2 flow coefficient, dimensionless
Pcrack_CV1_FP = 10*psi # fuel pressurant check valve no.1 opening pressure [Pa]
Pcrack_CV2_FP = 10*psi # fuel pressurant check valve no.2 opening pressure [Pa]
Pcrack_CV1_OP = 10*psi # ox pressurant check valve no.1 opening pressure [Pa]
Pcrack_CV2_OP = 10*psi # ox pressurant check valve no.2 opening pressure [Pa]
# Pintle injector (Note that there is only CFD data for OD_shaft = [14mm,16mm] )
# Fuel side
d_in_fu = 0.5*inches # injector inlet diameter, m
d_mani_fu = 0.03 # manifold/converging section inlet diameter, m
ID_shaft = 0.01 # fuel annulus ID, m
OD_shaft = 0.015 #see note above # fuel annulus OD, m
L_shaft_fu = 0.075 # Length of annular flow path, m
r_tip = 0.0105 # pintle tip radius, m
A_fu_annulus = pi/4*(OD_shaft**2 - ID_shaft**2) # annulus cross section, m^2
h_exit = A_fu_annulus/(2*pi*r_tip) # pintle exit slot height, m
Dh_fu = OD_shaft - ID_shaft # annulus hydraulic diameter, m
rou_fu = 2e-6/Dh_fu # annulus _DIMENSIONLESS_ roughness
# Ox side
d_in_ox = 0.5*inches # injector inlet diameter (two inlets!)
d_mani_ox = 0.08 # ox manifold diameter upstream from orifice plate, m
Nori = 8 # number of orifices in orifice plate
d_ori = 3.5e-3 # single orifice diameter, m
cd_ori = 0.7 # orifice discharge coefficient in orifice plate, dimensionless
OD_ann = 0.0227 # converging section end/annulus outer diameter, m
ID_ann = 0.019 # central shaft outer diameter, m
L_shaft_ox = 0.01 # Length of annular flow path, m
Dh_ox = OD_ann - ID_ann # annulus hydraulic diameter, m
rou_ox = 2e-6/Dh_ox # annulus _DIMENSIONLESS_ roughness
# Define initial/nominal conditions in the chamber (obtained from CEA code assuming OFratio = 1.2)
TfireInit = 293 # initial flame temperature [K]
Tfire_nom = 2675 # nominal flame temperature [K]
Pfire = 1*atm # initial chamber pressure [Pa]
gammaFireInit = 1.16 # dimensionless
ga = gammaFireInit
mbarFireInit = 19.10 # combustion products' initial molecular mass [kg/kmol]
RfireInit = Runiv/mbarFireInit # combustion products' initial specific gas constant [J/kgK]
Pambient = atm # ambient pressure [Pa]
# Nozzle and chamber
d_nozzleThroat = 0.0604 # throat diameter [m]
A_nozzleThroat = pi*d_nozzleThroat**2/4 # throat area [m2]
area_ratio = 3.947 # nozzle exit-to-throat area ratio
A_nozzleExit = area_ratio*A_nozzleThroat # nozzle exit area [m2]
d_nozzleExit = sqrt(4*A_nozzleExit/pi) # nozzle exit diameter [m]
Dchamber = 0.1205 # chamber diameter [m]
Achamber = pi*Dchamber**2/4 # chamber cross sectional area [m2]
Lchamber = 0.2763 # chamber length [m]
Vchamber = Achamber*Lchamber # chamber volume [m3]
Lstar = Vchamber/A_nozzleThroat # chamber characteristic length [m]
Mc_nom = flows.getIsentropicMs(A_nozzleThroat, Achamber, gammaFireInit)[0] # nominal chamber Mach number
print("throat diameter is", '%.1f'%(d_nozzleThroat*1000), 'mm')
print("exit diameter is", '%.1f'%(d_nozzleExit*1000), 'mm')
print("chamber volume is", '%.5f'%Vchamber, "m3")
print("chamber Lstar is", '%.2f'%Lstar, "m")
print("chamber Mach_nom is", '%.2f'%Mc_nom)
# INITIAL CONDITIONS____________________________________________________________________________________________
#Define initial conditions in the tanks
TfuelpresStart = 293 # Fuel pressurant temp [K]
PfuelprestankStart = 4000*psi # Fuel pressurant tank pressure [Pa]
ToxpresStart = 293 # Ox pressurant temp [K]
PoxprestankStart = 4000*psi # Ox pressurant tank pressure [Pa]
ToxStart = 90 # Oxidizer (LOX) temp [K]
PoxtankStart = Preg_ox_start -1*psi # Oxidizer tank pressure [Pa] (-1psi helps convergence on first timestep)
FFoxtankStart = 0.65 # Oxidizer tank fill fraction, dimensionless
TfuelStart = 293 # Fuel temp [K]
PfueltankStart = Preg_fu_start -1*psi # Fuel tank pressure [Pa] (-1psi helps convergence on first timestep)
FFfueltankStart = 0.67 # Fuel tank fill fraction (Vfuel/Vtank)
# initialize propellants
IPA = rc.IPAFluid()
LOX = rc.LOXFluid()
FuelPres = rc.NitrogenFluid()
OxPres = rc.HeFluid()
#initialize nozzle and chamber
nozzle = rc.ConvergingDivergingNozzle(A_nozzleExit, A_nozzleThroat)
chamber = rc.LOX_IPACombustionChamber(nozzle, Vchamber, TfireInit, ga, mbarFireInit, Pfire, atm)
#initialize injector
fuel_pintle = rc.MagnumFuelPintle(d_in_fu, d_mani_fu, ID_shaft, OD_shaft, L_shaft_fu, r_tip, h_exit, rou_fu)
ox_pintle = rc.MagnumOxPintle(d_in_ox, d_mani_ox, d_ori, OD_ann, ID_ann, L_shaft_ox, Nori, cd_ori, rou_ox)
#initialize pressurant tanks
oxprestank = rc.IdealgasTank(OxPres, Voxprestank, ToxpresStart, PoxprestankStart)
fuelprestank = rc.IdealgasTank(FuelPres, Vfuelprestank, TfuelpresStart, PfuelprestankStart)
#initialize propellant tanks
oxtank = rc.LiquidPropellantTank(OxPres, LOX, Voxtank, ToxStart, ToxpresStart,\
PoxtankStart, FFoxtankStart, Preg_ox_start)
fueltank = rc.LiquidPropellantTank(FuelPres, IPA, Vfueltank, TfuelStart, TfuelpresStart,\
PfueltankStart, FFfueltankStart, Preg_fu_start)
#initialize vent holes
fuelVent = rc.VentHole(d_fueltank_vent, FuelPres.gamma, Runiv/FuelPres.mbar, fuel_vent_cd)
oxVent = rc.VentHole(d_oxtank_vent, OxPres.gamma, Runiv/OxPres.mbar, ox_vent_cd)
#initialize solenoids
NC1_O = rc.IncompressibleFlowSolenoid( Cv_NC1_O)
NC1_F = rc.IncompressibleFlowSolenoid( Cv_NC1_F)
NC1_OP = rc.CompressibleFlowSolenoid( Cv_NC1_OP, OxPres)
NC1_FP = rc.CompressibleFlowSolenoid( Cv_NC1_FP, FuelPres)
#initialize check valves
CV1_FP = rc.CompressibleFlowCheckValve( Cv_CV1_FP, Pcrack_CV1_FP, FuelPres)
CV2_FP = rc.CompressibleFlowCheckValve( Cv_CV2_FP, Pcrack_CV2_FP, FuelPres)
CV1_OP = rc.CompressibleFlowCheckValve( Cv_CV1_OP, Pcrack_CV1_OP, OxPres)
CV2_OP = rc.CompressibleFlowCheckValve( Cv_CV2_OP, Pcrack_CV2_OP, OxPres)
#initialize flow meter
FM1_F = rc.IncompressibleFlowMeter(10.5*psi)
FM1_O = rc.IncompressibleFlowMeter(10.5*psi)
#initialize particle filter
PF1_F = rc.IncompressibleFlowParticleFilter(10*psi)
PF1_O = rc.IncompressibleFlowParticleFilter(10*psi)
#initialize tubing
ox_tube = rc.RoughStraightCylindricalTube(d_oxtube, L_oxtube, roughness, True)
fuel_tube = rc.RoughStraightCylindricalTube(d_fueltube, L_fueltube, roughness, True)
presox_tube = rc.RoughStraightCylindricalTube(d_presox_tube, L_presox_tube, roughness, True)
presfuel_tube = rc.RoughStraightCylindricalTube(d_presfuel_tube, L_presfuel_tube, roughness, True)
#initialize cooling jacket
jacket = rc.CoolingJacket(mdot_fuel_nom, Pdrop_jacket_nom)
#initialize arrays for various data time histories
T_chamber = [chamber.T] # combustion chamber temperature [K]
Pchamber = [chamber.get_P_inlet()] # combustion chamber pressure [Pa]
Pexit = [nozzle.getPe(Pchamber[0], gammaFireInit, Pambient)] # nozzle exit pressure [Pa]
Mexit = [nozzle.getMe(Pchamber[0], gammaFireInit, Pambient)] # nozzle exit Mach number
cmass = [chamber.m] # resident propellant mass in combustion chamber [kg]
mdot_nozzle = [nozzle.getmdot(gammaFireInit, RfireInit, chamber.get_P_inlet(), chamber.T, chamber.Pa)] # mass flow out of the nozzle [kg/s]
Poxtank = [oxtank.getPtank()] # ox tank pressure [Pa]
Toxtank = [oxtank.getTpres()] # pressurant temperature in ox tank [K]
mPresOxtank = [oxtank.getMpres()] # pressurant mass in ox tank [kg]
mox = [oxtank.getMprop()] # oxidizer mass in tank [kg]
FFoxtank = [oxtank.getFF()] # ox tank fill fraction defined as Vox/(Voxtank)
Pfueltank = [fueltank.getPtank()] # fuel tank pressure [Pa]
Tfueltank = [fueltank.getTpres()] # pressurant temperature in fuel tank[K]
mPresFueltank = [fueltank.getMpres()] # pressurant mass in fuel tank [kg]
mfuel = [fueltank.getMprop()] # fuel mass in tank [kg]
FFfueltank = [fueltank.getFF()] # fuel tank fill fraction defined as Vfuel/(Vfueltank)
Toxprestank = [oxprestank.getTtank()] # temperature in ox pressurant tank [K]
Poxprestank = [oxprestank.getPtank()] # pressure in ox pressurant tank [Pa]
moxprestank = [oxprestank.getM()] # pressurant mass in ox pressurant tank [kg]
Tfuelprestank = [fuelprestank.getTtank()] # temperature in fuel pressurant tank [K]
Pfuelprestank = [fuelprestank.getPtank()] # pressure in fuel pressurant tank [Pa]
mfuelprestank = [fuelprestank.getM()] # pressurant mass in fuel pressurant tank [kg]
time = [0] # time array [s]
mdot_ox = [0] # liq ox mass flow out of the tank [kg/s]
rooOx = oxtank.propellant.getDensity(PoxtankStart, ToxStart) # liq ox density, assumed constant [kg/m^3]
P2ox = [0] # ox tank pressure [Pa]
P3ox = [0] # ox flow meter outlet pressure [Pa]
P4ox = [0] # ox solenoid outlet pressure [Pa]
P5ox = [0] # ox particle filter outlet pressure [Pa]
P6ox = [0] # ox injector inlet pressure [Pa]
mdot_fuel = [0] # fuel mass flow out of the tank [kg/s]
rooFuel = fueltank.propellant.density # fuel density, assumed constant [kg/m3]
P2fuel = [0] # fuel tank pressure [Pa]
P3fuel = [0] # fuel flow meter outlet pressure [Pa]
P4fuel = [0] # fuel solenoid outlet pressure [Pa]
P5fuel = [0] # fuel particle filter outlet pressure [Pa]
P6fuel = [0] # fuel cooling jacket inlet pressure [Pa]
P7fuel = [0] # fuel injector inlet pressure [Pa]
mdot_ox_pres = [0] # ox pressurant mass flow rate [kg/s]
P1pres_ox = [0] # ox pressurant pressure at filter outlet [Pa]
P2pres_ox = [Preg_ox_start] # ox pressurant pressure at regulator outlet [Pa]
P3pres_ox = [0] # ox pressurant pressure at solenoid valve outlet [Pa]
P4pres_ox = [0] # ox pressurant pressure at check valve no.1 outlet [Pa]
P5pres_ox = [0] # ox pressurant pressure at check valve no.2 outlet [Pa]
mdot_fuel_pres = [0] # fuel pressurant mass flow rate [kg/s]
P1pres_fuel = [0] # fuel pressurant pressure at filter outlet [Pa]
P2pres_fuel = [Preg_fu_start] # fuel pressurant pressure at regulator outlet [Pa]
P3pres_fuel = [0] # fuel pressurant pressure at solenoid valve outlet [Pa]
P4pres_fuel = [0] # fuel pressurant pressure at check valve no.1 outlet [Pa]
P5pres_fuel = [0] # fuel pressurant pressure at check valve no.2 outlet [Pa]
mTotal = [0] # propellant mass in the system [kg]
moxpres = [moxprestank[0] + mPresOxtank[0]] # ox pressurant mass [kg]
mfuelpres = [mfuelprestank[0] + mPresFueltank[0]] # fuel pressurant mass [kg]
OFratio = [0] # oxidizer to fuel mass flow ratio
Isp = [0] # specific impulse [s]
Thrust = [nozzle.getThrust(chamber.get_P_inlet(), Pambient, gammaFireInit) ] # rocket thrust [N]
#SIMULATE_______________________________________________________________________________________________________
# using orifices as follows: ejecting GOX from manifold to chamber, fuel liq-to-liq from manifold to chamber
print("")
print("STARTING SIM...")
print("")
print("mOxStart is", '%.2f'%mox[0], "kg")
print("mIPAstart is", mfuel[0], "kg")
print("m_pres_fuel_start is", '%.2f'%mfuelprestank[0], "kg")
print("m_pres_ox_start is", '%.2f'%moxprestank[0], "kg")
# The first step is to solve oxidizer and fuel mass flow rates from the tank to combustion chamber.
# definitions:
# P1ox = ox pressurant tank pressure
# P2ox = ox tank pressure
# P3ox = ox flow meter outlet pressure
# P4ox = ox solenoid valve outlet pressure
# P5ox = ox particle filter outlet pressure
# P6ox = ox injector inlet pressure
# (P1ox-P2ox) = regulator+tubing pressure drop, ox pressurant flow, is solved separately from fuel flow
# (P2ox-P3ox) = ox flow meter pressure drop, eq 1
# (P3ox-P4ox) = ox solenoid valve pressure drop, eq 2
# (P4ox-P5ox) = ox particle filter pressure drop, eq 3
# (P5ox-P6ox) = ox tubing pressure drop, eq 4
# (P6ox-Pchamber) = ox injector pressure drop, eq 5
# P1fuel = fuel pressurant tank pressure
# P2fuel = fuel tank pressure
# P3fuel = fuel flow meter outlet pressure
# P4fuel = fuel solenoid valve outlet pressure
# P5fuel = fuel particle filter outlet pressure
# P6fuel = fuel cooling jacket inlet pressure
# P7fuel = fuel injector inlet pressure
# (P1fuel-P2fuel) = regulator+tubing pressure drop, fuel pressurant flow, is solved separately from fuel flow
# (P2fuel-P3fuel) = fuel flow meter pressure drop eq 1
# (P3fuel-P4fuel) = fuel solenoid valve pressure drop, eq 2
# (P4fuel-P5fuel) = fuel particle filter pressure drop, eq 3
# (P5fuel-P6fuel) = fuel tubing pressure drop, eq 4
# (P6fuel-P7fuel) = cooling jacket pressure drop, eq 5
# (P7fuel-Pchamber) = injector pressure drop, eq 6
# P1pres_ox = ox pressurant particle filter outlet pressure
# P2pres_ox = ox pressurant regulator outlet pressure
# P3pres_ox = ox pressurant solenoid valve outlet pressure
# P4pres_ox = ox pressurant check valve no.1 outlet pressure
# P5pres_ox = ox pressurant check valve no.2 outlet pressure
# P6pres_ox = ox pressurant tubing outlet = ox tank pressure
# (P1pres_ox-P2pres_ox) = ox pressurant regulator pressure drop
# (P2pres_ox-P3pres_ox) = ox pressurant solenoid valve pressure drop
# (P3pres_ox-P4pres_ox) = ox pressurant check valve no.1 pressure drop
# (P4pres_ox-P5pres_ox) = ox pressurant check valve no.2 pressure drop
# (P5pres_ox-P6pres_ox) = ox pressurant tubing pressure drop
# P1pres_fuel = fuel pressurant particle filter outlet pressure
# P2pres_fuel = fuel pressurant regulator outlet pressure
# P3pres_fuel = fuel pressurant solenoid valve outlet pressure
# P4pres_fuel = fuel pressurant check valve no.1 outlet pressure
# P5pres_fuel = fuel pressurant check valve no.2 outlet pressure
# P6pres_fuel = fuel pressurant tubing outlet = fuel tank pressure
# (P1pres_fuel-P2pres_fuel) = fuel pressurant regulator pressure drop
# (P2pres_fuel-P3pres_fuel) = fuel pressurant solenoid valve pressure drop
# (P3pres_fuel-P4pres_fuel) = fuel pressurant check valve no.1 pressure drop
# (P4pres_fuel-P5pres_fuel) = fuel pressurant check valve no.2 pressure drop
# (P5pres_fuel-P6pres_fuel) = fuel pressurant tubing pressure drop
# In the case of oxidizer, P2 and Pchamber are known, so one must solve for P3, P4, P5 & P6. The fifth unknown is the mass flow rate. The five equations are the flow meter, particle filter, tubing and injector pressure drops, plus the expression for solenoid mass flow rate. They are defined in RocketComponents.py under their respective classes.
# With fuel, P2 and Pchamber are known, so one must solve for P3, P4, P5, P6 & P7. The sixth unknown is the mass flow rate; the cooling jacket contributes the extra pressure drop equation.
# With ox pressurant, P2 (regulation pressure) and P6 (ox tank pressure) are known, so one must solve for P3, P4 and P5. The fourth unknown is pressurant mass flow rate. Equations to be solved are pressure drops over the two check valves, the solenoid valve, and the tubing.
# With fuel pressurant, P2 (regulation pressure) and P6 (fuel tank pressure) are known, so one must solve for P3, P4 and P5. The fourth unknown is pressurant mass flow rate. Equations to be solved are pressure drops over the two check valves, the solenoid valve, and the tubing.
# fsolve requires sensible initial guesses for all unknowns. They are established by guessing the mass flow rate, because all other pressures trickle down from that.
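# Minimal illustration of the fsolve pattern used below: solve x+y=3 and
# x-y=1 from a rough initial guess; fsolve returns approximately (2, 1):
# opt.fsolve(lambda U: [U[0] + U[1] - 3.0, U[0] - U[1] - 1.0], [1.0, 1.0])
# Each solver call in the loop does the same with 4-6 unknowns (pressures plus mdot).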
#time = [0]
timestep_small = 5e-6 # seconds, used during initial transient
timestep_nom = 0.0001 # seconds, used after 0.01 seconds of simulation time
t_transient = 0.01 # seconds, estimated time of initial transient
t_simulation = 20 # seconds
if t_simulation <= t_transient:
simsteps = int(ceil(t_simulation/timestep_small))
else:
simsteps = int(ceil( t_transient/timestep_small + (t_simulation-t_transient)/timestep_nom ))
print("Sim time is", t_simulation, "s, number of simsteps is", simsteps)
oxErrorInt = 0
fuelErrorInt = 0
i=0
for i in range(0,simsteps):
if time[i] < t_transient:
timestep = timestep_small # use shorter timestep during initial transient
else:
timestep = timestep_nom
P1ox = Poxprestank[i]
P2ox = Poxtank[i]
P1fuel = Pfuelprestank[i]
P2fuel = Pfueltank[i]
Pchamb = Pchamber[i]
mu_ox = LOX.getViscosity(Poxtank[i], Toxtank[i])
mu_fuel = IPA.mu
T_pres_ox = Toxtank[i]
mu_pres_ox = OxPres.getViscosity(Poxtank[i], Toxtank[i])
roo_pres_ox = OxPres.getDensity(Poxtank[i], Toxtank[i])
roo_pres_ox_upstream = OxPres.getDensity(P2pres_ox[i], Toxprestank[i])
T_pres_fuel = Tfueltank[i]
mu_pres_fuel = FuelPres.getViscosity(Pfueltank[i], Tfueltank[i])
roo_pres_fuel = FuelPres.getDensity(Pfueltank[i], Tfueltank[i])
roo_pres_fuel_upstream = FuelPres.getDensity(P2pres_fuel[i], Tfuelprestank[i])
Pcrit_ox = LOX.P_crit
Pvapor_ox = LOX.P_vapor
if i==0: # First guesses, based on choked flow at the nozzle throat
#nominal values for guessing
Pc_nom = 350*psi
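# Choked-flow relation behind the guess below:
# mdot = A_t*P0*sqrt(gamma/(R*T0)) / ((gamma+1)/2)^((gamma+1)/(2*(gamma-1)))
# (note that 1+(ga-1)/2 equals (ga+1)/2); the trailing /5 deliberately scales
# the guess well below nominal, presumably to help fsolve converge.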
mdot_nominal = A_nozzleThroat*sqrt(ga)*Pc_nom/sqrt(RfireInit*TfireInit)/(1+(ga-1)/2)**((ga+1)/(2*ga-2))/5
mdot_ox_guess = mdot_nominal*OF_nom/(OF_nom+1)
mdot_fuel_guess = mdot_ox_guess/OF_nom
print("mdot_ox_guess is", mdot_ox_guess, "kg/s")
P3ox_guess = P2ox - FM1_O.getPressureDrop()
P4ox_guess = P3ox_guess - NC1_O.getPressureDrop(mdot_ox_guess, rooOx)
P5ox_guess = P4ox_guess - PF1_O.getPressureDrop()
P6ox_guess = P5ox_guess - ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, rooOx)
print("P2ox_tank is", P2ox/psi, "psi")
print("P3ox_guess is", P3ox_guess/psi, "psi")
print("P4ox_guess is", P4ox_guess/psi, "psi")
print("P5ox_guess is", P5ox_guess/psi, "psi")
print("P6ox_guess is", P6ox_guess/psi, "psi")
print("P_chamber is", Pchamber[i]/psi, "psi")
print("mdot_fuel_guess is", mdot_fuel_guess, "kg/s")
P3fuel_guess = P2fuel - FM1_F.getPressureDrop()
P4fuel_guess = P3fuel_guess - NC1_F.getPressureDrop(mdot_fuel_guess, rooFuel)
P5fuel_guess = P4fuel_guess - PF1_F.getPressureDrop()
P6fuel_guess = P5fuel_guess - fuel_tube.getPressureDrop(mdot_fuel_guess, mu_fuel, rooFuel)
P7fuel_guess = P6fuel_guess - jacket.getPressureDrop(mdot_fuel_guess)
print("P2fuel_tank is", P2fuel/psi, "psi")
print("P3fuel_guess is is", P3fuel_guess/psi, "psi")
print("P4fuel_guess is is", P4fuel_guess/psi, "psi")
print("P5fuel_guess is is", P5fuel_guess/psi, "psi")
print("P6fuel_guess is is", P6fuel_guess/psi, "psi")
print("P7fuel_guess is is", P7fuel_guess/psi, "psi")
print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_ox_pres_guess = mdot_ox_guess*roo_pres_ox/rooOx #volumetric flowrates of ox and pressurant are the same
P3pres_ox_guess = Preg_ox_start - NC1_OP.getPressureDrop(mdot_ox_pres_guess, Preg_ox_start, roo_pres_ox)
P4pres_ox_guess = P3pres_ox_guess - CV1_OP.getPressureDrop(mdot_ox_pres_guess, \
P3pres_ox_guess, \
OxPres.roo_std, \
roo_pres_ox, \
T_pres_ox)
P5pres_ox_guess = P4pres_ox_guess - CV2_OP.getPressureDrop(mdot_ox_pres_guess, \
P4pres_ox_guess, \
OxPres.roo_std, \
roo_pres_ox, \
T_pres_ox)
P6pres_ox_guess = P5pres_ox_guess - presox_tube.getPressureDrop(mdot_ox_pres_guess, mu_pres_ox, roo_pres_ox)
print("P3ox_pres_guess is", P3pres_ox_guess/psi, "psi")
print("P4ox_pres_guess is", P4pres_ox_guess/psi, "psi")
print("P5ox_pres_guess is", P5pres_ox_guess/psi, "psi")
print("P6ox_pres_guess is", P6pres_ox_guess/psi, "psi")
mdot_fuel_pres_guess = mdot_fuel_guess*roo_pres_fuel/rooFuel #volumetric flowrates of fuel and pressurant are the same
P3pres_fuel_guess = Preg_fu_start - NC1_FP.getPressureDrop(mdot_fuel_pres_guess, Preg_fu_start, roo_pres_fuel)
P4pres_fuel_guess = P3pres_fuel_guess - CV1_FP.getPressureDrop(mdot_fuel_pres_guess, \
P3pres_fuel_guess, \
FuelPres.roo_std, \
roo_pres_fuel, \
T_pres_fuel)
P5pres_fuel_guess = P4pres_fuel_guess - CV2_FP.getPressureDrop(mdot_fuel_pres_guess, \
P4pres_fuel_guess, \
FuelPres.roo_std, \
roo_pres_fuel, \
T_pres_fuel)
P6pres_fuel_guess = P5pres_fuel_guess - presfuel_tube.getPressureDrop(mdot_fuel_pres_guess, mu_pres_fuel, roo_pres_fuel)
print("P3fuel_pres_guess is", P3pres_fuel_guess/psi, "psi")
print("P4fuel_pres_guess is", P4pres_fuel_guess/psi, "psi")
print("P5fuel_pres_guess is", P5pres_fuel_guess/psi, "psi")
print("P6fuel_pres_guess is", P6pres_fuel_guess/psi, "psi")
else : # using solutions from previous timestep
mdot_ox_guess = mdot_ox[i-1]
#P3ox_guess = P2ox - FM1_O.getPressureDrop()
#P4ox_guess = P3ox_guess - NC1_O.getPressureDrop(mdot_ox_guess, rooOx)
#P5ox_guess = P4ox_guess - PF1_O.getPressureDrop()
#P6ox_guess = P5ox_guess - ox_tube.getPressureDrop(mdot_ox_guess, mu_ox, rooOx)
P3ox_guess = P3ox[i-1]
P4ox_guess = P4ox[i-1]
P5ox_guess = P5ox[i-1]
P6ox_guess = P6ox[i-1]
#print("mdot_ox_guess is", mdot_ox_guess)
#print("P2ox is", P2ox/psi, "psi")
#print("P3ox_guess is", P3ox_guess/psi, "psi")
#print("P4ox_guess is", P4ox_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_fuel_guess = mdot_fuel[i-1]
#P3fuel_guess = P2fuel - FM1_F.getPressureDrop()
#P4fuel_guess = P3fuel_guess - NC1_F.getPressureDrop(mdot_fuel_guess, rooFuel)
#P5fuel_guess = P4fuel_guess - PF1_F.getPressureDrop()
#P6fuel_guess = P5fuel_guess - fuel_tube.getPressureDrop(mdot_fuel_guess, mu_fuel, rooFuel)
#P7fuel_guess = P6fuel_guess - jacket.getPressureDrop(mdot_fuel_guess)
#mdot_fuel_guess = mdot_fuel[i-1]*1.0
P3fuel_guess = P3fuel[i-1]
P4fuel_guess = P4fuel[i-1]
P5fuel_guess = P5fuel[i-1]
P6fuel_guess = P6fuel[i-1]
P7fuel_guess = P7fuel[i-1]
#print("P2fuel is", P2fuel/psi, "psi")
#print("P3fuel_guess is is", P3fuel_guess/psi, "psi")
#print("P4fuel_guess is is", P4fuel_guess/psi, "psi")
#print("P5fuel_guess is is", P5fuel_guess/psi, "psi")
#print("P_chamber is", Pchamber[i]/psi, "psi")
mdot_ox_pres_guess = mdot_ox_pres[i-1]
P3pres_ox_guess = P3pres_ox[i-1]
P4pres_ox_guess = P4pres_ox[i-1]
P5pres_ox_guess = P5pres_ox[i-1]
mdot_fuel_pres_guess = mdot_fuel_pres[i-1]
P3pres_fuel_guess = P3pres_fuel[i-1]
P4pres_fuel_guess = P4pres_fuel[i-1]
P5pres_fuel_guess = P5pres_fuel[i-1]
initial_ox_guesses = [P3ox_guess, P4ox_guess,P5ox_guess, P6ox_guess, mdot_ox_guess]
initial_fuel_guesses = [P3fuel_guess, P4fuel_guess, P5fuel_guess, P6fuel_guess, P7fuel_guess, mdot_fuel_guess]
initial_pres_ox_guesses = [P3pres_ox_guess, P4pres_ox_guess, P5pres_ox_guess, mdot_ox_pres_guess]
initial_pres_fuel_guesses = [P3pres_fuel_guess, P4pres_fuel_guess, P5pres_fuel_guess, mdot_fuel_pres_guess]
def oxfunks(U): # defines the system of equations and unknowns U to be solved
P3 = U[0]
P4 = U[1]
P5 = U[2]
P6 = U[3]
mdot = U[4]
#print("P3 as U0 is", P3/psi, "psi")
#print("P4 as U1 is", P4/psi, "psi")
#print("mdot as U2 is", mdot, "kg/s")
out = [ mdot - NC1_O.getMdot(P3, P4, rooOx, Pcrit_ox, Pvapor_ox) ]
out.append( P2ox - P3 - FM1_O.getPressureDrop() )
out.append( P4 - P5 - PF1_O.getPressureDrop() )
out.append( P5 - P6 - ox_tube.getPressureDrop(mdot, mu_ox, rooOx))
out.append( P6 - Pchamb - ox_pintle.getPressureDrops(mdot, rooOx, mu_ox, 0*psi)[-1] )
#print("oxoutti", out)
return out
ox_solution = opt.fsolve(oxfunks, initial_ox_guesses) # iterates until finds a satisfying solution or goes bust
#print("ox solution is", array(ox_solution)/psi )
mdot_ox_new = ox_solution[4]
Pox_intermediate = oxtank.getPtank()
Pox_eff = (Pox_intermediate + P2ox)/2 # average of pressures before and after ejection of ox from tank; incoming Helium will see this 'effective' pressure in the tank
#mdot_ox_pres_new = presox_tube.getMdot(Preg, oxtank.getPtank(), mu_N2_ox, roo_N2_ox)
#print("mdot_ox_pres_new is", mdot_ox_pres_new, "kg/s")
def fuelfunks(U): # defines the system of equations and unknowns U to be solved
P3 = U[0]
P4 = U[1]
P5 = U[2]
P6 = U[3]
P7 = U[4]
mdot = U[5]
#print("U is", U)
#print("fuelmdot is", mdot)
out = [ mdot - NC1_F.getMdot(P3, P4, rooFuel, IPA.P_crit, IPA.P_vapor) ]
out.append( P2fuel - P3 - FM1_F.getPressureDrop() )
out.append( P4 - P5 - PF1_F.getPressureDrop() )
out.append( P5 - P6 - fuel_tube.getPressureDrop(mdot, mu_fuel, rooFuel) )
out.append( P6 - P7 - jacket.getPressureDrop(mdot) )
out.append( P7 - Pchamb - fuel_pintle.getPressureDrops(mdot, rooFuel, mu_fuel, 0*psi)[-1] )
#print("fueloutti", out)
return out
fuel_solution = opt.fsolve(fuelfunks, initial_fuel_guesses)
#print("fuel solution is", array(fuel_solution)/psi )
mdot_fuel_new = fuel_solution[5]
Pfuel_intermediate = fueltank.getPtank()
Pfuel_eff = (Pfuel_intermediate + P2fuel)/2 # average of pressures before and after ejection of fuel from tank; incoming Nitrogen will see this 'effective' pressure in the tank
def presoxfunks(U): # defines the system of equations and unknowns U to be solved
P3 = U[0]
P4 = U[1]
P5 = U[2]
mdot = U[3]
#print("P2pres_ox_i is", P2pres_ox[i]/psi, "psi")
out = [mdot - NC1_OP.getMdot(P2pres_ox[i], P3, roo_pres_ox)]
out.append(mdot - CV1_OP.getMdot(P3, P4, OxPres.roo_std, roo_pres_ox, T_pres_ox))
out.append(mdot - CV2_OP.getMdot(P4, P5, OxPres.roo_std, roo_pres_ox, T_pres_ox))
#out.append(mdot - presox_tube.getMdot(P5, Pox_eff, mu_pres_ox, roo_pres_ox))
out.append(P5 - Pox_eff - presox_tube.getPressureDrop(mdot, mu_pres_ox, roo_pres_ox))
#print("presox_outti", out)
return out
presox_solution = opt.fsolve(presoxfunks, initial_pres_ox_guesses)
#print("presox solution is", array(presox_solution)/psi )
mdot_ox_pres_new = presox_solution[3]
def presfuelfunks(U): # defines the system of equations and unknowns U to be solved
P3 = U[0]
P4 = U[1]
P5 = U[2]
mdot = U[3]
out = [mdot - NC1_FP.getMdot(P2pres_fuel[i], P3, roo_pres_fuel)]
out.append(mdot - CV1_FP.getMdot(P3, P4, FuelPres.roo_std, roo_pres_fuel, T_pres_fuel))
out.append(mdot - CV2_FP.getMdot(P4, P5, FuelPres.roo_std, roo_pres_fuel, T_pres_fuel))
#out.append(mdot - presfuel_tube.getMdot(P5, Pfuel_eff, mu_pres_fuel, roo_pres_fuel))
out.append(P5 - Pfuel_eff - presfuel_tube.getPressureDrop(mdot, mu_pres_fuel, roo_pres_fuel))
return out
presfuel_solution = opt.fsolve(presfuelfunks, initial_pres_fuel_guesses)
#print("presfuel solution is", array(presfuel_solution)/psi )
mdot_fuel_pres_new = presfuel_solution[3]
# Now that mass flow rates out have been solved, intermediate states of the prop tanks can be established (only prop ejection):
oxtank.update(Toxprestank[i], 0 , mdot_ox_new, P2pres_ox[i], timestep) #T_in, mdot_pres_in, mdot_prop_out, Pfeed, timestep, i):
fueltank.update(Tfuelprestank[i], 0, mdot_fuel_new, P2pres_fuel[i], timestep)
# Determine final conditions in prop tanks (only pressurant inflow)
oxtank.update(Toxprestank[i], mdot_ox_pres_new, 0, P2pres_ox[i], timestep)
fueltank.update(Tfuelprestank[i], mdot_fuel_pres_new, 0, P2pres_fuel[i], timestep)
# ...and pressurant tanks
oxprestank.update(mdot_ox_pres_new, timestep)
fuelprestank.update(mdot_fuel_pres_new, timestep)
# Check if the O/F ratio exceeds 10. If so, stop the simulation
if (mdot_ox_new/mdot_fuel_new) > 10:
print("OF ratio > 10, terminate")
break
# Update chamber parameters:
chamber.update(mdot_ox_new, mdot_fuel_new, Pambient, timestep) # mdot_ox_in, mdot_fuel_in, Pambient, timestep
# Check if the ox or fuel tank empties during this timestep. If so, stop the simulation.
if oxtank.getMprop() < 0:
print("Ox tank empty after", i, "iterations, i.e.", time[i], "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining ox prs", moxprestank[i], "kg,", "i.e.", moxprestank[i]/moxprestank[0]*100, "% of initial amount")
print("remaining fuel prs", mfuelprestank[i], "kg,", "i.e.", mfuelprestank[i]/mfuelprestank[0]*100, "% of initial amount")
break
if fueltank.getMprop() < 0:
print("Fuel tank empty after", i, "iterations, i.e.", time[i], "seconds")
print("remaining ox", mox[i], "kg")
print("remaining ox prs", moxprestank[i], "kg,", "i.e.", moxprestank[i]/moxprestank[0]*100, "% of initial amount")
print("remaining fuel prs", mfuelprestank[i], "kg,", "i.e.", mfuelprestank[i]/mfuelprestank[0]*100, "% of initial amount")
break
if oxprestank.getPtank() < 1000*psi:
print("Out of ox pressurant after", i, "iterations, i.e.", time[i], "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining ox", mox[i], "kg")
print("remaining fuel prs", mfuelprestank[i], "kg,", "i.e.", mfuelprestank[i]/mfuelprestank[0]*100, "% of initial amount")
break
if fuelprestank.getPtank() < 1000*psi:
print("Out of fuel pressurant after", i, "iterations, i.e.", time[i], "seconds")
print("remaining fuel", mfuel[i], "kg")
print("remaining ox", mox[i], "kg")
print("remaining ox prs", moxprestank[i], "kg,", "i.e.", moxprestank[i]/moxprestank[0]*100, "% of initial amount")
break
#update mass flow time histories. These are values during the CURRENT time step.
if i==0:
P3ox = [ox_solution[0]]
P4ox = [ox_solution[1]]
P5ox = [ox_solution[2]]
P6ox = [ox_solution[3]]
mdot_ox = [ox_solution[4]]
P3fuel = [fuel_solution[0]]
P4fuel = [fuel_solution[1]]
P5fuel = [fuel_solution[2]]
P6fuel = [fuel_solution[3]]
P7fuel = [fuel_solution[4]]
mdot_fuel = [fuel_solution[5]]
OFratio = [ mdot_ox[0]/mdot_fuel[0] ]
P3pres_ox = [presox_solution[0]]
P4pres_ox = [presox_solution[1]]
P5pres_ox = [presox_solution[2]]
mdot_ox_pres = [presox_solution[3]]
P3pres_fuel = [presfuel_solution[0]]
P4pres_fuel = [presfuel_solution[1]]
P5pres_fuel = [presfuel_solution[2]]
mdot_fuel_pres = [presfuel_solution[3]]
else:
P3ox.append( ox_solution[0])
P4ox.append( ox_solution[1])
P5ox.append( ox_solution[2])
P6ox.append( ox_solution[3])
mdot_ox.append( ox_solution[4])
P3fuel.append( fuel_solution[0])
P4fuel.append( fuel_solution[1])
P5fuel.append( fuel_solution[2])
P6fuel.append( fuel_solution[3])
P7fuel.append( fuel_solution[4])
mdot_fuel.append( fuel_solution[5])
#print("i is= ", i)
OFratio.append( mdot_ox[i]/mdot_fuel[i])
P3pres_ox.append( presox_solution[0])
P4pres_ox.append( presox_solution[1])
P5pres_ox.append( presox_solution[2])
mdot_ox_pres.append( presox_solution[3])
P3pres_fuel.append( presfuel_solution[0])
P4pres_fuel.append( presfuel_solution[1])
P5pres_fuel.append( presfuel_solution[2])
mdot_fuel_pres.append( presfuel_solution[3])
#update the rest of the time histories. System will have these values during the NEXT time step.
Poxtank.append( oxtank.getPtank())
Toxtank.append( oxtank.getTpres())
mPresOxtank.append( oxtank.getMpres())
mox.append( oxtank.getMprop())
FFoxtank.append( oxtank.getFF())
Pfueltank.append( fueltank.getPtank())
Tfueltank.append( fueltank.getTpres())
mPresFueltank.append( fueltank.getMpres())
mfuel.append( fueltank.getMprop())
FFfueltank.append( fueltank.getFF())
Toxprestank.append( oxprestank.getTtank())
Poxprestank.append( oxprestank.getPtank())
moxprestank.append( oxprestank.getM())
#mdot_ox_pres.append( mdot_ox_pres_new)
Tfuelprestank.append( fuelprestank.getTtank())
Pfuelprestank.append( fuelprestank.getPtank())
mfuelprestank.append( fuelprestank.getM())
#mdot_fuel_pres.append( mdot_fuel_pres_new)
Pchamber.append( chamber.get_P_inlet() )
Pexit.append( nozzle.getPe(Pchamber[i+1], chamber.gamma, Pambient) )
Mexit.append( nozzle.getMe(Pchamber[i+1], chamber.gamma, Pambient) )
cmass.append( chamber.m)
mdot_nozzle.append( nozzle.getmdot(chamber.gamma, Runiv/chamber.mbar, chamber.get_P_inlet(),\
chamber.T, chamber.Pa) )
Thrust.append( nozzle.getThrust(chamber.get_P_inlet(), Pambient, chamber.gamma) )
T_chamber.append( chamber.T)
Isp.append( Thrust[i+1]/(mdot_ox[i] + mdot_fuel[i])/9.81 )
mTotal.append(mox[i+1] + mfuel[i+1] + cmass[i+1] + mdot_nozzle[i]*timestep )
moxpres.append( moxprestank[i+1] + mPresOxtank[i+1] )
mfuelpres.append( mfuelprestank[i+1] + mPresFueltank[i+1] )
time.append( time[i]+timestep )
# PI (Proportional plus Integral) control of the regulator outlet pressures:
# drive each tank back toward its initial pressure
oxErrorInt = oxErrorInt + (Poxtank[0]-Poxtank[-1])*timestep # integral term for Preg_ox
fuelErrorInt = fuelErrorInt + (Pfueltank[0]-Pfueltank[-1])*timestep # integral term for Preg_fuel
# the controller runs on every timestep, with proportional gain 0.1 and integral gain 1.0
Preg_ox_new = Preg_ox_start + 0.1*(Poxtank[0]-Poxtank[-1]) + oxErrorInt
Preg_fuel_new = Preg_fu_start + 0.1*(Pfueltank[0]-Pfueltank[-1]) + fuelErrorInt
P2pres_ox.append(Preg_ox_new)
P2pres_fuel.append(Preg_fuel_new)
if (i+1) % 1000 == 0:
print("i=", i+1) # progress indicator
# Evaluate and Print some values
Vinj_ox = ox_pintle.getVelocities(mdot_ox[-1], rooOx, mu_ox)[-1] # Ox injection velocity, m/s
Vinj_fu = fuel_pintle.getVelocities(mdot_fuel[-1], rooFuel, mu_fuel)[-1] # Fuel injection velocity, m/s
fire = chamber.get_Tfire(1.5, 2e6)
print("")
print("mdot_nozzle steady state is", '%.3f'%mdot_nozzle[-1], "kg/s")
print("SS thrust is", '%.1f'%Thrust[-1], "N")
print("SS Isp is", '%.1f'%Isp[-1], "s")
print("SS T_chamber is", '%.1f'%T_chamber[-1], "K")
print("SS P_chamber is", '%.1f'%(Pchamber[-1]/psi), "psi")
print("SS P_exit is", '%.3f'%(Pexit[-1]/atm), "atm")
print("SS thrust coeff is", '%.3f'%nozzle.getCf(Pchamber[-1], atm, chamber.get_gamma(OFratio[-1],Pchamber[-1] )) )
print("SS mdot_pres_fuel is", '%.3f'%mdot_fuel_pres[-1], "kg/s")
print("SS mdot_pres_ox is", '%.3f'%mdot_ox_pres[-1], "kg/s")
print("SS pres_fuel flow rate is", '%.3f'%(mdot_fuel_pres[-1]/roo_pres_fuel*1000/3.78*60), "GPM")
print("SS pres_ox flow rate is", '%.3f'%(mdot_ox_pres[-1]/roo_pres_ox*1000/3.78*60), "GPM")
print("SS mdot_ox is", '%.3f'%mdot_ox[-1], "kg/s")
print("SS mdot_fuel is", '%.3f'%mdot_fuel[-1], "kg/s")
print("SS O/F ratio is", '%.3f'%OFratio[-1])
print("SS ox tube velocity is", '%.1f'%(mdot_ox[-1]/(rooOx*pi*d_oxtube**2/4)), "m/s")
print("SS fuel tube velocity is", '%.1f'%(mdot_fuel[-1]/(rooFuel*pi*d_fueltube**2/4)), "m/s")
print("SS ox injection velocity is", '%.1f'%(Vinj_ox), "m/s")
print("SS fuel injection velocity is", '%.1f'%(Vinj_fu), "m/s")
print("Momentum ratio is", '%.3f'%(Vinj_fu*mdot_fuel[-1]/(Vinj_ox*mdot_ox[-1])))
print("SS ox injector P_drop is", '%.1f'%((P4ox[-1]-Pchamber[-1])/psi), "psi, ie.", '%.1f'%((P4ox[-1]-Pchamber[-1])/Pchamber[-1]*100), "% of Pchamber")
print("SS fuel injector P_drop", '%.1f'%((P5fuel[-1]-Pchamber[-1])/psi), "psi,ie, "'%.1f'%((P5fuel[-1]-Pchamber[-1])/Pchamber[-1]*100), "% of Pchamber")
print("")
print("SS ox pres line mass flow rate is", '%.3f'%mdot_ox_pres[-1], "kg/s")
print("SS ox pres line pressure at RG1-He outlet is", '%.2f'%(P2pres_ox[-1]/psi), "psi")
print("SS ox pres line pressure at NC1-He outlet is", '%.2f'%(P3pres_ox[-1]/psi), "psi")
print("SS ox pres line pressure at CV1-He outlet is", '%.2f'%(P4pres_ox[-1]/psi), "psi")
print("SS ox pres line pressure at CV2-He outlet is", '%.2f'%(P5pres_ox[-1]/psi), "psi")
print("SS pressure drop across NC1-He is", '%.2f'%((P2pres_ox[-1]-P3pres_ox[-1])/psi), "psi")
print("SS pressure drop across CV1-He is", '%.2f'%((P3pres_ox[-1]-P4pres_ox[-1])/psi), "psi")
print("SS pressure drop across CV2-He is", '%.2f'%((P4pres_ox[-1]-P5pres_ox[-1])/psi), "psi")
print("")
print("SS fuel pres line mass flow rate is", '%.3f'%mdot_fuel_pres[-1], "kg/s")
print("SS fuel pres line pressure at RG1-N outlet is", '%.2f'%(P2pres_fuel[-1]/psi), "psi")
print("SS fuel pres line pressure at NC1-N outlet is", '%.2f'%(P3pres_fuel[-1]/psi), "psi")
print("SS fuel pres line pressure at CV1-N outlet is", '%.2f'%(P4pres_fuel[-1]/psi), "psi")
print("SS fuel pres line pressure at CV2-N outlet is", '%.2f'%(P5pres_fuel[-1]/psi), "psi")
print("SS pressure drop across NC1-N is", '%.2f'%((P2pres_fuel[-1]-P3pres_fuel[-1])/psi), "psi")
print("SS pressure drop across CV1-N is", '%.2f'%((P3pres_fuel[-1]-P4pres_fuel[-1])/psi), "psi")
print("SS pressure drop across CV2-N is", '%.2f'%((P4pres_fuel[-1]-P5pres_fuel[-1])/psi), "psi")
print("")
print("SS pressure drop across cooling jacket is", '%.2f'%(jacket.getPressureDrop(mdot_fuel[-1])/psi), "psi")
print("")
print("bend drop in fuel inj is", '%.1f'%(fuel_pintle.getPressureDrops(mdot_fuel[-1], rooFuel,mu_fuel, 0*psi)[3]/psi), "psi")
print("fuel injector k_bend =", '%.3f'%( fuel_pintle.get_kbend(fuel_pintle.OD_shaft, mdot_fuel[-1])))
print("")
print("Pinj_in_fuel is", '%.1f'%(fuel_pintle.getPressures(mdot_fuel[-1], rooFuel,mu_fuel, P5fuel[-1])[0]/psi), "psi")
print("Pfuel_manifold is", '%.1f'%(fuel_pintle.getPressures(mdot_fuel[-1], rooFuel,mu_fuel, P5fuel[-1])[1]/psi), "psi")
print("Pfuel_annulus_in is", '%.1f'%(fuel_pintle.getPressures(mdot_fuel[-1], rooFuel,mu_fuel, P5fuel[-1])[2]/psi), "psi")
print("Pfuel_annulus_out is", '%.1f'%(fuel_pintle.getPressures(mdot_fuel[-1], rooFuel,mu_fuel, P5fuel[-1])[3]/psi), "psi")
print("Pfuel_bend_exit is", '%.1f'%(fuel_pintle.getPressures(mdot_fuel[-1], rooFuel,mu_fuel, P5fuel[-1])[4]/psi), "psi")
print("")
print("Pinj_in_ox is", '%.1f'%(ox_pintle.getPressures(mdot_ox[-1], rooOx,mu_ox, P4ox[-1])[0]/psi), "psi")
print("Pox_manifold is", '%.1f'%(ox_pintle.getPressures(mdot_ox[-1], rooOx,mu_ox, P4ox[-1])[1]/psi), "psi")
print("Pox_converging_in is", '%.1f'%(ox_pintle.getPressures(mdot_ox[-1], rooOx,mu_ox, P4ox[-1])[2]/psi), "psi")
print("Pox_annulus_in is", '%.1f'%(ox_pintle.getPressures(mdot_ox[-1], rooOx,mu_ox, P4ox[-1])[3]/psi), "psi")
print("Pox_annulus_exit is", '%.1f'%(ox_pintle.getPressures(mdot_ox[-1], rooOx,mu_ox, P4ox[-1])[4]/psi), "psi")
print("")
print("v_fuel_manifold is", '%.2f'%fuel_pintle.getVelocities(mdot_fuel[-1], rooFuel,mu_fuel)[1], "m/s")
print("v_fuel_annulus is", '%.2f'%fuel_pintle.getVelocities(mdot_fuel[-1], rooFuel,mu_fuel)[2], "m/s")
print("v_fuel_injection is", '%.2f'%fuel_pintle.getVelocities(mdot_fuel[-1], rooFuel,mu_fuel)[3], "m/s")
print("")
print("v_ox_manifold is", '%.2f'%ox_pintle.getVelocities(mdot_ox[-1], rooOx, mu_ox)[1], "m/s")
print("v_ox_ori is", '%.2f'%ox_pintle.getVelocities(mdot_ox[-1], rooOx, mu_ox)[2], "m/s")
print("v_ox_manifold after orifices is", '%.2f'%ox_pintle.getVelocities(mdot_ox[-1], rooOx, mu_ox)[3], "m/s")
print("v_ox_injection", '%.2f'%ox_pintle.getVelocities(mdot_ox[-1], rooOx, mu_ox)[4], "m/s")
# following time histories are one element shorter than the rest, so the last calculated value will be duplicated to match the length of other time histories.
P3ox.append( ox_solution[0])
P4ox.append( ox_solution[1])
P5ox.append( ox_solution[2])
P6ox.append( ox_solution[3])
mdot_ox.append( ox_solution[4])
P3fuel.append( fuel_solution[0])
P4fuel.append( fuel_solution[1])
P5fuel.append( fuel_solution[2])
P6fuel.append( fuel_solution[3])
P7fuel.append( fuel_solution[4])
mdot_fuel.append( fuel_solution[5])
P3pres_ox.append( presox_solution[0])
P4pres_ox.append( presox_solution[1])
P5pres_ox.append( presox_solution[2])
mdot_ox_pres.append( presox_solution[3])
P3pres_fuel.append( presfuel_solution[0])
P4pres_fuel.append( presfuel_solution[1])
P5pres_fuel.append( presfuel_solution[2])
mdot_fuel_pres.append( presfuel_solution[3])
OFratio.append( mdot_ox[-1]/mdot_fuel[-1])
# plot time histories
plt.ion()
plt.figure(1)
plt.plot(time,array(Poxprestank)/psi, label='pressurant tank')
plt.figure(1)
plt.plot(time,array(P3pres_ox)/psi, label='pressurant solenoid valve out')
plt.figure(1)
plt.plot(time,array(P4pres_ox)/psi, label='pressurant check valve 1 out')
plt.figure(1)
plt.plot(time,array(P5pres_ox)/psi, label='pressurant check valve 2 out')
plt.figure(1)
plt.plot(time,array(Poxtank)/psi, label='ox tank')
plt.figure(1)
plt.plot(time,array(P3ox)/psi, label='ox flow meter out')
plt.figure(1)
plt.plot(time,array(P4ox)/psi, label='ox solenoid valve out')
plt.figure(1)
plt.plot(time,array(P5ox)/psi, label='ox particle filter out')
plt.figure(1)
plt.plot(time,array(P6ox)/psi, label='ox injector in')
plt.figure(1)
plt.plot(time,array(Pchamber)/psi, label='chamber')
plt.figure(1)
plt.plot(time,array(Pexit)/psi, label='exit')
plt.title('Ox pressures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('psi')
plt.show()
plt.figure(2)
plt.plot(time,array(Pfuelprestank)/psi, label='pressurant tank')
plt.figure(2)
plt.plot(time,array(P3pres_fuel)/psi, label='pressurant solenoid valve out')
plt.figure(2)
plt.plot(time,array(P4pres_fuel)/psi, label='pressurant check valve 1 out')
plt.figure(2)
plt.plot(time,array(P5pres_fuel)/psi, label='pressurant check valve 2 out')
plt.figure(2)
plt.plot(time,array(Pfueltank)/psi, label='fuel tank')
plt.figure(2)
plt.plot(time,array(P3fuel)/psi, label='fuel flow meter out')
plt.figure(2)
plt.plot(time,array(P4fuel)/psi, label='fuel solenoid valve out')
plt.figure(2)
plt.plot(time,array(P5fuel)/psi, label='fuel particle filter out')
plt.figure(2)
plt.plot(time,array(P6fuel)/psi, label='fuel cooling jacket in')
plt.figure(2)
plt.plot(time,array(P7fuel)/psi, label='fuel injector in')
plt.figure(2)
plt.plot(time,array(Pchamber)/psi, label='chamber')
plt.figure(2)
plt.plot(time,array(Pexit)/psi, label='exit')
plt.title('Fuel pressures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('psi')
plt.show()
plt.figure(3)
plt.plot(time,Toxtank, label='ox tank')
plt.figure(3)
plt.plot(time,Toxprestank, label='ox pressurant tank')
plt.figure(3)
plt.plot(time,Tfueltank, label='fuel tank')
plt.figure(3)
plt.plot(time,Tfuelprestank, label='fuel pressurant tank')
plt.title('Tank temperatures')
plt.legend( loc='upper right')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.show()
plt.figure(4)
plt.plot(time,mdot_ox, label='ox mdot')
plt.figure(4)
plt.plot(time,mdot_fuel, label='fuel mdot')
plt.figure(4)
plt.plot(time,mdot_ox_pres, label='ox pressurant mdot')
plt.figure(4)
plt.plot(time,mdot_fuel_pres, label='fuel pressurant mdot')
plt.figure(4)
plt.plot(time,mdot_nozzle, label='nozzle mdot')
plt.title('Mass flows')
plt.xlabel('Time [s]')
plt.ylabel('kg/s')
plt.legend( loc='upper right')
plt.show()
plt.figure(5)
plt.plot(time,FFoxtank, label='ox tank')
plt.figure(5)
plt.plot(time,FFfueltank, label='fuel tank')
plt.title('Fill fractions in the tanks (Vprop/Vtank)')
plt.xlabel('Time [s]')
plt.ylabel('')
plt.legend( loc='upper right')
plt.show()
plt.figure(6)
plt.plot(time, OFratio)
plt.title('O/F ratio')
plt.xlabel('Time [s]')
plt.ylabel('')
plt.show()
plt.figure(7)
plt.plot(time,mox, label='ox')
plt.figure(7)
plt.plot(time,mfuel, label='fuel')
plt.figure(7)
plt.plot(time,moxprestank, label='ox pressurant in pressurant tank')
plt.figure(7)
plt.plot(time,mfuelprestank, label='fuel pressurant in pressurant tank')
plt.figure(7)
plt.plot(time,mPresOxtank, label='ox pressurant in ox tank')
plt.figure(7)
plt.plot(time,mPresFueltank, label='fuel pressurant in fuel tank')
plt.figure(7)
plt.plot(time,moxpres, label='total ox pressurant')
plt.figure(7)
plt.plot(time,mfuelpres, label='total fuel pressurant')
plt.title('Fluid masses')
plt.xlabel('Time [s]')
plt.ylabel('kg')
plt.legend( loc='upper right')
plt.show()
plt.figure(8)
plt.plot(time, cmass)
plt.title('Resident mass in chamber')
plt.xlabel('Time [s]')
plt.ylabel('kg')
plt.show()
plt.figure(9)
plt.plot(time, Thrust)
plt.title('Thrust')
plt.xlabel('Time [s]')
plt.ylabel('N')
plt.show()
plt.figure(10)
plt.plot(time, Isp)
plt.title('Isp')
plt.xlabel('Time [s]')
plt.ylabel('s')
plt.show()
plt.figure(11)
plt.plot(time, T_chamber)
plt.title('T chamber')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.show()
plt.figure(12)
plt.plot(time, Mexit)
plt.title('Exit Mach number')
plt.xlabel('Time [s]')
plt.ylabel('-')
plt.show()
plt.figure(13)
y1 = Poxprestank[-1]/psi
y2 = P2pres_ox[-1]/psi
y3 = P3pres_ox[-1]/psi
y4 = P4pres_ox[-1]/psi
y5 = P5pres_ox[-1]/psi
y6 = Poxtank[-1]/psi
y7 = P3ox[-1]/psi
y8 = P4ox[-1]/psi
y9 = P5ox[-1]/psi
y10 = P6ox[-1]/psi
y11 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y1], linewidth=2, label="pressurant tank")
plt.plot( [1, 2], [y1, y2], linewidth=2, label="pressurant regulator")
plt.plot( [2, 3], [y2, y3], linewidth=2, label="pressurant solenoid valve")
plt.plot( [3, 4], [y3, y4], linewidth=2, label="pressurant check valve 1")
plt.plot( [4, 5], [y4, y5], linewidth=2, label="pressurant check valve 2")
plt.plot( [5, 6], [y5, y6], linewidth=2, label="pressurant tubing")
plt.plot( [6, 7], [y6, y6], linewidth=2, label="ox tank")
plt.plot( [7, 8], [y6, y7], linewidth=2, label="ox flow meter")
plt.plot( [8, 9], [y7, y8], linewidth=2, label="ox solenoid valve")
plt.plot( [9, 10], [y8, y9], linewidth=2, label="ox particle filter")
plt.plot( [10, 11], [y9, y10], linewidth=2, label="ox piping")
plt.plot( [11, 12], [y10, y11], linewidth=2, label="ox injector")
plt.plot( [12, 13], [y11, y11], linewidth=2, label="chamber")
plt.title('Ox line pressures at end of burn')
plt.ylabel('psi')
plt.legend( loc='upper right')
plt.show()
plt.figure(14)
y1 = Pfuelprestank[-1]/psi
y2 = P2pres_fuel[-1]/psi
y3 = P3pres_fuel[-1]/psi
y4 = P4pres_fuel[-1]/psi
y5 = P5pres_fuel[-1]/psi
y6 = Pfueltank[-1]/psi
y7 = P3fuel[-1]/psi
y8 = P4fuel[-1]/psi
y9 = P5fuel[-1]/psi
y10 = P6fuel[-1]/psi
y11 = P7fuel[-1]/psi
y12 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y1], linewidth=2, label="pressurant tank")
plt.plot( [1, 2], [y1, y2], linewidth=2, label="pressurant regulator")
plt.plot( [2, 3], [y2, y3], linewidth=2, label="pressurant solenoid valve")
plt.plot( [3, 4], [y3, y4], linewidth=2, label="pressurant check valve 1")
plt.plot( [4, 5], [y4, y5], linewidth=2, label="pressurant check valve 2")
plt.plot( [5, 6], [y5, y6], linewidth=2, label="pressurant tubing")
plt.plot( [6, 7], [y6, y6], linewidth=2, label="fuel tank")
plt.plot( [7, 8], [y6, y7], linewidth=2, label="fuel flow meter")
plt.plot( [8, 9], [y7, y8], linewidth=2, label="fuel solenoid valve")
plt.plot( [9, 10], [y8, y9], linewidth=2, label="fuel particle filter")
plt.plot( [10, 11], [y9, y10], linewidth=2, label="fuel piping")
plt.plot( [11, 12], [y10, y11], linewidth=2, label="fuel cooling jacket")
plt.plot( [12, 13], [y11, y12], linewidth=2, label="fuel injector")
plt.plot( [13, 14], [y12, y12], linewidth=2, label="chamber")
plt.title('Fuel line pressures at end of burn')
plt.ylabel('psi')
plt.legend( loc='upper right')
plt.show()
plt.figure(15)
plt.plot(time, array(Poxprestank)/psi, label='ox pressurant tank')
plt.plot(time, array(Pfuelprestank)/psi, label='fuel pressurant tank')
plt.title('Pressurant Tank Pressure Time History')
plt.xlabel('Time [s]')
plt.ylabel('psi')
plt.legend(loc='upper right')
plt.show()
plt.figure(16)
plt.plot(time, Toxprestank, label='ox pressurant tank')
plt.plot(time, Tfuelprestank, label='fuel pressurant tank')
plt.title('Pressurant Tank Temperature Time History')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.legend(loc='upper right')
plt.show()
plt.figure(17)
plt.plot(time, array(Poxtank)/psi, label='ox tank')
plt.plot(time, array(Pfueltank)/psi, label='fuel tank')
plt.title('Propellant Tank Pressure Time History')
plt.xlabel('Time [s]')
plt.ylabel('psi')
plt.legend(loc='upper right')
plt.show()
plt.figure(18)
plt.plot(time, Toxtank, label='ox tank')
plt.plot(time, Tfueltank, label='fuel tank')
plt.title('Propellant Tank Temperature Time History')
plt.xlabel('Time [s]')
plt.ylabel('K')
plt.legend(loc='upper right')
plt.show()
plt.figure(19)
y1 = P2pres_ox[-1]/psi
y2 = P3pres_ox[-1]/psi
y3 = P4pres_ox[-1]/psi
y4 = P5pres_ox[-1]/psi
y5 = Poxtank[-1]/psi
y6 = P3ox[-1]/psi
y7 = P4ox[-1]/psi
y8 = P5ox[-1]/psi
y9 = P6ox[-1]/psi
y10 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y2], linewidth=2, label="pressurant solenoid valve")
plt.plot( [1, 2], [y2, y3], linewidth=2, label="pressurant check valve 1")
plt.plot( [2, 3], [y3, y4], linewidth=2, label="pressurant check valve 2")
plt.plot( [3, 4], [y4, y5], linewidth=2, label="pressurant tubing")
plt.plot( [4, 5], [y5, y5], linewidth=2, label="ox tank")
plt.plot( [5, 6], [y5, y6], linewidth=2, label="ox flow meter")
plt.plot( [6, 7], [y6, y7], linewidth=2, label="ox solenoid valve")
plt.plot( [7, 8], [y7, y8], linewidth=2, label="ox particle filter")
plt.plot( [8, 9], [y8, y9], linewidth=2, label="ox piping")
plt.plot( [9, 10], [y9, y10], linewidth=2, label="ox injector")
plt.plot( [10, 11], [y10, y10], linewidth=2, label="chamber")
plt.title('Ox line pressures downstream of regulator at end of burn')
plt.ylabel('psi')
plt.legend( loc='lower left')
plt.show()
plt.figure(20)
y1 = P2pres_fuel[-1]/psi
y2 = P3pres_fuel[-1]/psi
y3 = P4pres_fuel[-1]/psi
y4 = P5pres_fuel[-1]/psi
y5 = Pfueltank[-1]/psi
y6 = P3fuel[-1]/psi
y7 = P4fuel[-1]/psi
y8 = P5fuel[-1]/psi
y9 = P6fuel[-1]/psi
y10 = P7fuel[-1]/psi
y11 = Pchamber[-1]/psi
plt.plot( [0, 1], [y1, y2], linewidth=2, label="pressurant solenoid valve")
plt.plot( [1, 2], [y2, y3], linewidth=2, label="pressurant check valve 1")
plt.plot( [2, 3], [y3, y4], linewidth=2, label="pressurant check valve 2")
plt.plot( [3, 4], [y4, y5], linewidth=2, label="pressurant tubing")
plt.plot( [4, 5], [y5, y5], linewidth=2, label="fuel tank")
plt.plot( [5, 6], [y5, y6], linewidth=2, label="fuel flow meter")
plt.plot( [6, 7], [y6, y7], linewidth=2, label="fuel solenoid valve")
plt.plot( [7, 8], [y7, y8], linewidth=2, label="fuel particle filter")
plt.plot( [8, 9], [y8, y9], linewidth=2, label="fuel piping")
plt.plot( [9, 10], [y9, y10], linewidth=2, label="fuel cooling jacket")
plt.plot( [10, 11], [y10, y11], linewidth=2, label="fuel injector")
plt.plot( [11, 12], [y11, y11], linewidth=2, label="chamber")
plt.title('Fuel line pressures downstream of regulator at end of burn')
plt.ylabel('psi')
plt.legend( loc='lower left')
plt.show()
plt.figure(21)
plt.title( "PI-controller related pressures")
plt.plot(time, array(P2pres_fuel)/psi, label = "Fuel pressurant reg" )
plt.plot(time, array(Pfueltank)/psi, label = "Fuel tank" )
plt.plot(time, array(P2pres_ox)/psi, label = "Ox pressurant reg" )
plt.plot(time, array(Poxtank)/psi, label = "Ox tank" )
plt.xlabel('Time [s]')
plt.ylabel('psi')
plt.legend( loc='upper left')
plt.show()
|
python
|
import unittest
from kaleidoscope.config import GalleryConfigParser
class TestGalleryConfigParser(unittest.TestCase):
def test_lowercase_key(self):
"""Lowercase option key is left as is."""
config = GalleryConfigParser()
config.read_string("[album]\ntitle: Example\n")
self.assertTrue(config.has_option('album', 'title'))
def test_mixedcase_key(self):
"""Other keys are converted to lowercase."""
config = GalleryConfigParser()
config.read_string("[album]\nTitle: Example\n")
self.assertTrue(config.has_option('album', 'title'))
def test_filename_key(self):
"""Filename keys (with dot) are case sensitive."""
config = GalleryConfigParser()
config.read_string("[photos]\nDSC3000.JPG: UPPER\ndsc2000.jpg: lower\n")
self.assertEqual(config.options('photos'),
['DSC3000.JPG', 'dsc2000.jpg'])
self.assertTrue(config.has_option('photos', 'DSC3000.JPG'))
self.assertTrue(config.has_option('photos', 'dsc2000.jpg'))
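# Allow running this test module directly (a small addition; the project may
# normally rely on a separate test runner instead):
if __name__ == '__main__':
    unittest.main()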
|
python
|
print("hello,world")
#print(5 ** 4321)
a = 1
if -10 < a < 10:
print("fuck you")
|
python
|
# -*- coding: utf-8 -*-
"""Domain Driven Design framework."""
from ddd_base import ValueObject
class RouteSpecification(ValueObject):
def __init__(self, api_ref, upstream_ref, policies, tls):
super(RouteSpecification, self).__init__()
self.api_ref = api_ref
self.upstream_ref = upstream_ref
self.policies = policies
self.tls = tls
@property
def ssl(self):
        return self.tls == 'on'
def __eq__(self, other):
if not isinstance(other, RouteSpecification):
return NotImplemented
return self.api_ref == other.api_ref \
and self.upstream_ref == other.upstream_ref \
and self.policies == other.policies
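# Usage sketch (an illustrative addition, assuming ddd_base is importable):
if __name__ == '__main__':
    a = RouteSpecification('api-1', 'up-1', ['rate-limit'], 'on')
    b = RouteSpecification('api-1', 'up-1', ['rate-limit'], 'off')
    print(a.ssl, b.ssl)  # True False
    print(a == b)        # True: tls does not participate in equality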
|
python
|
load("//java/private:common.bzl", "has_maven_deps")
load("//java/private:dist_info.bzl", "DistZipInfo", "dist_aspect", "separate_first_and_third_party")
def _java_dist_zip_impl(ctx):
inputs = []
files = []
for file in ctx.files.files:
files.append("%s=%s" % (file.basename, file.path))
inputs.append(file)
    (first, third) = separate_first_and_third_party(ctx.attr.third_party_prefixes, [dep[DistZipInfo] for dep in ctx.attr.deps])
first_party = []
third_party = []
    for info in first:
        inputs.extend(info.binary_jars)
        inputs.extend(info.source_jars)
        for fp in info.binary_jars:
            first_party.append("%s.jar=%s" % (info.name, fp.path))
        for fp in info.source_jars:
            first_party.append("%s-sources.jar=%s" % (info.name, fp.path))
    for info in third:
        inputs.extend(info.binary_jars)
        inputs.extend(info.source_jars)
        for tp in info.binary_jars:
            third_party.append("lib/%s.jar=%s" % (info.name, tp.path))
out = ctx.actions.declare_file("%s.zip" % ctx.attr.name)
args = ctx.actions.args()
args.add_all(["c", out])
args.add_all(sorted(files))
args.add_all(sorted(first_party))
args.add_all(sorted(third_party))
ctx.actions.run(
executable = ctx.executable._zip,
arguments = [args],
outputs = [out],
inputs = inputs,
)
return [
DefaultInfo(files = depset([out])),
]
java_dist_zip = rule(
_java_dist_zip_impl,
attrs = {
"files": attr.label_list(
default = [],
allow_files = True,
),
"deps": attr.label_list(
providers = [
[DistZipInfo],
],
aspects = [
dist_aspect, has_maven_deps,
],
),
"third_party_prefixes": attr.string_list(),
"_zip": attr.label(
default = "@bazel_tools//tools/zip:zipper",
executable = True,
cfg = "host",
),
},
)
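# Example BUILD usage (a sketch; the target labels below are hypothetical):
# java_dist_zip(
#     name = "dist",
#     files = ["//:README.md"],
#     deps = ["//java/src:core"],
#     third_party_prefixes = ["maven"],
# )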
|
python
|
"""draws the price charts for all the securities in the currently active
case"""
from multiprocessing.connection import Listener
from bokeh.driving import count
from bokeh.layouts import layout, column, gridplot, row, widgetbox
from bokeh.models import ColumnDataSource, CustomJS, Span
from bokeh.plotting import curdoc, figure
from bokeh.models.widgets import Div
import pandas as pd
def receive_data():
"""need to get something like [(ticker, tick, price)]"""
data = conn.recv()
# print(data)
return data
def depth(ticker, books, level=50):
"""Extract the book for our ticker and set up the df the way we want."""
    bids = fill(books.loc[ticker, "BUY"].drop_duplicates("price", keep="first"), True).head(
        level
    )
    asks = fill(books.loc[ticker, "SELL"].drop_duplicates("price", keep="last"), False).tail(
        level
    )
return bids, asks
# center(bids, asks)
def fill(book, isbid):
""" clean up the duplicates and fill up the empty spaces. """
# if it's a bid, drop the first duplicate.
# if isbid:
# clean = book.drop_duplicates('price', 'first')
# else:
# clean = book.drop_duplicates('price', 'last')
# count how many cents the book covers
# _range = round(book['price'].max() - book['price'].min(), 2)
# rungs = int(_range * 100)
# Get the price range in a list to pass to numpy.linspace to generate our new index
# pricerange = [book['price'].min(), book['price'].max()]
pmax = int(book["price"].max() * 100)
pmin = int(book["price"].min() * 100)
# print(f"MAX/MIN : {pmax}/{pmin}")
ix = []
for i in range(pmin, pmax, 1):
# print(i/100)
ix.append(i / 100)
newind = pd.Index(ix, name="priceline")
# print(newind)
# Set the new index and backfill the cvol values
filled = book.set_index("price").reindex(newind, method="pad")
    filled["price"] = newind.values
# if isbid:
filled = filled[::-1]
# print(filled[["price", "cvol"]].to_string())
return filled
def center(bids, asks):
""" Modify the last data point to make the two books have symetric price ranges. """
bidrange = bids["price"].max() - bids["price"].min()
askrange = asks["price"].max() - asks["price"].min()
if bidrange > askrange:
#
distance = round(bidrange - askrange, 2)
shim_ask = asks["price"].max() + distance
asks.iloc[-1, 0] = shim_ask
elif bidrange < askrange:
# 00
distance = round(askrange - bidrange, 2)
shim_bid = bids["price"].min() - distance
bids.iloc[-1, 0] = shim_bid
return bids, asks
@count()
def update(step):
data = receive_data()
# print(data["CRZY_candle"], data["TAME_candle"])
if data["case"]["status"] == "ACTIVE":
if data["CRZY_candle"]["tick"] is not None:
color1 = (
"#fe0000"
if data["CRZY_candle"]["open"] > data["CRZY_candle"]["close"]
else "#00fd02"
)
CRZY_data = dict(
tick=[data["CRZY_candle"]["tick"]],
open=[data["CRZY_candle"]["open"]],
high=[data["CRZY_candle"]["high"]],
low=[data["CRZY_candle"]["low"]],
close=[data["CRZY_candle"]["close"]],
mid=[(data["CRZY_candle"]["open"] + data["CRZY_candle"]["close"]) / 2],
height=[
max(
0.01,
abs(data["CRZY_candle"]["open"] - data["CRZY_candle"]["close"]),
)
],
color=[color1],
)
color2 = (
"#fe0000"
if data["TAME_candle"]["open"] > data["TAME_candle"]["close"]
else "#00fd02"
)
TAME_data = dict(
tick=[data["TAME_candle"]["tick"]],
open=[data["TAME_candle"]["open"]],
high=[data["TAME_candle"]["high"]],
low=[data["TAME_candle"]["low"]],
close=[data["TAME_candle"]["close"]],
mid=[(data["TAME_candle"]["open"] + data["TAME_candle"]["close"]) / 2],
height=[
max(
0.01,
abs(data["TAME_candle"]["open"] - data["TAME_candle"]["close"]),
)
],
color=[color2],
)
# tick_num = len(CRZY.data['tick'])
# if tick_num > 0:
# print(CRZY.data, len(CRZY.data['tick'])) #, tick_num)
if (
len(CRZY.data["tick"])
and CRZY.data["tick"][-1] == data["CRZY_candle"]["tick"]
):
index = max(0, len(CRZY.data["tick"]) - 1)
rpatches = {
"open": [(index, data["CRZY_candle"]["open"])],
"high": [(index, data["CRZY_candle"]["high"])],
"low": [(index, data["CRZY_candle"]["low"])],
"close": [(index, data["CRZY_candle"]["close"])],
"color": [(index, color1)],
"mid": [
(
index,
(data["CRZY_candle"]["open"] + data["CRZY_candle"]["close"])
/ 2,
)
],
"height": [
(
index,
max(
0.01,
abs(
data["CRZY_candle"]["open"]
- data["CRZY_candle"]["close"]
),
),
)
],
}
CRZY.patch(rpatches)
cpatches = {
"open": [(index, data["TAME_candle"]["open"])],
"high": [(index, data["TAME_candle"]["high"])],
"low": [(index, data["TAME_candle"]["low"])],
"close": [(index, data["TAME_candle"]["close"])],
"color": [(index, color2)],
"mid": [
(
index,
(data["TAME_candle"]["open"] + data["TAME_candle"]["close"])
/ 2,
)
],
"height": [
(
index,
max(
0.01,
abs(
data["TAME_candle"]["open"]
- data["TAME_candle"]["close"]
),
),
)
],
}
TAME.patch(cpatches)
else:
CRZY.stream(CRZY_data, 600)
TAME.stream(TAME_data, 600)
# else:
# CRZY.stream(CRZY_data, 600)
# TAME.stream(TAME_data, 600)
CRZY_price.location = data["CRZY_candle"]["close"]
TAME_price.location = data["TAME_candle"]["close"]
CRZY_bid_depth, CRZY_ask_depth = depth("CRZY", data["orderbook"])
        CRZY_bidbook.data = ColumnDataSource.from_df(CRZY_bid_depth)
        CRZY_askbook.data = ColumnDataSource.from_df(CRZY_ask_depth)
# print(CRZY_bid_depth, CRZY_ask_depth, CRZY_bidbook.data)
TAME_bid_depth, TAME_ask_depth = depth("TAME", data["orderbook"])
        TAME_bidbook.data = ColumnDataSource.from_df(TAME_bid_depth)
        TAME_askbook.data = ColumnDataSource.from_df(TAME_ask_depth)
if data["tenders"]:
output = ""
for tender in data["tenders"]:
reserve = " " if not tender["biddable"] else " BIDDABLE "
text = f"<b>{tender['ticker']} {tender['action']}{reserve}TENDER</b>: {tender['quantity']//1000}K @ {tender['price']}<br>"
# print(text)
output += text
div.text = output
else:
div.text = f"""Trader PnL : {data['trader']['nlv']}<br>
CRZY POSITION: {data['securities'].loc['CRZY', 'position']}<br>
TAME POSITION: {data['securities'].loc['TAME', 'position']}"""
elif data["case"]["status"] == "STOPPED":
div.text = f"Round Over, final PnL : {data['trader']['nlv']}"
        CRZY.data = dict(
            tick=[], mid=[], height=[], open=[], high=[], low=[], close=[], color=[]
        )
        TAME.data = dict(
            tick=[], mid=[], height=[], open=[], high=[], low=[], close=[], color=[]
        )
        CRZY_bidbook.data = dict(price=[], cvol=[])
        CRZY_askbook.data = dict(price=[], cvol=[])
        TAME_bidbook.data = dict(price=[], cvol=[])
        TAME_askbook.data = dict(price=[], cvol=[])
# Data sources
CRZY = ColumnDataSource(
dict(tick=[], mid=[], height=[], open=[], high=[], low=[], close=[], color=[])
)
TAME = ColumnDataSource(
dict(tick=[], mid=[], height=[], open=[], high=[], low=[], close=[], color=[])
)
CRZY_bidbook = ColumnDataSource(dict(price=[], cvol=[]))
CRZY_askbook = ColumnDataSource(dict(price=[], cvol=[]))
TAME_bidbook = ColumnDataSource(dict(price=[], cvol=[]))
TAME_askbook = ColumnDataSource(dict(price=[], cvol=[]))
CRZY_chart = figure(
plot_height=300,
plot_width=600,
y_axis_location="left",
title="CRZY",
background_fill_color="#d3d3d3",
)
CRZY_price = Span(location=9, dimension="width", line_width=2, line_color="gold")
CRZY_chart.add_layout(CRZY_price)
CRZY_chart.segment(
x0="tick", y0="low", x1="tick", y1="high", line_width=1, color="black", source=CRZY
)
CRZY_chart.rect(
x="tick",
y="mid",
width=4,
height="height",
line_width=1,
line_color="black",
fill_color="color",
source=CRZY,
)
TAME_chart = figure(
plot_height=300,
plot_width=600,
y_axis_location="left",
title="TAME",
background_fill_color="#d3d3d3",
)
TAME_price = Span(location=25, dimension="width", line_width=2, line_color="gold")
TAME_chart.add_layout(TAME_price)
TAME_chart.segment(
x0="tick", y0="low", x1="tick", y1="high", line_width=2, color="black", source=TAME
)
TAME_chart.rect(
x="tick",
y="mid",
width=4,
height="height",
line_width=1,
line_color="black",
fill_color="color",
source=TAME,
)
CRZY_dchart = figure(
plot_height=175, plot_width=600, y_axis_location="left", title="Orderbook"
)
CRZY_dchart.vbar(x="price", top="cvol", width=0.01, color="green", source=CRZY_bidbook)
CRZY_dchart.vbar(x="price", top="cvol", width=0.01, color="red", source=CRZY_askbook)
TAME_dchart = figure(
plot_height=175, plot_width=600, y_axis_location="left", title="Orderbook"
)
TAME_dchart.vbar(x="price", top="cvol", width=0.01, color="green", source=TAME_bidbook)
TAME_dchart.vbar(x="price", top="cvol", width=0.01, color="red", source=TAME_askbook)
div = Div(
    text="<b>MADE BY UOTTAWA</b>", width=1100, height=200, style={"font-size": "200%"}
)
curdoc().add_root(
layout(
gridplot(
[[CRZY_chart, TAME_chart], [CRZY_dchart, TAME_dchart]],
toolbar_location=None,
),
widgetbox(div),
)
)
listener = Listener(("localhost", 6000))
print("Server up and running! Just waiting for you to run the main in another process.\n\n\
Listening...")
conn = listener.accept()
# Add a periodic callback to be run every X milliseconds
curdoc().add_periodic_callback(update, 250)
|
python
|
#function = -x**2 + 2*x - 1
# x = [0,3]
import random
import easygui
import math
def representation(interval,precision,no_selected):
original_list = []
largest = int(interval*(10**precision))
select_interval = int(largest//no_selected)
    for i in range(1, largest + 1):
        if i % select_interval == 0:
            m = bin(i).split('b')[1]
            original_list.append(m)
    return original_list
def select(list_num,no_selected,lower_bound):
score = []
total = 0
dice = []
index = []
final_choice = []
for i in range(no_selected):
x = int(list_num[i],2)/1000000 + lower_bound
xscore = get_x(x)
score.append(xscore)
total += xscore
maximum = max(score)
if min(score) < 0:
minimum = min(score)
for j in range(no_selected):
score[j] -= minimum
total -= minimum * no_selected
score[0] = score[0]/total
for k in range(1,no_selected):
score[k] = score[k-1] + score[k]/total
score.insert(0,0)
for l in range(no_selected):
dice.append(random.random())
for m in range(no_selected):
index.append(get_nos(no_selected,score,dice[m]))
for n in range(no_selected):
final_choice.append(list_num[index[n]])
return [final_choice,maximum]
def get_nos(no_selected,score,dice_no):
for m in range(no_selected):
if score[m] <= dice_no < score[m+1]:
return m
def crossover(choices,percentage_cross,no_selected):
for i in range(0,no_selected,2):
new = cross_individual(choices[i],choices[i+1],percentage_cross)
choices[i],choices[i+1] = new[0],new[1]
return choices
def cross_individual(crossA,crossB,percentage_cross):
length = min(len(crossA),len(crossB))
length = round(length * percentage_cross,0)
a = len(crossA)
b = len(crossB)
m = int(a-length)
n = int(b-length)
    atemp = crossA[m:a]
    btemp = crossB[n:b]
    crossA = crossA[0:m] + btemp
    crossB = crossB[0:n] + atemp
return [crossA,crossB]
def mutate(choices,percentage_mutate,no_selected):
list_nos = []
no_mutate = int(round(no_selected * percentage_mutate,0))
for i in range(no_mutate):
place = random.randint(0,no_selected-1)
while place in list_nos:
place = random.randint(0,no_selected-1)
list_nos.append(place)
    for place in list_nos:
        choices[place] = mutate_individual(choices[place])
return choices
def mutate_individual(original):
    place = random.randint(0, len(original) - 1)
    if original[place] == '1':
        original = original[0:place] + '0' + original[place+1:]
    else:
        original = original[0:place] + '1' + original[place+1:]
    return original
def main(no_selected,percentage_cross,percentage_mutate,precision,no_repetition,lower_bound,upper_bound,interval):
list_no = representation(interval,precision,no_selected)
for i in range(no_repetition):
list_no = run(no_selected,percentage_cross,percentage_mutate,precision,no_repetition,interval,list_no,lower_bound)
if list_no == 'yes':
break
def run(no_selected,percentage_cross,percentage_mutate,precision,no_repetition,interval,list_no,lower_bound):
x = select(list_no,no_selected,lower_bound)
m = x[0]
    best_scores.append(x[1])
    print(x[1])
    if round(best_scores[-1] - best_scores[-2], 2) == 0.0 and round(best_scores[-2] - best_scores[-3], 2) == 0.0:
return 'yes'
n = crossover(m,percentage_cross,no_selected)
o = mutate(n,percentage_mutate,no_selected)
return o
def get_x(x):
return x*math.sin(10*math.pi*x) + 2
best_scores = [-580293485523, -4523454239523422]  # sentinel history so the first convergence check is safe
main(50,0.75,0.02,6,50,-1,2,3)
|
python
|
import pytz
from datetime import datetime
from FlaskRTBCTF import create_app, db, bcrypt
from FlaskRTBCTF.helpers import handle_admin_pass
from FlaskRTBCTF.models import User, Score, Notification, Machine
from FlaskRTBCTF.config import organization, LOGGING
if LOGGING:
from FlaskRTBCTF.models import Logs
app = create_app()
# create_app().app_context().push()
with app.app_context():
db.create_all()
default_time = datetime.now(pytz.utc)
box = Machine(
name="My Awesome Pwnable Box",
user_hash="A" * 32,
root_hash="B" * 32,
user_points=10,
root_points=20,
os="Linux",
ip="127.0.0.1",
hardness="You tell",
)
db.session.add(box)
passwd = handle_admin_pass()
admin_user = User(
username="admin",
email="[email protected]",
password=bcrypt.generate_password_hash(passwd).decode("utf-8"),
isAdmin=True,
)
admin_score = Score(
user=admin_user, userHash=False, rootHash=False, points=0, machine=box
)
db.session.add(admin_user)
db.session.add(admin_score)
notif = Notification(
title=f"Welcome to {organization['ctfname']}",
body="The CTF is live now. Please read rules!",
)
db.session.add(notif)
if LOGGING:
admin_log = Logs(
user=admin_user,
accountCreationTime=default_time,
visitedMachine=True,
machineVisitTime=default_time,
)
db.session.add(admin_log)
db.session.commit()
|
python
|
# This file is part of DroidCarve.
#
# Copyright (C) 2020, Dario Incalza <dario.incalza at gmail.com>
# All rights reserved.
#
__author__ = "Dario Incalza <[email protected]"
__copyright__ = "Copyright 2020, Dario Incalza"
__maintainer__ = "Dario Incalza"
__email__ = "[email protected]"
from adbutils import adb
import threading, subprocess
import queue, os
def get_devices():
return adb.device_list()
def get_device(serial):
return adb.device(serial)
class ConnectedDevice:
def __init__(self, serial):
if serial is None:
raise RuntimeError("No connected device configured.")
self.device = get_device(serial)
self.serial = self.device.serial
self._logcat_reader = None
def get_package_list(self, full_info=False):
if not full_info:
return self.device.list_packages()
else:
pkgs = []
for p in self.device.list_packages():
pkgs.append(self.device.package_info(p))
return pkgs
def get_serial(self):
return self.serial
def get_prop(self, propstr):
return str(self.device.shell(["getprop", propstr]))
def get_info_dict(self):
return {
"serial": self.device.serial,
"name": self.get_prop("ro.product.name"),
"manufacturer": self.get_prop("ro.product.manufacturer"),
"model": self.get_prop("ro.product.model"),
"android_version": self.get_prop("ro.build.version.release"),
"api_level": self.get_prop("ro.build.version.sdk"),
"cpu_arch": self.get_prop("ro.arch"),
"cpu_abi": self.get_prop("ro.product.cpu.abi"),
"crypto_state": self.get_prop("ro.crypto.state"),
"fde_algorithm": self.get_prop("ro.crypto.fde_algorithm"),
"latest_security_patch": self.get_prop("ro.build.version.security_patch")
}
def start_logcat_interface(self):
if not self._logcat_reader:
self._logcat_reader = LogCatInterface(device_serial=self.serial)
self._logcat_reader.init()
# with app.app_context():
# while self._logcat_reader.hasNext():
# line = self._logcat_reader.next()
# logging.debug('[%s] - [WS] %s'.format(websocket.LOGCAT_MSG, line))
# emit(websocket.LOGCAT_MSG, {'data', line})
# eventlet.sleep(1)
def get_next_line(self):
if self._logcat_reader.hasNext():
return self._logcat_reader.next()
else:
return None
def tear_down_logcat_interface(self):
self._logcat_reader.stop()
self._logcat_reader = None
def download_package(self, packagename, dst):
full_path = self.device.shell("pm path {}".format(packagename)).split("package:")[1].rstrip("\n")
self.device.shell("cp {} /sdcard/temp.apk".format(full_path))
if os.path.exists(dst):
os.remove(dst)
self.device.sync.pull("/sdcard/temp.apk", dst)
return dst
class LogCatInterface:
def __init__(self, device_serial=None, pkg_name=None):
self.pkg_name = pkg_name
self.device_serial = device_serial
        self._process = None
self._queue = None
self._reader = None
def init(self):
base_cmd = ['adb']
if self.device_serial:
base_cmd.extend(['-s', self.device_serial])
base_cmd.append('logcat')
self._process = subprocess.Popen(base_cmd, stdout=subprocess.PIPE)
# Launch the asynchronous readers of the process' stdout.
self._queue = queue.Queue()
self._reader = AsynchronousAdbReader(self._process.stdout, self._queue)
self._reader.start()
def next(self):
return self._queue.get()
def stop(self):
self._process.kill()
self._process = None
def hasNext(self):
return self._process and not self._reader.eof()
class AsynchronousAdbReader(threading.Thread):
def __init__(self, fd, queue):
assert callable(fd.readline)
threading.Thread.__init__(self)
self._fd = fd
self._queue = queue
def run(self):
        '''The body of the thread: read lines and put them on the queue.'''
        for line in iter(self._fd.readline, b''):
self._queue.put(line)
def eof(self):
'''Check whether there is no more content to expect.'''
return not self.is_alive() and self._queue.empty()
if __name__ == "__main__":
device = ConnectedDevice(serial="RF8M4050H3A")
device.download_package("cake.app", "temp.apk")
|
python
|
import logging
_FORMAT = '%(asctime)s:%(levelname)s:%(lineno)s:%(module)s.%(funcName)s:%(message)s'
_formatter = logging.Formatter(_FORMAT, '%H:%M:%S')
_handler = logging.StreamHandler()
_handler.setFormatter(_formatter)
logger = logging.getLogger('yatsm')
logger.addHandler(_handler)
logger.setLevel(logging.INFO)
logger_algo = logging.getLogger('yatsm_algo')
logger_algo.addHandler(_handler)
logger_algo.setLevel(logging.WARNING)
|
python
|
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
def pathSum(root: TreeNode, target: int) -> List[List[int]]:
temp = []
result = []
    def dfs(root: TreeNode, left: int):
if not root :
return
left -= root.val
temp.append(root.val)
if not root.left and not root.right and left == 0 :
result.append(temp[:])
dfs(root.left,left)
dfs(root.right,left)
temp.pop()
    dfs(root, target)
return result
if __name__ == "__main__" :
node = TreeNode(3)
node.left = TreeNode(5)
node.right = TreeNode(1)
node.left.left = TreeNode(6)
node.left.right = TreeNode(2)
node.left.right.left = TreeNode(7)
node.left.right.right = TreeNode(4)
node.right.left = TreeNode(0)
node.right.right = TreeNode(8)
result = pathSum(node,12)
print(result)
|
python
|
"""Perform non-linear regression using a Hill function."""
import numpy as np
import math
from scipy.optimize import curve_fit
class Hill:
"""A Hill function is for modeling a field falloff (i.e. penumbra). It's not a perfect model, but it fits to a
function, which is not limited by resolution issues as may be experienced on low-res devices like ion chamber arrays."""
@classmethod
def fit(cls, x_data: np.ndarray, y_data: np.ndarray):
"""Fit x & y data to a Hill function."""
fitted_parameters, _ = curve_fit(hill_func, x_data, y_data, p0=(min(y_data), max(y_data), np.median(x_data), 0))
instance = cls()
instance.params = fitted_parameters
return instance
def inflection_idx(self) -> dict:
"""Determine the x-value inflection point of the fitted Hill function."""
idx = self.params[2] * math.pow((self.params[3] - 1) / (self.params[3] + 1), 1 / self.params[3])
return {'index (exact)': idx, 'index (rounded)': int(round(idx))}
@classmethod
def from_params(cls, params):
"""Create a Hill function from pre-determined parameters. Useful to recreate a Hill function"""
instance = cls()
instance.params = params
return instance
def gradient_at(self, x: float) -> float:
"""Return the gradient of the Hill function at a given x-value"""
cxd = math.pow(self.params[2]/x, self.params[3])
return (self.params[1] - self.params[0])*self.params[3]*cxd/(math.pow(cxd + 1, 2)*x)
def x(self, y: float) -> float:
"""Return the x-value given a y-value"""
return self.params[2] * math.pow((y - self.params[0]) / (self.params[1] - y), 1 / self.params[3])
def y(self, x: float) -> float:
"""Return the y-value given an x-value."""
return self.params[0] + (self.params[1] - self.params[0]) / (1 + (self.params[2]/x) ** self.params[3])
def hill_func(x, a, b, c, d): # Hill function
"""Calculates the Hill function at x.
a : sigmoid low level
b : sigmoid high level
c : approximate inflection point
d : slope of the sigmoid
"""
return a + (b - a) / (1.0 + (c / x) ** d)
def inv_hill_func(y, fit_params): # Inverse Hill function
"""Calculates the inverse Hill function at y.
[0] : sigmoid low level
[1] : sigmoid high level
[2] : approximate inflection point
[3] : slope of the sigmoid
"""
if (y > min(fit_params[0], fit_params[1])) and (y < max(fit_params[0], fit_params[1])) and (fit_params[3] != 0):
return fit_params[2]*math.pow((y - fit_params[0])/(fit_params[1] - y), 1/fit_params[3])
else:
return 0
def deriv_hill_func(x, fit_params) -> float:
"""calculates the tangent of the Hill function at X.
[0] : sigmoid low level
[1] : sigmoid high level
[2] : approximate inflection point
[3] : slope of the sigmoid
"""
if x > 0:
cxd = math.pow(fit_params[2]/x, fit_params[3])
return (fit_params[1] - fit_params[0])*fit_params[3]*cxd/(math.pow(cxd + 1, 2)*x)
else:
return 0
def inflection(fit_params) -> float:
"""calculates the inflection point of the Hill function.
[0] : sigmoid low level
[1] : sigmoid high level
[2] : approximate inflection point
[3] : slope of the sigmoid
"""
return fit_params[2]*math.pow((fit_params[3] - 1)/(fit_params[3] + 1), 1/fit_params[3])
def fit_to_hill(xData: np.ndarray, yData: np.ndarray):
"""Performs non-linear least squares regression on a Hill (sigmoid) function.
Parameters
----------
xData: X values of the function
yData: Y values of the function
Returns
-------
Fitted Parameters
[0] : sigmoid low level
[1] : sigmoid high level
[2] : approximate inflection point
[3] : slope of the sigmoid
"""
fitted_parameters, _ = curve_fit(hill_func, xData, yData, p0=(min(yData), max(yData), np.median(xData), 0))
return fitted_parameters
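# Usage sketch (an illustrative addition; the data below are synthetic):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    xs = np.linspace(1, 100, 200)
    # Generate a noisy sigmoid with known parameters, then recover them.
    ys = hill_func(xs, 0.05, 1.0, 50.0, 8.0) + rng.normal(0, 0.01, xs.size)
    hill = Hill.fit(xs, ys)
    print('fitted params:', hill.params)
    print('inflection:', hill.inflection_idx())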
|
python
|
### Wet HW2 DIP ###
import numpy as np
import matplotlib.pyplot as plt
import cv2
from scipy import fftpack
###########################################################################
###########################################################################
###########################################################################
image_file = 'DIPSourceHW2.png'
image = cv2.imread(image_file, cv2.IMREAD_GRAYSCALE).astype(float) / 255.0
def plot_image(image_, name_: str):
plt.figure()
plt.imshow(image_, cmap="gray")
plt.title(name_)
plt.show(block=0)
plt.savefig(name_)
return
# ################## 1 ##################
height = 346
width = 550
epsilon = 1e-9
def PSF_Gauss(std):
    x = np.linspace(-10, 10, width)
    y = np.linspace(-10, 10, height)
    X, Y = np.meshgrid(x, y)
    exp_ = np.exp(-1 * (X ** 2 + Y ** 2) / (2 * (std * std)))
    return exp_ / (2 * np.pi * std ** 2)  # a unit-integral 2D Gaussian is normalized by 1/(2*pi*sigma^2)
PSF_l_gauss = PSF_Gauss(0.25)
PSF_h_gauss = PSF_Gauss(0.04)
plot_image(PSF_l_gauss, '1 - PSF_l_gauss')
plot_image(PSF_h_gauss, '1 - PSF_h_gauss')
# ################## 2 ##################
def PSF_box(box_size):
h_ = int(height / 2)
w_ = int(width / 2)
d_ = int(box_size / 2)
PSF_box_ = np.zeros((height, width))
PSF_box_[h_ - d_:h_ + d_, w_ - d_:w_ + d_] = 1 / (box_size ** 2)
return PSF_box_
PSF_l_box = PSF_box(6)
PSF_h_box = PSF_box(3)
plot_image(PSF_l_box, '2 - PSF_l_box')
plot_image(PSF_h_box, '2 - PSF_h_box')
# ################## 3 ##################
def resolution_image(image_, psf_):
f_image = fftpack.fftshift(fftpack.fftn(image_))
k_ = fftpack.fftshift(fftpack.fftn(psf_))
return np.abs(fftpack.fftshift(fftpack.ifftn(f_image * k_)))
gauss_l = resolution_image(image, PSF_l_gauss)
gauss_h = resolution_image(image, PSF_h_gauss)
box_l = resolution_image(image, PSF_l_box)
box_h = resolution_image(image, PSF_h_box)
plot_image(gauss_l, '3 - gauss_l')
plot_image(gauss_h, '3 - gauss_h')
plot_image(box_l, '3 - box_l')
plot_image(box_h, '3 - box_h')
# ################## 4 ##################
def blur_kernel_k(psf_l_, psf_h_):
PSF_l_F = fftpack.fftshift(fftpack.fftn(psf_l_))
PSF_h_F = fftpack.fftshift(fftpack.fftn(psf_h_))
PSF_h_F[np.abs(PSF_h_F) < epsilon] = epsilon
return np.abs(fftpack.fftshift(fftpack.ifftn(PSF_l_F / PSF_h_F)))
k_gauss = blur_kernel_k(PSF_l_gauss, PSF_h_gauss)
k_box = blur_kernel_k(PSF_l_box, PSF_h_box)
plot_image(k_gauss, '4 - k_gauss')
plot_image(k_box, '4 - k_box')
# ################## 5 - wiener ##################
def wiener(low_res_image, k_):
    # Naive inverse filter in the frequency domain (no noise-power term).
    L_ = fftpack.fftshift(fftpack.fftn(low_res_image))
K_ = fftpack.fftshift(fftpack.fftn(k_))
K_ = K_ + epsilon
H_ = L_ / K_
return np.abs(fftpack.fftshift(fftpack.ifftn(H_)))
wiener_gauss = wiener(gauss_l, k_gauss)
wiener_box = wiener(box_l, k_box)
plot_image(wiener_gauss, '5 - wiener_gauss')
plot_image(wiener_box, '5 - wiener_box')
# ################## 5 - TV ##################
def TV(low_res_image, k_):
    d_op = [[0, -1, 0], [-1, 4, -1], [0, -1, 0]]
    D_ = fftpack.fftshift(fftpack.fftn(d_op, shape=low_res_image.shape))
    K_ = fftpack.fftshift(fftpack.fftn(k_))
    L_ = fftpack.fftshift(fftpack.fftn(low_res_image))
filter_ = K_ / (K_ ** 2 + 0.3 * D_ ** 2)
filter_ = np.abs(filter_)
filter_ = filter_ + epsilon
H_ = L_ * filter_
return np.abs(fftpack.ifftn(H_))
TV_gauss = TV(gauss_l, k_gauss)
TV_box = TV(box_l, k_box)
plot_image(TV_gauss, '5 - TV_gauss')
plot_image(TV_box, '5 - TV_box')
# ################## 6 ##################
bilinear_gauss = cv2.resize(gauss_l, dsize=None, fx=2, fy=2)
bilinear_box = cv2.resize(box_l, dsize=None, fx=2, fy=2)
plot_image(bilinear_gauss, '6 - bilinear_gauss')
plot_image(bilinear_box, '6 - bilinear_box')
bicubic_gauss = cv2.resize(gauss_l, dsize=None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
bicubic_box = cv2.resize(box_l, dsize=None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
plot_image(bicubic_gauss, '6 - bicubic_gauss')
plot_image(bicubic_box, '6 - bicubic_box')
# ################################################
plt.show()
# ################################################
# ################################################
# ################################################
|
python
|
from pathlib import Path
from code_scanner.data_validators import validator
from code_scanner.enums import FileType
class FileInfo:
def __init__(self, full_name: Path, file_type: FileType = FileType.UNKNOWN):
"""
:param full_name: Absolute folder without file, Path
:param file_type: Enum FileType
"""
self._full_name = full_name
self._file_type = file_type
@property
def full_name(self) -> Path:
return self._full_name
@property
def file_type(self) -> FileType:
return self._file_type
@full_name.setter
    @validator(lambda field: field is None or len(str(field)) == 0, ValueError("name cannot be empty or None"))
def full_name(self, val) -> None:
self._full_name = val
@file_type.setter
def file_type(self, val) -> None:
self._file_type = val
def __str__(self) -> str:
return self._file_type.name + "-" + str(self._full_name)
def __repr__(self) -> str:
return self.__str__()
def __hash__(self) -> int:
return hash(self.__str__())
def __eq__(self, other) -> bool:
return (
self.__class__ == other.__class__ and
self.__str__() == other.__str__()
)
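# Usage sketch (an illustrative addition, assuming the code_scanner package is importable):
if __name__ == "__main__":
    info = FileInfo(Path("/tmp/example.py"), FileType.UNKNOWN)
    print(info)  # e.g. "UNKNOWN-/tmp/example.py"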
|
python
|
#
# Base engine class
# Copyright EAVISE
#
import logging
import signal
from abc import ABC, abstractmethod
import lightnet as ln
__all__ = ['Engine']
log = logging.getLogger(__name__)
class Engine(ABC):
""" This class removes the boilerplate code needed for writing your training cycle. |br|
Here is the code that runs when the engine is called:
.. literalinclude:: /../lightnet/engine/_engine.py
:language: python
:pyobject: Engine.__call__
:dedent: 4
Args:
params (lightnet.engine.HyperParameters): Serializable hyperparameters for the engine to work with
dataloader (torch.utils.data.DataLoader, optional): Dataloader for the training data; Default **None**
**kwargs (dict, optional): Keywords arguments that will be set as attributes of the engine
Attributes:
self.params: HyperParameter object
self.dataloader: Dataloader object
        self.sigint: Boolean value indicating whether a SIGINT (CTRL+C) was sent; Default **False**
self.*: All values that were passed with the init function and all values from the :class:`~lightnet.engine.HyperParameters` can be accessed in this class
Note:
This class expects a `self.dataloader` object to be present. |br|
You can either pass a dataloader when initializing this class, or you can define it yourself.
        This allows you to define `self.dataloader` as a computed property (@property) of your class, opening up a number of different possibilities,
        like eg. computing different dataloaders depending on which epoch you are in.
Note:
        This engine allows you to define hook functions that run at certain points in the training *(epoch_start, epoch_end, batch_start, batch_end)*.
The functions can be defined as class methods of your engine without any extra arguments or as separate functions that take the engine as a single argument.
There are different functions to register a hook and they can be used as decorator functions or called straight away in code:
>>> class TrainingEngine(ln.engine.Engine):
... def start(self):
... pass
...
... @ln.engine.Engine.epoch_end
... def backup(self):
... pass # This method will be executed at the end of every epoch
...
... @ln.engine.Engine.batch_start(100)
... def update_hyperparams(self):
... pass # This method will be executed at the start of every 100th batch
...
>>> # Create TrainingEngine object and run it
>>> def backup(engine):
... pass # This function will be executed at the end of every Xth batch defined by a backup_rate variable at runtime
...
>>> @ln.engine.Engine.epoch_start
... def select_data_subset(engine):
... pass # This function will be executed at the start of every epoch
...
>>> class TrainingEngine(ln.engine.Engine):
... def start(self):
... if hasattr(self, 'backup_rate') and self.backup_rate is not None:
... self.batch_start(self.backup_rate)(backup)
...
>>> # Create TrainingEngine object and run it
"""
__init_done = False
_required_attr = ['network', 'batch_size', 'dataloader']
_handled_signals = [signal.SIGINT, signal.SIGTERM]
_epoch_start = {}
_epoch_end = {}
_batch_start = {}
_batch_end = {}
def __init__(self, params, dataloader=None, **kwargs):
self.params = params
if dataloader is not None:
self.dataloader = dataloader
# Sigint handling
self.sigint = False
for sig in self._handled_signals:
signal.signal(sig, self.__sigint_handler)
# Set attributes
for key in kwargs:
if not hasattr(self, key):
setattr(self, key, kwargs[key])
else:
log.warning(f'{key} attribute already exists on engine.')
self.__init_done = True
def __call__(self):
""" Start the training cycle. """
self.__check_attr()
self.start()
log.info('Start training')
self.network.train()
idx = 0
while True:
# Epoch Start
self._run_hooks(self.epoch + 1, self._epoch_start)
idx %= self.batch_subdivisions
loader = self.dataloader
for idx, data in enumerate(loader, idx+1):
# Batch Start
if (idx - 1) % self.batch_subdivisions == 0:
self._run_hooks(self.batch + 1, self._batch_start)
# Forward and backward on (mini-)batches
self.process_batch(data)
if idx % self.batch_subdivisions != 0:
continue
# Optimizer step
self.batch += 1 # Should only be called after train, but this is easier to use self.batch in function
self.train_batch()
# Batch End
self._run_hooks(self.batch, self._batch_end)
# Check if we need to stop training
if self.quit() or self.sigint:
log.info('Reached quitting criteria')
return
# Epoch End
self.epoch += 1
self._run_hooks(self.epoch, self._epoch_end)
# Check if we need to stop training
if self.quit() or self.sigint:
log.info('Reached quitting criteria')
return
def __getattr__(self, name):
if hasattr(self.params, name):
return getattr(self.params, name)
else:
raise AttributeError(f'{name} attribute does not exist')
def __setattr__(self, name, value):
if self.__init_done and name not in dir(self) and hasattr(self.params, name):
setattr(self.params, name, value)
else:
super().__setattr__(name, value)
def __sigint_handler(self, signal, frame):
if not self.sigint:
            log.debug('SIGINT/SIGTERM caught. Waiting for graceful exit')
self.sigint = True
def __check_attr(self):
for attr in self._required_attr:
if not hasattr(self, attr):
raise AttributeError(f'Engine requires attribute [{attr}] (as an engine or hyperparameter attribute)')
if not hasattr(self, 'mini_batch_size'):
log.warning('No [mini_batch_size] attribute found, setting it to [batch_size]')
self.mini_batch_size = self.batch_size
elif self.batch_size % self.mini_batch_size != 0 or self.mini_batch_size > self.batch_size:
raise ValueError('batch_size should be a multiple of mini_batch_size')
def log(self, msg):
""" Log messages about training and testing.
This function will automatically prepend the messages with **TRAIN** or **TEST**.
Args:
msg (str): message to be printed
"""
if self.network.training:
log.train(msg)
else:
log.test(msg)
def _run_hooks(self, value, hooks):
""" Internal method that will execute registered hooks. """
keys = list(hooks.keys())
for k in keys:
if value % k == 0:
for fn in hooks[k]:
if hasattr(fn, '__self__'):
fn()
else:
fn(self)
@classmethod
def epoch_start(cls, interval=1):
""" Register a hook to run at the start of an epoch.
Args:
interval (int, optional): Number dictating how often to run the hook; Default **1**
Note:
The `self.epoch` attribute contains the number of processed epochs,
and will thus be one lower than the epoch you are currently starting.
For example, when starting training with the very first epoch,
the `self.epoch` attribute will be set to **0** during any `epoch_start` hook. |br|
However, the `interval` argument will be computed with the correct epoch number (ic. `self.epoch` + 1).
"""
def decorator(fn):
if interval in cls._epoch_start:
cls._epoch_start[interval].append(fn)
else:
cls._epoch_start[interval] = [fn]
return fn
return decorator
@classmethod
def epoch_end(cls, interval=1):
""" Register a hook to run at the end of an epoch.
Args:
interval (int, optional): Number dictating how often to run the hook; Default **1**
"""
def decorator(fn):
if interval in cls._epoch_end:
cls._epoch_end[interval].append(fn)
else:
cls._epoch_end[interval] = [fn]
return fn
return decorator
@classmethod
def batch_start(cls, interval=1):
""" Register a hook to run at the start of a batch.
Args:
interval (int, optional): Number dictating how often to run the hook; Default **1**
Note:
The `self.batch` attribute contains the number of processed batches,
and will thus be one lower than the batch you are currently starting.
For example, when starting training with the very first batch,
the `self.batch` attribute will be set to **0** during any `batch_start` hook. |br|
However, the `interval` argument will be computed with the correct batch number (ic. `self.batch` + 1).
"""
def decorator(fn):
if interval in cls._batch_start:
cls._batch_start[interval].append(fn)
else:
cls._batch_start[interval] = [fn]
return fn
return decorator
@classmethod
def batch_end(cls, interval=1):
""" Register a hook to run at the end of a batch.
Args:
interval (int, optional): Number dictating how often to run the hook; Default **1**
"""
def decorator(fn):
if interval in cls._batch_end:
cls._batch_end[interval].append(fn)
else:
cls._batch_end[interval] = [fn]
return fn
return decorator
@property
def batch_subdivisions(self):
""" Get number of mini-batches per batch.
Return:
int: Computed as self.batch_size // self.mini_batch_size
"""
return self.batch_size // self.mini_batch_size
def start(self):
""" First function that gets called when starting the engine. |br|
Any required setup code can come in here.
"""
pass
@abstractmethod
def process_batch(self, data):
""" This function should contain the code to process the forward and backward pass of one (mini-)batch.
Args:
data: The data that comes from your dataloader
Note:
If you are working with mini-batches, you should pay attention to how you process your loss and backwards function. |br|
PyTorch accumulates gradients when performing multiple backward() calls before using your optimizer.
However, usually your loss function performs some kind of average over your batch-size (eg. reduction='mean' in a lot of default pytorch functions).
When that is the case, you should also average your losses over the mini-batches, by dividing your resulting loss:
            .. code:: python
loss = loss_function(output, target) / self.batch_subdivisions
loss.backward()
"""
pass
@abstractmethod
def train_batch(self):
""" This function should contain the code to update the weights of the network. |br|
Statistical computations, performing backups at regular intervals, etc. also happen here.
"""
pass
def quit(self):
""" This function gets called after every training epoch and decides if the training cycle continues.
Return:
            Boolean: Whether or not to stop the training cycle
Note:
This function gets called before checking the ``self.sigint`` attribute.
This means you can also check this attribute in this function. |br|
If it evaluates to **True**, you know the program will exit after this function and you can thus
perform the necessary actions (eg. save final weights).
"""
return False
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import yaml
import roslib
from sensor_msgs.msg import CameraInfo, Image
import rospy
class CameraInfoPublisher:
# Callback of the ROS subscriber.
def callback(self, data):
self.cam_info.header = data.header
self.publish()
def __init__(self):
file_name = rospy.get_param('~file_name', '')
camera_name = rospy.get_param('~camera_name', '')
out_topic_name = camera_name + "/camera_info"
in_topic = camera_name + "/image_raw"
self.cam_info = parse_yaml(file_name)
rospy.Subscriber(in_topic, Image, self.callback)
self.pub = rospy.Publisher(out_topic_name,CameraInfo, queue_size=2)
def publish(self):
'''
now = rospy.Time.now()
self.cam_info.header.stamp = now
'''
self.pub.publish(self.cam_info)
def parse_yaml(filename):
    with open(filename, 'r') as stream:
        calib_data = yaml.safe_load(stream)
cam_info = CameraInfo()
cam_info.width = calib_data['image_width']
cam_info.height = calib_data['image_height']
cam_info.K = calib_data['camera_matrix']['data']
cam_info.D = calib_data['distortion_coefficients']['data']
cam_info.R = calib_data['rectification_matrix']['data']
cam_info.P = calib_data['projection_matrix']['data']
cam_info.distortion_model = calib_data['distortion_model']
return cam_info
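# Expected calibration file layout (sketch; files written by the ROS
# camera_calibration tool follow this shape and may contain more fields):
#
#   image_width: 640
#   image_height: 480
#   camera_matrix:
#     data: [fx, 0, cx, 0, fy, cy, 0, 0, 1]
#   distortion_model: plumb_bob
#   distortion_coefficients:
#     data: [k1, k2, t1, t2, k3]
#   rectification_matrix:
#     data: [1, 0, 0, 0, 1, 0, 0, 0, 1]
#   projection_matrix:
#     data: [fx', 0, cx', Tx, 0, fy', cy', Ty, 0, 0, 1, 0]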
if __name__ == '__main__':
rospy.init_node("camera_info_publisher")
publisher = CameraInfoPublisher()
rospy.spin()
|
python
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def cal_Nash(obs_path, predict_array):
    obs = pd.read_csv(obs_path, index_col=0, parse_dates=True)
    obs_chl = obs['chla'].iloc[:-1]
    # Completion sketch (the original file is truncated here): the standard
    # Nash-Sutcliffe efficiency, assuming predict_array aligns with obs_chl.
    sim = np.asarray(predict_array)[:len(obs_chl)]
    residuals = obs_chl.values - sim
    return 1 - np.sum(residuals ** 2) / np.sum((obs_chl.values - obs_chl.mean()) ** 2)
|
python
|
import serial
import time
arduino = serial.Serial("COM5", 9600, timeout=0)
time.sleep(5)
for i in range(10000):
    # Send the digits 0-6 as single ASCII characters.
    for digit in range(7):
        arduino.write(str(digit).encode())
    time.sleep(5)
|
python
|
#!/usr/bin/env python3
"""
domain2idna - The tool to convert a domain or a file with a list
of domain to the famous IDNA format.
This submodule contains all helpers that are used by other submodules.
Author:
Nissar Chababy, @funilrys, contactTATAfunilrysTODTODcom
Contributors:
Let's contribute to domains2idna!!
Project link:
https://github.com/PyFunceble/domain2idna
Project documentation:
http://domain2idna.readthedocs.io
License:
MIT License
Copyright (c) 2019, 2020, 2021, 2022 PyFunceble
Copyright (c) 2018, 2019, 2020, 2021, 2022 Nissar Chababy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from os import remove
class File:
"""
File treatment/manipulations.
:param str filename:
A path to the file to manipulate.
"""
def __init__(self, filename):
self.file = filename
    def write(self, data_to_write):
        """
        Writes data into the given file path, overwriting any previous content.
        :param str data_to_write:
            The data to write.
        """
if data_to_write and isinstance(data_to_write, str):
with open(self.file, "w", encoding="utf-8") as file:
file.write(data_to_write)
def read(self, encoding=None):
"""
Reads a given file path and return its content.
:param str encoding:
The encoding to use when opening the file.
:rtype: str
"""
if not encoding:
encoding = "utf-8"
with open(self.file, "r", encoding=encoding) as file:
funilrys = file.read()
return funilrys
def delete(self):
"""
Deletes a given file path.
.. warning::
We handle the case that the file does not exist.
"""
try:
remove(self.file)
except OSError:
pass
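# Usage sketch (an illustrative addition; writes a throwaway file):
if __name__ == "__main__":
    demo = File("demo_domains.txt")
    demo.write("example.org\n")
    print(demo.read())
    demo.delete()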
|
python
|
from models.nasnet_do import NASNet_large_do
from tensorflow.keras import Model, Input
from tensorflow.keras.applications import DenseNet169
from tensorflow.keras.layers import Dropout, UpSampling2D, Conv2D, BatchNormalization, Activation, concatenate, Add
from tensorflow.keras.utils import get_file
from . import NetType
resnet_filename = 'ResNet-{}-model.keras.h5'
resnet_resource = 'https://github.com/fizyr/keras-models/releases/download/v0.0.1/{}'.format(resnet_filename)
def download_resnet_imagenet(v):
v = int(v.replace('resnet', ''))
filename = resnet_filename.format(v)
resource = resnet_resource.format(v)
if v == 50:
checksum = '3e9f4e4f77bbe2c9bec13b53ee1c2319'
elif v == 101:
checksum = '05dc86924389e5b401a9ea0348a3213c'
    elif v == 152:
        checksum = '6ee11ef2b135592f8031058820bb9e71'
    else:
        raise ValueError('Unsupported ResNet depth: {}'.format(v))
return get_file(
filename,
resource,
cache_subdir='models',
md5_hash=checksum
)
def conv_bn_relu(input, num_channel, kernel_size, stride, name, padding='same', bn_axis=-1, bn_momentum=0.99,
bn_scale=True, use_bias=True):
x = Conv2D(filters=num_channel, kernel_size=(kernel_size, kernel_size),
strides=stride, padding=padding,
kernel_initializer="he_normal",
use_bias=use_bias,
name=name + "_conv")(input)
x = BatchNormalization(name=name + '_bn', scale=bn_scale, axis=bn_axis, momentum=bn_momentum, epsilon=1.001e-5, )(x)
x = Activation('relu', name=name + '_relu')(x)
return x
def conv_bn_relu_do(input, num_channel, kernel_size, stride, name, padding='same', net_type=NetType.mc, bn_axis=-1, bn_momentum=0.99,
bn_scale=True, use_bias=True, do_p=0.3):
x = Conv2D(filters=num_channel, kernel_size=(kernel_size, kernel_size),
strides=stride, padding=padding,
kernel_initializer="he_normal",
use_bias=use_bias,
name=name + "_conv")(input)
x = BatchNormalization(name=name + '_bn', scale=bn_scale, axis=bn_axis, momentum=bn_momentum, epsilon=1.001e-5, )(x)
    if net_type == NetType.mc:
        x = Dropout(do_p)(x, training=True)
    elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = Activation('relu', name=name + '_relu')(x)
return x
def conv_bn(input, num_channel, kernel_size, stride, name, padding='same', bn_axis=-1, bn_momentum=0.99, bn_scale=True,
use_bias=True):
x = Conv2D(filters=num_channel, kernel_size=(kernel_size, kernel_size),
strides=stride, padding=padding,
kernel_initializer="he_normal",
use_bias=use_bias,
name=name + "_conv")(input)
x = BatchNormalization(name=name + '_bn', scale=bn_scale, axis=bn_axis, momentum=bn_momentum, epsilon=1.001e-5, )(x)
return x
def conv_relu(input, num_channel, kernel_size, stride, name, padding='same', use_bias=True, activation='relu'):
x = Conv2D(filters=num_channel, kernel_size=(kernel_size, kernel_size),
strides=stride, padding=padding,
kernel_initializer="he_normal",
use_bias=use_bias,
name=name + "_conv")(input)
x = Activation(activation, name=name + '_relu')(x)
return x
def conv_relu_do(input, num_channel, kernel_size, stride, name, padding='same', use_bias=True, activation='relu',
net_type=NetType.mc, do_p=0.3):
x = Conv2D(filters=num_channel, kernel_size=(kernel_size, kernel_size),
strides=stride, padding=padding,
kernel_initializer="he_normal",
use_bias=use_bias,
name=name + "_conv")(input)
    if net_type == NetType.mc:
        x = Dropout(do_p)(x, training=True)
    elif net_type == NetType.mc_df:
x = Dropout(do_p, noise_shape=(x.shape[0], 1, 1, x.shape[-1]))(x, training=True)
x = Activation(activation, name=name + '_relu')(x)
return x
def create_pyramid_features(C1, C2, C3, C4, C5, feature_size=256):
P5 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='P5', kernel_initializer="he_normal")(C5)
P5_upsampled = UpSampling2D(name='P5_upsampled')(P5)
P4 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C4_reduced',
kernel_initializer="he_normal")(C4)
P4 = Add(name='P4_merged')([P5_upsampled, P4])
P4 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P4', kernel_initializer="he_normal")(P4)
P4_upsampled = UpSampling2D(name='P4_upsampled')(P4)
P3 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C3_reduced',
kernel_initializer="he_normal")(C3)
P3 = Add(name='P3_merged')([P4_upsampled, P3])
P3 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P3', kernel_initializer="he_normal")(P3)
P3_upsampled = UpSampling2D(name='P3_upsampled')(P3)
P2 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C2_reduced',
kernel_initializer="he_normal")(C2)
P2 = Add(name='P2_merged')([P3_upsampled, P2])
P2 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P2', kernel_initializer="he_normal")(P2)
P2_upsampled = UpSampling2D(size=(2, 2), name='P2_upsampled')(P2)
P1 = Conv2D(feature_size, kernel_size=1, strides=1, padding='same', name='C1_reduced',
kernel_initializer="he_normal")(C1)
P1 = Add(name='P1_merged')([P2_upsampled, P1])
P1 = Conv2D(feature_size, kernel_size=3, strides=1, padding='same', name='P1', kernel_initializer="he_normal")(P1)
return P1, P2, P3, P4, P5
def decoder_block(input, filters, skip, block_name):
x = UpSampling2D()(input)
x = conv_bn_relu(x, filters, 3, stride=1, padding='same', name=block_name + '_conv1')
x = concatenate([x, skip], axis=-1, name=block_name + '_concat')
x = conv_bn_relu(x, filters, 3, stride=1, padding='same', name=block_name + '_conv2')
return x
def decoder_block_no_bn(input, filters, skip, block_name, activation='relu'):
x = UpSampling2D()(input)
x = conv_relu(x, filters, 3, stride=1, padding='same', name=block_name + '_conv1', activation=activation)
x = concatenate([x, skip], axis=-1, name=block_name + '_concat')
x = conv_relu(x, filters, 3, stride=1, padding='same', name=block_name + '_conv2', activation=activation)
return x
def decoder_block_no_bn_do(input, filters, skip, block_name, activation='relu', net_type=NetType.mc, do_p=0.3):
x = UpSampling2D()(input)
x = conv_relu_do(x, filters, 3, stride=1, padding='same', name=block_name + '_conv1', activation=activation,
net_type=net_type, do_p=do_p)
# if net_type==NetType.mc_dp:
# Dropout() ???
x = concatenate([x, skip], axis=-1, name=block_name + '_concat')
x = conv_relu_do(x, filters, 3, stride=1, padding='same', name=block_name + '_conv2', activation=activation,
net_type=net_type, do_p=do_p)
return x
def prediction_fpn_block(x, name, upsample=None):
x = conv_relu(x, 128, 3, stride=1, name="prediction_" + name + "_1")
x = conv_relu(x, 128, 3, stride=1, name="prediction_" + name + "_2")
if upsample:
x = UpSampling2D(upsample)(x)
return x
def prediction_fpn_block_do(x, name, upsample=None, net_type=NetType.mc, do_p=0.3):
x = conv_relu_do(x, 128, 3, stride=1, name="prediction_" + name + "_1", net_type=net_type, do_p=do_p)
x = conv_relu_do(x, 128, 3, stride=1, name="prediction_" + name + "_2", net_type=net_type, do_p=do_p)
if upsample:
x = UpSampling2D(upsample)(x)
return x
def nasnet_cls(input_shape, do_p=0, resize_size=32, total_training_steps=None, weights='imagenet', activation="softmax",
classes=13):
if resize_size is not None:
input_shape = (*((resize_size, resize_size) if isinstance(resize_size, int) else resize_size), input_shape[2])
nasnet = NASNet_large_do(input_shape=input_shape, net_type=NetType.vanilla, do_p=do_p, include_top=False,
total_training_steps=total_training_steps, activation=activation, weights=weights, classes=classes)
return nasnet
def nasnet_cdp_cls(input_shape, do_p=0.3, resize_size=32, total_training_steps=None,
weights='imagenet', activation="softmax", classes=13):
if resize_size is not None:
input_shape = (*((resize_size, resize_size) if isinstance(resize_size, int) else resize_size), input_shape[2])
return NASNet_large_do(input_shape=input_shape, net_type=NetType.cdp, do_p=do_p, total_training_steps=total_training_steps,
weights=weights, activation=activation, classes=classes, include_top=False)
def nasnet_sch_dp_cls(input_shape, do_p=0.3, resize_size=32, total_training_steps=None, # IS NOT IMPLEMENTED
weights='imagenet', activation="softmax", classes=13):
if resize_size is not None:
input_shape = (*((resize_size, resize_size) if isinstance(resize_size, int) else resize_size), input_shape[2])
return NASNet_large_do(input_shape=input_shape, net_type=NetType.mc_dp, do_p=do_p, total_training_steps=total_training_steps,
weights=weights, activation=activation, classes=classes, include_top=False)
def nasnet_do_cls(input_shape, do_p=0.3, resize_size=32, total_training_steps=None,
weights='imagenet', activation="softmax", classes=13):
if resize_size is not None:
input_shape = (*((resize_size, resize_size) if isinstance(resize_size, int) else resize_size), input_shape[2])
return NASNet_large_do(input_shape=input_shape, net_type=NetType.mc, do_p=do_p, total_training_steps=total_training_steps,
weights=weights, activation=activation, classes=classes, include_top=False)
def nasnet_df_cls(input_shape, do_p=0.3, resize_size=32, total_training_steps=None,
weights='imagenet', activation="softmax", classes=13):
if resize_size is not None:
input_shape = (*((resize_size, resize_size) if isinstance(resize_size, int) else resize_size), input_shape[2])
return NASNet_large_do(net_type=NetType.mc_df, do_p=do_p, total_training_steps=total_training_steps, weights=weights,
input_shape=input_shape, activation=activation, classes=classes, include_top=False)
def nasnet_fpn_do(input_shape, net_type, channels=1, do_p=0.3, total_training_steps=None, weights='imagenet', activation="softmax"):
nasnet = NASNet_large_do(input_shape=input_shape, net_type=net_type, do_p=do_p, include_top=False,
total_training_steps=total_training_steps, weights=weights)
conv1 = nasnet.get_layer("activation").output # ("stem_bn1").output
conv2 = nasnet.get_layer("reduction_concat_stem_1").output
conv3 = nasnet.get_layer("activation_134").output # ("normal_concat_5").output
conv4 = nasnet.get_layer("activation_252").output # ("normal_concat_12").output # shape: (batch_size, 16, 16, channels)
conv5 = nasnet.get_layer("normal_concat_18").output # ("normal_concat_18").output # shape: (batch_size, 8, 8, channels)
P1, P2, P3, P4, P5 = create_pyramid_features(conv1, conv2, conv3, conv4, conv5)
x = concatenate(
[
prediction_fpn_block(P5, "P5", (8, 8)),
prediction_fpn_block(P4, "P4", (4, 4)),
prediction_fpn_block(P3, "P3", (2, 2)),
prediction_fpn_block(P2, "P2"),
]
)
x = conv_bn_relu(x, 256, 3, (1, 1), name="aggregation")
x = decoder_block_no_bn(x, 128, conv1, 'up4')
x = UpSampling2D()(x)
x = conv_relu(x, 64, 3, (1, 1), name="up5_conv1")
x = conv_relu(x, 64, 3, (1, 1), name="up5_conv2")
if activation == 'softmax':
name = 'mask_softmax'
x = Conv2D(channels, (1, 1), activation=activation, name=name)(x)
else:
x = Conv2D(channels, (1, 1), activation=activation, name="mask")(x)
model = Model(nasnet.input, x)
return model
def nasnet_cdp_fpn(input_shape, channels=1, do_p=None, resize_size=None, total_training_steps=None, weights='imagenet',
activation="sigmoid"):
if resize_size is not None:
input_shape = (*((resize_size, resize_size) if isinstance(resize_size, int) else resize_size), input_shape[2])
return nasnet_fpn_do(input_shape, NetType.cdp, channels, weights=weights, activation=activation)
def nasnet_do_fpn(input_shape, channels=1, do_p=0.3, resize_size=None, total_training_steps=None, weights='imagenet',
activation="sigmoid"):
if resize_size is not None:
input_shape = (*((resize_size, resize_size) if isinstance(resize_size, int) else resize_size), input_shape[2])
return nasnet_fpn_do(input_shape, NetType.mc, channels, do_p, weights=weights, activation=activation)
def nasnet_df_fpn(input_shape, channels=1, do_p=0.3, resize_size=None, total_training_steps=None, weights='imagenet',
activation="sigmoid"):
if resize_size is not None:
input_shape = (*((resize_size, resize_size) if isinstance(resize_size, int) else resize_size), input_shape[2])
return nasnet_fpn_do(input_shape, NetType.mc_df, channels, do_p, weights=weights, activation=activation)
#vanilla nasnet
def nasnet_scd_fpn(input_shape, channels=1, do_p=0.3, resize_size=None, total_training_steps=None,
weights='imagenet', activation="sigmoid"):
if resize_size is not None:
input_shape = (*((resize_size, resize_size) if isinstance(resize_size, int) else resize_size), input_shape[2])
return nasnet_fpn_do(input_shape, NetType.sdp, channels, do_p, total_training_steps, weights, activation)
def nasnet_fpn_mc_sch_dp(input_shape, channels=1, do_p=0.3, resize_size=None, total_training_steps=None,
weights='imagenet', activation="sigmoid"):
if resize_size is not None:
input_shape = (*((resize_size, resize_size) if isinstance(resize_size, int) else resize_size), input_shape[2])
return nasnet_fpn_do(input_shape, NetType.mc_dp, channels, do_p, total_training_steps, weights, activation) #TODO: refactor NetType to mc_sch_dp
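# Minimal usage sketch (illustrative addition, not from the original source):
# assuming the builders above are importable, an MC-dropout FPN for 512x512 RGB
# inputs producing a single-channel sigmoid mask could be constructed like this.
if __name__ == '__main__':
    demo_model = nasnet_do_fpn((512, 512, 3), channels=1, do_p=0.3, weights=None)
    demo_model.summary()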
|
python
|
"""
Profile ../profile-datasets-py/standard54lev_o3_trunc/005.py
file automatically created by prof_gen.py script
"""
self["ID"] = "../profile-datasets-py/standard54lev_o3_trunc/005.py"
self["Q"] = numpy.array([ 4.902533, 4.833692, 4.768255, 4.706048,
4.646861, 4.590505, 4.536884, 4.486118,
4.828359, 6.645004, 10.27973 , 18.52683 ,
26.18652 , 31.28984 , 42.90004 , 120.0283 ,
184.579 , 245.7708 , 363.3486 , 512.8023 ,
709.9901 , 902.3722 , 1087.377 , 1231.327 ,
1343.463 , 1440.92 , 1510.047 , 1573.036 ,
1592.632 , 1535.324 , 1484.593 , 1440.364 ,
1402.571 , 1371.158 , 1346.081 ])
self["P"] = numpy.array([ 36.1735, 45.043 , 55.4433, 67.5109, 81.3744,
97.1505, 114.9415, 134.8318, 156.8846, 181.1394,
207.6092, 236.2784, 267.1012, 300. , 334.8648,
371.5529, 409.8893, 449.6677, 490.6516, 532.5769,
575.1538, 618.0706, 660.9965, 703.5863, 745.4841,
786.3278, 825.7546, 863.4047, 898.9275, 931.9853,
962.2587, 989.451 , 1013.292 , 1033.544 , 1050. ])
self["T"] = numpy.array([ 212.9693, 213.7954, 214.5807, 215.3272, 216.0374, 216.7137,
217.2 , 217.2 , 217.2 , 217.2 , 217.2 , 217.2 ,
217.2 , 218.4757, 221.1365, 225.7039, 230.145 , 234.4131,
238.5368, 242.4568, 246.1974, 249.207 , 251.6708, 253.5196,
254.898 , 256.1723, 257.3524, 258.4279, 258.9206, 258.4005,
257.9401, 257.5388, 257.1958, 256.9108, 256.6833])
self["O3"] = numpy.array([ 4.215323 , 3.902271 , 3.319317 , 2.528896 , 1.875051 ,
1.443153 , 1.121424 , 0.8306873 , 0.5889677 , 0.3919343 ,
0.3485972 , 0.3073584 , 0.2429395 , 0.1702235 , 0.1014747 ,
0.08000966, 0.06076883, 0.04415221, 0.04023689, 0.03673658,
0.03371695, 0.03103716, 0.02863924, 0.02653345, 0.02467643,
0.02310199, 0.02212827, 0.02124101, 0.02043248, 0.01969566,
0.01904341, 0.01847474, 0.01798883, 0.01758495, 0.01726253])
self["CTP"] = 400.0
self["CFRACTION"] = 0.2
self["IDG"] = 4
self["ISH"] = 3
self["ELEVATION"] = 0.0
self["S2M"]["T"] = 256.2
self["S2M"]["Q"] = 1403.02874461
self["S2M"]["O"] = 0.017994717422
self["S2M"]["P"] = 1030.0
self["S2M"]["U"] = 10.0
self["S2M"]["V"] = 3.0
self["S2M"]["WFETC"] = 50000.0
self["SKIN"]["SURFTYPE"] = 2
self["SKIN"]["WATERTYPE"] = 1
self["SKIN"]["T"] = 257.2
self["SKIN"]["SALINITY"] = 33.0
self["SKIN"]["FOAM_FRACTION"] = 0.5
self["SKIN"]["FASTEM"] = numpy.array([ 3. , 5. , 15. , 0.1, 0.3])
self["ZENANGLE"] = 0.0
self["AZANGLE"] = 90.0
self["SUNZENANGLE"] = 70.0
self["SUNAZANGLE"] = 90.0
self["LATITUDE"] = 60.0
self["GAS_UNITS"] = 2
self["BE"] = 0.5
self["COSBK"] = 1.0
self["DATE"] = numpy.array([1966, 1, 1])
self["TIME"] = numpy.array([ 0, 25, 0])
|
python
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\rabbit_hole\tunable_rabbit_hole_condition.py
# Compiled at: 2018-08-14 18:06:05
# Size of source mod 2**32: 918 bytes
from sims4.tuning.tunable import TunableVariant
from statistics.statistic_conditions import TunableStatisticCondition
class TunableRabbitHoleCondition(TunableVariant):
def __init__(self, *args, **kwargs):
        super().__init__(*args, stat_based=TunableStatisticCondition(description='\n A condition based on the status of a statistic.\n '),
                         default='stat_based', **kwargs)
|
python
|
# -*- coding: utf-8 -*-
"""FIFO Simulator."""
from __future__ import annotations
import queue
from queue import Queue
import threading
from typing import Any
from typing import Dict
from typing import Optional
from stdlib_utils import drain_queue
from xem_wrapper import DATA_FRAME_SIZE_WORDS
from xem_wrapper import DATA_FRAMES_PER_ROUND_ROBIN
from xem_wrapper import FrontPanelBase
from xem_wrapper import FrontPanelSimulator
from xem_wrapper import OpalKellyBoardNotInitializedError
from .constants import BARCODE_SCANNER_TOP_WIRE_OUT_ADDRESS
from .constants import FIFO_SIMULATOR_DEFAULT_WIRE_OUT_VALUE
from .constants import FIRMWARE_VERSION_WIRE_OUT_ADDRESS
from .constants import SECONDS_TO_WAIT_WHEN_POLLING_QUEUES
from .exceptions import AttemptToAddCyclesWhileSPIRunningError
from .exceptions import AttemptToInitializeFIFOReadsError
from .fifo_read_producer import FIFOReadProducer
from .fifo_read_producer import produce_data
from .mantarray_front_panel import MantarrayFrontPanelMixIn
class RunningFIFOSimulator(FrontPanelSimulator, MantarrayFrontPanelMixIn):
"""Simulate a running Mantarray machine with OK board.
Args:
simulated_response_queues: dictionary where the ultimate leaves should be queue.Queue objects.
These values are popped off the end of the queue and returned as if coming from the XEM.
The 'wire_outs' key should contain a sub-dict with keys of integer values representing the EP addresses.
"""
default_device_id = "M02001900Mantarray Simulator"
default_mantarray_serial_number = "M02001900"
default_mantarray_nickname = "Mantarray Simulator"
default_firmware_version = "0.0.0"
default_barcode = "MA190190000"
def __init__(self, simulated_response_queues: Optional[Dict[str, Any]] = None) -> None:
if simulated_response_queues is None:
simulated_response_queues = {}
if "pipe_outs" in simulated_response_queues:
raise AttemptToInitializeFIFOReadsError()
super().__init__(simulated_response_queues)
self._device_id = self.default_device_id
self._fifo_read_producer: Optional[FIFOReadProducer] = None
self._producer_error_queue: Optional[Queue[str]] = None # pylint: disable=unsubscriptable-object
self._producer_data_queue: Optional[Queue[bytearray]] = None # pylint: disable=unsubscriptable-object
self._lock: Optional[threading.Lock] = None
def hard_stop(self, timeout: Optional[float] = None) -> None:
if self._fifo_read_producer is not None:
self._fifo_read_producer.hard_stop(timeout=timeout)
if "wire_outs" in self._simulated_response_queues:
wire_outs = self._simulated_response_queues["wire_outs"]
for wire_out_queue in wire_outs.values():
drain_queue(wire_out_queue)
def initialize_board(
self,
bit_file_name: Optional[str] = None,
allow_board_reinitialization: bool = False,
) -> None:
board_already_initialized = self.is_board_initialized()
super().initialize_board(
bit_file_name=bit_file_name,
allow_board_reinitialization=allow_board_reinitialization,
)
if not board_already_initialized:
self._producer_error_queue = queue.Queue()
self._producer_data_queue = queue.Queue()
self._lock = threading.Lock()
def start_acquisition(self) -> None:
super().start_acquisition()
if self._producer_data_queue is None:
raise NotImplementedError("_producer_data_queue should never be None here")
if self._producer_error_queue is None:
raise NotImplementedError("_producer_error_queue should never be None here")
if self._lock is None:
raise NotImplementedError("_lock should never be None here")
self._fifo_read_producer = FIFOReadProducer(
self._producer_data_queue, self._producer_error_queue, self._lock
)
self._fifo_read_producer.start()
def stop_acquisition(self) -> None:
super().stop_acquisition()
if self._fifo_read_producer is None:
raise NotImplementedError("_fifo_read_producer should never be None here")
if self._producer_data_queue is None:
raise NotImplementedError("_producer_data_queue should never be None here")
if self._lock is None:
raise NotImplementedError("_lock should never be None here")
self._fifo_read_producer.soft_stop()
is_producer_stopped = False
while not is_producer_stopped:
is_producer_stopped = self._fifo_read_producer.is_stopped()
with self._lock:
while True:
try:
self._producer_data_queue.get(timeout=SECONDS_TO_WAIT_WHEN_POLLING_QUEUES)
except queue.Empty:
break
self._fifo_read_producer.join()
self._fifo_read_producer = None
def read_wire_out(self, ep_addr: int) -> int:
FrontPanelBase.read_wire_out(self, ep_addr)
wire_outs = self._simulated_response_queues.get("wire_outs", None)
if wire_outs is None:
return FIFO_SIMULATOR_DEFAULT_WIRE_OUT_VALUE
wire_out_queue = wire_outs.get(ep_addr, None)
if wire_out_queue is None:
return FIFO_SIMULATOR_DEFAULT_WIRE_OUT_VALUE
try:
wire_out_value = wire_out_queue.get(timeout=SECONDS_TO_WAIT_WHEN_POLLING_QUEUES)
except queue.Empty:
return FIFO_SIMULATOR_DEFAULT_WIRE_OUT_VALUE
if not isinstance(wire_out_value, int):
raise NotImplementedError("Wire out values should always be ints")
return wire_out_value
def read_from_fifo(self) -> bytearray:
if self._producer_data_queue is None:
raise NotImplementedError("_producer_data_queue should never be None here")
if self._lock is None:
raise NotImplementedError("_lock should never be None here")
# Tanner (3/12/20) is not sure how to test that we are using a lock here. The purpose of this lock is to ensure that data is not pulled from the queue at the same time it is being added.
with self._lock:
data_read = bytearray(0)
while True:
try:
iter_data = self._producer_data_queue.get(timeout=SECONDS_TO_WAIT_WHEN_POLLING_QUEUES)
except queue.Empty:
break
data_read.extend(iter_data)
return data_read
def get_num_words_fifo(self) -> int:
FrontPanelBase.get_num_words_fifo(self)
if self._producer_data_queue is None:
raise NotImplementedError("_producer_data_queue should never be None here")
if self._lock is None:
raise NotImplementedError("_lock should never be None here")
num_words = 0
temp_queue: Queue[bytearray] = queue.Queue() # pylint: disable=unsubscriptable-object
# Tanner (3/12/20) is not sure how to test that we are using a lock here. The purpose of this lock is to ensure that data is not pulled from the queue at the same time it is being added.
with self._lock:
while True:
try:
iter_data = self._producer_data_queue.get(timeout=SECONDS_TO_WAIT_WHEN_POLLING_QUEUES)
except queue.Empty:
break
num_words += (
DATA_FRAME_SIZE_WORDS
* DATA_FRAMES_PER_ROUND_ROBIN
* 20 # Tanner (7/25/21): this value is no longer accurate, but is only used by ok_comm for logging purposes
)
temp_queue.put_nowait(iter_data)
while True:
try:
iter_data = temp_queue.get(timeout=SECONDS_TO_WAIT_WHEN_POLLING_QUEUES)
except queue.Empty:
break
self._producer_data_queue.put_nowait(iter_data)
return num_words
def add_data_cycles(self, num_cycles: int) -> None:
if not self._is_board_initialized:
raise OpalKellyBoardNotInitializedError()
if self.is_spi_running():
raise AttemptToAddCyclesWhileSPIRunningError()
if self._producer_data_queue is None:
raise NotImplementedError("_producer_data_queue should never be None here")
data = produce_data(num_cycles, 0)
self._producer_data_queue.put_nowait(data)
def get_firmware_version(self) -> str:
FrontPanelBase.read_wire_out(self, FIRMWARE_VERSION_WIRE_OUT_ADDRESS)
return self.default_firmware_version
def get_barcode(self) -> str:
FrontPanelBase.read_wire_out(self, BARCODE_SCANNER_TOP_WIRE_OUT_ADDRESS)
return self.default_barcode
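# Minimal usage sketch (illustrative addition, not part of the original file):
# wire-out reads can be simulated by seeding the 'wire_outs' sub-dict with
# queue.Queue objects keyed by EP address, as the class docstring describes.
# The address 0x20 below is an arbitrary placeholder, not a real constant.
if __name__ == "__main__":
    demo_wire_out_queue = queue.Queue()
    demo_wire_out_queue.put_nowait(42)
    demo_simulator = RunningFIFOSimulator({"wire_outs": {0x20: demo_wire_out_queue}})
    demo_simulator.initialize_board()
    print(demo_simulator.read_wire_out(0x20))  # 42; later reads fall back to the default value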
|
python
|
import sys
from pathlib import Path
from setuptools import find_packages, setup
project_slug = "nptyping"
here = Path(__file__).parent.absolute()
def _get_dependencies(dependency_file):
with open(here / "dependencies" / dependency_file, mode="r", encoding="utf-8") as f:
return f.read().strip().split("\n")
# Read meta info from package_info.py.
package_info = {}
with open(here / project_slug / "package_info.py", mode="r", encoding="utf-8") as f:
exec(f.read(), package_info)
supp_versions = package_info["__python_versions__"]
# The README.md provides the long description text.
with open("README.md", mode="r", encoding="utf-8") as f:
long_description = f.read()
# Check the current version against the supported versions: older versions are not supported.
u_major = sys.version_info.major
u_minor = sys.version_info.minor
versions_as_ints = [[int(v) for v in version.split(".")] for version in supp_versions]
version_unsupported = not [
1 for major, minor in versions_as_ints if u_major == major and u_minor >= minor
]
if version_unsupported:
supported_versions_str = ", ".join(version for version in supp_versions)
raise Exception(
f"Unsupported Python version: {sys.version}. Supported versions: {supported_versions_str}"
)
extras = {
"build": _get_dependencies("build-requirements.txt"),
"qa": _get_dependencies("qa-requirements.txt"),
}
# Complete: all extras for end users, excluding dev dependencies.
extras["complete"] = [
req for key, reqs in extras.items() for req in reqs if key not in ("build", "qa")
]
# Dev: all extras for developers, including build and qa dependencies.
extras["dev"] = [req for key, reqs in extras.items() for req in reqs if key]
setup(
name=package_info["__title__"],
version=package_info["__version__"],
author=package_info["__author__"],
author_email=package_info["__author_email__"],
description=package_info["__description__"],
url=package_info["__url__"],
long_description=long_description,
long_description_content_type="text/markdown",
license=package_info["__license__"],
package_data={
"nptyping": [
"ndarray.pyi",
"shape_expression.pyi",
"typing_.pyi",
"py.typed",
],
},
packages=find_packages(
exclude=("tests", "tests.*", "test_resources", "test_resources.*")
),
install_requires=_get_dependencies("requirements.txt"),
extras_require=extras,
python_requires=f">={supp_versions[0]}",
test_suite="tests",
zip_safe=False,
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
*[f"Programming Language :: Python :: {version}" for version in supp_versions],
],
)
|
python
|
from math import pi
import numpy as np
cm = 0.23
km = 370.0
WIND = 5.0
OMEGA = 0.84
AMPLITUDE = 0.5
CHOPPY_FACTOR = np.array([2.3, 2.1, 1.3, 0.9], dtype=np.float32)
PASSES = 8 # number of passes needed for the FFT 6 -> 64, 7 -> 128, 8 -> 256, etc
FFT_SIZE = 1 << PASSES # size of the textures storing the waves in frequency and spatial domains
N_SLOPE_VARIANCE = 10
GRID1_SIZE = 5488.0
GRID2_SIZE = 392.0
GRID3_SIZE = 28.0
GRID4_SIZE = 2.0
GRID_SIZES = np.array([GRID1_SIZE, GRID2_SIZE, GRID3_SIZE, GRID4_SIZE], dtype=np.float32)
INVERSE_GRID_SIZES = np.array([2.0 * pi * FFT_SIZE / GRID1_SIZE,
2.0 * pi * FFT_SIZE / GRID2_SIZE,
2.0 * pi * FFT_SIZE / GRID3_SIZE,
2.0 * pi * FFT_SIZE / GRID4_SIZE], dtype=np.float32)
GRID_VERTEX_COUNT = 200
GRID_CELL_SIZE = np.array([1.0 / float(GRID_VERTEX_COUNT), 1.0 / float(GRID_VERTEX_COUNT)], dtype=np.float32)
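# Sanity check (illustrative addition, not part of the original file): PASSES
# fixes the FFT texture resolution via FFT_SIZE = 2 ** PASSES (8 passes -> 256),
# and each inverse grid size is the texture size times the fundamental
# wavenumber 2*pi / GRID_SIZE.
assert FFT_SIZE == 2 ** PASSES == 256
assert np.allclose(INVERSE_GRID_SIZES, 2.0 * pi * FFT_SIZE / GRID_SIZES)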
|
python
|
from datasets.kitti import *
from Archs_3D.build_retina3d_model import build_model
from Solver.Solver import Solver
import operator
import logging
import os
import sys
import copy
import datetime
from datasets.distributed_sampler import *
from datasets.dataset_utils import AspectRatioGroupedDataset
import numpy as np
import argparse
from utils.check_point import DetectronCheckpointer
from utils.metric_logger import MetricLogger
import time
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
# def eval(model, b_images, b_labels, device=torch.device('cpu')):
# model.eval()
# with torch.no_grad():
# pass
#
def train(model, solver, b_images, gt_labels):
model.train()
losses = model(copy.deepcopy(b_images), gt_labels=gt_labels, is_training=True)
total_loss = solver.step(losses)
return total_loss, losses
def run(model_name):
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
logger = logging.getLogger("od.trainer")
    logger.info('Cuda available: ' + ('true' if torch.cuda.is_available() else 'false'))
model, backbone, CONFIGS = build_model(model_name)
train_path = CONFIGS.TRAINING.LOGDIR
device = torch.device(CONFIGS.DEVICE if torch.cuda.is_available() else "cpu")
dataset = KITTI(CONFIGS, transform=TrainAugmentation(CONFIGS.INTENSOR_SIZE,
CONFIGS.DATASET.MEAN),
is_training=True)
sampler = TrainingSampler(len(dataset))
params = {'sampler': sampler,
'batch_sampler': None,
'collate_fn': operator.itemgetter(0), # don't batch, but yield individual elements
'num_workers': 8}
generator = data.DataLoader(dataset, **params)
generator = AspectRatioGroupedDataset(generator, CONFIGS.BATCH_SIZE)
model.train()
solver = Solver(model, CONFIGS)
model = model.to(device)
first_step = 0
# load any previous weights
model_path = os.path.abspath(train_path)
checkpointer = DetectronCheckpointer(
CONFIGS, model, solver, model_path, True, logger=logger
)
arguments = {}
arguments['iteration'] = first_step
weights = checkpointer.load(CONFIGS.TRAINING.CHECKPOINT_FILE)
arguments.update(weights)
max_steps = CONFIGS.SOLVER.MAX_ITER
generator = iter(generator)
meters = MetricLogger(delimiter=" ")
first_step = arguments['iteration']
start_training_time = time.time()
end = time.time()
for i in range(first_step, max_steps):
iteration = i + 1
arguments['iteration'] = iteration
batch_data = next(generator)
data_time = time.time() - end
b_images = [d[0] for d in batch_data]
gt_labels = [d[1] for d in batch_data]
b_images = torch.stack(b_images, 0)
b_images = b_images.to(device)
        total_loss, loss_dict = train(model, solver, b_images, gt_labels)
batch_time = time.time() - end
end = time.time()
meters.update(loss=total_loss, **loss_dict)
meters.update(time=batch_time, data=data_time)
eta_seconds = meters.time.global_avg * (max_steps - iteration)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if iteration % 10 == 0 or iteration == max_steps:
logger.info(meters.delimiter.join(
[
"eta: {eta}",
"iter: {iter}",
"{meters}",
"lr: {lr:.8f}",
"max men: {memory:.0f}",
]
).format(
eta=eta_string,
iter=iteration,
meters=str(meters),
lr=solver.learnrate,
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 if torch.cuda.is_available() else 0
))
# fixme: do we need checkpoint_period here
if iteration % 1000 == 0 and iteration > 0:
checkpointer.save("model_{:07d}".format(iteration), **arguments)
if iteration == max_steps:
checkpointer.save("model_final", **arguments)
total_training_time = time.time() - start_training_time
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info(
"Total training time: {} ({:.4f} s / it)".format(
total_time_str, total_training_time / (max_steps)
)
)
def arg_parser():
parser = argparse.ArgumentParser(description="Deepbox3D Training")
parser.add_argument("--model-name", default="MOBI-V2-RETINA3D-FPN", help="specific model name")
return parser
if __name__ == '__main__':
args = arg_parser().parse_args()
run(args.model_name)
|
python
|
from __future__ import absolute_import, division, print_function
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
def create_flags():
# Importer
# ========
tf.app.flags.DEFINE_string ('train_files', '', 'comma separated list of files specifying the dataset used for training. multiple files will get merged')
tf.app.flags.DEFINE_string ('dev_files', '', 'comma separated list of files specifying the dataset used for validation. multiple files will get merged')
tf.app.flags.DEFINE_string ('test_files', '', 'comma separated list of files specifying the dataset used for testing. multiple files will get merged')
tf.app.flags.DEFINE_boolean ('fulltrace', False, 'if full trace debug info should be generated during training')
tf.app.flags.DEFINE_string ('train_cached_features_path', '', 'comma separated list of files specifying the dataset used for training. multiple files will get merged')
tf.app.flags.DEFINE_string ('dev_cached_features_path', '', 'comma separated list of files specifying the dataset used for validation. multiple files will get merged')
tf.app.flags.DEFINE_string ('test_cached_features_path', '', 'comma separated list of files specifying the dataset used for testing. multiple files will get merged')
# Cluster configuration
# =====================
tf.app.flags.DEFINE_string ('ps_hosts', '', 'parameter servers - comma separated list of hostname:port pairs')
tf.app.flags.DEFINE_string ('worker_hosts', '', 'workers - comma separated list of hostname:port pairs')
tf.app.flags.DEFINE_string ('job_name', 'localhost', 'job name - one of localhost (default), worker, ps')
tf.app.flags.DEFINE_integer ('task_index', 0, 'index of task within the job - worker with index 0 will be the chief')
tf.app.flags.DEFINE_integer ('replicas', -1, 'total number of replicas - if negative, its absolute value is multiplied by the number of workers')
tf.app.flags.DEFINE_integer ('replicas_to_agg', -1, 'number of replicas to aggregate - if negative, its absolute value is multiplied by the number of workers')
tf.app.flags.DEFINE_integer ('coord_retries', 100, 'number of tries of workers connecting to training coordinator before failing')
tf.app.flags.DEFINE_string ('coord_host', 'localhost', 'coordination server host')
tf.app.flags.DEFINE_integer ('coord_port', 2500, 'coordination server port')
tf.app.flags.DEFINE_integer ('iters_per_worker', 1, 'number of train or inference iterations per worker before results are sent back to coordinator')
# Global Constants
# ================
tf.app.flags.DEFINE_boolean ('train', True, 'whether to train the network')
tf.app.flags.DEFINE_boolean ('test', True, 'whether to test the network')
tf.app.flags.DEFINE_integer ('epoch', 75, 'target epoch to train - if negative, the absolute number of additional epochs will be trained')
tf.app.flags.DEFINE_float ('dropout_rate', 0.05, 'dropout rate for feedforward layers')
tf.app.flags.DEFINE_float ('dropout_rate2', -1.0, 'dropout rate for layer 2 - defaults to dropout_rate')
tf.app.flags.DEFINE_float ('dropout_rate3', -1.0, 'dropout rate for layer 3 - defaults to dropout_rate')
tf.app.flags.DEFINE_float ('dropout_rate4', 0.0, 'dropout rate for layer 4 - defaults to 0.0')
tf.app.flags.DEFINE_float ('dropout_rate5', 0.0, 'dropout rate for layer 5 - defaults to 0.0')
tf.app.flags.DEFINE_float ('dropout_rate6', -1.0, 'dropout rate for layer 6 - defaults to dropout_rate')
    tf.app.flags.DEFINE_float ('relu_clip', 20.0, 'ReLU clipping value for non-recurrent layers')
# Adam optimizer (http://arxiv.org/abs/1412.6980) parameters
tf.app.flags.DEFINE_float ('beta1', 0.9, 'beta 1 parameter of Adam optimizer')
tf.app.flags.DEFINE_float ('beta2', 0.999, 'beta 2 parameter of Adam optimizer')
tf.app.flags.DEFINE_float ('epsilon', 1e-8, 'epsilon parameter of Adam optimizer')
tf.app.flags.DEFINE_float ('learning_rate', 0.001, 'learning rate of Adam optimizer')
# Batch sizes
tf.app.flags.DEFINE_integer ('train_batch_size', 1, 'number of elements in a training batch')
tf.app.flags.DEFINE_integer ('dev_batch_size', 1, 'number of elements in a validation batch')
tf.app.flags.DEFINE_integer ('test_batch_size', 1, 'number of elements in a test batch')
tf.app.flags.DEFINE_integer ('export_batch_size', 1, 'number of elements per batch on the exported graph')
# Performance (UNSUPPORTED)
tf.app.flags.DEFINE_integer ('inter_op_parallelism_threads', 0, 'number of inter-op parallelism threads - see tf.ConfigProto for more details')
tf.app.flags.DEFINE_integer ('intra_op_parallelism_threads', 0, 'number of intra-op parallelism threads - see tf.ConfigProto for more details')
# Sample limits
tf.app.flags.DEFINE_integer ('limit_train', 0, 'maximum number of elements to use from train set - 0 means no limit')
    tf.app.flags.DEFINE_integer ('limit_dev', 0, 'maximum number of elements to use from validation set - 0 means no limit')
    tf.app.flags.DEFINE_integer ('limit_test', 0, 'maximum number of elements to use from test set - 0 means no limit')
# Step widths
tf.app.flags.DEFINE_integer ('validation_step', 0, 'number of epochs we cycle through before validating the model - 0 means no validation steps')
# Checkpointing
tf.app.flags.DEFINE_string ('checkpoint_dir', '', 'directory in which checkpoints are stored - defaults to directory "deepspeech/checkpoints" within user\'s data home specified by the XDG Base Directory Specification')
tf.app.flags.DEFINE_integer ('checkpoint_secs', 600, 'checkpoint saving interval in seconds')
tf.app.flags.DEFINE_integer ('max_to_keep', 5, 'number of checkpoint files to keep - default value is 5')
# Exporting
tf.app.flags.DEFINE_string ('export_dir', '', 'directory in which exported models are stored - if omitted, the model won\'t get exported')
tf.app.flags.DEFINE_integer ('export_version', 1, 'version number of the exported model')
tf.app.flags.DEFINE_boolean ('remove_export', False, 'whether to remove old exported models')
tf.app.flags.DEFINE_boolean ('export_tflite', False, 'export a graph ready for TF Lite engine')
tf.app.flags.DEFINE_boolean ('use_seq_length', True, 'have sequence_length in the exported graph (will make tfcompile unhappy)')
tf.app.flags.DEFINE_integer ('n_steps', 16, 'how many timesteps to process at once by the export graph, higher values mean more latency')
# Reporting
tf.app.flags.DEFINE_integer ('log_level', 1, 'log level for console logs - 0: INFO, 1: WARN, 2: ERROR, 3: FATAL')
tf.app.flags.DEFINE_boolean ('log_traffic', False, 'log cluster transaction and traffic information during debug logging')
tf.app.flags.DEFINE_boolean ('show_progressbar', True, 'Show progress for training, validation and testing processes. Log level should be > 0.')
tf.app.flags.DEFINE_boolean ('log_placement', False, 'whether to log device placement of the operators to the console')
tf.app.flags.DEFINE_integer ('report_count', 10, 'number of phrases with lowest WER (best matching) to print out during a WER report')
tf.app.flags.DEFINE_string ('summary_dir', '', 'target directory for TensorBoard summaries - defaults to directory "deepspeech/summaries" within user\'s data home specified by the XDG Base Directory Specification')
tf.app.flags.DEFINE_integer ('summary_secs', 0, 'interval in seconds for saving TensorBoard summaries - if 0, no summaries will be written')
# Geometry
tf.app.flags.DEFINE_integer ('n_hidden', 2048, 'layer width to use when initialising layers')
# Initialization
tf.app.flags.DEFINE_integer ('random_seed', 4568, 'default random seed that is used to initialize variables')
# Early Stopping
tf.app.flags.DEFINE_boolean ('early_stop', True, 'enable early stopping mechanism over validation dataset. Make sure that dev FLAG is enabled for this to work')
# This parameter is irrespective of the time taken by single epoch to complete and checkpoint saving intervals.
# It is possible that early stopping is triggered far after the best checkpoint is already replaced by checkpoint saving interval mechanism.
# One has to align the parameters (earlystop_nsteps, checkpoint_secs) accordingly as per the time taken by an epoch on different datasets.
tf.app.flags.DEFINE_integer ('earlystop_nsteps', 4, 'number of steps to consider for early stopping. Loss is not stored in the checkpoint so when checkpoint is revived it starts the loss calculation from start at that point')
tf.app.flags.DEFINE_float ('estop_mean_thresh', 0.5, 'mean threshold for loss to determine the condition if early stopping is required')
tf.app.flags.DEFINE_float ('estop_std_thresh', 0.5, 'standard deviation threshold for loss to determine the condition if early stopping is required')
# Decoder
tf.app.flags.DEFINE_string ('alphabet_config_path', 'data/alphabet.txt', 'path to the configuration file specifying the alphabet used by the network. See the comment in data/alphabet.txt for a description of the format.')
tf.app.flags.DEFINE_string ('lm_binary_path', 'data/lm/lm.binary', 'path to the language model binary file created with KenLM')
tf.app.flags.DEFINE_string ('lm_trie_path', 'data/lm/trie', 'path to the language model trie file created with native_client/generate_trie')
tf.app.flags.DEFINE_integer ('beam_width', 1024, 'beam width used in the CTC decoder when building candidate transcriptions')
tf.app.flags.DEFINE_float ('lm_alpha', 0.75, 'the alpha hyperparameter of the CTC decoder. Language Model weight.')
tf.app.flags.DEFINE_float ('lm_beta', 1.85, 'the beta hyperparameter of the CTC decoder. Word insertion weight.')
# Inference mode
tf.app.flags.DEFINE_string ('one_shot_infer', '', 'one-shot inference mode: specify a wav file and the script will load the checkpoint and perform inference on it. Disables training, testing and exporting.')
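# Minimal usage sketch (illustrative addition, not part of the original file):
# after create_flags() runs, every flag defined above is available on FLAGS:
# create_flags()
# print(FLAGS.beam_width)       # 1024
# print(FLAGS.checkpoint_secs)  # 600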
|
python
|
"""
Original Demo: http://js.cytoscape.org/demos/concentric-layout/
Original Code: https://github.com/cytoscape/cytoscape.js/blob/master/documentation/demos/concentric-layout/code.js
Note: This example is broken because the layout takes JavaScript functions as input, which cannot be passed from Python, i.e.
```
layout: {
name: 'concentric',
concentric: function( node ){
return node.degree();
},
levelWidth: function( nodes ){
return 2;
}
},
```
"""
import dash_cytoscape
import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
import json
app = dash.Dash(__name__)
server = app.server
app.scripts.config.serve_locally = True
app.css.config.serve_locally = True
# Load Data
with open('data/concentric-layout/data.json', 'r') as f:
elements = json.loads(f.read())
# App
app.layout = html.Div([
dash_cytoscape.Cytoscape(
id='cytoscape',
elements=elements,
layout={
'name': 'concentric',
},
stylesheet=[{
'selector': 'node',
'style': {
'height': 20,
'width': 20,
'background-color': '#30c9bc'
}
}, {
'selector': 'edge',
'style': {
'curve-style': 'haystack',
'haystack-radius': 0,
'width': 5,
'opacity': 0.5,
'line-color': '#a8eae5'
}
}],
style={
'width': '100%',
'height': '100%',
'position': 'absolute',
'left': 0,
'top': 0,
'z-index': 999
}
)
])
if __name__ == '__main__':
app.run_server(debug=True)
|
python
|
import json
import hikari
async def debug(event: hikari.GuildMessageCreateEvent, command: str, config, *args) -> None:
await event.message.respond(json.dumps({
"command": command,
"args": args,
"user": event.message.author.username,
}))
|
python
|
from .models import TopLevelDomain, WhoisServer, Domain
import json
import dateutil.parser
from django.db.models.functions import Length
import xmltodict
import socket
import pythonwhois
TLD_FIELDS_MAP = {
'@name':"name",
'countryCode' : "country_code" ,
"created": 'created',
"changed": 'changed',
"registrationService":"registar",
"source": "source",
"state": "state",
"domain": "domain"
}
WHOIS_FIELDS_MAP = {
"@host" : "host",
"source" : "source",
"queryFormat" : "queryFormat",
"availablePattern" : "errorPattern",
"errorPattern" : "errorPattern"
}
def get_data_from_xml():
fpath = "engine/data/whois-server-list.xml"
f = open(fpath, 'r')
data = f.read()
f.close()
parsed = xmltodict.parse(data)
dict_data = json.loads(json.dumps(parsed)) # OrderedDict -> Dict conversion
return dict_data
def ws_get_or_create(ws_dict):
host = ws_dict.get('@host')
if host.endswith('.'):
host = host[:-1]
try:
return WhoisServer.objects.get(host=host)
except WhoisServer.DoesNotExist:
pass
ws_item = {}
for k in ws_dict.keys():
ws_item[WHOIS_FIELDS_MAP[k]] = ws_dict[k]
ws = WhoisServer(**ws_item)
ws.save()
return ws
def prepare_tld_entry(entry):
tld_dict = {}
for k in entry.keys():
if k in ["whoisServer", "domain"]:
continue
if k in ['changed',"created"]:
tld_dict[k] = dateutil.parser.parse(str(entry[k]))
continue
tld_dict[TLD_FIELDS_MAP[k]] = entry[k]
return tld_dict
def tld_get_or_create(entry,parent=None):
name = entry.get('@name')
try:
tld = TopLevelDomain.objects.get(name=name)
except TopLevelDomain.DoesNotExist:
tld_dict = prepare_tld_entry(entry)
tld = TopLevelDomain(**tld_dict)
if parent: tld.parent = parent
tld.save()
return tld
def handle_entry(entry, parent=None):
name = entry.get('@name')
print "[handle_entry]", "-----------"*bool(parent), name, parent
domains = entry.get('domain', [])
whois_servers = entry.get('whoisServer')
tld = tld_get_or_create(entry, parent)
if whois_servers:
if isinstance(whois_servers, dict): whois_servers = [whois_servers]
for ws in whois_servers:
ws_item = ws_get_or_create(ws)
if ws_item not in tld.whois.all():
tld.whois.add(ws_item)
if domains:
if isinstance(domains, dict): domains = [domains]
for domain in domains:
handle_entry(domain, parent=tld)
def parse_data():
data = get_data_from_xml()
records = data.get('domainList').get('domain')
for entry in records:
handle_entry(entry)
print "[parse_data] Finished"
def get_names():
data = get_data_from_xml()
records = data.get('domainList').get('domain')
names = []
for entry in records:
names.append(entry.get('@name'))
domains = entry.get('domain')
if domains:
if isinstance(domains, dict):
names.append(domains.get('@name'))
elif isinstance(domains, list):
for d in domains:
names.append(d.get('@name'))
else:
print "Unrecognized domain data", domains
return names
def get_ws():
data = get_data_from_xml()
records = data.get('domainList').get('domain')
ws = []
for entry in records:
whois_servers = entry.get('whoisServer')
if whois_servers:
if not isinstance(whois_servers, list):
whois_servers = [whois_servers]
for k in whois_servers:
ws.append(k.get('@host'))
domains = entry.get('domain')
if domains:
if isinstance(domains, dict):
domains = [domains]
for d in domains:
whois_servers = d.get('whoisServer')
if whois_servers:
if not isinstance(whois_servers, list):
whois_servers = [whois_servers]
for p in whois_servers:
ws.append(p.get('@host'))
return ws
def get_whois_servers_for_domain(domain):
splitted = domain.split('.')
variants = ['.'.join(splitted[i:]) for i in range(len(splitted))] # a.b.c.d.e -> ['a.b.c.d.e','b.c.d.e','c.d.e','d.e','e']
q = TopLevelDomain.objects.filter(name__in=variants).order_by(Length('name').asc())
    assert q.count(), "Not a valid domain name"
sub = q.last() # the longest subdomain
if sub.whois.count():
return sub.whois.all()
elif sub.parent:
return sub.parent.whois.all()
return []
def run_whois_query(domain_name, whois_server, query_format=None):
query = domain_name
if query_format:
query = query_format % domain_name
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((whois_server, 43))
s.send(query + "\r\n")
response = ""
while True:
data = s.recv(4096)
response += data
if not data:
break
s.close()
return response
def get_whois_data(domain_name):
"""
:param domain_name:
:param ws:
:return:
# GOOD, REGISTERED: {'raw':...,'parsed':...}
# GOOD, UNREGISTERED {'free' :True} (error pattern found in raw response) }
# UNKNOWN: [{'whois1.registar1.com': {'raw':...,'parsed':...}, 'whois2.registar2.com': {'raw':...,'parsed':...}]
"""
print "get_whois_data started", domain_name
d, created = Domain.objects.get_or_create(name=domain_name)
if d.whois:
print "whois_servers for domain", d.whois
raw = run_whois_query(d.name, d.whois.host, d.whois.queryFormat)
parsed = pythonwhois.parse.parse_raw_whois([raw])
return dict(raw=raw,parsed=parsed)
whois_servers = get_whois_servers_for_domain(domain_name)
print "whois_servers for domain", whois_servers
responses = {}
for ws in whois_servers:
print "checking for", ws
raw = run_whois_query(d.name, ws.host, ws.queryFormat)
if ws.errorPattern and ws.errorPattern.replace('\Q','').replace('\E','').lower() in raw.lower():
print "error pattern found", ws.errorPattern, raw
return {"free":True, "raw" :raw }
#free for regitration
parsed = pythonwhois.parse.parse_raw_whois([raw])
if parsed.get('whois_server'):
print "New server found", parsed.get('whois_server')
next_server_host = parsed.get('whois_server')[0].lower()
if next_server_host == "gandi.net": # .ninja fix for whois.donut.com response
next_server_host = "whois.gandi.net"
if next_server_host != ws.host:
next_server, created = WhoisServer.objects.get_or_create(host=next_server_host)
d.whois = next_server
d.save()
return get_whois_data(d.name)
        if parsed.get('expiration_date'):  # good enough
ws.domains.add(d)
return dict(raw=raw,parsed=parsed)
responses[ws.host] = dict(raw=raw,parsed=parsed) # TODO: compare and get better response
return responses
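# Minimal usage sketch (illustrative addition, not part of the original module):
# resolve the responsible WHOIS servers for a name, then query one directly or
# go through the cached/recursive helper. Assumes the TLD and WhoisServer
# tables were populated beforehand via parse_data().
# servers = get_whois_servers_for_domain("example.com")
# raw = run_whois_query("example.com", servers[0].host, servers[0].queryFormat)
# data = get_whois_data("example.com")  # {'raw': ..., 'parsed': ...} or {'free': True, ...}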
|
python
|
# Copyright (c) 2020 - for information on the respective copyright owner
# see the NOTICE file and/or the repository https://github.com/boschresearch/blackboxopt
#
# SPDX-License-Identifier: Apache-2.0
import time
from typing import Callable
import parameterspace as ps
from blackboxopt import Evaluation, EvaluationSpecification, Objective
from blackboxopt.optimizers.random_search import RandomSearch
SPACE = ps.ParameterSpace()
SPACE.add(ps.ContinuousParameter("p1", [-1, 1]))
def _evaluation_function(eval_spec: EvaluationSpecification) -> Evaluation:
loss = eval_spec.configuration["p1"] ** 2
return eval_spec.create_evaluation(objectives={"loss": loss})
def limit_with_max_evaluations(run_optimization_loop: Callable, loop_kwargs: dict):
evaluations = run_optimization_loop(
RandomSearch(SPACE, [Objective("loss", False)], max_steps=10),
_evaluation_function,
max_evaluations=8,
**loop_kwargs,
)
assert len(evaluations) == 8
assert all([not e.all_objectives_none for e in evaluations])
def limit_with_loop_timeout(run_optimization_loop: Callable, loop_kwargs: dict):
class SlowRandomSearch(RandomSearch):
def generate_evaluation_specification(self) -> EvaluationSpecification:
time.sleep(1)
return super().generate_evaluation_specification()
evaluations = run_optimization_loop(
SlowRandomSearch(SPACE, [Objective("loss", False)], max_steps=10),
_evaluation_function,
timeout_s=3.0,
**loop_kwargs,
)
assert len(evaluations) < 4
assert all([not e.all_objectives_none for e in evaluations])
def reporting_user_info(run_optimization_loop: Callable, loop_kwargs: dict):
def __evaluation_function(eval_spec):
return eval_spec.create_evaluation(
objectives={"loss": 1.0}, user_info={"user": "info"}
)
max_steps = 10
evaluations = run_optimization_loop(
RandomSearch(SPACE, [Objective("loss", False)], max_steps=max_steps),
__evaluation_function,
**loop_kwargs,
)
assert len(evaluations) == max_steps
assert all([not e.all_objectives_none for e in evaluations])
assert all([e.user_info is not None for e in evaluations])
ALL_REFERENCE_TESTS = [
limit_with_max_evaluations,
limit_with_loop_timeout,
reporting_user_info,
]
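# Minimal usage sketch (illustrative addition, not part of the original file):
# these reference tests are parametrized over a concrete optimization loop
# implementation. With blackboxopt's sequential loop this might look like the
# following; the import path is an assumption about the package layout.
# from blackboxopt.optimization_loops.sequential import run_optimization_loop
# for reference_test in ALL_REFERENCE_TESTS:
#     reference_test(run_optimization_loop, loop_kwargs={})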
|
python
|
from typing import Iterator # noqa
from pyramid.config import Configurator
from pyramid.httpexceptions import HTTPInternalServerError
from pyramid.response import Response
from pyramid.request import Request # noqa
import httpretty
import pytest
import _pytest # noqa
import webtest
httpretty.HTTPretty.allow_net_connect = False
class FakeError(Exception):
pass
def include_testing_views(config): # type: (Configurator) -> None
def view_ok(request): # type: (Request) -> Response
return Response(b'hello')
config.add_route(name='route_ok', pattern='/ok')
config.add_view(view_ok, route_name='route_ok')
def view_raise_error(request): # type: (Request) -> Response
raise HTTPInternalServerError()
config.add_route(name='raise_error', pattern='/raise_error')
config.add_view(view_raise_error, route_name='raise_error')
def view_raise_custom_error(request): # type: (Request) -> Response
raise FakeError()
config.add_route(name='raise_custom_error', pattern='/raise_custom_error')
config.add_view(view_raise_custom_error, route_name='raise_custom_error')
@pytest.fixture
def bugsnag_ok(): # type: () -> Iterator[None]
httpretty.enable()
httpretty.register_uri('POST', 'https://notify.bugsnag.com', status=200)
yield
httpretty.disable()
@pytest.fixture
def bugsnag_failure(): # type: () -> Iterator[None]
httpretty.enable()
httpretty.register_uri('POST', 'https://notify.bugsnag.com', status=500)
yield
httpretty.disable()
@pytest.fixture(scope='module')
def test_app(): # type: () -> webtest.TestApp
# Settings needed to get bugsnag to actually send a notification
settings = {
'bugsnag.api_key': 'FAKE_KEY',
'bugsnag.asynchronous': 'false',
'bugsnag.ignore_classes': 'pyramid.httpexceptions.HTTPNotFound',
}
config = Configurator(settings=settings)
config.include('pyramid_bugsnag')
config.include(include_testing_views)
app = config.make_wsgi_app()
return webtest.TestApp(app)
def test_ok(test_app, bugsnag_ok): # type: (webtest.TestApp, None) -> None
test_app.get('/ok')
assert not httpretty.has_request()
def test_not_found(test_app, bugsnag_ok, capsys): # type: (webtest.TestApp, None, _pytest.capsys) -> None
test_app.get('/unknown_route', status=404)
assert not httpretty.has_request()
out, err = capsys.readouterr()
assert not err
def test_raise_error(test_app, bugsnag_ok): # type: (webtest.TestApp, None) -> None
test_app.get('/raise_error', status=500)
assert httpretty.has_request()
def test_raise_custom_error(test_app, bugsnag_ok): # type: (webtest.TestApp, None) -> None
with pytest.raises(FakeError):
test_app.get('/raise_custom_error', status=500)
assert httpretty.has_request()
def test_bugsnag_failure(test_app, bugsnag_failure): # type: (webtest.TestApp, None) -> None
test_app.get('/raise_error', status=500)
assert httpretty.has_request()
|
python
|
import xmlrpc.client
import time
import math
import psutil

s = xmlrpc.client.ServerProxy("http://localhost:9700")
while True:
    per_cpu = psutil.cpu_percent()
    print("CPU percentage: " + str(per_cpu))
    if per_cpu > 20.0:
        print("CPU above 20 percent, offloading to server")
        print(str(s.asignar()) + " from server")
    else:
        x = (1254 * 12343) * math.log(299) / math.sqrt(math.pow(234435, 10))
        print(str(x) + " local")
    time.sleep(1)
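# Illustrative counterpart (an addition, not part of the original script): a
# minimal server exposing the asignar() method the client above calls on port
# 9700. The body of asignar is a placeholder assumption mirroring the client's
# local fallback computation.
# from xmlrpc.server import SimpleXMLRPCServer
# import math
#
# def asignar():
#     return (1254 * 12343) * math.log(299) / math.sqrt(math.pow(234435, 10))
#
# server = SimpleXMLRPCServer(("localhost", 9700))
# server.register_function(asignar, "asignar")
# server.serve_forever()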
|
python
|
from models.connection import get_cnx, tables
ballot_table = tables["ballot"]
ballot_info = tables["ballot_info"]
score_table = tables["scores"]
ranks_table = tables["ranks"]
class Ballot:
@staticmethod
def _bool_to_SQL(witness: bool):
return 1 if witness else 0
@staticmethod
def create_ballot(matchup_id, judge_id, presiding=False, note_only=False):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"""
INSERT INTO {ballot_table}
(matchup_id, judge_id, presiding, note_only)
VALUES (%s, %s, %s, %s)
""",
(
matchup_id,
judge_id,
Ballot._bool_to_SQL(presiding),
Ballot._bool_to_SQL(note_only),
),
)
db.commit()
return cursor.lastrowid
@staticmethod
def set_is_complete(ballot_id, complete):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"""
UPDATE {ballot_table}
SET complete = {1 if complete else 0}
WHERE id = %s
""",
(ballot_id,),
)
db.commit()
@staticmethod
def get_is_complete(ballot_id):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"SELECT complete FROM {ballot_table} WHERE id = %s", (ballot_id,)
)
(complete,) = cursor.fetchone()
return complete == 1
@staticmethod
def get_judge_for_ballot(ballot_id):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"""
SELECT judge_id
FROM {ballot_table}
WHERE id = %s
""",
(ballot_id,),
)
(judge,) = cursor.fetchone()
return judge
@staticmethod
def get_matchup_for_ballot(ballot_id):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"""
SELECT matchup_id
FROM {ballot_table}
WHERE id = %s
""",
(ballot_id,),
)
(matchup,) = cursor.fetchone()
return matchup
@staticmethod
def set_rank_for_ballot(ballot_id, witness: bool, rank, student):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"""
INSERT INTO {ranks_table} (ballot_id, rank, witness, student)
VALUES (%s, %s, %s, %s)
ON DUPLICATE KEY UPDATE student = %s
""",
(ballot_id, rank, Ballot._bool_to_SQL(witness), student, student),
)
db.commit()
@staticmethod
def get_rank_for_ballot(ballot_id, witness: bool, rank):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"""
SELECT student
FROM {ranks_table}
WHERE ballot_id = %s AND witness = %s AND rank = %s
""",
(ballot_id, Ballot._bool_to_SQL(witness), rank),
)
try:
(sid,) = cursor.fetchone()
return sid
except:
return None
@staticmethod
def get_ballot(ballot_id):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"""
SELECT presiding, note_only
FROM {ballot_table}
WHERE id = %s
""",
(ballot_id,),
)
(preside, note_only) = cursor.fetchone()
return {"presiding": preside == 1, "note_only": note_only == 1}
@staticmethod
def set_score_only(ballot_id, score_only):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"""
UPDATE {ballot_table}
SET note_only = %s
WHERE id = %s
""",
(Ballot._bool_to_SQL(score_only), ballot_id),
)
db.commit()
@staticmethod
def delete_ballot(ballot_id):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"""
DELETE FROM {ballot_table}
WHERE id = %s
""",
(ballot_id,),
)
db.commit()
return True
@staticmethod
def is_valid(ballot_id):
with get_cnx() as db:
cursor = db.cursor()
cursor.execute(
f"""
SELECT COUNT(*)
FROM {ballot_table} B
                INNER JOIN {score_table} S ON B.id = S.ballot_id
WHERE B.id = %s
""",
(ballot_id,)
)
(num,) = cursor.fetchone()
            assert num == 28
cursor.execute(
f"""
SELECT COUNT(*)
FROM {ballot_table} B
INNER JOIN {ranks_table} R ON B.id = R.ballot_id
WHERE B.id = %s
""",
(ballot_id,)
)
(ranks_cnt,) = cursor.fetchone()
|
python
|
"""
Martin Kersner, [email protected]
seoulai.com
2018
Adapted by Gabriela B. to work with python 2.7 and ROS
"""
from base import Constants
from base import Piece
import rospy #to print debug info
class Rules(object):
@staticmethod
def get_opponent_type(ptype) :
"""Get a type of opponent agent.
Note: In checkers there is only one pair of agents competing with each other.
Args:
ptype: Type of piece.
Returns:
opponent_type: Type of opponent agent.
"""
if ptype == Constants().DARK:
opponent_type = Constants().LIGHT
else:
opponent_type = Constants().DARK
return opponent_type
@staticmethod
def get_positions(
board_list,
ptype,
board_size) :
"""Get positions of all pices of given type in given board.
Args:
board_list: Information about positions of pieces.
ptype: Type of piece.
board_size:
Returns:
positions: Positions of pieces for given agent type in given board.
"""
positions = []
for row in range(board_size):
for col in range(board_size):
pos = board_list[row][col]
if pos is not None and pos.ptype == ptype:
positions.append((row, col))
return positions
@staticmethod
def get_valid_moves_simple(
board_list,
from_row,
from_col) :
"""Generate valid moves for given position with respect to the current state of game.
simple: move one square
Args:
board_list: Information about positions of pieces.
from_row: Row of board of piece location.
from_col: Column of board of piece location.
Returns:
List of (row, column) tuples representing valid moves for given piece location at current
state of board.
"""
def validate_move_wrapper(to_row_col):
return Rules.validate_move(board_list, from_row, from_col, *to_row_col)
return list(filter(validate_move_wrapper, Rules.generate_all_moves_simple(from_row, from_col)))
@staticmethod
def get_valid_moves_jump(
board_list,
from_row,
from_col) :
"""Generate valid moves for given position with respect to the current state of game.
        jump: move two squares, jumping over an opponent piece
Args:
board_list: Information about positions of pieces.
from_row: Row of board of piece location.
from_col: Column of board of piece location.
Returns:
List of (row, column) tuples representing valid moves for given piece location at current
state of board.
"""
def validate_move_wrapper(to_row_col):
return Rules.validate_move(board_list, from_row, from_col, *to_row_col)
return list(filter(validate_move_wrapper, Rules.generate_all_moves_jump(from_row, from_col)))
@staticmethod
def generate_valid_moves(
board_list,
ptype,
board_size) :
"""Get all possible valid moves for agent of given type.
Args:
board_list: Information about positions of pieces.
ptype: Type of piece.
board_size:
Returns:
moves: Dictionary with keys tuple(row, col) of pieces with at least one valid move. Values
of dictionary are represented as a list of tuples as a new valid piece coordinates.
"""
moves = {}
double_moves={}
positions = Rules.get_positions(board_list, ptype, board_size)
for row, col in positions:
temp_moves_jump = Rules.get_valid_moves_jump(board_list, row, col)
            if len(temp_moves_jump) > 0:  # first find jumps; if there are jumps, do not look for simple moves
double_moves[(row, col)] = temp_moves_jump
elif len(double_moves)==0:
temp_moves = Rules.get_valid_moves_simple(board_list, row, col)
if len(temp_moves) > 0:
moves[(row, col)] = temp_moves
#add rule: when you can jump a piece, you must do it
if len(double_moves)>0:
return double_moves
return moves
@staticmethod
def validate_move(
board_list,
from_row,
from_col,
to_row,
to_col) :
"""Validate move given by current and desired piece coordinates.
Args:
board_list: Information about positions of pieces.
from_row: Row of board of original piece location.
from_col: Column of board of original piece location.
            to_row: Row of board of desired piece location.
to_col: Column of board of desired piece location.
Returns:
True if given move is valid, otherwise false.
"""
# not among available moves
if( (to_row, to_col) not in Rules.generate_all_moves_simple(from_row, from_col)) and ((to_row, to_col) not in Rules.generate_all_moves_jump(from_row, from_col)):
return False
# can't move piece from outside of board
if from_row < 0 or from_col < 0 or from_row > 7 or from_col > 7:
return False
# cant move out of board
if to_row < 0 or to_col < 0 or to_row > 7 or to_col > 7:
return False
# target square must be empty
if board_list[to_row][to_col] is not None:
return False
# can't move empty square
p = board_list[from_row][from_col]
if p is None:
return False
# cant move in opposite direction, except king
if p.direction == Constants().UP and from_row < to_row and not p.king:
return False
if p.direction == Constants().DOWN and from_row > to_row and not p.king:
return False
# cant jump over itself or empty square
between_row, between_col = Rules.get_between_position(from_row, from_col, to_row, to_col)
if between_row is not None and between_col is not None:
pp = board_list[between_row][between_col]
if pp is None or pp.ptype == p.ptype:
return False
return True
@staticmethod
def check_was_jump(board_list, from_row, from_col, to_row, to_col):
"""
        Check if the movement is a jump over an enemy piece
        (assuming `from` refers to a piece of the current player).
        Returns a boolean.
"""
# rospy.loginfo('from (%s ,%s), to: (%s , %s)',from_row,from_col,to_row,to_col)
#current player
p = board_list[from_row][from_col]
between_row, between_col = Rules.get_between_position(from_row, from_col, to_row, to_col)
if between_row is not None and between_col is not None:
pp = board_list[between_row][between_col]
if pp is not None and pp.ptype != p.ptype:
return True
return False
@staticmethod
def check_jump_is_possible(board_list, to_row, to_col):
"""
        check if more jump movements are possible from where the piece landed
"""
new_from_row=to_row
new_from_col=to_col
        # check if there are valid jumps
jumps_list=Rules.get_valid_moves_jump(board_list, new_from_row, new_from_col)#valid jumps where it landed
return len(jumps_list)>0
@staticmethod
def check_valid_move(board_list, ptype,from_row, from_col, to_row,to_col):
"""
Check if movement is valid, and is being performed with current user checker
"""
# don't move with opponent's piece
if ptype != board_list[from_row][from_col].ptype:
return False
#check if movement is valid
return Rules.validate_move(board_list, from_row, from_col, to_row, to_col)
@staticmethod
def check_become_king(board_list, to_row,to_col,UP=Constants().UP,DOWN=Constants().DOWN,size=8):
"""
        Check if the new position of the checker makes it a king.
        size: how many cells the board has per side
        UP, DOWN: numerical representations of the up and down directions.
"""
p = board_list[to_row][to_col]
return (to_row == 0 and p.direction == UP) or (to_row == size-1 and p.direction == DOWN)
@staticmethod
def check_end_game(board_list,ptype, board_size):
"""
check if end of game
ptype= type of current player
"""
opponent_ptype=Rules.get_opponent_type(ptype)
        # has the opponent lost all pieces?
        opponent_no_pieces = len(Rules.get_positions(board_list, opponent_ptype, board_size)) == 0
        if opponent_no_pieces:
            rospy.loginfo('Rules: End game because opponent has no pieces')
            return True
        # check if the opponent can move
        opponent_cant_move = len(Rules.generate_valid_moves(board_list, opponent_ptype, board_size)) == 0
        if opponent_cant_move:
            rospy.loginfo('Rules: End game because opponent can not move')
return True
return False
@staticmethod
def get_between_position(
from_row,
from_col,
to_row,
to_col) :
"""Get position of square over which was move performed.
Args:
board_list: Information about positions of pieces.
from_row: Row of board of original piece location.
from_col: Column of board of original piece location.
to_row: Row of board of desired piece location.
to_col: Column of board of desired piece location.
Returns:
            Position of square expressed by tuple(row, col) if the length of the move was 2, otherwise
tuple(None, None).
"""
if abs(from_row-to_row) == 2 and abs(from_col-to_col) == 2:
if from_row-to_row > 0: # UP
if from_col-to_col > 0: # LEFT
return from_row-1, from_col-1
else: # RIGHT
return from_row-1, from_col+1
else: # DOWN
if from_col-to_col > 0: # LEFT
return from_row+1, from_col-1
else: # RIGHT
return from_row+1, from_col+1
else:
return None, None
@staticmethod
def generate_all_moves_simple(
from_row,
from_col) :
"""Generate all moves for given board position. Some moves can be invalid.
Args:
from_row: Row of board of piece location.
from_col: Column of board of piece location.
Returns:
moves: Generated moves for given position.
"""
moves = [
(from_row-1, from_col-1),
(from_row+1, from_col-1),
(from_row-1, from_col+1),
(from_row+1, from_col+1),
]
return moves
@staticmethod
def generate_all_moves_jump(
from_row,
from_col ) :
"""Generate all moves for given board position. Some moves can be invalid.
Args:
from_row: Row of board of piece location.
from_col: Column of board of piece location.
Returns:
moves: Generated moves for given position.
"""
moves = [
(from_row-2, from_col-2),
(from_row+2, from_col-2),
(from_row-2, from_col+2),
(from_row+2, from_col+2),
]
return moves
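# Worked example (illustrative addition, not from the original source): a
# two-square diagonal move passes over exactly one square, which
# get_between_position recovers; shorter moves yield (None, None).
# Rules.get_between_position(2, 2, 4, 4)  # -> (3, 3)
# Rules.get_between_position(2, 2, 3, 3)  # -> (None, None)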
|
python
|
"""Bubble sort Implementation in python."""
def bubble_sort(unsorted_list):
"""Iterate through an unsorted list and swap elements accordingly."""
# make a copy of the list
unsorted_copy = unsorted_list[:]
for index in range(len(unsorted_list) - 1):
if unsorted_copy[index] > unsorted_copy[index + 1]:
a, b = unsorted_copy[index], unsorted_copy[index + 1]
unsorted_copy[index + 1], unsorted_copy[index] = a, b
return bubble_sort(unsorted_copy)
return unsorted_copy
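if __name__ == "__main__":
    # Illustrative usage (an addition, not part of the original module).
    assert bubble_sort([5, 1, 4, 2, 8]) == [1, 2, 4, 5, 8]
    assert bubble_sort([]) == []
    print(bubble_sort([3, 0, -1, 7]))  # [-1, 0, 3, 7]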
|
python
|
"""Builds a pip package suitable for redistribution.
Adapted from tensorflow/tools/pip_package/build_pip_package.sh. This might have
to change if Bazel changes how it modifies paths.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import glob
import os
import shutil
import subprocess
import sys
import tempfile
import dragnn
import tensorflow
def main():
cmd_args = argparse.ArgumentParser()
cmd_args.add_argument("--include-tensorflow", action="store_true")
cmd_args.add_argument("--output-dir", required=True)
args = cmd_args.parse_args()
if not os.path.isdir(args.output_dir):
raise EnvironmentError(
"Output directory {} doesn't exist".format(args.output_dir))
elif not args.output_dir.startswith("/"):
raise EnvironmentError("Please pass an absolute path to --output-dir.")
tmp_packaging = tempfile.mkdtemp()
runfiles, = (path for path in sys.path
if path.endswith("build_pip_package.runfiles"))
# Use the dragnn and tensorflow modules to resolve specific paths in the
# runfiles directory. Current Bazel puts dragnn in a __main__ subdirectory,
# for example.
lib_path = os.path.abspath(dragnn.__file__)
if runfiles not in lib_path:
raise EnvironmentError("WARNING: Unexpected PYTHONPATH set by Bazel :(")
base_dir = os.path.dirname(os.path.dirname(lib_path))
tensorflow_dir = os.path.dirname(tensorflow.__file__)
if runfiles not in tensorflow_dir:
raise EnvironmentError("WARNING: Unexpected tf PYTHONPATH set by Bazel :(")
# Copy the files.
subprocess.check_call([
"cp", "-r",
"--no-preserve=all", os.path.join(base_dir, "dragnn"), os.path.join(
base_dir, "syntaxnet"), tmp_packaging
])
if args.include_tensorflow:
subprocess.check_call(
["cp", "-r", "--no-preserve=all", tensorflow_dir, tmp_packaging])
shutil.copy(
os.path.join(base_dir, "dragnn/tools/oss_setup.py"),
os.path.join(tmp_packaging, "setup.py"))
subprocess.check_output(
["python", "setup.py", "bdist_wheel"], cwd=tmp_packaging)
wheel, = glob.glob("{}/*.whl".format(os.path.join(tmp_packaging, "dist")))
shutil.move(wheel, args.output_dir)
print(
"Wrote {}".format(os.path.join(args.output_dir, os.path.basename(wheel))))
if __name__ == "__main__":
main()
|
python
|
"""
TODO: Insert player name in combat text
"""
import random as random
import threading as threading
import config as config
import items as items
lock = threading.Lock()
def link_terminal(terminal):
global terminal_output
terminal_output = terminal
def success(strength, attack_modifier, defense, att_random):
return int((strength + attack_modifier - defense + att_random - 100))
def damage(success, constitution):
return int(success / constitution)
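# Worked example (illustrative values): with strength=120, attack_modifier=10,
# defense=20 and att_random=50, success() returns 120 + 10 - 20 + 50 - 100 = 60,
# and damage(60, constitution=3) returns int(60 / 3) = 20.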
def get_experience(character_level, target_level):
if character_level > target_level + 5:
return 0
level_differential = character_level - target_level
base_experience = float(config.EXPERIENCE_FILE.at[target_level,"Experience_Per_Enemy"])
    if level_differential >= 0:
        adjusted_experience = base_experience - 0.2 * base_experience * level_differential
    else:
        adjusted_experience = base_experience + 0.1 * base_experience * level_differential * -1
random_modifier = -1 + (random.random() * 2)
experience = int(adjusted_experience * random_modifier)
return experience
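# Illustrative example: a level-5 character fighting a level-3 target has a
# level_differential of 2, so adjusted_experience = base - 0.2 * base * 2,
# i.e. 60% of the base value, before the random modifier in [-1, 1) is applied.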
def do_physical_damage_to_enemy(self, target):
if isinstance(self.right_hand_inv, items.Weapon):
attack_modifier = self.right_hand_inv.attack_modifier
else:
attack_modifier = 0
with lock:
att_random = random.randint(0,100)
att_success = success(self.strength, attack_modifier, target.defense, att_random)
att_damage = damage(att_success, self.constitution)
terminal_output.print_text("""\
{} attacks {}!
STR {} + ATTMOD {} - DEF {} + RAND {} = {}\
""".format(self.name, target.name, self.strength, attack_modifier, target.defense, att_random, att_success))
if att_damage < 0:
terminal_output.print_text("""\
{} evades the attack.\
""".format(target.name))
else:
target.health = target.health - att_damage
terminal_output.print_text("""\
{} damages {} by {}.\
""".format(self.name, target.name, att_damage))
if target.health <= 0:
target.is_dead()
self.experience += get_experience(character_level=self.level, target_level=target.level)
return target
def do_physical_damage_to_character(self, character):
if isinstance(self.right_hand_inv, items.Weapon):
attack_modifier = self.right_hand_inv.attack_modifier
else:
attack_modifier = 0
with lock:
att_random = random.randint(0,100)
att_success = success(self.strength, attack_modifier, character.defense, att_random)
att_damage = damage(att_success, self.constitution)
terminal_output.print_text("""\
{} attacks {}!
STR {} + ATTMOD {} - DEF {} + RAND {} = {}\
""".format(self.name, character.name, self.strength, attack_modifier, character.defense, att_random, att_success))
if att_damage < 0:
terminal_output.print_text("""\
{} evades the attack.\
""".format(character.name))
else:
character.health = character.health - att_damage
terminal_output.print_text("""\
{} damages {} by {}.\
""".format(self.name, character.name, att_damage))
if character.health <= 0:
character.is_dead()
return character
|
python
|
# Generated by Django 3.2.3 on 2021-05-26 16:20
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('TrafficMan', '0003_alter_violationprocess_options'),
]
operations = [
migrations.AlterField(
model_name='vehicle',
name='plate_number',
field=models.CharField(max_length=20, null=True, unique=True, verbose_name='车牌号码'),
),
]
|
python
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def add_tags(command, tags):
return command + ' --tags {}'.format(tags)
# pylint: disable=too-many-public-methods
class CdnScenarioMixin(object):
def profile_create_cmd(self, group, name, tags=None, checks=None, options=None, sku=None):
command = 'cdn profile create -g {} -n {}'.format(group, name)
if tags:
command = command + ' --tags {}'.format(tags)
if options:
command = command + ' ' + options
if sku:
command = command + ' --sku {}'.format(sku)
return self.cmd(command, checks)
def profile_update_cmd(self, group, name, tags=None, checks=None):
command = 'cdn profile update -g {} -n {}'.format(group, name)
if tags:
command = command + ' --tags {}'.format(tags)
return self.cmd(command, checks)
def profile_list_cmd(self, group, checks=None):
command = 'cdn profile list -g {}'.format(group)
return self.cmd(command, checks)
def profile_delete_cmd(self, group, name, checks=None):
command = 'cdn profile delete -g {} -n {}'.format(group, name)
return self.cmd(command, checks)
def endpoint_create_cmd(self, group, name, profile_name, origin, tags=None, checks=None):
cmd_txt = 'cdn endpoint create -g {} -n {} --profile-name {} --origin {}'
command = cmd_txt.format(group,
name,
profile_name,
origin)
if tags:
command = add_tags(command, tags)
return self.cmd(command, checks)
def endpoint_update_cmd(self, group, name, profile_name, tags=None, checks=None, options=None):
command = 'cdn endpoint update -g {} -n {} --profile-name {}'.format(group,
name,
profile_name)
if tags:
command = add_tags(command, tags)
if options:
command = command + ' ' + options
return self.cmd(command, checks)
def endpoint_start_cmd(self, group, name, profile_name, checks=None, options=None):
command = 'cdn endpoint start -g {} -n {} --profile-name {}'.format(group,
name,
profile_name)
if options:
command = command + ' ' + options
return self.cmd(command, checks)
def endpoint_stop_cmd(self, group, name, profile_name, checks=None, options=None):
command = 'cdn endpoint stop -g {} -n {} --profile-name {}'.format(group,
name,
profile_name)
if options:
command = command + ' ' + options
return self.cmd(command, checks)
def endpoint_show_cmd(self, group, name, profile_name, checks=None, options=None):
command = 'cdn endpoint show -g {} -n {} --profile-name {}'.format(group,
name,
profile_name)
if options:
command = command + ' ' + options
return self.cmd(command, checks)
def endpoint_load_cmd(self, group, name, profile_name, content_paths, checks=None):
msg = 'cdn endpoint load -g {} -n {} --profile-name {} --content-paths {}'
command = msg.format(group,
name,
profile_name,
' '.join(content_paths))
return self.cmd(command, checks)
def endpoint_add_rule_cmd(self, group, name, profile_name, checks=None):
msg = 'az cdn endpoint rule add -g {} -n {} --profile-name {} --order 1 --rule-name r1\
--match-variable RemoteAddress --operator GeoMatch --match-values "TH"\
--action-name CacheExpiration --cache-behavior BypassCache'
command = msg.format(group,
name,
profile_name)
return self.cmd(command, checks)
def endpoint_add_condition_cmd(self, group, name, profile_name, checks=None, options=None):
command = 'cdn endpoint rule condition add -g {} -n {} --profile-name {}'.format(group,
name,
profile_name)
if options:
command = command + ' ' + options
return self.cmd(command, checks)
def endpoint_add_action_cmd(self, group, name, profile_name, checks=None, options=None):
command = 'cdn endpoint rule action add -g {} -n {} --profile-name {}'.format(group,
name,
profile_name)
if options:
command = command + ' ' + options
return self.cmd(command, checks)
def endpoint_remove_rule_cmd(self, group, name, profile_name, checks=None, options=None):
command = 'cdn endpoint rule remove -g {} -n {} --profile-name {}'.format(group,
name,
profile_name)
if options:
command = command + ' ' + options
return self.cmd(command, checks)
def endpoint_remove_condition_cmd(self, group, name, profile_name, checks=None, options=None):
command = 'cdn endpoint rule condition remove -g {} -n {} --profile-name {}'.format(group,
name,
profile_name)
if options:
command = command + ' ' + options
return self.cmd(command, checks)
def endpoint_remove_action_cmd(self, group, name, profile_name, checks=None, options=None):
command = 'cdn endpoint rule action remove -g {} -n {} --profile-name {}'.format(group,
name,
profile_name)
if options:
command = command + ' ' + options
return self.cmd(command, checks)
def endpoint_purge_cmd(self, group, name, profile_name, content_paths, checks=None):
msg = 'cdn endpoint purge -g {} -n {} --profile-name {} --content-paths {}'
command = msg.format(group,
name,
profile_name,
' '.join(content_paths))
return self.cmd(command, checks)
def endpoint_list_cmd(self, group, profile_name, checks=None, expect_failure=False):
command = 'cdn endpoint list -g {} --profile-name {}'.format(group, profile_name)
return self.cmd(command, checks, expect_failure=expect_failure)
def endpoint_delete_cmd(self, group, name, profile_name, checks=None):
command = 'cdn endpoint delete -g {} -n {} --profile-name {}'.format(group,
name,
profile_name)
return self.cmd(command, checks)
def origin_list_cmd(self, group, endpoint_name, profile_name, checks=None):
msg = 'cdn origin list -g {} --endpoint-name {} --profile-name {}'
command = msg.format(group,
endpoint_name,
profile_name)
return self.cmd(command, checks)
def origin_show_cmd(self, group, endpoint_name, profile_name, name, checks=None):
msg = 'cdn origin show -g {} -n {} --endpoint-name {} --profile-name {}'
command = msg.format(group,
name,
endpoint_name,
profile_name)
return self.cmd(command, checks)
|
python
|
#!/usr/bin/env python
'''
We get the lidar point cloud and use it to determine if there are any obstacles ahead
Author:
Sleiman Safaoui & Kevin Daniel
Email:
[email protected]
Github:
The-SS
Date:
Oct 3, 2018
'''
# python
from __future__ import print_function
import numpy as np
import copy
import math
from numpy import pi
# ROS
import rospy
from sensor_msgs.msg import LaserScan
from std_msgs.msg import Bool
class ScanSub:
'''
Subscribes to the lidar laser scan topic
'''
def __init__(self):
self.scan_data = []
        self.scan_sub = rospy.Subscriber("/scan", LaserScan, self.callback, queue_size=1)
def callback(self, data):
self.scan_data = data
def get_scan(self):
return self.scan_data
class ScanDetect:
'''
Uses the obtained laser scan to determine if there are any obstacles ahead
'''
def __init__(self, ang_range = 20.):
        self.ang_range = ang_range  # range of angles to sweep (in degrees, about the forward direction)
#self.ang_min = -float(self.ang_range)/2.0 # lower bound for ang_range
#self.ang_max = +float(self.ang_range)/2.0 # upper bound for ang_range
self.scan = [] # scan data
self.detected_points = [] # ranges detected in the area to scan
self.detected_points_ang = [] # angles of points detected in the area to scan
def scan_area(self, scan):
if scan == []: # no data
return [],[]
self.scan = scan
self.detected_points = [] # reset detected points
self.detected_points_ang = [] # reset detected points
if (scan.angle_min == scan.angle_max): # no lidar data
return [],[]
if (self.ang_range > 350):
self.detected_points = scan.ranges
self.detected_points_ang = np.arange(0, 360, scan.angle_increment).tolist()
return self.detected_points, self.detected_points_ang
half_ang = float(self.ang_range)/2.0
first_half_end = 0.0 + half_ang # first half angle interval: 0 --> first_half end
second_half_start = math.degrees(2 * pi) - half_ang # second half angle interval: second_half_start --> 2*PI
first_half_cnt = math.floor((first_half_end - 0.0) / math.degrees(scan.angle_increment)) + 1 # number of angle increments in first half
second_half_cnt = math.floor((math.degrees(2* pi) - second_half_start) / math.degrees(scan.angle_increment)) # number of angle increments in second half
if (len(scan.ranges) < (first_half_cnt + second_half_cnt)):
return [], []
for i in range(0, int(first_half_cnt)):
self.detected_points.append(scan.ranges[i])
self.detected_points_ang.append(i * scan.angle_increment)
for i in range(int(math.ceil(second_half_start)), int(math.ceil(second_half_start) + second_half_cnt)):
self.detected_points.append(scan.ranges[i])
self.detected_points_ang.append(i * scan.angle_increment)
return self.detected_points, self.detected_points_ang
def check_flag(self, dist, min_dist):
collision_flag = False
for d in dist:
if d < min_dist:
collision_flag = True
break
return collision_flag
'''
ang_min = self.ang_min
ang_max = self.ang_max
if (ang_min < scan.angle_min):
ang_min_idx = 0
ang_min = scan.angle_min
else:
ang_min_idx = math.ceil((ang_min-scan.angle_min)/scan.angle_increment) + 1 # number of increments between the lidar min. angle and the desired min. angle
ang_min = ang_min_idx * scan.angle_increment
if (ang_max > scan.angle_max):
ang_max_idx = len(scan.ranges)
ang_max = scan.angle_max
else:
ang_max_idx = math.floor((ang_max-scan.angle_min)/scan.angle_increment) + 1 # number of increments between the lidar min. angle and the desired max. angle
ang_max = ang_max_idx * scan.angle_increment
if ang_min_idx > ang_max_idx:
return [],[]
for i in range(int(ang_min_idx), int(ang_max_idx)+1):
self.detected_points.append(scan.ranges[i])
self.detected_points_ang.append(scan.angle_min + i * math.degrees(scan.angle_increment))
#return self.detected_points, self.detected_points_ang
return scan.ranges, []
'''
class FlagPub:
'''
Publishes a boolean value that specifies if an object is within collision range. Flag is set to true if there is a point within the specified angle range of the LIDAR that is below the minimum distance.
'''
def __init__(self):
self.collision_flag_pub = rospy.Publisher('/lidar/collision_flag', Bool, queue_size = 1)
def publish_flag(self, collision_flag):
self.collision_flag_pub.publish(collision_flag)
'''
Main function
'''
def main():
rospy.init_node("lidar_collision_avoidance")
rate = rospy.Rate(15)
nodename = "/lidar_collision_avoidance"
min_dist = rospy.get_param(nodename + "/min_dist")
old_seq = -1
# Initialize nodes
scan_sub = ScanSub()
scan_detect = ScanDetect()
flag_pub = FlagPub()
while not rospy.is_shutdown():
scan = scan_sub.get_scan() #get laser scan
if (scan != []): # if scan was obtained
if (scan.header.seq != old_seq): # new data obtained
old_seq = scan.header.seq
#detect using scan
dists, angs = scan_detect.scan_area(scan)
flag = scan_detect.check_flag(dists, min_dist)
flag_pub.publish_flag(flag)
if __name__ == "__main__":
try:
main()
except rospy.ROSInterruptException as e:
rospy.logfatal("ROS interrupt. Shutting down lidar_collision_avoidance node")
print (e)
pass
|
python
|
from b3get import to_numpy
import numpy as np
def test_available():
assert dir(to_numpy)
def test_wrong_ds():
assert to_numpy(43) == (None, None)
def test_008():
imgs, labs = to_numpy(8)
assert len(imgs) > 0
assert len(imgs) == 24
assert isinstance(imgs[0], np.ndarray)
assert imgs[0].shape == (512, 512)
assert imgs[0].dtype == np.uint8
assert isinstance(labs[0], np.ndarray)
assert labs[0].shape == (512, 512)
    assert labs[0].dtype == np.uint8
|
python
|
"""/**
* @author [Jai Miles]
* @email [[email protected]]
* @create date 2020-05-20 13:17:50
* @modify date 2020-08-15 14:38:03
* @desc [
Handlers for mode statistics.
- ModeStatsHandler
    TODO: Consider refactoring this into different handlers??
]
*/
"""
##########
# Imports
##########
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.utils import is_intent_name
from ask_sdk_model import ui, Response
from ask_sdk_model.ui import SimpleCard
from logs import logger, log_func_name
from pause.pauser import Pauser
from answer_response.confirmation_utils import ConfirmUtils
from slots.slot_utils import SlotUtils
from skill_card.card_funcs import CardFuncs
from aux_utils.last_prompt import LastPrompt
from helper.help_utils import HelpUtils
from aux_utils.create_tuple_message_clauses import get_linear_nlg
from players.players_dict import PlayerDict
from stats.mode_speech_utils import ModeSpeech
import speed_challenge.data
##########
# Imports
##########
class ModeStatsHandler(AbstractRequestHandler):
def can_handle(self, handler_input):
return is_intent_name("ModeStatsIntent")(handler_input)
def handle(self, handler_input):
logger.info("HAN ModeStatsHandler")
attr = handler_input.attributes_manager.session_attributes
speech_list = []
player_obj = PlayerDict.load_player_obj(handler_input)
mode = attr.get('mode', None)
activity = SlotUtils.get_resolved_value(handler_input, 'activity')
activity = activity if activity else mode
difficulty = True
if activity == 'survival':
speech_list += ModeSpeech.get_ms_sm_stats(player_obj, activity)
elif activity == 'speed':
difficulty = SlotUtils.get_resolved_value(handler_input, 'difficulty')
sc_difficulty = difficulty if difficulty else attr.get('sc_difficulty', None)
if (difficulty in speed_challenge.data.SC_DIFFICULTIES):
speech_list += ModeSpeech.get_ms_sc_stats(player_obj, sc_difficulty, activity)
else:
reprompt = ModeSpeech.get_ms_sc_what_difficulty()
speech_list += reprompt
reprompt = get_linear_nlg(reprompt)
else:
ms_can_tell_record = ModeSpeech.get_ms_can_tell_record()
ms_example_saying = ModeSpeech.get_example_hear_records()
speech_list = [ms_can_tell_record, 2, ms_example_saying]
reprompt = HelpUtils.get_q_what_todo()
        if activity in ('survival', 'speed') and difficulty:
            prompt, reprompt = (
                LastPrompt.get_last_prompt(handler_input) for _ in range(2))
            speech_list += Pauser.make_ms_pause_level_list(3, prompt)
        speech = get_linear_nlg(speech_list)
        card_title, card_text = CardFuncs.get_card_info(handler_input, speech)
        return (
            handler_input.response_builder
            .speak(speech)
            .ask(reprompt)
            .set_card(SimpleCard(card_title, card_text))
            .response)
|
python
|
import re
class Particle:
def __init__(self, number, position, velocity, acceleration):
self.number = number
self.position = position
self.velocity = velocity
self.acceleration = acceleration
self.destroyed = False
def move(self):
for i in range(len(self.acceleration)):
self.velocity[i] += self.acceleration[i]
for i in range(len(self.velocity)):
self.position[i] += self.velocity[i]
def distance(self):
x, y, z = self.position
return abs(x) + abs(y) + abs(z)
def get_positions_count(particles):
positions_count = {}
for particle in particles:
        # use a separator so e.g. positions (1, 23, 4) and (12, 3, 4) map to distinct keys
        position_str = ','.join(map(str, particle.position))
if not positions_count.get(position_str):
positions_count[position_str] = [particle]
else:
positions_count[position_str].append(particle)
return positions_count
def remaining_particles(particles, positions_count):
for position in positions_count:
if len(positions_count[position]) > 1:
for particle in positions_count[position]:
particle.destroyed = True
return [particle for particle in particles if not particle.destroyed]
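# Illustrative example: two particles both at position [1, 2, 3] produce the
# same key '1,2,3' in get_positions_count, so remaining_particles marks both
# as destroyed and filters them out of the returned list.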
def main():
f = open('input.txt')
particles, colliding_particles = [], []
for i, line in enumerate(f):
m = re.match(
r'p=<(-?\d+),(-?\d+),(-?\d+)>, v=<(-?\d+),(-?\d+),(-?\d+)>, a=<(-?\d+),(-?\d+),(-?\d+)>', line)
particles.append(Particle(i, [int(m[1]), int(m[2]), int(m[3])], [int(
m[4]), int(m[5]), int(m[6])], [int(m[7]), int(m[8]), int(m[9])]))
colliding_particles.append(Particle(i, [int(m[1]), int(m[2]), int(m[3])], [int(
m[4]), int(m[5]), int(m[6])], [int(m[7]), int(m[8]), int(m[9])]))
f.close()
finished = 1000
min_distance = 2 ** 32 - 1
while finished != 0:
for particle in particles:
particle.move()
finished -= 1
for particle in particles:
distance = particle.distance()
if min_distance > distance:
min_distance = distance
finished += 1
break
particles.sort(key=lambda p: p.distance())
print(f'Closest particle (1): { particles[0].number }')
remaining = len(colliding_particles)
finished = 100
while finished != 0:
for particle in colliding_particles:
particle.move()
positions_count = get_positions_count(colliding_particles)
colliding_particles = remaining_particles(
colliding_particles, positions_count)
if remaining == len(colliding_particles):
finished -= 1
else:
remaining = len(colliding_particles)
print(f'Remaining particle (2): { remaining }')
if __name__ == '__main__':
main()
|
python
|
"""
Python code for hashing function
"""
def search(x):
    # column 0 tracks non-negative values, column 1 tracks negative values
    if x >= 0:
        return has[x][0] == 1
    # x is negative: look it up by absolute value in column 1
    return has[abs(x)][1] == 1
def insert(a, n):
for i in range(0, n):
if a[i] >= 0:
has[a[i]][0] = 1
else:
has[abs(a[i])][1] = 1
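# Illustrative example: after insert([-1, 9], 2), has[1][1] == 1 records -1 and
# has[9][0] == 1 records 9, so search(9) is True while search(-9) is False.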
if __name__ == '__main__':
a = [-1, 9, -5, -8, -5, -2]
n = len(a)
max = 1000
has = [[0 for i in range(2)]
for j in range(max+1)]
insert(a, n)
x = 10
if search(x) is True:
print('\nPresent\n')
else:
print('\nNot present\n')
|
python
|
# -*- coding: utf-8 -*-
import numpy as np
from votesim.votemethods.ranked import borda
def test_wiki_1():
"""Test wiki example,
https://en.wikipedia.org/wiki/Borda_count, Mar-3-2021
"""
# Andrew Brian Catherine David
# A B C D
d = (
[[1, 3, 2, 4]] * 51 +
[[4, 2, 1, 3]] * 5 +
[[4, 1, 2, 3]] * 23 +
[[4, 3, 2, 1]] * 21
)
d = np.array(d)
winners, ties, output = borda(d)
assert len(winners) == 1
assert winners[0] == 2
correct_tally = np.array([153, 151, 205, 91])
assert np.all(output['tally'] == correct_tally)
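    # Sanity check on the tally arithmetic: each ballot awards (4 - rank)
    # points, so Catherine's 205 = 51*2 + 5*3 + 23*2 + 21*2.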
return
def test_wiki_2():
"""Test wiki example #2.
https://en.wikipedia.org/wiki/Borda_count, Mar-3-2021
"""
# M N C K
d = (
[[1, 2, 3, 4]]*42 +
[[4, 1, 2, 3]]*26 +
[[4, 3, 1, 2]]*15 +
[[4, 3, 2, 1]]*17
)
d = np.array(d)
winners, ties, output = borda(d)
assert len(winners) == 1
assert winners[0] == 1
correct_tally = np.array([126, 194, 173, 107])
assert np.all(output['tally'] == correct_tally)
if __name__ == '__main__':
test_wiki_1()
test_wiki_2()
|
python
|
from pypy.rpython.memory.gctransform.test.test_transform import rtype
from pypy.rpython.memory.gctransform.stacklessframework import \
StacklessFrameworkGCTransformer
from pypy.rpython.lltypesystem import lltype
from pypy.translator.c.gc import StacklessFrameworkGcPolicy
from pypy.translator.translator import TranslationContext, graphof
from pypy import conftest
import py
class StacklessFrameworkGcPolicy2(StacklessFrameworkGcPolicy):
transformerclass = StacklessFrameworkGCTransformer
def test_stackless_simple():
def g(x):
return x + 1
class A(object):
pass
def entrypoint(argv):
a = A()
a.b = g(1)
return a.b
from pypy.rpython.llinterp import LLInterpreter
from pypy.translator.c.genc import CStandaloneBuilder
from pypy.translator.c import gc
from pypy.annotation.listdef import s_list_of_strings
t = rtype(entrypoint, [s_list_of_strings])
cbuild = CStandaloneBuilder(t, entrypoint, t.config,
gcpolicy=StacklessFrameworkGcPolicy2)
db = cbuild.generate_graphs_for_llinterp()
entrypointptr = cbuild.getentrypointptr()
entrygraph = entrypointptr._obj.graph
r_list_of_strings = t.rtyper.getrepr(s_list_of_strings)
ll_argv = r_list_of_strings.convert_const([])
llinterp = LLInterpreter(t.rtyper)
# FIIIIISH
setupgraph = db.gctransformer.frameworkgc_setup_ptr.value._obj.graph
llinterp.eval_graph(setupgraph, [])
res = llinterp.eval_graph(entrygraph, [ll_argv])
assert res == 2
|
python
|
from applauncher.kernel import ConfigurationReadyEvent
import redis
class RedisBundle(object):
def __init__(self):
self.config_mapping = {
'redis': {
'host': {'type': 'string', 'default': 'localhost'},
'port': {'type': 'integer', 'default': 6379},
'db': {'type': 'integer', 'default': 0},
'password': {'type': 'string', 'nullable': True},
'socket_timeout': {'type': 'integer', 'nullable': True},
'socket_connect_timeout': {'type': 'integer', 'nullable': True},
'socket_keepalive': {'type': 'boolean', 'nullable': True},
'socket_keepalive_options': {'nullable': True},
'connection_pool': {'nullable': True},
'unix_socket_path': {'nullable': True},
'encoding': {'type': 'string', 'default': 'utf-8'},
'encoding_errors': {'type': 'string', 'default': 'strict'},
'charset': {'nullable': True},
'errors': {'nullable': True},
'decode_responses': {'type': 'boolean', 'default': False},
'retry_on_timeout': {'type': 'boolean', 'default': False},
'ssl': {'type': 'boolean', 'default': False},
'ssl_keyfile': {'nullable': True},
'ssl_certfile': {'nullable': True},
'ssl_cert_reqs': {'nullable': True},
'ssl_ca_certs': {'nullable': True},
'max_connections': {'type': 'integer', 'nullable': True}
}
}
self.injection_bindings = {}
self.event_listeners = [
(ConfigurationReadyEvent, self.config_ready)
]
def config_ready(self, event):
d = dict(event.configuration.redis._asdict())
for i in d:
if hasattr(d[i], "_asdict"):
d[i] = None
self.injection_bindings[redis.Redis] = redis.Redis(**d)
|
python
|
"""This class stores all of the samples for training. It is able to
construct randomly selected batches of phi's from the stored history.
"""
import numpy as np
import time
floatX = 'float32'
class DataSet(object):
"""A replay memory consisting of circular buffers for observed images,
actions, and rewards.
"""
def __init__(self, config, rng, data_format="NHWC"):
"""Construct a DataSet.
Arguments:
width, height - image size
max_steps - the number of time steps to store
phi_length - number of images to concatenate into a state
rng - initialized numpy random number generator, used to
choose random minibatches
"""
        # TODO: Specify capacity in number of state transitions, not
        # number of saved time steps.
self.width = config.screen_width
self.height = config.screen_height
self.max_steps = config.memory_size
self.discount = config.discount
self.phi_length = config.history_length
self.rng = rng
self.data_format = data_format
# Allocate the circular buffers and indices.
self.imgs = np.zeros((self.max_steps, self.height, self.width), dtype='float32')
self.actions = np.zeros(self.max_steps, dtype='int32')
self.rewards = np.zeros(self.max_steps, dtype=floatX)
self.terminal = np.zeros(self.max_steps, dtype='bool')
self.R = np.zeros(self.max_steps, dtype=floatX)
self.bottom = 0
self.top = 0
self.size = 0
def add_sample(self, img, action, reward, terminal):
"""Add a time step record.
Arguments:
img -- observed image
action -- action chosen by the agent
reward -- reward received after taking the action
terminal -- boolean indicating whether the episode ended
after this time step
"""
self.imgs[self.top] = img
self.actions[self.top] = action
self.rewards[self.top] = reward
self.terminal[self.top] = terminal
self.R[self.top] = -1000.0
if terminal:
self.R[self.top] = reward
idx = self.top
count = 0
while True:
count += 1
idx -= 1
if self.terminal[idx]:
break
self.R[idx] = self.R[idx+1]*self.discount + self.rewards[idx]
if self.size == self.max_steps:
self.bottom = (self.bottom + 1) % self.max_steps
else:
self.size += 1
self.top = (self.top + 1) % self.max_steps
def __len__(self):
"""Return an approximate count of stored state transitions."""
# TODO: Properly account for indices which can't be used, as in
# random_batch's check.
return max(0, self.size - self.phi_length)
def last_phi(self):
"""Return the most recent phi (sequence of image frames)."""
indexes = np.arange(self.top - self.phi_length, self.top)
phi = np.transpose(self.imgs.take(indexes, axis=0, mode='wrap'), [1, 2, 0])
return phi
def phi(self, img):
"""Return a phi (sequence of image frames), using the last phi_length -
1, plus img.
"""
indexes = np.arange(self.top - self.phi_length + 1, self.top)
phi = np.empty((self.phi_length, self.height, self.width), dtype=floatX)
phi[0:self.phi_length - 1] = self.imgs.take(indexes,
axis=0,
mode='wrap')
phi[-1] = img
return phi
def random_batch(self, batch_size):
"""Return corresponding imgs, actions, rewards, and terminal status for
batch_size randomly chosen state transitions.
"""
# Allocate the response.
imgs = np.zeros((batch_size,
self.height,
self.width,
self.phi_length + 1),
dtype='float32')
actions = np.zeros(batch_size, dtype='int32')
rewards = np.zeros(batch_size, dtype=floatX)
terminal = np.zeros(batch_size, dtype='bool')
# R is the Monte Carlo Return. :)
R = np.zeros(batch_size, dtype=floatX)
count = 0
while count < batch_size:
# Randomly choose a time step from the replay memory.
# index = self.rng.randint(self.bottom,
# self.bottom + self.size - self.phi_length)
index = self.rng.randint(0, self.size - self.phi_length)
# Both the before and after states contain phi_length
# frames, overlapping except for the first and last.
all_indices = np.arange(index, index + self.phi_length + 1)
end_index = index + self.phi_length - 1
# Check that the initial state corresponds entirely to a
# single episode, meaning none but its last frame (the
# second-to-last frame in imgs) may be terminal. If the last
# frame of the initial state is terminal, then the last
# frame of the transitioned state will actually be the first
# frame of a new episode, which the Q learner recognizes and
# handles correctly during training by zeroing the
# discounted future reward estimate.
if np.any(self.terminal.take(all_indices[0:-2], mode='wrap')) or self.R.take(end_index,
mode='wrap') == -1000.0:
continue
# Add the state transition to the response.
imgs[count] = np.transpose(self.imgs.take(all_indices, axis=0, mode='wrap'), [1, 2, 0])
actions[count] = self.actions.take(end_index, mode='wrap')
rewards[count] = self.rewards.take(end_index, mode='wrap')
terminal[count] = self.terminal.take(end_index, mode='wrap')
R[count] = self.R.take(end_index, mode='wrap')
count += 1
if self.data_format == "NHWC":
s_t = imgs[..., :self.phi_length]
s_t_plus_1 = imgs[..., -self.phi_length:]
else:
imgs = np.transpose(imgs, [0, 3, 1, 2])
s_t = imgs[:, :self.phi_length, ...]
s_t_plus_1 = imgs[:, -self.phi_length:, ...]
return s_t, s_t_plus_1, actions, rewards, terminal, R
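# Minimal usage sketch (assumes a config object exposing screen_width,
# screen_height, memory_size, discount and history_length attributes):
#
#   rng = np.random.RandomState(42)
#   dataset = DataSet(config, rng)
#   dataset.add_sample(img, action=1, reward=0.0, terminal=False)
#   s_t, s_t1, actions, rewards, terminal, R = dataset.random_batch(32)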
|
python
|
from .client import ExClient
from .mock import MockTransport
from .models import ResponseMixin
from .transport import AsyncHTTPTransportMixin
|
python
|
import asyncio
import logging
import synapse.exc as s_exc
import synapse.telepath as s_telepath
import synapse.lib.base as s_base
import synapse.lib.stormtypes as s_stormtypes
logger = logging.getLogger(__name__)
stormcmds = (
{
'name': 'service.add',
'descr': 'Add a storm service to the cortex.',
'cmdargs': (
('name', {'help': 'The name of the service.'}),
('url', {'help': 'The telepath URL for the remote service.'}),
),
'cmdconf': {},
'storm': '''
$sdef = $lib.service.add($cmdopts.name, $cmdopts.url)
$lib.print("added {iden} ({name}): {url}", iden=$sdef.iden, name=$sdef.name, url=$sdef.url)
''',
},
{
'name': 'service.del',
'descr': 'Remove a storm service from the cortex.',
'cmdargs': (
('iden', {'help': 'The service identifier or prefix.'}),
),
'cmdconf': {},
'storm': '''
$svcs = ()
for $sdef in $lib.service.list() {
if $sdef.iden.startswith($cmdopts.iden) {
$svcs.append($sdef)
}
}
$count = $svcs.length()
if $( $count = 1 ) {
$sdef = $svcs.index(0)
$lib.service.del($sdef.iden)
$lib.print("removed {iden} ({name}): {url}", iden=$sdef.iden, name=$sdef.name, url=$sdef.url)
} elif $( $count = 0 ) {
$lib.print("No service found by iden: {iden}", iden=$cmdopts.iden)
} else {
$lib.print('Multiple matches found for {iden}. Aborting delete.', iden=$cmdopts.iden)
}
''',
},
{
'name': 'service.list',
'descr': 'List the storm services configured in the cortex.',
'cmdopts': (),
'cmdconf': {},
'storm': '''
$lib.print("")
$lib.print("Storm service list (iden, ready, name, url):")
$count = $(0)
for $sdef in $lib.service.list() {
$lib.print(" {iden} {ready} ({name}): {url}", iden=$sdef.iden, ready=$sdef.ready, name=$sdef.name, url=$sdef.url)
$count = $( $count + 1 )
}
$lib.print("")
$lib.print("{count} services", count=$count)
''',
}
)
class StormSvc:
'''
The StormSvc mixin class used to make a remote storm service with commands.
'''
_storm_svc_name = 'noname'
_storm_svc_vers = (0, 0, 1)
_storm_svc_evts = {} # type: ignore
_storm_svc_pkgs = () # type: ignore
async def getStormSvcInfo(self):
# Users must specify the service name
assert self._storm_svc_name != 'noname'
return {
'name': self._storm_svc_name,
'vers': self._storm_svc_vers,
'evts': self._storm_svc_evts,
'pkgs': await self.getStormSvcPkgs(),
}
async def getStormSvcPkgs(self):
return self._storm_svc_pkgs
class StormSvcClient(s_base.Base, s_stormtypes.Proxy):
'''
A StormService is a wrapper for a telepath proxy to a service
accessible from the storm runtime.
'''
async def __anit__(self, core, sdef):
await s_base.Base.__anit__(self)
s_stormtypes.Proxy.__init__(self, None)
self.core = core
self.sdef = sdef
self.iden = sdef.get('iden')
self.name = sdef.get('name') # Local name for the cortex
self.svcname = '' # remote name from the service
# service info from the server...
self.info = None
url = self.sdef.get('url')
self.ready = asyncio.Event()
proxy = await s_telepath.Client.anit(url, onlink=self._onTeleLink)
s_stormtypes.Proxy.__init__(self, proxy)
self.onfini(self.proxy.fini)
async def _runSvcInit(self):
# Set the latest reference for this object to the remote svcname
self.core.svcsbysvcname.pop(self.svcname, None)
self.svcname = self.info['name']
self.core.svcsbysvcname[self.svcname] = self
try:
await self.core._delStormSvcPkgs(self.iden)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'_delStormSvcPkgs failed for service {self.name} ({self.iden})')
# Register new packages
for pdef in self.info.get('pkgs', ()):
try:
# push the svciden in the package metadata for later reference.
pdef['svciden'] = self.iden
await self.core._hndladdStormPkg(pdef)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
name = pdef.get('name')
logger.exception(f'addStormPkg ({name}) failed for service {self.name} ({self.iden})')
# Set events and fire as needed
evts = self.info.get('evts')
try:
if evts is not None:
self.sdef = await self.core.setStormSvcEvents(self.iden, evts)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'setStormSvcEvents failed for service {self.name} ({self.iden})')
try:
if self.core.isactive:
await self.core._runStormSvcAdd(self.iden)
except asyncio.CancelledError: # pragma: no cover TODO: remove once >= py 3.8 only
raise
except Exception:
logger.exception(f'service.add storm hook failed for service {self.name} ({self.iden})')
async def _onTeleLink(self, proxy):
clss = proxy._getClasses()
names = [c.rsplit('.', 1)[-1] for c in clss]
if 'StormSvc' in names:
self.info = await proxy.getStormSvcInfo()
await self._runSvcInit()
async def unready():
self.ready.clear()
await self.core.fire("stormsvc:client:unready", iden=self.iden)
proxy.onfini(unready)
self.ready.set()
async def deref(self, name):
# method used by storm runtime library on deref
try:
await self.proxy.waitready()
return await s_stormtypes.Proxy.deref(self, name)
except asyncio.TimeoutError:
mesg = 'Timeout waiting for storm service'
raise s_exc.StormRuntimeError(mesg=mesg, name=name) from None
except AttributeError as e: # pragma: no cover
# possible client race condition seen in the real world
mesg = f'Error dereferencing storm service - {str(e)}'
raise s_exc.StormRuntimeError(mesg=mesg, name=name) from None
|
python
|
from datetime import date, datetime
import pytz
from Cinema.models.Actor import Discount
from Cinema.models.Actor import Actor
from Cinema.models.Movie import Movie
from Cinema.models.Projection import Projection, Entry
from Cinema.models.Hall import Seat
from django.db.models import Sum
from django.db.models import F
def actors_query():
return Actor.objects.all()
def movies_query():
return Movie.objects.all()
def projection_query():
return Projection.objects.filter(active=True)
def all_projections_query():
return Projection.objects.all()
def update_projections_query():
proj = projection_query().values('id', 'time__ending_time')
ids = list(proj.values_list('id', flat=True))
times = list(proj.values_list('time__ending_time', flat=True))
cdate = pytz.UTC.localize(datetime.now())
ids_to_update = []
# Store all expired projections
for i in range(0, len(times)):
if cdate >= times[i]:
ids_to_update.append(ids[i])
Projection.objects.filter(id__in=ids_to_update).update(active=False)
def all_movies_query():
return Movie.objects.all()
def all_discounts_query():
return Discount.objects.all()
def active_discounts_query():
# Get all active discounts
# all_actives = Discount.objects.all().values('type', 'amount').filter(active=True)
# Make a list of both types and amounts
# types = list(all_actives.values_list('type'))
return Discount.objects.all().filter(active=True)
def discount_update_active(options, all_inactive=False):
if not all_inactive:
Discount.objects.filter(id__in=options).update(active=True)
Discount.objects.exclude(id__in=options).update(active=False)
else:
Discount.objects.update(active=False)
a = Discount.objects.all().values_list()
print(a)
def get_spec_proj(id):
return Projection.objects.get(id=id)
def seats_query(id):
return Seat.objects.filter(hall_id=id)
def available_entries_query(projection):
return Entry.objects.filter(projection=projection, reserved=False)
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
After 40755, what is the next triangle number
that is also pentagonal and hexagonal?
T285 = P165 = H143 = 40755.
"""
def pe45():
"""
>>> pe45()
(1533776805, 27549)
"""
limit = 30000
h = [n * ((n << 1) - 1) for n in range(144, limit)]
p = set([n * (3 * n - 1) >> 1 for n in range(166, limit << 1)])
t = set([n * (n + 1) >> 1 for n in range(286, limit << 1)])
for i in range(limit - 144):
n = h[i]
if n in p and n in t:
return (n, i)
return None
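# Note: every hexagonal number is triangular (H(n) = T(2n - 1)), so iterating
# over hexagonal numbers and testing pentagonal membership would already be
# sufficient; the triangle-number check above is kept for clarity.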
if __name__ == "__main__":
import doctest
doctest.testmod()
|
python
|
from kikola.core.context_processors import path
from kikola.core.decorators import render_to, render_to_json
@render_to_json
def context_processors_path(request):
return path(request)
@render_to('core/render_to.html')
def decorators_render_to(request):
return {'text': 'It works!'}
@render_to('core/render_to.txt', mimetype='text/plain')
def decorators_render_to_with_mimetype(request):
return {'text': 'It works!'}
@render_to('core/render_to.html')
def decorators_render_to_with_mimetype_in_dict(request):
return {'MIMETYPE': 'text/plain',
'TEMPLATE': 'core/render_to.txt',
'text': 'It works!'}
@render_to_json
def decorators_render_to_json(request):
return {}
@render_to_json(indent=4)
def decorators_render_to_json_with_options(request):
return {'key': 'value'}
|
python
|
from .tasks import (
AsyncTasks,
Tasks,
CloudFunction,
as_completed,
GroupTerminalException,
BoundGlobalError,
)
# Backwards compatibility
from descarteslabs.common.tasks import FutureTask, TransientResultError
TransientResultException = TransientResultError
__all__ = [
"AsyncTasks",
"Tasks",
"TransientResultException",
"FutureTask",
"CloudFunction",
"as_completed",
"GroupTerminalException",
"BoundGlobalError",
]
|
python
|
#!/usr/bin/env python
#
# utils.py
"""
Utility functions for Markdown -> HTML -> LaTeX conversion.
"""
#
# Copyright © 2020-2021 Dominic Davis-Foster <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
#
# Parts based on https://github.com/rufuspollock/markdown2latex
# BSD Licensed
# Authored by Rufus Pollock: <http://www.rufuspollock.org/>
# Reworked by Julian Wulfheide ([email protected]) and
# Pedro Gaudencio ([email protected])
#
# stdlib
import re
__all__ = ["escape_latex_entities", "re_escape", "unescape_html_entities", "unescape_latex_entities"]
def re_escape(string: str) -> str:
    """
    Escape literal backslashes so the string can be safely used with :mod:`re`.

    :param string: The string to escape.

    :return: The string with each backslash doubled.
    """
    return string.replace('\\', "\\\\")
start_single_quote_re = re.compile("(^|\\s|\")'")
start_double_quote_re = re.compile("(^|\\s|'|`)\"")
end_double_quote_re = re.compile('"(,|\\.|\\s|$)')
def unescape_html_entities(text: str) -> str:
"""
Replaces escaped html entities (e.g. ``&``) with their ASCII representations (e.g. ``&``).
:param text:
"""
out = text.replace("&", '&')
out = out.replace("<", '<')
out = out.replace(">", '>')
out = out.replace(""", '"')
return out
def escape_latex_entities(text: str) -> str:
"""
Escape latex reserved characters.
:param text:
"""
out = text
out = unescape_html_entities(out)
out = re.sub(r"[^\n\\]%", r"\\%", out)
out = re.sub(r"[^\\]&", r"\\&", out)
out = re.sub(r"[^\\]#", r"\\#", out)
out = re.sub(r"\"([^\"]*)\"", r"\\enquote{\1}", out)
out = re.sub(r"\'([^\']*)\'", r"\\enquote{\1}", out)
# out = start_single_quote_re.sub(r'\g<1>`', out)
# out = start_double_quote_re.sub(r'\g<1>``', out)
# out = end_double_quote_re.sub(r"''\g<1>", out)
# people should escape these themselves as it conflicts with maths
# out = out.replace('{', '\\{')
# out = out.replace('}', '\\}')
# do not do '$' here because it is dealt with by convert_maths
# out = out.replace('$', '\\$')
return out
def unescape_latex_entities(text: str) -> str:
"""
Unescape certain latex characters.
:param text:
"""
# Limit ourselves as this is only used for maths stuff.
out = text
out = out.replace("\\&", '&')
return out
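# Illustrative example: escape_latex_entities('50% of "cases"') yields
# 50\% of \enquote{cases} (the quoted text becomes an \enquote block).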
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Unit tests for nginxfmt module."""
import contextlib
import logging
import pathlib
import shutil
import tempfile
import unittest
import nginxfmt
__author__ = "Michał Słomkowski"
__license__ = "Apache 2.0"
class TestFormatter(unittest.TestCase):
fmt = nginxfmt.Formatter()
def __init__(self, method_name: str = ...) -> None:
super().__init__(method_name)
logging.basicConfig(level=logging.DEBUG) # todo fix logging in debug
def check_formatting(self, original_text: str, formatted_text: str):
self.assertMultiLineEqual(formatted_text, self.fmt.format_string(original_text))
def check_stays_the_same(self, text: str):
self.assertMultiLineEqual(text, self.fmt.format_string(text))
def _check_variable_tags_symmetry(self, text):
self.assertMultiLineEqual(text,
self.fmt._strip_variable_template_tags(self.fmt._apply_variable_template_tags(text)))
def test_collapse_variable1(self):
self.check_formatting(" lorem ipsum ${ dol } amet", "lorem ipsum ${dol} amet\n")
def test_join_opening_parenthesis(self):
self.assertEqual(["foo", "bar {", "johan {", "tee", "ka", "}"],
self.fmt._join_opening_bracket(("foo", "bar {", "johan", "{", "tee", "ka", "}")))
def test_clean_lines(self):
self.assertEqual(["ala", "ma", "{", "kota", "}", "to;", "", "ook"],
self.fmt._clean_lines(("ala", "ma {", "kota", "}", "to;", "", "ook")))
self.assertEqual(["ala", "ma", "{", "{", "kota", "}", "to", "}", "ook"],
self.fmt._clean_lines(("ala", "ma {{", "kota", "}", "to}", "ook")))
self.assertEqual(["{", "ala", "ma", "{", "{", "kota", "}", "to", "}"],
self.fmt._clean_lines(("{", "ala ", "ma {{", " kota ", "}", " to} ")))
self.assertEqual(["{", "ala", "# ma {{", "kota", "}", "to", "}", "# }"],
self.fmt._clean_lines(("{", "ala ", "# ma {{", " kota ", "}", " to} ", "# }")))
self.assertEqual(["{", "ala", "# ma {{", r"rewrite /([\d]{2}) /up/$1.html last;", "}", "to", "}"],
self.fmt._clean_lines(
("{", "ala ", "# ma {{", r" rewrite /([\d]{2}) /up/$1.html last; ", "}", " to", "}")))
self.assertEqual(["{", "ala", "# ma {{", "aa last;", "bb to;", "}"],
self.fmt._clean_lines(("{", "ala ", "# ma {{", " aa last; bb to; ", "}")))
self.assertEqual(["{", "aa;", "b b \"cc; dd; ee \";", "ssss;", "}"],
self.fmt._clean_lines(("{", "aa; b b \"cc; dd; ee \"; ssss;", "}")))
self.assertEqual([r"location ~ /\.ht", "{"], self.fmt._clean_lines([r"location ~ /\.ht {", ]))
def test_perform_indentation(self):
self.assertEqual([
"foo bar {",
" fizz bazz;",
"}"], self.fmt._perform_indentation(("foo bar {", "fizz bazz;", "}")))
self.assertEqual([
"foo bar {",
" fizz bazz {",
" lorem ipsum;",
" asdf asdf;",
" }",
"}"], self.fmt._perform_indentation(("foo bar {", "fizz bazz {", "lorem ipsum;", "asdf asdf;", "}", "}")))
self.assertEqual([
"foo bar {",
" fizz bazz {",
" lorem ipsum;",
" # }",
" }",
"}",
"}",
"foo {"],
self.fmt._perform_indentation(("foo bar {", "fizz bazz {", "lorem ipsum;", "# }", "}", "}", "}", "foo {")))
self.assertEqual([
"foo bar {",
" fizz bazz {",
" lorem ipsum;",
" }",
"}",
"}",
"foo {"],
self.fmt._perform_indentation(("foo bar {", "fizz bazz {", "lorem ipsum;", "}", "}", "}", "foo {")))
def test_strip_line(self):
self.assertEqual("foo", self.fmt._strip_line(" foo "))
self.assertEqual("bar foo", self.fmt._strip_line(" bar foo "))
self.assertEqual("bar foo", self.fmt._strip_line(" bar \t foo "))
self.assertEqual('lorem ipsum " foo bar zip "', self.fmt._strip_line(' lorem ipsum " foo bar zip " '))
self.assertEqual('lorem ipsum " foo bar zip " or " dd aa " mi',
self.fmt._strip_line(' lorem ipsum " foo bar zip " or \t " dd aa " mi'))
def test_apply_bracket_template_tags(self):
self.assertEqual(
"\"aaa___TEMPLATE_BRACKET_OPENING_TAG___dd___TEMPLATE_BRACKET_CLOSING_TAG___bbb\"".splitlines(),
self.fmt._apply_bracket_template_tags("\"aaa{dd}bbb\"".splitlines()))
self.assertEqual(
"\"aaa___TEMPLATE_BRACKET_OPENING_TAG___dd___TEMPLATE_BRACKET_CLOSING_TAG___bbb\"cc{cc}cc\"dddd___TEMPLATE_BRACKET_OPENING_TAG___eee___TEMPLATE_BRACKET_CLOSING_TAG___fff\"".splitlines(),
self.fmt._apply_bracket_template_tags("\"aaa{dd}bbb\"cc{cc}cc\"dddd{eee}fff\"".splitlines()))
def test_strip_bracket_template_tags1(self):
self.assertEqual("\"aaa{dd}bbb\"", self.fmt._strip_bracket_template_tags(
"\"aaa___TEMPLATE_BRACKET_OPENING_TAG___dd___TEMPLATE_BRACKET_CLOSING_TAG___bbb\""))
def test_apply_bracket_template_tags1(self):
self.assertEqual(
"\"aaa___TEMPLATE_BRACKET_OPENING_TAG___dd___TEMPLATE_BRACKET_CLOSING_TAG___bbb\"cc{cc}cc\"dddd___TEMPLATE_BRACKET_OPENING_TAG___eee___TEMPLATE_BRACKET_CLOSING_TAG___fff\"".splitlines(),
self.fmt._apply_bracket_template_tags("\"aaa{dd}bbb\"cc{cc}cc\"dddd{eee}fff\"".splitlines()))
def test_variable_template_tags(self):
self.assertEqual("foo bar ___TEMPLATE_VARIABLE_OPENING_TAG___myvar___TEMPLATE_VARIABLE_CLOSING_TAG___",
self.fmt._apply_variable_template_tags("foo bar ${myvar}"))
self._check_variable_tags_symmetry("lorem ipsum ${dolor} $amet")
self._check_variable_tags_symmetry("lorem ipsum ${dolor} $amet\nother $var and ${var_name2}")
def test_umlaut_in_string(self):
self.check_formatting(
"# Statusseite für Monitoring freigeben \n" +
"# line above contains german umlaut causing problems \n" +
"location /nginx_status {\n" +
" stub_status on;\n" +
" access_log off;\n" +
" allow 127.0.0.1;\n" +
" deny all;\n" +
"}",
"# Statusseite für Monitoring freigeben\n" +
"# line above contains german umlaut causing problems\n" +
"location /nginx_status {\n" +
" stub_status on;\n" +
" access_log off;\n" +
" allow 127.0.0.1;\n" +
" deny all;\n" +
"}\n"
)
def test_empty_lines_removal(self):
self.check_formatting(
"\n foo bar {\n" +
" lorem ipsum;\n" +
"}\n\n\n",
"foo bar {\n" +
" lorem ipsum;\n" +
"}\n")
self.check_formatting(
"\n foo bar {\n\n\n\n\n\n" +
" lorem ipsum;\n" +
"}\n\n\n",
"foo bar {\n\n\n" +
" lorem ipsum;\n" +
"}\n")
self.check_formatting(
" foo bar {\n" +
" lorem ipsum;\n" +
" kee {\n" +
"caak; \n" +
"}}",
"foo bar {\n" +
" lorem ipsum;\n" +
" kee {\n" +
" caak;\n" +
" }\n" +
"}\n")
def test_template_variables_with_dollars1(self):
self.check_formatting('server {\n' +
' # commented ${line} should not be touched\n' +
'listen 80 default_server;\n' +
'server_name localhost;\n' +
'location / {\n' +
'proxy_set_header X-User-Auth "In ${cookie_access_token} ${ other}";\n' +
'proxy_set_header X-User-Other "foo ${bar}";\n' +
'}\n' +
'}',
'server {\n' +
' # commented ${line} should not be touched\n' +
' listen 80 default_server;\n' +
' server_name localhost;\n' +
' location / {\n' +
' proxy_set_header X-User-Auth "In ${cookie_access_token} ${ other}";\n' +
' proxy_set_header X-User-Other "foo ${bar}";\n' +
' }\n' +
'}\n')
def test_template_variables_with_dollars2(self):
self.check_formatting(' some_tag { with_templates "my ${var} and other ${ invalid_variable_use } "; }\n' +
'# in my line\n',
'some_tag {\n' +
' with_templates "my ${var} and other ${ invalid_variable_use } ";\n' +
'}\n' +
'# in my line\n')
def test_backslash3(self):
self.check_formatting('location ~ /\.ht {\n' +
'deny all;\n' +
'}',
'location ~ /\.ht {\n' +
' deny all;\n' +
'}\n')
def test_backslash2(self):
"""If curly braces are withing quotation marks, we treat them as part of the string, not syntax structure.
Writing '${ var }' is not valid in nginx anyway, so we slip collapsing these altogether. May be changed in
the future. """
self.check_formatting(
' tag { wt ~ /\.ht \t "my ${some some} and ~ /\.ht \tother ${comething in curly braces } "; }\n' +
'# in my line\n',
'tag {\n' +
' wt ~ /\.ht "my ${some some} and ~ /\.ht \tother ${comething in curly braces } ";\n' +
'}\n' +
'# in my line\n')
def test_multi_semicolon(self):
self.check_formatting('location /a { \n' +
'allow 127.0.0.1; allow 10.0.0.0/8; deny all; \n' +
'}\n',
'location /a {\n' +
' allow 127.0.0.1;\n' +
' allow 10.0.0.0/8;\n' +
' deny all;\n' +
'}\n')
def test_loading_utf8_file(self):
tmp_file = pathlib.Path(tempfile.mkstemp('utf-8')[1])
try:
shutil.copy('test-files/umlaut-utf8.conf', tmp_file)
self.fmt.format_file(tmp_file)
# todo perform some tests on result file
finally:
tmp_file.unlink()
def test_loading_latin1_file(self):
tmp_file = pathlib.Path(tempfile.mkstemp('latin1')[1])
try:
shutil.copy('test-files/umlaut-latin1.conf', tmp_file)
self.fmt.format_file(tmp_file)
# todo perform some tests on result file
finally:
tmp_file.unlink()
def test_issue_15(self):
self.check_formatting(
'section { server_name "~^(?<tag>[0-9a-f]{8}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{12})\.a\.b\.com$"; }',
'section {\n server_name "~^(?<tag>[0-9a-f]{8}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{4}\-[0-9a-f]{12})\.a\.b\.com$";\n}\n')
def test_issue_11(self):
self.check_formatting(" # 3 spaces\n" +
"# 2 spaces\n" +
" # 1 space",
"# 3 spaces\n" +
"# 2 spaces\n" +
"# 1 space\n")
# everything after # is left as is (except trimming trailing whitespaces)
self.check_formatting(""" #if (!-f $request_filename) {
# rewrite ^/static/?(.*)$ /static.php?resource=$1 last;
#""",
"#if (!-f $request_filename) {\n" +
"# rewrite ^/static/?(.*)$ /static.php?resource=$1 last;\n" +
"#\n")
def test_issue_20_1(self):
self.check_stays_the_same("# comment 1\n" +
"tag {\n" +
" # comment 2\n" +
" code;\n" +
" # comment 3\n" +
" subtag {\n" +
" code;\n" +
" # comment 4\n" +
" #\n" +
" }\n" +
" # comment 5\n" +
"}\n")
def test_issue_20_2(self):
self.check_formatting(
"location /nginx_status {\n" +
"# Don't break \n" +
" stub_status on;\n" +
" access_log off;\n" +
" allow 127.0.0.1;\n" +
" deny all;\n" +
"}",
"location /nginx_status {\n" +
" # Don't break\n" +
" stub_status on;\n" +
" access_log off;\n" +
" allow 127.0.0.1;\n" +
" deny all;\n" +
"}\n"
)
self.check_formatting(
"location /nginx_status {\n" +
"# Don\"t break \n" +
" stub_status on;\n" +
" access_log off;\n" +
" allow 127.0.0.1;\n" +
" deny all;\n" +
"}",
"location /nginx_status {\n" +
" # Don\"t break\n" +
" stub_status on;\n" +
" access_log off;\n" +
" allow 127.0.0.1;\n" +
" deny all;\n" +
"}\n"
)
def test_issue_16(self):
self.check_formatting(
"location /example { allow 192.168.0.0/16; deny all; }",
"location /example {\n"
" allow 192.168.0.0/16;\n"
" deny all;\n"
"}\n")
def test_issue_9(self):
self.check_formatting(
(
"""http {\n"""
""" log_format le_json '{"time":"$time_iso8601", '\n"""
""" '"client_agent":"$client_agent",\n"""
""" '"user_agent":"$http_user_agent"}';\n"""
"""}\n"""
),
(
"""http {\n"""
""" log_format le_json '{"time":"$time_iso8601", '\n"""
""" '"client_agent":"$client_agent",\n"""
""" '"user_agent":"$http_user_agent"}';\n"""
"""}\n"""
),
)
def test_custom_indentation(self):
fo = nginxfmt.FormatterOptions()
fo.indentation = 2
fmt2 = nginxfmt.Formatter(fo)
self.assertMultiLineEqual("{\n"
" foo bar;\n"
"}\n",
fmt2.format_string(
" { \n"
" foo bar;\n"
"}\n"))
class TestStandaloneRun(unittest.TestCase):
@contextlib.contextmanager
def input_test_file(self, file_name):
tmp_file = pathlib.Path(tempfile.mkstemp('utf-8')[1])
try:
shutil.copy('test-files/' + file_name, tmp_file)
yield str(tmp_file)
# todo perform some tests on result file
finally:
tmp_file.unlink()
# todo better tests of standalone mode?
def test_print_result(self):
with self.input_test_file('not-formatted-1.conf') as input:
nginxfmt._standalone_run(['-p', input])
if __name__ == '__main__':
unittest.main()
|
python
|
#!/usr/bin/python3
"""
Revision history:
24 Sep 2020 | 1.0 - initial release
"""
DOCUMENTATION = '''
---
module: hjson_to_json.py
author: Sanjeevi Kumar, Wipro Technologies
version_added: "1.0"
short_description: Read an .hjson file and output JSON
description:
    - Read the HJSON file specified and output a JSON file
requirements:
- The hjson Python module must be installed on the Ansible host. This can be installed using pip:
sudo pip install hjson
options:
input:
description:
- The name with location of the HJSON file
required: true
output:
description:
- The name with location of the JSON file
required: true
'''
EXAMPLES = '''
Running the module from the command line:
ansible localhost -m hjson_to_json -a input="/tmp/sample.hjson" -a output="/tmp/sample.json" -M ~/ansible/library
In a playbook configuration, given a host entry:
$ cat hjson_to_json.yml
---
- name: Test playbook to convert hjson to json
hosts: localhost
roles:
- {role: hjson_to_json, debug: on}
$ ansible-playbook hjson_to_json.yml -e "input_file='/tmp/sample.hjson'" -e "output_file='/tmp/sample.json'"
'''
import hjson
import json
# ---------------------------------------------------------------------------
# convert hjson to json
# ---------------------------------------------------------------------------
def hjson_to_json(input_file,output_file):
"Read the HJSON file and return as JSON"
try:
with open(input_file) as infile:
data = hjson.loads(infile.read())
print(data)
with open(output_file, 'w') as outfile:
json.dump(data, outfile)
except IOError:
return (1, "IOError on input file:%s" % input_file)
return (0, data)
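# Illustrative example: HJSON input like
#   {
#     # comments are allowed
#     key: value
#   }
# is parsed by hjson.loads and written back out as strict JSON: {"key": "value"}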
# ---------------------------------------------------------------------------
# MAIN
# ---------------------------------------------------------------------------
def main():
" Read parameters and add common file arguments "
module = AnsibleModule(argument_spec = dict(
input = dict(required=True),
output = dict(required=True)
),
check_invalid_arguments=False,
add_file_common_args=True)
code, response = hjson_to_json(module.params["input"],module.params["output"])
if code == 1:
module.fail_json(msg=response)
else:
module.exit_json(**response)
return code
from ansible.module_utils.basic import *
main()
#
|
python
|
import django_userhistory.models as models
from django.contrib import admin
try:
admin.site.register(models.UserAction)
admin.site.register(models.UserHistory)
admin.site.register(models.UserTrackedContent)
except Exception:
    pass
|
python
|
"""
"""
load("@bazel_skylib//lib:shell.bzl", "shell")
def _stately_copy_impl(ctx):
input_files = ctx.files.srcs
if ctx.attr.state_file:
state_file = ctx.attr.state_file
else:
state_file = ".stately.yaml"
args = [
a
for a in ["copy"] + [f.short_path for f in input_files]
if a != ""
] + (
["--file-mode=%s" % ctx.attr.mode] if ctx.attr.mode else []
)
    if ctx.attr.strip_prefix:
        strip_prefix = ctx.attr.strip_prefix
    elif ctx.attr.strip_package_prefix:
        strip_prefix = ctx.label.package
    else:
        strip_prefix = ""
runner_out_file = ctx.actions.declare_file(ctx.label.name + ".bash")
substitutions = {
"@@ARGS@@": shell.array_literal(args),
"@@BIN_DIRECTORY@@": ctx.bin_dir.path,
"@@OUTPUT_DIRECTORY@@": ctx.attr.output,
"@@STATE_FILE_PATH@@": state_file,
"@@STRIPPREFIX@@": strip_prefix,
"@@STATELY_SHORT_PATH@@": shell.quote(ctx.executable._stately.short_path),
"@@NAME@@": "//%s:%s" % (ctx.label.package, ctx.label.name),
}
ctx.actions.expand_template(
template = ctx.file._runner,
output = runner_out_file,
substitutions = substitutions,
is_executable = True,
)
return [
DefaultInfo(
files = depset([runner_out_file]),
runfiles = ctx.runfiles(files = [ctx.executable._stately] + ctx.files.srcs),
executable = runner_out_file,
),
]
def _stately_manifest_impl(ctx):
if ctx.attr.state_file:
state_file = ctx.attr.state_file
else:
state_file = ".stately.yaml"
args = ["manifest"]
runner_out_file = ctx.actions.declare_file(ctx.label.name + ".bash")
substitutions = {
"@@ARGS@@": shell.array_literal(args),
"@@FILE@@": ctx.files.src[0].short_path,
"@@BIN_DIRECTORY@@": ctx.bin_dir.path,
"@@OUTPUT_DIRECTORY@@": ctx.attr.output,
"@@STATE_FILE_PATH@@": state_file,
"@@STATELY_SHORT_PATH@@": shell.quote(ctx.executable._stately.short_path),
"@@NAME@@": "//%s:%s" % (ctx.label.package, ctx.label.name),
}
ctx.actions.expand_template(
template = ctx.file._runner,
output = runner_out_file,
substitutions = substitutions,
is_executable = True,
)
return [
DefaultInfo(
files = depset([runner_out_file]),
runfiles = ctx.runfiles(files = [ctx.executable._stately] + ctx.files.src),
executable = runner_out_file,
),
]
_stately_common_attrs = {
"output": attr.string(
doc = "The output directory",
),
"state_file": attr.string(
doc = "The state file name",
),
"_stately": attr.label(
default = "//stately:stately_tool",
cfg = "host",
allow_single_file = True,
executable = True,
),
}
_stately_copy_attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = True,
doc = "The files to install",
),
"strip_package_prefix": attr.bool(
doc = "Strip the package path from the output directory.",
default = False,
),
"strip_prefix": attr.string(
doc = "A prefix to strip from files being staged in, defaults to package path.",
),
"mode": attr.string(
doc = "The mode to set all copied files to.",
),
"_runner": attr.label(
default = "//stately:copy_runner.bash.template",
allow_single_file = True,
),
}
project_installed_files = rule(
implementation = _stately_copy_impl,
executable = True,
attrs = dict(_stately_common_attrs.items() + _stately_copy_attrs.items()),
doc = """
Install generated files into the project.
""",
)
_stately_manifest_attrs = {
"src": attr.label(
mandatory = True,
allow_single_file = True,
doc = "The files to manifest.",
),
"_runner": attr.label(
default = "//stately:manifest_runner.bash.template",
allow_single_file = True,
),
}
manifest_project_installed_files = rule(
implementation = _stately_manifest_impl,
executable = True,
attrs = dict(_stately_common_attrs.items() + _stately_manifest_attrs.items()),
doc = """
Record a manifest of the files installed into the project.
""",
)
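# Example BUILD usage (an illustrative sketch; the load path and target
# names below are assumptions, not part of a documented API):
#
# load("//stately:rules.bzl", "project_installed_files")
#
# project_installed_files(
#     name = "install_generated",
#     srcs = [":generated_srcs"],
#     output = "src/generated",
#     strip_package_prefix = True,
# )
#
# Running `bazel run //pkg:install_generated` then copies the generated
# files into src/generated, tracking them in the .stately.yaml state file.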
|
python
|
from __future__ import unicode_literals
import logging
from django.utils.translation import ugettext as _
import django.views.decorators.cache
import django.views.decorators.csrf
import django.views.decorators.debug
import django.views.generic
import django.contrib.auth.decorators
import django.contrib.auth.views
import django.contrib.auth.forms
import django.contrib.auth
import django.contrib.messages
import django.shortcuts
import django.http
import django.template.response
import django.utils.module_loading
import django.urls
from django.conf import settings as app_settings
from accountsplus import signals
from accountsplus import forms
from accountsplus import settings
logger = logging.getLogger(__name__)
def logout_then_login(request, login_url=None, extra_context=None):
"""
Logs out the user if they are logged in. Then redirects to the log-in page.
"""
# if a user is masquerading, don't log them out, just kill the masquerade
if request.session.get('is_masquerading'):
return django.shortcuts.redirect('end_masquerade')
else:
return django.contrib.auth.views.logout_then_login(request, login_url)
@django.views.decorators.cache.never_cache
@django.contrib.auth.decorators.login_required
def masquerade(request, user_id=None):
User = django.contrib.auth.get_user_model()
return_page = request.META.get('HTTP_REFERER') or 'admin:index'
if not user_id:
django.contrib.messages.error(request, 'Masquerade failed: no user specified')
return django.shortcuts.redirect(return_page)
if not request.user.has_perm(User.PERMISSION_MASQUERADE):
django.contrib.messages.error(request, 'Masquerade failed: insufficient privileges')
return django.shortcuts.redirect(return_page)
if not (request.user.is_superuser or request.user.is_staff):
django.contrib.messages.error(request, 'Masquerade failed: must be staff or superuser')
return django.shortcuts.redirect(return_page)
try:
user = User.objects.get(pk=user_id)
except User.DoesNotExist:
logger.error('User {} ({}) masquerading failed for user {}'.format(request.user.email, request.user.id, user_id))
django.contrib.messages.error(request, 'Masquerade failed: unknown user {}'.format(user_id))
return django.shortcuts.redirect(return_page)
if user.is_superuser:
logger.warning(
'User {} ({}) cannot masquerade as superuser {} ({})'.format(request.user.email, request.user.id, user.email, user.id))
django.contrib.messages.warning(request, 'Cannot masquerade as a superuser')
return django.shortcuts.redirect(return_page)
admin_user = request.user
user.backend = request.session[django.contrib.auth.BACKEND_SESSION_KEY]
# log the new user in
signals.masquerade_start.send(sender=masquerade, request=request, user=admin_user, masquerade_as=user)
# this is needed to track whether this login is for a masquerade
setattr(user, 'is_masquerading', True)
setattr(user, 'masquerading_user', admin_user)
django.contrib.auth.login(request, user)
request.session['is_masquerading'] = True
request.session['masquerade_user_id'] = admin_user.id
request.session['return_page'] = return_page
request.session['masquerade_is_superuser'] = admin_user.is_superuser
logger.info(
'User {} ({}) masquerading as {} ({})'.format(admin_user.email, admin_user.id, request.user.email, request.user.id))
django.contrib.messages.success(request, 'Masquerading as user {0}'.format(user.email))
return django.http.HttpResponseRedirect(app_settings.LOGIN_REDIRECT_URL)
@django.views.decorators.cache.never_cache
@django.contrib.auth.decorators.login_required
def end_masquerade(request):
User = django.contrib.auth.get_user_model()
if 'is_masquerading' not in request.session:
return django.shortcuts.redirect('admin:index')
if 'masquerade_user_id' in request.session:
try:
masqueraded_user = request.user
user = User.objects.get(
pk=request.session['masquerade_user_id'])
user.backend = request.session[
django.contrib.auth.BACKEND_SESSION_KEY]
# this is needed to track whether this login is for a masquerade
django.contrib.auth.logout(request)
signals.masquerade_end.send(
sender=end_masquerade, request=request, user=user,
masquerade_as=masqueraded_user)
django.contrib.auth.login(request, user)
            logger.info('End masquerade user: {} ({}) by: {} ({})'.format(
masqueraded_user.email, masqueraded_user.id,
user.email, user.id))
django.contrib.messages.success(request, 'Masquerade ended')
except User.DoesNotExist as e:
            logger.critical(
'Masquerading user {} does not exist'.format(
request.session['masquerade_user_id']))
return django.shortcuts.redirect('admin:index')
@django.views.decorators.debug.sensitive_post_parameters()
@django.views.decorators.csrf.csrf_protect
@django.contrib.auth.decorators.login_required
def password_change(request,
template_name='registration/password_change_form.html',
post_change_redirect=None,
password_change_form=django.contrib.auth.forms.
PasswordChangeForm,
current_app=None, extra_context=None):
if post_change_redirect is None:
post_change_redirect = django.urls.reverse(
'password_change_done')
else:
post_change_redirect = django.shortcuts.resolve_url(
post_change_redirect)
if request.method == "POST":
form = password_change_form(user=request.user, data=request.POST)
if form.is_valid():
form.save()
# Updating the password logs out all other sessions for the user
# except the current one if
# django.contrib.auth.middleware.SessionAuthenticationMiddleware
# is enabled.
django.contrib.auth.update_session_auth_hash(request, form.user)
signals.user_password_change.send(
sender=password_change, request=request, user=form.user)
return django.http.HttpResponseRedirect(post_change_redirect)
else:
form = password_change_form(user=request.user)
context = {
'form': form,
'title': _('Password change'),
}
if extra_context is not None:
context.update(extra_context)
return django.template.response.TemplateResponse(request, template_name, context)
class PasswordResetView(django.contrib.auth.views.PasswordResetView):
def form_valid(self, form):
result = super().form_valid(form)
for user in form.get_users(form.cleaned_data['email']):
signals.user_password_reset_request.send(
sender=PasswordResetView, request=self.request, user=user)
return result
class GenericLockedView(django.views.generic.FormView):
template_name = settings.LOCKOUT_TEMPLATE
form_class = forms.CaptchaForm
urlPattern = ''
def get_success_url(self):
return django.urls.reverse_lazy(self.urlPattern)
def form_valid(self, form):
from axes import utils
utils.reset(username=form.cleaned_data['username'])
return super(GenericLockedView, self).form_valid(form)
class UserLockedOutView(GenericLockedView):
urlPattern = 'login'
class AdminLockedOutView(GenericLockedView):
urlPattern = 'admin:index'
|
python
|
# -*- coding: utf-8 -*-
import unittest
import tempson
from .coloredPrint import *
vm = tempson.vm()
class vmTest(unittest.TestCase):
def test_execute_code (self):
result = vm.evalExpToStr('a + 3', { 'a': 1 }, True)
try:
self.assertEqual(result, '4', 'Evaluate error')
except AssertionError:
            coloredPrint('\n [sandbox] × failed to evaluate expression to string.', 'RED')
else:
coloredPrint('\n [sandbox] √ evaluate expression to string.', 'GREEN')
def test_execute_code_with_import_expression (self):
try:
with self.assertRaises(RuntimeError):
result = vm.evalExpToStr('import os', {}, True)
except AssertionError:
            coloredPrint('\n [sandbox] × sensitive word not detected.', 'RED')
        else:
            coloredPrint('\n [sandbox] √ sensitive word detected.', 'GREEN')
def test_execute_code_without_variable (self):
try:
with self.assertRaises(NameError):
result = vm.evalExpToStr('a + 1', {}, True)
except AssertionError:
coloredPrint('\n [sandbox] × evaluate expression failed (variable not defined).', 'RED')
else:
coloredPrint('\n [sandbox] √ evaluate expression (variable not defined).', 'GREEN')
def test_execute_statement (self):
try:
with self.assertRaises(RuntimeError):
result = vm.evalExpToStr('a = 1', {}, True)
except AssertionError:
            coloredPrint('\n [sandbox] × statement not detected.', 'RED')
        else:
            coloredPrint('\n [sandbox] √ statement detected.', 'GREEN')
def test_execute_code_without_xss_filter (self):
result = vm.evalExpToStr('a + 3', { 'a': 1 }, False)
try:
self.assertEqual(result, '4', 'Turn off filter failed')
except AssertionError:
coloredPrint('\n [sandbox] × ignore xss protect failed.', 'RED')
else:
coloredPrint('\n [sandbox] √ ignore xss protect.', 'GREEN')
def test_execute_code_context_with_xss (self):
result = vm.evalExpToStr('a', { 'a': '<script>alert("123")</script>' }, True)
try:
self.assertEqual(result, '<script>alert("123")</script>', 'Filter error')
except AssertionError:
coloredPrint('\n [sandbox] × xss protect failed.', 'RED')
else:
coloredPrint('\n [sandbox] √ xss protect.', 'GREEN')
def test_evaluate_boolean_value (self):
result = vm.evalExpToBool('a == b', {
'a': 1,
'b': 1})
try:
self.assertEqual(result, True, 'Evaluate error')
except AssertionError:
coloredPrint('\n [sandbox] × evaluate expression to boolean failed.', 'RED')
else:
coloredPrint('\n [sandbox] √ evaluate expression to boolean.', 'GREEN')
def test_evaluate_boolean_value_false (self):
result = vm.evalExpToBool('a is not b', {
'a': 1,
'b': 1})
try:
self.assertEqual(result, False, 'Evaluate error')
except AssertionError:
coloredPrint('\n [sandbox] × evaluate expression to boolean failed (python syntax).', 'RED')
else:
coloredPrint('\n [sandbox] √ evaluate expression to boolean (python syntax).', 'GREEN')
|
python
|
#!/usr/bin/env python3
# Copyright (c) 2020, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
import json
import os.path
import re
import sys
_COMPAT_KEY = '__compat'
_EXPERIMENTAL_KEY = 'experimental'
_STATUS_KEY = 'status'
_SUPPORT_KEY = 'support'
_VERSION_ADDED_KEY = 'version_added'
def _get_browser_compat_data():
current_dir = os.path.dirname(__file__)
browser_compat_folder = os.path.abspath(
os.path.join(current_dir, '..', '..', '..', 'third_party', 'mdn',
'browser-compat-data', 'src'))
if not os.path.exists(browser_compat_folder):
raise RuntimeError('Browser compatibility data not found at %s' %
browser_compat_folder)
browser_compat_data = {}
INCLUDE_DIRS = [
'api',
'html',
'svg',
# TODO(srujzs): add more if needed
]
# Transform to absolute paths
INCLUDE_DIRS = [
os.path.join(browser_compat_folder, dir) for dir in INCLUDE_DIRS
]
def process_json_dict(json_dict):
# Returns a tuple of the interface name and the metadata corresponding
# to it.
if 'api' in json_dict:
# Get the interface name
api_dict = json_dict['api']
            interface_name = list(api_dict.keys())[0]
return (interface_name, api_dict[interface_name])
elif 'html' in json_dict:
html_dict = json_dict['html']
if 'elements' in html_dict:
elements_dict = html_dict['elements']
                element_name = list(elements_dict.keys())[0]
# Convert to WebCore name
interface = str('HTML' + element_name + 'Element')
return (interface, elements_dict[element_name])
elif 'svg' in json_dict:
svg_dict = json_dict['svg']
if 'elements' in svg_dict:
elements_dict = svg_dict['elements']
                element_name = list(elements_dict.keys())[0]
# Convert to WebCore name
interface = str('SVG' + element_name + 'Element')
return (interface, elements_dict[element_name])
return (None, None)
def visitor(arg, dir_path, names):
def should_process_dir(dir_path):
if os.path.abspath(dir_path) == browser_compat_folder:
return True
for dir in INCLUDE_DIRS:
if dir_path.startswith(dir):
return True
return False
if should_process_dir(dir_path):
for name in names:
file_name = os.path.join(dir_path, name)
(interface_path, ext) = os.path.splitext(file_name)
if ext == '.json':
with open(file_name) as src:
json_dict = json.load(src)
interface, metadata = process_json_dict(json_dict)
                        if interface is not None:
# Note: interface and member names do not
# necessarily have the same capitalization as
# WebCore, so we keep them all lowercase for easier
# matching later.
interface = interface.lower()
metadata = {
member.lower(): info
for member, info in metadata.items()
}
if interface in browser_compat_data:
_unify_metadata(browser_compat_data[interface],
metadata)
else:
browser_compat_data[interface] = metadata
else:
names[:] = [] # Do not go underneath
# Attempts to unify two compatibility infos by taking the union of both, and
# for conflicting information, taking the "stricter" of the two versions.
# Updates `a` in place to represent the union of `a` and `b`.
def _unify_compat(a, b):
def _has_compat_data(metadata):
return _COMPAT_KEY in metadata and _SUPPORT_KEY in metadata[_COMPAT_KEY]
# Unifies the support statements of both metadata and updates
# `support_a` in place. If either metadata do not contain simple support
# statements, defaults attribute to not supported.
def _unify_support(support_a, support_b):
for browser in support_a.keys():
if browser in support_b:
if _is_simple_support_statement(support_a[browser]) and _is_simple_support_statement(support_b[browser]):
support_a[browser][_VERSION_ADDED_KEY] = _unify_versions(
support_a[browser][_VERSION_ADDED_KEY],
support_b[browser][_VERSION_ADDED_KEY])
else:
# Only support simple statements for now.
support_a[browser] = {_VERSION_ADDED_KEY: None}
for browser in support_b.keys():
            if browser not in support_a:
support_a[browser] = support_b[browser]
if not _has_compat_data(b):
return
if not _has_compat_data(a):
a[_COMPAT_KEY] = b[_COMPAT_KEY]
return
support_a = a[_COMPAT_KEY][_SUPPORT_KEY]
support_b = b[_COMPAT_KEY][_SUPPORT_KEY]
_unify_support(support_a, support_b)
# Unifies any status info in the two metadata. Modifies `a` in place to
# represent the union of both `a` and `b`.
def _unify_status(a, b):
def _has_status(metadata):
return _COMPAT_KEY in metadata and _STATUS_KEY in metadata[_COMPAT_KEY]
# Modifies `status_a` in place to combine "experimental" tags.
def _unify_experimental(status_a, status_b):
# If either of the statuses report experimental, assume attribute is
# experimental.
status_a[_EXPERIMENTAL_KEY] = status_a.get(
_EXPERIMENTAL_KEY, False) or status_b.get(_EXPERIMENTAL_KEY, False)
if not _has_status(b):
return
if not _has_status(a):
a[_COMPAT_KEY] = b[_COMPAT_KEY]
return
status_a = a[_COMPAT_KEY][_STATUS_KEY]
status_b = b[_COMPAT_KEY][_STATUS_KEY]
_unify_experimental(status_a, status_b)
# If there exists multiple definitions of the same interface metadata e.g.
# elements, this attempts to unify the compatibilities for the interface as
# well as for each attribute.
def _unify_metadata(a, b):
# Unify the compatibility statement and status of the API or element.
_unify_compat(a, b)
_unify_status(a, b)
# Unify the compatibility statement and status of each attribute.
for attr in list(a.keys()):
if attr == _COMPAT_KEY:
continue
if attr in b:
_unify_compat(a[attr], b[attr])
_unify_status(a[attr], b[attr])
for attr in b.keys():
        if attr not in a:
a[attr] = b[attr]
    # os.path.walk was removed in Python 3; emulate its callback semantics
    # (including "clear names to stop descending") with os.walk.
    for dir_path, dir_names, file_names in os.walk(browser_compat_folder):
        names = dir_names + file_names
        visitor(browser_compat_folder, dir_path, names)
        if not names:
            dir_names[:] = []  # Do not go underneath
return browser_compat_data
# Given two version values for a given browser, chooses the more strict version.
def _unify_versions(version_a, version_b):
# Given two valid version strings, compares parts of the version string
# iteratively.
def _greater_version(version_a, version_b):
        version_a_split = list(map(int, version_a.split('.')))
        version_b_split = list(map(int, version_b.split('.')))
for i in range(min(len(version_a_split), len(version_b_split))):
if version_a_split[i] > version_b_split[i]:
return version_a
elif version_a_split[i] < version_b_split[i]:
return version_b
return version_a if len(version_a_split) > len(
version_b_split) else version_b
# Validate that we can handle the given version.
def _validate_version(version):
if not version:
return False
if version is True:
return True
        if isinstance(version, str):
            pattern = re.compile(r'^([0-9]+\.)*[0-9]+$')
if not pattern.match(version):
# It's possible for version strings to look like '<35'. We don't
# attempt to parse the conditional logic, and just default to
# potentially incompatible.
return None
return version
else:
raise ValueError(
'Type of version_a was not handled correctly! type(version) = '
+ str(type(version)))
version_a = _validate_version(version_a)
version_b = _validate_version(version_b)
# If one version reports not supported, default to not supported.
if not version_a or not version_b:
return False
# If one version reports always supported, the other version can only be
# more strict.
if version_a is True:
return version_b
if version_b is True:
return version_a
return _greater_version(version_a, version_b)
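# Worked examples for _unify_versions (derived from the rules above):
#   _unify_versions('63', '57')  -> '63'    (the stricter, higher version wins)
#   _unify_versions(True, '11')  -> '11'    (a concrete version beats "always supported")
#   _unify_versions('<35', '50') -> False   (conditional strings validate to None,
#                                            i.e. treated as unsupported)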
# At this time, we only handle simple support statements due to the complexity
# and variability around support statements with multiple elements.
def _is_simple_support_statement(support_statement):
if isinstance(support_statement, list): # array_support_statement
# TODO(srujzs): Parse this list to determine compatibility. Will
# likely require parsing for 'version_removed' keys. Notes about
# which browser version enabled this attribute for which
# platform also complicates things. For now, we assume it's not
# compatible.
return False
    if len(support_statement) > 1:
# If it's anything more complicated than 'version_added', like
# 'notes' that specify platform versions, we assume it's not
# compatible.
return False
return True
class MDNReader(object):
# Statically initialize and treat as constant.
_BROWSER_COMPAT_DATA = _get_browser_compat_data()
def __init__(self):
self._compat_overrides = {}
def _get_attr_compatibility(self, compat_data):
# Parse schema syntax of MDN data:
# https://github.com/mdn/browser-compat-data/blob/master/schemas/compat-data.schema.json
# For now, we will require support for browsers since the last IDL roll.
# TODO(srujzs): Determine if this is too conservative.
browser_version_map = {
'chrome': '63',
'firefox': '57',
'safari': '11',
# We still support the latest version of IE.
'ie': '11',
'opera': '50',
}
for browser in browser_version_map.keys():
support_data = compat_data[_SUPPORT_KEY]
if browser not in support_data:
return False
support_statement = support_data[browser]
if not _is_simple_support_statement(support_statement):
return False
version = support_statement[_VERSION_ADDED_KEY]
# Compare version strings, target should be the more strict version.
target = browser_version_map[browser]
if _unify_versions(version, target) != target:
return False
# If the attribute is experimental, we assume it's not compatible.
status_data = compat_data[_STATUS_KEY]
if _EXPERIMENTAL_KEY in status_data and status_data[_EXPERIMENTAL_KEY]:
return False
return True
def is_compatible(self, attribute):
# Since capitalization isn't consistent across MDN and WebCore, we
# compare lowercase equivalents for interface and attribute names.
interface = attribute.doc_js_interface_name.lower()
if interface in self._BROWSER_COMPAT_DATA and attribute.id and len(
attribute.id) > 0:
interface_dict = self._BROWSER_COMPAT_DATA[interface]
id_name = attribute.id.lower()
secure_context_key = 'isSecureContext'
if interface in self._compat_overrides and id_name in self._compat_overrides[
interface]:
return self._compat_overrides[interface][id_name]
elif secure_context_key in interface_dict:
# If the interface requires a secure context, all attributes are
# implicitly incompatible.
return False
elif id_name in interface_dict:
id_data = interface_dict[id_name]
return self._get_attr_compatibility(id_data[_COMPAT_KEY])
else:
# Might be an attribute that is defined in a parent interface.
# We defer until attribute emitting to determine if this is the
# case. Otherwise, return None.
pass
return None
def set_compatible(self, attribute, compatible):
# Override value in the MDN browser compatibility data.
        if compatible not in [True, False, None]:
raise ValueError('Cannot set a non-boolean object for compatible')
interface = attribute.doc_js_interface_name.lower()
        if interface not in self._compat_overrides:
self._compat_overrides[interface] = {}
if attribute.id and len(attribute.id) > 0:
id_name = attribute.id.lower()
self._compat_overrides[interface][id_name] = compatible
|
python
|
from django.utils import timezone
def get_now():
"""
Separado para poder establecer el tiempo como sea necesario durante los tests
"""
return timezone.now()
def setallattr(obj, **kwargs):
for k in kwargs:
setattr(obj, k, kwargs.get(k))
def dict_except(obj, *args):
result = {}
for k in obj:
if k not in args:
result[k] = obj[k]
return result
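# Examples (illustrative):
#   dict_except({'a': 1, 'b': 2, 'c': 3}, 'b')  -> {'a': 1, 'c': 3}
#   setallattr(obj, name='x', count=2)          # sets obj.name and obj.count
# In tests, get_now can be patched to freeze time, e.g. with pytest's
# monkeypatch (the module path here is an assumption):
#   monkeypatch.setattr('app.utils.get_now', lambda: FIXED_NOW)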
|
python
|
"""
Link: https://www.hackerrank.com/challenges/py-if-else/problem?isFullScreen=true
Problem: Python If-ELse
"""
#Solution
#!/bin/python3
import math
import os
import random
import re
import sys
if __name__ == '__main__':
n = int(input().strip())
if n % 2 != 0:
print("Weird")
else:
if n in range(2,5+1):
print("Not Weird")
elif n in range(6,20+1):
print("Weird")
else:
print("Not Weird")
|
python
|
import unittest
from pymal import account
from pymal.account_objects import account_animes
from pymal import anime
from pymal.account_objects import my_anime
from tests.constants_for_testing import ACCOUNT_TEST_USERNAME, ACCOUNT_TEST_PASSWORD, ANIME_ID
class AccountAnimeListTestCase(unittest.TestCase):
EXPECTED_LENGTH = 1
@classmethod
def setUpClass(cls):
cls.account = account.Account(ACCOUNT_TEST_USERNAME, ACCOUNT_TEST_PASSWORD)
cls.animes = cls.account.animes
@classmethod
def tearDownClass(cls):
account_animes.AccountAnimes._unregiter(cls.animes)
account.Account._unregiter(cls.account)
def test_len(self):
self.assertEqual(len(self.animes), self.EXPECTED_LENGTH)
def test_contains(self):
anm = anime.Anime(ANIME_ID)
self.assertIn(anm, self.animes)
    def test_contains_my_anime(self):
my_anm = my_anime.MyAnime(ANIME_ID, 0, self.account)
self.assertIn(my_anm, self.animes)
def test_contains_id(self):
self.assertIn(ANIME_ID, self.animes)
def test_str(self):
self.assertEqual(str(self.animes), "<User animes' number is {0:d}>".format(self.EXPECTED_LENGTH))
class AccountAnimeListInteraction(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.account = account.Account(ACCOUNT_TEST_USERNAME, ACCOUNT_TEST_PASSWORD)
cls.friend = list(cls.account.friends)[0]
cls.animes = cls.account.animes
cls.friend_animes = cls.friend.animes
@classmethod
def tearDownClass(cls):
account_animes.AccountAnimes._unregiter(cls.friend_animes)
account_animes.AccountAnimes._unregiter(cls.animes)
account.Account._unregiter(cls.friend)
account.Account._unregiter(cls.account)
def test_union(self):
regular = self.animes.union(self.friend_animes)
operator = self.animes | self.friend_animes
self.assertEqual(regular, operator)
def test_intersection(self):
regular = self.animes.intersection(self.friend_animes)
operator = self.animes & self.friend_animes
self.assertEqual(regular, operator)
def test_difference(self):
regular = self.animes.difference(self.friend_animes)
operator = self.animes - self.friend_animes
self.assertEqual(regular, operator)
def test_symmetric_difference(self):
regular = self.animes.symmetric_difference(self.friend_animes)
operator = self.animes ^ self.friend_animes
self.assertEqual(regular, operator)
def test_issubset(self):
regular = self.animes.issubset(self.friend_animes)
operator = self.animes <= self.friend_animes
self.assertEqual(regular, operator)
    @unittest.skip('need to rethink this')
    def test_issuperset(self):
        regular = self.animes.issuperset(self.friend_animes)
operator = self.animes >= self.friend_animes
self.assertEqual(regular, operator)
def main():
unittest.main()
if '__main__' == __name__:
main()
|
python
|
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.views import APIView
from rest_framework import permissions
from views.retrieve_test_view import TEST_QUESTIONS, retrieve_test_data, is_test_public
class TestQuestionsSet(APIView):
def get(self, request):
return retrieve_test_data(request, TEST_QUESTIONS)
# assign the permissions depending on if the test is public or not
def get_permissions(self):
try:
# is_public = true
if is_test_public(self.request.query_params.get("test_name", None)):
return [permissions.IsAuthenticatedOrReadOnly()]
# is_public = false
else:
return [permissions.IsAuthenticated()]
# is_test_public does not exist
except ObjectDoesNotExist:
return [permissions.IsAdminUser()]
# http://localhost/api/test-questions/?test_name=emibSampleTest
|
python
|
from flask import request
from flask_restplus import Namespace, Resource, fields
from api.utils import validator
from api.service.community_question import get_all_community_questions, create_community_question, get_all_question_comments
from .community_comment import community_comment_schema
import json
api = Namespace('community_questions', description='Community questions CRUD operations')
community_question_schema = api.model('CommunityQuestion', {
'question_id': fields.Integer(required=False, description='Id of this question', readonly=True),
'question_title': fields.String(required=True, description='Question title'),
'question': fields.String(required=True, description='Question content'),
    'user_id': fields.Integer(required=True, description='Id of the user who asked this question'),
    'username': fields.String(required=False, description='Username of the user who asked this question', readonly=True),
    'user_image': fields.Integer(required=False, description='User image id of the user who asked this question', readonly=True),
    'date': fields.String(required=False, description='String representing the datetime when this question was posted', readonly=True),
'comments': fields.List(fields.Nested(community_comment_schema), readonly=True)
})
@api.route("/")
class CommunityQuestionsRoute(Resource):
@api.marshal_with(community_question_schema, as_list=True)
@api.doc(
params={
            'get_comment': 'true to also retrieve the comments of every question (default: false)',
            'check_comment_like': 'Id of the user whose like status should be reported on the returned comments (user_like_status)',
            'page': 'Number of the page',
            'page_size': 'Number of questions on each page',
            'user_id': 'Id of the user'
},
responses={
200: 'Active community questions list',
}
)
def get(self):
        return get_all_community_questions(
            # bool('false') is True, so compare the raw query string instead
            get_comments=request.args.get('get_comment', 'false').lower() == 'true',
            check_comment_like=int(request.args.get('check_comment_like', 0)),
            page=int(request.args.get('page', 0)),
            page_size=int(request.args.get('page_size', 0)),
            user_id=int(request.args.get('user_id', 0))
        )
@api.expect(community_question_schema, envelope='json')
@api.doc(responses={
201: 'Question successfully created',
409: 'Conflict, User not exist',
422: 'Validation Error'
})
@api.marshal_with(community_question_schema)
def post(self):
validator.validate_payload(request.json, community_question_schema)
return create_community_question(data=request.json), 201
@api.route("/<int:question_id>/comunity_comments")
class CommunityQuestionCommentsRoute(Resource):
@api.marshal_with(community_comment_schema, as_list=True)
@api.doc(
params={
            'check_comment_like': 'Id of the user whose like status should be reported on the returned comments (user_like_status)'
},
responses={
            200: 'Comments of the question',
}
)
def get(self, question_id):
        return get_all_question_comments(question_id, int(request.args.get('check_comment_like', 0)))
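# Example (the URL prefix depends on where this namespace is mounted):
# GET /community_questions/3/comunity_comments?check_comment_like=42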
|
python
|
import straws_screen
import gravity_screen
import diagon_screen
# 3440 x 1440
# WIDTH = 3440
# HEIGHT = 1440
# 2880 x 1800
WIDTH = 2880
HEIGHT = 1800
# 800 x 800
# WIDTH = 800
# HEIGHT = 800
# my_screen = straws_screen.StrawsScreen()
my_screen = gravity_screen.GravityScreen()
# my_screen = diagon_screen.DiagonScreen()
my_screen.draw_screen(WIDTH, HEIGHT)
|
python
|
"""
File: hailstone.py
Name: Justin Kao
-----------------------
This program should implement a console program that simulates
the execution of the Hailstone sequence, defined by Douglas
Hofstadter. Output format should match what is shown in the sample
run in the Assignment 2 Handout.
"""
def main():
"""
Use while loop to keep doing Hailstone sequence.
If the number entered is a Even number, divided by 2.
If the number entered is a Odd number, make 3n+1.
Use another variable(num_old) to store the previous number when printing out.
"""
print("This program computes Hailstone sequences!")
num = int(input("Enter a number: "))
print("----------------------------------")
count = 0
while True:
if num == 1:
break
if num % 2 == 1: # Odd number
num_old = num # Previous number
num = int(num*3+1)
print(str(num_old) + " is odd, so I make 3n+1: " + str(num))
else: # Even number
num_old = num # Previous number
num = int(num / 2)
print(str(num_old) + " is even, so I take half: " + str(num))
count += 1
print("I took " + str(count) + " steps to reach 1.")
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == "__main__":
main()
|
python
|
# -*- coding: utf-8 -*-
"""
Base class for Topologies.
You can use this class to create your own topology. Note that every Topology
should implement a way to compute the (1) best particle, the (2) next
position, and the (3) next velocity given the Swarm's attributes at a given
timestep. Not implementing these methods will raise an error.
In addition, this class must interface with any class found in the
:mod:`pyswarms.backend.swarms.Swarm` module.
"""
# Import standard library
import abc
import logging
from ...utils.reporter import Reporter
class Topology(abc.ABC):
def __init__(self, static, **kwargs):
"""Initialize the topology."""
# Initialize logger
self.rep = Reporter(logger=logging.getLogger(__name__))
# Initialize attributes
self.static = static
self.neighbor_idx = None
        if not self.static:
            self.rep.log(
                "Running on `dynamic` topology, "
                "set `static=True` for fixed neighbors.",
                lvl=logging.DEBUG,
            )
@abc.abstractmethod
def compute_gbest(self, swarm):
"""
Compute the best particle of the swarm.
Return
------
Cost and position of the best particle of the swarm.
"""
raise NotImplementedError("Topology::compute_gbest()")
@abc.abstractmethod
def compute_position(self, swarm):
"""Update the swarm's position-matrix."""
raise NotImplementedError("Topology::compute_position()")
@abc.abstractmethod
def compute_velocity(self, swarm):
"""Update the swarm's velocity-matrix."""
raise NotImplementedError("Topology::compute_velocity()")
|
python
|
import numpy as np
import numpy.linalg as lin
MAX_ITERS = 20
EPSILON = 1.0e-7
def h(theta, X, y):
margins = y * X.dot(theta)
return 1 / (1 + np.exp(-margins))
def J(theta, X, y):
probs = h(theta, X, y)
mm = probs.size
return (-1 / mm) * np.sum(np.log(probs))
def gradJ(theta, X, y):
probs = h(theta, X, -y) * y
mm = probs.size
return (-1 / mm) * X.T.dot(probs)
def hessJ(theta, X, y):
d = h(theta, X, y)
probs = d * (1 - d)
mm = probs.size
prob_vec = np.diag(probs)
hessJ = (1 / mm) * (X.T.dot(prob_vec).dot(X))
return hessJ
def logistic_regression(X, y, epsilon, max_iters):
mm = X.shape[0]
nn = X.shape[1]
# The cost of the ith iteration of Newton-Raphson.
cost = np.zeros(max_iters)
theta = np.zeros(nn)
    for i in range(0, max_iters):
        cost[i] = J(theta, X, y)
        grad = gradJ(theta, X, y)
        H = hessJ(theta, X, y)
        Hinv = lin.inv(H)
        theta = theta - Hinv.dot(grad)
        # Stop early once successive costs agree to within epsilon
        # (the previously unused `epsilon` parameter).
        if i > 0 and abs(cost[i] - cost[i - 1]) < epsilon:
            cost = cost[:i + 1]
            break
    return (theta, cost)
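# Quick demo on a small, non-separable toy problem (labels must be +/-1 for
# h() above; the first column of ones acts as the intercept):
if __name__ == '__main__':
    X = np.array([[1.0, -2.0], [1.0, -0.5], [1.0, 0.1], [1.0, 0.5], [1.0, 2.0]])
    y = np.array([-1.0, -1.0, 1.0, -1.0, 1.0])
    theta, cost = logistic_regression(X, y, EPSILON, MAX_ITERS)
    print('theta:', theta)
    print('final cost:', cost[-1])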
|
python
|
#############################################################################
#
# Author: Ruth HUEY, Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2000
#
#############################################################################
#
# $Header: /opt/cvs/python/packages/share1.5/AutoDockTools/autotors4Commands.py,v 1.3 2009/02/10 17:44:05 rhuey Exp $
#
# $Id: autotors4Commands.py,v 1.3 2009/02/10 17:44:05 rhuey Exp $
#
#
#
#
#
#
#
"""
This Module facilitates selecting and formatting a ligand for a subsequent
AutoDock4.0 calculation. The steps in this process are:
* The user selects the small molecule from a list of molecules
already in the moleculeViewer OR as a PDBQ file, a PDB file or
a MOL2 file from a fileBrowser.
* The user selects the ROOT atom of the ligand either:
o by picking it or
o by autoroot which sets the root to be the atom in the
molecule which has the smallest 'largest sub-tree.'
* Next the user decides which possible and active torsions he wants
to disallow, changing them from active to inactive. This is done by picking
an active 'green' bond which turns it inactive or 'purple'. This is
reversible. The user can also disallow all peptide backbone torsions and/or
all torsions of amide bonds.
* Carbons in cycles can be tested for aromaticity. If the angle
between the normals to adjacent atoms in the cycle is less than 7.5 Degrees,
the cycle is considered aromatic: its carbons are renamed "A.." and their
element type set to 'A'. (This is for the force-field calculations done
in AutoDock.) This Module does this conversion reversibly. Also, the user
is able to select a carbon to convert (reversibly) and he can change
the value of the aromaticity cut-off.
* Non-polar hydrogens and lone pairs are merged which means that the charge of
each is added to its heavy atom and the hydrogen atoms themselves are not written
in the output file, thus in some sense 'removing' them from the molecule.
'Fewer' atoms simplifies the AutoDock run.
* The last function of this Module is to write a file which contains
the correctly formatted ligand atoms. The ROOT section of the molecule
expands from the selected ROOT atom out to include all atoms adjacent to it
up to the first active torsion. The active torsions set the position of
BRANCH key words in the output pdbq file (and their corresponding
ENDBRANCH key words). These keywords are nested to set up a
Breadth-First Order Traversal. Autotors also calculates the torsional degrees
of freedom (TORSDOF) which is the number of possible torsions less the number of
symmetry-equivalent torsions (such as a bond to a NH3). This key word is the
last line of the pdbq file.
"""
import Tkinter
from ViewerFramework.VFCommand import CommandGUI
from AutoDockTools.autotorsCommands import rootSph, markSph,\
menuText, AtorsMoleculeChooser, MAXTORS, AdtSetMode,\
AtorsReader, Ators4MoleculeChooser, Ators4Reader, AtorsRefWriter, \
RigidMolecule, RigidMolecule4, AUTOTORSWriter, AUTOTORS4Writer, \
MarkRoot, SelectRoot, SetTorsionNumberGUICommand, SetTorsionNumber, \
AutoRoot, SetRotatableBonds, DefiningRotatableBonds, SetBondRotatableFlag,\
CheckAromatic, StopCheckAromatic, SetCarbonNames, ChangeAromaticCutOff, \
TogglerootSphere, AutoAutoTors, StopAutoTors, AtorsInit, AtorsInitMol, \
ProcessCharges, ProcessBonds, rootSph, markSph, check_autotors_geoms,\
MAXTORS, menuText, warningText, checkMolCharges,\
autoMergeNPHS, set_autoMergeNPHS
Ators4MoleculeChooserGUI=CommandGUI()
Ators4MoleculeChooserGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'], menuText['Choose Molecule4'], cascadeName = menuText['Input Molecule'])
Ators4ReaderGUI = CommandGUI()
Ators4ReaderGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'], menuText['Read Molecule4'], cascadeName = menuText['Input Molecule'])
AtorsRefWriterGUI = CommandGUI()
AtorsRefWriterGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'], menuText['Ref Molecule'], cascadeName = menuText['Input Molecule'])
RigidMolecule4GUI = CommandGUI()
RigidMolecule4GUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'], menuText['Rigid Molecule4'], cascadeName = menuText['Input Molecule'])
AUTOTORS4WriterGUI=CommandGUI()
AUTOTORS4WriterGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'], menuText['WritePDBQTMB'],
cascadeName = menuText['WriteMB'])
MarkRootGUI=CommandGUI()
MarkRootGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'], menuText['SRA1'],
cascadeName = menuText['DefineRigidRootMB'])
SelectRootGUI=CommandGUI()
SelectRootGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'],\
menuText['ByPicking'], cascadeName = menuText['DefineRigidRootMB'])
SetTorsionNumberGUICommandGUI=CommandGUI()
SetTorsionNumberGUICommandGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'],\
menuText['SetTorsionNumber'], cascadeName = menuText['DefineRigidRootMB'] )
AutoRootGUI=CommandGUI()
AutoRootGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'],\
menuText['Automatically'], cascadeName = menuText['DefineRigidRootMB'])
DefiningRotatableBondsGUI = CommandGUI()
DefiningRotatableBondsGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'],\
menuText['DefineRotatableBonds'], cascadeName = menuText['DefineRigidRootMB'])
CheckAromaticGUI = CommandGUI()
CheckAromaticGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'],\
menuText['RenameAromaticCarbons'], cascadeName = menuText['AromaticCarbonsMB'])
StopCheckAromaticGUI = CommandGUI()
StopCheckAromaticGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'],\
menuText['RestoreAliphaticCarbons'], cascadeName = menuText['AromaticCarbonsMB'])
SetCarbonNamesGUI = CommandGUI()
SetCarbonNamesGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'],\
menuText['SetCarbonNames'], cascadeName = menuText['AromaticCarbonsMB'])
ChangeAromaticCutOffGUI=CommandGUI()
ChangeAromaticCutOffGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'],\
menuText['ChangeAromaticityCriteria'], cascadeName = menuText['AromaticCarbonsMB'])
TogglerootSphereGUI=CommandGUI()
TogglerootSphereGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'],\
menuText['ShowAutotorsRootSphMB'], cascadeName = menuText['DefineRigidRootMB'])
AutoAutoTorsGUI=CommandGUI()
AutoAutoTorsGUI.addMenuCommand('AutoTools4Bar', menuText['AutoTorsMB'], \
menuText['AutomaticAutotorsSetupMB'], cascadeName = menuText['Input Molecule'])
commandList = [
{'name':'AD4tors_readLigand','cmd':Ators4Reader(),
'gui':Ators4ReaderGUI},
{'name':'AD4tors_chooseLigand','cmd':Ators4MoleculeChooser(),
'gui':Ators4MoleculeChooserGUI},
{'name':'AD4tors_rigidLigand','cmd':RigidMolecule4(),
'gui':RigidMolecule4GUI},
{'name':'AD4tors_automaticLigandFormatting','cmd':AutoAutoTors(),
'gui':AutoAutoTorsGUI},
# {'name':'AD4tors_writeRef','cmd':AtorsRefWriter(),
# 'gui':AtorsRefWriterGUI},
{'name':'AD4tors_setRoot','cmd':SelectRoot(),'gui':SelectRootGUI},
{'name':'AD4tors_autoRoot','cmd':AutoRoot(),'gui':AutoRootGUI},
# {'name':'AD4tors_addChainToRootGC','cmd':AddChainToRootGUICommand(),
# 'gui':AddChainToRootGUICommandGUI},
# {'name':'AD4tors_addChainToRoot','cmd':AddChainToRoot(),'gui':None},
# {'name':'AD4tors_removeChainFromRootGC','cmd':RemoveChainFromRootGUICommand(),
# 'gui':RemoveChainFromRootGUICommandGUI},
# {'name':'AD4tors_removeChainFromRoot','cmd':RemoveChainFromRoot(),'gui':None},
{'name':'AD4tors_markRoot','cmd':MarkRoot(),'gui':MarkRootGUI},
{'name':'AD4tors_showRootSphere','cmd':TogglerootSphere(),
'gui':TogglerootSphereGUI},
{'name':'AD4tors_defineRotBonds', 'cmd':DefiningRotatableBonds(),
'gui':DefiningRotatableBondsGUI },
{'name':'AD4tors_limitTorsionsGC','cmd':SetTorsionNumberGUICommand(),
'gui':SetTorsionNumberGUICommandGUI},
## {'name':'AD4tors_changePlanarCarbonsToA','cmd':CheckAromatic(),
## 'gui':CheckAromaticGUI},
## {'name':'AD4tors_changeAromaticCarbonsToC','cmd':StopCheckAromatic(),
## 'gui':StopCheckAromaticGUI},
{'name':'AD4tors_setCarbonNames','cmd':SetCarbonNames(),
'gui':SetCarbonNamesGUI},
{'name':'AD4tors_changePlanarityCriteria','cmd':ChangeAromaticCutOff(),
'gui':ChangeAromaticCutOffGUI},
{'name':'AD4tors_writeFormattedPDBQT','cmd':AUTOTORS4Writer(),
'gui':AUTOTORS4WriterGUI},
]
def initModule(vf):
for dict in commandList:
vf.addCommand(dict['cmd'],dict['name'],dict['gui'])
if not hasattr(vf, 'ADTSetMode'):
vf.addCommand(AdtSetMode(), 'ADTSetMode')
if not hasattr(vf, 'ADtors_limitTorsions'):
vf.addCommand(SetTorsionNumber(), 'ADtors_limitTorsions')
if not hasattr(vf, 'ADtors_setBondRotatableFlag'):
vf.addCommand(SetBondRotatableFlag(), 'ADtors_setBondRotatableFlag')
if not hasattr(vf, 'ADtors_stop'):
vf.addCommand(StopAutoTors(), 'ADtors_stop')
if vf.hasGui:
vf.GUI.menuBars['AutoTools4Bar']._frame.config( {'background':'tan'})
for item in vf.GUI.menuBars['AutoTools4Bar'].menubuttons.values():
item.configure(background = 'tan')
if not hasattr(vf.GUI, 'adt4Bar'):
vf.GUI.adt4Bar = vf.GUI.menuBars['AutoTools4Bar']
vf.GUI.adt4Frame = vf.GUI.adt4Bar.menubuttons.values()[0].master
if not hasattr(vf.GUI, 'adt4ModeLabel'):
mbs = {}
packing_list = []
for c in vf.GUI.adt4Frame.children.values():
if isinstance(c, Tkinter.Menubutton):
mbs[c.cget('text')] = c
packing_list.append(c.cget('text'))
c.pack_forget()
vf.GUI.adt4ModeLabel=Tkinter.Label(vf.GUI.adt4Frame, text="ADT4.0", width=6,
relief='sunken', borderwidth=1, fg='DarkGreen',
bg = 'ivory',anchor='w' )
vf.GUI.adt4ModeLabel.pack(side='left')
vf.GUI.adt4ModeLabel.bind("<Double-Button-1>", vf.ADTSetMode.guiCallback())
for t in packing_list:
try:
c = mbs[t]
c.pack(side='left')
except:
pass
# if not hasattr(vf.GUI, 'ligandLabel'):
# vf.GUI.ligandLabelLabel = Tkinter.Label(vf.GUI.adtFrame, \
# text="Ligand:", bg='tan')
# vf.GUI.ligandLabelLabel.pack(side='left')
# vf.GUI.ligandLabel=Tkinter.Label(vf.GUI.adtFrame, text="None", width=4,
# relief='sunken', borderwidth=1,
# anchor='w' )
# vf.GUI.ligandLabel.pack(side='left')
|
python
|
################################################################################
# siegkx1.py
#
# Post Processor for the Sieg KX1 machine
# It is just an ISO machine, but I don't want the tool definition lines
#
# Dan Heeks, 5th March 2009
import nc
import iso_modal
import math
################################################################################
class Creator(iso_modal.Creator):
def __init__(self):
iso_modal.Creator.__init__(self)
self.output_tool_definitions = False
################################################################################
nc.creator = Creator()
|
python
|
#!/Users/duhuifeng/Code/RentHouseSite/renthouse/macvenv/bin/python
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
python
|
from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from rest_framework.response import Response
from sentry.app import tsdb
from sentry.api.base import EnvironmentMixin
from sentry.api.bases.project import ProjectEndpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.models import Environment
class ProjectUserStatsEndpoint(EnvironmentMixin, ProjectEndpoint):
def get(self, request, project):
try:
environment_id = self._get_environment_id_from_request(
request,
project.organization_id,
)
except Environment.DoesNotExist:
raise ResourceDoesNotExist
now = timezone.now()
then = now - timedelta(days=30)
results = tsdb.get_distinct_counts_series(
tsdb.models.users_affected_by_project,
(project.id, ),
then,
now,
rollup=3600 * 24,
environment_id=environment_id,
)[project.id]
return Response(results)
|
python
|
# Generated by Django 2.2.1 on 2019-06-08 01:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Palace',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('login', models.CharField(max_length=50)),
('password', models.CharField(max_length=50)),
],
options={
'verbose_name': 'Palace credentials',
'verbose_name_plural': 'Palace credentials',
},
),
migrations.CreateModel(
name='Property',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('unit', models.CharField(default=None, max_length=7)),
('street_number', models.CharField(max_length=5)),
('street_name', models.CharField(max_length=40)),
('suburb', models.CharField(max_length=50)),
('city', models.CharField(default='Auckland', max_length=20)),
('postcode', models.IntegerField(null=True)),
('code', models.CharField(max_length=11)),
('change_code', models.IntegerField(default=1)),
('publish_entry', models.CharField(default=None, max_length=3)),
('date_available', models.CharField(max_length=35)),
('bathrooms', models.IntegerField(blank=True, null=True)),
('bedrooms', models.IntegerField(blank=True, null=True)),
('carparks', models.IntegerField(blank=True, null=True)),
('property_class', models.CharField(blank=True, max_length=15)),
('is_new_construction', models.CharField(blank=True, default=False, max_length=3)),
('pets', models.CharField(max_length=3)),
('smokers', models.CharField(max_length=3)),
('agent_email1', models.EmailField(max_length=254)),
('agent_email2', models.EmailField(max_length=254)),
('agent_name', models.CharField(max_length=50)),
('agent_mobile_num', models.CharField(max_length=15)),
('agent_work_num', models.CharField(max_length=15)),
('rental_period', models.CharField(default='Week', max_length=6)),
('rent', models.IntegerField()),
('advert_text', models.TextField(blank=True, max_length=2000)),
('thumbnail', models.ImageField(blank=True, null=True, upload_to='media/property_images')),
],
options={
'verbose_name': 'Property',
'verbose_name_plural': 'Properties',
'ordering': ['-id'],
},
),
migrations.CreateModel(
name='PropertyImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default=None, null=True, upload_to='media/property_images')),
('property', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='properties.Property')),
],
),
]
|
python
|
import re
from SingleLog.log import Logger
from . import i18n
from . import connect_core
from . import screens
from . import command
from . import _api_util
def has_new_mail(api) -> int:
cmd_list = list()
cmd_list.append(command.go_main_menu)
cmd_list.append(command.ctrl_z)
cmd_list.append('m')
# cmd_list.append('1')
# cmd_list.append(command.enter)
cmd = ''.join(cmd_list)
current_capacity = None
plus_count = 0
    index_pattern = re.compile(r'(\d+)')
checked_index_list = list()
break_detect = False
target_list = [
connect_core.TargetUnit(
i18n.mail_box,
screens.Target.InMailBox,
break_detect=True,
log_level=Logger.DEBUG
)
]
api.connect_core.send(
cmd,
target_list,
)
current_capacity, _ = _api_util.get_mailbox_capacity(api)
if current_capacity > 20:
cmd_list = list()
cmd_list.append(command.go_main_menu)
cmd_list.append(command.ctrl_z)
cmd_list.append('m')
cmd_list.append('1')
cmd_list.append(command.enter)
cmd = ''.join(cmd_list)
while True:
if current_capacity > 20:
api.connect_core.send(
cmd,
target_list,
)
last_screen = api.connect_core.get_screen_queue()[-1]
last_screen_list = last_screen.split('\n')
last_screen_list = last_screen_list[3:-1]
last_screen_list = [x[:10] for x in last_screen_list]
current_plus_count = 0
for line in last_screen_list:
if str(current_capacity) in line:
break_detect = True
index_result = index_pattern.search(line)
if index_result is None:
continue
current_index = index_result.group(0)
if current_index in checked_index_list:
continue
checked_index_list.append(current_index)
if '+' not in line:
continue
current_plus_count += 1
plus_count += current_plus_count
if break_detect:
break
cmd = command.ctrl_f
return plus_count
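# Usage sketch (illustrative; `bot` is assumed to be a logged-in PyPtt-style
# API object exposing connect_core):
#   unread = has_new_mail(bot)
#   print('unread mails:', unread)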
|
python
|
numerator = 32
denominator = 0
print(numerator / denominator)  # dividing by zero, big no-no
# all code below this won't get executed
print("won't get executed")
|
python
|
from PIL import Image
import os
import glob
saveToPath = "C:\\Users\\phili\\Desktop\\"
filePath = input("File: ")
filePath = filePath[1:-1]  # strip the surrounding quotes from a copied path
fileName = filePath.split("\\")[-1]
image = Image.open(filePath)
size = image.size
print("Current image size of " + fileName + " is: " + str(size))
newSize = input("(Default is 300x168) New image size: ")
if newSize == "\r" or newSize == "" or newSize == " ":
newSize = [300, 168]
else:
"130 130"
newSize = newSize.split(" ")
newSize = [int(newSize[0]), int(newSize[1])]
print("New size is " + "(" + str(newSize[0]) + ", " + str(newSize[1]) + ")")
newImage = image.resize((newSize[0], newSize[1]), Image.ANTIALIAS)
newImage.save(saveToPath + "Downscaled_" + fileName, optimize=True, quality=95)
|
python
|
def log(msg, dry_run=False):
prefix = ''
if dry_run:
prefix = '[DRY RUN] '
print('{}{}'.format(prefix, msg))
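# Examples:
#   log('deleting 3 files')                # prints "deleting 3 files"
#   log('deleting 3 files', dry_run=True)  # prints "[DRY RUN] deleting 3 files"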
|
python
|
#!/usr/bin/python3
"""
some code taken from huffman.py
"""
import sys
from functools import reduce
from operator import add
from math import log, log2
from random import shuffle, choice, randint, seed
from bruhat.argv import argv
EPSILON = 1e-8
def is_close(a, b):
return abs(a-b) < EPSILON
class Multiset(object):
"un-normalized probability distribution"
def __init__(self, cs={}, tp=None):
for k in cs.keys():
assert tp is None or tp is type(k)
tp = type(k)
self.tp = tp
items = [(k, v) for (k, v) in cs.items() if v>0]
cs = dict(items)
self.cs = cs # map item -> count
self._len = sum(self.cs.values(), 0)
keys = list(cs.keys())
keys.sort() # canonicalize
self._keys = keys
self.val = tuple((k, cs[k]) for k in keys) # for hash, __lt__, ...
def __str__(self):
cs = self.cs
keys = self._keys
items = reduce(add, [(str(key),)*cs[key] for key in keys], ())
items = '+'.join(items)
return '(%s)'%items
__repr__ = __str__
def get_str(self):
cs = self.cs
keys = self._keys
items = [(key if cs[key]==1 else "%d%s"%(cs[key], key)) for key in keys]
items = '+'.join(items)
return items
def check_tp(self, other):
assert self.tp is None or other.tp is None or self.tp is other.tp, "%s =!= %s"%(self, other)
def __eq__(self, other):
self.check_tp(other)
return self.cs == other.cs
def __ne__(self, other):
self.check_tp(other)
return self.cs != other.cs
def __lt__(self, other):
self.check_tp(other)
return self.val < other.val
def __hash__(self):
return hash(self.val)
def __mul__(X, Y):
"cartesian product of multisets"
if isinstance(Y, int):
return X.__rmul__(Y)
if not isinstance(Y, Multiset):
return NotImplemented
if not X.cs or not Y.cs:
return Multiset() # zero
if X.tp is str and Y.tp is str:
xcs, ycs = X.cs, Y.cs
cs = dict(((x+y), xcs[x]*ycs[y]) for x in xcs for y in ycs)
return Multiset(cs, str)
if X.tp is Box and Y.tp is str:
item = Multiset({}, Box)
for box in X.terms():
item = item + box*Y
return item
if X.tp is str and Y.tp is Box:
item = Multiset({}, Box)
for box in Y.terms():
item = item + Multiset({X*box : 1}, Box)
return item
if X.tp is Box and Y.tp is Box:
item = Multiset({}, Box)
for x_box in X.terms():
for y_box in Y.terms():
item = item + x_box * y_box
return item
assert 0, "%s =X= %s" % (X, Y)
def __rmul__(self, r):
"left multiplication by a number"
assert isinstance(r, int)
assert r >= 0
cs = dict((k, r*v) for (k, v) in self.cs.items())
return Multiset(cs, self.tp)
def __add__(X, Y):
# WARNING: not disjoint union (coproduct)
Y = Multiset.promote(Y)
X.check_tp(Y)
xcs, ycs = X.cs, Y.cs
cs = dict(xcs)
for k, v in ycs.items():
cs[k] = cs.get(k, 0) + v
return Multiset(cs, X.tp)
def __getitem__(self, k):
return self.cs[k]
def keys(self):
return self._keys
def items(self):
return self.cs.items()
def terms(self):
cs = self.cs
for k in self._keys:
for i in range(cs[k]):
yield k
def disjoint(X, Y):
# We only keep non-zero keys, so this works
lhs = set(X.cs.keys())
rhs = set(Y.cs.keys())
return not bool(lhs.intersection(rhs))
def contains(self, other):
"self contains other"
cs = self.cs
for k,v in other.cs.items():
if v > cs.get(k, 0):
return False
return True
def __len__(self):
return self._len
def isomorphic(self, other):
if self._len != other._len:
return False
lhs = set(self.cs.values())
rhs = set(other.cs.values())
return lhs == rhs
@classmethod
def promote(cls, item):
if isinstance(item, Multiset):
pass
else:
item = Multiset({item:1})
return item
class Box(object):
def __init__(self, X):
assert isinstance(X, Multiset)
assert X.tp is None or X.tp is str
self.X = X
def __str__(self):
return "Box%s"%(self.X,)
__repr__ = __str__
def __eq__(self, other):
if isinstance(other, Multiset):
return Multiset.promote(self) == other
return self.X == other.X
def __ne__(self, other):
if isinstance(other, Multiset):
return Multiset.promote(self) != other
return self.X != other.X
def __lt__(self, other):
assert isinstance(other, Box), other
return self.X < other.X
def __hash__(self):
return hash(self.X)
def right_multiply(self, Y):
"right multiply by a multiset"
assert isinstance(Y, Multiset)
X = self.X
cs = {}
for k, v in Y.items():
k = X*Multiset({k:1})
box = Box(k)
cs[box] = v
return Multiset(cs)
def __add__(self, other):
self = Multiset.promote(self)
other = Multiset.promote(other)
return self + other
def __mul__(self, other):
if isinstance(other, Multiset):
item = self.right_multiply(other)
elif isinstance(other, Box):
item = self.X * other + self * other.X
elif isinstance(other, int):
item = Multiset({self : other})
else:
assert 0
return item
def __rmul__(self, other):
if isinstance(other, Multiset):
item = Box(other*self.X)
elif isinstance(other, int):
item = Box(other*self.X)
else:
assert 0
return item
def main():
X = Multiset({"a":3, "b":1})
assert (X+X) == Multiset({"a":6, "b":2})
assert (X+X) == 2*X
#print(X, X.entropy())
XX = X*X
Y = Multiset({"a":2, "b":2})
#print(Y, Y.entropy())
assert str(Y) == "(a+a+b+b)"
a = Multiset({"a" : 1})
b = Multiset({"b" : 1})
c = Multiset({"c" : 1})
d = Multiset({"d" : 1})
e = Multiset({"e" : 1})
f = Multiset({"f" : 1})
g = Multiset({"g" : 1})
assert a.disjoint(b)
assert not (a+b).disjoint(b)
assert list((a+2*b).terms()) == ["a", "b", "b"]
assert not a.contains(b)
assert (a+b).contains(b)
assert not (a+b).contains(2*b)
def mkmultiset():
X = randint(0,2)*a + randint(0,2)*b + randint(0,2)*c + randint(0,2)*d
return X
zero = Multiset()
X = a+2*b
BX = Box(X)
Y = c
#print("%s*%s = %s"%(Y, BX, Y*BX))
#print("%s*%s = %s"%(BX, Y, BX*Y))
#print("%s*%s = %s"%(BX, (c+c), BX*(c+c)))
# seed(0)
# test distributivity
for trial in range(10000):
X = mkmultiset()
Y = mkmultiset()
Z = mkmultiset()
#print(X, Y, Z)
BX = Box(X)
BY = Box(Y)
BZ = Box(Z)
n = randint(0, 3)
m = randint(0, 3)
        lhs, rhs = n*(BX+BY), n*BX + n*BY
        assert lhs == rhs
assert (n+m)*BX == n*BX + m*BX
assert (n*X) == X*n
assert (n*X)*Box(Y) == X*(n*Box(Y))
assert n*Box(X) == Box(n*X)
assert Box(X)*(n*Y) == (Box(X)*n)*Y
# Left module:
if X != zero:
assert X * (BY + BZ) == X*BY + X*BZ
#assert (X+Y)*BZ == X*BZ + Y*BZ # nope!
assert (X*Y) * Box(Z) == X*(Y*Box(Z))
# Right module:
assert (BX + BY)*Z == BX*Z + BY*Z
if X != zero:
assert BX * (Y + Z) == BX*Y + BX*Z
assert BX*(Y*Z) == (BX*Y)*Z
# bimodule:
if X!=zero:
lhs = (X*Box(Y))*Z
rhs = X*(Box(Y)*Z)
assert lhs == rhs, "%s != %s"%(lhs, rhs)
def mkbox(count=3):
item = Multiset({}, Box)
for i in range(randint(0, count)):
b = mkmultiset()
if b == zero:
continue
item += Box(b)
return item
for trial in range(100):
A = mkbox()
B = mkbox()
C = mkbox()
lhs = (A+B) * C
rhs = A*C + B*C
assert lhs == rhs
lhs = A*(B+C)
rhs = A*B + A*C
assert lhs == rhs
def strip(s):
s = str(s)
s = s.replace("Box", "")
s = s.replace("(", "")
s = s.replace(")", "")
s = s.split("+")
s.sort()
return '+'.join(s)
#zero = Multiset({}, Box)
n = 10000
#seed(0)
for trial in range(1000):
A = mkbox(1)
B = mkbox(1)
C = mkbox(1)
#if A==zero or B==zero or C==zero:
#continue
lhs = (A*B)*C
rhs = A*(B*C)
#print(lhs)
#print(rhs)
if lhs == rhs:
continue
assert strip(lhs) == strip(rhs)
# s = str(lhs)
# if len(s) < n:
# n = len(s)
# #print(A, B, C)
# print(strip(lhs))
# print(strip(rhs))
# print()
a = Multiset({"a" : 1})
b = Multiset({"b" : 1})
c = Multiset({"c" : 1})
d = Multiset({"d" : 1})
A = Multiset({Box(a):1})
B = Multiset({Box(b):1})
C = Multiset({Box(c+d):1})
print((A*B)*C)
print(A*(B*C))
if __name__ == "__main__":
main()
|
python
|
from graphene.types.generic import GenericScalar
from ..utils import dashed_to_camel
class CamelJSON(GenericScalar):
@classmethod
def serialize(cls, value):
return dashed_to_camel(value)
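# Usage sketch (assumption: dashed_to_camel rewrites dashed keys to camelCase
# recursively; its definition lives in ..utils and is not shown here):
#   CamelJSON.serialize({"created-at": "2020-01-01"})
#   # -> {"createdAt": "2020-01-01"}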
|
python
|
from django.conf.urls import url
from . import views
app_name = 'users'
urlpatterns = [
url(
regex=r'^~redirect/$',
view=views.UserRedirectView.as_view(),
name='redirect'
),
url(
regex=r'^$',
view=views.UserDetailView.as_view(),
name='detail'
),
url(
regex=r'^~osu-account/$',
view=views.OsuUsernameView.as_view(),
name='osu-account'),
]
|
python
|
import torch
from PIL import Image
import torch.nn.functional as F
def load_image(filename, size=None, scale=None):
img = Image.open(filename).convert('RGB')
if size is not None:
img = img.resize((size, size), Image.ANTIALIAS)
elif scale is not None:
img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)), Image.ANTIALIAS)
return img
def save_image(filename, data):
img = data.clone().clamp(0, 255).numpy()
img = img.transpose(1, 2, 0).astype("uint8")
img = Image.fromarray(img)
img.save(filename)
def ft_map_to_patches(input, patch_size=8):
    B, C, W, H = input.size()
    x = F.unfold(input, kernel_size=patch_size, stride=patch_size)
x = x.view(B, C, patch_size, patch_size, -1)
x = x.permute(0, 4, 1, 2, 3)
return x
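# Shape sketch for ft_map_to_patches: with input (B, C, W, H) = (2, 64, 32, 32)
# and patch_size=8, F.unfold yields (2, 64*8*8, 16), so the result has shape
# (B, n_patches, C, patch_size, patch_size) = (2, 16, 64, 8, 8).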
def gram_matrix(y):
(b, ch, h, w) = y.size()
features = y.view(b, ch, w * h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
def patch_gram_matrix(y):
(b, patch, ch, h, w) = y.size()
features = y.reshape(b*patch, ch, w*h)
features_t = features.transpose(1, 2)
gram = features.bmm(features_t) / (ch * h * w)
return gram
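# Gram sketch: for y of shape (b, ch, h, w), gram_matrix returns (b, ch, ch),
# each entry the inner product of two flattened channel maps normalized by
# ch*h*w; patch_gram_matrix does the same per patch, returning (b*patch, ch, ch).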
def normalize_batch(batch):
# normalize using imagenet mean and std
mean = batch.new_tensor([0.485, 0.456, 0.406]).view(-1, 1, 1)
std = batch.new_tensor([0.229, 0.224, 0.225]).view(-1, 1, 1)
# batch = batch / 255.0
return (batch - mean) / std
|
python
|
class TrapException(RuntimeError):
pass
class Memory:
def __init__(self, comp):
self.storage = dict()
self.comp = comp
def __getitem__(self, key):
if not isinstance(key, int) or key < 0:
self.comp.trap("Invalid memory access key {}".format(key))
else:
return self.storage.get(key, 0)
def __setitem__(self, key, value):
if not isinstance(key, int) or key < 0:
self.comp.trap("Invalid memory access key {}".format(key))
else:
self.storage[key] = value
def load(self, content, start=0):
self.storage.update({idx+start: value for idx, value in enumerate(content)})
return self
class Comp:
def trap(self, msg):
self.error.append((self.pc, msg))
self.mode = "halted"
raise TrapException()
def writeTarget(self, argNo):
mode = self.mem[self.pc]//(10*10**argNo)%10
if mode == 2:
return "b[{}]".format(self.mem[self.pc+argNo])
elif mode == 0:
return "m[{}]".format(self.mem[self.pc+argNo])
else:
self.trap("Illegal mode for write addressing {}".format(mode))
def readVal(self, argNo):
mode = self.mem[self.pc]//(10*10**argNo)%10
if mode == 1:
return "#{}".format(self.mem[self.pc+argNo])
elif mode == 2:
return "b[{}]".format(self.mem[self.pc+argNo])
elif mode == 0:
return "m[{}]".format(self.mem[self.pc+argNo])
else:
self.trap("Illegal mode for read addressing {}".format(mode))
def addI(self):
target = self.writeTarget(3)
arg1 = self.readVal(1)
arg2 = self.readVal(2)
return self.pc+4, "ADD {} + {} -> {}".format(arg1, arg2, target)
def mulI(self):
target = self.writeTarget(3)
arg1 = self.readVal(1)
arg2 = self.readVal(2)
return self.pc+4, "MUL {} * {} -> {}".format(arg1, arg2, target)
def inpI(self):
target = self.writeTarget(1)
return self.pc+2, "INP => {}".format(target)
def outI(self):
arg1 = self.readVal(1)
return self.pc + 2, "OUT {} =>".format(arg1)
def jitI(self):
arg1 = self.readVal(1)
arg2 = self.readVal(2)
return self.pc+3, "JIT {} ? {} -> PC".format(arg1, arg2)
def jifI(self):
arg1 = self.readVal(1)
arg2 = self.readVal(2)
return self.pc+3, "JIF ! {} ? {} -> PC".format(arg1, arg2)
def ltI(self):
target = self.writeTarget(3)
arg1 = self.readVal(1)
arg2 = self.readVal(2)
return self.pc+4, "LT {} < {} ? -> {}".format(arg1, arg2, target)
def eqI(self):
target = self.writeTarget(3)
arg1 = self.readVal(1)
arg2 = self.readVal(2)
return self.pc+4, "EQ {} == {} ? -> {}".format(arg1, arg2, target)
def aBase(self):
arg1 = self.readVal(1)
return self.pc+2, "BAS {} -> b".format(arg1)
def halt(self):
self.state = "halted"
return self.pc+1, "HLT"
def getInstr(self):
return self.mem[self.pc] % 100
ilist = {1:addI, 2:mulI, 3:inpI, 4:outI, 5:jitI, 6:jifI, 7:ltI, 8:eqI,
9: aBase, 99:halt}
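    # Instruction decoding (Intcode): the low two digits of mem[pc] select the
    # opcode; each higher decimal digit is the addressing mode of one argument
    # (0 = memory, 1 = immediate, 2 = relative base). E.g. 1002 disassembles
    # as MUL with modes (0, 1, 0): "MUL m[a] * #b -> m[c]", where a, b, c are
    # the three words following the opcode.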
def __init__(self, pinp):
self.mem = Memory(self).load(pinp)
self.pc = 0
self.relBase = 0
self.state = "ready"
self.error = []
def ready(self):
return self.state == "ready"
def waiting(self):
return self.state == "waiting"
def halted(self):
return self.state == "halted"
def step(self):
instr = self.getInstr()
if instr in self.ilist:
prePc = self.pc
mem4 = ' '.join(("{:03d}".format(self.mem[i]) for i in range(prePc, prePc+4)))
self.pc, txt = self.ilist[instr](self)
return "{:04d}:{} | {}".format(prePc, mem4, txt)
else:
            self.trap("Invalid instruction {}".format(self.mem[self.pc]))
def run(self):
try:
            while self.ready():
print(self.step())
except TrapException:
if (len(self.error) > 0):
for error in self.error:
print("{:04d}: {}".format(error[0], error[1]))
with open("day05.txt") as inpFile:
inp = inpFile.read()
inData = [int(s) for s in inp.strip().split(",")]
comp = Comp(inData)
comp.run()
|
python
|
"""Dependency labeling Edge Probing task.
Task source paper: https://arxiv.org/pdf/1905.06316.pdf.
Task data prep directions: https://github.com/nyu-mll/jiant/blob/master/probing/data/README.md.
"""
from dataclasses import dataclass
from jiant.tasks.lib.templates.shared import labels_to_bimap
from jiant.tasks.lib.templates import edge_probing_two_span
from jiant.utils.python.io import read_json_lines
@dataclass
class Example(edge_probing_two_span.Example):
@property
def task(self):
return DepTask
@dataclass
class TokenizedExample(edge_probing_two_span.TokenizedExample):
pass
@dataclass
class DataRow(edge_probing_two_span.DataRow):
pass
@dataclass
class Batch(edge_probing_two_span.Batch):
pass
class DepTask(edge_probing_two_span.AbstractProbingTask):
Example = Example
TokenizedExample = TokenizedExample
DataRow = DataRow
Batch = Batch
LABELS = [
"acl",
"acl:relcl",
"advcl",
"advmod",
"amod",
"appos",
"aux",
"aux:pass",
"case",
"cc",
"cc:preconj",
"ccomp",
"compound",
"compound:prt",
"conj",
"cop",
"csubj",
"csubj:pass",
"dep",
"det",
"det:predet",
"discourse",
"dislocated",
"expl",
"fixed",
"flat",
"flat:foreign",
"goeswith",
"iobj",
"list",
"mark",
"nmod",
"nmod:npmod",
"nmod:poss",
"nmod:tmod",
"nsubj",
"nsubj:pass",
"nummod",
"obj",
"obl",
"obl:npmod",
"obl:tmod",
"orphan",
"parataxis",
"punct",
"reparandum",
"root",
"vocative",
"xcomp",
]
LABEL_TO_ID, ID_TO_LABEL = labels_to_bimap(LABELS)
@property
def num_spans(self):
return 2
def get_train_examples(self):
return self._create_examples(lines=read_json_lines(self.train_path), set_type="train")
def get_val_examples(self):
return self._create_examples(lines=read_json_lines(self.val_path), set_type="val")
def get_test_examples(self):
return self._create_examples(lines=read_json_lines(self.test_path), set_type="test")
@classmethod
def _create_examples(cls, lines, set_type):
examples = []
for (line_num, line) in enumerate(lines):
for (target_num, target) in enumerate(line["targets"]):
span1 = target["span1"]
span2 = target["span2"]
examples.append(
Example(
guid="%s-%s-%s" % (set_type, line_num, target_num),
text=line["text"],
span1=span1,
span2=span2,
labels=[target["label"]] if set_type != "test" else [cls.LABELS[-1]],
)
)
return examples
|
python
|
from pydigilent import *
import time
ad2 = AnalogDiscovery2()
v = 3.5
ad2.power.vplus.enable = 1
ad2.power.vplus.voltage = v
# after configuring power options, the master must be switched to enable
ad2.power.master.enable = 1
ad2.scope.channel1.vertical_division = 1.
while ad2.scope.channel1.voltage < v:
print(ad2.scope.channel1.voltage)
time.sleep(.5)
print(ad2.scope.channel1.voltage)
|
python
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
import os
author = " * author:tlming16\n"
email = " * email:[email protected]\n"
license = " * all wrong reserved\n"
text= "/* \n" + author + email+ license +"*/\n";
file_list =os.listdir("./test");
def write_file( f):
if not f.endswith(".cpp"):
return
context=None;
with open(f,'r') as nf:
context = nf.read();
with open(f,"w") as nf:
nf.write( text);
nf.write(context);
os.system(" mv {} {}".format(f,"./cpp/"))
for f in file_list:
write_file("./test/"+f);
|
python
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Subnet(object):
"""
A logical subdivision of a VCN. Each subnet
consists of a contiguous range of IP addresses that do not overlap with
other subnets in the VCN. Example: 172.16.1.0/24. For more information, see
`Overview of the Networking Service`__ and
`VCNs and Subnets`__.
To use any of the API operations, you must be authorized in an IAM policy. If you're not authorized,
talk to an administrator. If you're an administrator who needs to write policies to give users access, see
`Getting Started with Policies`__.
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/overview.htm
__ https://docs.cloud.oracle.com/iaas/Content/Network/Tasks/managingVCNs.htm
__ https://docs.cloud.oracle.com/iaas/Content/Identity/Concepts/policygetstarted.htm
"""
#: A constant which can be used with the lifecycle_state property of a Subnet.
#: This constant has a value of "PROVISIONING"
LIFECYCLE_STATE_PROVISIONING = "PROVISIONING"
#: A constant which can be used with the lifecycle_state property of a Subnet.
#: This constant has a value of "AVAILABLE"
LIFECYCLE_STATE_AVAILABLE = "AVAILABLE"
#: A constant which can be used with the lifecycle_state property of a Subnet.
#: This constant has a value of "TERMINATING"
LIFECYCLE_STATE_TERMINATING = "TERMINATING"
#: A constant which can be used with the lifecycle_state property of a Subnet.
#: This constant has a value of "TERMINATED"
LIFECYCLE_STATE_TERMINATED = "TERMINATED"
#: A constant which can be used with the lifecycle_state property of a Subnet.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
def __init__(self, **kwargs):
"""
Initializes a new Subnet object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param availability_domain:
The value to assign to the availability_domain property of this Subnet.
:type availability_domain: str
:param cidr_block:
The value to assign to the cidr_block property of this Subnet.
:type cidr_block: str
:param compartment_id:
The value to assign to the compartment_id property of this Subnet.
:type compartment_id: str
:param defined_tags:
The value to assign to the defined_tags property of this Subnet.
:type defined_tags: dict(str, dict(str, object))
:param dhcp_options_id:
The value to assign to the dhcp_options_id property of this Subnet.
:type dhcp_options_id: str
:param display_name:
The value to assign to the display_name property of this Subnet.
:type display_name: str
:param dns_label:
The value to assign to the dns_label property of this Subnet.
:type dns_label: str
:param freeform_tags:
The value to assign to the freeform_tags property of this Subnet.
:type freeform_tags: dict(str, str)
:param id:
The value to assign to the id property of this Subnet.
:type id: str
:param ipv6_cidr_block:
The value to assign to the ipv6_cidr_block property of this Subnet.
:type ipv6_cidr_block: str
:param ipv6_virtual_router_ip:
The value to assign to the ipv6_virtual_router_ip property of this Subnet.
:type ipv6_virtual_router_ip: str
:param lifecycle_state:
The value to assign to the lifecycle_state property of this Subnet.
Allowed values for this property are: "PROVISIONING", "AVAILABLE", "TERMINATING", "TERMINATED", "UPDATING", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param prohibit_internet_ingress:
The value to assign to the prohibit_internet_ingress property of this Subnet.
:type prohibit_internet_ingress: bool
:param prohibit_public_ip_on_vnic:
The value to assign to the prohibit_public_ip_on_vnic property of this Subnet.
:type prohibit_public_ip_on_vnic: bool
:param route_table_id:
The value to assign to the route_table_id property of this Subnet.
:type route_table_id: str
:param security_list_ids:
The value to assign to the security_list_ids property of this Subnet.
:type security_list_ids: list[str]
:param subnet_domain_name:
The value to assign to the subnet_domain_name property of this Subnet.
:type subnet_domain_name: str
:param time_created:
The value to assign to the time_created property of this Subnet.
:type time_created: datetime
:param vcn_id:
The value to assign to the vcn_id property of this Subnet.
:type vcn_id: str
:param virtual_router_ip:
The value to assign to the virtual_router_ip property of this Subnet.
:type virtual_router_ip: str
:param virtual_router_mac:
The value to assign to the virtual_router_mac property of this Subnet.
:type virtual_router_mac: str
"""
self.swagger_types = {
'availability_domain': 'str',
'cidr_block': 'str',
'compartment_id': 'str',
'defined_tags': 'dict(str, dict(str, object))',
'dhcp_options_id': 'str',
'display_name': 'str',
'dns_label': 'str',
'freeform_tags': 'dict(str, str)',
'id': 'str',
'ipv6_cidr_block': 'str',
'ipv6_virtual_router_ip': 'str',
'lifecycle_state': 'str',
'prohibit_internet_ingress': 'bool',
'prohibit_public_ip_on_vnic': 'bool',
'route_table_id': 'str',
'security_list_ids': 'list[str]',
'subnet_domain_name': 'str',
'time_created': 'datetime',
'vcn_id': 'str',
'virtual_router_ip': 'str',
'virtual_router_mac': 'str'
}
self.attribute_map = {
'availability_domain': 'availabilityDomain',
'cidr_block': 'cidrBlock',
'compartment_id': 'compartmentId',
'defined_tags': 'definedTags',
'dhcp_options_id': 'dhcpOptionsId',
'display_name': 'displayName',
'dns_label': 'dnsLabel',
'freeform_tags': 'freeformTags',
'id': 'id',
'ipv6_cidr_block': 'ipv6CidrBlock',
'ipv6_virtual_router_ip': 'ipv6VirtualRouterIp',
'lifecycle_state': 'lifecycleState',
'prohibit_internet_ingress': 'prohibitInternetIngress',
'prohibit_public_ip_on_vnic': 'prohibitPublicIpOnVnic',
'route_table_id': 'routeTableId',
'security_list_ids': 'securityListIds',
'subnet_domain_name': 'subnetDomainName',
'time_created': 'timeCreated',
'vcn_id': 'vcnId',
'virtual_router_ip': 'virtualRouterIp',
'virtual_router_mac': 'virtualRouterMac'
}
self._availability_domain = None
self._cidr_block = None
self._compartment_id = None
self._defined_tags = None
self._dhcp_options_id = None
self._display_name = None
self._dns_label = None
self._freeform_tags = None
self._id = None
self._ipv6_cidr_block = None
self._ipv6_virtual_router_ip = None
self._lifecycle_state = None
self._prohibit_internet_ingress = None
self._prohibit_public_ip_on_vnic = None
self._route_table_id = None
self._security_list_ids = None
self._subnet_domain_name = None
self._time_created = None
self._vcn_id = None
self._virtual_router_ip = None
self._virtual_router_mac = None
@property
def availability_domain(self):
"""
Gets the availability_domain of this Subnet.
The subnet's availability domain. This attribute will be null if this is a regional subnet
instead of an AD-specific subnet. Oracle recommends creating regional subnets.
Example: `Uocm:PHX-AD-1`
:return: The availability_domain of this Subnet.
:rtype: str
"""
return self._availability_domain
@availability_domain.setter
def availability_domain(self, availability_domain):
"""
Sets the availability_domain of this Subnet.
The subnet's availability domain. This attribute will be null if this is a regional subnet
instead of an AD-specific subnet. Oracle recommends creating regional subnets.
Example: `Uocm:PHX-AD-1`
:param availability_domain: The availability_domain of this Subnet.
:type: str
"""
self._availability_domain = availability_domain
@property
def cidr_block(self):
"""
**[Required]** Gets the cidr_block of this Subnet.
The subnet's CIDR block.
Example: `10.0.1.0/24`
:return: The cidr_block of this Subnet.
:rtype: str
"""
return self._cidr_block
@cidr_block.setter
def cidr_block(self, cidr_block):
"""
Sets the cidr_block of this Subnet.
The subnet's CIDR block.
Example: `10.0.1.0/24`
:param cidr_block: The cidr_block of this Subnet.
:type: str
"""
self._cidr_block = cidr_block
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this Subnet.
The OCID of the compartment containing the subnet.
:return: The compartment_id of this Subnet.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this Subnet.
The OCID of the compartment containing the subnet.
:param compartment_id: The compartment_id of this Subnet.
:type: str
"""
self._compartment_id = compartment_id
@property
def defined_tags(self):
"""
Gets the defined_tags of this Subnet.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this Subnet.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this Subnet.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this Subnet.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def dhcp_options_id(self):
"""
Gets the dhcp_options_id of this Subnet.
The OCID of the set of DHCP options that the subnet uses.
:return: The dhcp_options_id of this Subnet.
:rtype: str
"""
return self._dhcp_options_id
@dhcp_options_id.setter
def dhcp_options_id(self, dhcp_options_id):
"""
Sets the dhcp_options_id of this Subnet.
The OCID of the set of DHCP options that the subnet uses.
:param dhcp_options_id: The dhcp_options_id of this Subnet.
:type: str
"""
self._dhcp_options_id = dhcp_options_id
@property
def display_name(self):
"""
Gets the display_name of this Subnet.
A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
:return: The display_name of this Subnet.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this Subnet.
A user-friendly name. Does not have to be unique, and it's changeable.
Avoid entering confidential information.
:param display_name: The display_name of this Subnet.
:type: str
"""
self._display_name = display_name
@property
def dns_label(self):
"""
Gets the dns_label of this Subnet.
A DNS label for the subnet, used in conjunction with the VNIC's hostname and
VCN's DNS label to form a fully qualified domain name (FQDN) for each VNIC
within this subnet (for example, `bminstance-1.subnet123.vcn1.oraclevcn.com`).
Must be an alphanumeric string that begins with a letter and is unique within the VCN.
The value cannot be changed.
The absence of this parameter means the Internet and VCN Resolver
will not resolve hostnames of instances in this subnet.
For more information, see
`DNS in Your Virtual Cloud Network`__.
Example: `subnet123`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/dns.htm
:return: The dns_label of this Subnet.
:rtype: str
"""
return self._dns_label
@dns_label.setter
def dns_label(self, dns_label):
"""
Sets the dns_label of this Subnet.
A DNS label for the subnet, used in conjunction with the VNIC's hostname and
VCN's DNS label to form a fully qualified domain name (FQDN) for each VNIC
within this subnet (for example, `bminstance-1.subnet123.vcn1.oraclevcn.com`).
Must be an alphanumeric string that begins with a letter and is unique within the VCN.
The value cannot be changed.
The absence of this parameter means the Internet and VCN Resolver
will not resolve hostnames of instances in this subnet.
For more information, see
`DNS in Your Virtual Cloud Network`__.
Example: `subnet123`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/dns.htm
:param dns_label: The dns_label of this Subnet.
:type: str
"""
self._dns_label = dns_label
@property
def freeform_tags(self):
"""
Gets the freeform_tags of this Subnet.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this Subnet.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this Subnet.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this Subnet.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def id(self):
"""
**[Required]** Gets the id of this Subnet.
The subnet's Oracle ID (OCID).
:return: The id of this Subnet.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Subnet.
The subnet's Oracle ID (OCID).
:param id: The id of this Subnet.
:type: str
"""
self._id = id
@property
def ipv6_cidr_block(self):
"""
Gets the ipv6_cidr_block of this Subnet.
For an IPv6-enabled subnet, this is the IPv6 CIDR block for the subnet's IP address space.
The subnet size is always /64. See `IPv6 Addresses`__.
Example: `2001:0db8:0123:1111::/64`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/ipv6.htm
:return: The ipv6_cidr_block of this Subnet.
:rtype: str
"""
return self._ipv6_cidr_block
@ipv6_cidr_block.setter
def ipv6_cidr_block(self, ipv6_cidr_block):
"""
Sets the ipv6_cidr_block of this Subnet.
For an IPv6-enabled subnet, this is the IPv6 CIDR block for the subnet's IP address space.
The subnet size is always /64. See `IPv6 Addresses`__.
Example: `2001:0db8:0123:1111::/64`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/ipv6.htm
:param ipv6_cidr_block: The ipv6_cidr_block of this Subnet.
:type: str
"""
self._ipv6_cidr_block = ipv6_cidr_block
@property
def ipv6_virtual_router_ip(self):
"""
Gets the ipv6_virtual_router_ip of this Subnet.
For an IPv6-enabled subnet, this is the IPv6 address of the virtual router.
Example: `2001:0db8:0123:1111:89ab:cdef:1234:5678`
:return: The ipv6_virtual_router_ip of this Subnet.
:rtype: str
"""
return self._ipv6_virtual_router_ip
@ipv6_virtual_router_ip.setter
def ipv6_virtual_router_ip(self, ipv6_virtual_router_ip):
"""
Sets the ipv6_virtual_router_ip of this Subnet.
For an IPv6-enabled subnet, this is the IPv6 address of the virtual router.
Example: `2001:0db8:0123:1111:89ab:cdef:1234:5678`
:param ipv6_virtual_router_ip: The ipv6_virtual_router_ip of this Subnet.
:type: str
"""
self._ipv6_virtual_router_ip = ipv6_virtual_router_ip
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this Subnet.
The subnet's current state.
Allowed values for this property are: "PROVISIONING", "AVAILABLE", "TERMINATING", "TERMINATED", "UPDATING", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this Subnet.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this Subnet.
The subnet's current state.
:param lifecycle_state: The lifecycle_state of this Subnet.
:type: str
"""
allowed_values = ["PROVISIONING", "AVAILABLE", "TERMINATING", "TERMINATED", "UPDATING"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def prohibit_internet_ingress(self):
"""
Gets the prohibit_internet_ingress of this Subnet.
Whether to disallow ingress internet traffic to VNICs within this subnet. Defaults to false.
For IPV4, `prohibitInternetIngress` behaves similarly to `prohibitPublicIpOnVnic`.
If it is set to false, VNICs created in this subnet will automatically be assigned public IP
addresses unless specified otherwise during instance launch or VNIC creation (with the `assignPublicIp`
flag in :class:`CreateVnicDetails`).
If `prohibitInternetIngress` is set to true, VNICs created in this subnet cannot have public IP addresses
        (that is, it's a private subnet).
For IPv6, if `prohibitInternetIngress` is set to `true`, internet access is not allowed for any
IPv6s assigned to VNICs in the subnet. Otherwise, ingress internet traffic is allowed by default.
Example: `true`
:return: The prohibit_internet_ingress of this Subnet.
:rtype: bool
"""
return self._prohibit_internet_ingress
@prohibit_internet_ingress.setter
def prohibit_internet_ingress(self, prohibit_internet_ingress):
"""
Sets the prohibit_internet_ingress of this Subnet.
Whether to disallow ingress internet traffic to VNICs within this subnet. Defaults to false.
For IPV4, `prohibitInternetIngress` behaves similarly to `prohibitPublicIpOnVnic`.
If it is set to false, VNICs created in this subnet will automatically be assigned public IP
addresses unless specified otherwise during instance launch or VNIC creation (with the `assignPublicIp`
flag in :class:`CreateVnicDetails`).
If `prohibitInternetIngress` is set to true, VNICs created in this subnet cannot have public IP addresses
        (that is, it's a private subnet).
For IPv6, if `prohibitInternetIngress` is set to `true`, internet access is not allowed for any
IPv6s assigned to VNICs in the subnet. Otherwise, ingress internet traffic is allowed by default.
Example: `true`
:param prohibit_internet_ingress: The prohibit_internet_ingress of this Subnet.
:type: bool
"""
self._prohibit_internet_ingress = prohibit_internet_ingress
@property
def prohibit_public_ip_on_vnic(self):
"""
Gets the prohibit_public_ip_on_vnic of this Subnet.
Whether VNICs within this subnet can have public IP addresses.
Defaults to false, which means VNICs created in this subnet will
automatically be assigned public IP addresses unless specified
otherwise during instance launch or VNIC creation (with the
`assignPublicIp` flag in
:class:`CreateVnicDetails`).
If `prohibitPublicIpOnVnic` is set to true, VNICs created in this
subnet cannot have public IP addresses (that is, it's a private
subnet).
Example: `true`
:return: The prohibit_public_ip_on_vnic of this Subnet.
:rtype: bool
"""
return self._prohibit_public_ip_on_vnic
@prohibit_public_ip_on_vnic.setter
def prohibit_public_ip_on_vnic(self, prohibit_public_ip_on_vnic):
"""
Sets the prohibit_public_ip_on_vnic of this Subnet.
Whether VNICs within this subnet can have public IP addresses.
Defaults to false, which means VNICs created in this subnet will
automatically be assigned public IP addresses unless specified
otherwise during instance launch or VNIC creation (with the
`assignPublicIp` flag in
:class:`CreateVnicDetails`).
If `prohibitPublicIpOnVnic` is set to true, VNICs created in this
subnet cannot have public IP addresses (that is, it's a private
subnet).
Example: `true`
:param prohibit_public_ip_on_vnic: The prohibit_public_ip_on_vnic of this Subnet.
:type: bool
"""
self._prohibit_public_ip_on_vnic = prohibit_public_ip_on_vnic
@property
def route_table_id(self):
"""
**[Required]** Gets the route_table_id of this Subnet.
The OCID of the route table that the subnet uses.
:return: The route_table_id of this Subnet.
:rtype: str
"""
return self._route_table_id
@route_table_id.setter
def route_table_id(self, route_table_id):
"""
Sets the route_table_id of this Subnet.
The OCID of the route table that the subnet uses.
:param route_table_id: The route_table_id of this Subnet.
:type: str
"""
self._route_table_id = route_table_id
@property
def security_list_ids(self):
"""
Gets the security_list_ids of this Subnet.
The OCIDs of the security list or lists that the subnet uses. Remember
that security lists are associated *with the subnet*, but the
rules are applied to the individual VNICs in the subnet.
:return: The security_list_ids of this Subnet.
:rtype: list[str]
"""
return self._security_list_ids
@security_list_ids.setter
def security_list_ids(self, security_list_ids):
"""
Sets the security_list_ids of this Subnet.
The OCIDs of the security list or lists that the subnet uses. Remember
that security lists are associated *with the subnet*, but the
rules are applied to the individual VNICs in the subnet.
:param security_list_ids: The security_list_ids of this Subnet.
:type: list[str]
"""
self._security_list_ids = security_list_ids
@property
def subnet_domain_name(self):
"""
Gets the subnet_domain_name of this Subnet.
The subnet's domain name, which consists of the subnet's DNS label,
the VCN's DNS label, and the `oraclevcn.com` domain.
For more information, see
`DNS in Your Virtual Cloud Network`__.
Example: `subnet123.vcn1.oraclevcn.com`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/dns.htm
:return: The subnet_domain_name of this Subnet.
:rtype: str
"""
return self._subnet_domain_name
@subnet_domain_name.setter
def subnet_domain_name(self, subnet_domain_name):
"""
Sets the subnet_domain_name of this Subnet.
The subnet's domain name, which consists of the subnet's DNS label,
the VCN's DNS label, and the `oraclevcn.com` domain.
For more information, see
`DNS in Your Virtual Cloud Network`__.
Example: `subnet123.vcn1.oraclevcn.com`
__ https://docs.cloud.oracle.com/iaas/Content/Network/Concepts/dns.htm
:param subnet_domain_name: The subnet_domain_name of this Subnet.
:type: str
"""
self._subnet_domain_name = subnet_domain_name
@property
def time_created(self):
"""
Gets the time_created of this Subnet.
The date and time the subnet was created, in the format defined by `RFC3339`__.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:return: The time_created of this Subnet.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this Subnet.
The date and time the subnet was created, in the format defined by `RFC3339`__.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:param time_created: The time_created of this Subnet.
:type: datetime
"""
self._time_created = time_created
@property
def vcn_id(self):
"""
**[Required]** Gets the vcn_id of this Subnet.
The OCID of the VCN the subnet is in.
:return: The vcn_id of this Subnet.
:rtype: str
"""
return self._vcn_id
@vcn_id.setter
def vcn_id(self, vcn_id):
"""
Sets the vcn_id of this Subnet.
The OCID of the VCN the subnet is in.
:param vcn_id: The vcn_id of this Subnet.
:type: str
"""
self._vcn_id = vcn_id
@property
def virtual_router_ip(self):
"""
**[Required]** Gets the virtual_router_ip of this Subnet.
The IP address of the virtual router.
Example: `10.0.14.1`
:return: The virtual_router_ip of this Subnet.
:rtype: str
"""
return self._virtual_router_ip
@virtual_router_ip.setter
def virtual_router_ip(self, virtual_router_ip):
"""
Sets the virtual_router_ip of this Subnet.
The IP address of the virtual router.
Example: `10.0.14.1`
:param virtual_router_ip: The virtual_router_ip of this Subnet.
:type: str
"""
self._virtual_router_ip = virtual_router_ip
@property
def virtual_router_mac(self):
"""
**[Required]** Gets the virtual_router_mac of this Subnet.
The MAC address of the virtual router.
Example: `00:00:00:00:00:01`
:return: The virtual_router_mac of this Subnet.
:rtype: str
"""
return self._virtual_router_mac
@virtual_router_mac.setter
def virtual_router_mac(self, virtual_router_mac):
"""
Sets the virtual_router_mac of this Subnet.
The MAC address of the virtual router.
Example: `00:00:00:00:00:01`
:param virtual_router_mac: The virtual_router_mac of this Subnet.
:type: str
"""
self._virtual_router_mac = virtual_router_mac
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
python
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.inferno
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Inferno os and all the related stuff.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String, Number
__all__ = ['LimboLexer']
class LimboLexer(RegexLexer):
"""
Lexer for `Limbo programming language <http://www.vitanuova.com/inferno/limbo.html>`_
TODO:
- maybe implement better var declaration highlighting
- some simple syntax error highlighting
.. versionadded:: 2.0
"""
name = 'Limbo'
aliases = ['limbo']
filenames = ['*.b']
mimetypes = ['text/limbo']
tokens = {
'whitespace': [
(r'^(\s*)([a-zA-Z_]\w*:(\s*)\n)',
bygroups(Text, Name.Label)),
(r'\n', Text),
(r'\s+', Text),
(r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\', String), # stray backslash
],
'statements': [
(r'"', String, 'string'),
(r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float),
(r'16r[0-9a-fA-F]+', Number.Hex),
(r'8r[0-7]+', Number.Oct),
(r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer),
(r'[()\[\],.]', Punctuation),
(r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator),
            (r'(alt|break|case|continue|cyclic|do|else|exit'
             r'|for|hd|if|implement|import|include|len|load|or'
             r'|pick|return|spawn|tagof|tl|to|while)\b', Keyword),
(r'(byte|int|big|real|string|array|chan|list|adt'
r'|fn|ref|of|module|self|type)\b', Keyword.Type),
(r'(con|iota|nil)\b', Keyword.Constant),
            (r'[a-zA-Z_]\w*', Name),
],
'statement' : [
include('whitespace'),
include('statements'),
('[{}]', Punctuation),
(';', Punctuation, '#pop'),
],
'root': [
include('whitespace'),
('', Text, 'statement'),
],
}
def analyse_text(text):
# Any limbo module implements something
if re.search(r'^implement \w+;', text, re.MULTILINE):
return 0.7
# TODO:
# - Make lexers for:
# - asm sources
# - man pages
# - mkfiles
# - module definitions
# - namespace definitions
# - shell scripts
# - maybe keyfiles and fonts
# they all seem to be quite similar to their equivalents
# from unix world, so there should not be a lot of problems
|
python
|
# Generated by Django 3.2.7 on 2021-11-23 16:39
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Brand',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(help_text='Max 255 chars', max_length=255, verbose_name='Brand')),
],
options={
'verbose_name': 'Brand',
'verbose_name_plural': 'Brands',
},
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(help_text='Max 255 chars', max_length=255, verbose_name='Title')),
('title_en', models.CharField(help_text='Max 255 chars', max_length=255, null=True, verbose_name='Title')),
('title_az', models.CharField(help_text='Max 255 chars', max_length=255, null=True, verbose_name='Title')),
],
options={
'verbose_name': 'Category',
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Color',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(help_text='Max 30 char.', max_length=30, verbose_name='Title')),
('hex_code', models.CharField(default='ffffff', max_length=6, verbose_name='Hex Code')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Image',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('image', models.ImageField(blank=True, null=True, upload_to='images/image/', verbose_name='Image')),
('is_main', models.BooleanField(default=False)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(help_text='Max 255 chars', max_length=255, verbose_name='Title')),
('price', models.DecimalField(decimal_places=2, default=50.0, max_digits=10)),
('discount_price', models.DecimalField(decimal_places=2, default=35.0, max_digits=10)),
('description', models.TextField(blank=True, null=True, verbose_name='Description')),
],
options={
'verbose_name': 'Product',
'verbose_name_plural': 'Products',
},
),
migrations.CreateModel(
name='ProductVersion',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('quantity', models.IntegerField(default=0, verbose_name='Quantity')),
('version_description', models.TextField(blank=True, null=True, verbose_name='New_description')),
('is_main', models.BooleanField(default=False)),
],
options={
'verbose_name': 'Product Version',
'verbose_name_plural': 'Product Versions',
},
),
migrations.CreateModel(
name='Size',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(help_text='Max 30 char.', max_length=30, verbose_name='Title')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Tag',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('tag', models.CharField(max_length=255, verbose_name='Tag')),
],
options={
'verbose_name': 'Tag',
'verbose_name_plural': 'Tags',
},
),
migrations.CreateModel(
name='Review',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('price_rating', models.IntegerField(choices=[(1, '*'), (2, '**'), (3, '***'), (4, '****'), (5, '*****')], default=5, verbose_name='price')),
('value_rating', models.IntegerField(choices=[(1, '*'), (2, '**'), (3, '***'), (4, '****'), (5, '*****')], default=5, verbose_name='value')),
('quality_rating', models.IntegerField(choices=[(1, '*'), (2, '**'), (3, '***'), (4, '****'), (5, '*****')], default=5, verbose_name='quality')),
('nickname', models.CharField(help_text='Max 255 chars', max_length=255, verbose_name='Nickname')),
('summary', models.CharField(help_text='Max 255 chars', max_length=255, verbose_name='Summary')),
('review', models.TextField(verbose_name='Review')),
('product', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='product_review', to='product.product', verbose_name='Product_Review')),
],
options={
'verbose_name': 'Review',
'verbose_name_plural': 'Reviews',
},
),
]
|
python
|
#!/user/bin/env python
# -*- coding: utf-8 -*-
import re
import sys
from flask import render_template, request, redirect
from flask_login import current_user
from Web import right_url_prefix as url_prefix, create_blue
from Web import control
sys.path.append('..')
__author__ = 'Zhouheng'
html_dir = "/RIGHT"
develop_right_view = create_blue('develop_right_view', url_prefix=url_prefix)
@develop_right_view.route("/", methods=["GET"])
def show_module_list():
result, info = control.get_right_module(current_user.role)
if result is False:
return info
if "module_no" in request.args:
module_no = int(request.args["module_no"])
result, module_role_info = control.get_right_module_role(current_user.role, module_no)
if result is False:
return module_role_info
module_role_dict = {}
for item in module_role_info:
module_role_dict[item["module_role"]] = item
result, action_list = control.get_right_action_role(current_user.role, module_no)
if result is False:
return action_list
if current_user.role & control.role_value["right_new"] > 0:
new_right = True
else:
new_right = False
return render_template("%s/right_module.html" % html_dir, module_list=info, url_prefix=url_prefix,
module_role_info=module_role_info, action_list=action_list, new_right=new_right,
user_name=current_user.user_name, module_role_dict=module_role_dict)
return render_template("%s/right_module.html" % html_dir, module_list=info, url_prefix=url_prefix)
@develop_right_view.route("/", methods=["POST"])
def new_action_role():
if "Referer" not in request.headers:
return "Bad Request"
ref_url = request.headers["Referer"]
    find_module = re.findall(r"\?module_no=([0-9]+)", ref_url)
    if len(find_module) == 0:
        return "Bad Request."
module_no = int(find_module[0])
action_desc = request.form["action_desc"]
min_role = request.form["min_role"]
result, info = control.new_right_action(current_user.user_name, current_user.role, module_no, action_desc, min_role)
if result is False:
return info
return redirect(ref_url)
@develop_right_view.route("/action/delete/<int:action_no>/", methods=["GET"])
def del_action_role(action_no):
if "Referer" not in request.headers:
return "Bad Request"
ref_url = request.headers["Referer"]
result, info = control.delete_right_action(current_user.user_name, current_user.role, action_no)
if result is False:
return info
return redirect(ref_url)
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 PanXu, Inc. All Rights Reserved
#
"""
flat pretrained vocabulary
Authors: PanXu
Date: 2021/02/24 10:36:00
"""
from easytext.data import PretrainedVocabulary, Vocabulary
class FlatPretrainedVocabulary(PretrainedVocabulary):
"""
    Flat pretrained vocabulary: FLAT NER needs the character-based embeddings
    and the gaz-word pretrained embeddings merged into a single vocabulary.
"""
def __init__(self,
character_pretrained_vocabulary: PretrainedVocabulary,
gaz_word_pretrained_vocabulary: PretrainedVocabulary):
"""
初始化
:param character_pretrained_vocabulary:
:param gaz_word_pretrained_vocabulary:
"""
        assert character_pretrained_vocabulary.embedding_dim == gaz_word_pretrained_vocabulary.embedding_dim, \
            "character_pretrained_vocabulary and gaz_word_pretrained_vocabulary must have the same embedding dimension"
char_embedding_dict = self.__token_embedding_dict(character_pretrained_vocabulary)
gaz_word_embedding_dict = self.__token_embedding_dict(gaz_word_pretrained_vocabulary)
        # materialize the key lists before update() mutates char_embedding_dict
        tokens = [list(char_embedding_dict.keys()), list(gaz_word_embedding_dict.keys())]
char_embedding_dict.update(gaz_word_embedding_dict)
embedding_dict = char_embedding_dict
vocabulary = Vocabulary(tokens=tokens,
padding=Vocabulary.PADDING,
unk=Vocabulary.UNK,
special_first=True)
super().__init__(vocabulary=vocabulary, pretrained_word_embedding_loader=None)
self._embedding_dim = character_pretrained_vocabulary.embedding_dim
self._init_embedding_matrix(vocabulary=self._vocabulary,
embedding_dict=embedding_dict,
embedding_dim=self._embedding_dim)
def __token_embedding_dict(self, pretrained_vocabulary: PretrainedVocabulary):
token_embedding_dict = dict()
unk_index = pretrained_vocabulary.index(pretrained_vocabulary.unk)
for index in range(pretrained_vocabulary.embedding_matrix.size(0)):
if index in {pretrained_vocabulary.padding_index, unk_index}:
                # skip padding_index and unk_index: they are re-created when the merged vocabulary is built
continue
token = pretrained_vocabulary.token(index)
embedding = pretrained_vocabulary.embedding_matrix[index, :]
token_embedding_dict[token] = embedding
return token_embedding_dict
|
python
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from datetime import timedelta
import pytest
from indico.modules.events import Event
from indico.modules.events.models.events import EventType
from indico.util.date_time import now_utc
@pytest.fixture
def create_event(dummy_user, dummy_category, db):
"""Return a callable which lets you create dummy events."""
def _create_event(id_=None, **kwargs):
# we specify `acl_entries` so SA doesn't load it when accessing it for
# the first time, which would require no_autoflush blocks in some cases
now = now_utc(exact=False)
kwargs.setdefault('type_', EventType.meeting)
kwargs.setdefault('title', u'dummy#{}'.format(id_) if id_ is not None else u'dummy')
kwargs.setdefault('start_dt', now)
kwargs.setdefault('end_dt', now + timedelta(hours=1))
kwargs.setdefault('timezone', 'UTC')
kwargs.setdefault('category', dummy_category)
event = Event(id=id_, creator=dummy_user, acl_entries=set(), **kwargs)
db.session.flush()
return event
return _create_event
@pytest.fixture
def dummy_event(create_event):
"""Create a mocked dummy event."""
return create_event(0)
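# Usage sketch (an assumed test, not part of this module): tests can request
# the fixtures directly, e.g.
#
#   def test_dummy_event_times(dummy_event):
#       assert dummy_event.end_dt - dummy_event.start_dt == timedelta(hours=1)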
|
python
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True, font_scale=1)
import itertools
# marker = itertools.cycle(("o", "^", "s"))
marker = itertools.cycle((None,))
markevery = 20
from tensorboard.backend.event_processing import event_accumulator
def plot_result_csv(folders, filenames, files, max_length=800, ifsave=False, sns=sns, marker=marker, markevery=markevery):
# input should like this:
#
# folders = ['new_section_baseline', 'pb_c_base_500','pb_c_base_100','very_different_exploration']
# filenames = ['Total reward']
# files = [[['Tensorboard_data/{}/1/{}.csv'.format(folder, filename),
# 'Tensorboard_data/{}/2/{}.csv'.format(folder, filename),
# 'Tensorboard_data/{}/3/{}.csv'.format(folder, filename),]
# for filename in filenames] for folder in folders]
data = np.array([[[np.loadtxt(tb_file, delimiter=',', skiprows=1)[:max_length,2]
for tb_file in index_file]
for index_file in folder_file]
for folder_file in files])
data_mean = np.mean(data, axis=2)
data_std = np.std(data, axis=2)
clrs = sns.color_palette(n_colors=len(folders))
epochs = list(range(max_length))
for j in range(len(filenames)):
fig, ax = plt.subplots()
for i in range(len(folders)):
if len(epochs) > len(data_mean[i,j]):
epochs = list(range(len(data_mean[i,j])))
ax.plot(epochs, data_mean[i][j], label=folders[i], marker=next(marker), markevery=markevery, c=clrs[i])
ax.fill_between(epochs, data_mean[i][j]-data_std[i][j], data_mean[i][j]+data_std[i][j] ,alpha=0.3, facecolor=clrs[i])
ax.set_xlabel('Training iteration', color='k')
ax.set_ylabel(filenames[j], color='k')
ax.legend()
plt.show()
if ifsave:
fig.savefig('{}.png'.format(filenames[j]), format='png', dpi=600)
def plot_result_event(folders, filenames, files, max_length=800, ifsave=False, labels=[], sns=sns, marker=marker, markevery=markevery):
# input should like this:
#
# folders = ['new_section_baseline', 'pb_c_base_500','pb_c_base_100']
# filenames = ['1.Reward/1.Total reward']
# labels = ['Total reward']
# files = [['Tensorboard_data/{}/1'.format(folder),
# 'Tensorboard_data/{}/2'.format(folder),
# 'Tensorboard_data/{}/3'.format(folder),]
# for folder in folders]
data = []
for i in range(len(files)):
repeated_experiments = []
for j in range(len(files[0])):
event = event_accumulator.EventAccumulator(files[i][j])
event.Reload()
diff_index = []
for k in range(len(filenames)):
                try:
                    scalars = event.scalars.Items(filenames[k])
                except KeyError:
                    print(event.scalars.Keys())
                    return None
diff_index.append([item.value for item in scalars][:max_length])
repeated_experiments.append(diff_index)
data.append(repeated_experiments)
data = np.array(data)
data = np.transpose(data, (0,2,1,3))
data_mean = np.mean(data, axis=2)
data_std = np.std(data, axis=2)
clrs = sns.color_palette(n_colors=len(folders))
if max_length > len(data_mean[0][0]):
scope = len(data_mean[0][0])
else:
scope = max_length
epochs = list(range(scope))
for j in range(len(filenames)):
fig, ax = plt.subplots()
for i in range(len(folders)):
ax.plot(epochs, data_mean[i][j][:scope], label=folders[i], marker=next(marker), markevery=markevery, c=clrs[i])
ax.fill_between(epochs, data_mean[i][j][:scope]-data_std[i][j][:scope], data_mean[i][j][:scope]+data_std[i][j][:scope] ,alpha=0.3, facecolor=clrs[i])
ax.set_xlabel('Training iteration', color='k')
if labels == []:
ax.set_ylabel(filenames[j], color='k')
else:
ax.set_ylabel(labels[j], color='k')
ax.legend()
plt.show()
if ifsave:
fig.savefig('{}.png'.format(filenames[j]), format='png', dpi=600)
|
python
|
loglevel = 'info'
errorlog = "-"
accesslog = "-"
bind = '0.0.0.0:5000'
workers = 2
timeout = 60
|
python
|