seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, may be null) | dataset (string, 1 class) | pt (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---
37273642245
|
import rosbag
import sys
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import csv
import glob
import os
from tf.transformations import euler_from_quaternion
from scipy.interpolate import interp1d
from scipy.spatial.transform import Rotation as R
if (len(sys.argv) > 1):
filename = "../bag/" + sys.argv[1] + ".bag"
else:
list_of_files = glob.glob('../bag/*')
latest_file = max(list_of_files, key=os.path.getctime)
filename = latest_file
def get_rpy(msg):
roll, pitch, yaw = euler_from_quaternion([msg.pose.pose.orientation.x,
msg.pose.pose.orientation.y,
msg.pose.pose.orientation.z,
msg.pose.pose.orientation.w])
return roll, pitch, yaw
def diff_percent(a, b):
if b != 0:
return (a-b)/b * 100
elif a != 0:
return (b-a)/a * 100
else:
# guard against division by zero when both values are zero
return 0
class topic_data:
def __init__(self, topic_name, filename):
self.topic_name = topic_name
self.data = {
"time": [],
"r_x": [],
"r_y": [],
"r_z": [],
"q_x": [],
"q_y": [],
"q_z": [],
"q_w": [],
"roll": [],
"pitch": [],
"yaw": [],
"v_x": [],
"v_y": [],
"v_z": [],
"omega_x": [],
"omega_y": [],
"omega_z": []
}
self.get_data_from_bag(filename)
def get_data_from_bag(self, filename):
with rosbag.Bag(filename) as bag:
for topic, msg, t in bag.read_messages(topics=[self.topic_name]):
self.data["time"].append(
msg.header.stamp.secs + msg.header.stamp.nsecs/1e9)
self.data["r_x"].append(msg.pose.pose.position.x)
self.data["r_y"].append(msg.pose.pose.position.y)
self.data["r_z"].append(msg.pose.pose.position.z)
self.data["q_x"].append(msg.pose.pose.orientation.x)
self.data["q_y"].append(msg.pose.pose.orientation.y)
self.data["q_z"].append(msg.pose.pose.orientation.z)
self.data["q_w"].append(msg.pose.pose.orientation.w)
self.data["roll"].append(get_rpy(msg)[0])
self.data["pitch"].append(get_rpy(msg)[1])
self.data["yaw"].append(get_rpy(msg)[2])
self.data["v_x"].append(msg.twist.twist.linear.x)
self.data["v_y"].append(msg.twist.twist.linear.y)
self.data["v_z"].append(msg.twist.twist.linear.z)
self.data["omega_x"].append(msg.twist.twist.angular.x)
self.data["omega_y"].append(msg.twist.twist.angular.y)
self.data["omega_z"].append(msg.twist.twist.angular.z)
def calculate_velocities(self):
self.data["v_x"] = []
self.data["v_y"] = []
self.data["v_z"] = []
self.data["omega_x"] = []
self.data["omega_y"] = []
self.data["omega_z"] = []
self.data["v_x"].append(0)
self.data["v_y"].append(0)
self.data["v_z"].append(0)
self.data["omega_x"].append(0)
self.data["omega_y"].append(0)
self.data["omega_z"].append(0)
for i in range(1, len(self.data["time"])):
self.data["v_x"].append((self.data["r_x"][i] - self.data["r_x"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
self.data["v_y"].append((self.data["r_y"][i] - self.data["r_y"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
self.data["v_z"].append((self.data["r_z"][i] - self.data["r_z"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
# p = R.from_quat([self.data["q_x"][i]-1, self.data["q_y"][i-1], self.data["q_z"][i-1], self.data["q_w"][i-1]])
# q = R.from_quat([self.data["q_x"][i], self.data["q_y"][i], self.data["q_z"][i], self.data["q_w"][i]])
# r = np.matmul(p.as_dcm().transpose(), q.as_dcm())
# r = p.as_dcm()-q.as_dcm()
# print(r)
self.data["omega_x"].append((self.data["roll"][i] - self.data["roll"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
self.data["omega_y"].append((self.data["pitch"][i] - self.data["pitch"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
self.data["omega_z"].append((self.data["yaw"][i] - self.data["yaw"][i-1]) / (self.data["time"][i] - self.data["time"][i-1]))
if __name__ == "__main__":
truth = topic_data('/tag_box_pose_ground_truth', filename)
detected = topic_data('/detected_object_state', filename)
predicted = topic_data('/predicted_object_state', filename)
ekf = topic_data('/updated_object_state', filename)
detected.calculate_velocities()
start_time = 3.12
simulation_time = 0.79
truth.data['time'] = [x - start_time for x in truth.data['time']]
detected.data['time'] = [x - start_time for x in detected.data['time']]
predicted.data['time'] = [x - start_time for x in predicted.data['time']]
ekf.data['time'] = [x - start_time for x in ekf.data['time']]
plot_trajectory = 0
plot_error = 0
plot_all = 0
plot_group = 1
plot_seperate = 0
save_csv = 0
plot_latency = 0
def compare_plot(attr, include_detected):
plt.plot(truth.data['time'], truth.data[attr], 'r.-', label='truth')
if include_detected:
plt.plot(detected.data['time'],
detected.data[attr], 'g.', label='detected')
plt.plot(predicted.data['time'],
predicted.data[attr], 'y.', label='predicted')
plt.plot(ekf.data['time'], ekf.data[attr], 'b.', label='ekf')
plt.title(attr)
plt.legend()
plt.xlim(2, 5)
plt.savefig(attr, dpi=400) # save before show(), otherwise the saved figure may be blank
plt.show()
plt.clf()
def generate_plot(ax, attr, is_detection_estimation, y1=None, y2=None, title=None):
ax.plot(truth.data['time'], truth.data[attr], 'r.-', label='ground truth', ms=3, lw=1)
if is_detection_estimation:
ax.plot(detected.data['time'], detected.data[attr], 'g.-', label='detected estimation')
else:
ax.plot(detected.data['time'], detected.data[attr], 'g.-', label='detected state')
ax.plot(predicted.data['time'], predicted.data[attr], 'y.-', label='predicted state')
ax.plot(ekf.data['time'], ekf.data[attr], 'b.-', label='EKF estimation')
ax.set_title(title if title else attr)
ax.legend()
ax.set_xlim(0, simulation_time)
ax.set_ylim(y1, y2)
ax.set_xlabel("time [s]")
if attr == "r_x" or attr == "r_y" or attr == "r_z":
ax.set_ylabel("position [m]")
if attr == "roll" or attr == "pitch" or attr == "yaw":
ax.set_ylabel("rotation [rad]")
if attr == "v_x" or attr == "v_y" or attr == "v_z":
ax.set_ylabel("linear velocity [m/s]")
if attr == "omega_x" or attr == "omega_y" or attr == "omega_z":
ax.set_ylabel("angular velocity [rad/s]")
def error_plot(ax, attr):
f = interp1d(truth.data['time'], truth.data[attr])
ax.plot(ekf.data['time'], f(ekf.data['time']))
ax.plot(ekf.data['time'], ekf.data[attr])
err_det = []
err_ekf = []
for i in range(len(ekf.data['time'])):
err_det.append(diff_percent(
detected.data[attr][i], f(detected.data['time'][i])))
err_ekf.append(diff_percent(ekf.data[attr][i], f(ekf.data['time'][i])))
ax.plot(detected.data['time'], err_det, 'g.-', label='detected')
ax.plot(ekf.data['time'], err_ekf, 'b.-', label='ekf')
ax.set_title(attr)
ax.legend()
# ax.set_xlim()
if plot_trajectory:
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(truth.data["r_x"][300:390], truth.data["r_y"][300:390], truth.data["r_z"]
[300:390], 'r.-', ms=6, lw=2, label='ground truth trajectory')
# ax.plot(predicted.data["r_x"][:-5], predicted.data["r_y"][:-5], predicted.data["r_z"][:-5],'y.-', ms=12, lw= 2,label='predicted trajectory')
ax.plot(detected.data["r_x"][1:-5], detected.data["r_y"][1:-5], detected.data["r_z"][1:-5],'g.-', ms=12, lw= 2,label='detected trajectory')
ax.plot(ekf.data["r_x"][:-5], ekf.data["r_y"][:-5], ekf.data["r_z"][:-5], 'b.-', ms=12, lw=2, label='ekf trajectory')
ax.legend()
ax.set_xlabel('X [m]')
ax.set_ylabel('Y [m]')
ax.set_zlabel('Z [m]')
ax.set_xlim(ekf.data['r_x'][-1], ekf.data['r_x'][0])
ax.set_ylim()
ax.set_zlim()
plt.figure(dpi=400)
plt.show()
if plot_error:
plt.clf()
fig, axs = plt.subplots(4, 3)
error_plot(axs[0][0], 'r_x')
error_plot(axs[0][1], 'r_y')
error_plot(axs[0][2], 'r_z')
error_plot(axs[1][0], 'roll')
error_plot(axs[1][1], 'pitch')
error_plot(axs[1][2], 'yaw')
error_plot(axs[2][0], 'v_x')
error_plot(axs[2][1], 'v_y')
error_plot(axs[2][2], 'v_z')
error_plot(axs[3][0], 'omega_x')
error_plot(axs[3][1], 'omega_y')
error_plot(axs[3][2], 'omega_z')
plt.show()
if plot_all:
plt.clf()
fig, axs = plt.subplots(4, 3)
generate_plot(axs[0][0], 'r_x', False)
generate_plot(axs[0][1], 'r_y', False)
generate_plot(axs[0][2], 'r_z', False)
generate_plot(axs[1][0], 'roll', False)
generate_plot(axs[1][1], 'pitch', False)
generate_plot(axs[1][2], 'yaw', False)
generate_plot(axs[2][0], 'v_x', True)
generate_plot(axs[2][1], 'v_y', True)
generate_plot(axs[2][2], 'v_z', True)
generate_plot(axs[3][0], 'omega_x', True)
generate_plot(axs[3][1], 'omega_y', True)
generate_plot(axs[3][2], 'omega_z', True)
plt.show()
if plot_group:
# fig, axs = plt.subplots(1, 3, dpi=200, figsize=(14,4))
# generate_plot(axs[0], 'r_x', False, title=r'$r_x$', y1=1.5, y2=5.5)
# generate_plot(axs[1], 'r_y', False, title=r'$r_y$', y1=-2, y2=2)
# generate_plot(axs[2], 'r_z', False, title=r'$r_z$', y1=-1, y2=3)
# plt.savefig('/home/ziyou/Desktop/report/figures/s2_r_plot.png', dpi=400)
# plt.show()
# plt.clf()
# fig, axs = plt.subplots(1, 3, dpi=200, figsize=(14,4))
# generate_plot(axs[0], 'roll', False, title=r'$roll$', y1=-0.5, y2=1)
# generate_plot(axs[1], 'pitch', False, title=r'$pitch$', y1=-0.4, y2=1.1)
# generate_plot(axs[2], 'yaw', False, title=r'$yaw$', y1=-0.75, y2=0.75)
# plt.savefig('/home/ziyou/Desktop/report/figures/s2_q_plot.png', dpi=400)
# plt.show()
# plt.clf()
# fig, axs = plt.subplots(1, 3, dpi=200, figsize=(14,4))
# generate_plot(axs[0], 'v_x', True, title=r'$v_x$', y1=-6, y2=4)
# generate_plot(axs[1], 'v_y', True, title=r'$v_y$', y1=-4.5, y2=5.5)
# generate_plot(axs[2], 'v_z', True, title=r'$v_z$', y1=-5, y2=5)
# plt.savefig('/home/ziyou/Desktop/report/figures/s2_v_plot.png', dpi=400)
# plt.show()
# plt.clf()
fig, axs = plt.subplots(1, 3, dpi=200, figsize=(14,4))
generate_plot(axs[0], 'omega_x', True, title=r'$\omega_x$', y1=-15, y2=15)
generate_plot(axs[1], 'omega_y', True, title=r'$\omega_y$', y1=-15, y2=15)
generate_plot(axs[2], 'omega_z', True, title=r'$\omega_z$', y1=-15, y2=15)
plt.savefig('/home/ziyou/Desktop/report/figures/s2_omega_plot.png', dpi=400)
plt.show()
plt.clf()
if plot_seperate:
compare_plot('r_x', True)
compare_plot('r_y', True)
compare_plot('r_z', True)
compare_plot('q_x', True)
compare_plot('q_y', True)
compare_plot('q_z', True)
compare_plot('q_w', True)
compare_plot('roll', True)
compare_plot('pitch', True)
compare_plot('yaw', True)
compare_plot('v_x', False)
compare_plot('v_y', False)
compare_plot('v_z', False)
compare_plot('omega_x', False)
compare_plot('omega_y', False)
compare_plot('omega_z', False)
if save_csv:
# rows = [truth.time,
# truth.r_z,
# truth.roll,
# truth.v_z,
# truth.omega_x,
# predicted.time,
# predicted.r_z,
# predicted.roll,
# predicted.v_z,
# predicted.omega_x,
# detected.time,
# detected.r_z,
# detected.roll,
# detected.v_z,
# detected.omega_x,
# ekf.time,
# ekf.r_z,
# ekf.roll,
# ekf.v_z,
# ekf.omega_x]
rows = []
with open('analysis.csv', 'wb') as f:
writer = csv.writer(f, delimiter=',', quoting=csv.QUOTE_ALL)
for row in rows:
writer.writerow(row)
if plot_latency:
latency = [20, 20, 20, 10, 20, 20, 20, 10, 20, 20, 30, 20, 20, 20, 20, 10, 10]
seq = range(1, len(latency)+1)
plt.clf()
plt.bar(seq, latency)
plt.xlabel("Image processing sequence")
plt.ylabel("detected latency [ms]")
plt.xticks(np.arange(1, len(latency)+1, step=1))
plt.show()
print(sum(latency) / len(latency))
|
ZiyouZhang/rotors_datmo
|
scripts/bag_analysis.py
|
bag_analysis.py
|
py
| 13,599 |
python
|
en
|
code
| 4 |
github-code
|
6
|
24429656200
|
# Isaiah Grace
# May 13 2020
import colorsys
import random
import rpi_ws281x
import threading
class StripDriver():
def __init__(self, config):
self.strip = rpi_ws281x.PixelStrip(
config['COUNT'],
config['PIN'],
config['FREQ_HZ'],
config['DMA'],
config['INVERT'],
config['BRIGHTNESS'],
config['CHANNEL']
)
self.strip.begin()
# This lock will hopefully help prevent white flashing
self.strip_lock = threading.Lock()
# This is the color that pixels will gravitate towards
self.base_red = 0 # range 0 - 255
self.base_blue = 0 # range 0 - 255
self.base_green = 0 # range 0 - 255
# Pixels can mutate to any color with +- the tolerance from the base
# A tolerance of 0 means all pixels will be the base_color
# A tolerance of 255 means pixel colors will randomly span the entire color space
self.tolerance = 0 # range 0 - 255
# This is the % chance that a color value of a pixel has to mutate
self.mutation_rate = 0 # range 0 - 100
# A mutated color will change by this value, either + or -
# increasing this will increase the speed that the colors change
self.mutation_step = 2 # range 0 - 255
# This offset is applied to the hue color wheel and will create a phase change from the valence mapping to the RGB mapping.
# This website has a good demonstration of HSV color: http://aleto.ch/color-mixer/
# An offset value of 0.66 would be equivalent to a Hue value of 240 on the above site.
# Our mapping of the range 0 - 1 to the RGB color space is then represented by the circumference of the HSV color wheel.
# Note that this only allows mixing of two of the three colors, one will always be zero.
# Thankfully our mutations will take care of this and we can rely on them to produce more colors.
self.hue_offset = 0.66 # range 0 - 1
# These HSV values allow a direct mapping from the 0 - 1 range to the RGB space
# See: https://en.wikipedia.org/wiki/HSL_and_HSV
# Maximum values for S and V create more vivid and intense colors, and are a good choice with these cheap LEDs
self.hsv_S = 1
self.hsv_V = 1
# This will allow us to detect when all the lights are off, and we don't have to bother doing all the music effects
self.idle = False
def is_idle(self):
return self.idle
def refresh(self):
if self.base_red or self.base_green or self.base_blue or self.tolerance:
self.idle = False
# This transmits data to the LED strip to set the color of each LED according to the strip array
with self.strip_lock:
print(self.print_colors()[1:],end='')
self.strip.show()
def clear(self):
# DOES NOT TRANSMIT NEW COLORS TO STRIP
# refresh is needed after this to actually turn off the lights
for i in range(self.strip.numPixels()):
self.strip.setPixelColorRGB(i,0,0,0)
def set_base_color_from_valence(self, valence):
# Takes a valence value (0 - 1) describing the song and returns a color mapping of that value, with the hue_offset applied
# The offset shifts the valence value around the hue wheel; values above 1 wrap back into the 0 - 1 range
hue = valence + self.hue_offset
if hue > 1:
hue = hue - 1
# colorsys.hsv_to_rgb returns values from 0 - 1, we need to scale these to integers in the range 0 - 255
(self.base_red, self.base_green, self.base_blue) = tuple(int(i * 255) for i in colorsys.hsv_to_rgb(hue, self.hsv_S, self.hsv_V))
def set_base_color_from_RGB(self, red, green, blue):
self.base_red = red
self.base_green = green
self.base_blue = blue
def music_effect(self):
if self.base_red or self.base_green or self.base_blue or self.tolerance:
self.idle = False
if self.idle:
return
# If all the lights are off, this will remain True through the updates
all_off = True
with self.strip_lock:
for i in range(self.strip.numPixels()):
pixel_color = self.strip.getPixelColor(i)
# Extract the R G B values for this pixel from the packed 24 bit color
pixel_color = ((0xFF0000 & pixel_color) >> 16,
(0x00FF00 & pixel_color) >> 8,
(0x0000FF & pixel_color))
# Mutate each of the colors to find a new color value for the pixel
pixel_color = self.mutate_color(pixel_color, self.mutation_rate, self.mutation_step)
# Apply the limiting bound on the new pixel color
red = self.apply_bound(pixel_color[0], self.base_red, self.tolerance)
green = self.apply_bound(pixel_color[1], self.base_green, self.tolerance)
blue = self.apply_bound(pixel_color[2], self.base_blue, self.tolerance)
# Update the color of this pixel
self.strip.setPixelColorRGB(i, red, green, blue)
# If a pixel_color is nonzero, then not all the lights are off
if red or green or blue:
all_off = False
self.idle = all_off
#and not self.base_red and not self.base_green and not self.base_blue and not self.tolerance
def mutate_color(self, color, mutation_rate, mutationStep):
# Applies a random mutation to each pixel, allowing a natural variation between pixel colors
mutated_color = list(color)
# This arithmetic means we only need one call to random in order to generate all three random numbers
rand = random.randint(0,10000000)
mutation = [rand % 100,
(rand // 100) % 100,
(rand // 100000) % 100
]
for i in (0,1,2):
if mutation[i] < mutation_rate:
# Apply the color mutation, either increase or decrease the color
if mutation[i] % 2 == 0:
mutated_color[i] = color[i] + mutationStep
else:
mutated_color[i] = color[i] - mutationStep
# Make sure the mutated color is within 0 - 255
if mutated_color[i] > 255:
mutated_color[i] = 255
if mutated_color[i] < 0:
mutated_color[i] = 0
return mutated_color
def apply_bound(self, color, base_color, bound_size):
# Applies the bound on a particular color value (0 - 255).
# If the color is outside the allowed bound, gently push it towards the bound.
# This allows the lights to turn on/off and change colors gradually from song to song
if color > base_color + bound_size:
return color - 1
if color < base_color - bound_size:
return color + 1
return color
def print_colors(self):
# This is some unicode magic that won't work with all terminals, etc.
strip_length = self.strip.numPixels()
i = 0
printout = []
while(i < strip_length):
if i % 10 == 0:
printout.append('\n')
pixel_color = self.strip.getPixelColor(i)
# Extract the R G B values for this pixel from the packed 24 bit color
red = str((0xFF0000 & pixel_color) >> 16)
green = str((0x00FF00 & pixel_color) >> 8)
blue = str(0x0000FF & pixel_color)
printout.append('\x1B[38;2;' + red + ';' + green + ';' + blue +'m\u2588')
i = i + 1
printout.append('\n\x1B[m')
return ''.join(printout)
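# Editor's sketch (not part of the original module): the valence -> color
# mapping described in __init__, reduced to a standalone helper so it can be
# tried without LED hardware; the 0.66 hue offset mirrors the default above.
def valence_to_rgb(valence, hue_offset=0.66):
    # shift the hue around the HSV wheel and wrap back into the 0 - 1 range
    hue = (valence + hue_offset) % 1.0
    # S = V = 1 gives the vivid, fully saturated colors used by the driver
    return tuple(int(c * 255) for c in colorsys.hsv_to_rgb(hue, 1, 1))
# e.g. valence_to_rgb(0.0) yields a strongly blue color with the default offset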
|
IsaiahGrace/lightRemote
|
pi/stripDriver.py
|
stripDriver.py
|
py
| 7,953 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5539453892
|
import cv2
import numpy as np
"""
What is YUV?
It represents color with one luminance (brightness) component (Y) and two chrominance components (U, V).
The Y'UV model defines a color space in terms of one luma component (Y') and
two chrominance components, called U (blue projection) and V (red projection) respectively.
Y = 0.299R + 0.587G + 0.114B
"""
src = cv2.imread('../resources/Lena.png')
(h,w,c) = src.shape
yuv = cv2.cvtColor(src,cv2.COLOR_BGR2YUV)
my_y = np.zeros((h,w))
my_y = (src[:,:,0] * 0.114) + (src[:,:,1] * 0.587) + (src[:,:,2] * 0.299)
my_y = np.round(my_y).astype(np.uint8)
print(yuv[0:5,0:5,0])
print(my_y[0:5,0:5])
cv2.imshow('original',src)
cv2.imshow('cvtColor',yuv[:,:,0]) # the first channel of yuv is Y
cv2.imshow('my_y',my_y)
cv2.waitKey()
cv2.destroyAllWindows()
|
daebakk/Image-Processing
|
Assignment/Fundamentals of Image processing/BGRToY.py
|
BGRToY.py
|
py
| 794 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7755753027
|
from __future__ import absolute_import
from __future__ import unicode_literals
import argparse
from .. import api, utils
from ..command import Command, CMD_SUCCESS, HELP_LIST
from ..exceptions import InvalidDateError, NotFoundError, WrappedValueError
class Log(Command):
"""List enrollment information available in the registry.
The command list a set of enrollments. Some searching parameters
to filter the results are available. Parameters <uuid> and <organization>
filter by unique identity and organization name. Enrollments between a
period can also be listed using <from> and <to> parameters, where
<from> must be less or equal than <to>. Default values for these dates
are '1900-01-01' and '2100-01-01'.
Dates may follow several patterns. The most common and recommended
is 'YYYY-MM-DD'. Optionally, time information can be included using
patterns like 'YYYY-MM-DD hh:mm:ss'.
"""
def __init__(self, **kwargs):
super(Log, self).__init__(**kwargs)
self.parser = argparse.ArgumentParser(description=self.description,
usage=self.usage)
# Enrollments search options
self.parser.add_argument('--uuid', default=None,
help="unique identity to withdraw")
self.parser.add_argument('--organization', default=None,
help="organization where the uuid is enrolled")
self.parser.add_argument('--from', dest='from_date', default=None,
help="date (YYYY-MM-DD:hh:mm:ss) when the enrollment starts")
self.parser.add_argument('--to', dest='to_date', default=None,
help="date (YYYY-MM-DD:hh:mm:ss) when the enrollment ends")
# Exit early if help is requested
if 'cmd_args' in kwargs and [i for i in kwargs['cmd_args'] if i in HELP_LIST]:
return
self._set_database(**kwargs)
@property
def description(self):
return """List enrollments."""
@property
def usage(self):
return "%(prog)s log [--uuid <uuid>] [--organization <organization>] [--from <date>] [--to <date>]"
def run(self, *args):
"""List enrollments using search parameters."""
params = self.parser.parse_args(args)
uuid = params.uuid
organization = params.organization
try:
from_date = utils.str_to_datetime(params.from_date)
to_date = utils.str_to_datetime(params.to_date)
code = self.log(uuid, organization, from_date, to_date)
except InvalidDateError as e:
self.error(str(e))
return e.code
return code
def log(self, uuid=None, organization=None, from_date=None, to_date=None):
""""List enrollment information available in the registry.
Method that returns a list of enrollments. If <uuid> parameter is set,
it will return the enrollments related to that unique identity;
if <organization> parameter is given, it will return the enrollments
related to that organization; if both parameters are set, the function
will return the list of enrollments of <uuid> on the <organization>.
Enrollments between a period can also be listed using <from_date> and
<to_date> parameters. When these are set, the method will return
all those enrollments where Enrollment.start >= from_date AND
Enrollment.end <= to_date. Defaults values for these dates are
1900-01-01 and 2100-01-01.
:param db: database manager
:param uuid: unique identifier
:param organization: name of the organization
:param from_date: date when the enrollment starts
:param to_date: date when the enrollment ends
"""
try:
enrollments = api.enrollments(self.db, uuid, organization,
from_date, to_date)
self.display('log.tmpl', enrollments=enrollments)
except (NotFoundError, WrappedValueError) as e:
self.error(str(e))
return e.code
return CMD_SUCCESS
|
timhayduk/glusterDashboard
|
gitlab/lib/python3.5/site-packages/sortinghat/cmd/log.py
|
log.py
|
py
| 4,198 |
python
|
en
|
code
| 1 |
github-code
|
6
|
32672095851
|
#!python
import collections
#text=haystack, pattern=needle
def contains(text, pattern):
"""Return a boolean indicating whether pattern occurs in text."""
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
#funny storytime! initially did not pass because I falsely remembered a lack of explicitly written "else" statement resulting in a default return of "False". In truth, it defaults to "None"
#Runtime: O(n) because it must go through text to find the pattern. Best case would be O(1), like if the pattern/needle was at the very front of the text/haystack.
#Space: O(1) because it will always be simply true or false. Boolean, boom!
if pattern in text:
return True
else:
return False
def index_helper(text, pattern):
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(text)
indices = []
for index, _ in enumerate(text):
if pattern == text[index: (index + len(pattern))]:
indices.append(index)
return indices
def find_index(text, pattern):
"""Return the starting index of the first occurrence of pattern in text,
or None if not found."""
#Runtime: O(n) because it must duplicate the array AND go through the now-quasi-dictionarified text. Each of these is O(n), adding to O(2n), but because runtime is about estimations and not exact figures, it's ultimately just O(n).
#Space: O(1). Either 'None' or a single index is returned, and no matter what the parameters, the size of output will remain the same
indices = index_helper(text, pattern) #call the helper function
if len(indices) != 0:
return indices[0] #return the first index
return None #unless there's nothing, in which case, return None
def find_all_indexes(text, pattern):
"""Return a list of starting indexes of all occurrences of pattern in text,
or an empty list if not found."""
#Runtime: O(n), for same reasons as above. Array duplication is O(n), looping through the enumerated text is O(n), adding to O(2n) but ultimately being recorded and treated as O(n) because simplification is alright
#Space: O(n). This is the same code as the function above (they use the same helper function), EXCEPT that it returns an array rather than a single variable of constant size. We don't know how many times the pattern will be found; it could be one time, it could be a ton
return index_helper(text, pattern) #call the helper function
#either it's empty or it contains something. Or some things. Either way, we're returning a list
def test_string_algorithms(text, pattern):
found = contains(text, pattern)
print('contains({!r}, {!r}) => {}'.format(text, pattern, found))
index = find_index(text, pattern)
print('find_index({!r}, {!r}) => {}'.format(text, pattern, index))
indexes = find_all_indexes(text, pattern)
print('find_all_indexes({!r}, {!r}) => {}'.format(text, pattern, indexes))
def main():
"""Read command-line arguments and test string searching algorithms."""
import sys
args = sys.argv[1:] # Ignore script file name
if len(args) == 2:
text = args[0]
pattern = args[1]
test_string_algorithms(text, pattern)
else:
script = sys.argv[0]
print('Usage: {} text pattern'.format(script))
print('Searches for occurrences of pattern in text')
print("\nExample: {} 'abra cadabra' 'abra'".format(script))
print("contains('abra cadabra', 'abra') => True")
print("find_index('abra cadabra', 'abra') => 0")
print("find_all_indexes('abra cadabra', 'abra') => [0, 8]")
if __name__ == '__main__':
main()
|
ckim42/Core-Data-Structures
|
Lessons/source/strings.py
|
strings.py
|
py
| 3,840 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70096885309
|
from collections import deque
def solution(progresses, speeds):
progresses = deque(progresses)
speeds = deque(speeds)
answer = []
while progresses:
count = 0
for i in range(len(speeds)):
progresses[i] += speeds[i]
while True:
if progresses[0] < 100:
break
else:
progresses.popleft()
speeds.popleft()
count += 1
if not progresses:
break
if count != 0:
answer.append(count)
return answer
if __name__ == "__main__":
progresses = [93, 30, 55]
speeds = [1, 30, 5]
solution(progresses, speeds)
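# Editor's note: for this sample input the function returns [2, 1]: after 7
# days the first two features are released together, and the remaining one is
# released 2 days later. The original script computes the result but never
# prints it.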
|
YooGunWook/coding_test
|
ํ๋ก๊ทธ๋๋จธ์ค_๋ณต์ต/์คํ_๊ธฐ๋ฅ๊ฐ๋ฐ.py
|
์คํ_๊ธฐ๋ฅ๊ฐ๋ฐ.py
|
py
| 697 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24958905519
|
import bpy
from bpy.props import *
from bpy.types import Node, NodeSocket
from arm.logicnode.arm_nodes import *
class SwitchNode(Node, ArmLogicTreeNode):
'''Switch node'''
bl_idname = 'LNSwitchNode'
bl_label = 'Switch'
bl_icon = 'CURVE_PATH'
min_inputs = 1
min_outputs = 1
def __init__(self):
array_nodes[str(id(self))] = self
def init(self, context):
self.inputs.new('ArmNodeSocketAction', 'In')
self.inputs.new('NodeSocketShader', 'Value')
self.outputs.new('ArmNodeSocketAction', 'Default')
def draw_buttons(self, context, layout):
row = layout.row(align=True)
op = row.operator('arm.node_add_input_output', text='New', icon='PLUS', emboss=True)
op.node_index = str(id(self))
op.in_socket_type = 'NodeSocketShader'
op.out_socket_type = 'ArmNodeSocketAction'
op.in_name_format = 'Case {0}'
op.out_name_format = 'Case {0}'
op.in_index_name_offset = -1
op2 = row.operator('arm.node_remove_input_output', text='', icon='X', emboss=True)
op2.node_index = str(id(self))
add_node(SwitchNode, category='Logic')
|
phillipmacon/armory-3d-engine
|
blender/arm/logicnode/logic_switch.py
|
logic_switch.py
|
py
| 1,162 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19663543751
|
import tensorflow as tf, numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from baselines.bench.monitor import load_results
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo/train'
df_train = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo/test'
df_test = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo_v1/train/cpo_d_is_1'
df_train_d_is_1 = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo_v1/test/cpo_d_is_1'
df_test_d_is_1 = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo_v1/train/cpo_d_is_2'
df_train_d_is_2 = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/cpo_v1/test/cpo_d_is_2'
df_test_d_is_2 = load_results(logger_dir)
logger_dir = '/home/lihepeng/Documents/Github/tmp/ev/sp/sp_returns.txt'
f_sp = np.loadtxt(logger_dir)
xmax = 3000000
rolling_window = 365*1
rolling_reward = pd.Series(df_train["r"]).rolling(rolling_window)
rolling_reward = rolling_reward.mean().values[rolling_window-1:]
rolling_safety = pd.Series(df_train["s"]).rolling(rolling_window)
rolling_safety = rolling_safety.mean().values[rolling_window-1:]
linestyle_str = [
('solid', 'solid'), # Same as (0, ()) or '-'
('dotted', 'dotted'), # Same as (0, (1, 1)) or '.'
('dashed', 'dashed'), # Same as '--'
('dashdot', 'dashdot')] # Same as '-.'
linestyle_tuple = dict([
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))])
d = np.sum(f_sp)
cpo_r = "{0:.2f}%".format(np.mean((d+df_test["r"].sum())/d) * 100)
cpo_r_d_is_1 = "{0:.2f}%".format(np.mean((d+df_test_d_is_1["r"].sum())/d) * 100)
cpo_r_d_is_2 = "{0:.2f}%".format(np.mean((d+df_test_d_is_2["r"].sum())/d) * 100)
fig = plt.figure(figsize=(10,7))
ax = plt.subplot(111)
plt.plot(np.cumsum(f_sp), label='SP', linewidth=3.0, marker='*', markersize=10, markevery=20, color='#ff7f0e')
plt.plot(np.cumsum(-df_test["r"]), label=r'$d=0.1$', linewidth=3.0)
plt.plot(np.cumsum(-df_test_d_is_1["r"]), label=r'$d=1$', marker='v', markersize=7, markevery=20, linewidth=3.0, color='#2ca02c')
plt.plot(np.cumsum(-df_test_d_is_2["r"]), label=r'$d=2$', linewidth=3.0, linestyle='dashed', marker='X', markersize=8, markevery=20, color='#9467bd')
plt.xlim(0, 365)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel('Day', fontsize=20)
plt.ylabel('Cumulative Costs ($)', fontsize=20)
plt.legend(fontsize=20)
ax.text(368, np.around(np.sum(-df_test["r"])+3,2), cpo_r, style='italic', fontsize='x-large',
bbox={'facecolor': '#1f77b4', 'alpha': 0.5, 'pad': 5})
ax.text(368, np.around(np.sum(-df_test_d_is_1["r"]),2), cpo_r_d_is_1, style='italic', fontsize='x-large',
bbox={'facecolor': '#2ca02c', 'alpha': 0.5, 'pad': 5})
ax.text(368, np.around(np.sum(-df_test_d_is_2["r"])-6,2), cpo_r_d_is_2, style='italic', fontsize='x-large',
bbox={'facecolor': '#9467bd', 'alpha': 0.5, 'pad': 5})
axh = ax.axhline(y=np.sum(-df_test["r"]))
axh.set_linestyle('--')
axh.set_color('#7f7f7f')
axh = ax.axhline(y=np.sum(-df_test_d_is_1["r"]))
axh.set_linestyle('--')
axh.set_color('#7f7f7f')
axh = ax.axhline(y=np.sum(-df_test_d_is_2["r"]))
axh.set_linestyle('--')
axh.set_color('#7f7f7f')
ax.yaxis.set_label_coords(-0.11,0.5)
plt.tight_layout(rect=(0,0,1,1))
plt.show(block=False)
d = 0.1
cpo_v_01 = "{0:.2f}%".format(np.mean(np.maximum(0, df_test["s"].values-d)/d) * 100)
d = 1.0
cpo_v_1 = "{0:.2f}%".format(np.mean(np.maximum(0, df_test_d_is_1["s"].values-d)/d) * 100)
d = 2.0
cpo_v_2 = "{0:.2f}%".format(np.mean(np.maximum(0, df_test_d_is_2["s"].values-d)/d) * 100)
fig = plt.figure(figsize=(10,7))
ax = plt.subplot(111)
plt.plot(np.cumsum(df_test["s"]), label=r'$d=0.1$', linewidth=3.0)
plt.plot(np.cumsum(df_test_d_is_1["s"]), label=r'$d=1$', marker='v', markersize=7, markevery=20, linewidth=3.0, color='#2ca02c')
plt.plot(np.cumsum(df_test_d_is_2["s"]), label=r'$d=2$', marker='X', markersize=8, markevery=20, linewidth=3.0, linestyle='dashed', color='#9467bd')
plt.xlim(0, 365)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlabel('Day', fontsize=20)
plt.ylabel('Cumulative Constraint Values (kWh)', fontsize=20)
ax.text(370, 70, cpo_v_01, style='italic', fontsize='x-large',
bbox={'facecolor': '#1f77b4', 'alpha': 0.5, 'pad': 7})
ax.text(370, 420, cpo_v_1, style='italic', fontsize='x-large',
bbox={'facecolor': '#2ca02c', 'alpha': 0.5, 'pad': 7})
ax.text(370, 780, cpo_v_2, style='italic', fontsize='x-large',
bbox={'facecolor': '#9467bd', 'alpha': 0.5, 'pad': 7})
axh = ax.axhline(y=399)
axh.set_linestyle('--')
axh.set_color('#7f7f7f')
axh1 = ax.axhline(y=780)
axh1.set_linestyle('--')
axh1.set_color('#7f7f7f')
plt.legend(fontsize=20)
plt.tight_layout(rect=(0,0,1,1))
plt.show(block=True)
|
liudading/tmp
|
ev/plot_d.py
|
plot_d.py
|
py
| 5,409 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6642507334
|
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from .serializer import perfil_usuarioSerializer
from .models import perfil_usuario
@api_view(['GET', 'POST'])
def lista_usuarios(request):
if request.method == 'GET':
users = perfil_usuario.objects.all()
serializer = perfil_usuarioSerializer(users, many=True)
return Response(serializer.data)
elif request.method == 'POST':
data = request.data
correo = data.get('correo')
existing_user = perfil_usuario.objects.filter(correo=correo).first()
if existing_user:
return Response({'error': 'Ya existe un usuario con este correo electrónico.'}, status=status.HTTP_400_BAD_REQUEST)
serializer = perfil_usuarioSerializer(data=data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
MarcoToloza/Caso_A_Parte-2-API
|
Api_qr/AppPi/views.py
|
views.py
|
py
| 1,057 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29462931299
|
from evdev import ecodes
from datetime import datetime, timedelta
from four_button_pages import StaticMenu, TokenEntryMenu, PAYGStatusMenu, ServiceMenu
class FourButtonUserInterface(object):
BACKLIGHT_TIMEOUT = 300
def __init__(self, lcd, conn, kbd, static_pages):
self.conn = conn
self.disp = lcd
self.kbd = kbd
self.static_pages = static_pages
self.last_key_pressed = datetime.now()
self.selected_menu = None
self.current_menu = None
self.index = 0
self.last_index = 1
self.last_menu_number = 0
self.menus = [
('PAYG Status', PAYGStatusMenu(self.conn)),
('Enter Token', TokenEntryMenu(self.conn)),
('LAN Status', StaticMenu(self.static_pages[16])),
('WiFi Status', StaticMenu(self.static_pages[17])),
('General Status', StaticMenu(self.static_pages[0])),
('Solar Status', StaticMenu(self.static_pages[12])),
('Battery Status', StaticMenu(self.static_pages[18])),
('Solar History', StaticMenu(self.static_pages[14])),
('Service Menu', ServiceMenu(self.conn)),
]
self.alarm_menus = [
StaticMenu(self.static_pages[2]), # VE Bus error
StaticMenu(self.static_pages[3]), # VE Bus alarm
StaticMenu(self.static_pages[13]), # Solar error
]
def start(self):
self.disp.clear()
self.update_menu_list()
def key_pressed(self):
self.last_key_pressed = datetime.now()
for event in self.kbd.read():
if event.type == ecodes.EV_KEY and event.value == 1:
self.update_current_menu(event.code)
def tick(self):
self.display_alarms()
self.update_current_menu(None)
self.update_backlight_status()
def update_backlight_status(self):
if self.last_key_pressed + timedelta(seconds=self.BACKLIGHT_TIMEOUT) < datetime.now():
self.disp.on = False
else:
self.disp.on = True
def display_alarms(self):
for alarm in self.alarm_menus:
alarm.enter(self.conn, self.disp) # It will only display if the menu actually exists
def get_available_menus(self):
menus = []
for menu in self.menus:
if menu[1].is_available(self.conn):
menus.append(menu)
return menus
def update_menu_list(self):
menus = self.get_available_menus()
number_menus = len(menus)
if number_menus < self.last_menu_number:
self.index = 0
self.last_menu_number = number_menus
if number_menus == 0:
top_string = ' Victron Energy '
bottom_string = ' '.ljust(16, ' ')
elif number_menus == 1:
top_string = menus[0][0].ljust(15, ' ') + '>'
bottom_string = ' '.ljust(16, ' ')
else:
if self.index < self.last_index:
top_string = menus[self.index][0].ljust(15, ' ') + '>'
bottom_string = menus[self.index + 1][0].ljust(15, ' ') + ' '
else:
top_string = menus[self.index - 1][0].ljust(15, ' ') + ' '
bottom_string = menus[self.index][0].ljust(15, ' ') + '>'
self.disp.display_string(top_string, 1)
self.disp.display_string(bottom_string, 2)
self.selected_menu = menus[self.index][1]
def menu_list_loop(self, key_pressed):
number_of_menus = len(self.get_available_menus())
if key_pressed == ecodes.KEY_UP:
if self.index > 0:
self.last_index = self.index
self.index -= 1
self.update_menu_list()
if key_pressed == ecodes.KEY_DOWN:
if self.index < number_of_menus - 1:
self.last_index = self.index
self.index += 1
self.update_menu_list()
if key_pressed == ecodes.KEY_RIGHT:
self.current_menu = self.selected_menu
self.current_menu.enter(self.conn, self.disp)
else:
self.update_menu_list()
def update_current_menu(self, key_pressed):
if self.current_menu is not None and not self.current_menu.update(self.conn, self.disp, key_pressed):
self.current_menu = None
key_pressed = None
self.disp.clear()
self.update_menu_list()
if self.current_menu is None:
self.menu_list_loop(key_pressed)
|
victronenergy/dbus-characterdisplay
|
four_button_ui.py
|
four_button_ui.py
|
py
| 4,511 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3066916179
|
tokens = "this is a decent enough test sentence".split(" ")
cap = len(tokens)
degree = 2
for ti in range(cap):
lower = ti if ti < degree else degree
upper = cap - ti -1 if ti >= cap - degree -1 else degree
# the -1 is because range(1, 1) won't run even once
for i in range(upper):
print(i, ti, ti + i +1, upper, cap, degree)
print(tokens[ti], tokens[ti+i+1])
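# Editor's note: for the sample sentence this pairs each token with the one or
# two tokens that follow it (a forward window of size `degree`), e.g.
# ('this', 'is'), ('this', 'a'), ('is', 'a'), ..., ('test', 'sentence').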
|
sendakvmahr/c_sea
|
text/one-shots/test iterationing.py
|
test iterationing.py
|
py
| 395 |
python
|
en
|
code
| 0 |
github-code
|
6
|
12100401616
|
'''
Aoccdrnig to a rscheearch at Cmabrigde Uinervtisy, it deosn't mttaer in waht oredr the ltteers in a wrod are, the olny iprmoatnt tihng is taht the frist and lsat ltteers be at the rghit pclae. The rset can be a toatl mses and you can sitll raed it wouthit porbelm. Tihs is bcuseae the huamn mnid deos not raed ervey lteter by istlef, but the wrod as a wlohe
input: String arr[words in a dictionary]
output: all sets of words that are 'equivalent'
sample input: ['word', 'string', 'field', 'flied', 'fried', 'array', 'fired']
'''
'''
Idea:
Let's use a hashtable:
Keys = a touple containing the first and last characters of that word
Values = a touple: a set of inner characters and a list of words.
'''
from collections import Counter
def print_equivalent(words):
ht = dict()
for word in words:
word = word.strip('\'s')
key = (word[0], word[-1])
inner_letters = Counter(word[1:-1])
if key not in ht.keys():
ht[key] = [(inner_letters, set([word]))]
else:
matched = False
for entry in ht[key]:
if entry[0] == inner_letters:
entry[1].add(word)
matched = True
if not matched:
new_entry = (inner_letters, set([word]))
ht[key].append(new_entry)
for key in ht.keys():
for entry in ht[key]:
if len(entry[1]) > 1:
print (' '.join(entry[1]))
if __name__ == '__main__':
import file_utils, os
#print_equivalent([word for word in file_utils.read_file(os.path.join(os.environ['PYTHONPATH'], 'dic.txt')).splitlines() if len(word) > 2])
print_equivalent(['word', 'fired', 'fried', 'flied', 'field', 'felid'])
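# Editor's note: for the sample list above this prints two groups (set order
# may vary): "fired fried" and "flied field felid"; 'word' has no partner.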
|
Shaywei/MyDevTools
|
Python/misc/mock_iterview_quinn.py
|
mock_iterview_quinn.py
|
py
| 1,757 |
python
|
en
|
code
| 0 |
github-code
|
6
|
22648817021
|
import matplotlib.pyplot as plt
from scipy.io import loadmat
import numpy as np
import pandas
from os.path import join,exists
from os import mkdir
import cv2
import math
import os
rho = 1
actions = ['No-Action', 'sweeping', 'gargling', 'opening cupboard', 'washing hands', 'eating', 'writing', 'wiping',
'drinking','opening microwave oven', 'Throwing trash']
def get_skeleton(skeleton):
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
skeleton[:,0]=-skeleton[:,0]
lines=[[0,1],[1,2],[2,3],[3,4],[21,22],[22,23],[23,24],[21,4],[4,20],[20,11],[11,13],[13,12],[10,11],[14,11]
,[10,9],[9,8],[8,7],[7,5],[5,6],[14,15],[15,16],[16,17],[17,19],[18,19]]
for a,b in lines:
ax.plot3D([skeleton[a][0],skeleton[b][0]], [skeleton[a][1],skeleton[b][1]], [skeleton[a][2],skeleton[b][2]], 'gray')
ax.scatter3D(skeleton[:,0], skeleton[:,1], skeleton[:,2] ,c=skeleton[:,2])
ax = plt.gca()
xmin,xmax=min(skeleton[:,0])-0.25,max(skeleton[:,0])+0.25
ymin,ymax=min(skeleton[:,1])-0.25,max(skeleton[:,1])+0.25
zmin,zmax=min(skeleton[:,2])-0.25,max(skeleton[:,2])+0.25
ax.set_xlim([xmin, xmax])
ax.set_ylim([ymin, ymax])
ax.set_zlim([zmin, zmax])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.view_init(elev=-75, azim=90)
ax.set_axis_off()
plt.tight_layout()
plt.show()
def get_data(file,type='foot_to_foot'):
try:
f = open(file,'r').read().split()
datait = [float(x) for x in f]
if type=='no_order':
data = np.asarray(datait)
data = data.reshape((25,3))
else:
spine_base = datait[0:3]
spine_mid = datait[3:6]
neck = datait[6:9]
head = datait[9:12]
shoulder_left = datait[12:15]
elbow_left = datait[15:18]
wrist_left = datait[18:21]
hand_left = datait[21:24]
shoulder_right = datait[24:27]
elbow_right = datait[27:30]
wrist_right = datait[30:33]
hand_right = datait[33:36]
hip_left = datait[36:39]
knee_left = datait[39:42]
ankle_left = datait[42:45]
foot_left = datait[45:48]
hip_right = datait[48:51]
knee_right = datait[51:54]
ankle_right = datait[54:57]
foot_right = datait[57:60]
spine_shoulder = datait[60:63]
handtip_left = datait[63:66]
thumb_left = datait[66:69]
handtip_right = datait[69:72]
thumb_right = datait[72:75]
if type=='human':
data=np.stack((head, neck, spine_shoulder, shoulder_left, shoulder_right, elbow_left, elbow_right,
wrist_left, wrist_right, thumb_left, thumb_right, hand_left, hand_right, handtip_left,
handtip_right, spine_mid, spine_base, hip_left, hip_right, knee_left, knee_right,
ankle_left, ankle_right, foot_left, foot_right))
else :
data=np.stack((foot_left, ankle_left, knee_left, hip_left, spine_base, handtip_left, thumb_left,
hand_left, wrist_left, elbow_left, shoulder_left
,spine_shoulder,head,neck, shoulder_right,elbow_right,
wrist_right, hand_right,thumb_right
, handtip_right, spine_mid, hip_right,
knee_right, ankle_right,foot_right))
return data
except:
print('Ex',file)
return None
def normalize(array):
min_ = np.min(array,0)
max_ = np.max(array,0)
return (array-min_)/(max_-min_)
def get_sequence_energy(sequence):
energy = np.zeros((len(sequence),25))
for i in range(len(sequence)):
for k in range(25):
if i == 0:
energy[i][k] = np.linalg.norm(sequence[i][k] - sequence[i + 1][k])
elif i == len(sequence)-1:
energy[i][k] = np.linalg.norm(sequence[i][k] - sequence[i - 1][k])
else:
energy[i][k] = (np.linalg.norm(sequence[i][k] - sequence[i + 1][k])+np.linalg.norm(sequence[i][k] - sequence[i - 1][k]))/2
E = normalize(energy)
w = rho*E + (1-rho)
return w
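# Editor's note: w = rho*E + (1-rho) blends the normalized per-joint motion
# energy with a uniform weight; with rho = 1 (the setting above) the weights
# are purely energy-based, while rho = 0 would weight every joint equally.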
def get_labels(file):
labels = open(file,'r').read().splitlines()
prev_action=None
start =[]
end = []
actions=[]
for line in labels:
if line.replace(' ','').isalpha():
prev_action = line.strip()
else:
tab = line.split(' ')
start.append(int(tab[0]))
end.append(int(tab[1]))
actions.append(prev_action)
return (start,end,actions)
def get_image_label(start,end,labels):
index = (start+end)//2
for s,e,a in set(zip(labels[0],labels[1],labels[2])):
if s <= index and index <= e:
return a
return 'No-Action'
def to_ludl(data_path,labels,window_length=30,type='no_order'):
start_frame = min(labels[0]) - window_length//2
end_frame = max(labels[1]) + window_length //2
data = []
for i in range(start_frame,end_frame+1):
data.append(get_data(data_path+'/'+str(i)+'.txt',type))
images = [data[i:i + window_length] for i in range(len(data) - window_length + 1)]
lab = [get_image_label(i,i+window_length,labels) for i in range(start_frame,end_frame -window_length+2)]
i=0
while i <len(lab):
if lab[i] is None:
del lab[i]
del images[i]
else:
i+=1
i = 0
while i < len(images):
for x in images[i]:
if x is None or not x.shape==(25,3):
del lab[i]
del images[i]
break
else:
i += 1
return np.asarray(images),lab
def transform_image_ludl(image,path,name,weights):
RGB = image
height = image.shape[1]
width = image.shape[0]
X = np.arange(height)
Y = np.arange(width)
RGB = np.squeeze(RGB)
# weights = np.expand_dims(weights,0)
white = np.ones((width,height))*255
for i in range(3):
RGB[:,:,i] = np.floor(255 * (RGB[:,:,i] - np.amin(RGB[:,:,i])) / (np.amax(RGB[:,:,i]) - np.amin(RGB[:,:,i])))
RGB[:, :, i] = RGB[:, :, i]*weights+(1-weights)*white
# w = np.expand_dims(w,1)
# print(w[:10])
# print(sequence[0][:10])
# # w = np.concatenate([w,w,w],axis=1)
# print(w.shape)
# for i in range(len(sequence)):
# sequence[i]=sequence[i]*w + np.asarray([255,255,255])*(1-w)
# sequence = np.asarray(sequence)
# print(sequence[0][:10])
# print(sequence.shape,w.shape)
# print(sequence*w)
#
#
img = np.zeros((height, width, 3), dtype=np.uint8)
for i in X:
for j in Y:
img[i,j]=RGB[j,i]
# img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_LINEAR)
cv2.imwrite(join(path,name+'_.png'),img)
return img
def to_nassim(data_path,labels,window_length=40,type_='foot_to_foot'):
# start_frame = min(labels[0]) - window_length//2
# end_frame = max(labels[1]) + window_length //2
subdirs = [x[2] for x in os.walk(data_path)][0]
frames = [int(x[:-4]) for x in subdirs]
start_frame = min(frames)
end_frame = max(frames)
data = []
for i in range(start_frame,end_frame+1):
data.append(get_data(data_path+'/'+str(i)+'.txt',type_))
images = [data[i:i + window_length] for i in range(len(data) - window_length + 1)]
lab = [get_image_label(i,i+window_length,labels) for i in range(start_frame,end_frame - window_length+2)]
i=0
No_action_count = 100
while i <len(lab):
if lab[i] is None:
del lab[i]
del images[i]
elif lab[i] == 'No-Action':
# if No_action_count <= 0:
del lab[i]
del images[i]
# else:
# No_action_count -= 1
# i+=1
else:
i+=1
i = 0
images_aug=[]
while i < len(images):
jump = False
new_image=[]
for x in images[i]:
if x is None or not x.shape==(25,3):
del lab[i]
del images[i]
jump = True
break
else:
new_image.append(x * [1,1,-1])
if not jump:
i += 1
images_aug.append(new_image)
# lab.append(lab[i])
# images.extend(images_aug)
return np.asarray(images),np.asarray(lab),[get_sequence_energy(x) for x in images]
def transform_nassim(data_path,label_path,out_path):
images, labels, weights = to_nassim(data_path, get_labels(label_path), window_length=10,type_='foot')
data = []
lab = []
for i in range(len(images)):
path = join(out_path,labels[i])
if not exists(path):
mkdir(path)
data.append(transform_image_ludl(images[i],path,str(i),weights[i]))
lab.append(actions.index(labels[i]))
data = np.asarray(data)
labels = np.asarray(lab)
return data , labels
data_path='data'
train_path = 'Train_OAD_40_base'
test_path = 'Test_OAD_40_base'
if not exists(train_path):
mkdir(train_path)
if not exists(test_path):
mkdir(test_path)
train_sub = [1, 2, 3, 4, 7, 8, 9, 14, 15, 16, 18, 19, 20, 22, 23, 24, 25, 32, 33, 34, 35, 37, 38, 39, 49, 50, 51, 54, 57, 58]
test_sub = [0, 10, 13, 17, 21, 26, 27, 28, 29, 36, 40, 41, 42, 43, 44, 45, 52, 53, 55, 56]
train = None
train_label = None
test = None
test_label = None
for i in range(59):
path = join(data_path, str(i))
label_path = join(path,'label','label.txt')
image_path = join(path,'skeleton')
print('Processing sequence num ===========>',i)
data, label = transform_nassim(image_path, label_path, train_path if i in train_sub else test_path)
if i in train_sub:
if train_sub.index(i)==0:
train = data
train_label = label
else:
train = np.concatenate([train, data])
train_label = np.concatenate([train_label, label])
elif i in test_sub:
if test_sub.index(i)==0:
test = data
test_label = label
else:
test = np.concatenate([test,data])
test_label = np.concatenate([test_label,label])
#
# from keras.utils.np_utils import to_categorical
# test_label = to_categorical(test_label)
# train_label = to_categorical(train_label)
# test_label=test_label[:,1:]
# train_label=train_label[:,1:]
# np.save('train_x_{}_base_one_by_one.npy'.format(rho),train)
# np.save('test_x_{}_base_one_by_one.npy'.format(rho),test)
# np.save('train_y_{}_base_one_by_one.npy'.format(rho),train_label)
# np.save('test_y_{}_base_one_by_one.npy'.format(rho),test_label)
Y = np.argmax(train_label,axis=1)
print(Y.shape)
unique, counts = np.unique(Y, return_counts=True)
print(dict(zip(unique, counts)))
Y = np.argmax(test_label,axis=1)
print(Y.shape)
unique, counts = np.unique(Y, return_counts=True)
print(dict(zip(unique, counts)))
print(train.shape,train_label.shape,test.shape,test_label.shape)
#29126,)
# (23912,)
|
Vincent-Fer/activity-recognition-prediction-online
|
encodage/encodage.py
|
encodage.py
|
py
| 11,449 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71484029308
|
def parser(l, r):
if all(S[i].isdigit() for i in range(l, r + 1)):
return int(S[l:r + 1]) // 2 + 1
ret = []
cnt = 0
start = 0
for i in range(l, r + 1):
if S[i] == '[':
cnt += 1
if cnt == 1:
start = i
elif S[i] == ']':
if cnt == 1:
ret.append(parser(start + 1, i - 1))
cnt -= 1
ret.sort()
return sum(ret[:len(ret) // 2 + 1])
N = int(input())
for _ in range(N):
S = input()
print(parser(0, len(S) - 1))
|
knuu/competitive-programming
|
aoj/11/aoj1188.py
|
aoj1188.py
|
py
| 543 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18349597036
|
# To Calculate the Roots of a Quadratic Equation
import cmath
num1 = int(input("Enter the first number:"))#Take three numbers from user as num1,num2,num3
num2 = int(input("Enter the Second number:"))
num3 = int(input("Enter the Third number:"))
def Quadratic(num1, num2, num3):#Defining function
delta = (num2 ** 2) - (4 * num1 * num3)#Calculating delta by using formula
Root1 = (-num2 + cmath.sqrt(delta)) / (2 * num1)#Calculating Root1 and Root2 for three numbers
Root2 = (-num2 - cmath.sqrt(delta)) / (2 * num1)
print('The Roots are {0} and {1}'.format(Root1, Root2)) # Display the result with format(), since the roots may be complex numbers
Quadratic(num1, num2, num3)
|
MayuriTambe/FellowshipPrograms
|
Quadratic.py
|
Quadratic.py
|
py
| 699 |
python
|
en
|
code
| 1 |
github-code
|
6
|
25815068031
|
import csv
from utils import DateCounter
def pad(filename: str):
#---------------read_in---------------
data = []
with open(filename, mode='r') as file:
reader = csv.reader(file)
for row in reader:
data.append(row)
#---------------padding---------------
newdata = []
newdata.append(data[0])
for n in range(1, len(data)-1):
counter = DateCounter(data[n][1])
counter.count()
# print(counter.current_date())
if (data[n][0] != data[n+1][0]) or (counter.current_date() == data[n+1][1]): # the user changed or the next date is already consecutive, so no padding is needed
newdata.append(data[n])
else:
newdata.append(data[n])
while counter.current_date() != data[n+1][1]:
newdata.append(data[n].copy())
newdata[-1][1] = counter.current_date()
counter.count()
#--------------write_back--------------
with open(filename, mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerows(newdata)
|
UX404/Financial-data-processing
|
data_process/padding.py
|
padding.py
|
py
| 1,068 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21952104219
|
import itertools
import logging
from django.conf import settings
from django.core.management.base import BaseCommand
from autoslug.utils import slugify
from rainboard.models import (
Forge,
Namespace,
Project,
Robotpkg,
update_github,
update_gitlab,
)
class Command(BaseCommand):
help = "run project creation stuff" # noqa: A003
def add_arguments(self, parser):
parser.add_argument("org")
parser.add_argument("project")
def handle(self, org, project, *args, **options): # noqa: C901
path = settings.RAINBOARD_RPKG
logger = logging.getLogger("rainboard.management.project")
org = Namespace.objects.get(slug=org)
slug = slugify(project)
logger.warning("looking for %s / %s", org, slug)
project = Project.objects.filter(slug=slug)
if project.exists():
logger.warning("found %s", project.get())
else:
logger.warning("not found. let's get it from github & gitlab")
github = Forge.objects.get(slug="github")
for data in github.api_list(f"/orgs/{org.slug}/repos"):
if slugify(data["name"]) == slug:
logger.warning("found on github / %s", org)
update_github(github, org, data)
break
for user in Namespace.objects.filter(group=False):
for data in github.api_list(f"/users/{user.slug}/repos"):
if slugify(data["name"]) == slug:
logger.warning("found on github / %s", user)
update_github(github, user, data)
break
gitlab = Forge.objects.get(slug="gitlab")
for data in gitlab.api_list("/projects"):
if slugify(data["name"]) == slug:
logger.warning("found on gitlab / %s", data["namespace"]["name"])
update_gitlab(gitlab, data)
project = Project.objects.get(slug=slug)
for slug in [project.slug, project.slug.replace("_", "-")]:
for pkg in itertools.chain(
path.glob(f"*/{slug}{project.suffix}"),
path.glob(f"*/py-{slug}{project.suffix}"),
):
obj, created = Robotpkg.objects.get_or_create(
name=pkg.name,
category=pkg.parent.name,
project=project,
)
if created:
logger.warning("found on robotpkg %s", obj)
obj.update(pull=False)
for rpkg in project.robotpkg_set.all():
logger.warning("updating images for %s", rpkg)
rpkg.update_images()
logger.warning("Done")
|
Gepetto/dashboard
|
rainboard/management/commands/project.py
|
project.py
|
py
| 2,763 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29224887814
|
""" Plot cross-section profile: Multiple cross-sections
"""
import sys
sys.path.append('/home/zhouyj/software/seis_view')
import numpy as np
import matplotlib.pyplot as plt
from reader import read_ctlg, slice_ctlg
import warnings
warnings.filterwarnings("ignore")
# i/o paths
fctlg = 'input/catalog_example.csv'
title = 'Example Cross-Section View: Multiple Cross-Sections'
fout = 'output/example_prof_multi-cross-sec.pdf'
# catalog info
lon_rng = [102.23, 102.32]
lat_rng = [29.14, 29.25]
dep_rng = [5, 15]
dep_corr = 0
mag_corr = 1.
ref_pnts = np.array(\
[[102.26,29.235],[102.285,29.165],
[102.25,29.2],[102.29,29.21],
[102.255,29.18],[102.295,29.19]]) # [lon,lat]
pnt_names = ["A","A'","B","B'","C","C'"]
prof_wids = [1.5,1,1] # km
# fig params
fig_size = (10*0.8, 10*0.8)
subplots = [212,221,222]
mark_size = 5.
alpha=0.8
color = 'tab:blue'
fsize_label = 12
fsize_title = 14
xlabel = 'Along-Profile Distance (km)'
ylabel = 'Depth (km)'
subplot_rect = {'left':0.08, 'right':0.96, 'bottom':0.08, 'top':0.95, 'wspace':0.1, 'hspace':0.1}
# read catalog
events = read_ctlg(fctlg)
events = slice_ctlg(events, lat_rng=lat_rng, lon_rng=lon_rng, dep_rng=dep_rng)
lat = np.array(list(events['lat']))
lon = np.array(list(events['lon']))
dep = np.array(list(events['dep'])) + dep_corr
mag = (np.array(list(events['mag'])) + mag_corr) * mark_size
num_events = len(events)
# calc along profile dist
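# Editor's note: calc_prof projects each event C onto the profile segment A-B:
# the along-profile distance is |AC|*cos(theta) (converted to km via *111), and
# events whose perpendicular offset |AC|*sin(theta) exceeds prof_wid, or whose
# projection falls outside the segment, are skipped.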
def calc_prof(ref_pnt):
prof_dist, prof_dep, prof_mag = [], [], []
cos_lat = np.cos(ref_pnt[0][1]*np.pi/180)
vec_ab = ref_pnt[1] - ref_pnt[0]
vec_ab[0] *= cos_lat
abs_ab = np.linalg.norm(vec_ab)
for i in range(num_events):
loc_c = np.array([lon[i], lat[i]])
vec_ac = loc_c - ref_pnt[0]
vec_ac[0] *= cos_lat
abs_ac = np.linalg.norm(vec_ac)
cos = vec_ac.dot(vec_ab) / abs_ab / abs_ac
if abs_ac * (1-cos**2)**0.5 > prof_wid/111.: continue
if cos<0 or abs_ac*cos>abs_ab: continue
prof_dist.append(abs_ac * cos * 111)
prof_dep.append(dep[i])
prof_mag.append(mag[i])
return prof_dist, prof_dep, prof_mag, abs_ab*111
def plot_label(xlabel=None, ylabel=None, yvisible=True):
if xlabel: plt.xlabel(xlabel, fontsize=fsize_label)
if ylabel: plt.ylabel(ylabel, fontsize=fsize_label)
plt.setp(ax.xaxis.get_majorticklabels(), fontsize=fsize_label)
plt.setp(ax.yaxis.get_majorticklabels(), fontsize=fsize_label, visible=yvisible)
# start plot
plt.figure(figsize=fig_size)
for i,subplot in enumerate(subplots):
# get specific params
prof_wid = prof_wids[i]
# plot subplot
plt.subplot(subplot)
ax = plt.gca()
ax.invert_yaxis()
# proj to proile
prof_dist, prof_dep, prof_mag, abs_ab = calc_prof(ref_pnts[2*i:2*i+2])
plt.scatter(prof_dist, prof_dep, prof_mag, color=color, edgecolor='none', alpha=alpha)
# plot ref pnt
plt.annotate(pnt_names[2*i], (0,dep_rng[0]), fontsize=fsize_label, va='top', ha='center')
plt.annotate(pnt_names[2*i+1], (abs_ab,dep_rng[0]), fontsize=fsize_label, va='top', ha='center')
# fill edge
edgex = [0,0,abs_ab,abs_ab]
edgey = [dep_rng[0],dep_rng[1],dep_rng[0],dep_rng[1]]
plt.scatter(edgex, edgey, alpha=0)
if i==0: plot_label(xlabel,ylabel)
elif i==1: plot_label(ylabel=ylabel)
else: plot_label(yvisible=False)
plt.suptitle(title, fontsize=fsize_title)
plt.subplots_adjust(**subplot_rect)
plt.savefig(fout)
plt.show()
|
ali4413/Seismicity-Visualization
|
Python/plot_prof_multi-cross-sec.py
|
plot_prof_multi-cross-sec.py
|
py
| 3,432 |
python
|
en
|
code
| null |
github-code
|
6
|
74471480187
|
#SpinLIne.py
import turtle as tt
import time
tt.pensize(2)
tt.bgcolor("black")
colors = ['red','yellow','purple','blue']
tt.tracer(False)
for x in range(400):
tt.forward(10*x)
tt.color(colors[x%4])
tt.left(95)
tt.tracer(True)
time.sleep(10)
|
izhiman/python-learning
|
Python-basic/basic-week1/SpinLIne.py
|
SpinLIne.py
|
py
| 243 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32187914747
|
# Baekjoon - Problem 9012: Parentheses
n = int(input())  # read the number of test cases
for _ in range(n):  # repeat once for each test case
    stack = []  # create a stack to hold the opening parentheses
    vpss = input()  # e.g. "(())()" is read as a single string
    for vps in vpss:  # walk through the string one character at a time
        if vps == '(':  # an opening parenthesis is pushed
            stack.append(vps)
        elif vps == ')':  # a closing parenthesis is checked against the stack
            if stack:  # pop if the stack is not empty; otherwise print NO and stop
                stack.pop()
            else:
                print("NO")
                break
    else:  # the loop finished without hitting a break
        if not stack:  # the stack is empty, so the string is balanced
            print("YES")
        else:  # the stack still has unmatched '(' left
            print("NO")
|
kcw0331/python-for-coding-test
|
baekjoo/9012.py
|
9012.py
|
py
| 890 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
39671957475
|
""" ะะปะฐัั ะดะปั ัะฐะฑะพัั ั ะบะพะฝัะตััะฐะผะธ """
import os
import re
import json
import string
import shutil
import configparser, itertools
from collections import OrderedDict
from bs4 import BeautifulSoup
from mysite import settings
from .models import Cntsregs, Logins, Contests, Problems
from problems.classes import ProblemsCreator
# Helper class for parsing contest settings
class MultiDict(OrderedDict):
_unique = 0 # class variable
def __setitem__(self, key, val):
if isinstance(val, dict):
self._unique += 1
key += "_" + str(self._unique)
OrderedDict.__setitem__(self, key, val)
# Class for parsing contest settings
class SettingParser(object):
    # Removes the number from a key name during parsing
    def delete_number_in_key(self, keyvalue):
        keyname = str(keyvalue)
        clear_keyname = re.sub('_[\d]+', '', keyname)
        return clear_keyname
    # Converts a string from Windows-1251 to UTF-8
def convert_from_windows1251_to_utf8(self, value):
string = str(value)
decoded_string = ""
try:
decoded_string = string.encode('windows-1251').decode('utf-8')
except:
decoded_string = string
return decoded_string
    # Parses the contest config
    # Returns a dictionary
def parse_config(self, filepath):
config_data = dict()
config = configparser.RawConfigParser(strict=False, allow_no_value=True, dict_type=MultiDict)
with open(filepath) as fp:
config.read_file(itertools.chain(['[general]'], fp), source=filepath)
for key in config:
config_data[key] = dict()
for i in config.items(key):
item_key = self.convert_from_windows1251_to_utf8(i[0])
item_value = self.convert_from_windows1251_to_utf8(i[1])
config_data[key][item_key] = item_value
return config_data
# Manager class for handling contests
class ContestsManager(object):
    _errors = list()
    _problems_folder = "problems/"
    # Returns the list of errors
    def get_errors(self):
        return self._errors
    # Main directory with the contests
    @property
    def main_dir(self):
        return settings.EJUDGE_CONTEST_PATH
    # Directory with the contest XML files
    @property
    def xml_contests_dir(self):
        return settings.EJUDGE_CONTEST_SETTINGS_PATH
    # Prefix for the path to the configuration file
    @property
    def conf_prefix(self):
        return '/conf/serve.cfg'
    # Create all folders required for a contest
def create_contest_dirs(self, full_id):
contest_folder = self.main_dir + str(full_id) + "/"
if os.path.isdir(contest_folder):
self._errors.append("Contest dir already exist")
return False
conf_folder = contest_folder + "conf/"
problems_folder = contest_folder + "problems/"
var_folder = contest_folder + "var/"
include_var_folders = ["archive", "run", "status", "team_extra", "work"]
try:
os.mkdir(contest_folder)
os.mkdir(conf_folder)
os.mkdir(problems_folder)
os.mkdir(var_folder)
for folder in include_var_folders:
path = var_folder + folder + "/"
os.mkdir(path)
except:
self._errors.append("Cannot create contest folders")
return False
return True
    # Loads contest data for the given user
def upload_user_contests(self, user_id):
user_contests = list()
user_contests_obj = Cntsregs.objects.all().filter(user=user_id)
for contest_object in user_contests_obj:
contest_id = contest_object.contest_id
user_contests.append(contest_id)
return user_contests
    # Registers a user for a contest
def reg_user_to_contest(self, user_id, contest_id):
error = ""
try:
user = Logins.objects.get(user_id=user_id)
except:
error = "Cannot get User"
return error
try:
is_register_exist = Cntsregs.objects.filter(user=user, contest_id=contest_id).exists()
except:
error = "Cannot check if record exist"
return error
if not is_register_exist:
try:
Cntsregs.objects.create(user=user, contest_id=contest_id, status=0)
except:
error = "Cannot add User to Contest"
return error
else:
error = "Record already exist"
return error
return False
    # Generates the path to the configuration file
    def get_config_path(self, full_id):
        return self.main_dir + str(full_id) + self.conf_prefix
    # Generates the path to the contest folder
    def get_contest_dir(self, full_id):
        return self.main_dir + str(full_id) + "/"
    # Generates the path to the XML configuration file
    def get_xml_config_path(self, full_id):
        return self.xml_contests_dir + str(full_id) + ".xml"
    # Checks whether the configuration file exists
    def is_config_exist(self, full_id):
        return os.path.isfile(self.main_dir + str(full_id) + self.conf_prefix)
    # Checks whether the XML configuration file exists
    def is_xml_config_exist(self, full_id):
        return os.path.isfile(self.xml_contests_dir + str(full_id) + ".xml")
    # Checks whether the contest directory exists
    def is_contest_dir_exist(self, full_id):
        return os.path.isdir(self.main_dir + str(full_id) + "/")
    # Gets the list of all contest XML files in the directory
    def get_contests_xml_list(self):
        directory = self.xml_contests_dir
        if not os.path.isdir(directory):
            raise Exception("Contest directory not found")
        files = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and re.search('.xml', f)]
        return files
    # Gets contest data from the XML file
def parse_contest_xml(self, filepath):
if not os.path.isfile(filepath):
return False
try:
file = open(filepath, 'r', encoding="utf-8")
except IOError:
return False
soup = BeautifulSoup(file, 'xml')
try:
name = soup.find('name').get_text()
except:
name = "Unknown"
try:
sched_time = soup.find('sched_time').get_text()
except:
sched_time = "Unknown"
info = {
"name": name,
"sched_time": sched_time,
}
file.close()
return info
    # Gets the contest ID from the XML file name
def parse_contest_id_from_xml(self, xmlname):
search = re.match(r'\d+', str(xmlname))
if search is not None:
return search.group(0)
else:
return None
    # Parses the contest settings
    # Returns a dictionary or False
def parse_contest_settings(self, contest_full_id):
if not self.is_config_exist(contest_full_id):
return False
config_path = self.get_config_path(contest_full_id)
setting_parser = SettingParser()
config_data = setting_parser.parse_config(config_path)
return config_data
    # Gets the list of IDs of all contests from the XML files
def get_contests_ids(self, filenames):
ids = list()
for filename in filenames:
contest_id = self.parse_contest_id_from_xml(filename)
ids.append(contest_id)
return ids
    # Gets data about a single contest
def get_contest(self, contest_full_id):
contest = dict()
contest_id = int(contest_full_id)
contest_settings = ""
contest_config_path = ""
contest_xml_config_path = ""
contest_info = dict()
        # Full path to the contest configuration file
if (self.is_config_exist(contest_full_id)):
contest_config_path = self.get_config_path(contest_full_id)
try:
contest_settings = self.parse_contest_settings(contest_full_id)
except:
contest_settings = dict()
#self._errors.append("Cannot parse contest settings")
        # Full path to the contest XML configuration file
if (self.is_xml_config_exist(contest_full_id)):
contest_xml_config_path = self.get_xml_config_path(contest_full_id)
try:
contest_info = self.parse_contest_xml(contest_xml_config_path)
except:
contest_info = dict()
self._errors.append("Cannot parse contest XML")
        # Contest data
contest["full_id"] = contest_full_id
contest["id"] = contest_id
contest["dir"] = self.get_contest_dir(contest_full_id)
contest["problems_dir"] = self.get_contest_dir(contest_full_id) + self._problems_folder
if "name" in contest_info:
contest["name"] = contest_info["name"]
else:
contest["name"] = "Unknown"
if "sched_time" in contest_info:
contest["sched_time"] = contest_info["sched_time"]
else:
contest["sched_time"] = "Unknown"
contest["xml_config_path"] = contest_xml_config_path
contest["config_path"] = contest_config_path
contest["settings"] = contest_settings
return contest
    # Gets data about all contests
def get_contests(self):
contests = list()
contest_xmls = self.get_contests_xml_list()
for xml in contest_xmls:
contest_full_id = self.parse_contest_id_from_xml(xml)
contest = self.get_contest(contest_full_id)
contests.append(contest)
return contests
    # Gets the next unique FULL ID
def get_next_full_id(self):
contests = Contests.objects.all()
ids = list()
for contest in contests:
ids.append(int(contest.full_id))
if len(ids):
last_id = int(max(ids))
next_id = last_id + 1
else:
next_id = 1
full_id = str(next_id)
while len(full_id) != 6:
full_id = "0" + full_id
        # Generate a unique Full ID
count = 0
while self.is_contest_dir_exist(full_id):
full_id = str(int(full_id) + 1)
while len(full_id) != 6:
full_id = "0" + full_id
count = count + 1
if count > 100:
break
return full_id
    # Saves a contest to the DB
def save_contest(self, form_data):
name = form_data.get('name')
sched_time = form_data.get('sched_time')
problems = form_data.get('tasks')
duration = form_data.get('duration')
try:
full_id = self.get_next_full_id()
except:
            self._errors.append("Cannot get the next FULL_ID")
return False
contest_dir = self.get_contest_dir(full_id)
xml_config_path = self.get_xml_config_path(full_id)
config_path = self.get_config_path(full_id)
try:
Contests.objects.create(name=name,
sched_time=sched_time,
problems=problems,
full_id=full_id,
contest_dir=contest_dir,
duration=duration,
xml_config_path=xml_config_path,
config_path=config_path)
except:
            self._errors.append("Failed to create the contest")
return False
return True
    # Updates a contest in the DB
def update_contest(self, form_data):
contest_id = form_data.get('contest_id')
try:
contest_object = Contests.objects.get(pk=contest_id)
except:
return False
name = form_data.get('name')
sched_time = form_data.get('sched_time')
problems = form_data.get('tasks')
duration = form_data.get('duration')
try:
contest_object.name = name
contest_object.sched_time = sched_time
contest_object.problems = problems
contest_object.duration = duration
contest_object.save()
except:
            self._errors.append("Failed to update the contest")
return False
return True
    # Creates the XML file for a contest
    def create_contest_xml(self, contest):
        filepath = self.get_xml_config_path(contest.full_id)
        xml_template = settings.EJUDGE_FILE_EXAMPLES_FOLDER + "config.xml"
        if os.path.isfile(filepath):
            self._errors.append("The contest XML file already exists")
            return False
        if not os.path.isfile(xml_template):
            self._errors.append("The contest XML template does not exist")
return False
if contest.sched_time != "":
sched_time = '<sched_time>' + contest.sched_time + '</sched_time>'
else:
sched_time = ""
if contest.name != "":
name = contest.name
else:
name = ""
try:
with open(xml_template, encoding="utf-8") as fp:
xml_example_data = fp.read()
xml_example_data = xml_example_data.replace("{{ name }}", name)
xml_example_data = xml_example_data.replace("{{ sched_time }}", sched_time)
except:
            self._errors.append("Cannot read the contest XML template")
return False
try:
with open(filepath, mode="w", encoding="utf-8") as fp2:
fp2.write(xml_example_data)
except:
            self._errors.append("Cannot create the XML for the contest")
return False
return True
    # Undeploys (tears down) a contest
    def undeploy_contest(self, contest_id):
        try:
            contest_id = int(contest_id)
            contest = Contests.objects.get(pk=contest_id)
        except:
            self._errors.append("Error fetching the contest")
return False
if os.path.isfile(contest.config_path):
os.remove(contest.config_path)
if os.path.isfile(contest.xml_config_path):
os.remove(contest.xml_config_path)
if os.path.isdir(contest.contest_dir):
shutil.rmtree(contest.contest_dir)
ejudge_contest_id = int(contest.full_id)
Cntsregs.objects.filter(contest_id=ejudge_contest_id).delete()
return True
    # Deploys a contest
    def deploy_contest(self, contest_id):
        # Step 1. Fetch the contest
        try:
            contest_id = int(contest_id)
            contest = Contests.objects.get(pk=contest_id)
        except:
            self._errors.append("Error fetching the contest")
            return False
        # Step 2. Fetch the problems attached to the contest
        try:
            tasks = json.loads(contest.problems)
        except:
            self._errors.append("Cannot parse the problems JSON")
return False
problems = list()
for task in tasks:
try:
item = Problems.objects.get(pk=task["id"])
problems.append(item)
except:
                self._errors.append("Cannot fetch the problem with ID " + task["id"])
                continue
        # Step 3. Create all the folders
        create_dir_success = self.create_contest_dirs(contest.full_id)
        if not create_dir_success:
            self._errors.append("Error creating the contest directories")
            return False
        create_xml_success = self.create_contest_xml(contest)
        if not create_xml_success:
            self._errors.append("Error creating the contest XML")
            return False
        problemsManager = ProblemsCreator()
        # Assign each problem its own short letter ID
problemsShortIds = list(string.ascii_uppercase)
max_length = len(problemsShortIds)
i = 0
problems_dir = contest.contest_dir + "problems/"
problems_configs = ""
for problem in problems:
if i >= max_length:
break
problem_id = problemsShortIds[i]
create_problem_dir_success = problemsManager.create_problem_folder(problems_dir, problem_id)
if not create_problem_dir_success:
                self._errors.append("Cannot create a directory for problem " + problem.title)
return False
create_xml_success = problemsManager.create_xml(create_problem_dir_success, problem.id, problem_id)
if not create_xml_success:
                self._errors.append("Cannot create the XML for problem " + problem.title)
return False
problem_dir = create_problem_dir_success + "/tests/"
create_tests_success = problemsManager.create_tests(problem_dir, problem.tests)
if not create_tests_success:
                self._errors.append("Cannot create the tests for problem " + problem.title)
return False
problems_config = problemsManager.get_problem_config(problem, i + 1, problem_id)
problems_configs = problems_configs + problems_config + "\n"
i = i + 1
contest_config_template = settings.EJUDGE_FILE_EXAMPLES_FOLDER + "serve.cfg"
with open(contest_config_template, mode="r", encoding="utf-8") as fp:
serveCfg = fp.read()
serveCfg = serveCfg.replace("{{ duration }}", str(contest.duration))
serveCfg = serveCfg.replace("{{ problems }}", problems_configs)
with open(contest.config_path, mode="w", encoding="utf-8") as fp2:
fp2.write(serveCfg)
try:
self.reg_user_to_contest(1, int(contest.full_id))
except:
            self._errors.append("Cannot register the administrator")
return True
|
Raftor74/ejudge-web-app
|
contests/classes.py
|
classes.py
|
py
| 19,662 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
20913974107
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="Book API",
default_version='v1',
description="API for books and authors",
terms_of_service="https://www.yourapp.com/terms/",
contact=openapi.Contact(email="[email protected]"),
license=openapi.License(name="Your License"),
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('book.urls')),
path('api/', include('api.urls')),
path('user/', include('users.urls')),
path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
path('api/token-auth/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('api/token-refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('api-auth/vote', include('rest_framework.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.STATIC_URL, document_root = settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
almazuulu/BookCatalogue
|
bookcatalogue/bookcatalogue/urls.py
|
urls.py
|
py
| 1,502 |
python
|
en
|
code
| 0 |
github-code
|
6
|
37158413403
|
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
data = load_iris()
X = data.data
X[:, 0] /= 2.54
X[:, 1] /= 100
def scikit_pca(X):
X_std = StandardScaler().fit_transform(X)
sklearn_pca = PCA(n_components=2)
X_transf = sklearn_pca.fit_transform(X_std)
plt.figure(figsize=(11,11))
plt.scatter(X_transf[:,0], X_transf[:,1], s=600, color='#8383c4', alpha=0.56)
plt.title('PCA via scikit-learn (using SVD)', fontsize=20)
plt.xlabel('Petal Width', fontsize=15)
plt.ylabel('Sepal Length', fontsize=15)
plt.show()
scikit_pca(X)
|
QiliWu/Python-datavis
|
datavis/PCA.py
|
PCA.py
|
py
| 668 |
python
|
en
|
code
| 2 |
github-code
|
6
|
32802746456
|
import gensim
import gensim.downloader as api
from gensim.models import Word2Vec as w2v
import inspect
import logging
import warnings
import numpy as np
from sklearn import *
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import pairwise_distances
import os
import pandas as pd
import matplotlib.pyplot as plt
from elasticsearch import Elasticsearch, helpers
import json
import time
''' To query '''
matching_query = { "query_string": {
"query" : None
}
}
def main():
global matching_query
    muser = None # the user whose id we will use
    # connect to Elasticsearch
    es = Elasticsearch(host = "localhost", port = 9200)
    # results["hits"]["hits"][0]["_score"] is the score of each document
    # results[0]["_source"] contains the keys of the stored information
    while 1:
        # read the input
        mvar = str(input("Give a string : "))
        res_sz = str(input(" *** \n(1 < #results && 10 000 > #results)\n *** \nNumber of results : "))
        mykappa = int(input("Number of clusters for kmeans : "))
        maxIterations = int(input("Number of iterations for kmeans : "))
        matching_query["query_string"]["query"] = str(mvar)
        # searching ...
        results = es.search(index="bx_books_2",query=matching_query,size = int(res_sz))
        mcounter = 0  # used to count the results
        results = results["hits"]["hits"]  # my results
        # get the keys
        try :
            lst = list(results[0]["_source"].keys())
        except IndexError :  # an IndexError means no results: index 0 of an empty list was requested
print("No results.\nSearch again.")
summaries = []
for res in results :
summaries.append(res["_source"]["summary"])
print(str(summaries))
warnings.filterwarnings('ignore')
#ratings_df = pd.read_csv('BX-Book-Ratings.csv')
#ratings_df = ratings_df.loc[ratings_df['uid']==uid]
#ratings_df['isbn'] = ratings_df['isbn'].map(lambda x: x.strip())
# print(len(ratings_df.isbn))
#books_df = pd.read_csv('BX-Books.csv')
# print(books_df.columns)
#books_df = books_df.drop(['book_author', 'year_of_publication', 'publisher', 'category'], axis='columns')
#books_df['isbn'] = books_df['isbn'].map(lambda x: x.strip())
mtuple= []
'''logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
corpus = api.load('text8')
print(inspect.getsource(corpus.__class__))
print(inspect.getfile(corpus.__class__))
model = w2v(corpus)
model.save('readyvocab.model')'''
#myratings = []
#i = 0
#for isbn in ratings_df.isbn:
# try:
# i+=1
# summaries.append(list2string(books_df[books_df['isbn']==str(isbn)].summary.values))
# if summaries[len(summaries)-1] == "":
# continue
# else:
# mtuple.append( (isbn , summaries[len(summaries)-1] ))
# myratings.append( ratings_df[ratings_df['isbn']==str(isbn)].rating.values[0])
# except:
# ratings_df.pop(i)
model = w2v.load('readyvocab.model')
processed_sentences = []
for sentence in summaries:
processed_sentences.append(gensim.utils.simple_preprocess(sentence))
# print(*processed_sentences, sep='\n')
vectors = {}
i = 0
for v in processed_sentences:
vectors[str(i)] = []
for k in v:
try:
vectors[str(i)].append(model.wv[k].mean())
except:
vectors[str(i)].append(np.nan)
i+=1
df_input = pd.DataFrame(dict([ (k,pd.Series(v)) for k,v in vectors.items() ]))
for i in range(0,len(vectors)):
df_input.fillna(value=0.0,inplace=True)
df_input[str(i)].replace(to_replace=0,value=df_input[str(i)].mean(),inplace=True )
processed = my_kmeans(df=df_input,k=mykappa,maxIterations=maxIterations)
#np.any(np.isnan(df_input))
#np.all(np.isfinite(df_input))
#X_train, X_test, y_train, y_test = train_test_split(X, y,shuffle=False)
        '''get the titles'''
        titles = []
        for res in results:
            try:
                titles.append(res["_source"]["book_title"])
                #titles.append(list2string(books_df[books_df['isbn']==str(isbn)].book_title.values))
            except:
                pass#ratings_df.pop(i)
        '''print the clusters'''
        for myint in range(0,mykappa):# for each cluster
            mcounter = -1
            print('\n'+5*"*"+" Cluster : "+str(myint+1)+ " "+5*"*"+'\n')
            for j in processed[1]:
                mcounter+=1
                if myint == j:# it belongs to the same cluster
print(titles[mcounter])
else:
pass
def my_kmeans(df,k = 2,maxIterations=10):
    # initialize the centroids
always_centroid = []
c1 = None
c2 = None
choose = np.random.randint(df.shape[1] , size=k)
my_centroids = []
for i in range(0,k):
my_centroids.append( df[str(choose[i])].values.tolist() )
always_centroid.append( df[str(choose[i])] )
    # they have been turned into lists
i = 0
to_centroid = []
for i in range(0,df.shape[1]):
if i in choose:
pass
else:
similarities = []
for j in range(0,len(my_centroids)):
                # put the similarities in a list and take the largest absolute value
similarities.append( my_cosine_similarity(np.squeeze( np.asarray(my_centroids[j] ) ) ,np.squeeze( np.asarray(df[str(i)].values.tolist() ) ) ) )
            # pick the largest similarity
best = 0
for j in range(0,len(similarities)):
if abs(similarities[j]) > best:
best = similarities[j]
            # a pop is also needed here
            if len(to_centroid)-1 == i:# the number of elements always equals i: one centroid per summary
                to_centroid.pop(len(to_centroid) -1)
            # the vector is assigned to that centroid
            to_centroid.append(j)
iterations = -1
while iterations < maxIterations:
        c1 = always_centroid# before the centroid changes
iterations+=1
kappa = 0
#update centroids
        for i in range(0,len(my_centroids)):# for each centroid
            for j in range(0,len(to_centroid)):
                # if this is the matching centroid, add it to the sum
                if to_centroid[j] == i:
                    # do the sum
                    always_centroid[i] = always_centroid[i]+df[str(j)]
                else:
                    pass
            # at the end scale all the elements
            always_centroid[i] = always_centroid[i]*(1/len(always_centroid[i]))
        # redo the assignment step
my_centroids = []
for i in range(0,k):
my_centroids.append( always_centroid[i].values.tolist() )
        # they have been turned into lists
i = 0
to_centroid = []
for i in range(0,df.shape[1]):
if i in choose:
pass
else:
similarities = []
for j in range(0,len(my_centroids)):
                    # put the similarities in a list and take the largest absolute value
similarities.append( my_cosine_similarity(np.squeeze( np.asarray(my_centroids[j] ) ) ,np.squeeze( np.asarray(df[str(i)].values.tolist() ) ) ) )
                # pick the largest similarity
best = 0
for j in range(0,len(similarities)):
if abs(similarities[j]) > best:
best = similarities[j]
                # a pop is also needed here
                if len(to_centroid)-1 == i:# the number of elements always equals i: one centroid per summary
                    to_centroid.pop(len(to_centroid) - 1)
                # the vector is assigned to that centroid
                #print(csimilarity)
                to_centroid.append(j)
c2 = my_centroids
        # if the centroids are unchanged then stop
p = True
for i in range(0,k):
#print(str(c1[i]))
#print(str(c2[i]))
print("Finished in : "+ str(iterations) +" iterations .")
if c1[i].equals(c2[i]):
pass
else:
p = False
return (choose, to_centroid)
def my_cosine_similarity(arr1,arr2):
dot = sum(a*b for a,b in zip(arr1,arr2) )
norm_arr1 = sum(a*a for a in arr1) ** 0.5
norm_arr2 = sum(b*b for b in arr2) ** 0.5
csimilarity = dot/(norm_arr1*norm_arr2)
return csimilarity
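# Hedged worked examples for the cosine similarity above (illustrative only, not executed):
#   my_cosine_similarity([1, 0], [0, 1])  # -> 0.0  (orthogonal vectors)
#   my_cosine_similarity([1, 1], [2, 2])  # -> 1.0  (same direction)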
def list2string(s):
strl = ""
for e in s :
strl +=e
return strl
if __name__ == "__main__":
main()
|
d4g10ur0s/InformationRetrieval_21_22
|
paradotea/erwthma_4.py
|
erwthma_4.py
|
py
| 9,321 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16627798046
|
from flask import Flask, request
import json
app = Flask(__name__)
# create python dictionary to hold user data
user_account = {
1:
{"first_name": 'betty',
"last_name": 'joy',
"phone_number": '0493827405',
"email_address": '[email protected]'
}
}
user_count = 1
@app.route('/get_user/<id>', methods=["GET"])
def get_user(id):
return user_account[int(id)]
@app.route('/create_user', methods=["POST"])
def create_user():
global user_count
user_count += 1
value = json.loads(request.data)
new_id = user_count
user_account[new_id] = value
return str(user_count)
@app.route('/update_user/<id>', methods=["PUT"])
def update_user(id):
value = json.loads(request.data)
user_account[int(id)] = value
return user_account[int(id)]
@app.route('/delete_user/<id>', methods=["DELETE"])
def delete_user(id):
global user_count
user_account.pop(int(id))
user_count -= 1
return "user deleted"
|
rikiapst/cephaloPy
|
user.py
|
user.py
|
py
| 995 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8717492636
|
from .. import Connector
from ..libs.Connection import Connection
class SICS(Connector):
"""
Mettler Toledo Standard Interface Command Set
Commands:
- '1': self.get_weight,
- '2': self.get_info
"""
def __init__(self, config: dict):
super(SICS, self).__init__(config)
self.connection = Connection(self.address, self.device_id)
self.interpreter = {
'1': self.get_weight,
'2': self.get_info
}
def get_weight(self):
"""
Get actual measured weight.
WARNING: can raise Exception("Unknown unit %s")
:return: Current weight in grams and stability of measured weight (1 - stable, 0 - dynamic).
"""
units = {"kg": 1000, "lb": 453.5924, "g": 1, "oz": 28.34952, "t": 1000000}
result = self.connection.get_weight()
try:
return {'weight': result['value'] * units[result['unit']], 'attribute': int(result['stable'])}
except KeyError:
self._raise_error(self.whoami(), "Unknown unit {}".format(result['unit']))
def get_info(self):
"""
Get the balance available info.
:return: Model, SN, SW, capacity and unit.
"""
return self.connection.get_info()
def disconnect(self):
self.connection.close()
return True
def test_connection(self) -> bool:
try:
self.connection.get_address()
return True
except Exception:
return False
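# A hedged usage sketch (assumptions: the config dict carries whatever the Connector base
# class needs to set self.address and self.device_id; the keys shown here are illustrative):
#
#   sics = SICS({"address": "192.168.1.50", "device_id": 7})
#   if sics.test_connection():
#       print(sics.interpreter['1']())  # -> {'weight': <grams>, 'attribute': 0 or 1}
#       print(sics.interpreter['2']())  # -> model / SN / SW / capacity info
#   sics.disconnect()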
|
SmartBioTech/DeviceControl
|
app/workspace/devices/MettlerToledo/classes/SICS.py
|
SICS.py
|
py
| 1,518 |
python
|
en
|
code
| 2 |
github-code
|
6
|
30728525190
|
r, c = [int(x) for x in input().strip().split()]
#print( sum([x*x for x in [1,4,2,9,7]]))
def prime(n):
P = [1,2]
k = 3
while len(P) < n:
is_prime = True
for j in P[1:]:
if k % j == 0 :
is_prime = False
break
elif j*j > k:
break
if is_prime:
P.append(k)
k+=2
return P
primes = prime(r+c)
if r == 1 or c == 0:
print (0)
else:
for j in range(r):
print(*[primes[i] * primes[c+j] for i in range(c)])
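# A hedged trace for r = 2, c = 2 (illustrative only): prime(4) returns [1, 2, 3, 5],
# so the printed grid is
#   3 6    (primes[0]*primes[2], primes[1]*primes[2])
#   5 10   (primes[0]*primes[3], primes[1]*primes[3])
# i.e. each cell is a per-column factor (primes[i], with primes[0] = 1) times a per-row prime.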
|
mdaw323/alg
|
codeforces/1266/c.py
|
c.py
|
py
| 575 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26833816694
|
# https://pypi.org/project/firebirdsql/
# pip install firebirdsql
import firebirdsql
from decouple import config
# pip install mysql-connector-python
import mysql.connector
import re
import os
try:
# Mysql Local
# con_mysql = mysql.connector.connect(
# host=config("host"),
# user=config("user"),
# password=config("password"),
# database=config("database"))
# MYSQL Site
con_mysql = mysql.connector.connect(
host=config("host_"),
user=config("user_"),
password=config("password_"),
database=config("database_"))
print("Database connection Mysql made!")
cursor_mysql = con_mysql.cursor()
# site
cursor_mysql.execute("""SELECT cpf_cnpj, data_uso
FROM core_cliente""")
t_cli = cursor_mysql.fetchall()
    dt_new = input('Please enter the due date (yyyy-mm-dd): ')  # yyyy-mm-dd
    for cpf_cnpj, dtus in t_cli:
        print(cpf_cnpj, "Usage date: ", dtus)
        dtus = '2021-09-10'  # yyyy-mm-dd
value_column = 'data_uso'
value_where = 'cpf_cnpj'
comando_sql = f"""UPDATE core_cliente
SET {value_column}=('{dt_new}')
WHERE {value_where}=('{cpf_cnpj}')"""
        print('Updating: ', value_column)
cursor_mysql.execute(comando_sql)
con_mysql.commit()
con_mysql.close()
    # close the terminal?
os._exit(1)
except ValueError:
    print('Database error')
else:
    con_mysql.close()
    os._exit(1)
|
sistemadevsys/db_firebird_mysql
|
update_clientes.py
|
update_clientes.py
|
py
| 1,549 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8757517027
|
from urllib3.exceptions import ProtocolError, ReadTimeoutError
import tweepy
import dataset
import json
from tweepy import StreamListener
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from textblob import TextBlob
from models import *
import pandas as pd
import numpy as np
from config import *
engine = create_engine('postgresql://paulinazheng:@localhost:5432/flu')
Session = sessionmaker(bind=engine)
session = Session()
analyser = SentimentIntensityAnalyzer()
def sentiment_score(text):
return analyser.polarity_scores(text)
api = tweepy.API(auth_r)
def calculate_centroid(box):
avg_lat = (box[1][1] + box[0][1])/2
avg_long = (box[2][0] + box[1][0])/2
return avg_lat, avg_long
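# A hedged worked example of the centroid math above (illustrative values only):
# for a place bounding box listed counter-clockwise from the south-west corner,
# e.g. box = [[-74.3, 40.5], [-74.3, 40.9], [-73.7, 40.9], [-73.7, 40.5]],
# avg_lat = (40.9 + 40.5) / 2 = 40.7 and avg_long = (-73.7 + -74.3) / 2 = -74.0,
# i.e. roughly the center of that box.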
LOCATION = [-164.639405, 18.776344, -66.947028, 71.76871]
cities = [('New York', 40.7127837, -74.00594129999999),
('Los Angeles', 34.0522342, -118.24368490000002),
('Chicago', 41.8781136, -87.62979820000001),
('Houston', 29.7604267, -95.36980279999999),
('Philadelphia', 39.9525839, -75.1652215),
('Phoenix', 33.4483771, -112.07403729999999),
('San Antonio', 29.4241219, -98.4936282),
('San Diego', 32.715738, -117.1610838),
('Dallas', 32.7766642, -96.7969879),
('San Jose', 37.338208200000004, -121.88632859999998),
('Austin', 30.267153000000004, -97.7430608),
('Indianapolis', 39.768403, -86.158068),
('Jacksonville', 30.3321838, -81.65565099999998),
('San Francisco', 37.7749295, -122.4194155),
('Columbus', 39.9611755, -82.99879419999998),
('Charlotte', 35.2270869, -80.8431267),
('Fort Worth', 32.7554883, -97.3307658),
('Detroit', 42.331427000000005, -83.0457538),
('El Paso', 31.7775757, -106.44245590000001),
('Memphis', 35.1495343, -90.0489801),
('Seattle', 47.6062095, -122.33207079999998),
('Denver', 39.739235799999996, -104.990251),
('Washington', 38.9071923, -77.03687070000001),
('Boston', 42.360082500000004, -71.0588801),
('Nashville-Davidson', 36.1626638, -86.78160159999999),
('Baltimore', 39.2903848, -76.6121893),
('Oklahoma City', 35.4675602, -97.5164276),
('Louisville/Jefferson County', 38.252664700000004, -85.7584557),
('Portland', 45.523062200000005, -122.67648159999999),
('Las Vegas', 36.169941200000004, -115.13982959999998)]
def add_item(item):
db.session.add(item)
db.session.commit()
def find_closest_city(centroid_lat, centroid_long, cities=cities):
smallest = 10000
point = (centroid_lat, centroid_long)
for city in cities:
dist = np.sqrt((city[1]-point[0])**2 + (city[2]-point[1])**2)
if dist < smallest:
smallest = dist
closest = city
return closest
def get_city_id(lat, long):
closest = find_closest_city(lat, long, cities=cities)
if closest[0] not in [city.name for city in City.query.all()]:
city = City(name=closest[0], lat=closest[1], long=closest[2])
add_item(city)
city_id = city.id
else:
city = City.query.filter_by(name = closest[0]).all()
city_id=city[0].id
return city_id
def get_or_create_user(user_id, location):
user = User.query.filter_by(user_id=user_id).first()
if user:
return user
else:
user = User(user_id=user_id, location=location)
add_item(user)
return user
def get_or_create_tweet(user_id, location, twitter_id, created, centroid_lat, centroid_long, text, city_id):
tweet = Tweet.query.filter_by(twitter_id=twitter_id).first()
if tweet:
return tweet
else:
user = get_or_create_user(user_id, location)
sentiment = sentiment_score(text)
positivity = round(sentiment['pos'], 4)
negativity = round(sentiment['neg'], 4)
compound = round(sentiment['compound'], 4)
polarity = round((TextBlob(text)).sentiment.polarity, 4)
tweet = Tweet(twitter_id=twitter_id, text=text, created=created, centroid_lat=centroid_lat,
centroid_long=centroid_long, positivity=positivity, negativity=negativity, compound=compound,
polarity=polarity, user_id=user.id, city_id=city_id)
add_item(tweet)
return tweet
class StreamListener(tweepy.StreamListener):
def on_connect(self):
print("Now we're saving from Twitter!")
def on_status(self, status):
#avoids retweets, non-geolocated
if status.retweeted:
return
if not status.place:
return
if status.lang == 'en':
if status.truncated == True:
text = status.extended_tweet['full_text']
if len(text) > 320:
text = text[:320]
else:
text=status.text
id_str = status.id_str
created = status.created_at
box = status.place.bounding_box.coordinates[0]
centroid_lat, centroid_long = calculate_centroid(box)
coords = status.coordinates
if coords is not None:
coords = json.dumps(coords)
loc = status.user.location
user_id = str(status.user.id)
sentiment = sentiment_score(text)
positivity = round(sentiment['pos'], 4)
negativity = round(sentiment['neg'], 4)
compound = round(sentiment['compound'], 4)
polarity = round((TextBlob(text)).sentiment.polarity, 4)
city_id = get_city_id(centroid_lat, centroid_long)
get_or_create_tweet(user_id, loc, id_str, created, centroid_lat, centroid_long, text, city_id)
def on_exception(self, exception):
print(exception)
return
def on_error(self, status_code):
if status_code == 420:
return False
flu = ['flu', 'influenza', 'cough', 'fever', 'sore throat', 'headache',
'phlegm', 'runny nose', 'stuffy nose', 'Robitussin',
'dayquil', 'nyquil', 'tamiflu', 'vomit', 'body ache', 'mucinex',
'pneumonia', 'vomit', 'bodyache', 'medicine']
stream_listener = StreamListener()
stream = tweepy.Stream(auth=api.auth, listener=stream_listener)
while True:
try:
stream.filter(track=flu)
except (ProtocolError, AttributeError, ReadTimeoutError):
continue
test = stream.filter(track=flu)
|
paulinaczheng/twitter_flu_tracking
|
twitter_package/tweet-stream.py
|
tweet-stream.py
|
py
| 6,168 |
python
|
en
|
code
| 11 |
github-code
|
6
|
40095183195
|
from airflow import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from datetime import datetime
from Tecnocasa.tecnocasa_main import scrape_tecnocasa_url
default_args = {
'start_date': datetime(2023, 1, 1),
'retries': 1,
}
dag = DAG('AIHOME', default_args=default_args, schedule_interval=None)
start = DummyOperator(task_id='start', dag=dag)
Tecnocasa_scrapper = PythonOperator(
task_id='Scrapper_tecnocasa',
python_callable=scrape_tecnocasa_url,
dag=dag,
)
start >> Tecnocasa_scrapper
if __name__ == "__main__":
dag.cli()
|
pasqualepescina/AIHome
|
dags/dag.py
|
dag.py
|
py
| 630 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6942872733
|
#!/usr/bin/env python3
import rospy
from ros_assignment.srv import ComputeAngVel, ComputeAngVelResponse
from std_msgs.msg import Float64
def compute_ang_vel_server(req):
# Assuming linear velocity of 0.1
linear_velocity = 0.1
radius = req.radius
    # Compute angular velocity using v = ω * r
angular_velocity = linear_velocity / radius
return ComputeAngVelResponse(angular_velocity)
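# Hedged numeric example (illustrative): with the assumed linear velocity of 0.1 m/s,
# a requested radius of 0.5 m yields angular_velocity = 0.1 / 0.5 = 0.2 rad/s.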
if __name__ == "__main__":
rospy.init_node('compute_ang_vel_server')
s = rospy.Service('compute_ang_vel', ComputeAngVel, compute_ang_vel_server)
rospy.loginfo("Compute Angular Velocity Server is ready.")
rospy.spin()
|
Suraj945gh/Antariksh
|
ros_assignment/scripts/compute_ang_vel_server.py
|
compute_ang_vel_server.py
|
py
| 642 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11120230113
|
def vowel_remover(text):
    vowels = ('a', 'e', 'i', 'o', 'u', 'y', 'A', 'E', 'I', 'O', 'U', 'Y')  # I consider y as a vowel
    for char in text:
        if char in vowels:
            text = text.replace(char, '')
    return text
with open('aaa.txt', 'r') as myfile:
    words = myfile.read().split()
long1 = max(words, key=len)  # Search for the longest word
words.remove(long1)  # Remove the longest from the list of words
long1 = vowel_remover(long1)  # Call the function to remove the vowels
print("The first longest word is : ", long1[::-1])  # Print the longest word backwards
long2 = max(words, key=len)  # Search for the longest word
words.remove(long2)  # Remove the longest from the list of words
long2 = vowel_remover(long2)  # Call the function to remove the vowels
print("The second longest word is : ", long2[::-1])  # Print the longest word backwards
long3 = max(words, key=len)  # Search for the longest word
words.remove(long3)  # Remove the longest word from the list of words
long3 = vowel_remover(long3)  # Call the function to remove the vowels
print("The third longest word is : ", long3[::-1])  # Print the longest word backwards
long4 = max(words, key=len)  # Search for the longest word
words.remove(long4)  # Remove the longest word from the list of words
long4 = vowel_remover(long4)  # Call the function to remove the vowels
print("The fourth longest word is : ", long4[::-1])  # Print the longest word backwards
long5 = max(words, key=len)  # Search for the longest word
words.remove(long5)  # Remove the longest word from the list of words
long5 = vowel_remover(long5)  # Call the function to remove the vowels
print("The fifth longest word is : ", long5[::-1])  # Print the longest word backwards
|
GiorgosMeg/Python_University
|
askhsh1.py
|
askhsh1.py
|
py
| 1,749 |
python
|
en
|
code
| 0 |
github-code
|
6
|
15125626664
|
from data_higher_lower import data
from art_higher_lower import logo, vs
from random import choice
from functions_higher_lower import check_followers
updated_data = data
a_card = choice(list(updated_data))
updated_data.remove(a_card)
should_continue = True
print(logo)
score = 0
while should_continue:
b_card = choice(list(updated_data))
updated_data.remove(b_card)
print(f"Compare A: {a_card['name']}, {a_card['description']} from {a_card['country']}\n"
f"{vs}\n"
f"Compare B: {b_card['name']}, {b_card['description']} from {b_card['country']}")
user_input = input("Who has more followers? Type 'A' or 'B'").lower()
if user_input == 'a':
should_continue = check_followers(a_card['name'], a_card['name'], a_card['follower_count'], b_card['name'],
b_card['follower_count'])
elif user_input == 'b':
should_continue = check_followers(b_card['name'], a_card['name'], a_card['follower_count'], b_card['name'],
b_card['follower_count'])
a_card = b_card
score += 1
print(f"Current Score: {score}")
|
Tulip2MF/100_Days_Challenge
|
day_014/higher_lower_main.py
|
higher_lower_main.py
|
py
| 1,153 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17961034725
|
from flask import render_template, redirect, url_for, request
from surv import app
from .storage import c, new_game, save_game, load_game
@app.route('/')
def home():
return redirect(url_for('new'))
@app.route('/new/')
def new():
new_game()
return redirect(url_for('list'))
@app.route('/list/')
def list():
return render_template('list.html',title='List',game=load_game())
@app.route('/schedule/')
def schedule():
return render_template('schedule.html',title="schedule",game=load_game())
@app.route('/event/<eventid>/')
def event(eventid):
g = load_game()
this_event = g.get_event(int(eventid))
return render_template('event.html',title='event',game=g,event=this_event)
@app.route('/run/<eventid>/')
def run(eventid):
g = load_game()
this_event = g.get_event(int(eventid))
if this_event.complete == False:
this_event.run(g)
save_game(g)
return render_template('event.html',title='event',game=g,event=this_event)
else:
return redirect(url_for('event',eventid=eventid))
@app.route('/tribe/<tribeid>/')
def tribe(tribeid):
g = load_game()
id = int(tribeid)
this_tribe = [x for x in g.tribes if x.id == id][0]
return render_template('tribe.html',title='tribe',game=g,tribe=this_tribe)
@app.route('/player/<playerid>/')
def player(playerid):
    g = load_game()
    id = int(playerid)
    this_player = [x for x in g.players if x.id == id][0]
    return render_template('player.html',title='tribe',game=g,player=this_player)
@app.route('/next/')
def next():
g = load_game()
g.run_next()
save_game(g)
old_url = request.referrer
return redirect(old_url)
@app.route('/story/')
def story():
g = load_game()
return render_template('story.html',title='Story',game=g)
@app.route('/sim/')
def sim():
g = load_game()
g.run_all()
save_game(g)
return redirect(url_for('story'))
|
pkugelmass/survivor
|
surv/routes.py
|
routes.py
|
py
| 1,892 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71484374588
|
N = int(input())
A = [int(x) for x in input().split()]
ret = 0
for state in range(1 << N):
flag = False
ans = 1
for i in range(N):
if (state >> i & 1):
if A[i] % 2 == 1:
flag = True
ans *= 2
else:
if A[i] % 2 == 0:
flag = True
if flag:
ret += ans
print(ret)
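# Hedged sanity check (illustrative): for N = 1, A = [1], only state 0b1 contributes,
# because its chosen element is odd (ans = 2), so the program prints 2; state 0b0 is
# rejected since the unchosen element would then have to be even.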
|
knuu/competitive-programming
|
atcoder/corp/cf17_qc_b.py
|
cf17_qc_b.py
|
py
| 366 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72531854269
|
from typing import Final
import sqlalchemy as sa
def column_created_datetime(*, timezone: bool = True) -> sa.Column:
return sa.Column(
"created",
sa.DateTime(timezone=timezone),
nullable=False,
server_default=sa.sql.func.now(),
doc="Timestamp auto-generated upon creation",
)
def column_modified_datetime(*, timezone: bool = True) -> sa.Column:
return sa.Column(
"modified",
sa.DateTime(timezone=timezone),
nullable=False,
server_default=sa.sql.func.now(),
onupdate=sa.sql.func.now(),
doc="Timestamp with last row update",
)
_TRIGGER_NAME: Final[str] = "auto_update_modified_timestamp"
def register_modified_datetime_auto_update_trigger(table: sa.Table) -> None:
"""registers a trigger/procedure couple in order to ensure auto
update of the 'modified' timestamp column when a row is modified.
NOTE: Add a *hard-coded* version in the alembic migration code!!!
see [this example](https://github.com/ITISFoundation/osparc-simcore/blob/78bc54e5815e8be5a8ed6a08a7bbe5591bbd2bd9/packages/postgres-database/src/simcore_postgres_database/migration/versions/e0a2557dec27_add_services_limitations.py)
Arguments:
table -- the table to add the auto-trigger to
"""
assert "modified" in table.columns # nosec
# NOTE: scoped on database
procedure_name: Final[str] = f"{table.name}_auto_update_modified_timestamp()"
# TRIGGER
modified_timestamp_trigger = sa.DDL(
f"""
DROP TRIGGER IF EXISTS {_TRIGGER_NAME} on {table.name};
CREATE TRIGGER {_TRIGGER_NAME}
BEFORE INSERT OR UPDATE ON {table.name}
FOR EACH ROW EXECUTE PROCEDURE {procedure_name};
"""
)
# PROCEDURE
update_modified_timestamp_procedure = sa.DDL(
f"""
CREATE OR REPLACE FUNCTION {procedure_name}
RETURNS TRIGGER AS $$
BEGIN
NEW.modified := current_timestamp;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
"""
)
# REGISTER THEM PROCEDURES/TRIGGERS
sa.event.listen(table, "after_create", update_modified_timestamp_procedure)
sa.event.listen(table, "after_create", modified_timestamp_trigger)
NUMERIC_KWARGS = {"scale": 2}
|
ITISFoundation/osparc-simcore
|
packages/postgres-database/src/simcore_postgres_database/models/_common.py
|
_common.py
|
py
| 2,235 |
python
|
en
|
code
| 35 |
github-code
|
6
|
18537342899
|
# prob_link: https://www.codingninjas.com/codestudio/problems/job-sequencing-problem_8230832?challengeSlug=striver-sde-challenge&leftPanelTab=0
def jobScheduling(jobs):
maxi = 0
for t, profit in jobs:
maxi = max(maxi, t)
maxi += 1
flag = [0] * maxi
jobs.sort(key=lambda x: x[1], reverse=True)
for t, profit in jobs:
for starts in range(t, 0, -1):
if flag[starts] == 0:
flag[starts] = profit
break #
return sum(flag)
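# A hedged usage example (assuming jobs are given as (deadline, profit) pairs, matching
# how the loop above unpacks them; the values are illustrative):
#   jobs = [(2, 100), (1, 19), (2, 27), (1, 25), (3, 15)]
#   jobScheduling(jobs)  # -> 142: slot 2 takes 100, slot 1 takes 27, slot 3 takes 15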
|
Red-Pillow/Strivers-SDE-Sheet-Challenge
|
P45_Job_Sequencing_Problem.py
|
P45_Job_Sequencing_Problem.py
|
py
| 521 |
python
|
en
|
code
| 0 |
github-code
|
6
|
1124184800
|
palavra1 = input()
palavra2 = input()
for i in list(range(len(palavra2))):
contador = 0
for j in list(range(len(palavra1))):
if palavra1[j] == palavra2[i]:
if contador > 0:
print(" ", end = "")
print(j, end = "")
contador += 1
x = (len(palavra2)-1)
if contador == 0:
print(-1)
else:
print("")
|
vitorbarbosa123/lp1-python
|
Semana4/indices/indices.py
|
indices.py
|
py
| 390 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
20146094706
|
from datetime import datetime, timedelta
from tqdm import tqdm
import aiohttp
import asyncio
from pymongo import MongoClient
from private import royale_token
client = MongoClient('mongodb://localhost:27017/')
MAX_TROPHIES = "Opponent(s) with the most trophies:"
MAX_BEST_TROPHIES = "Opponent(s) with the highest trophy record:"
MAX_CLAN_WAR_WINS = "Opponent(s) with the most clan war wins:"
MIN_CARD_LEVEL = "Most under-leveled card in a clan war deck:"
MIN_MEAN_CARDS_LEVEL = "Most under-leveled deck in clan war:"
class Player:
def __init__(self, tag, name, trophies, best_trophies, war_day_wins):
self.tag = tag
self.name = name
self.trophies = trophies
self.best_trophies = best_trophies
self.war_day_wins = war_day_wins
self.cards = []
@property
def min_card_level(self):
min_level = 13
for card in self.cards:
level = 13 - card["maxLevel"] + card["level"]
if min_level > level:
min_level = level
return min_level
@property
def mean_level(self):
s = 0
for card in self.cards:
s += 13 - card["maxLevel"] + card["level"]
return s / len(self.cards)
async def load_player(session, player_tag: str) -> Player:
player_tag = player_tag.replace("#", "")
url = f"https://api.clashroyale.com/v1/players/%23{player_tag}"
params = dict(
authorization=royale_token
)
async with session.get(url, params=params) as response:
p = await response.json()
if "name" not in p:
return None
player = Player(player_tag, p["name"], p["trophies"], p["bestTrophies"], p["warDayWins"])
return player
async def fetch_current_war(session, clan_tag: str):
clan_tag = clan_tag.replace("#", "")
url = f"https://api.clashroyale.com/v1/clans/%23{clan_tag}/currentwar"
params = dict(
authorization=royale_token
)
async with session.get(url, params=params) as response:
if response.status != 200:
return None
war = await response.json()
return war
def filter_battles_by_clan(battles, clan_tag):
filtered_battles = []
for battle in battles:
for player in battle["team"]:
if player["clan"]["tag"] == clan_tag:
filtered_battles.append(battle)
break
return filtered_battles
def filter_battles_by_date(battles, start_date, end_date):
filtered_battles = []
for battle in battles:
date = battle["battleTime"]
battle_time = datetime.strptime(date, "%Y%m%dT%H%M%S.%fZ")
if start_date <= battle_time <= end_date:
filtered_battles.append(battle)
return filtered_battles
def filter_battles_by_win(battles):
filtered_battles = []
for battle in battles:
player_crowns = int(battle["team"][0]["crowns"])
opponent_crowns = int(battle["opponent"][0]["crowns"])
if player_crowns > opponent_crowns:
filtered_battles.append(battle)
return filtered_battles
def load_collection_day_battles(start_date, end_date, battle_log, clan_tag):
battles = battle_log.find({"type": "clanWarCollectionDay"})
current_war_battles = filter_battles_by_date(battles, start_date, end_date)
current_war_battles_by_clan = filter_battles_by_clan(current_war_battles, clan_tag)
# current_war_battles_by_clan = filter_battles_by_win(current_war_battles_by_clan)
return current_war_battles_by_clan
def load_war_day_battles(start_date, end_date, battle_log, clan_tag):
battles = battle_log.find({"type": "clanWarWarDay"})
current_war_battles = filter_battles_by_date(battles, start_date, end_date)
current_war_battles_by_clan = filter_battles_by_clan(current_war_battles, clan_tag)
# current_war_battles_by_clan = filter_battles_by_win(current_war_battles_by_clan)
return current_war_battles_by_clan
async def load_opponents(session, battles):
players = []
for battle in tqdm(battles):
player = await load_player(session, battle["team"][0]["tag"])
if player is None:
continue
player.trophies = battle["team"][0]["startingTrophies"]
player.cards = battle["team"][0]["cards"]
player_crowns = int(battle["team"][0]["crowns"])
opponent = await load_player(session, battle["opponent"][0]["tag"])
if opponent is None:
continue
opponent.trophies = battle["opponent"][0]["startingTrophies"]
opponent.cards = battle["opponent"][0]["cards"]
opponent_crowns = int(battle["opponent"][0]["crowns"])
players.append((player, opponent, player_crowns, opponent_crowns))
return players
async def collection_day_results(session, clan_tag: str):
db = client["clashroyale"]
war_log = db["warlog"]
war = next(war_log.find({}).sort("createdDate", -1))
date = war["createdDate"]
end_date = datetime.utcnow()
start_date = datetime.strptime(date, "%Y%m%dT%H%M%S.%fZ")
current_war_battles = load_collection_day_battles(start_date, end_date, db["battlelog"], clan_tag)
players = await load_opponents(session, current_war_battles)
text = ""
text += find_best(players, lambda x: x[1].trophies, True, MAX_TROPHIES)
text += find_best(players, lambda x: x[1].best_trophies, True, MAX_BEST_TROPHIES, 7000)
text += find_best(players, lambda x: x[1].war_day_wins, True, MAX_CLAN_WAR_WINS)
return text
async def war_day_results(session, clan_tag: str):
db = client["clashroyale"]
war_log = db["warlog"]
war = next(war_log.find({}).sort("createdDate", -1))
date = war["createdDate"]
end_date = datetime.strptime(date, "%Y%m%dT%H%M%S.%fZ")
start_date = end_date + timedelta(days=-1)
current_war_battles = load_war_day_battles(start_date, end_date, db["battlelog"], clan_tag)
players = await load_opponents(session, current_war_battles)
text = ""
text += find_best(players, lambda x: x[1].trophies, True, MAX_TROPHIES)
text += find_best(players, lambda x: x[1].best_trophies, True, MAX_BEST_TROPHIES, 7000)
text += find_best(players, lambda x: x[1].war_day_wins, True, MAX_CLAN_WAR_WINS)
text += find_best(players, lambda x: x[0].min_card_level, False, MIN_CARD_LEVEL, 9)
text += find_best(players, lambda x: x[0].mean_level, False, MIN_MEAN_CARDS_LEVEL)
return text
def find_best(values, key, reverse, name, threshold=None):
values = sorted(values, key=key, reverse=reverse)
threshold = threshold or key(values[0])
if (reverse and key(values[0]) < threshold) or (not reverse and key(values[0]) > threshold):
threshold = key(values[0])
result = f"{name}\n"
for value in values:
if reverse:
if key(value) < threshold:
break
else:
if key(value) > threshold:
break
if reverse:
result += f"{value[0].name} {value[2]}-{value[3]} {value[1].name} ({key(value)})\n"
else:
result += f"{value[0].name} (ััะพะฒะตะฝั: {key(value)}) {value[2]}-{value[3]} {value[1].name}\n"
result += "\n"
return result
async def main():
clan_tag = "#2UJ2GJ"
async with aiohttp.ClientSession() as session:
current_war = await fetch_current_war(session, clan_tag)
if current_war is not None:
state = current_war["state"]
if state == "collectionDay" or state == "notInWar":
text = await war_day_results(session, clan_tag)
elif state == "warDay":
                text = await collection_day_results(session, clan_tag)
else:
text = "Current war is unavailable or unknown state."
print(text)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(main())
except asyncio.CancelledError:
pass
client.close()
|
dfomin/clashroyaledata
|
data_analyzer.py
|
data_analyzer.py
|
py
| 8,146 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5635045727
|
import time
import socket
import struct
from collections import deque
class ClientClass:
def __init__(self, clientID, connection, addr):
self.clientID = clientID
self.addr = addr
self.clientSocket = connection
self.UDPAddr = None
self.name = ""
self.lastPacket = time.time() + 11
self.des = 0
self.sleepTime = 0
# 0 = ping
def send_buffer_to_client(self, buff, addNum):
try:
buff = struct.pack("I", self.des)
self.clientSocket.sendall(buff)
except:
print("Failed to send {} to {}".format(buff, self.addr))
|
LogFlames/WandAndSwordServer
|
zombie hunt server/clientClass.py
|
clientClass.py
|
py
| 672 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16496474868
|
import re
def calculate_profanity_degree(tweet, racial_slurs):
words = re.findall(r'\b\w+\b', tweet.lower())
total_words = len(words)
profanity_count = 0
for word in words:
if word in racial_slurs:
profanity_count += 1
return profanity_count / total_words if total_words > 0 else 0
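# A hedged worked example (illustrative): for the tweet "you are a racial_slur1 example"
# the regex finds 5 words, one of which is in the slur list, so the degree is 1/5 = 0.2.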
def main():
racial_slurs = ['racial_slur1', 'racial_slur2',
'racial_slur3']
with open('tweets.txt', 'r') as file:
tweets = file.readlines()
for tweet in tweets:
degree = calculate_profanity_degree(tweet, racial_slurs)
print(f"Tweet: {tweet.strip()}")
print(f"Profanity degree: {degree}")
print()
if __name__ == '__main__':
main()
|
nameisvid/ebank
|
index.py
|
index.py
|
py
| 738 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75149559227
|
"""
Question 13
Level 2
Question: Write a program that accepts a sentence and calculate the number of letters and digits. Suppose the following input is supplied to the program: hello world! 123 Then, the output should be: LETTERS 10 DIGITS 3
Hints: In case of input data being supplied to the question, it should be assumed to be a console input.
"""
t = input()
dig = 0
let = 0
for z in t:
if z.isdigit():
dig += 1
elif z.isalpha():
let += 1
print("LETTERS: " + str(let) + " DIGITS: " + str(dig))
|
BYN13K/Learning
|
zhiwehu_Python_exercises/Q13.py
|
Q13.py
|
py
| 519 |
python
|
en
|
code
| 0 |
github-code
|
6
|
8561287023
|
import requests
import json
from typing import Dict, List
from config.config_secret import secret_file_path
class Auth:
@classmethod
def get_secret(cls, secret_types: List[str]) -> Dict[str,str]:
"""json ํ์ผ๋ก๋ถํฐ ์๊ตฌ๋๋ ์ํฌ๋ฆฟํค ๋ชฉ๋ก์ ์ฝ์ด ๋ฐํํ๋ค.
Args:
secret_types : ์๊ตฌ๋๋ ์ํฌ๋ฆฟ ํค ์ด๋ฆ๋ค์ ๋ด์ ๋ฐฐ์ด
Returns:
์๊ตฌ๋๋ ์ํฌ๋ฆฟ ํค ์ ๋ณด๋ฅผ ๋ด์ ๋์
๋๋ฆฌ
example:
{
"access_token" : "86289e71b93e7d9f67f4dcfbe69bc44d"
"client_id" : "86289e71b93e7d9f67f4dcfbe6123w4d"
}
"""
with open(secret_file_path, "r") as json_file:
secret_info = json.load(json_file)
return dict(filter(
lambda secret: True if secret[0] in secret_types else False, secret_info.items()))
@classmethod
def save_token(cls, access_token: str) -> None:
"""์ก์ธ์ค ํ ํฐ ์ ๋ณด๋ฅผ ๋ฐ์ json ํ์ผ์ ์ ์ฅํ๋ค.
Args:
access_token (str) : ๋ฐ๊ธํ ์ก์ธ์ค ํ ํฐ
"""
with open(secret_file_path, "r") as json_file:
secret_info = json.load(json_file)
secret_info["access_token"] = access_token
with open(secret_file_path, "w") as outfile:
json.dump(secret_info, outfile, indent=4)
@classmethod
def get_access_token_info(cls) -> Dict[str, str]:
"""์ก์ธ์ค ํ ํฐ์ ๋ง๋ฃ์ฌ๋ถ, ์ ํจ๊ธฐ๊ฐ ๋ฑ ์ ๋ณด๋ฅผ ํ์ธํ๋ค.
Returns:
์์ธ์ค ํ ํฐ ๊ด๋ จ ์ ๋ณด๋ฅผ ๋ด์ ๋์
๋๋ฆฌ
example:
{
'id': 2110957569,
'expiresInMillis': 14132012,
'expires_in': 14132,
'app_id': 701835,
'appId': 701835
}
"""
        access_token = cls.get_secret(["access_token"])["access_token"]
url = "https://kapi.kakao.com/v1/user/access_token_info"
headers = {
"Authorization": f"Bearer {access_token}"
}
response = requests.get(url=url, headers=headers)
if response.status_code == 200:
return response.json()
@classmethod
def update_token(cls) -> None:
"""์ก์ธ์ค ํ ํฐ๊ณผ ๋ฆฌํ๋ ์ ํ ํฐ์ ๊ฐฑ์ ํ๋ค.
"""
secret = cls.get_secret(['client_id','refresh_token'])
url = "https://kauth.kakao.com/oauth/token"
headers = {
"Content-Type": "application/x-www-form-urlencoded"
}
data = {
"grant_type": "refresh_token",
"client_id": f"{secret['client_id']}",
"refresh_token": f"{secret['refresh_token']}"
}
response = requests.post(url=url, headers=headers, data=data)
if response.status_code == 200:
token_info = response.json()
            # The refresh token is only renewed and returned when less than one month of its
            # validity remains, so check whether the response actually contains a refresh token.
            # See https://developers.kakao.com/docs/latest/ko/kakaologin/rest-api
if "refresh_token" in token_info:
                cls.save_token(token_info['refresh_token'], key="refresh_token")
cls.save_token(token_info['access_token'])
else:
print(f"request failed with status: {response.status_code}")
@classmethod
def get_tokens(cls) -> Dict[str, str]:
"""์ธ๊ฐ์ฝ๋ ๋ฅผ ํตํด ํ ํฐ ๊ด๋ จ ์ ๋ณด๋ฅผ ๋ฐํํ๋ฉฐ ์ฌ์คํ ํ์์ ์น ๋ธ๋ผ์ฐ์ ๋ฅผ ํตํด ์ธ๊ฐ์ฝ๋๋ฅผ ์ฌ๋ฐ๊ธ ๋ฐ์์ผ ํ๋ค.
https://developers.kakao.com/docs/latest/ko/kakaologin/rest-api#request-code ์ฐธ๊ณ
Returns:
Dict[str,str] : ๋ฐํ ์์ ์ฐธ์กฐ
{
'access_token': 'zmQou5uWoCpFNkfuu4N2-R5eZAUpMYTVqHHi_Qopb9UAAAF-97vVNg',
'token_type': 'bearer',
'refresh_token': 'QhBJVrzDpsZU3mteae0xikZR5ob1bQ1CQ8_YAwopb9UAAAF-97vVNQ',
'expires_in': 21599,
'scope': 'account_email profile_image talk_message profile_nickname',
'refresh_token_expires_in': 5183999
}
"""
url = "https://kauth.kakao.com/oauth/token"
secret = cls.get_secret(["code"])
headers = {
"Content-type": "application/x-www-form-urlencoded;charset=utf-8"
}
data = {
"grant_type": "authorization_code",
"client_id": "86289e71b93e7d9f67f4dcfbe69bc44d",
"redirect_uri": "http://localhost:3000",
            # one-time authorization code
"code": f"{secret['code']}"
}
response = requests.post(url=url, headers=headers, data=data)
return response.json()
# Check token info when this module is run directly
if __name__ == "__main__":
print(Auth.get_tokens())
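# A sketch (assumption, not taken from the repository) of the JSON document expected at
# config.config_secret.secret_file_path, which get_secret() reads and save_token() rewrites:
# {
#     "client_id": "<Kakao REST API key>",
#     "code": "<one-time authorization code>",
#     "access_token": "<issued access token>",
#     "refresh_token": "<issued refresh token>"
# }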
|
Ywoosang/Dossa-Notification
|
app/auth/auth.py
|
auth.py
|
py
| 4,905 |
python
|
ko
|
code
| 0 |
github-code
|
6
|
40276685615
|
import gc
import machine
import time
import dht
import network
import urequests
from umqttsimple import MQTTClient
import ubinascii
import micropython
import esp
from boot import ip
sensor23 = dht.DHT22(machine.Pin(23))
sensor23.measure()
client_id = ubinascii.hexlify(machine.unique_id())
client_id_str = ubinascii.hexlify(machine.unique_id()).decode('utf-8')
print(ip)
topic_sensor_cmd = b'dht_sensor_cmd'
topic_sensor_pub = b'dht_sensor_measurement'
def sub_cb(topic, msg):
print((topic, msg))
if topic == topic_sensor_cmd:
print('DHT.cmd: Received %s' % msg)
def connect_and_subscribe():
global client_id, mqtt_server, topic_sensor_cmd
client = MQTTClient(client_id, mqtt_server)
client.set_callback(sub_cb)
client.connect()
client.subscribe(topic_sensor_cmd)
print('Connected to %s MQTT broker, subscribed to %s topic' % (mqtt_server, topic_sensor_cmd))
return client
def restart_and_reconnect():
print('Failed to connect to MQTT broker. Reconnecting...')
time.sleep(10)
machine.reset()
try:
client = connect_and_subscribe()
except OSError as e:
restart_and_reconnect()
last_measure = 0
measure_interval = 15
try:
data = '{"host": "%s", "client_id": "%s", "event": "starting"}' % (ip, client_id_str)
client.publish("dht_sensor_events", data)
except OSError as e:
print("OSError: %s" % e)
wdt = machine.WDT(timeout=15000)
wdt.feed()
while True:
gc.collect()
wdt.feed()
try:
client.check_msg()
if (time.time() - last_measure) > measure_interval:
sensor23.measure()
temp14 = sensor23.temperature()
hum14 = sensor23.humidity()
data14 = '{"host": "%s", "client_id": "%s", "sensor": %d, "temp": %3.1f, "hum": %3.1f}' % (ip, client_id_str, 14, temp14, hum14)
print(data14)
client.publish(topic_sensor_pub, data14)
last_measure = time.time()
time.sleep(5)
except OSError as e:
restart_and_reconnect()
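# Note: `mqtt_server` is referenced as a global above but never assigned in this file; it is
# presumably defined alongside `ip` during boot. A hypothetical sketch of what boot.py provides:
# ip = wlan.ifconfig()[0]          # address assigned after the Wi-Fi connection comes up
# mqtt_server = '192.168.1.10'     # broker address (placeholder value, an assumption)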
|
andrevdm/dht_sensor
|
esp/main.py
|
main.py
|
py
| 2,028 |
python
|
en
|
code
| 0 |
github-code
|
6
|
20831019932
|
from django.urls import path
from posts.views import *
urlpatterns = [
path("", MainPage.as_view(), name="main"),
path("follow_post/", FollowPost.as_view(), name="follow_post"),
path("posts/<int:user_id>/", PostByUserId.as_view(), name="post"),
path("posts/view_post/<int:post_id>", ViewPost.as_view(), name="view_post"),
path("posts/view_post/<int:post_id>/upvote", upvote, name="upvote_post"),
path("posts/view_post/<int:post_id>/downvote", downvote, name="downvote_post"),
path("profile/add_post/", CreatePost.as_view(), name="create_post"),
path("user_tags/", tag_format_json, name="tags"),
]
|
YevheniiMorozov/social
|
gramm/posts/urls.py
|
urls.py
|
py
| 632 |
python
|
en
|
code
| 0 |
github-code
|
6
|
14958626079
|
# -*- coding: utf-8 -*-
from datetime import datetime, date
import logging
import re
ERROR_PARSING_DATE = "Error parsing date"
def from_iso_format(string):
parts = [int(a) for a in string.split("-")]
if len(parts) != 3:
raise ValueError
return date(parts[0], parts[1], parts[2])
def datetime_with_microsecond(string):
    # Python 2 has no built-in ISO 8601 parser (datetime.fromisoformat arrived in Python 3.7),
    # so the string is split on non-digit characters manually.
    # Deprecate this once Python2 support is dropped.
time_split = re.split("[^0-9]", string)
parts = len(time_split)
if parts <= 6:
if logging.getLogger().propagate:
logging.warning(ERROR_PARSING_DATE)
return None
try:
year = int(time_split[0])
month = int(time_split[1])
day = int(time_split[2])
hour = int(time_split[3])
minute = int(time_split[4])
second = int(time_split[5])
microsecond = int(round(float("0." + time_split[6]) * 1e6))
return datetime(year, month, day, hour, minute, second, microsecond)
except ValueError:
if logging.getLogger().propagate:
logging.warning(ERROR_PARSING_DATE)
return None
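# A worked example (not part of the original module): for "2012-04-23T18:25:43.511Z" the split on
# non-digit characters yields ['2012','04','23','18','25','43','511',''], so
# datetime_with_microsecond() returns datetime(2012, 4, 23, 18, 25, 43, 511000).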
|
getyoti/yoti-python-sdk
|
yoti_python_sdk/date_parser.py
|
date_parser.py
|
py
| 1,157 |
python
|
en
|
code
| 9 |
github-code
|
6
|
19024426652
|
from enum import Enum
from typing import Union
from typing import NamedTuple
from typing import Callable
class TaskType(Enum):
"""Type of machine learning task
Attributes
----------
MULTI_CLASS_CLASSIFICATION
multi-class classification
MULTI_LABEL_CLASSIFICATION
multi-label classification
REGRESSION
regression
REPRESENTATION_LEARNING
representation learning
"""
MULTI_CLASS_CLASSIFICATION = 0
MULTI_LABEL_CLASSIFICATION = 1
REGRESSION = 2
REPRESENTATION_LEARNING = 3
class TaskOutput(Enum):
"""Expected output
Attributes
----------
SEQUENCE
A sequence of vector is expected.
VECTOR
A single vector is expected.
"""
SEQUENCE = 0
VECTOR = 1
class Task(NamedTuple):
type: TaskType
output: TaskOutput
@classmethod
def from_str(cls, representation: str):
task_output, task_type = representation.split(" ", 1)
if task_output == "frame-wise":
task_output = TaskOutput.SEQUENCE
elif task_output == "chunk-wise":
task_output = TaskOutput.VECTOR
else:
msg = f'"{task_output}" task output is not supported.'
raise NotImplementedError(msg)
if task_type == "multi-class classification":
task_type = TaskType.MULTI_CLASS_CLASSIFICATION
elif task_type == "multi-label classification":
task_type = TaskType.MULTI_LABEL_CLASSIFICATION
elif task_type == "regression":
task_type = TaskType.REGRESSION
elif task_type == "representation learning":
task_type = TaskType.REPRESENTATION_LEARNING
else:
msg = f'"{task_type}" task type is not supported.'
raise NotImplementedError(msg)
return cls(type=task_type, output=task_output)
def __str__(self) -> str:
"""String representation"""
if self.returns_sequence:
name = "frame-wise"
elif self.returns_vector:
name = "chunk-wise"
else:
msg = (
"string representation (__str__) is not implemented "
"for this task output."
)
raise NotImplementedError(msg)
if self.is_multiclass_classification:
name = f"{name} multi-class classification"
elif self.is_multilabel_classification:
name = f"{name} multi-label classification"
elif self.is_regression:
name = f"{name} regression"
elif self.is_representation_learning:
name = f"{name} representation learning"
else:
msg = (
"string representation (__str__) is not implemented "
"for this type of task."
)
raise NotImplementedError(msg)
return name
@property
def returns_sequence(self) -> bool:
"""Is the output expected to be a sequence?
Returns
-------
`bool`
`True` if the task output is a sequence, `False` otherwise.
"""
return self.output == TaskOutput.SEQUENCE
@property
def returns_vector(self) -> bool:
"""Is the output expected to be a single vector?
Returns
-------
`bool`
`True` if the task output is a single vector, `False` otherwise.
"""
return self.output == TaskOutput.VECTOR
@property
def is_multiclass_classification(self) -> bool:
"""Is it multi-class classification?
Returns
-------
`bool`
`True` if the task is multi-class classification
"""
return self.type == TaskType.MULTI_CLASS_CLASSIFICATION
@property
def is_multilabel_classification(self) -> bool:
"""Is it multi-label classification?
Returns
-------
`bool`
`True` if the task is multi-label classification
"""
return self.type == TaskType.MULTI_LABEL_CLASSIFICATION
@property
def is_regression(self) -> bool:
"""Is it regression?
Returns
-------
`bool`
`True` if the task is regression
"""
return self.type == TaskType.REGRESSION
@property
def is_representation_learning(self) -> bool:
"""Is it representation learning?
Returns
-------
`bool`
`True` if the task is representation learning
"""
return self.type == TaskType.REPRESENTATION_LEARNING
@property
def default_activation(self):
"""Default final activation
Returns
-------
`torch.nn.LogSoftmax(dim=-1)` for multi-class classification
`torch.nn.Sigmoid()` for multi-label classification
`torch.nn.Identity()` for regression
Raises
------
NotImplementedError
If the default activation cannot be guessed.
"""
import torch.nn
if self.is_multiclass_classification:
return torch.nn.LogSoftmax(dim=-1)
elif self.is_multilabel_classification:
return torch.nn.Sigmoid()
elif self.is_regression:
return torch.nn.Identity()
else:
msg = f"Unknown default activation for {self} task."
raise NotImplementedError(msg)
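# A minimal usage sketch (not part of the original module):
# Task.from_str("frame-wise multi-class classification") yields
# Task(type=TaskType.MULTI_CLASS_CLASSIFICATION, output=TaskOutput.SEQUENCE),
# and str() of that Task round-trips back to the same string.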
|
DanRuta/xva-trainer
|
lib/_dev/pyannote/audio/train/task.py
|
task.py
|
py
| 5,402 |
python
|
en
|
code
| 78 |
github-code
|
6
|
30859953212
|
import configparser
import pandas as pd
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, dayofweek, date_format
import pyspark.sql.functions as F
from pyspark.sql.types import StructType as R, StructField as Fld, DoubleType as Dbl, StringType as Str, IntegerType as Int, DateType as Date, TimestampType as Ti, LongType as Lo, TimestampType as T
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS']['AWS_SECRET_ACCESS_KEY']
# Schema for songs data
song_table_schema = R([
Fld('artist_id',Str()),
Fld('artist_latitude',Dbl()),
Fld('artist_location',Str()),
Fld('artist_longitude',Dbl()),
Fld('artist_name',Str()),
Fld('duration',Dbl()),
Fld('num_songs',Dbl()),
Fld('song_id',Str()),
Fld('title',Str()),
Fld('year',Int()),
])
#Create Spark Session
def create_spark_session():
"""
Create Spark Session
"""
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.getOrCreate()
return spark
# Song Data Process using Spark
def process_song_data(spark, input_data, output_data):
"""
    Read song data, process it, and save the results to the provided output location
:param spark: Spark session
:param input_data: Input url
:param output_data: Output location
"""
# File path for song data
song_data = os.path.join(input_data,"song_data/*/*/*/*.json")
# Path to write the data back to S3
output_songs_data = os.path.join(output_data,"songs_table")
output_artists_data = os.path.join(output_data,"artists_table")
# Read song data file
df_song_data = spark.read.json(song_data,schema=song_table_schema).dropDuplicates()
df_song_data.printSchema()
df_song_data.show(5)
# Extract columns for songs table
songs_table = df_song_data.selectExpr("song_id","title","artist_name","artist_id","year","duration")
songs_table.printSchema()
songs_table.show(5)
# Write songs table back to S3
    songs_table.write.partitionBy("year", "artist_id").parquet(output_songs_data, mode="overwrite")
# Extract columns for artists table
artists_table = df_song_data.selectExpr("artist_id",
"artist_name as name",
"artist_location as location",
"artist_latitude as latitude",
"artist_longitude as longitude")
artists_table.printSchema()
artists_table.show(5)
# Write artists table back to S3
artists_table.write.parquet(output_artists_data, mode="overwrite")
# Log Data Process Using Spark
def process_log_data(spark, input_data, output_data):
"""
    Read log data, process it, and create the songplays table
    using both the log data and song data.
    Store the songplays data to the specified output location.
:param spark: Spark session
:param input_data: Input url
:param output_data: Output location
"""
output_users_data = os.path.join(output_data,"users_table")
output_time_data = os.path.join(output_data,"time_table")
output_songs_data = os.path.join(output_data,"songs_table")
output_songplays_data = os.path.join(output_data,"songplays_table")
# get filepath to log data file
log_data =os.path.join(input_data,"log_data/*/*/*.json")
print(log_data)
# read log data file
df_log_data = spark.read.json(log_data).dropDuplicates()
df_log_data.printSchema()
df_log_data.show(5)
# filter by actions for song plays
df_log_data = df_log_data.filter("page=='NextSong'")
df_log_data.show(5)
# extract columns for users table
users_table = df_log_data.selectExpr("userId as user_id",
"firstName as first_name",
"lastName as last_name",
"gender",
"level")
users_table.printSchema()
users_table.show(5)
# write users table to parquet files
users_table.write.parquet(output_users_data, mode="overwrite")
# create timestamp column from original timestamp column
get_timestamp = F.udf(lambda x : datetime.fromtimestamp(x),T())
df_log_data = df_log_data.withColumn("start_time",get_timestamp(df_log_data['ts']/1000))
df_log_data.printSchema()
df_log_data.show(5)
# create datetime column from original timestamp column
get_datetime = F.udf(lambda x : datetime.fromtimestamp(x),T())
df_log_data = df_log_data.withColumn("year",year(get_datetime(df_log_data['ts']/1000)))
df_log_data = df_log_data.withColumn("month",month(get_datetime(df_log_data['ts']/1000)))
df_log_data = df_log_data.withColumn("day",dayofmonth(get_datetime(df_log_data['ts']/1000)))
df_log_data = df_log_data.withColumn("hour",hour(get_datetime(df_log_data['ts']/1000)))
df_log_data = df_log_data.withColumn("week",weekofyear(get_datetime(df_log_data['ts']/1000)))
df_log_data = df_log_data.withColumn("weekday",date_format(df_log_data['start_time'],'EEEE'))
df_log_data.printSchema()
df_log_data.show(5)
# extract columns to create time table
time_table = df_log_data.selectExpr("start_time","hour","day","week","month","year","weekday")
time_table.printSchema()
time_table.show(5)
# write time table to parquet files partitioned by year and month
    time_table.write.partitionBy('year','month').parquet(output_time_data, mode="overwrite")
# read in song data to use for songplays table
song_df = spark.read.parquet(output_songs_data).dropDuplicates()
song_df.printSchema()
song_df.show(5)
song_df.createOrReplaceTempView("songView")
df_log_data.createOrReplaceTempView("logView")
# extract columns from joined song and log datasets to create songplays table
songplays_table = spark.sql("""
SELECT l.start_time,
l.userId as user_id,
l.level,s.song_id,
s.artist_id,
l.sessionId as session_id,
l.location,
l.userAgent as user_agent,
l.year,
l.month
FROM songView s
JOIN logView l
ON (s.artist_name == l.artist)
""")
songplays_table.printSchema()
songplays_table.show(5)
# write songplays table to parquet files partitioned by year and month
    songplays_table.write.partitionBy("year","month").parquet(output_songplays_data, mode="overwrite")
def main():
spark = create_spark_session()
input_data = "s3a://udacity-dend/"
output_data = "s3a://udacity-datalake-project/"
process_song_data(spark, input_data, output_data)
process_log_data(spark, input_data, output_data)
if __name__ == "__main__":
main()
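# A sketch (assumption, not from the repository) of the dl.cfg file read by configparser above:
# [AWS]
# AWS_ACCESS_KEY_ID = <your key id>
# AWS_SECRET_ACCESS_KEY = <your secret key>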
|
yashth/Data_Lake
|
etl.py
|
etl.py
|
py
| 7,406 |
python
|
en
|
code
| 0 |
github-code
|
6
|
3323951129
|
#!/usr/bin/python
import os
import re
import math
import matplotlib.pyplot as plt
import numpy
from numpy import sin, pi, arange
import astropy.io
from astropy.io import fits
from PIL import Image
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# This script contrasts photon index and luminosity for various observations. #
# In order to reduce error, the data is binned together #
# #
# Usage: The script was strictly developed for the 13 galaxies available at  #
# the time of development, and thus is more of a script to be used for #
# reference.
# #
# Author: April Walker #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
def main():
fits_file = []
#set to the directory containing all data you wish to plot
fits_dir = os.listdir(os.environ["RESEARCH_DATA_PATH"] + "/Final-fits/")
#specify the path each data file
for i in range(len(fits_dir)):
fits_file.append(os.environ["RESEARCH_DATA_PATH"] + "/Final-fits/" + fits_dir[i])
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(111)
x = []
x_error = []
x_error_mean = []
y = []
for iterator, value in enumerate(fits_file):
hdu_list = fits.open(value)
#our data is now put into columns
data = hdu_list[1].data
cols = hdu_list[1].columns
hdu_list.close()
        #take the values you need. If you need a new field, use print(cols) to see what is available
gamma = data.field('gamma')
gamma_error = data.field('gamma_err')
lh8 = data.field('lh8')
        #gamma of value 1.7 is an error, as is NaN, so we only append indices without
        #those values
gamma_indices = numpy.where(gamma != 1.7)
for i, value in enumerate(gamma_indices[0]):
if (gamma[value] == gamma[value] and lh8[value] == lh8[value] and gamma_error[value] > 0):
x.append(gamma[value])
x_error.append(gamma_error[value])
y.append(lh8[value])
#this guy holds our data set sorted by luminosity
data_set = []
for i in range(len(x)):
temp = [y[i],x[i],x_error[i]]
data_set.append(temp)
#sort it
data_set.sort(key=lambda x: x[0])
Ay = []
Ax = []
Ax_error = []
for i in range(len(x)):
Ay.append(data_set[i][0])
Ax.append(data_set[i][1])
Ax_error.append(data_set[i][2])
bin_minimum = Ay[0]
#Set this in case you're binning too many values
        if iterator == 4:
            bin_upper_limit = 16
            bin_lower_limit = 8
        elif iterator in (5, 6, 7, 10):
            bin_upper_limit = 14
            bin_lower_limit = 6
        elif iterator == 9:
            bin_lower_limit = 4
            bin_upper_limit = 10
        elif iterator == 8:
            bin_upper_limit = 25
            bin_lower_limit = 10
        else:
            bin_upper_limit = 15
            bin_lower_limit = 10
y_binned = []
x_binned = []
x_error_binned = []
y_unbinned = []
x_unbinned = []
x_error_unbinned = []
counter = []
# THE BELOW CODE AUTOBINS NORMALLY #
j = 0
for i in range(len(Ay)):
# if(Ax_error[i] < 0.17):
# y_unbinned.append(Ay[i])
# x_unbinned.append(Ax[i])
# x_error_unbinned.append(Ax_error[i])
if(j == len(x_error_binned) - 1):
if((numpy.sqrt(x_error_binned[j])/(counter[j]) < 0.13) and (counter[j] >= bin_lower_limit)):
j += 1
elif(counter[j] >= bin_upper_limit):
j += 1
else:
counter[j] += 1
y_binned[j] += Ay[i]
x_binned[j] += Ax[i]
x_error_binned[j] += Ax_error[i]**2
else:
y_binned.append(0)
x_binned.append(0)
x_error_binned.append(0)
counter.append(0)
counter[j] += 1
y_binned[j] += Ay[i]
x_binned[j] += Ax[i]
x_error_binned[j] += Ax_error[i]**2
            #calculates the error on the binned mean as sqrt(sum(errors^2))/n
for j in range(len(y_binned)):
if value == value:
y_binned[j] = y_binned[j]/counter[j]
x_binned[j] = x_binned[j]/counter[j]
x_error_binned[j] = numpy.sqrt(x_error_binned[j])/(counter[j])
no = '_nolegend_'
if iterator == 0 or iterator == 5 or iterator == 10:
if iterator == 0:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#231193', fmt = 'o', marker = "s", zorder = 4)
elif iterator == 5:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#231193', fmt = 'o', marker = "o", zorder = 4)
            elif iterator == 10:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#231193', fmt = 'o', marker = "^", zorder = 4)
ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#3219cd', fmt = 'o', zorder = 3, label = no, ms=3)
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# ax1.scatter(x, y, color = '#aabaff', zorder = 2, label = no, s=5)
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC4382')
# legend_labels = ["NGC4382"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of M63')
# legend_labels = ["M63"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC3184')
# legend_labels = ["NGC3184"]
# The following ignores this galaxy
# ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
# ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#e6e6e6', fmt = '.', marker = "s", zorder = 1, label = no)
# ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#e6e6e6', fmt = '.', zorder = 1, label = no)
dummy_variable = 1
if iterator == 1 or iterator == 6 or iterator == 11:
if iterator == 1:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#096612', fmt = 'o', marker = "s", zorder = 4)
elif iterator == 6:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#096612', fmt = 'o', marker = "o", zorder = 4)
elif iterator == 11:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#096612', fmt = 'o', marker = "^", zorder = 4)
ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#0c8718', fmt = 'o', zorder = 3, label = no, ms=3)
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# ax1.scatter(x, y, color = '#90e097', zorder = 2, label = no, s=5)
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC0628')
# legend_labels.append(["NGC0628"])
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of M94')
# legend_labels = ["M94"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC3198')
# legend_labels = ["NGC3198"]
# The following ignores this galaxy
# ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
# ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#e6e6e6', fmt = '.', marker = "s", zorder = 1, label = no)
# ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#e6e6e6', fmt = '.', zorder = 1, label = no)
dummy_variable = 1
if iterator == 2 or iterator == 7 or iterator == 12:
if iterator == 2:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#6a0a7a', fmt = 'o', marker = "s", zorder = 4)
elif iterator == 7:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#6a0a7a', fmt = 'o', marker = "o", zorder = 4)
elif iterator == 12:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#6a0a7a', fmt = 'o', marker = "^", zorder = 4)
ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#880d9d', fmt = 'o', zorder = 3, label = no, ms=3)
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# ax1.scatter(x, y, color = '#edddff', zorder = 2, label = no, s=5)
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC2403')
# legend_labels = ["NGC2403"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of M95')
# legend_labels = ["M95"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC4559')
# legend_labels = ["NGC4559"]
# The following lines ignore this galaxy
# ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
# ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#e6e6e6', fmt = '.', marker = "s", zorder = 1, label = no)
# ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#e6e6e6', fmt = '.', zorder = 1, label = no)
dummy_variable = 1
if iterator == 3 or iterator == 8:
if iterator == 3:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#b50636', fmt = 'o', marker = "s", zorder = 4)
elif iterator == 8:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#b50636', fmt = 'o', marker = "o", zorder = 4)
ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#ec0040', fmt = 'o', zorder = 3, label = no, ms=3)
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# ax1.scatter(x, y, color = '#f2bcc5', zorder = 2, label = no, s=5)
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC6946')
# legend_labels = ["NGC6946"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of M100')
# legend_labels = ["M100"]
# The following lines ignore this galaxy
# ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
# ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#e6e6e6', fmt = '.', marker = "s", zorder = 1, label = no)
# ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#e6e6e6', fmt = '.', zorder = 1, label = no)
dummy_variable = 1
if iterator == 4 or iterator == 9:
if iterator == 4:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#bb9407', fmt = 'o', marker = "s", zorder = 4)
elif iterator == 9:
ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#bb9407', fmt = 'o', marker = "*", zorder = 4)
ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#f1bc00', fmt = 'o', zorder = 3, label = no, ms=3)
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# ax1.scatter(x, y, color = '#f1d9ac', zorder = 2, label = no, s=5)
# legend_labels = ["NGC7793"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC7793')
# legend_labels = ["NGC2841"]
# ax1.set_title(r'Luminosity vs Photon Index ($\Gamma$) of NGC2841')
# The following lines ignore this galaxy
# ax1.scatter(x, y, color = '#e6e6e6', zorder = 1, label = no, s=5)
# ax1.errorbar(x_binned, y_binned, xerr = x_error_binned, color = '#e6e6e6', fmt = '.', marker = "s", zorder = 1, label = no)
# ax1.errorbar(x_unbinned, y_unbinned, xerr = x_error_unbinned, color = '#e6e6e6', fmt = '.', zorder = 1, label = no)
dummy_variable = 1
    plt.yscale('log')
    ax1.set_xlabel("Photon Index")
    ax1.set_ylabel("Luminosity")
    ax1.set_title('Comparative Chart')
legend_labels = []
for i in range(len(fits_dir)):
legend_labels.append("".join(fits_dir[i].rsplit("-final-sample.fits")))
ax1.legend(legend_labels)
plt.grid(True)
plt.draw()
ax1.apply_aspect()
fig.savefig('comparitive_auto.eps', dpi=fig.dpi)
Image.open('comparitive_auto.eps').save('comparitive_auto.png','png')
# The following lines can be uncommented (and the above commented) to develop graphs for individual galaxies
# if (legend_labels == ["NGC4382", "NGC0628", "NGC2403", "NGC6946", "NGC7793", "M63", "M94", "M95", "M100", "NGC2841", "NGC3184", "NGC3198", "NGC4559"]):
# fig.savefig('comparitive_auto_update.eps', dpi=fig.dpi)
# Image.open('comparitive_auto_update.eps').save('comparitive_auto_update.png','png')
#
# elif (legend_labels == ["NGC4382"]):
# fig.savefig('NGC4382.eps', dpi=fig.dpi)
# Image.open('NGC4382.eps').save('NGC4382.png','png')
#
# elif (legend_labels == ["NGC0628"]):
# fig.savefig('NGC0628.eps', dpi=fig.dpi)
# Image.open('NGC0628.eps').save('NGC0628.png','png')
#
# elif (legend_labels == ["NGC2403"]):
# fig.savefig('NGC2403.eps', dpi=fig.dpi)
# Image.open('NGC2403.eps').save('NGC2403.png','png')
#
# elif (legend_labels == ["NGC6946"]):
# fig.savefig('NGC6946.eps', dpi=fig.dpi)
# Image.open('NGC6946.eps').save('NGC6946.png','png')
#
# elif (legend_labels == ["NGC7793"]):
# fig.savefig('NGC7793.eps', dpi=fig.dpi)
# Image.open('NGC7793.eps').save('NGC7793.png','png')
#
# if (legend_labels == ["M63"]):
# fig.savefig('M63.eps', dpi=fig.dpi)
# Image.open('M63.eps').save('M63.png','png')
#
# elif (legend_labels == ["M94"]):
# fig.savefig('M94.eps', dpi=fig.dpi)
# Image.open('M94.eps').save('M94.png','png')
#
# elif (legend_labels == ["M95"]):
# fig.savefig('M95.eps', dpi=fig.dpi)
# Image.open('M95.eps').save('M95.png','png')
#
# elif (legend_labels == ["M100"]):
# fig.savefig('M100.eps', dpi=fig.dpi)
# Image.open('M100.eps').save('M100.png','png')
#
# elif (legend_labels == ["NGC2841"]):
# fig.savefig('NGC2841.eps', dpi=fig.dpi)
# Image.open('NGC2841.eps').save('NGC2841.png','png')
#
# elif (legend_labels == ["NGC3184"]):
# fig.savefig('NGC3184.eps', dpi=fig.dpi)
# Image.open('NGC3184.eps').save('NGC3184.png','png')
#
# elif (legend_labels == ["NGC3198"]):
# fig.savefig('NGC3198.eps', dpi=fig.dpi)
# Image.open('NGC3198.eps').save('NGC3198.png','png')
#
# elif (legend_labels == ["NGC4559"]):
# fig.savefig('NGC4559.eps', dpi=fig.dpi)
# Image.open('NGC4559.eps').save('NGC4559.png','png')
plt.show()
return 0
main()
|
aprilcotwut/photon_index_binning
|
compare_bin.py
|
compare_bin.py
|
py
| 15,988 |
python
|
en
|
code
| 0 |
github-code
|
6
|
563107290
|
import re
#dictionary for rule mapping such that each rule by id maps to class label
rule_dict={}
rule_dict[0]=1 #Location. Key is the rule ID and value is the label ID; here rule 0 labels a span as Location
rule_dict[1]=5 #Cuisine
rule_dict[2]=1 #Location
rule_dict[3]=4 #Price
rule_dict[4]=8 #Rating
rule_dict[5]=2 #Hours
rule_dict[6]=2 #Hours
rule_dict[7]=3 #Amenity
rule_dict[8]=2 #Hours
rule_dict[9]=7 #Restaurant_Name
rule_dict[10]=6 #Dish
rule_dict[11]=4 #Price
rule_dict[12]=8 #Rating
rule_dict[13]=3 #Amenity
rule_dict[14]=7 #Restaurant Name
#all rules functions
#ex: any kid friendly restaurants around here
def rule0(sentence,sent_dict,rule_firing):
label=rule_dict[0]
s=sentence.lower()
pattern= re.compile("( |^)[^\w]*(within|near|next|close|nearby|around|around)[^\w]*([^\s]+ ){0,2}(here|city|miles|mile)*[^\w]*( |$)")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(rule_firing[sent_dict[key]]==0):
rule_firing[sent_dict[key]]=label
return rule_firing
#ex: can you find me some chinese food
def rule1(sentence,sent_dict,rule_firing):
label=rule_dict[1]
s=sentence.lower()
#print("sentence : ",sentence)
#print("sentence in lowercase",s)
words=s.strip().split(" ")
#rule_firing=[0]*len(words)
cuisine1a=['italian','american','japanese','spanish','mexican','chinese','vietnamese','vegan']
cuisine1b=['bistro','delis']
cuisine2=['barbecue','halal','vegetarian','bakery']
#cuisine3=[('italian','bistro'),('japanese','delis')]
for i in range(0,len(words)):
if rule_firing[i]==0 : #rule not fired yet
if words[i] in cuisine2:
rule_firing[i]=label
elif words[i] in cuisine1a:
rule_firing[i]=label
if i<len(words)-1:
if words[i+1] in cuisine1b:
rule_firing[i+1]=label
#print(rule_firing)
return rule_firing
#rule2 done in the area location
#ex: im looking for a 5 star restaurant in the area that serves wine
def rule2(sentence,sent_dict,firing_rule):
label=rule_dict[2]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("in the area")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
#ex: i need a family restaurant with meals under 10 dollars and kids eat
def rule3(sentence,sent_dict,firing_rule):
label=rule_dict[3]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile(" ([0-9]+|few|under [0-9]+) dollar")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
#ex: where can i get the highest rated burger within ten miles
def rule4(sentence,sent_dict,firing_rule):
label=rule_dict[4]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("( (high|highly|good|best|top|well|highest|zagat) (rate|rating|rated))|((rated|rate|rating) [0-9]* star)|([0-9]+ star)")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
#ex: where is the nearest italian restaurant that is still open
def rule5(sentence,sent_dict,firing_rule):
label=rule_dict[5]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("((open|opened) (now|late))|(still (open|opened|closed|close))|(((open|close|opened|closed) \w+([\s]| \w* | \w* \w* ))*[0-9]+ (am|pm|((a|p) m)|hours|hour))")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
#ex: find a vegan cuisine which is open until 2 pm
def rule6(sentence,sent_dict,firing_rule):
label=rule_dict[6]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("(open|close) (\w* ){0,3}until (\w* ){0,2}(([0-9]* (am|pm|((a|p) m)|hour|hours))|(late (night|hour))|(midnight))")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
#ex: i want to go to a restaurant within 20 miles that got a high rating and is considered fine dining
def rule7(sentence,sent_dict,firing_rule):
label=rule_dict[7]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("(outdoor|indoor|group|romantic|family|outside|inside|fine|waterfront|outside|private|business|formal|casual|rooftop|(special occasion))([\s]| \w+ | \w+ \w+ )dining")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
#ex:i need some late night chinese food within 4 miles of here
def rule8(sentence,sent_dict,firing_rule):
label=rule_dict[8]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("(open |this |very ){0,2}late( night| dinner| lunch| dinning|( at night)){0,2}")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
# ex : is passims kitchen open at 2 am
def rule9(sentence,sent_dict,firing_rule):
label=rule_dict[9]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("[\w+ ]{0,2}(palace|cafe|bar|kitchen|outback|dominoes)")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if sent_dict[key] != 'restaurants' and (firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
#ex: please find me a pub that serves burgers
def rule10(sentence,sent_dict,firing_rule):
label=rule_dict[10]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("wine|sandwich|pasta|burger|peroggis|burrito|(chicken tikka masala)|appetizer|pizza|winec|upcake|(onion ring)|tapas")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
#ex: im looking for an inexpensive mexican restaurant
def rule11(sentence,sent_dict,firing_rule):
label=rule_dict[11]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("(affordable|cheap|expensive|inexpensive)")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
bad_words=['the','a','an','has','have','that','this','beef','for','with','if','at']
good_words=['price','prices','pricing','priced']
words=s.strip().split(" ")
for i in range(1,len(words)):
if firing_rule[i-1]==0:
if words[i] in good_words :
if words[i-1] not in bad_words:
firing_rule[i-1]=label
return firing_rule
#ex: which moderately priced mexican restaurants within 10 miles have the best reviews
def rule12(sentence,sent_dict,firing_rule):
label=rule_dict[12]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("(([0-9]*)|very|most)* (good|great|best|bad|excellent|negative|star) (\w* ){0,2}(review|reviews|rating|rated)")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
# ex: is there a pet friendly restaurant within 10 miles from here
def rule13(sentence,sent_dict,firing_rule):
label=rule_dict[7]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("(pet|kid|)(friendly|indoor|outdoor|date|dining|buffet|great|fine|good|friend|group|birthday|anniversary|family|historical|family friendly|friendly)([\s]| \w+ | \w+ \w+ )(spot|dining|parking|dinne|style|eatries|catering|drive throughs|allow|amenity|amenity)*")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if(firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
#ex: where is the next mcdonalds
def rule14(sentence,sent_dict,firing_rule):
label=rule_dict[9]
#firing_rule=[0]*len(sent_dict.keys())
s=sentence.lower()
pattern=re.compile("(burger king|mcdonalds|taco bells|Mcdills|denneys|dennys|Mcdills)")
r=re.finditer(pattern,s)
for match in r:
start=match.start()
end=match.end()
for key in sent_dict.keys():
if key in range(start,end):
if sent_dict[key] != 'restaurants' and (firing_rule[sent_dict[key]]==0):
firing_rule[sent_dict[key]]=label
return firing_rule
rule_list=[rule0,rule1,rule2,rule3,rule4,rule5,rule6,rule7,rule8,rule9,rule10,rule11,rule12,rule13,rule14]
num_rules=len(rule_list)
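# A minimal usage sketch (not part of the original module). rule1 only needs the sentence and a
# per-word label list, so with a hypothetical query:
# s = "can you find me some chinese food"
# labels = rule1(s, {}, [0] * len(s.split()))
# # labels[5] is now 5 (Cuisine), because "chinese" is in cuisine1a.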
|
yueyu1030/COSINE
|
data/mitr/rules.py
|
rules.py
|
py
| 10,114 |
python
|
en
|
code
| 197 |
github-code
|
6
|
39269918095
|
import copy
import os
import shlex
import sys
import textwrap
from functools import wraps
def bash_quote(w, quote):
'''
Quote word *w* with quote character *quote* which may be empty, single quote or double quote.
'''
assert quote in ('', '"', "'")
if quote == "'":
w = w.replace("'", quote + '"\'"' + quote)
else:
# some characters are special and cannot be escaped unless we use a single quote:
# ! - get rid of history expansion
# \x01 - breaks escaping in bash: echo "\\$" -> \\$
# \n - when only using escapes
special_characters = '!\x01'
if quote == '':
special_characters += '\n'
for special in special_characters:
if special in w:
return ("'" + special + "'").join(bash_quote(s, quote) for s in w.split(special))
# escape characters
escaped_chars = set()
if quote == '':
escaped_chars |= set(os.environ.get("COMP_WORDBREAKS", " \t\"'@><=;|&(:."))
escaped_chars |= set("`$\"'\t ~&;?|#()*{><[")
elif quote == '"':
escaped_chars |= set("`$\"")
escaped = ''
last = ''
for i, c in enumerate(w):
if last == '\\' and (c in escaped_chars | set('\n\\') or quote == ''):
escaped += '\\'
if (c == '\\' and i == len(w) - 1) or (c in escaped_chars):
escaped += '\\'
escaped += c
last = c
w = escaped
return quote + w + quote
class Namespace(dict):
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
self[key] = value
def __deepcopy__(self, memo):
return copy.deepcopy(dict(self), memo)
class Parser(object):
def __init__(self, tokens, complete_token=None):
'''
:param complete_token: the token to be completed; `None` disables completion
'''
self.tokens = tokens
self.complete_token = complete_token
# internal state
self.long = {}
self.short = {}
self.pos = 0
# results
self._completions = []
self.values = Namespace()
self.errors = []
self.subcommands = []
def get_state(self):
return dict([(attr, copy.copy(getattr(self, attr)))
for attr in ('long', 'short', 'pos', '_completions', 'errors', 'subcommands')] +
[('values', Namespace(copy.deepcopy(self.values)))])
def set_state(self, state):
for attr, val in state.items():
setattr(self, attr, val)
def add_options(self, options):
for opt in options:
if opt.short:
self.short[opt.short] = opt
if opt.long:
self.long[opt.long] = opt
def error(self, error):
self.errors.append(error)
def __repr__(self):
return "<Parser values=%r, errors=%r, subcommands=%r>" % (self.values, self.errors, self.subcommands)
@property
def token(self):
return self.tokens[self.pos] if self.pos < len(self.tokens) else None
@property
def last_token(self):
return self.tokens[self.pos - 1] if self.pos - 1 >= 0 else None
def token_is_option(self):
return self.token.startswith('-')
def eat_token(self):
token = self.token
self.pos += 1
return token
def barf_token(self):
self.pos -= 1
def parse_options(self):
while self.token and self.token_is_option():
option = None
token = self.eat_token()
if token.startswith('--'):
if token[2:] in self.long:
option = self.long[token[2:]]
elif token[1:] in self.short:
option = self.short[token[1:]]
if option is None:
self.error('Unknown option %s' % token)
return
else:
option.parse(self)
if self._completing_option:
self._add_completions('-' + k for k in list(self.short.keys()))
self._add_completions('--' + k for k in list(self.long.keys()))
def parse_arguments(self, arguments):
for arg in arguments:
if arg.nargs not in (None, '?', '*', '+'):
raise Exception('Invalid nargs %s' % arg.nargs)
self._add_arg_completions(arg)
self.parse_options()
if arg.nargs in (None, '+'):
arg.parse(self)
self.parse_options()
if arg.nargs in ('?', '*', '+'):
rewind_state = None
while self.token and (not arg.choices or self.token in arg.choices):
if type(arg.stop_at) != list and self.token == arg.stop_at:
rewind_state = self.get_state()
elif type(arg.stop_at) == list and self.token in arg.stop_at:
rewind_state = self.get_state()
arg.parse(self)
self.parse_options()
if arg.nargs == '?':
break
if rewind_state:
self.set_state(rewind_state)
if arg.nargs in ('*', '+'):
# Even if the token doesn't match the set of choices, it
# might still yield valid completions for the current arg
self._add_arg_completions(arg)
if self.errors:
return
self.parse_options()
@property
def completing(self):
return not self.errors and self.token is None and self.complete_token is not None
@property
def _completing_option(self):
return self.completing and len(self.complete_token) > 0 and self.complete_token[0] == '-'
@property
def _completing_argument(self):
return self.completing and (len(self.complete_token) == 0 or self.complete_token[0] != '-')
def _add_completions(self, completions):
self._completions.extend(c for c in completions if c.startswith(self.complete_token))
def _add_arg_completions(self, arg):
if self._completing_argument:
self._add_completions(arg.completions(self.complete_token, self))
class Option(object):
def __init__(self, short, long, action='store_true', dest=None, help=None, default=None):
'''
The number of additional tokens needed for an Option is determined by
*action*:
- ``store_true`` requires 0 tokens and stores True in *dest*
- ``store`` requires 1 token and stores it in *dest**
'''
self.short = short
self.long = long
self.dest = dest if dest else long
self.help = help
self.action = action
self.default = default
def __repr__(self):
return '-%s/--%s' % (self.short, self.long)
def set_default(self, parser):
parser.values[self.dest] = self.default
def parse(self, parser):
if self.action == 'store_true':
parser.values[self.dest] = True
elif self.action == 'store':
if parser.token is None or parser.token_is_option():
parser.error("%s expects an argument" % parser.last_token)
else:
value = parser.eat_token()
parser.values[self.dest] = value
class ArgMixin(object):
def usage(self):
if self.nargs is None:
return self.metavar
elif self.nargs == '?':
return '[%s]' % self.metavar
elif self.nargs == '*':
return '[%s]...' % self.metavar
elif self.nargs == '+':
return '%s...' % self.metavar
else:
raise Exception('Invalid nargs %s' % self.nargs)
def __repr__(self):
return self.metavar
def set_default(self, parser):
'''
Sets the default value for the curent argument. Called as soon as the argument's command is seen.
'''
pass
def completions(self, complete_token, parser):
'''
Returns the completions matching `complete_token` for the current state from `parser`.
'''
pass
def parse(self, parser):
'''
Uses the state from `parser` to consume the tokens for the current arg
(only one instance, even if nargs says otherwise). Called only if at
least a token is required for the current argument.
'''
pass
class Argument(ArgMixin):
def __init__(self, name, dest=None, metavar=None, nargs=None, action='store', choices=None,
default=None, completions=None, stop_at=None):
self.name = name
self.dest = dest if dest else name
if metavar:
self.metavar = metavar
elif choices:
self.metavar = '|'.join(choices)
else:
self.metavar = name.upper()
self.nargs = nargs
self.action = action
self.choices = choices
self.completion_fn = completions
self.default = default
# stop_at is an ugly hack to resolve grammar ambiguity
# The parser will revert to the state for the last instance of this token
self.stop_at = stop_at
def set_default(self, parser):
if self.action in ('append', 'append_unique') or self.nargs in ('*', '+'):
parser.values.setdefault(self.dest, [])
elif self.action == 'store':
parser.values.setdefault(self.dest, self.default)
else:
pass
def completions(self, complete_token, parser):
if self.choices:
if self.action == 'append_unique':
return set(self.choices) - set(parser.values[self.dest])
else:
return self.choices
elif hasattr(self, 'completion_fn') and callable(self.completion_fn):
comps = self.completion_fn(complete_token, parser)
if self.action == 'append_unique':
return set(comps) - set(parser.values[self.dest])
return comps
else:
return []
def parse(self, parser):
token = parser.eat_token()
if token is None:
parser.error("A value is required for %s" % self.metavar)
return
if self.choices and token not in self.choices:
parser.error("%s must be one of: %s" % (self.metavar, ' '.join(self.choices)))
return
if self.action == 'append' or self.nargs in ('*', '+'):
parser.values[self.dest].append(token)
elif self.action == 'store':
parser.values[self.dest] = token
elif self.action == 'append_unique':
pv = parser.values[self.dest]
if token in pv:
parser.error('%s cannot be specified twice' % token)
else:
pv.append(token)
elif self.action is None:
pass
else:
raise Exception('Invalid action %s' % self.action)
class Token(Argument):
def __init__(self, name, dest=None, nargs=None, action=None):
super(Token, self).__init__(name, metavar=name, choices=(name, ), action=action, nargs=nargs)
if dest is None:
self.dest = None
class Group(ArgMixin):
'''
If the group has nargs='?' or nargs='*' and it's not followed by eof it must
start with a static set of choices (otherwise the grammar would be
ambiguous).
'''
def __init__(self, *args, **kwargs):
self.nargs = kwargs.pop('nargs', None)
self.stop_at = kwargs.pop('stop_at', None)
self.arguments = args
@property
def metavar(self):
return ' '.join(a.usage() for a in self.arguments)
@property
def choices(self):
return self.arguments[0].choices
def completions(self, complete_token, parser):
return self.arguments[0].completions(complete_token, parser)
def parse(self, parser):
parser.parse_arguments(self.arguments)
def set_default(self, parser):
for arg in self.arguments:
arg.set_default(parser)
class Command(object):
def __init__(self, name, *args, **kwargs):
self.name = name
self.options = []
self.subcommands = []
self.arguments = []
for o in args:
if isinstance(o, Option):
self.options.append(o)
elif isinstance(o, Command):
self.subcommands.append(o)
else:
self.arguments.append(o)
self.help = kwargs.pop('help', None)
self.description = kwargs.pop('description', None)
self.defaults = kwargs.pop('defaults', {})
self.default_subcommand = kwargs.pop('default_subcommand', None)
assert not kwargs
def register(self, *args, **kwargs):
def decorator(func):
cmd, path = self._get_scmd_path(args[0])
if 'description' not in kwargs and func.__doc__:
kwargs['description'] = textwrap.dedent(func.__doc__).strip()
kwargs.setdefault('defaults', {}).setdefault('run', func)
cmd.subcommands.append(Command(path[-1], *(args[1:]), **kwargs))
@wraps(func)
def wrapper(*wargs, **wkwargs):
func(*wargs, **wkwargs)
return wrapper
return decorator
def alias(self, source_path, dest_path):
scmd, spath = self._get_scmd_path(source_path)
dcmd, dpath = self._get_scmd_path(dest_path)
dest_cmd = copy.copy(scmd._get_subcommand(spath[-1]))
dest_cmd.name = dpath[-1]
dcmd.subcommands.append(dest_cmd)
def set_default(self, parser):
parser.values.update(self.defaults)
for arg in self.arguments:
arg.set_default(parser)
for opt in self.options:
opt.set_default(parser)
def parse(self, tokens):
parser = Parser(tokens)
self._parse_command(parser)
if parser.token:
parser.error('Unparsed tokens: %s' % ' '.join(parser.tokens[parser.pos:]))
return parser
def complete(self, line, point):
# ignore everything after point
line = line[:point]
# if the line ends in an incomplete escape sequence skip it
if line[-1] == '\\' and line[-2] != '\\':
line = line[:-1]
quote_char = ''
for attempt in range(2):
try:
lex = shlex.shlex(line, posix=True)
lex.whitespace_split = True
tokens = list(lex)
except ValueError:
if attempt == 0:
# close the quotes and try again
quote_char = lex.state
line += quote_char
else:
raise
tokens = tokens[1:] # skip the program name
if tokens and (line[-1] != ' ' or line[-2:] == '\ '):
complete_token = tokens.pop()
else:
complete_token = ''
parser = Parser(tokens, complete_token)
self._parse_command(parser)
return set(bash_quote(c, quote_char) for c in parser._completions)
def handle_shell_completion(self):
if 'COMP_LINE' in os.environ:
for c in self.complete(os.environ['COMP_LINE'], int(os.environ['COMP_POINT'])):
print(c)
sys.exit()
def usage(self):
return ' '.join([self.name] + [a.usage() for a in self.arguments])
def chain_usage(self, chain):
return ' '.join(c.usage() for c in chain)
def print_help(self, subcommands):
'''Only works for the top-level command'''
last = self
chain = [self]
for cmd_name in subcommands:
last = last._get_subcommand(cmd_name)
if last is None:
print("Unknown subcommand: %s" % cmd_name)
return
chain.append(last)
usage = self.chain_usage(chain)
if last.subcommands:
if last.default_subcommand:
usage += ' [<subcommand>]'
else:
usage += ' <subcommand>'
print("Usage: {}".format(usage))
if last.description or last.help:
print("\n", last.description or last.help)
def _cmd_chains(cmd, stop_on_args=False):
'''Follows subcommand chains until an argument can be specified'''
if not cmd.subcommands or (cmd.arguments and stop_on_args):
return {'': cmd}
else:
return dict(((s.name + ' ' + name).strip(), cmd)
for s in cmd.subcommands
for name, cmd in _cmd_chains(s, True).items())
if last.subcommands:
print("\nSubcommands:")
if last.default_subcommand:
cmd = last._get_subcommand(last.default_subcommand)
print(" %-20s %s" % ('[%s]' % cmd.name, cmd.help or cmd.name))
for name, cmd in sorted(_cmd_chains(last).items()):
if not last.default_subcommand or last.default_subcommand != name:
print(" %-20s %s" % (name, cmd.help or name))
for i, cmd in enumerate(reversed(chain)):
if cmd.options:
print("\nOptions for %s:" % ' '.join(c.name for c in chain[:len(chain) - i]))
wrapper = textwrap.TextWrapper(width=80,
initial_indent=' ' * 26,
subsequent_indent=' ' * 26)
for opt in sorted(cmd.options, key=lambda x: x.long or x.short):
print(" %-2s %-20s %s" % ('-' + opt.short if opt.short else '',
'--' + opt.long if opt.long else '',
wrapper.fill(opt.help or '').lstrip()))
def _get_subcommand(self, subcommand):
for cmd in self.subcommands:
if cmd.name == subcommand:
return cmd
else:
return None
def _get_scmd_path(self, path_string):
path = path_string.split()
cmd = self
for cname in path[:-1]:
cmd = cmd._get_subcommand(cname)
if cmd is None:
raise Exception('Invalid command path: %s (%s not found)' % (path_string, cname))
return cmd, path
def _parse_command(self, parser):
self.set_default(parser)
parser.add_options(self.options)
parser.parse_arguments(self.arguments)
if self.subcommands:
if parser._completing_argument:
parser._add_completions(s.name for s in self.subcommands)
token = parser.eat_token()
if token is None:
if self.default_subcommand:
self._get_subcommand(self.default_subcommand).set_default(parser)
else:
parser.error("Subcommand expected")
else:
cmd = self._get_subcommand(token.lower())
if cmd:
parser.subcommands.append(cmd.name)
cmd._parse_command(parser)
elif self.default_subcommand:
parser.barf_token()
cmd = self._get_subcommand(self.default_subcommand)
cmd._parse_command(parser)
else:
parser.error("Invalid subcommand %s" % token)
|
1and1/dim
|
ndcli/dimcli/cliparse.py
|
cliparse.py
|
py
| 19,558 |
python
|
en
|
code
| 39 |
github-code
|
6
|
28021381750
|
name_of_file = input("Enter the name of the file to analyze")
name_of_file += '.txt'
amount = 0
with open(name_of_file, 'r') as file:
lines = file.readlines()
for line in lines:
amount += int(line)
print("ะะฑัะฐั ััะผะผะฐ ั ะฝะฐั ัะพััะพัะฒะปัะตั", amount)
print("ะกัะตะดะฝะตะต ั ะฝะฐั ะฟะพะปััะฐะตััั: ", amount/len(lines))
|
isakura313/01_2_2
|
files/all_sum.py
|
all_sum.py
|
py
| 389 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
16748915300
|
#!/usr/bin/python3
"""This is a function that prints stuffies"""
def say_my_name(first_name, last_name=""):
"""This functions does the printing
Args:
first_name (string)
last_name (string)
Raises:
TypeError: when the names aren't a string
"""
    if not isinstance(first_name, str):
        raise TypeError("first_name must be a string")
    if not isinstance(last_name, str):
        raise TypeError("last_name must be a string")
print("My name is {} {}".format(first_name, last_name))
|
Mourad-Azouga/alx-higher_level_programming
|
0x07-python-test_driven_development/3-say_my_name.py
|
3-say_my_name.py
|
py
| 526 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29479738416
|
# https://leetcode.com/problems/find-all-duplicates-in-an-array/description/
# Time: O(n), Space: O(n)
from typing import List
class Solution:
def findDuplicates(self, nums: List[int]) -> List[int]:
lst = []
hashMap = {}
for num in nums:
if num not in hashMap:
hashMap[num] = 0
hashMap[num] += 1
if hashMap[num] == 2:
lst.append(num)
return lst
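# Example (not part of the original solution): findDuplicates([4, 3, 2, 7, 8, 2, 3, 1])
# returns [2, 3], in the order each value is seen for the second time.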
|
Suraj7879/-CrackYourInternship
|
Day2/4.py
|
4.py
|
py
| 424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25262905175
|
import datetime
import logging
from concurrent import futures
from google.appengine.ext import ndb
from upvote.gae.bigquery import tables
from upvote.gae.datastore.models import exemption as exemption_models
from upvote.gae.datastore.models import host as host_models
from upvote.gae.datastore.models import policy as policy_models
from upvote.gae.lib.bit9 import api as bit9_api
from upvote.gae.lib.bit9 import utils as bit9_utils
from upvote.gae.lib.exemption import checks
from upvote.gae.lib.exemption import notify
from upvote.gae.lib.exemption import monitoring
from upvote.gae.utils import env_utils
from upvote.gae.utils import mail_utils
from upvote.gae.utils import template_utils
from upvote.shared import constants
# Done for brevity.
_STATE = constants.EXEMPTION_STATE
class Error(Exception):
"""Base error class for this module."""
class UnknownHostError(Error):
"""Raised when a particular host cannot be found."""
class InvalidEnforcementLevelError(Error):
"""Raised when an invalid Bit9 enforcement level is provided."""
class UnknownPolicyError(Error):
"""Raised if a Bit9 host has an unknown policy."""
class InvalidClientModeError(Error):
"""Raised when an invalid Santa client mode is provided."""
class UnsupportedPlatformError(Error):
"""Raised if an Exemption with an unsupported platform is encountered."""
class UnsupportedClientError(Error):
"""Raised when attempting to take an action for an unsupported client."""
class InvalidStateChangeError(Error):
"""Raised when attempting to change an Exemption to an invalid state."""
class InvalidReasonError(Error):
"""Raised when an invalid EXEMPTION_REASON is provided."""
class InvalidDurationError(Error):
"""Raised when an invalid EXEMPTION_DURATION is provided."""
class InvalidRenewalError(Error):
"""Raised when trying to make an invalid Exemption renewal."""
_POLICY_CHECKS = {
constants.PLATFORM.MACOS: [],
constants.PLATFORM.WINDOWS: [],
}
def _ChangeEnforcementInBit9(host_id, new_enforcement_level):
"""Changes enforcement level for a Bit9Host.
Args:
host_id: The ID of the Bit9Host.
new_enforcement_level: The new enforcement level to set for the Bit9Host.
Raises:
UnknownHostError: if the host cannot be found in Datastore.
InvalidEnforcementLevelError: if the provided enforcement level is invalid.
UnknownPolicyError: if the host's Bit9 policy is unknown.
"""
# Verify the host_id corresponds to an actual Bit9Host.
if not host_models.Bit9Host.get_by_id(host_id):
monitoring.enforcement_errors.Increment()
raise UnknownHostError('Host %s is unknown' % host_id)
# Verify the specified enforcement level is valid.
if new_enforcement_level not in constants.BIT9_ENFORCEMENT_LEVEL.SET_ALL:
monitoring.enforcement_errors.Increment()
raise InvalidEnforcementLevelError(
'Invalid Bit9 enforcement level: %s' % new_enforcement_level)
# Retrieve the current Computer policy from Bit9.
computer = bit9_api.Computer.get(int(host_id), bit9_utils.CONTEXT)
current_policy_id = computer.policy_id
# Determine the appropriate policy for the new enforcement level.
policy_map = constants.BIT9_ENFORCEMENT_LEVEL.MAP_TO_POLICY_ID
new_policy_id = policy_map.get(new_enforcement_level)
# If there's not a valid policy, bail.
if not new_policy_id:
monitoring.enforcement_errors.Increment()
raise UnknownPolicyError(
'Host %s has an unknown policy ID: %s' % (host_id, current_policy_id))
logging.info(
'Changing policy from %s to %s', current_policy_id, new_policy_id)
# Write the new policy back to Bit9.
computer.policy_id = new_policy_id
computer.put(bit9_utils.CONTEXT)
# Change the policy Key on the entity itself.
new_policy_key = ndb.Key(policy_models.Bit9Policy, new_policy_id)
host_models.Bit9Host.ChangePolicyKey(host_id, new_policy_key)
# Insert a row into BigQuery reflecting the change.
host = host_models.Bit9Host.get_by_id(host_id)
tables.HOST.InsertRow(
device_id=host_id,
timestamp=datetime.datetime.utcnow(),
action=constants.HOST_ACTION.MODE_CHANGE,
hostname=host.hostname,
platform=constants.PLATFORM.WINDOWS,
users=host.users,
mode=new_enforcement_level)
def _ChangeEnforcementInSanta(host_id, new_client_mode):
"""Toggles between MONITOR and LOCKDOWN for a SantaHost.
Args:
host_id: The ID of the SantaHost.
new_client_mode: The new client mode to set for the SantaHost.
Raises:
UnknownHostError: if the host cannot be found in Datastore.
InvalidClientModeError: if the provided client mode is invalid.
"""
# Verify the host_id corresponds to an actual SantaHost.
host = host_models.SantaHost.get_by_id(host_id)
if not host:
monitoring.enforcement_errors.Increment()
raise UnknownHostError('Host %s is unknown' % host_id)
# Verify the specified client mode is valid.
if new_client_mode not in constants.CLIENT_MODE.SET_ALL:
monitoring.enforcement_errors.Increment()
raise InvalidClientModeError(
'Invalid Santa client mode: %s' % new_client_mode)
host_models.SantaHost.ChangeClientMode(host_id, new_client_mode)
# If changing to MONITOR mode and transitive whitelisting is enabled, disable
# it.
if (new_client_mode == constants.CLIENT_MODE.MONITOR and
host.transitive_whitelisting_enabled):
ChangeTransitiveWhitelisting(host_id, False)
host = host_models.Host.get_by_id(host_id)
tables.HOST.InsertRow(
device_id=host_id,
timestamp=datetime.datetime.utcnow(),
action=constants.HOST_ACTION.MODE_CHANGE,
hostname=host.hostname,
platform=constants.PLATFORM.MACOS,
users=[host.primary_user],
mode=host.client_mode)
def _EnableLockdown(exm_key):
"""Enables LOCKDOWN mode for a given Exemption.
Args:
exm_key: The Key of the Exemption we're enabling LOCKDOWN for.
Raises:
UnsupportedPlatformError: if the platform of the corresponding Host is
unsupported.
"""
host_id = exm_key.parent().id()
platform = exemption_models.Exemption.GetPlatform(exm_key)
logging.info('Enabling LOCKDOWN mode for Host %s', host_id)
if platform == constants.PLATFORM.WINDOWS:
_ChangeEnforcementInBit9(host_id, constants.BIT9_ENFORCEMENT_LEVEL.LOCKDOWN)
elif platform == constants.PLATFORM.MACOS:
_ChangeEnforcementInSanta(host_id, constants.CLIENT_MODE.LOCKDOWN)
else:
monitoring.enforcement_errors.Increment()
raise UnsupportedPlatformError(
'Host %s has an unsupported platform: %s' % (host_id, platform))
def _DisableLockdown(exm_key):
"""Disables LOCKDOWN mode for a given Exemption.
Args:
exm_key: The Key of the Exemption we're disabling LOCKDOWN for.
Raises:
UnsupportedPlatformError: if the platform of the corresponding Host is
unsupported.
"""
host_id = exm_key.parent().id()
platform = exemption_models.Exemption.GetPlatform(exm_key)
logging.info('Disabling LOCKDOWN mode for Host %s', host_id)
if platform == constants.PLATFORM.WINDOWS:
_ChangeEnforcementInBit9(host_id, constants.BIT9_ENFORCEMENT_LEVEL.MONITOR)
elif platform == constants.PLATFORM.MACOS:
_ChangeEnforcementInSanta(host_id, constants.CLIENT_MODE.MONITOR)
else:
monitoring.enforcement_errors.Increment()
raise UnsupportedPlatformError(
'Host %s has an unsupported platform: %s' % (host_id, platform))
@ndb.transactional
def Request(host_id, reason, other_text, duration):
"""Creates a new Exemption, or reuses an existing one.
If no corresponding Exemption exists, creates a new one in the REQUESTED
state. Otherwise, if one exists in a terminal state
(CANCELLED/REVOKED/EXPIRED), sets it back to REQUESTED with the new
deactivation date.
Args:
host_id: (str) Host ID
reason: (str) The reason for requesting an Exemption. Must be one of
constants.EXEMPTION_REASON.
other_text: (str) Additional text if the reason is OTHER
duration: (str) The requested duration of the Exemption. Must be one of
constants.EXEMPTION_DURATION.
Raises:
InvalidReasonError: if the provided reason is invalid.
InvalidDurationError: if the provided duration is invalid.
InvalidRenewalError: if the Exemption cannot currently be renewed.
"""
logging.info('Requesting Exemption for host %s', host_id)
# Validate the reason.
if reason not in constants.EXEMPTION_REASON.SET_ALL:
message = 'Invalid reason provided: %s' % reason
logging.error(message)
raise InvalidReasonError(message)
# Validate the duration.
if duration not in constants.EXEMPTION_DURATION.SET_ALL:
message = 'Invalid exemption duration: %s' % duration
logging.error(message)
raise InvalidDurationError(message)
duration_delta = datetime.timedelta(
days=constants.EXEMPTION_DURATION.MAP_TO_DAYS[duration])
deactivation_dt = datetime.datetime.utcnow() + duration_delta
exm = exemption_models.Exemption.Get(host_id)
# If an Exemption has never existed for this host_id, just create one.
if exm is None:
exm_key = exemption_models.Exemption.Insert(
host_id, deactivation_dt, reason, other_text=other_text)
notify.DeferUpdateEmail(exm_key, _STATE.REQUESTED, transactional=True)
return
# If we're dealing with an existing Exemption which can state change back to
# REQUESTED, then make the change.
if exm.CanChangeToState(_STATE.REQUESTED):
exm_key = exemption_models.Exemption.CreateKey(host_id)
details = [reason, other_text] if other_text else [reason]
exemption_models.Exemption.ChangeState(
exm_key, _STATE.REQUESTED, details=details)
exm.deactivation_dt = deactivation_dt
exm.put()
notify.DeferUpdateEmail(exm_key, _STATE.REQUESTED, transactional=True)
# Otherwise, we've received a request for an invalid renewal.
else:
message = 'Host %s already has a(n) %s Exemption' % (host_id, exm.state)
logging.error(message)
raise InvalidRenewalError(message)
def Process(exm_key):
"""Checks if a REQUESTED Exemption is compatible with all policies.
Args:
exm_key: The NDB Key of the Exemption entity.
"""
host_id = exm_key.parent().id()
logging.info('Processing Exemption for host %s', host_id)
# Change state from REQUESTED to PENDING.
try:
exemption_models.Exemption.ChangeState(exm_key, _STATE.PENDING)
# Process() shouldn't be transactional due to all the potential calls out made
# below. Because of this, it's entirely possible that the calls to Process()
# in RequestExemptionHandler and ProcessExemptions could both end up trying to
# transition this Exemption to PENDING at the same time. It's a benign race
# condition, so we should just note it and move on.
except exemption_models.InvalidStateChangeError:
logging.warning(
'Error encountered while processing Exemption for host %s', host_id)
return
# Any other Exceptions should make noise.
except Exception: # pylint: disable=broad-except
monitoring.processing_errors.Increment()
logging.exception(
'Error encountered while processing Exemption for host %s', host_id)
return
try:
# If no platform can be determined, auto-deny, because it means there's a
# bug. Otherwise this request will just endlessly bounce between REQUESTED
# and PENDING.
try:
platform = exemption_models.Exemption.GetPlatform(exm_key)
except exemption_models.UnknownPlatformError:
message = 'Host %s has an unknown platform' % host_id
logging.error(message)
monitoring.processing_errors.Increment()
Deny(exm_key, details=[message])
return
# If no policy has been defined for the platform, auto-deny, because it
# means there's a bug. Otherwise this request will just endlessly bounce
# between REQUESTED and PENDING.
if platform not in _POLICY_CHECKS:
message = 'Platform "%s" is unsupported' % platform
logging.error(message)
monitoring.processing_errors.Increment()
Deny(exm_key, details=[message])
return
# An empty policy should fail open, otherwise it would require a no-op check
# which always returns APPROVED. An empty policy that fails closed would be
# better suited by simply disabling the exemption system altogether.
policy_checks = _POLICY_CHECKS[platform]
if not policy_checks:
logging.info('Empty policy defined for platform "%s"', platform)
Approve(exm_key)
return
# Create a ThreadPoolExecutor and run the individual policy checks.
logging.info(
'Executing %d policy check(s) against host %s', len(policy_checks),
host_id)
with futures.ThreadPoolExecutor(max_workers=len(policy_checks)) as executor:
running_futures = [
executor.submit(check, exm_key) for check in policy_checks]
done_futures = futures.wait(running_futures).done
results = [done_future.result() for done_future in done_futures]
# If any of the checks return a non-'outcome' state, auto-deny, because it
# means there's a bug. Otherwise this request will just endlessly bounce
# between REQUESTED and PENDING.
for result in results:
if result.state not in _STATE.SET_OUTCOME:
message = '%s returned an invalid state: %s' % (
result.name, result.state)
logging.error(message)
monitoring.processing_errors.Increment()
Deny(exm_key, details=[message])
return
details = [result.detail for result in results if result.detail]
# Outcome precedence is: any(DENIED) > any(ESCALATED) > any(APPROVED).
if any(result.state == _STATE.DENIED for result in results):
Deny(exm_key, details=details)
elif any(result.state == _STATE.ESCALATED for result in results):
Escalate(exm_key, details=details)
else:
Approve(exm_key, details=details)
except Exception as e: # pylint: disable=broad-except
logging.exception(
'Error encountered while processing Exemption for host %s', host_id)
monitoring.processing_errors.Increment()
# If something breaks, revert back to REQUESTED so the cron can retry.
exemption_models.Exemption.ChangeState(
exm_key, _STATE.REQUESTED,
details=['Error while processing: ' + str(e)])
@ndb.transactional(xg=True) # xg due to Windows (Bit9Host & Bit9ApiAuth)
def Approve(exm_key, details=None):
"""Transitions an Exemption to the APPROVED state.
Args:
exm_key: The NDB Key of the Exemption entity.
details: Optional list of strings describing the rationale.
Raises:
InvalidStateChangeError: If the desired state cannot be transitioned to from
the current state.
"""
host_id = exemption_models.Exemption.GetHostId(exm_key)
logging.info('Approving Exemption for Host %s', host_id)
# Verify that the desired state change is still valid.
exm = exm_key.get()
if not exm.CanChangeToState(_STATE.APPROVED):
raise InvalidStateChangeError('%s to %s' % (exm.state, _STATE.APPROVED))
_DisableLockdown(exm_key)
exemption_models.Exemption.ChangeState(
exm_key, _STATE.APPROVED, details=details)
notify.DeferUpdateEmail(
exm_key, _STATE.APPROVED, details=details, transactional=True)
@ndb.transactional
def Deny(exm_key, details=None):
"""Transitions an Exemption to the DENIED state.
Args:
exm_key: The NDB Key of the Exemption entity.
details: Optional list of strings describing the rationale.
Raises:
InvalidStateChangeError: If the desired state cannot be transitioned to from
the current state.
"""
host_id = exemption_models.Exemption.GetHostId(exm_key)
logging.info('Denying Exemption for Host %s', host_id)
# Verify that the desired state change is still valid.
exm = exm_key.get()
if not exm.CanChangeToState(_STATE.DENIED):
raise InvalidStateChangeError('%s to %s' % (exm.state, _STATE.DENIED))
exemption_models.Exemption.ChangeState(
exm_key, _STATE.DENIED, details=details)
notify.DeferUpdateEmail(
exm_key, _STATE.DENIED, details=details, transactional=True)
@ndb.transactional
def Escalate(exm_key, details=None):
"""Transitions an Exemption to the ESCALATED state.
Args:
exm_key: The NDB Key of the Exemption entity.
details: Optional list of strings describing the rationale.
Raises:
InvalidStateChangeError: If the desired state cannot be transitioned to from
the current state.
"""
host_id = exemption_models.Exemption.GetHostId(exm_key)
logging.info('Escalating Exemption for Host %s', host_id)
# Verify that the desired state change is still valid.
exm = exm_key.get()
if not exm.CanChangeToState(_STATE.ESCALATED):
raise InvalidStateChangeError('%s to %s' % (exm.state, _STATE.ESCALATED))
exemption_models.Exemption.ChangeState(
exm_key, _STATE.ESCALATED, details=details)
@ndb.transactional(xg=True) # xg due to Windows (Bit9Host & Bit9ApiAuth)
def Expire(exm_key):
"""Transitions an Exemption to the EXPIRED state.
Args:
exm_key: The NDB Key of the Exemption entity.
Raises:
InvalidStateChangeError: If the desired state cannot be transitioned to from
the current state.
"""
host_id = exemption_models.Exemption.GetHostId(exm_key)
logging.info('Expiring Exemption for Host %s', host_id)
# Verify that the desired state change is still valid.
exm = exm_key.get()
if not exm.CanChangeToState(_STATE.EXPIRED):
raise InvalidStateChangeError('%s to %s' % (exm.state, _STATE.EXPIRED))
_EnableLockdown(exm_key)
exemption_models.Exemption.ChangeState(exm_key, _STATE.EXPIRED)
notify.DeferUpdateEmail(exm_key, _STATE.EXPIRED, transactional=True)
@ndb.transactional(xg=True) # xg due to Windows (Bit9Host & Bit9ApiAuth)
def Revoke(exm_key, details):
"""Transitions an Exemption to the REVOKED state.
Args:
exm_key: The NDB Key of the Exemption entity.
details: List of strings describing the rationale.
Raises:
InvalidStateChangeError: If the desired state cannot be transitioned to from
the current state.
"""
host_id = exemption_models.Exemption.GetHostId(exm_key)
logging.info('Revoking Exemption for Host %s', host_id)
# Verify that the desired state change is still valid.
exm = exm_key.get()
if not exm.CanChangeToState(_STATE.REVOKED):
raise InvalidStateChangeError('%s to %s' % (exm.state, _STATE.REVOKED))
_EnableLockdown(exm_key)
exemption_models.Exemption.ChangeState(
exm_key, _STATE.REVOKED, details=details)
notify.DeferUpdateEmail(
exm_key, _STATE.REVOKED, details=details, transactional=True)
@ndb.transactional(xg=True) # xg due to Windows (Bit9Host & Bit9ApiAuth)
def Cancel(exm_key):
"""Transitions an Exemption to the CANCELLED state.
Args:
exm_key: The NDB Key of the Exemption entity.
Raises:
InvalidStateChangeError: If the desired state cannot be transitioned to from
the current state.
"""
host_id = exemption_models.Exemption.GetHostId(exm_key)
logging.info('Cancelling Exemption for Host %s', host_id)
# Verify that the desired state change is still valid.
exm = exm_key.get()
if not exm.CanChangeToState(_STATE.CANCELLED):
raise InvalidStateChangeError('%s to %s' % (exm.state, _STATE.CANCELLED))
_EnableLockdown(exm_key)
exemption_models.Exemption.ChangeState(exm_key, _STATE.CANCELLED)
notify.DeferUpdateEmail(exm_key, _STATE.CANCELLED, transactional=True)
@ndb.transactional
def ChangeTransitiveWhitelisting(host_id, enable):
"""Changes the transitive whitelisting state for a SantaHost.
Args:
host_id: The ID of the SantaHost.
enable: Whether to enable or disable transitive whitelisting.
Raises:
UnsupportedClientError: if called against anything other than a SantaHost.
"""
# Only Santa clients are supported.
host = host_models.Host.get_by_id(host_models.Host.NormalizeId(host_id))
if host.GetClientName() != constants.CLIENT.SANTA:
raise UnsupportedClientError(
'Only Santa clients support transitive whitelisting')
# If this is a no-op, just bail now.
if host.transitive_whitelisting_enabled == enable:
logging.warning(
'Transitive whitelisting is already %s for %s',
'enabled' if enable else 'disabled', host.hostname)
return
# Make the change.
host.transitive_whitelisting_enabled = enable
host.put()
modification = 'enabled' if enable else 'disabled'
logging.info('Transitive whitelisting %s for %s', modification, host.hostname)
# If enabling transitive whitelisting and the SantaHost has an APPROVED
# Exemption, cancel it.
exm_key = exemption_models.Exemption.CreateKey(host_id)
exm = exm_key.get()
if enable and exm and exm.state == constants.EXEMPTION_STATE.APPROVED:
Cancel(exm_key)
# Notify the user of the mode change.
body = template_utils.RenderEmailTemplate(
'transitive_modified.html', modification=modification,
device_hostname=host.hostname, upvote_hostname=env_utils.ENV.HOSTNAME)
subject = 'Developer mode changed: %s' % host.hostname
mail_utils.Send(subject, body, to=[host.primary_user], html=True)
# Note the state change in BigQuery.
comment = 'Transitive whitelisting %s' % modification
tables.HOST.InsertRow(
device_id=host_id,
timestamp=datetime.datetime.utcnow(),
action=constants.HOST_ACTION.COMMENT,
hostname=host.hostname,
platform=constants.PLATFORM.MACOS,
users=[host.primary_user],
mode=host.client_mode,
comment=comment)
|
google/upvote_py2
|
upvote/gae/lib/exemption/api.py
|
api.py
|
py
| 21,555 |
python
|
en
|
code
| 449 |
github-code
|
6
|
71344819069
|
# sets do not allow duplicate values
numeros = set([1,2,1,3,3,4,5,2,6]) # only 1,2,3,4,5,6 are kept
carros = set(('palio','gol','celta','palio')) # only palio,gol,celta are kept
print(numeros)
print(carros)
# Joining sets with union
conjunto_a = {1,2}
conjunto_b = {3,4}
print(conjunto_a.union(conjunto_b))
# Adding items to a set
adicionar = {11,22,33}
adicionar.add(24) # 24 was added
print(adicionar)
# Removing a number from a set
remover = {101,102,103}
remover.discard(101) # 101 was removed
print(remover)
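# Extra example in the same spirit (not in the original file): intersection and difference
conjunto_c = {1,2,3}
conjunto_d = {2,3,4}
print(conjunto_c.intersection(conjunto_d)) # {2, 3}
print(conjunto_c.difference(conjunto_d)) # {1}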
|
Dnx0/trilha-python-dio
|
conjuntos.py
|
conjuntos.py
|
py
| 538 |
python
|
pt
|
code
| 1 |
github-code
|
6
|
22388129127
|
import pygame
# Initialize pygame
pygame.init()
# Set up window
size = (400, 400)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Circle Line")
# Set up colors
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
click_sound = pygame.mixer.Sound("clicked_sound.mp3")
# Set up circles
circle_radius = 10
circle_distance = 100
circle_x1 = (size[0] // 2) - (circle_distance // 2) - circle_radius
circle_x2 = (size[0] // 2) + (circle_distance // 2) - circle_radius
circle_y = size[1] // 2
circle_color1 = RED
circle_color2 = RED
circle1_active = False
circle2_active = False
# Set up line
line_thickness = 5
line_color = GREEN
line_rect = pygame.Rect(0, 0, 0, line_thickness)
line_active = False
# Main game loop
done = False
while not done:
# Handle events
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.MOUSEBUTTONDOWN:
mouse_pos = pygame.mouse.get_pos()
# Toggle circle 1 on/off
if not circle1_active and abs(mouse_pos[0] - (circle_x1 + circle_radius)) <= circle_radius and abs(mouse_pos[1] - (circle_y + circle_radius)) <= circle_radius:
circle1_active = True
circle_color1 = GREEN
# Toggle circle 2 on/off and draw line if both circles are active
elif not circle2_active and abs(mouse_pos[0] - (circle_x2 + circle_radius)) <= circle_radius and abs(mouse_pos[1] - (circle_y + circle_radius)) <= circle_radius:
circle2_active = True
circle_color2 = GREEN
if circle1_active:
line_rect = pygame.Rect(circle_x1 + circle_radius, circle_y + (line_thickness // 2), circle_distance, line_thickness)
line_active = True
click_sound.play()
# Draw everything
    screen.fill(BLACK)
pygame.draw.circle(screen, circle_color1, (circle_x1 + circle_radius, circle_y + circle_radius), circle_radius)
pygame.draw.circle(screen, circle_color2, (circle_x2 + circle_radius, circle_y + circle_radius), circle_radius)
if line_active:
pygame.draw.rect(screen, line_color, line_rect)
pygame.display.update()
# Quit pygame properly
pygame.quit()
|
Vormamim/boxes
|
matrix_stage3.py
|
matrix_stage3.py
|
py
| 2,268 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18910107031
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 24 18:36:51 2022
@author: User
"""
# -*- coding: utf-8 -*-
"""
Created on Thu May 19 20:12:51 2022
@author: User
"""
import urllib.request
# pretty-print python data structures
from pprint import pprint
from nsetools import Nse
from nsepy import get_history
from html_table_parser.parser import HTMLTableParser
# for converting the parsed data in a
# pandas dataframe
import pandas as pd
# Opens a website and read its
# binary contents (HTTP Response Body)
def url_get_contents(url):
# Opens a website and read its
# binary contents (HTTP Response Body)
#making request to the website
req = urllib.request.Request(url=url)
f = urllib.request.urlopen(req)
#reading contents of the website
return f.read()
# defining the html contents of a URL.
nse = Nse()
d = {}
#q = nse.get_quote('infy')
all_stock_codes = nse.get_stock_codes()
def comma(val):
if ',' in val:
return val.replace(',','')
else:
return val
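# Illustrative behaviour: comma("1,234") -> "1234", comma("42") -> "42"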
for stock in list(all_stock_codes.keys())[1:2]:  # a single stock code for this test run
print(stock)
xhtml = url_get_contents('https://www.screener.in/company/'+ 'SUNCLAYLTD' +'/consolidated/').decode('utf-8')
# Defining the HTMLTableParser object
p = HTMLTableParser()
# feeding the html contents in the
# HTMLTableParser object
p.feed(xhtml)
l= []
for idx1 in range(0,len(p.tables)):
for idx2 in range(0,len(p.tables[idx1])):
print(p.tables[idx1][idx2])
if idx1 == 0:
if p.tables[idx1][idx2][0] == 'Sales +':
p_qtr = float(comma(p.tables[idx1][idx2][-2]))
c_qtr = float(comma(p.tables[idx1][idx2][-1]))
qtr_sal = (c_qtr - p_qtr)/p_qtr *100
l.append(p_qtr)
l.append(c_qtr)
l.append(qtr_sal)
if p.tables[idx1][idx2][0] == 'Operating Profit':
p_opr_prf = float(comma(p.tables[idx1][idx2][-2]))
c_opr_prf= float(comma(p.tables[idx1][idx2][-1]))
qtr_opr_prf = (c_opr_prf - p_opr_prf)/p_opr_prf *100
l.append(p_opr_prf)
l.append(c_opr_prf)
l.append(qtr_opr_prf)
if p.tables[idx1][idx2][0] == 'Net Profit':
p_net_prf = float(comma(p.tables[idx1][idx2][-2]))
c_net_prf= float(comma(p.tables[idx1][idx2][-1]))
qtr_net_prf = (c_net_prf - p_net_prf)/p_net_prf *100
l.append(p_net_prf)
l.append(c_net_prf)
l.append(qtr_net_prf)
if idx1 == 1:
if p.tables[idx1][0][-1] != 'TTM':
if p.tables[idx1][idx2][0] == 'Sales +':
p_anl = float(comma(p.tables[idx1][idx2][-2]))
c_anl = float(comma(p.tables[idx1][idx2][-1]))
anl_sal = (c_anl - p_anl)/p_anl *100
l.append(p_anl)
l.append(c_anl)
l.append(anl_sal)
if p.tables[idx1][idx2][0] == 'Operating Profit':
p_opr_prf_a = float(comma(p.tables[idx1][idx2][-2]))
c_opr_prf_a= float(comma(p.tables[idx1][idx2][-1]))
opr_prf_a = (c_opr_prf_a - p_opr_prf_a)/p_opr_prf_a * 100
l.append(p_opr_prf_a)
l.append(c_opr_prf_a)
l.append(opr_prf_a)
if p.tables[idx1][idx2][0] == 'Net Profit':
p_net_prf_a = float(comma(p.tables[idx1][idx2][-2]))
c_net_prf_a= float(comma(p.tables[idx1][idx2][-1]))
net_prf_a = (c_net_prf_a - p_net_prf_a)/p_net_prf_a * 100
l.append(p_net_prf_a)
l.append(c_net_prf_a)
l.append(net_prf_a)
else:
if p.tables[idx1][idx2][0] == 'Sales +':
p_anl = float(comma(p.tables[idx1][idx2][-2]))
c_anl = sum([float(comma(x)) for x in p.tables[idx1-1][idx2][-3:]]) + float(comma(p.tables[idx1][idx2][-1]))
anl_sal = (c_anl - p_anl)/p_anl *100
l.append(p_anl)
l.append(c_anl)
l.append(anl_sal)
if p.tables[idx1][idx2][0] == 'Operating Profit':
p_opr_prf_a = float(comma(p.tables[idx1][idx2][-2]))
c_opr_prf_a= sum([float(comma(x)) for x in p.tables[idx1-1][idx2][-3:]] )+ float(comma(p.tables[idx1][idx2][-1]))
opr_prf_a = (c_opr_prf_a - p_opr_prf_a)/p_opr_prf_a *100
l.append(p_opr_prf_a)
l.append(c_opr_prf_a)
l.append(opr_prf_a)
if p.tables[idx1][idx2][0] == 'Net Profit':
p_net_prf_a = float(comma(p.tables[idx1][idx2][-2]))
c_net_prf_a = sum([float(comma(x)) for x in p.tables[idx1-1][idx2][-3:]]) + float(comma(p.tables[idx1][idx2][-1]))
net_prf_a = (c_net_prf_a - p_net_prf_a)/p_net_prf_a *100
l.append(p_net_prf_a)
l.append(c_net_prf_a)
l.append(net_prf_a)
if p.tables[idx1][idx2][0] == 'Public +':
public_holders = p.tables[idx1][idx2][-1]
l.append(public_holders)
d[stock] = l
df_screener = pd.DataFrame.from_dict(d, orient = 'index')
df_screener.columns = ['prev_qtr_sales','curr_qtr_sales','perc_imp_sales','prev_qtr_oprPro','curr_qtr_oprPro','perc_imp_oprPro','prev_qtr_NetPro','curr_qtr_NetPro','perc_imp_NetPro','prev_anl_sales','curr_anl_sales','perc_imp_sales_anl','prev_anl_oprPro','curr_anl_oprPro','perc_imp_oprPro_anl','prev_anl_NetPro','curr_anl_NetPro','perc_imp_NetPro_anl','public_holding']
df_screener.to_excel(r"E:\Trade\Analysis\screener_data.xlsx")
|
karthickbmca/Trade_code
|
rough.py
|
rough.py
|
py
| 6,475 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71150102587
|
def solution(start, length):
    # XOR together the worker IDs that are actually checked: line i (0-based)
    # covers IDs start + i*length .. start + i*length + (length - i) - 1.
    checksum = 0
    l = length
    while l:
        # xor_sum(a) ^ xor_sum(b) gives the XOR of every integer in [a, b).
        checksum ^= xor_sum(start) ^ xor_sum(start + l)
        start += length
        l -= 1
    return checksum
def xor_sum(n):
    # XOR of all integers in [0, n), using the period-4 pattern of the running XOR.
    if n == 0:
        return 0
    elif (n-1) % 4 == 0:
        return n-1
    elif (n-1) % 4 == 1:
        return 1
    elif (n-1) % 4 == 2:
        return n
    else:
        return 0
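# Quick sanity checks, worked out by hand from the two functions above:
#     solution(0, 3)    # -> 2
#     solution(17, 4)   # -> 14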
|
leander-dsouza/Google_Foobar_2020
|
Level 3/queue-to-do/solution.py
|
solution.py
|
py
| 383 |
python
|
en
|
code
| 4 |
github-code
|
6
|
71903724669
|
# -*- coding: utf-8 -*-
from .core import *
from .. import _logging as logging
import tensorflow as tf
__all__ = [
'StackLayer',
'UnStackLayer',
]
class StackLayer(Layer):
"""
    The :class:`StackLayer` class is a layer for stacking a list of rank-R tensors into one rank-(R+1) tensor, see `tf.stack() <https://www.tensorflow.org/api_docs/python/tf/stack>`__.
Parameters
----------
layers : list of :class:`Layer`
Previous layers to stack.
axis : int
Dimension along which to concatenate.
name : str
A unique layer name.
Examples
---------
>>> x = tf.placeholder(tf.float32, shape=[None, 30])
>>> net = tl.layers.InputLayer(x, name='input')
>>> net1 = tl.layers.DenseLayer(net, 10, name='dense1')
>>> net2 = tl.layers.DenseLayer(net, 10, name='dense2')
>>> net3 = tl.layers.DenseLayer(net, 10, name='dense3')
>>> net = tl.layers.StackLayer([net1, net2, net3], axis=1, name='stack')
... (?, 3, 10)
"""
def __init__(
self,
layers,
axis=1,
name='stack',
):
Layer.__init__(self, prev_layer=layers, name=name)
self.inputs = []
for l in layers:
self.inputs.append(l.outputs)
self.outputs = tf.stack(self.inputs, axis=axis, name=name)
logging.info("StackLayer %s: axis: %d" % (self.name, axis))
# self.all_layers = list(layers[0].all_layers)
# self.all_params = list(layers[0].all_params)
# self.all_drop = dict(layers[0].all_drop)
#
# for i in range(1, len(layers)):
# self.all_layers.extend(list(layers[i].all_layers))
# self.all_params.extend(list(layers[i].all_params))
# self.all_drop.update(dict(layers[i].all_drop))
#
# self.all_layers = list_remove_repeat(self.all_layers)
# self.all_params = list_remove_repeat(self.all_params)
self.all_layers.append(self.outputs)
def unstack_layer(layer, num=None, axis=0, name='unstack'):
"""
    A layer for unstacking the given dimension of a rank-R tensor into rank-(R-1) tensors; see `tf.unstack() <https://www.tensorflow.org/api_docs/python/tf/unstack>`__.
Parameters
----------
layer : :class:`Layer`
Previous layer
num : int or None
The length of the dimension axis. Automatically inferred if None (the default).
axis : int
        Dimension along which to unstack.
name : str
A unique layer name.
Returns
-------
list of :class:`Layer`
The list of layer objects unstacked from the input.
"""
inputs = layer.outputs
with tf.variable_scope(name):
outputs = tf.unstack(inputs, num=num, axis=axis)
logging.info("UnStackLayer %s: num: %s axis: %d, n_outputs: %d" % (name, num, axis, len(outputs)))
net_new = []
scope_name = tf.get_variable_scope().name
if scope_name:
full_name = scope_name + '/' + name
else:
full_name = name
for i, _v in enumerate(outputs):
n = Layer(prev_layer=layer, name=full_name + str(i))
n.outputs = outputs[i]
# n.all_layers = list(layer.all_layers)
# n.all_params = list(layer.all_params)
# n.all_drop = dict(layer.all_drop)
# n.all_layers.append(inputs)
net_new.append(n)
return net_new
# Alias
UnStackLayer = unstack_layer
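# Minimal usage sketch, mirroring the StackLayer docstring example above
# (the tl.layers namespace and layer names are assumptions):
# >>> net = tl.layers.StackLayer([net1, net2, net3], axis=1, name='stack')    # (?, 3, 10)
# >>> nets = tl.layers.UnStackLayer(net, axis=1, name='unstack')              # list of 3 layers, each (?, 10)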
|
feilab-hust/VCD-Net
|
vcdnet/tensorlayer/layers/stack.py
|
stack.py
|
py
| 3,434 |
python
|
en
|
code
| 25 |
github-code
|
6
|
14761591721
|
import os
from twisted.internet import reactor, defer
def read_mail(mailitems):
print(mailitems)
return "Junk Mail... Sending to shredder: " + mailitems
def shred_mail(mailitems):
print('buzzzzz: ' + mailitems)
os.remove('mail')
reactor.stop()
def create_mail(msg):
with open("mail","w") as f:
f.write(msg)
def wait_for_mail(d=None):
if not d:
d = defer.Deferred()
if not os.path.isfile('mail'):
reactor.callLater(1, wait_for_mail, d)
else:
with open("mail") as f:
contents = f.readlines()
d.callback(contents[0])
return d
deferred = wait_for_mail()
deferred.addCallback(read_mail)
deferred.addCallback(shred_mail)
reactor.callLater(2, create_mail, "Look at this new letter!")
reactor.callLater(20, reactor.stop)
reactor.run()
|
mina32/APT_Black
|
mailExample/mailPolling.py
|
mailPolling.py
|
py
| 826 |
python
|
en
|
code
| 0 |
github-code
|
6
|
30056444826
|
import re
from collections import defaultdict
def parse_rule_container(rule):
regex = re.compile(r'(.*) bags contain(.*)')
matches = regex.match(rule)
return matches.group(1), matches.group(2)
def parse_contained_rule(rule):
regex = re.compile(r'((\d+) ([a-z ]+) bag)+')
all_matches = regex.findall(rule)
return [(lambda x: (x[1], x[2]))(match) for match in all_matches]
def contains_bag(contained_bags_arr, expected_color):
for (count, color) in contained_bags_arr:
if color == expected_color:
return True
return False
def main():
f = open("input.txt", "r")
inp = f.read().split('\n')
inp = list(filter(lambda inp_line: len(inp_line) != 0, inp))
rules = [parse_rule_container(inp_line) for inp_line in inp]
fwd_map = defaultdict(lambda: [])
rev_map = defaultdict(lambda: [])
for rule in rules:
container, contained_rule = rule
contained_bags_arr = parse_contained_rule(contained_rule)
fwd_map[container] = contained_bags_arr
for contained_bags in contained_bags_arr:
contained_bags_count, contained_bags_color= contained_bags
rev_map[contained_bags_color].append(container)
ans = set()
possible_container = set(rev_map['shiny gold'])
while len(possible_container) != 0:
for possible_bag in possible_container.copy():
possible_container.remove(possible_bag)
if possible_bag in ans:
continue
ans.add(possible_bag)
for bag in rev_map[possible_bag]:
possible_container.add(bag)
print(ans)
print(len(ans))
if __name__ == '__main__':
main()
|
nithish-thomas/adventOfCode2020
|
aoc20/day7/day7_1.py
|
day7_1.py
|
py
| 1,700 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24529853326
|
import numpy as np
import torch
import torch.nn as nn
from lib.datasets.utils import class2angle
from utils import box_ops
import math
padsize = np.array([28.,11.],dtype=np.float32)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def affine_transform(pt, t):
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
#new_pt = torch.cat((pt, torch.ones(1).to(device=device)), dim=0)
#new_pt = new_pt.unsqueeze(-1)
# expand project points as [N, 3, 1]
#new_pt=torch.matmul(t, new_pt)
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def softget(weighted_depth,outputs_coord):
h,w = weighted_depth.shape[-2], weighted_depth.shape[-1] # 32 58
pts_x =outputs_coord [0]
pts_y = outputs_coord[ 1]
pts_x_low = math.floor(pts_x )
pts_x_high =math.ceil(pts_x )
pts_y_low = math.floor(pts_y )
pts_y_high =math.ceil(pts_y)
pts_x_low = np.clip(pts_x_low, a_min=0,a_max = w-1)
pts_x_high = np.clip(pts_x_high, a_min=0,a_max = w-1)
pts_y_low = np.clip(pts_y_low, a_min=0,a_max = h-1)
pts_y_high =np.clip(pts_y_high, a_min=0,a_max = h-1)
rop_lt = weighted_depth[..., pts_y_low, pts_x_low]
rop_rt = weighted_depth[..., pts_y_low, pts_x_high]
rop_ld = weighted_depth[..., pts_y_high, pts_x_low]
rop_rd = weighted_depth[..., pts_y_high, pts_x_high]
rop_t = (1 - pts_x + pts_x_low) * rop_lt + (1 - pts_x_high + pts_x) * rop_rt
rop_d = (1 - pts_x + pts_x_low) * rop_ld + (1 - pts_x_high + pts_x) * rop_rd
rop = (1 - pts_y + pts_y_low) * rop_t + (1 - pts_y_high + pts_y) * rop_d
return rop
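# Sanity check for the bilinear sampling above (illustrative, worked out by hand):
#     softget(np.array([[0., 1.], [2., 3.]]), (0.5, 0.5))   # -> 1.5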
def decode_detections(dets, info, calibs, cls_mean_size, threshold,trans_inv,depthmaps,pre_denorm,weighted_depth):
'''
NOTE: THIS IS A NUMPY FUNCTION
input: dets, numpy array, shape in [batch x max_dets x dim]
input: img_info, dict, necessary information of input images
input: calibs, corresponding calibs for the input batch
output:
'''
results = {}
for i in range(dets.shape[0]): # batch
preds = []
for j in range(dets.shape[1]): # max_dets
cls_id = int(dets[i, j, 0])
score = dets[i, j, 1]
if score < threshold:
continue
# 2d bboxs decoding
x = dets[i, j, 2] * 928
y = dets[i, j, 3] * 512
w = dets[i, j, 4] * 928
h = dets[i, j, 5] * 512
#x = dets[i, j, 2] * info['img_size'][i][0]
#y = dets[i, j, 3] * info['img_size'][i][1]
#w = dets[i, j, 4] * info['img_size'][i][0]
#h = dets[i, j, 5] * info['img_size'][i][1]
bbox = np.array([x-w/2, y-h/2, x+w/2, y+h/2])
bbox[:2] = bbox[:2]-padsize
bbox[2:] =bbox[2:] -padsize
bbox[:2] = bbox[:2]*2.2#affine_transform(bbox[:2], trans_inv[i])
bbox[2:] = bbox[2:]*2.2#affine_transform(bbox[2:], trans_inv[i])
# 3d bboxs decoding
# depth decoding
depth_p = dets[i, j, 6]
# dimensions decoding
dimensions = dets[i, j, 31:34]
dimensions += cls_mean_size[int(cls_id)]
# positions decoding
#x3d = dets[i, j, 34]
#y3d = dets[i, j, 35]
size = np.array([928,512])
#size = torch.tensor(size).to(device)
pad = np.array([28,11])
#pad =torch.tensor(pad).to( device)
pad2 = np.array([2,1])
#pad2 =torch.tensor(pad2).to(device)
#coord =(dets[i, j, 34:36]*size-pad)/16+[2,1]
            coord = (dets[i, j, 34:36]*size-pad)/16  # (original note, garbled Chinese, roughly: these were originally divided by 35.2 for x, y)
pts = np.array(coord)
w = 56
h = 31
pts_x = pts[0]
pts_x = np.clip(pts_x, a_min=0,a_max =w-1)
pts_y = pts[1]
pts_y = np.clip(pts_y , a_min=0,a_max = h-1)
denorm = pre_denorm[i] # 32,58,4
P = calibs[i].P2
#coord = np.array([i+2,j+1])
'''
d = denorm[int(pts_y)+1,int(pts_x)+2] #[16.69273 5.326345]
W =torch.tensor([[P[0,0]/35.2,0,P[0,2]/35.2-coord[0]],[0,P[1,1]/35.2,P[1,2]/35.2-coord[1]],[d[0],d[1],d[2]]])
result = torch.tensor([0,0,-d[3]]).reshape(-1,1)
W_inv = torch.inverse(W)
vvxyz = torch.mm(W_inv,result)
depth=vvxyz[2,0]
'''
'''
#print(coord.shape)
#weighteddepth = depthmaps[i]
coord = np.array([pts_x+2,pts_y+1])
weighteddepth = weighted_depth[i]
#weighteddepth = weighteddepth.transpose(1,0) #
weighteddepth = weighted_depth.cpu().numpy() # 32,58
depth = softget(weighteddepth,coord)
'''
x3d = dets[i, j, 34] * 928
y3d = dets[i, j, 35] * 512
x3d = x3d - padsize[0] # -28
y3d = y3d - padsize[1] #-11
xy = np.array([x3d, y3d])
xy= xy *2.2 #affine_transform(xy , trans_inv[i])
#xy= affine_transform(xy , trans_inv[i])
x3d = xy[0]
y3d = xy[1]
#x3d = dets[i, j, 34] * info['img_size'][i][0]
#y3d = dets[i, j, 35] * info['img_size'][i][1]
#locations = calibs[i].img_to_rect(x3d, y3d, depth).reshape(-1)
locations = calibs[i].img_to_rect(x3d, y3d, depth_p).reshape(-1)
#locations[1] += dimensions[0] / 2
# heading angle decoding
alpha = get_heading_angle(dets[i, j, 7:31])
ry = calibs[i].alpha2ry(alpha, x)
score = score * dets[i, j, -1]
preds.append([cls_id, alpha] + bbox.tolist() + dimensions.tolist() + locations.tolist() + [ry, score])
results[info['img_id'][i]] = preds
return results
def extract_dets_from_outputs(outputs, K=50, topk=50):
# get src outputs
# b, q, c
out_logits = outputs['pred_logits']
out_bbox = outputs['pred_boxes']
prob = out_logits.sigmoid()
topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), topk, dim=1)
# final scores
scores = topk_values
# final indexes
topk_boxes = (topk_indexes // out_logits.shape[2]).unsqueeze(-1)
# final labels
labels = topk_indexes % out_logits.shape[2]
heading = outputs['pred_angle']
size_3d = outputs['pred_3d_dim']
depth = outputs['pred_depth'][:, :, 0: 1]
sigma = outputs['pred_depth'][:, :, 1: 2]
sigma = torch.exp(-sigma)
# decode
boxes = torch.gather(out_bbox, 1, topk_boxes.repeat(1, 1, 6)) # b, q', 4
xs3d = boxes[:, :, 0: 1]
ys3d = boxes[:, :, 1: 2]
heading = torch.gather(heading, 1, topk_boxes.repeat(1, 1, 24))
depth = torch.gather(depth, 1, topk_boxes)
sigma = torch.gather(sigma, 1, topk_boxes)
size_3d = torch.gather(size_3d, 1, topk_boxes.repeat(1, 1, 3))
corner_2d = box_ops.box_cxcylrtb_to_xyxy(boxes)
xywh_2d = box_ops.box_xyxy_to_cxcywh(corner_2d)
size_2d = xywh_2d[:, :, 2: 4]
xs2d = xywh_2d[:, :, 0: 1]
ys2d = xywh_2d[:, :, 1: 2]
batch = out_logits.shape[0]
labels = labels.view(batch, -1, 1)
scores = scores.view(batch, -1, 1)
xs2d = xs2d.view(batch, -1, 1)
ys2d = ys2d.view(batch, -1, 1)
xs3d = xs3d.view(batch, -1, 1)
ys3d = ys3d.view(batch, -1, 1)
detections = torch.cat([labels.float(), scores, xs2d, ys2d, size_2d, depth, heading, size_3d, xs3d, ys3d, sigma], dim=2)
#detections = torch.cat([labels.float(), scores, xs2d, ys2d, size_2d, heading, size_3d, xs3d, ys3d ], dim=2)
return detections
############### auxiliary function ############
def _nms(heatmap, kernel=3):
padding = (kernel - 1) // 2
heatmapmax = nn.functional.max_pool2d(heatmap, (kernel, kernel), stride=1, padding=padding)
keep = (heatmapmax == heatmap).float()
return heatmap * keep
def _topk(heatmap, K=50):
batch, cat, height, width = heatmap.size()
# batch * cls_ids * 50
topk_scores, topk_inds = torch.topk(heatmap.view(batch, cat, -1), K)
topk_inds = topk_inds % (height * width)
topk_ys = (topk_inds / width).int().float()
topk_xs = (topk_inds % width).int().float()
# batch * cls_ids * 50
topk_score, topk_ind = torch.topk(topk_scores.view(batch, -1), K)
topk_cls_ids = (topk_ind / K).int()
topk_inds = _gather_feat(topk_inds.view(batch, -1, 1), topk_ind).view(batch, K)
topk_ys = _gather_feat(topk_ys.view(batch, -1, 1), topk_ind).view(batch, K)
topk_xs = _gather_feat(topk_xs.view(batch, -1, 1), topk_ind).view(batch, K)
return topk_score, topk_inds, topk_cls_ids, topk_xs, topk_ys
def _gather_feat(feat, ind, mask=None):
'''
Args:
feat: tensor shaped in B * (H*W) * C
ind: tensor shaped in B * K (default: 50)
mask: tensor shaped in B * K (default: 50)
Returns: tensor shaped in B * K or B * sum(mask)
'''
dim = feat.size(2) # get channel dim
ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim) # B*len(ind) --> B*len(ind)*1 --> B*len(ind)*C
feat = feat.gather(1, ind) # B*(HW)*C ---> B*K*C
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat) # B*50 ---> B*K*1 --> B*K*C
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def _transpose_and_gather_feat(feat, ind):
'''
Args:
feat: feature maps shaped in B * C * H * W
ind: indices tensor shaped in B * K
Returns:
'''
feat = feat.permute(0, 2, 3, 1).contiguous() # B * C * H * W ---> B * H * W * C
feat = feat.view(feat.size(0), -1, feat.size(3)) # B * H * W * C ---> B * (H*W) * C
feat = _gather_feat(feat, ind) # B * len(ind) * C
return feat
def get_heading_angle(heading):
heading_bin, heading_res = heading[0:12], heading[12:24]
cls = np.argmax(heading_bin)
res = heading_res[cls]
return class2angle(cls, res, to_label_format=True)
|
HIYYJX/MonoGAE
|
lib/helpers/decode_helper.py
|
decode_helper.py
|
py
| 10,233 |
python
|
en
|
code
| 4 |
github-code
|
6
|
40974269745
|
import numpy
import os
numpy.seterr('ignore')
luz = int(300000000)
while True:
inicio = input("ยฟque deseas?(entrar, salir o limpiar): \n>")
print ("")
if inicio == "entrar" or inicio == "enter":
opcion = input("elige la magnitud(energia, fuerza o recorrido): \n>")
if opcion == "energia" or opcion == "energy" or opcion == "energi":
try:
print(" ")
masa_en_reposo = float(input("ingresa la masa (kg): \n>"))
print(" ")
aceleracion = float(input("ingresa la aceleracion (m/s): \n>"))
if aceleracion > (luz):
print ("las ecuaciones de einstein impiden velocidades superluminicas por lo que intentarlo conlleva numeros โrotosโ aun asรญ, si te interesa ver el resultado, elimina esta condicion del codigo y veras un โnanโ.")
print("")
else:
relatividad = (masa_en_reposo/(numpy.sqrt(1-(aceleracion**2)/luz**2)))
energia = (0.5*(relatividad * (aceleracion**2)))
print ("la energia usada son:", energia,"J\n")
except:
print("")
print(" ")
elif opcion == "fuerza" or opcion == "momento lineal" or opcion == "momentum" or opcion == "impetรบ":
try:
print("")
masa_en_reposo = float(input("ingresa la masa (kg): \n>"))
print(" ")
aceleracion = float(input("ingresa la aceleracion (m/s): \n>"))
print(" ")
if (aceleracion) > (luz):
print ("las ecuaciones de einstein impiden velocidades superluminicas por lo que intentarlo conlleva numeros โrotosโ aun asรญ, si te interesa ver el resultado, elimina esta condicion del codigo y veras un โnanโ.")
print("")
else:
relatividad = (masa_en_reposo/(numpy.sqrt(1-(aceleracion**2)/luz**2)))
fuerza = (relatividad * aceleracion)
peso = (fuerza/9.81)
print ("la fuerza (newtons) es:",fuerza,"N")
print (" ")
print("la fuerza (kilogramos) es:", peso, "kg/f\n")
print(" ")
except:
print("")
print(" ")
elif opcion == "recorrido":
try:
print(" ")
espacio = float(input("ingresa la distancia a recorrer(m):\n>"))
print(" ")
velocidad = float(input("ingresa la velocidad (m/s): \n>"))
print("")
if velocidad > (luz):
print ("las ecuaciones de einstein impiden velocidades superluminicas por lo que intentarlo conlleva numeros โrotosโ aun asรญ, si te interesa ver el resultado, elimina esta condicion del codigo y veras un โnanโ.")
print("")
else:
relatividad = (espacio *(numpy.sqrt(1-(velocidad**2)/(luz**2))))
dilatacion = (1*(numpy.sqrt(1-(velocidad**2)/(luz**2))))
tiempo = (relatividad/velocidad)
total= (tiempo/dilatacion)
print ("recorrer la distancia de", espacio,"metros", "toma", tiempo, "segundos")
print(" ")
except:
print("")
else:
print("")
elif inicio == "salir" or inicio == "exit":
break
elif inicio == "limpiar" or inicio == "clean":
if os.name == "posix":
os.system ("clear")
elif os.name == "ce" or os.name == "nt" or os.name == "dos":
os.system ("cls")
else:
print("ingresa opcion valida")
print(" ")
|
infra-determista/calculadora-relativista
|
Calculadora-relativista.py
|
Calculadora-relativista.py
|
py
| 3,349 |
python
|
es
|
code
| 0 |
github-code
|
6
|
36010827079
|
import json
import serial
import numpy as np
class CameraMLX90640(serial.Serial):
"""
    Implements communications with camera_mlx90640_firmware
"""
FRAME_HEIGHT = 24
FRAME_WIDTH = 32
def __init__(self, port):
self.port_param = {'port': port, 'baudrate': 115200, 'timeout': 2.0}
super().__init__(**self.port_param)
self.num_throw_away = 10
self.throw_away_lines()
def throw_away_lines(self):
"""
        Throw away the first few lines. Deals with the case where the user has updated
        the firmware, which writes a bunch of text to the serial port.
"""
self.timeout = 0.1
for i in range(self.num_throw_away):
line = self.readline()
self.timeout = self.port_param['timeout']
def send_and_receive(self, msg_dict):
"""
Send and receive message from the device.
"""
msg_json = f'{json.dumps(msg_dict)}\n'
self.write(msg_json.encode())
rsp_json = self.read_until()
rsp_json = rsp_json.strip()
rsp_dict = {}
try:
rsp_dict = json.loads(rsp_json.decode('utf-8'))
except json.decoder.JSONDecodeError as e:
print(f'Error decoding json message: {e}')
return rsp_dict
def grab_frame(self):
"""
        Grab a frame from the camera and convert it to a numpy array
"""
cmd = {'cmd': 'frame'}
rsp = self.send_and_receive(cmd)
try:
frame = np.array(rsp['frame'])
except KeyError:
frame = np.zeros((self.FRAME_HEIGHT,self.FRAME_WIDTH))
ok = False
else:
frame = np.array(frame)
frame = np.reshape(frame, (self.FRAME_HEIGHT,self.FRAME_WIDTH))
frame = np.flipud(frame)
frame = np.fliplr(frame)
ok = True
return ok, frame
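# Minimal usage sketch (the serial port name below is an assumption):
#     cam = CameraMLX90640('/dev/ttyACM0')
#     ok, frame = cam.grab_frame()
#     if ok:
#         print(frame.shape)   # (24, 32)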
|
willdickson/camera_mlx90640
|
camera_mlx90640/camera_mlx90640.py
|
camera_mlx90640.py
|
py
| 1,876 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29951489140
|
from .base import Shape
class Circle(Shape):
def __init__(self, radius, **kwargs):
super().__init__(**kwargs)
self.radius = radius
def draw(self, turtle):
turtle.penup()
turtle.goto(self.center_x, self.center_y - self.radius) # From docs: The center is radius units left of the turtle;
turtle.pendown()
turtle.color(self.color)
turtle.circle(self.radius)
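# Minimal usage sketch (assumes Shape.__init__ accepts the center_x, center_y and
# color keyword arguments used in draw() above):
#     import turtle
#     t = turtle.Turtle()
#     Circle(50, center_x=0, center_y=0, color='red').draw(t)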
|
disulfiram/SoftUni-PythonProgramming
|
06 Python OOP/01 Shape Extension/Shapes/circle.py
|
circle.py
|
py
| 424 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41814241194
|
from django.shortcuts import render, get_object_or_404
from django.http import Http404
from .models import Question
# Create your views here.
def index(request):
try:
latest_question_list = Question.objects.order_by('pub_date')[:5]
except Question.DoesNotExist:
raise Http404('Question does not exist')
context = {
'latest_question_list': latest_question_list,
}
return render(request, 'pools/index.html', context)
def detail(request, question_id):
question = get_object_or_404(Question, pk=question_id)
context = {
'question': question
}
return render(request, 'pools/detail.html', context)
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
context = {
'question': question
}
return render(request, 'pools/results.html', context)
def vote(request, question_id):
question = get_object_or_404(Question, pk=question_id)
context = {
'question': question
}
return render(request, 'pools/vote.html', context)
|
Vladimir-vut/django_mysite
|
mysite/pools/views.py
|
views.py
|
py
| 1,074 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36947412448
|
from functools import partial
from random import shuffle
from kivy.clock import Clock
from kivy.app import App
from kivy.lang import Builder
import ani_property
ani_property.install()
KV_CODE = r'''
GridLayout:
rows: 4
cols: 4
padding: 20
spacing: 20
'''
def shuffle_children(widget, dt):
children = widget.children[:]
widget.clear_widgets()
shuffle(children)
for c in children:
widget.add_widget(c)
class SampleApp(App):
def build(self):
return Builder.load_string(KV_CODE)
def on_start(self):
from kivy.uix.button import Button
from ani_property import AniMagnet
grid = self.root
for i in range(grid.rows * grid.cols):
label = Button(text=str(i), font_size=50, opacity=0.5)
magnet = AniMagnet()
magnet.add_widget(label)
grid.add_widget(magnet)
Clock.schedule_interval(partial(shuffle_children, grid), 3)
if __name__ == '__main__':
SampleApp().run()
|
gottadiveintopython/ani-property
|
examples/magnet.py
|
magnet.py
|
py
| 1,011 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11342650070
|
# -*- encoding: utf-8 -*-
import requests
from scdl import CLIENT_ID
class Client():
def get_collection(self, url, token, maxpage):
params = {
'client_id': CLIENT_ID,
'linked_partitioning': '1',
}
if token:
params['oauth_token'] = token
resources = list()
count = 0
while url and count < maxpage:
response = requests.get(url,
headers={
"Sec-Fetch-Mode":"cors",
"Origin": "https://soundcloud.com",
"Authorization": "OAuth {}".format(token),
"Content-Type": "application/json",
"Accept": "application/json, text/javascript, */*; q=0.1",
"Referer": "https://soundcloud.com/",
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36",
"DNT": "1",
})
response.raise_for_status()
json_data = response.json()
if 'collection' in json_data:
resources.extend(json_data['collection'])
else:
resources.extend(json_data)
if 'next_href' in json_data:
url = json_data['next_href']
count += 1
else:
url = None
return resources
|
jz1/scdl
|
scdl/client.py
|
client.py
|
py
| 1,399 |
python
|
en
|
code
| null |
github-code
|
6
|
29906192183
|
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "Surface Heat Diffuse Skinning",
"author": "mesh online",
"version": (3, 4, 2),
"blender": (2, 80, 0),
"location": "View3D > UI > Mesh Online",
"description": "Surface Heat Diffuse Skinning",
"warning": "",
"wiki_url": "http://www.mesh-online.net/vhd.html",
"category": "Object"
}
import bpy
import sys
import os
import time
import platform
from subprocess import PIPE, Popen
from threading import Thread
from bpy.props import *
from queue import Queue, Empty
class SFC_OT_ModalTimerOperator(bpy.types.Operator):
"""Operator which runs its self from a timer"""
bl_idname = "wm.surface_heat_diffuse"
bl_label = "Surface Heat Diffuse Skinning"
bl_options = {'REGISTER', 'UNDO'}
_timer = None
_pid = None
_queue = None
_objs = []
_permulation = []
_selected_indices = []
_selected_group_index_weights = []
_start_time = None
def write_bone_data(self, obj, filepath):
f = open(filepath, 'w', encoding='utf-8')
f.write("# surface heat diffuse bone export.\n")
amt = obj.data
bpy.ops.object.mode_set(mode='EDIT')
for bone in amt.edit_bones:
if bone.use_deform:
world_bone_head = obj.matrix_world @ bone.head
world_bone_tail = obj.matrix_world @ bone.tail
f.write("b,{},{},{},{},{},{},{}\n".format(
bone.name.replace(",", "\\;"), world_bone_head[0], world_bone_head[1], world_bone_head[2],
world_bone_tail[0], world_bone_tail[1], world_bone_tail[2]))
bpy.ops.object.mode_set(mode='OBJECT')
f.close()
def write_mesh_data(self, objs, filepath):
f = open(filepath, 'w', encoding='utf-8')
f.write("# surface heat diffuse mesh export.\n")
vertex_offset = 0
for obj in objs:
for v in obj.data.vertices:
world_v_co = obj.matrix_world @ v.co
f.write("v,{},{},{}\n".format(world_v_co[0], world_v_co[1], world_v_co[2]))
for poly in obj.data.polygons:
f.write("f");
for loop_ind in poly.loop_indices:
vert_ind = obj.data.loops[loop_ind].vertex_index
f.write(",{}".format(vertex_offset + vert_ind))
f.write("\n")
vertex_offset += len(obj.data.vertices)
f.close()
def read_weight_data(self, objs, filepath):
# make permulation for all vertices
        vertex_offset = 0
for obj in objs:
for index in range(len(obj.data.vertices)):
self._permulation.append((vertex_offset + index, index, obj))
vertex_offset += len(obj.data.vertices)
if bpy.context.scene.surface_protect:
for index in range(len(objs)):
obj = objs[index]
# get selected vertex indices
self._selected_indices.append([i.index for i in obj.data.vertices if i.select])
self._selected_group_index_weights.append([])
# push protected vertices weight
for vert_ind in self._selected_indices[index]:
for g in obj.data.vertices[vert_ind].groups:
self._selected_group_index_weights[index].append((obj.vertex_groups[g.group].name, vert_ind, g.weight))
f = open(filepath, 'r', encoding='utf-8')
bones = []
for line in f:
if len(line) == 0:
continue
tokens = line.strip("\r\n").split(",")
if tokens[0] == "b":
group_name = tokens[1].replace("\\;", ",")
bones.append(group_name)
for obj in objs:
#check for existing group with the same name
if None != obj.vertex_groups.get(group_name):
group = obj.vertex_groups[group_name]
obj.vertex_groups.remove(group)
obj.vertex_groups.new(name = group_name)
if tokens[0] == "w":
group_name = bones[int(tokens[2])]
index = int(tokens[1])
vert_ind = self._permulation[index][1]
weight = float(tokens[3])
obj = self._permulation[index][2]
# protect vertices weight
if bpy.context.scene.surface_protect and vert_ind in self._selected_indices[objs.index(obj)]:
continue
obj.vertex_groups[group_name].add([vert_ind], weight, 'REPLACE')
f.close()
if bpy.context.scene.surface_protect:
for index in range(len(objs)):
obj = objs[index]
# pop protected vertices weight
for (group_name, vert_ind, weight) in self._selected_group_index_weights[index]:
obj.vertex_groups[group_name].add([vert_ind], weight, 'REPLACE')
def modal(self, context, event):
if event.type == 'ESC':
self._pid.terminate()
return self.cancel(context)
if event.type == 'TIMER':
# background task is still running
if None == self._pid.poll():
# read line without blocking
try: rawline = self._queue.get_nowait()
except Empty:
pass
else:
line = rawline.decode().strip("\r\n")
self.report({'INFO'}, line)
else:
# background task finished running
self.read_weight_data(self._objs, os.path.join(os.path.dirname(__file__), "data", "untitled-weight.txt"))
running_time = time.time() - self._start_time
self.report({'INFO'}, "".join(("Complete, ", "running time: ", \
str(int(running_time / 60))," minutes ", str(int(running_time % 60)), " seconds")))
# bind meshes to the armature
bpy.ops.object.parent_set(type='ARMATURE')
return self.cancel(context)
return {'RUNNING_MODAL'}
def execute(self, context):
arm_count = 0
obj_count = 0
for ob in bpy.context.selected_objects:
if 'ARMATURE' == ob.type:
arm_count += 1
if 'MESH' == ob.type:
obj_count += 1
if not (context.mode == 'OBJECT' and arm_count == 1 and obj_count >= 1):
self.report({'ERROR'}, "Please select one armature and at least one mesh in 'OBJECT' mode, then try again.")
return {'CANCELLED'}
self._objs = []
self._permulation = []
self._selected_indices = []
self._selected_group_index_weights = []
arm = None
objs = []
# get armature and mesh
for ob in bpy.context.selected_objects:
if 'ARMATURE' == ob.type:
arm = ob
if 'MESH' == ob.type:
objs.append(ob)
# sort meshes by name
        objs.sort(key=lambda obj: obj.name)
# save the reference for later use
self._objs = objs
for obj in objs:
# focus on the mesh
bpy.context.view_layer.objects.active = obj
# synchronize data
bpy.ops.object.mode_set(mode='OBJECT')
# write mesh data
self.write_mesh_data(objs, os.path.join(os.path.dirname(__file__), "data", "untitled-mesh.txt"))
# we must focus on the armature before we can write bone data
bpy.context.view_layer.objects.active = arm
# synchronize data
bpy.ops.object.mode_set(mode='OBJECT')
# write bone data
self.write_bone_data(arm, os.path.join(os.path.dirname(__file__), "data", "untitled-bone.txt"))
# do voxel skinning in background
ON_POSIX = 'posix' in sys.builtin_module_names
# chmod
if ON_POSIX:
os.chmod(os.path.join(os.path.dirname(__file__), "bin", platform.system(), "shd"), 0o755)
def enqueue_output(out, queue):
for line in iter(out.readline, b''):
queue.put(line)
out.close()
executable_path = None
if platform.system() == 'Windows':
if platform.machine().endswith('64'):
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "x64", "shd")
else:
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "x86", "shd")
else:
executable_path = os.path.join(os.path.dirname(__file__), "bin", platform.system(), "shd")
self._pid = Popen([executable_path,
"untitled-mesh.txt",
"untitled-bone.txt",
"untitled-weight.txt",
str(context.scene.surface_resolution),
str(context.scene.surface_loops),
str(context.scene.surface_samples),
str(context.scene.surface_influence),
str(context.scene.surface_falloff),
context.scene.surface_sharpness,
"y" if context.scene.detect_surface_solidify else "n"],
cwd = os.path.join(os.path.dirname(__file__), "data"),
stdout = PIPE,
bufsize = 1,
close_fds = ON_POSIX)
self._queue = Queue()
t = Thread(target=enqueue_output, args=(self._pid.stdout, self._queue))
t.daemon = True
t.start()
self._start_time = time.time()
# start timer to poll data
self._timer = context.window_manager.event_timer_add(0.1, window=context.window)
context.window_manager.modal_handler_add(self)
return {'RUNNING_MODAL'}
def cancel(self, context):
# remove timer
context.window_manager.event_timer_remove(self._timer)
self._objs = []
self._permulation = []
self._selected_indices = []
self._selected_group_index_weights = []
return {'CANCELLED'}
def init_properties():
bpy.types.Scene.surface_resolution = IntProperty(
name = "Voxel Resolution",
description = "Maximum voxel grid size",
default = 128,
min = 32,
max = 1024)
bpy.types.Scene.surface_loops = IntProperty(
name = "Diffuse Loops",
description = "Heat diffuse pass = Voxel Resolution * Diffuse Loops",
default = 5,
min = 1,
max = 9)
bpy.types.Scene.surface_samples = IntProperty(
name = "Sample Rays",
description = "Ray samples count",
default = 64,
min = 32,
max = 128)
bpy.types.Scene.surface_influence = IntProperty(
name = "Influence Bones",
description = "Max influence bones per vertex, please decrease the value (such as 4) for mobile devices",
default = 8,
min = 1,
max = 128)
bpy.types.Scene.surface_falloff = FloatProperty(
name = "Diffuse Falloff",
description = "Heat diffuse falloff",
default = 0.2,
min = 0.01,
max = 0.99)
bpy.types.Scene.surface_protect = BoolProperty(
name = "Protect Selected Vertex Weight",
description = "Protect selected vertex weight",
default = False)
bpy.types.Scene.surface_sharpness = EnumProperty(
name = "Edges",
description = "Edges",
items = [
('1','Soft','Soft Curvature'),
('2','Normal','Normal Curvature'),
('3','Sharp','Sharp Curvature'),
('4','Sharpest','Sharpest Curvature')],
default = '3')
bpy.types.Scene.detect_surface_solidify = BoolProperty(
name = "Detect Solidify",
description = "Detect solidified clothes, if you enable this option, make sure that all bones are in the charecter's volume, otherwise, the result may be wrong",
default = False)
def clear_properties():
props = ["surface_resolution",
"surface_samples",
"surface_falloff",
"surface_loops",
"surface_influence",
"surface_protect"]
for p in props:
if p in bpy.types.Scene.bl_rna.properties:
exec("del bpy.types.Scene." + p)
class SFC_PT_SurfaceHeatDiffuseSkinningPanel(bpy.types.Panel):
"""Creates a Panel in the Object properties window"""
bl_label = "Surface Heat Diffuse Skinning"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = 'Mesh Online'
@classmethod
def poll(self, context):
return True
def draw(self, context):
layout = self.layout
layout.prop(context.scene, 'surface_resolution', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_loops', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_samples', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_influence', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_falloff', icon='BLENDER', toggle=True)
layout.prop(context.scene, 'surface_sharpness')
layout.prop(context.scene, 'surface_protect')
layout.prop(context.scene, 'detect_surface_solidify')
row = layout.row()
row.operator("wm.surface_heat_diffuse")
def register():
bpy.utils.register_class(SFC_PT_SurfaceHeatDiffuseSkinningPanel)
bpy.utils.register_class(SFC_OT_ModalTimerOperator)
init_properties()
def unregister():
bpy.utils.unregister_class(SFC_PT_SurfaceHeatDiffuseSkinningPanel)
bpy.utils.unregister_class(SFC_OT_ModalTimerOperator)
clear_properties()
if __name__ == "__main__":
register()
|
meshonline/Surface-Heat-Diffuse-Skinning
|
addon/surface_heat_diffuse_skinning/__init__.py
|
__init__.py
|
py
| 14,549 |
python
|
en
|
code
| 170 |
github-code
|
6
|
10812461830
|
# -*- coding: utf-8 -*-
import os
from sys import path
from django.conf.global_settings import (TEMPLATE_CONTEXT_PROCESSORS,
STATICFILES_FINDERS)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
path.append(os.path.join(BASE_DIR, 'apps'))
SECRET_KEY = '_c#^&2xzwd@xt@i2b5kftn+*-9$t&l+bg9&zb3@^jq)&^s38*d'
DEBUG = False
TEMPLATE_DEBUG = True
# Application definition
DJANGO_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
)
THIRD_PARTY_APPS = (
'compressor',
# 'south',
'typogrify',
'bourbon',
'meta',
)
LOCAL_APPS = (
'usrs',
'wknd',
)
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
)
ROOT_URLCONF = 'wknd_project.urls'
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Australia/Adelaide'
USE_I18N = False
USE_L10N = True
USE_TZ = False
STATIC_URL = '/s/'
STATIC_ROOT = os.path.join(BASE_DIR, '../static_root')
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS += (
'compressor.finders.CompressorFinder',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Django compressor
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'django_libsass.SassCompiler'),
)
# Django extensions.
GRAPH_MODELS = {
'all_applications': True,
'group_models': True,
}
# WKND defaults
#AUTH_PROFILE_MODULE = 'usrs.Profile'
APPLICATION_PER_DAY_LIMIT = 2
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/'
# Django Meta
META_SITE_PROTOCOL = 'http'
META_SITE_DOMAIN = 'wkndcrew.com'
META_SITE_TYPE = 'website'
META_DEFAULT_KEYWORDS = META_INCLUDE_KEYWORDS = ['events', 'South Australia', 'Adelaide', 'WKND crew']
META_USE_OG_PROPERTIES = True
META_USE_TWITTER_PROPERTIES = True
|
andreyshipilov/wknd_django
|
wknd_project/settings/common.py
|
common.py
|
py
| 2,378 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40899444822
|
"""DeviceManagement URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url, patterns
from django.contrib import admin
from Dmanage import views
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^borrowDeviceForm/$',views.borrowDeviceForm,name='borrowDeviceForm'),
url(r'^return_device/$',views.return_device,name='returnDevice'),
url(r'^list/$',views.list,name='list'),
url(r'^list/(?P<device_sn_slug>\w+)/history/$',views.device_history,name='device_history'),
url(r'^list/data$',views.list_data,name='list_data'),
url(r'^list/(?P<device_sn_slug>\w+)/history/data$',views.device_history_data,name='device_history_data'),
url(r'^login/$', views.user_login, name='login'),
url(r'^logout/$', views.user_logout, name='logout'),
]
|
BensonXiong/DeviceManagement
|
DeviceManagement/DeviceManagement/urls.py
|
urls.py
|
py
| 1,371 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19593396615
|
import h5py
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from mpl_toolkits.mplot3d import Axes3D
from sklearn.metrics import roc_auc_score, roc_curve
from tensorflow.keras import datasets, layers, models
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from ocnn import OneClassNeuralNetwork
def main():
data = h5py.File('Data/http.mat', 'r')
(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
X = np.array(x_train, dtype=np.float32).T
"""
Mapping derived from http://odds.cs.stonybrook.edu/smtp-kddcup99-dataset/ and http://odds.cs.stonybrook.edu/http-kddcup99-dataset/
"""
feature_index_to_name = {0: "duration",
1: "src_bytes",
2: "dst_bytes"}
num_features = 32
num_hidden = 32
r = 1.0
epochs = 10
nu = 0.001
oc_nn = OneClassNeuralNetwork(num_features, num_hidden, r)
model, history = oc_nn.train_model(x_train, epochs=epochs, nu=nu)
plt.style.use("ggplot")
plt.figure()
plt.plot(history.epoch, history.history["loss"], label="train_loss")
plt.plot(history.epoch, history.history["quantile_loss"], label="quantile_loss")
plt.plot(history.epoch, history.history["r"], label="r")
plt.title("OCNN Training Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(loc="upper right")
y_pred = model.predict(x_test)
roc_auc_score(y_test, y_pred)
if __name__ == "__main__":
main()
exit()
|
Yoichiro-Y/oc-nn-ensemble
|
.history/mnist_20221216201506.py
|
mnist_20221216201506.py
|
py
| 1,596 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34253272584
|
from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
import re # the regex module
# create a regular expression object that we'll use later
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
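# Illustrative sketch (not part of the original model): how EMAIL_REGEX is meant
# to be used; the sample addresses below are invented for demonstration only.
def _email_regex_example():
    assert EMAIL_REGEX.match("ada@example.com") is not None
    assert EMAIL_REGEX.match("not-an-email") is None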
class User:
def __init__( self , data ):
self.id = data['id']
self.name = data['name']
self.location = data['location']
self.language = data['language']
self.comment = data['comment']
self.created_at = data['created_at']
self.updated_at = data['updated_at']
# Now we use class methods to query our database
# Now we use class methods to query our database
@classmethod
def get_all(cls):
query = "SELECT * FROM dojo_survey;"
# make sure to call the connectToMySQL function with the schema you are targeting.
results = connectToMySQL('dojo_survey_schema').query_db(query)
# Create an empty list to append our instances of friends
users = []
# Iterate over the db results and create instances of friends with cls.
for user in results:
users.append( cls(user) )
return users
# GET A SPECIFIC USER
@classmethod
def get_user_by_id(cls, data):
query = "SELECT * FROM dojo_survey WHERE dojo_survey.id = %(id)s;"
results = connectToMySQL('dojo_survey_schema').query_db(query, data)
if results:
return results[0]
return False
#CREATE
@classmethod
def save(cls, data ):
query = "INSERT INTO dojo_survey ( name , location , language , comment, created_at, updated_at ) VALUES ( %(name)s , %(location)s , %(language)s ,%(comment)s , NOW() , NOW() );"
# data is a dictionary that will be passed into the save method from server.py
return connectToMySQL('dojo_survey_schema').query_db( query, data )
@staticmethod
def validate_user(user):
is_valid = True # we assume this is true
if len(user['name']) < 3:
flash("Name must be at least 3 characters.", "firstNameRegister")
is_valid = False
if not user.get('location'):
flash("You must select a location.", "locationRegister")
is_valid = False
if not user.get('language'):
flash("You must select a language.", "languageRegister")
is_valid = False
if len(user['comment']) < 3:
flash("Comment must be at least 3 characters.", "commentRegister")
is_valid = False
# test whether a field matches the pattern
if not EMAIL_REGEX.match(user['email']):
flash("Invalid email address!")
is_valid = False
return is_valid
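# Hedged usage sketch (added for illustration, not in the original file): the
# dictionary shape User.save expects, matching the %(...)s placeholders in its
# INSERT query. The values are made up.
EXAMPLE_SURVEY_ROW = {
    "name": "Ada",
    "location": "Chicago",
    "language": "Python",
    "comment": "Great course!",
}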
|
gerald-cakoni/PythonAssignments
|
flask_mysql/validation/dojo_survey/flask_app/models/user.py
|
user.py
|
py
| 2,765 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23555067153
|
#!/usr/bin/python3
import numpy, tqdm, json, os, random
import scipy.signal
import util
import speech_features
RATE = 8000
NUMCEP = 16
CLASSES = 45
LENGTH = 4000
DELTA = 2000
LABEL_SCALE = 100
LABEL_SAVE_JSON = "switchboard-labels.json"
OUT_FILE = "melspecs-switchboard.npy"
EMPTY = "<EMPTY>"
SIL = "SIL"
SIL_DROPOUT = 0.5
def load(specf):
specf = os.path.join(specf, OUT_FILE)
fragfile = util.FragmentedFile(specf)
return fragfile.load()
def view(specf):
SAMPLES = 5
import matplotlib
matplotlib.use("agg")
from matplotlib import pyplot
data = load(specf)
fig, axes = pyplot.subplots(nrows=2, ncols=SAMPLES)
fig.set_size_inches(18, 6)
kmap, imap = load_label_map(os.path.join(specf, LABEL_SAVE_JSON))
size = len(kmap)
for i, (x, y, l) in zip(range(SAMPLES), data):
axes[0, i].imshow(x.T, cmap="hot", interpolation="bicubic", aspect="auto")
y = util.onehot(y, size).T
for j in range(len(y)):
axes[1, i].plot(y[j])
title = [imap[i] for i in l if i]
axes[0, i].set_title(", ".join(title))
pyplot.savefig("switchboard-mfcc-samples.png", bbox_inches="tight")
def create_spectrograms(dataf):
data = list(_load(dataf))
Xa = []
Xb = []
ya = []
yb = []
la = []
lb = []
all_labels = set()
for num, rate, waveA, waveB, pA, pB, sA, sB in tqdm.tqdm(data, desc="Processing data", ncols=80):
assert rate == RATE
waveA = remove_noise(waveA)
waveB = remove_noise(waveB)
yA = match_labels(waveA, pA)
yB = match_labels(waveB, pB)
for wavA, slcA, slcy in slice_step(waveA, yA, pA, LENGTH, DELTA):
if keep_slice(slcA):
melA = convert_spectrogram(wavA)
Xa.append(melA)
ya.append(slcA)
la.append(slcy)
all_labels.update(slcA)
for wavB, slcB, slcy in slice_step(waveB, yB, pB, LENGTH, DELTA):
if keep_slice(slcB):
melB = convert_spectrogram(wavB)
Xb.append(melB)
yb.append(slcB)
lb.append(slcy)
all_labels.update(slcB)
print('''
***
Skipped %d files because they were shorter than 1 second.
***
''' % SKIPPED)
all_labels = sorted(all_labels)
assert all_labels[0] == EMPTY
assert len(all_labels) == CLASSES + 1
DATA_DIR = os.path.dirname(dataf)
label_file = os.path.join(DATA_DIR, LABEL_SAVE_JSON)
save_label_map(all_labels, label_file)
keymap, idxmap = load_label_map(label_file)
ya = convert_key2idx(keymap, ya)
yb = convert_key2idx(keymap, yb)
la = convert_key2idx(keymap, la)
lb = convert_key2idx(keymap, lb)
assert len(Xa) == len(ya) == len(la)
assert len(Xb) == len(yb) == len(lb)
out_file = os.path.join(DATA_DIR, OUT_FILE)
X = Xa + Xb
Y = ya + yb
L = la + lb
assert len(X) == len(Y) == len(L)
L = pad_fitlargest(L)
fragfile = util.FragmentedFile(out_file)
fragfile.dump(len(X), zip(X, Y, L))
# === PRIVATE ===
SKIPPED = 0
def pad_fitlargest(labels):
"Because the first class is EMPTY, we can use that as padding."
longest = max(map(len, labels))
print("Labels padded to length: %d" % longest)
def pad(arr):
out = numpy.zeros(longest).astype(numpy.int32)
out[:len(arr)] = arr
return out
return list(map(pad, labels))
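# Hedged example (added for illustration, not part of the original pipeline):
# pad_fitlargest pads shorter label sequences with the EMPTY class (index 0).
def _pad_fitlargest_example():
    labels = [numpy.array([3, 3, 7]), numpy.array([5])]
    # -> [array([3, 3, 7]), array([5, 0, 0])], both of length 3
    return pad_fitlargest(labels)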
def keep_slice(slc):
if all([v==SIL for v in slc]):
return random.random() > SIL_DROPOUT
else:
return True
def slice_step(wav, lab, phns, length, step):
if len(wav) == len(lab) and len(wav) > length:
def locate_phns(i):
out = []
for name, start, end, pid in phns:
if start > end:
start, end = end, start
if start >= i+length:
break
elif end < i:
continue
else:
out.append(name)
return out
d, r = divmod(len(wav)-length, step)
for i in range(0, d*step, step):
yield wav[i:i+length], lab[i:i+length][::LABEL_SCALE], locate_phns(i)
if r:
yield wav[-length:], lab[-length:][::LABEL_SCALE], locate_phns(len(wav)-length)
else:
global SKIPPED
SKIPPED += 1
def remove_noise(data):
b, a = scipy.signal.butter(2, 40/(8000/2), btype="highpass")
data = scipy.signal.lfilter(b, a, data)
return data
def convert_key2idx(keymap, y):
out = []
for arr in tqdm.tqdm(y, desc="Converting labels to ints", ncols=80):
out.append(numpy.array([keymap[v] for v in arr]))
return out
def save_label_map(labels, fname):
with open(fname, "w") as f:
json.dump(labels, f)
def load_label_map(fname):
with open(fname, "r") as f:
labels = json.load(f)
print("Classes: %d" % (len(labels)-1))
assert CLASSES + 1 == len(labels)
idxmap = dict(enumerate(labels))
keymap = {k:i for i, k in idxmap.items()}
return keymap, idxmap
def _load(dataf):
with open(dataf, "rb") as f:
with tqdm.tqdm(desc="Loading %s" % dataf, ncols=80) as bar:
while True:
try:
yield numpy.load(f)
bar.update()
except OSError:
break
def convert_spectrogram(wav):
return speech_features.mfcc(wav, samplerate=RATE, numcep=NUMCEP)
def match_labels(wav, phns):
y = [EMPTY] * len(wav)
for name, start, end, pid in phns:
if start > end:
start, end = end, start
y[start:end] = [name] * (end-start)
return y
@util.main(__name__)
def main(fname, sample=0):
sample = int(sample)
if sample:
view(fname)
else:
create_spectrograms(fname)
|
ychnlgy/Switchboard2.0
|
src/load.py
|
load.py
|
py
| 5,989 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24370174876
|
import unittest
import matplotlib.pyplot as plt
class TestCreateParticles(unittest.TestCase):
def test_create_particles(self):
from src.dataio import GridIO, FlowIO
from src.create_particles import Particle, LaserSheet, CreateParticles
# Read-in the grid and flow file
grid = GridIO('../data/shocks/shock_test.sb.sp.x')
grid.read_grid()
grid.compute_metrics()
flow = FlowIO('../data/shocks/shock_test.sb.sp.q')
flow.read_flow()
# Set particle data
p = Particle()
p.min_dia = 144e-9
p.max_dia = 573e-9
p.mean_dia = 281e-9
p.std_dia = 97e-9
p.density = 810
p.n_concentration = 5000
p.compute_distribution()
# print(p.particle_field)
# Read-in the laser sheet
laser = LaserSheet(grid)
laser.position = 0.0009
laser.thickness = 0.0001 # Adjusted based on grid thickness
laser.pulse_time = 1e-7
laser.compute_bounds()
# print(laser.width)
# Create particle locations array
ia_bounds = [None, None, None, None]
loc = CreateParticles(grid, flow, p, laser, ia_bounds)
loc.ia_bounds = [0, 0.003, 0, 0.001]
loc.in_plane = 90
loc.compute_locations()
loc.compute_locations2()
# Sample code to plot particle locations and relative diameters
_in_plane = int(p.n_concentration * loc.in_plane * 0.01)
# plot in-plane particle locations
plt.scatter(loc.locations[:_in_plane, 0], loc.locations[:_in_plane, 1],
s=10*loc.locations[:_in_plane, 3]/p.min_dia, c='g')
# plot out-of-plane locations
plt.scatter(loc.locations[_in_plane:, 0], loc.locations[_in_plane:, 1],
s=10*loc.locations[_in_plane:, 3]/p.min_dia, c='r')
plt.xlim([-0.0001, 0.004])
plt.ylim([0, 0.0019])
# plt.show()
# plot in-plane particle locations
plt.figure()
plt.scatter(loc.locations2[:_in_plane, 0], loc.locations2[:_in_plane, 1],
s=10 * loc.locations2[:_in_plane, 3] / p.min_dia, c='g')
# plot out-of-plane locations
plt.scatter(loc.locations2[_in_plane:, 0], loc.locations2[_in_plane:, 1],
s=10 * loc.locations2[_in_plane:, 3] / p.min_dia, c='r')
plt.xlim([-0.0001, 0.004])
plt.ylim([0, 0.0019])
plt.show()
if __name__ == '__main__':
unittest.main()
|
kalagotla/syPIV
|
test/test_create_paritcles.py
|
test_create_paritcles.py
|
py
| 2,496 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16103320682
|
# BOJ 1018: for every 8x8 sub-board of the input board, count how many squares
# must be repainted to obtain a valid chessboard pattern, and print the minimum.
n, m = map(int, input().split())
arr = list()
for i in range(0, n):
    arr.append(input())
rtn2 = 64
flag = 0
for y0 in range(0, n - 8 + 1):
    for x0 in range(0, m - 8 + 1):
        rtn = 0
        for y in range(0, 8):
            for x in range(0, 8):
                # flag encodes the colour expected on this square
                if arr[y0 + y][x0 + x] == 'W' and flag == 0:
                    rtn += 1
                elif arr[y0 + y][x0 + x] == 'B' and flag == 1:
                    rtn += 1
                flag = flag ^ 1
            flag = flag ^ 1
        # the board could also start with the opposite colour
        rtn = min(rtn, 64 - rtn)
        rtn2 = min(rtn, rtn2)
print(rtn2)
|
hynoes/baekjoon
|
bj1018s5.py
|
bj1018s5.py
|
py
| 545 |
python
|
en
|
code
| 1 |
github-code
|
6
|
72109396027
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
get_ipython().run_line_magic('pylab', 'inline')
import pandas
import seaborn
# # Load CSV file into memory
# In[2]:
data = pandas.read_csv('~/Documents/python/uber-raw-data-apr14.csv')
# In[3]:
data.tail()
# # Convert datetime and insert column
# In[4]:
dt = '4/30/2014 23:22:00'
# In[5]:
d, t = dt.split (' ')
print (d)
print (t)
# In[6]:
m, d, y = dt.split('/')
# In[7]:
d
# In[8]:
int(d)
# In[9]:
dt = pandas.to_datetime(dt)
# In[10]:
data['Date/Time'] = data['Date/Time'].map(pandas.to_datetime)
# In[11]:
data.tail()
# In[12]:
def get_dom(dt):
return dt.day
data ['dom'] = data['Date/Time'].map(get_dom)
# In[13]:
data.tail()
# In[14]:
def get_weekday(dt):
return dt.weekday()
data['weekday'] = data['Date/Time'].map(get_weekday)
def get_hour(dt):
return dt.hour
data['hour'] = data['Date/Time'].map(get_hour)
data.tail()
# # Analysis
# ## Analysis by date
# In[15]:
hist(data.dom, bins = 30, rwidth=.8, range=(0.5, 30.5))
xlabel('Date of the month')
ylabel('frequency')
title('Frequency Data Uber 2014')
# In[16]:
#for k, rows in data.groupby('dom'):
# print((k, rows))
# print((k, len(rows)))
def count_rows(rows):
return len(rows)
by_date = data.groupby('dom').apply(count_rows)
by_date
# In[17]:
plot(by_date)
# In[18]:
bar(range(1, 31), by_date)
# In[19]:
by_date_sorted = by_date.sort_values()
by_date_sorted
# In[20]:
bar(range(1, 31), by_date_sorted)
# In[21]:
bar(range(1, 31), by_date_sorted)
xticks(range(1, 31), by_date_sorted.index)
xlabel('Date of the month')
ylabel('frequency')
title('Frequency Data Uber 2014')
("")
# ## Analysis by hour
# In[30]:
hist(data.hour, bins=24, range=(.5, 24))
# In[ ]:
# ## Weekday analysis
# In[33]:
hist(data.weekday, bins=7, range =(-.5, 6.5), rwidth=.8, color='#AA6666', alpha=.4)
xticks(range(7), 'Mon Tue Wed Thu Fri Sat Sun'.split())
# In[35]:
data.groupby('hour weekday'.split()).apply(count_rows)
# In[36]:
data.groupby('hour weekday'.split()).apply(count_rows).unstack()
# In[40]:
data.groupby('weekday hour'.split()).apply(count_rows).unstack()
# In[41]:
by_cross = data.groupby('weekday hour'.split()).apply(count_rows).unstack()
# In[42]:
seaborn.heatmap(by_cross)
# # By latitude and longitude
# In[44]:
hist(data['Lat'])
# In[47]:
hist(data['Lat'], bins=100, range=(40.5, 41))
("")
# In[49]:
hist(data['Lon'], bins=100)
("")
# In[54]:
hist(data['Lon'], bins=100, range=(-74.1, -73.9))
("")
# In[60]:
hist(data['Lat'], bins=100, range=(40.5, 41), color= 'g', alpha=.5)
twiny()
hist(data['Lon'], bins=100, range=(-74.1, -73.9), color='r', alpha=.5)
("")
# In[66]:
hist(data['Lat'], bins=100, range=(40.5, 41), color= 'g', alpha=.5, label ='latitude')
grid()
legend(loc='upper left')
twiny()
hist(data['Lon'], bins=100, range=(-74.1, -73.9), color='r', alpha=.5, label ='longitude')
grid()
legend(loc='best')
("")
# In[79]:
plot(data['Lat'], data['Lon'], '.', ms=3, color='green', alpha=.5)
# In[84]:
figure(figsize=(20, 20))
plot(data['Lon'], data['Lat'], '.', ms=1, color='green', alpha=.5)
xlim(-74.2, -73.7)
ylim(40.7, 41 )
# In[ ]:
|
dennyalfani/Learn_data_analys
|
Data Visualisation/Uber/data analysis uber.py
|
data analysis uber.py
|
py
| 3,244 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18481146182
|
import time
t1 = time.time()
def divn(num):
    """Return [num] if num can be written as a product i * (num/i) whose digits,
    together with the digits of num, form a 1-9 pandigital set; otherwise []."""
    l = []
    arr = [1,2,3,4,5,6,7,8,9]
    for i in range(2,int(num**0.5)+1):
b = num%i
if b == 0:
rem = int(num/i)
g = [int(i) for i in list(str(num) + str(rem) + str(i))]
if len(g) == 9:
res = [x for x in arr if x in g]
if len(res) == 9:
if num not in l:
l.append(num)
return l
result = []
for i in range(1000,10000):
req = divn(i)
if len(req)>0:
print(req)
result+= req
print(sum(result))
t2 = time.time()
print(t2-t1)
'''def factors(n):
return sorted(list(reduce(list.__add__,
([i, n//i] for i in range(1, int(n**0.5) + 1) if n % i == 0))))'''
|
BreadBug007/Project-Euler
|
Prob_32.py
|
Prob_32.py
|
py
| 798 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24488924931
|
from serre_biblio import *
from toit import Toit
from uselect import poll, POLLIN
from machine import Pin, ADC
from time import sleep, ticks_ms
try:
import usocket as socket
except:
import socket
toit = Toit( )
# ============================================================================= b_poussoir (...)
bp_flag = 0 # Warning: global variable
def Set_BP_flag ( Pin ):
global bp_flag
bp_flag +=1
def b_poussoir():
""" Si appui sur le BP -> interruption -> ouverture ou fermeture du toit """
global bp_flag
if bp_flag > 0:
if not toit.OPEN :
toit.ouvrir()
elif not toit.CLOSE :
toit.fermer()
else :
toit.fermer()
bp_flag = 0
bp.irq ( trigger=Pin.IRQ_RISING, handler=Set_BP_flag )
# ============================================================================= web_page (...)
def web_page( )->str:
    # ----------------------------------------- Roof state
    if toit.CLOSE :
        toit_html="FERMÉ"
    elif toit.OPEN :
        toit_html="OUVERT"
    else :
        toit_html="## INCONNU ##"
    # ----------------------------------------- Temperature
    temperature_html = str(capteurBME.temperature)
    # ----------------------------------------- Read the HTML / CSS / JS code
    page_html = lire_fichier ( "index.html" )
    fichier_css = lire_fichier ( "style.css" )
    fichier_js = lire_fichier ( "script.js" )
    # ----------------------------------------- Embed the JS and CSS into the HTML
    page_html = page_html.replace("<fichier_css>",fichier_css)
    page_html = page_html.replace("<fichier_js>",fichier_js)
    page_html = page_html.replace("<variable_temperature>", temperature_html)
    page_html = page_html.replace("<variable_toit>", toit_html)
    return page_html # Return the rendered HTML page
# ==================================================================================== main
if not toit.CLOSE :
toit.fermer ( )
if toit.CLOSE and Wifi_Connected : # Everything is OK => wait for a request
my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
my_socket.bind(('', 80))
my_socket.listen(3)
print("En attente de connexion ...")
repondre = False
    while True : # Running in AP_IF (access point) mode
connexion = []
poller = poll()
poller.register(my_socket, POLLIN)
i = 0
while connexion==[] :
connexion = poller.poll(1000)
            b_poussoir() # Check the push button once per second
        conn, addr = my_socket.accept()
        print("\nRéception d'une requête depuis : "+ str(addr))
        request = conn.recv(1024)
        request = str(request)
        ouverture = request.find('/?open_toit=1') # Parse the arguments from the URL
        fermeture = request.find('/?close_toit=1')
        requete = request.find('GET / HTTP')
        if requete == 2 : # Avoid handling spurious connections
repondre = True
if ouverture == 6:
if not toit.OPEN :
toit.ouvrir ( )
repondre = True
if fermeture == 6:
if not toit.CLOSE :
toit.fermer ( )
repondre = True
if repondre :
            reponse_html = web_page( ) # Build the web page
conn.send('HTTP/1.1 200 OK\n')
conn.send('Content-Type: text/html\n')
conn.send('Connection: close\n\n')
conn.sendall(reponse_html)
conn.close()
repondre = False
|
SoproLab/MiniSerre
|
main_MiniSerre.py
|
main_MiniSerre.py
|
py
| 3,626 |
python
|
fr
|
code
| 0 |
github-code
|
6
|
2310558186
|
import requests
import os
from requests_oauthlib import OAuth2Session
from dotenv import load_dotenv
load_dotenv()
from config import twitter_scopes
from mongo_db import mongo
redirect_uri = os.environ.get("REDIRECT_URI")
client_id = os.environ.get("CLIENT_ID")
class Token_Manager():
def __init__(self):
self.client_id = os.environ.get("CLIENT_ID")
self.client_secret = os.environ.get("CLIENT_SECselfRET")
self.token_url = "https://api.twitter.com/2/oauth2/token"
def get_userid(self, token):
url = "https://api.twitter.com/2/users/me"
user_id = requests.request(
"GET",
url,
headers={
"Authorization": "Bearer {}".format(token["access_token"]),
"Content-Type": "application/json",
}
)
print(user_id.json())
print()
self.user_id = user_id.json()['data']['id']
return self.user_id
def save_token(self, refreshed_token):
collection_name = 'user_tokens'
user_id = self.get_userid(refreshed_token)
new_token_entry = {"user_id": user_id, "token": refreshed_token}
mongo.save_to_collection(new_token_entry, collection_name, user_id)
def refresh_token(self, token):
twitter = OAuth2Session(client_id, redirect_uri=redirect_uri, scope=twitter_scopes)
refreshed_token = twitter.refresh_token(
client_id=self.client_id,
client_secret=self.client_secret,
token_url=self.token_url,
refresh_token=token["refresh_token"],
)
self.save_token(refreshed_token)
return refreshed_token
tm = Token_Manager()
if __name__ == "__main__":
print("Executing TokenManager as main file")
|
tyrovirtuosso/Twitter_Bookmark-Manager
|
token_manager.py
|
token_manager.py
|
py
| 1,780 |
python
|
en
|
code
| 6 |
github-code
|
6
|
39869333421
|
from django import forms
from .models import Order, ProductReview
PRODUCT_QUANTITY_CHOICES = [(i, str(i)) for i in range(1, 21)]
class CustomSelectWidget(forms.Select):
template_name = 'store/tst.html'
class OrderForm(forms.Form):
'''in the single_product view, we pass the product instance to the form.'''
def __init__(self, instance, *args, **kwargs):
super(OrderForm, self).__init__(*args, **kwargs)
self.instance = instance
self.fields['size'] = forms.ModelChoiceField(
queryset=self.instance.sizes.all(),
widget=forms.RadioSelect())
self.fields['quantity'] = forms.IntegerField(widget=forms.NumberInput(attrs={'class': 'form-control'}), initial=1)
class ReviewForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ReviewForm, self).__init__(*args, **kwargs)
#remove help text from form fields
for field in self.fields:
self.fields[field].label = ''
class Meta:
model = ProductReview
fields = ['name', 'email', 'comment',]
widgets = {'name': forms.widgets.TextInput(attrs={
'placeholder': 'Name'}),
'email': forms.widgets.TextInput(attrs={
"placeholder": 'Email'}),
'comment': forms.widgets.Textarea(attrs={
"placeholder": 'Review', "rows": "5"}),
}
class OrderAddressForm(forms.ModelForm):
class Meta:
model = Order
fields = ['first_name', 'last_name', 'country', 'city', 'address', 'postal_code']
|
TodorToshev/Own-Blueprint
|
store/forms.py
|
forms.py
|
py
| 1,612 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42663326199
|
import os
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
plt.ioff()
import seaborn
def compare_abs_auroc_differences(results_dir,
model_a_path, model_a_descriptor,
model_b_path, model_b_descriptor):
"""Take the results of eval_by_scan_attr.calculate_model_perf_by_scan_attr()
for two different models and make comparison plots.
<results_dir> is the path to the directory in which to save the results
<model_a_path> is the path to the directory in which the results of
eval_by_scan_attr.py are stored for Model A
<model_b_path> is the path to the directory in which the results of
eval_by_scan_attr.py are stored for Model B
<model_a_descriptor> and <model_b_descriptor> are descriptive strings
that will be used in generating the plots
For each scan attribute, a df will be loaded that has the following format:
the columns are different scan attribute options. For example if the
attribute is StationName, then the options (the columns) can
include 'DMPRAD3FORCE', 'DMP_CT1', 'DMP_CT2', 'CCCT3Revo',
'IPCT1', 'CaryCT1',...,'CTC1'.
the rows are different abnormalities, such as 'lung_nodule',
'heart_cardiomegaly', and 'h_great_vessel_atherosclerosis'
the values are AUROCs calculated for that particular scan attribute
option and abnormality."""
if not os.path.exists(results_dir):
os.mkdir(results_dir)
#Create plots for all the attributes
for attribute in ['SliceThickness','PatientAgeYears',
'orig_square','orig_numslices','orig_slope','orig_inter',
'orig_yxspacing','orig_zdiff',
'Manufacturer','ManufacturerModelName','InstitutionName',
'StationName','SoftwareVersions','ConvolutionKernel',
'PatientSex','EthnicGroup','IterativeReconAnnotation',
'IterativeReconConfiguration','IterativeReconLevel',
'ProtocolName','ReconAlgo','ReconAlgoManuf']:
model_a_df = pd.read_csv(os.path.join(model_a_path, attribute+'_AUROC.csv'),header=0,index_col=0)
model_b_df = pd.read_csv(os.path.join(model_b_path,attribute+'_AUROC.csv'),header=0,index_col=0)
model_a_df_w_diff = add_diff_column(model_a_df,model_a_descriptor)
model_b_df_w_diff = add_diff_column(model_b_df,model_b_descriptor)
#Combine the dataframes
combined = pd.concat([model_a_df_w_diff, model_b_df_w_diff],axis=0, ignore_index=True)
#sort by model and then by difference
combined = combined.sort_values(by=['Model','Max AUROC Difference'],ascending=[False,True])
#make plots
make_bar_plot_per_abnormality(combined, attribute, results_dir)
make_boxplot_agg_abnormalities(combined, attribute, results_dir)
make_boxplot_agg_abnormality_groups(combined, attribute, results_dir)
def add_diff_column(df, model_descriptor):
"""Calculate the maximum AUROC difference between the different scan
attribute options for each abnormality, and reformat the df for subsequent
seaborn plotting.
A bigger Max AUROC Difference is worse, because it means the performance
varies a lot by that scan attribute.
A smaller difference is good, because it means the performance for
that abnormality is consistent across the different scan attribute
options.
For example, if the AUROC difference is very large for 'cardiomegaly'
depending on different StationName (CT scanner) attribute options, that
suggests the model might be cheating and using information about the
CT scanner to predict 'cardiomegly.'
<df> is a pandas dataframe with the format described in the
docstring for compare_abs_auroc_differences()
<model_descriptor> is a descriptive string"""
df['Maximum'] = df.max(axis=1)
df['Minimum'] = df.min(axis=1)
df['Max AUROC Difference'] = (df['Maximum']-df['Minimum'])
#drop 'Count' row
df = df.drop(index='Count')
#add a column indicating the model so you can use seaborn barplot easily
df['Model'] = model_descriptor
#make the abnormality index into a column so you can use seaborn barplot easily
df.reset_index(inplace=True)
df = df.rename(columns = {'index':'Abnormality'})
#keep only the 3 columns needed for plotting
df = df[['Abnormality','Max AUROC Difference','Model']]
return df
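# Illustrative sketch (added here for clarity, not part of the original evaluation
# code): a toy AUROC table showing what add_diff_column computes. The scanner names
# and AUROC values below are invented.
def _add_diff_column_example():
    toy = pd.DataFrame(
        {'ScannerA': [0.90, 0.80], 'ScannerB': [0.70, 0.78]},
        index=['lung_nodule', 'heart_cardiomegaly'])
    toy.loc['Count'] = [100, 50]  # add_diff_column drops this bookkeeping row
    # Max AUROC Difference comes out as 0.20 for lung_nodule and 0.02 for
    # heart_cardiomegaly; larger differences suggest scanner-dependent behaviour.
    return add_diff_column(toy, 'ToyModel')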
def make_bar_plot_per_abnormality(combined, attribute, results_dir):
"""Make bar plot where each abnormality has two bars, one bar for Model A
and one bar for Model B. The y axis shows the Max AUROC Difference, so
lower is better."""
fig, ax = plt.subplots(figsize=(16,8))
seaborn.barplot(x = 'Abnormality', y = 'Max AUROC Difference', data = combined,
hue = 'Model', hue_order = ['Base','Mask'], ax = ax)
plt.xticks(rotation=90, fontsize='x-small')
plt.savefig(os.path.join(results_dir,attribute+'_BarPerAbn.png'))
plt.close()
def make_boxplot_agg_abnormalities(combined, attribute, results_dir):
"""Boxplot where different abnormalities are aggregated for each model,
and the y axis shows the Max AUROC Difference, so a lower overall
boxplot is better"""
fig, ax = plt.subplots(figsize=(6,6))
seaborn.boxplot(x = 'Model', y = 'Max AUROC Difference', data = combined, ax = ax, order=['Base','Mask'])
plt.title('Max AUROC Difference \nAcross Abnormalities',fontsize='xx-large')
increase_label_sizes(plt)
plt.savefig(os.path.join(results_dir,attribute+'_BoxAggAbns.png'))
plt.close()
def make_boxplot_agg_abnormality_groups(combined, attribute, results_dir):
"""Grouped boxplot where different abnormalities are aggregated for each
model, but abnormalities are split up according to their organ: lung,
heart, great_vessel, or mediastinum. The y axis shows the Max AUROC
Difference, so a lower overall boxplot is better."""
#Assign an organ to each abnormality
combined['Organ']=''
for idx in combined.index.values.tolist():
abnormality = combined.at[idx,'Abnormality']
if 'lung' in abnormality:
combined.at[idx,'Organ'] = 'lung'
elif 'heart' in abnormality:
combined.at[idx,'Organ'] = 'heart'
elif 'vessel' in abnormality:
combined.at[idx,'Organ'] = 'great_vessel'
elif 'mediastinum' in abnormality:
combined.at[idx,'Organ'] = 'mediastinum'
#Sanity check: make sure every abnormality has an organ assigned
assert combined[combined['Organ']==''].shape[0]==0
#Make plot
fig, ax = plt.subplots(figsize=(8,8))
seaborn.boxplot(x = 'Model', y = 'Max AUROC Difference', order = ['Base','Mask'],
hue = 'Organ', data = combined, ax = ax, palette = 'mako')
plt.title('Max AUROC Difference\nAcross Grouped Abnormalities',fontsize='xx-large')
increase_label_sizes(plt)
plt.savefig(os.path.join(results_dir,attribute+'_BoxAggAbnsByOrgan.png'))
plt.close()
def increase_label_sizes(plt):
"""Increase the axis label sizes for a seaborn plot"""
#https://stackoverflow.com/questions/43670164/font-size-of-axis-labels-in-seaborn?rq=1
for ax in plt.gcf().axes:
current_xlabels = ax.get_xlabel()
ax.set_xlabel(current_xlabels, fontsize='x-large')
current_ylabels = ax.get_ylabel()
ax.set_ylabel(current_ylabels, fontsize='x-large')
plt.xticks(fontsize='x-large')
plt.yticks(fontsize='x-large')
|
rachellea/explainable-ct-ai
|
src/evals/eval_by_scan_attr_compare.py
|
eval_by_scan_attr_compare.py
|
py
| 7,694 |
python
|
en
|
code
| 3 |
github-code
|
6
|
4143583368
|
import torch
import torchvision
from torchvision.io import read_image
import os
from torch.utils.data import Dataset
import pandas as pd  # used by the Paining dataset below to read its annotation CSV
"""
This part of the script is an easy API to load and get the datasets
"""
def get_dataset(dataset: str, c_angle=30, new_size=[32, 32], batch_size=300):
"""
:param new_size:
:param c_angle:
:param dataset: chosen dataset
:return: train loader ,test loader and input size
"""
if dataset == 'FASHION_MNIST':
train_set = FASHION_MNIST('./data/' + dataset + '/', download=True, train=True,
transform=torchvision.transforms.ToTensor())
test_set = FASHION_MNIST('./data/' + dataset + '/', download=True, train=False,
transform=torchvision.transforms.ToTensor())
input_size = (28, 28, 1)
elif dataset == 'Rotate FASHION_MNIST':
rotate_tran_fun = lambda x: rotate_tran(x, angle=c_angle)
train_set = FASHION_MNIST('./data/' + dataset + f'_Rotate_{c_angle}/', download=True, train=True,
transform=rotate_tran_fun)
test_set = FASHION_MNIST('./data/' + dataset + f'_Rotate_{c_angle}/', download=True, train=False,
transform=rotate_tran_fun)
input_size = (28, 28, 1)
elif dataset == 'LFW':
train_set = LFW('./data/' + dataset + '/', split='train',
transform=torchvision.transforms.ToTensor(), download=True)
test_set = LFW('./data/' + dataset + '/', split='test',
transform=torchvision.transforms.ToTensor(), download=True)
input_size = (250, 250)
elif dataset == 'LFW_resize':
resize_tran_fun = lambda x: resize_tran(x, new_size=new_size)
train_set = LFW('./data/' + dataset + f'_{new_size}/', split='train', transform=resize_tran_fun, download=True)
test_set = LFW('./data/' + dataset + f'_{new_size}/', split='test', transform=resize_tran_fun, download=True)
input_size = (new_size[0], new_size[1], 3)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size, shuffle=True, drop_last=True)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=True, drop_last=True)
return train_loader, test_loader, input_size, batch_size
def rotate_tran(img, angle):
    # ToTensor is a transform class, so instantiate it before applying it to the image
    tensor_img = torchvision.transforms.ToTensor()(img)
    return torchvision.transforms.functional.rotate(img=tensor_img, angle=angle)
def resize_tran(img, new_size=[32, 32]):
    tensor_img = torchvision.transforms.ToTensor()(img)
    return torchvision.transforms.functional.resize(img=tensor_img, size=new_size)
class FASHION_MNIST(torchvision.datasets.FashionMNIST):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __getitem__(self, index):
return super().__getitem__(index)[0]
class LFW(torchvision.datasets.LFWPeople):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __getitem__(self, index):
return super().__getitem__(index)[0]
class Paining(Dataset):
    def __init__(self, img_dir, annotations_file, transform=None, target_transform=None):
        self.img_dir = img_dir
        # assume the annotations file is a CSV of (filename, label) rows, as in the
        # standard torchvision custom-dataset recipe; __len__/__getitem__ index into it
        self.img_labels = pd.read_csv(annotations_file)
        self.transform = transform
        self.target_transform = target_transform
def __len__(self):
return len(self.img_labels)
def __getitem__(self, idx):
img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
image = read_image(img_path)
label = self.img_labels.iloc[idx, 1]
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image
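# Minimal usage sketch (added for illustration; not part of the original project).
# It builds a random PIL image and runs the transforms defined above on it.
if __name__ == "__main__":
    from PIL import Image
    import numpy as np
    demo_img = Image.fromarray(np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8))
    rotated = rotate_tran(demo_img, angle=30)            # tensor of shape (3, 64, 64)
    resized = resize_tran(demo_img, new_size=[32, 32])   # tensor of shape (3, 32, 32)
    print(rotated.shape, resized.shape)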
|
TamirShazman/ML2-Project
|
code/datasets.py
|
datasets.py
|
py
| 3,740 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36387303386
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from sklearn.model_selection import train_test_split
from matplotlib.colors import LinearSegmentedColormap
from sklearn.preprocessing import LabelEncoder
def prep_data(data):
# Assuming y is your target labels
y = data['target'].values
# Normalize the data
data_normalized = data.iloc[:, :-1].values / 255.0
# Convert data to PyTorch tensors
X_tensor = torch.tensor(data_normalized, dtype=torch.float32)
y_tensor = torch.tensor(y, dtype=torch.long)
# Split the data into training and testing sets
X_train_tensor, X_test_tensor, y_train_tensor, y_test_tensor = train_test_split(X_tensor, y_tensor, test_size=0.2, random_state=42)
num_classes = len(set(y))
print("Unique classes in target labels:", num_classes)
return X_train_tensor,y_train_tensor,X_test_tensor,y_test_tensor,num_classes
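# Hedged usage sketch (not from the original project): prep_data expects a DataFrame
# whose pixel columns come first and whose last column is named 'target'. The random
# data below is only meant to show the expected shapes.
def _prep_data_example():
    fake = pd.DataFrame(np.random.randint(0, 256, size=(20, 784)))
    fake['target'] = np.random.randint(0, 10, size=20)
    return prep_data(fake)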
def create_model(MaxoutNetworkWithSoftmax):
input_size = 784
num_classes = 10
device = torch.device("cpu")
# Define the model
model = MaxoutNetworkWithSoftmax(input_size, num_classes)
model = model.to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Define the loss function
criterion = nn.CrossEntropyLoss()
return model,criterion,optimizer
def create_model_2(DeepMaxoutNetwork,X_train_tensor):
# Hyperparameters
num_epochs = 100
Batch_size = 100
# Create an instance of the DeepMaxoutNetwork
deep_maxout_model = DeepMaxoutNetwork(input_dim=784, hidden_dim=100, output_dim=10, num_units=2, num_layers=3)
input_size = X_train_tensor.shape[1]
criterion = nn.CrossEntropyLoss()
sgd = optim.SGD(deep_maxout_model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
adam = optim.Adam(deep_maxout_model.parameters(), lr=0.01,weight_decay=1e-2)
optimizer = adam
return deep_maxout_model,criterion,optimizer, num_epochs, Batch_size
def create_model_3(ShallowRBF,X_train_tensor,centers):
# Hyperparameters
num_epochs = 100
learning_rate = 0.01
momentum = 0.9
Batch_size = 100
num_classes = 10
input_size = 784
n_channels = 3
num_units = 2
# Create an instance of the DeepMaxoutNetwork
RBF_model = ShallowRBF(input_dim=784, num_classes=10, num_centers=centers.shape[0])
input_size = X_train_tensor.shape[1]
criterion = nn.CrossEntropyLoss()
sgd = optim.SGD(RBF_model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
adam = optim.Adam(RBF_model.parameters(), lr=0.01)
optimizer = adam
return RBF_model,criterion,optimizer, num_epochs, Batch_size
def create_model_4(ShallowSoftmaxNetBN,X_train_tensor):
# Hyperparameters
num_epochs = 100
learning_rate = 0.01
momentum = 0.9
Batch_size = 100
num_classes = 10
input_size = 784
n_channels = 3
num_units = 2
# Create an instance of the DeepMaxoutNetwork
ShallowSoftmax_model = ShallowSoftmaxNetBN(input_dim=784, output_dim=10)
input_size = X_train_tensor.shape[1]
criterion = nn.CrossEntropyLoss()
sgd = optim.SGD(ShallowSoftmax_model.parameters(), lr=0.01,weight_decay=1e-2)
adam = optim.Adam(ShallowSoftmax_model.parameters(), lr=0.01,weight_decay=1e-4)
optimizer = adam
return ShallowSoftmax_model,criterion,optimizer, num_epochs, Batch_size
def test_eps(test_Maxout,model,device,test_dataloader):
# Run test for each epsilon
accuracies = []
args = []
epsilons = [-15,-14,-13,-12,-11, -10,-9,-8,-7,-6, -5,-4,-3,-2,-1, 0,1,2,3,4, 5,6,7,8,9, 10,11,12,13,14, 15]
for eps in epsilons:
accuracy, arg = test_Maxout(model, device, test_dataloader, eps)
accuracies.append(accuracy)
args.append(arg)
return args, epsilons
def plot_eps(args,epsilons):
for i in range(len(args)):
args[i] = args[i].detach().numpy()
args[i] = np.log(args[i]) - np.log(np.sum(np.exp(args[i]), axis=1, keepdims=True))
args[i] = args[i].mean(axis=0)
# Plot the average values as a function of epsilon
#plt.figure(figsize=(6, 18)) # Increase the height by a factor of 3
plt.plot(epsilons, [i[:] for i in args])
plt.xlabel('Epsilon')
plt.ylabel('softmax output')
plt.title('softmax output for each class vs Epsilon')
plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'])
plt.show()
def training_loop(optimizer, model, criterion, X_train_tensor, y_train_tensor, num_epochs=200, batch_size=128):
train_dataset = TensorDataset(X_train_tensor, y_train_tensor)
train_loader = DataLoader(train_dataset, batch_size, shuffle=True)
train_losses = [] # Store training losses for each epoch
for epoch in range(num_epochs):
for batch_X, batch_y in train_loader:
outputs = model(batch_X)
loss = criterion(outputs, batch_y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_losses.append(loss.item()) # Store the loss after each epoch
print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
return train_losses,model
def plot_losses(train_losses):
import matplotlib.pyplot as plt
plt.plot(train_losses, label='Training Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.title('Training Curve')
plt.legend()
plt.show()
def eval_train(X_train_tensor,y_train_tensor,model):
model.eval() # Set the model to evaluation mode
with torch.no_grad():
train_outputs = model(X_train_tensor)
_, predicted = torch.max(train_outputs, 1)
correct = (predicted == y_train_tensor).sum().item()
total = y_train_tensor.size(0)
train_accuracy = correct / total
print(f'Training Accuracy: {train_accuracy * 100:.2f}%')
def visualize_weights_and_signs(model):
# Get the weights from the first layer (assuming it's the layer you're interested in)
weights = model.linear.weight.data
# Extract the signs of the weights
weight_signs = torch.sign(weights)
# Check if the weights are 1-dimensional
if weights.dim() == 1:
# Reshape the weights to be a 2D tensor with one row
weights = weights.view(1, -1)
# Reshape the weights to match the original image dimensions (assuming 28x28)
weight_images = weights.view(-1, 28, 28)
# Reshape the weight signs to match the original image dimensions (assuming 28x28)
weight_sign_images = weight_signs.view(-1, 28, 28)
# Plot each set of weights and weight signs in a separate subplot
num_classes = weight_images.size(0)
fig, axes = plt.subplots(num_classes, 2, figsize=(16, 8 * num_classes))
for i in range(num_classes):
# Plot weights
axes[i, 0].imshow(weight_images[i].cpu().numpy(), cmap='gray')
axes[i, 0].set_title(f'Class {i} - Weight')
axes[i, 0].axis('off')
# Plot weight signs
axes[i, 1].imshow(weight_sign_images[i].cpu().numpy(), cmap='gray', vmin=-1, vmax=1)
axes[i, 1].set_title(f'Class {i} - Sign')
axes[i, 1].axis('off')
# Show the plot
plt.show()
def eval_test(X_test_tensor, y_test_tensor, model):
# Convert test data to DataLoader for batching
test_dataset = TensorDataset(X_test_tensor, y_test_tensor)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)
model.eval() # Set the model to evaluation mode
total = 0
correct = 0
total_mean_confidence = 0.0
total_samples = 0
incorrect_mean_confidence = 0.0
incorrect_samples = 0
wrong_predictions = []
correct_predictions = []
all_predictions = [] # List to store all predictions
with torch.no_grad():
for batch_X, batch_y in test_loader:
outputs = model(batch_X)
_, predicted = torch.max(outputs.data, 1)
total += batch_y.size(0)
correct += (predicted == batch_y).sum().item()
# Store all predictions
all_predictions.extend(predicted.tolist())
# Calculate mean confidence for all predictions
probabilities = nn.functional.softmax(outputs, dim=1)
confidences, _ = torch.max(probabilities, dim=1)
total_mean_confidence += confidences.sum().item()
total_samples += batch_y.size(0)
# Calculate mean confidence for incorrect predictions
incorrect_mask = predicted != batch_y
if incorrect_mask.sum().item() > 0:
incorrect_mean_confidence += confidences[incorrect_mask].sum().item()
incorrect_samples += incorrect_mask.sum().item()
# Store the wrong predictions
wrong_predictions.extend(predicted[incorrect_mask].tolist())
# Store the correct predictions
correct_predictions.extend(predicted[~incorrect_mask].tolist())
# Calculate mean confidence for all examples
if total_samples > 0:
total_mean_confidence /= total_samples
# Calculate mean confidence for incorrect predictions
if incorrect_samples > 0:
incorrect_mean_confidence /= incorrect_samples
accuracy = correct / total
print(f'Accuracy: {accuracy * 100:.2f}%')
print(f'Mean Confidence for All Examples: {total_mean_confidence:.4f}')
print(f'Mean Confidence for Incorrect Predictions: {incorrect_mean_confidence:.4f}')
return wrong_predictions, correct_predictions, all_predictions # Return all predictions
|
quentinRolld/Adversarial_attack
|
generalization/train_gen.py
|
train_gen.py
|
py
| 9,674 |
python
|
en
|
code
| 1 |
github-code
|
6
|
43226628267
|
from org.eclipse.swt import SWT
from org.eclipse.swt.widgets import Shell, Menu, MenuItem, ExpandBar, ExpandItem, Label, Composite, Button, Listener, Text
from org.eclipse.swt.layout import FillLayout, GridLayout
shell = Shell()
display = shell.getDisplay()
shell.setLayout(FillLayout())
shell.setText("ExpandBar Example")
menubar = Menu(shell, SWT.BAR)
shell.setMenuBar(menubar)
fileItem = MenuItem(menubar, SWT.CASCADE)
fileItem.setText("&File")
submenu = Menu(shell, SWT.DROP_DOWN)
fileItem.setMenu(submenu)
item = MenuItem(submenu, SWT.PUSH)
item.setText("New ExpandItem")
bar = ExpandBar(shell, SWT.V_SCROLL)
image = display.getSystemImage(SWT.ICON_QUESTION)
# First item
composite = Composite(bar, SWT.NONE)
# Add a context menu, check we describe it properly
popupmenu = Menu(shell, SWT.POP_UP)
popupitem = MenuItem(popupmenu, SWT.PUSH)
popupitem.setText("Popup")
composite.setMenu(popupmenu)
layout = GridLayout(2, False)
layout.marginLeft = layout.marginTop = layout.marginRight = layout.marginBottom = 10
layout.verticalSpacing = 10
composite.setLayout(layout)
label = Label(composite, SWT.NONE)
label.setImage(display.getSystemImage(SWT.ICON_ERROR))
label = Label(composite, SWT.NONE)
label.setText("SWT.ICON_ERROR")
label = Label(composite, SWT.NONE)
label.setImage(display.getSystemImage(SWT.ICON_INFORMATION))
label = Label(composite, SWT.NONE)
label.setText("SWT.ICON_INFORMATION")
label = Label(composite, SWT.NONE)
label.setImage(display.getSystemImage(SWT.ICON_QUESTION))
label = Label(composite, SWT.NONE)
label.setText("SWT.ICON_QUESTION")
item1 = ExpandItem(bar, SWT.NONE)
item1.setText("What is your favorite icon")
item1.setHeight(composite.computeSize(SWT.DEFAULT, SWT.DEFAULT).y)
item1.setControl(composite)
item1.setImage(image)
item1.setExpanded(True)
def createButton(composite, text):
# Add a context menu, check we describe it properly
button = Button(composite, SWT.PUSH)
button.setText(text)
popupmenu = Menu(shell, SWT.POP_UP)
popupitem = MenuItem(popupmenu, SWT.PUSH)
popupitem.setText("Button Popup")
    button.setMenu(popupmenu)
    return button  # callers assign the result, so hand the created button back
# Second item
composite = Composite(bar, SWT.NONE)
layout = GridLayout()
layout.marginLeft = layout.marginTop = layout.marginRight = layout.marginBottom = 10
layout.verticalSpacing = 10
composite.setLayout(layout)
button = createButton(composite, "Button1")
button = createButton(composite, "Button2")
item0 = ExpandItem(bar, SWT.NONE)
item0.setText("What is your favorite button")
item0.setHeight(composite.computeSize(SWT.DEFAULT, SWT.DEFAULT).y)
item0.setControl(composite)
item0.setImage(image)
class AddListener(Listener):
def handleEvent(self, e):
item2 = ExpandItem(bar, SWT.NONE)
composite = Composite(bar, SWT.NONE)
layout = GridLayout(2, False)
composite.setLayout(layout)
label = Label(composite, SWT.NONE)
label.setText("What is your name?")
# Just to see if this fools the text-finding algorithm
pointlessComposite = Composite(composite, SWT.NONE)
text = Text(pointlessComposite, SWT.NONE)
item2.setText("New Question")
text.pack()
composite.pack()
size = composite.computeSize(SWT.DEFAULT, SWT.DEFAULT)
item2.setHeight(size.y)
item2.setControl(composite)
item2.setImage(image)
item2.setExpanded(True)
item.addListener(SWT.Selection, AddListener())
bar.setSpacing(8)
shell.setSize(400, 550)
shell.open()
while not shell.isDisposed():
if not display.readAndDispatch():
display.sleep()
display.dispose()
|
texttest/storytext-selftest
|
swt/expandbar/target_ui.py
|
target_ui.py
|
py
| 3,595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71592748667
|
import datetime
import json
import math
import threading
import traceback
import asyncio
import pandas as pd
from constants import (
MAP_MARKET_ID_TO_NAME as MARKET_MAP,
AVAILABLE_MARKETS,
ALL_MARKET_LABEL,
QUERY_INTERVAL,
MINT_DIVISOR,
CONTRACT_ADDRESS
)
from prometheus_metrics import metrics
from blockchain.client import ResourceClient as BlockchainClient
from subgraph.client import ResourceClient as SubgraphClient
# Contract addresses
CONTRACT_ADDRESS = CONTRACT_ADDRESS
def write_to_json(data, filename):
with open(filename, 'w') as json_file:
json.dump({"data": data}, json_file, indent=4)
async def process_live_positions(blockchain_client, live_positions):
"""
Asynchronously process live positions data.
Args:
live_positions (list): List of live position data, where each element is a dictionary
containing information about a live position.
Returns:
pandas.DataFrame: DataFrame containing processed live position information.
This asynchronous function processes live position data by filtering out positions not in available markets,
retrieving their current values, and calculating UPNL (Unrealized Profit and Loss) metrics.
Args Details:
- `live_positions`: List of live position data.
Note:
- `AVAILABLE_MARKETS`, `get_current_value_of_live_positions`, and `MINT_DIVISOR` are assumed to be defined.
- This function utilizes asynchronous operations for improved performance.
"""
live_positions_df = pd.DataFrame(live_positions)
live_positions_df.drop(
live_positions_df[~live_positions_df['market'].isin(AVAILABLE_MARKETS)].index,
inplace = True
)
# values = await get_current_value_of_live_positions(blockchain_client, live_positions_df)
positions = live_positions_df[['market', 'owner.id', 'position_id']].values.tolist()
values = await blockchain_client.get_value_of_positions(positions)
values = [v / MINT_DIVISOR for v in values]
live_positions_df['value'] = values
live_positions_df['upnl'] = live_positions_df['value'] - live_positions_df['collateral_rem']
live_positions_df['upnl_pct'] = live_positions_df['upnl'] / live_positions_df['collateral_rem']
return live_positions_df
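# Illustrative sketch (added here for clarity, not part of the original module): the
# UPNL arithmetic used above, applied to a hand-made DataFrame with invented numbers.
def _example_upnl_arithmetic():
    df = pd.DataFrame({'collateral_rem': [100.0, 50.0], 'value': [120.0, 40.0]})
    df['upnl'] = df['value'] - df['collateral_rem']      # 20.0, -10.0
    df['upnl_pct'] = df['upnl'] / df['collateral_rem']   # 0.2, -0.2
    return df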
def set_metrics_to_nan():
"""
Set metrics values to NaN to indicate a query error.
This function updates the 'mint_gauge' metrics labels for all markets, setting their values to NaN.
This is typically used to indicate that there was an issue with the query or data retrieval.
Note:
- `metrics` is a global object representing a metrics collector.
- `AVAILABLE_MARKETS` is a global variable.
- `MARKET_MAP` is a global variable.
- `ALL_MARKET_LABEL` is a global variable.
Returns:
None
"""
# Set metric to NaN to indicate that something went wrong with the query
metrics['upnl_gauge'].labels(market=ALL_MARKET_LABEL).set(math.nan)
metrics['collateral_rem_gauge'].labels(market=ALL_MARKET_LABEL).set(math.nan)
metrics['upnl_pct_gauge'].labels(market=ALL_MARKET_LABEL).set(math.nan)
for market in AVAILABLE_MARKETS:
metrics['upnl_gauge'].labels(market=MARKET_MAP[market]).set(math.nan)
metrics['collateral_rem_gauge'].labels(market=MARKET_MAP[market]).set(math.nan)
metrics['upnl_pct_gauge'].labels(market=MARKET_MAP[market]).set(math.nan)
def set_metrics(live_positions_df_with_curr_values):
"""
Set metrics based on processed live positions data.
Args:
live_positions_df_with_curr_values (pandas.DataFrame): DataFrame containing processed live position information.
Returns:
None
This function sets various metrics based on the processed live position data, including UPNL (Unrealized Profit and Loss),
collateral, and UPNL percentage metrics.
Args Details:
- `live_positions_df_with_curr_values`: DataFrame containing processed live position information.
Note:
- `set_metrics_to_nan`, `metrics`, `AVAILABLE_MARKETS`, `MARKET_MAP`, and `ALL_MARKET_LABEL` are assumed to be defined.
- This function updates metrics based on the provided live position data.
"""
if not len(live_positions_df_with_curr_values):
set_metrics_to_nan()
return
# Calculate current value of each live position
live_positions_df = live_positions_df_with_curr_values
# Set initial value of upnl metric so far
upnl_total = live_positions_df['upnl'].sum()
upnl_total_per_market_df = live_positions_df.groupby(by='market')['upnl'].sum().reset_index()
upnl_total_per_market = dict(zip(upnl_total_per_market_df['market'], upnl_total_per_market_df['upnl']))
metrics['upnl_gauge'].labels(market=ALL_MARKET_LABEL).set(upnl_total)
for market_id in upnl_total_per_market:
metrics['upnl_gauge'].labels(market=MARKET_MAP[market_id]).set(upnl_total_per_market[market_id])
# Set initial value for collateral metric so far
collateral_total = live_positions_df['collateral_rem'].sum()
collateral_total_per_market_df = live_positions_df.groupby(by='market')['collateral_rem'].sum().reset_index()
collateral_total_per_market = dict(zip(collateral_total_per_market_df['market'], collateral_total_per_market_df['collateral_rem']))
metrics['collateral_rem_gauge'].labels(market=ALL_MARKET_LABEL).set(collateral_total)
for market_id in collateral_total_per_market:
metrics['collateral_rem_gauge'].labels(market=MARKET_MAP[market_id]).set(collateral_total_per_market[market_id])
metrics['upnl_pct_gauge'].labels(market=MARKET_MAP[market_id]).set(
upnl_total_per_market[market_id] / collateral_total_per_market[market_id]
)
# live_positions_df['upnl_pct'] = live_positions_df['upnl'] / live_positions_df['collateral_rem']
metrics['upnl_pct_gauge'].labels(market=ALL_MARKET_LABEL).set(upnl_total / collateral_total)
async def query_upnl(subgraph_client, blockchain_client, stop_at_iteration=math.inf):
"""
Asynchronously query unrealized profit and loss (UPNL) metrics from the subgraph.
Args:
subgraph_client: An instance of the subgraph client used for querying data.
blockchain_client: An instance of the blockchain client used for querying data.
stop_at_iteration (int, optional): The maximum number of iterations to run the query. Default is math.inf.
Returns:
None
This asynchronous function queries UPNL metrics from the provided subgraph client, connects to the Arbitrum network,
and handles exceptions.
It performs the following steps:
1. Connects to the Arbitrum network.
2. Initializes metrics and sets them to NaN.
3. Fetches live positions from the subgraph and calculates current values.
4. Sets UPNL metrics based on the live positions and current values.
5. Runs iterations to update UPNL metrics.
6. Handles exceptions and resets metrics if an error occurs.
Note:
- `process_live_positions`, `set_metrics`, and `set_metrics_to_nan` are defined functions.
- `QUERY_INTERVAL` is a global variable.
- `network` is a global object representing network connectivity.
"""
print('[upnl] Starting query...')
blockchain_client.connect_to_network()
set_metrics_to_nan()
try:
iteration = 0
# Fetch all live positions so far from the subgraph
print('[upnl] Getting live positions from subgraph...')
live_positions = subgraph_client.get_all_live_positions()
print('live_positions', len(live_positions))
# write_to_json(live_positions, 'live_positions.json')
print('[upnl] Getting live positions current value from blockchain...')
live_positions_df_with_curr_values = await process_live_positions(blockchain_client, live_positions)
# write_to_json(live_positions_df_with_curr_values.to_dict(orient="records"), 'live_positions_with_current_values.json')
print('[upnl] Calculating upnl metrics...')
set_metrics(live_positions_df_with_curr_values)
await asyncio.sleep(QUERY_INTERVAL)
while iteration < stop_at_iteration:
try:
print('===================================')
print(f'[upnl] Running iteration #{iteration}...')
timestamp_start = math.ceil(datetime.datetime.now().timestamp())
print('[upnl] timestamp_start', datetime.datetime.utcfromtimestamp(timestamp_start).strftime('%Y-%m-%d %H:%M:%S'))
# Fetch all live positions so far from the subgraph
live_positions = subgraph_client.get_all_live_positions()
live_positions_df_with_curr_values = await process_live_positions(blockchain_client, live_positions)
set_metrics(live_positions_df_with_curr_values)
# Increment iteration
iteration += 1
# Wait for the next iteration
await asyncio.sleep(QUERY_INTERVAL)
# if iteration == 10:
# 1 / 0
except Exception as e:
print(
f"[upnl] An error occurred on iteration "
f"{iteration} timestamp_start "
f"{datetime.datetime.utcfromtimestamp(timestamp_start).strftime('%Y-%m-%d %H:%M:%S')}:", e)
traceback.print_exc()
except Exception as e:
print(f"[upnl] An error occurred:", e)
traceback.print_exc()
set_metrics_to_nan()
subgraph_client = SubgraphClient()
blockchain_client = BlockchainClient()
# Build the coroutine lazily inside the thread target: asyncio.run() expects a coroutine object,
# and creating it eagerly here would leave it un-awaited if the thread is never started.
thread = threading.Thread(target=lambda: asyncio.run(query_upnl(subgraph_client, blockchain_client)))
if __name__ == '__main__':
    asyncio.run(query_upnl(subgraph_client, blockchain_client))
|
overlay-market/ChainMonitoring
|
metrics/upnl.py
|
upnl.py
|
py
| 9,876 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24643790845
|
class Board:
""" Defines the game board """
def __init__(self, COL, ROW, BINDS, COUNT):
        # self.board = [[0]*COL]*ROW  # problematic: every row would alias the same list object
self._cols, self._rows = COL, ROW
self._board = [[0 for i in range(ROW)] for j in range(COL)] # The COLxROW large board, all zeroes, i.e. unset
self._binds = dict()
for bind in BINDS:
c, r, v = bind
if not c in self._binds.keys():
self._binds[c] = dict()
if not r in self._binds[c].keys():
self._binds[c][r] = v
self._board[c][r] = v # Also set the board, while we are in the loop...
else:
print(f"Conflict in Binding: {bind[0]}, {bind[1]} seems to defined more than once: {BINDS}")
self._cntc = COUNT[0]
self._cntr = COUNT[1]
        # BINDS have already been applied to the board in the loop above
def set(self, c, r, v):
""" Set bord cell x,y to value v """
self._board[c][r] = v
def get(self, c, r):
""" So banale that it's hardly relevant """
return self._board[c][r]
def get_col(self, i):
return self._board[i]
def get_row(self, i):
vrow = [col[i] for col in self._board]
return vrow
def count(self, mode, i):
""" Count number of 'ship' celle, per coll or row
mode: 'c': coll mode, 'r': row mode
i: the number of the coll/row to count """
if mode == 'c':
return self.get_col(i).count(1)
elif mode == 'r':
return self.get_row(i).count(1)
else:
raise ValueError(f"Illegal mode: {mode}, should be 'c' or 'r'")
def full(self):
""" Is all celles, on the board, filled with a non-zero value """
return any([[v != 0 for v in inner] for inner in self._board])
    def satisfied(self):
        """ All bindings and counts satisfied? """
        binds_ok = all(self._board[c][r] == self._binds[c][r]
                       for c in self._binds.keys()
                       for r in self._binds[c].keys())
        if not binds_ok:
            print(f"BIND seems to be violated! {self._binds} in {self.board_astext()}")
            return False
        if any(self.count('c', i) != self._cntc[i] for i in range(len(self._cntc))):
            return False
        if any(self.count('r', i) != self._cntr[i] for i in range(len(self._cntr))):
            return False
        return True
def board_asrawtxt(self):
str_ou = str()
for r in reversed(range(self._rows)): # Screen print top-down
for c in range(self._cols):
                str_ou += ' ' + str(self._board[c][r])
str_ou += "\n"
return str_ou
def board_astext(self):
str_ou = str()
for r in reversed(range(self._rows)): # Screen print top-down
for c in range(self._cols):
v = self._board[c][r]
if v == -1: # No-ship
p = ' O'
elif v == 0: # unknown
p = ' .'
elif v == 1: # Ship
p = ' X'
else:
raise ValueError(f"Illigal value in board: {self._board}")
str_ou += p
str_ou += "\n"
return str_ou
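# Illustrative usage sketch (not part of the original module); the sizes, bindings
# and counts below are made up:
#
#   BINDS = [(0, 0, 1), (2, 3, -1)]        # (col, row, value): 1 = ship, -1 = no-ship
#   COUNT = ([1, 0, 2, 1], [2, 0, 1, 1])   # ships required per column, then per row
#   board = Board(4, 4, BINDS, COUNT)
#   board.set(2, 1, 1)
#   print(board.board_astext())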
|
MartinHvidberg/games
|
Bismarck_puzzle/bp_board.py
|
bp_board.py
|
py
| 3,115 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71964995709
|
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
import os
import dill as dpickle
import numpy as np
import pandas as pd
import logging
class MLPWrapper:
"""Wrapper for Multi-Layer Perceptron classifier"""
def __init__(self,
clf,
model_file="model.dpkl",
precision_threshold=0.7,
recall_threshold=0.5,
load_from_model=False):
"""Initialize parameters of the MLP classifier
Args:
clf: a sklearn.neural_network.MLPClassifier object
model_file: the local path to save or load model
precision_threshold: the threshold that the precision of one label must meet in order to be predicted
recall_threshold: the threshold that the recall of one label must meet in order to be predicted
load_from_model: load classifier from model file or not
"""
if clf:
self.clf = clf
elif load_from_model:
self.load_model(model_file=model_file)
else:
raise Exception("You need to pass a MLPClassifier object to the wrapper")
self.model_file = model_file
self.precision_threshold = precision_threshold
self.recall_threshold = recall_threshold
# precisions/probability_thresholds/recalls are dict
# {label_index: number or None}
self.precisions = None
self.probability_thresholds = None
self.recalls = None
# count of labels
self.total_labels_count = None
def fit(self, X, y):
"""Train the classifier
Args:
X: features, numpy.array
y: labels, numpy.array
"""
self.clf.fit(X, y)
def predict_probabilities(self, X):
"""Predict probabilities of all labels for data
Args:
X: features, numpy.array
Return: a list, shape (n_samples, n_classes)
"""
return self.clf.predict_proba(X)
def find_probability_thresholds(self, X, y, test_size=0.3):
"""Split the dataset into training and testing to find probability thresholds for all labels
Args:
X: features, numpy.array
y: labels, numpy.array
"""
# split data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=1234)
self.fit(X_train, y_train)
y_pred = self.predict_probabilities(X_test)
self.probability_thresholds = {}
self.precisions = {}
self.recalls = {}
self.total_labels_count = len(y_test[0])
for label in range(self.total_labels_count):
# find the probability for each label
best_precision, best_recall, best_threshold = 0.0, 0.0, None
precision, recall, threshold = precision_recall_curve(np.array(y_test)[:, label], y_pred[:, label])
for prec, reca, thre in zip(precision[:-1], recall[:-1], threshold):
                # precision and recall must each meet their respective thresholds
if prec >= self.precision_threshold and reca >= self.recall_threshold:
# choose the threshold with the higher precision
if prec > best_precision:
best_precision = prec
best_recall = reca
best_threshold = thre
# self.probability_thresholds is a dict {label_index: probability_threshold}
            # If probability_thresholds[label] is None, never predict this label;
            # it is effectively excluded because no threshold satisfies
            # both the precision and recall requirements
self.probability_thresholds[label] = best_threshold
self.precisions[label] = best_precision
self.recalls[label] = best_recall
def grid_search(self, params=None, cv=5, n_jobs=-1):
"""Grid search to find the parameters for the best classifier
Args:
params: parameter settings to try
a dict with param names as keys and lists of settings as values
cv: cross-validation splitting strategy, int
n_jobs: number of jobs to run in parallel, int or None
"""
if not params:
# default parameters to try
params = {'hidden_layer_sizes': [(100,), (200,), (400, ), (50, 50), (100, 100), (200, 200)],
'alpha': [.001, .01, .1, 1, 10],
'learning_rate': ['constant', 'adaptive'],
'learning_rate_init': [.001, .01, .1]}
self.clf = GridSearchCV(self.clf, params, cv=cv, n_jobs=n_jobs)
def save_model(self, model_file=None):
"""Save the model to the local path
Args:
model_file: The local path to save the model, str or None
if None, use the property of this class.
"""
if model_file:
self.model_file = model_file
with open(self.model_file, 'wb') as f:
dpickle.dump(self.clf, f)
def load_model(self, model_file=None):
"""Load the model from the local path
Args:
model_file: The local path to load the model, str or None
if None, use the property of this class.
"""
if model_file:
self.model_file = model_file
if not os.path.exists(self.model_file):
raise Exception("Model path {self.model_file} does not exist")
with open(self.model_file, 'rb') as f:
self.clf = dpickle.load(f)
def calculate_auc(predictions, y_holdout, label_columns):
"""Calculate AUC.
Args:
        predictions: num_samples x num_labels array of predicted scores
        y_holdout: labels, one-hot encoded; num_samples x num_labels
label_columns: List of labels
"""
auc_scores = []
counts = []
for i, l in enumerate(label_columns):
y_hat = predictions[:, i]
y = y_holdout[:, i]
auc = roc_auc_score(y_true=y, y_score=y_hat)
auc_scores.append(auc)
counts = y_holdout.sum(axis=0)
df = pd.DataFrame({'label': label_columns, 'auc': auc_scores, 'count': counts})
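    # `display` below assumes an IPython/Jupyter environment, where it is a builtin;
    # in a plain script it would come from `from IPython.display import display`.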
display(df)
weightedavg_auc = df.apply(lambda x: x.auc * x['count'], axis=1).sum() / df['count'].sum()
print(f'Weighted Average AUC: {weightedavg_auc}')
return df, weightedavg_auc
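# Illustrative usage sketch (not part of the original module); X and y stand in for a
# real feature matrix and a one-hot encoded label matrix:
#
#   from sklearn.neural_network import MLPClassifier
#   wrapper = MLPWrapper(clf=MLPClassifier(hidden_layer_sizes=(100,)))
#   wrapper.grid_search()                      # optionally wrap clf in GridSearchCV
#   wrapper.find_probability_thresholds(X, y)  # fits on a split and picks per-label thresholds
#   wrapper.save_model('model.dpkl')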
|
kubeflow/code-intelligence
|
py/label_microservice/mlp.py
|
mlp.py
|
py
| 6,621 |
python
|
en
|
code
| 55 |
github-code
|
6
|
24365811740
|
from itertools import groupby
from datetime import datetime, timedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
class website(models.Model):
_inherit = 'website'
def get_dynamic_count(self,prods):
filters = self.env['product.filter.value'].sudo().search([])
for fltr in filters:
count = 0
for product in prods:
for flt in product.filter_ids:
if fltr in flt.filter_value_ids:
count += 1
fltr.sudo().write({
'dynamic_count' : count,
})
class FilterProductTemplate(models.Model):
_inherit = 'product.template'
filter_ids = fields.One2many('filter.product.line','product_tmpl_id','Product Filter')
class ProductFilters(models.Model):
_name = 'product.filter'
_description = 'Filter Product'
_order = 'group_id'
name = fields.Char('Name')
type = fields.Selection([
('radio', 'Radio'),
('select', 'Select'),
('color', 'Color')], default='radio', required=True)
group_id = fields.Many2one('group.filter','Group Filter',required=True,default=lambda self: self.env['group.filter'].search([('name','=','Other Filters')],limit=1))
filter_value_ids = fields.One2many('product.filter.value','filter_id',string="Filter Values",)
filter_ids = fields.One2many('filter.product.line','filter_name_id','Product Filter')
class FilterProductValue(models.Model):
_name = 'product.filter.value'
_description = 'Filter Product Value'
@api.depends('filter_id','name','html_color')
def _attribute_count(self):
for filters in self:
product_obj = self.env['product.template']
product_ids = product_obj.search([])
count = 0
if product_ids:
for product in product_ids:
if product.filter_ids:
for flt in product.filter_ids:
if filters in flt.filter_value_ids:
count += 1
filters.product_count = count
name = fields.Char('Filter Values Name')
filter_id = fields.Many2one('product.filter','Filter Name')
html_color = fields.Char(
string='HTML Color Index', oldname='color',
help="""Here you can set a
specific HTML color index (e.g. #ff0000) to display the color if the
        filter type is 'Color'.""")
product_count = fields.Integer('Count',compute='_attribute_count',)
dynamic_count = fields.Integer("Dynamic Count")
class FilterProductGroup(models.Model):
_name = 'group.filter'
_description = 'Filter Group'
name = fields.Char('Filter Group Name')
class FilterProductLine(models.Model):
_name = 'filter.product.line'
_description = 'Filter Product'
product_tmpl_id = fields.Many2one('product.template','Filter View')
filter_name_id = fields.Many2one('product.filter','Filter Name')
filter_value_ids = fields.Many2many('product.filter.value',string="Filter Values")
@api.onchange('filter_name_id')
def onchange_filter_name_id(self):
for flt in self:
if flt.filter_name_id:
if flt.filter_name_id != flt.filter_value_ids.filter_id :
                    # reset the selected filter values when the filter changes
flt.filter_value_ids = False
return {'domain': {
'filter_value_ids': [('filter_id', '=', flt.filter_name_id.id)]
}}
@api.model
def create(self,vals):
res = super(FilterProductLine, self).create(vals)
if 'filter_value_ids' in vals:
for value in res.filter_value_ids:
value._attribute_count()
return res
def write(self,vals):
if 'filter_value_ids' in vals:
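            # each entry is an x2many command triple, e.g. (6, 0, [ids]); value[2] holds the record ids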
for value in vals['filter_value_ids']:
for flt_value in value[2]:
flt = self.env['product.filter.value'].browse(flt_value)
flt._attribute_count()
for value in self.filter_value_ids:
value._attribute_count()
return super(FilterProductLine, self).write(vals)
|
suningwz/netaddiction_addons
|
website_all_in_one/models/product_template.py
|
product_template.py
|
py
| 3,630 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32382361329
|
'''
input number and print whether even or odd.
'''
while True :
inputNum = int(input("Enter the number: "))
if (inputNum == 0) :
print("Good-bye!")
break
if (inputNum % 2 == 0) :
print("Even!")
else :
print("Odd!")
|
museRhee/basicPython
|
evenodd.py
|
evenodd.py
|
py
| 267 |
python
|
en
|
code
| 0 |
github-code
|
6
|
71916938749
|
from userbot.events import register
from userbot import CMD_HELP, bot
PENIS_TEMPLATE = """
╔╦╦
╠╬╬╬╣
╠╬╬╬╣ I ♥
╠╬╬╬╣ Chocolate
╚╩╩╩╝ CeteUserBot
"""
@register(outgoing=True, pattern=r"^\.(?:cho)\s?(.)?")
async def emoji_nah(e):
emoji = e.pattern_match.group(1)
await e.edit("Cete...")
message = PENIS_TEMPLATE
if emoji:
message = message.replace('๐', emoji)
await e.edit(message)
CMD_HELP.update({
"cho":
".cho\
\nUsage: creates a cho :o\n"
})
|
inflamesinFun/CeteUserBot
|
userbot/modules/cho.py
|
cho.py
|
py
| 545 |
python
|
en
|
code
| 0 |
github-code
|
6
|
44408183986
|
#!/usr/bin/env python
# coding: utf-8
# ## This Jupyter notebook will show you how to perform basic calculations and plots with 2-dimensional data (matrices)
#
# ## We will compare two images:
# ### * MODIS-AQUA - 31st August 2005
# ### * MODIS-AQUA - 16th February 2017
#
# Now, we will need to import several packages/toolboxes that are essential for nearly every scientific work in Python.
# In[1]:
import os #change folders
import numpy as np # perform calculations and basic math
import matplotlib.pyplot as plt # plot data
import pandas as pd # work with dataframes,tables, spreadsheets, etc.
import netCDF4 as nc4 # work with netcdf files, the standard file for satellite 2D and 3D data
# ## Now, lets load each image using the netCDF4 module.
# In[2]:
# Let's open the first image (31st August 2005)
file = 'A2005243140500.L2_LAC_OC.x 2.hdf' #write the name of the file
modis_31august2005 = nc4.Dataset(file, mode='r') #open the file in python
print(modis_31august2005) #print full details of the image
# In[3]:
# You can also use fh.variables to read information only on the variables
print(modis_31august2005.variables)
# ## Notice that you have the following variables:
# ### * Time information
# * Year
# * Day of the Year
# * Milliseconds of Day
# ### * Scan line information
# * Tilt angle for scan line
# * Scan start-pixel longitude
# * Scan center-pixel longitude
# * Scan end-pixel longitude
# * Scan start-pixel latitude
# * Scan center-pixel latitude
# * Scan end-pixel latitude
# * (...)
# ### * Remote Sensing Reflectances
# ## * **Latitude**
# ## * **Longitude**
# ## * **Chl-a (OC3 algorithm)**
# ### * Aerosol optical thickness
# ### * CDOM
# ### * PAR
# ### * Particulate Organic Carbon
# In[5]:
# Extracting variables
longitude = np.array(modis_31august2005['longitude'])
print(longitude)
# In[ ]:
# Extracting variables
longitude = np.array(modis_31august2005['longitude'])
latitude = np.array(modis_31august2005['latitude'])
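# NOTE: the lines below assume `fh` is a separately opened NetCDF dataset (e.g. a mixed
# layer depth product providing 'mlotst' and 'time'); `fh` is not defined in this notebook.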
mld = np.array(fh['mlotst'])
mld[mld == 32767] = np.nan
mld = np.swapaxes(np.swapaxes(mld, 0, 2), 0, 1)
time = np.array(fh['time'])
pixel1 = pd.read_csv('pixel1_monthly.csv')
pixel2 = pd.read_csv('pixel2_monthly.csv')
pixel3 = pd.read_csv('pixel3_monthly.csv')
# Let's print one of the datasets to check the structure
# In[ ]:
print(pixel1)
# You will notice the data corresponds to monthly-averaged Chl-a concentrations.
#
# Let's extract the data from each dataset and calculate the mean, min, max, standard deviation
# In[ ]:
pixel1_chla = pixel1['Chl-a'].values
pixel2_chla = pixel2['Chl-a'].values
pixel3_chla = pixel3['Chl-a'].values
# Pixel 1
pixel1_mean = np.nanmean(pixel1_chla)
pixel1_min = np.nanmin(pixel1_chla)
pixel1_max = np.nanmax(pixel1_chla)
pixel1_stdev = np.nanstd(pixel1_chla)
# Pixel 2
pixel2_mean = np.nanmean(pixel2_chla)
pixel2_min = np.nanmin(pixel2_chla)
pixel2_max = np.nanmax(pixel2_chla)
pixel2_stdev = np.nanstd(pixel2_chla)
# Pixel 3
pixel3_mean = np.nanmean(pixel3_chla)
pixel3_min = np.nanmin(pixel3_chla)
pixel3_max = np.nanmax(pixel3_chla)
pixel3_stdev = np.nanstd(pixel3_chla)
print('The Chl-a dataset of pixel 1 has:',
      'mean = {:.2f} mg.m-3, minimum = {:.2f} mg.m-3, maximum = {:.2f} mg.m-3 and standard deviation = {:.2f} mg.m-3 \n'.format(pixel1_mean, pixel1_min, pixel1_max, pixel1_stdev))
print('The Chl-a dataset of pixel 2 has:',
      'mean = {:.2f} mg.m-3, minimum = {:.2f} mg.m-3, maximum = {:.2f} mg.m-3 and standard deviation = {:.2f} mg.m-3 \n'.format(pixel2_mean, pixel2_min, pixel2_max, pixel2_stdev))
print('The Chl-a dataset of pixel 3 has:',
      'mean = {:.2f} mg.m-3, minimum = {:.2f} mg.m-3, maximum = {:.2f} mg.m-3 and standard deviation = {:.2f} mg.m-3 \n'.format(pixel3_mean, pixel3_min, pixel3_max, pixel3_stdev))
# ## Other simple and useful calculations you can do with numpy are:
# ``` python
# np.ptp(array)             # Calculates the range (maximum - minimum)
# np.percentile(array, q)   # Calculates the q-th percentile
# np.quantile(array, q)     # Calculates the q-th quantile
# np.median(array)          # Calculates the median
# ```
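# For example, applied to the Chl-a series of pixel 1 loaded above (NaNs are removed
# first, since np.ptp and np.percentile do not ignore them):
# In[ ]:
pixel1_valid = pixel1_chla[~np.isnan(pixel1_chla)]
print('Range:', np.ptp(pixel1_valid))
print('90th percentile:', np.percentile(pixel1_valid, 90))
print('Median:', np.median(pixel1_valid))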
# ## Now say we want to plot each dataset
# In[ ]:
print('Pixel 1 Plot')
plt.plot(pixel1_chla)
# In[ ]:
print('Pixel 2 Plot')
plt.plot(pixel2_chla)
# In[ ]:
print('Pixel 3 Plot')
plt.plot(pixel3_chla)
# They all seem different, but let's put them in the same plot for comparison.
# In[ ]:
plt.plot(pixel1_chla)
plt.plot(pixel2_chla)
plt.plot(pixel3_chla)
# We can use matplotlib options to improve our plot.
# In[ ]:
plt.figure(figsize=(12,6))
plt.plot(pixel1_chla, c='r', label='Pixel 1')
plt.plot(pixel2_chla, c='b', linestyle='--', label='Pixel 2')
plt.plot(pixel3_chla, c='k', linestyle=':', label='Pixel 3')
plt.xlabel('Years', fontsize=14)
plt.ylabel('Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.xticks(ticks=np.arange(0, len(pixel1_chla), 12), labels=np.arange(1998, 2021))
plt.xlim(0,len(pixel1_chla))
plt.ylim(0, 2)
plt.title('Pixel Chl-$\it{a}$ comparison', fontsize=18)
plt.legend(loc=0, fontsize=14)
#plt.tight_layout()
# ## Other types of plots you can do to compare one dimensional datasets!
# * Scatter plots
# * Histograms
# * Boxplots
# * etc.
# In[ ]:
plt.figure()
plt.scatter(pixel1_chla, pixel2_chla, s=10)
plt.xlabel('Pixel 1 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.ylabel('Pixel 2 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.title('Scatter Plot - Pixel 1 vs. Pixel 2', fontsize=18)
plt.figure()
plt.scatter(pixel1_chla, pixel3_chla, s=10, c='grey')
plt.xlabel('Pixel 1 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.ylabel('Pixel 3 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.title('Scatter Plot - Pixel 1 vs. Pixel 3', fontsize=18)
# In[ ]:
plt.figure()
plt.hist(pixel1_chla, color='r')
plt.xlabel('Pixel 1 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.ylabel('N', fontsize=14)
plt.title('Histogram - Pixel 1', fontsize=18)
plt.xlim(0,2)
plt.ylim(0,150)
plt.figure()
plt.hist(pixel2_chla, color='b')
plt.xlabel('Pixel 2 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.ylabel('N', fontsize=14)
plt.title('Histogram - Pixel 2', fontsize=18)
plt.xlim(0,2)
plt.ylim(0,150)
plt.figure()
plt.hist(pixel3_chla, color='b')
plt.xlabel('Pixel 3 Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.ylabel('N', fontsize=14)
plt.title('Histogram - Pixel 3', fontsize=18)
plt.xlim(0,2)
plt.ylim(0,150)
# In[ ]:
pixel1_chla_nonans = pixel1_chla[~np.isnan(pixel1_chla)] # Remove missing values
plt.figure()
bplot = plt.boxplot([pixel1_chla_nonans, pixel2_chla, pixel3_chla], notch = True, patch_artist=True, vert=True)
# fill with colors
colors = ['r', 'b', 'k']
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
for patch, color in zip(bplot['medians'], colors):
patch.set_color('w')
patch.set_linewidth(2)
plt.xlabel('Pixels', fontsize=14)
plt.ylabel('Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.title('Boxplot Comparison', fontsize=18)
# ## Last but not least, how to save an image.
#
# Let's use the boxplots image as an example
# In[ ]:
pixel1_chla_nonans = pixel1_chla[~np.isnan(pixel1_chla)] # Remove missing values
plt.figure()
bplot = plt.boxplot([pixel1_chla_nonans, pixel2_chla, pixel3_chla], notch = True, patch_artist=True, vert=True)
# fill with colors
colors = ['r', 'b', 'k']
for patch, color in zip(bplot['boxes'], colors):
patch.set_facecolor(color)
for patch, color in zip(bplot['medians'], colors):
patch.set_color('w')
patch.set_linewidth(2)
plt.xlabel('Pixels', fontsize=14)
plt.ylabel('Chl-$\it{a}$ (mg.m$^{-3}$)', fontsize=14)
plt.title('Boxplot Comparison', fontsize=18)
#plt.show()
# Save in .png
plt.savefig('boxplots_TP4.png',format = 'png', bbox_inches = 'tight', dpi = 100)
# Save in .jpeg
plt.savefig('boxplots_TP4.jpeg',format = 'jpeg', bbox_inches = 'tight', dpi = 100)
# Save in .pdf
plt.savefig('boxplots_TP4.pdf',format = 'pdf', bbox_inches = 'tight', dpi = 100)
|
afonsomferreira/ppm_jupyter
|
2D_oceancolour_plots.py
|
2D_oceancolour_plots.py
|
py
| 7,930 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32284937669
|
#!/usr/bin/env python
from pwn import *
from pwnlib.elf.elf import dotdict
import os
p = lambda x: pack(x)
u = lambda x: unpack(x, len(x)*8)
class Environment:
def __init__(self, *envs):
self.__env = None
self.env_list = list(set(envs))
for env in self.env_list:
setattr(self, env, dict())
def set_item(self, name, **obj):
        if set(obj.keys()) != set(self.env_list):
fail('Environment : "%s" environment does not match' % name)
return
for env in obj:
getattr(self, env).update({name:obj[env]})
def select(self, env=None):
if env is not None and env not in self.env_list:
warn('Environment : "%s" is not defined' % env)
env = None
while env is None:
sel = raw_input('Select Environment\n%s ...' % str(self.env_list)).strip()
if not sel:
env = self.env_list[0]
elif sel in self.env_list:
env = sel
else:
for e in self.env_list:
if e.startswith(sel):
env = e
break
info('Environment : set environment "%s"' % env)
for name,obj in getattr(self, env).items():
setattr(self, name, obj)
self.__env = env
def check(self, env):
return self.__env == env
class ELF(pwnlib.elf.elf.ELF):
sap_function = {}
sap_section = {}
def __init__(self, path, checksec=True):
super(ELF, self).__init__(path, checksec)
self.sep_function = dotdict()
self.sep_section = dotdict()
self._populate_function()
self._populate_section()
@pwnlib.elf.elf.ELF.address.setter
def address(self, new):
delta = new-self._address
update = lambda x: x+delta
self.symbols = dotdict({k:update(v) for k,v in self.symbols.items()})
self.plt = dotdict({k:update(v) for k,v in self.plt.items()})
self.got = dotdict({k:update(v) for k,v in self.got.items()})
self.sep_function = dotdict({k:update(v) for k,v in self.sep_function.items()})
self.sep_section = dotdict({k:update(v) for k,v in self.sep_section.items()})
# Update our view of memory
memory = pwnlib.elf.elf.intervaltree.IntervalTree()
for begin, end, data in self.memory:
memory.addi(update(begin),
update(end),
data)
self.memory = memory
self._address = update(self.address)
def _populate_function(self):
for name in self.functions:
self.sep_function[name] = self.functions[name].address
def _populate_section(self):
for sec in self.iter_sections():
self.sep_section[sec.name] = sec.header.sh_addr
@property
def libc(self):
for lib in self.libs:
if '/libc.' in lib or '/libc-' in lib:
return ELF(lib)
def init():
if 'TMUX' in os.environ:
if 'DISPLAY' in os.environ:
del os.environ['DISPLAY']
def communicate(mode='SOCKET', *args, **kwargs):
if mode == 'SOCKET':
conn = remote(*args, **kwargs)
elif mode == 'PROC':
conn = process(*args, **kwargs)
elif mode == 'DEBUG':
if 'argv' in kwargs:
argv = kwargs['argv']
del kwargs['argv']
else:
argv = './argv'
conn = gdb.debug(argv, *args, **kwargs)
    else:
        warn('communicate : mode "%s" is not defined' % mode)
        conn = None
    return conn
init()
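# Illustrative usage sketch (not part of the original helper); the host and port
# below are made up:
#
#   env = Environment('local', 'remote')
#   env.set_item('target',
#                local={'host': 'localhost', 'port': 4000},
#                remote={'host': 'example.com', 'port': 4000})
#   env.select('local')   # or env.select() to be prompted interactively
#   conn = communicate('SOCKET', env.target['host'], env.target['port'])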
|
shift-crops/CTFProblemArchive
|
2017/SECCON Beginners/NextTokyo/next_note/exploit/sc_expwn.py
|
sc_expwn.py
|
py
| 3,682 |
python
|
en
|
code
| 1 |
github-code
|
6
|
34268869156
|
import pandas as pd
import numpy as np
from keras.models import load_model
from sklearn.preprocessing import MinMaxScaler
from numpy import concatenate
from flask import Flask, request, jsonify
import json
app = Flask(__name__)
@app.route('/', methods=['GET'])
def getMill():
raw_ts = pd.read_pickle('./time_series.pk1')
raw_ts=raw_ts.reset_index(drop=True)
raw_ts.drop(['var52(t-3)','var52(t-2)','var52(t-1)'],axis='columns', inplace=True)
raw_ts = raw_ts.sort_values(by=['var2(t)','var3(t)'])
raw_ts=raw_ts.reset_index(drop=True)
raw_val = raw_ts.values
scaler = MinMaxScaler(feature_range=(0, 1))
raw_scaled = scaler.fit_transform(raw_val)
raw_eval = raw_scaled[57193:,:]
raw_train_test = raw_scaled[:57193,:]
raw_train_test_x = raw_train_test[:, :-1]
raw_train_test_y = raw_train_test[:, -1]
x_train= raw_train_test_x[:42588, :]
x_test = raw_train_test_x[42588:, :]
y_train=raw_train_test_y[:42588]
y_test= raw_train_test_y[42588:]
x_train = x_train.reshape((x_train.shape[0], 1, x_train.shape[1]))
x_test = x_test.reshape((x_test.shape[0], 1, x_test.shape[1]))
raw_eval_x = raw_eval[:, :-1]
x_eval= raw_eval_x.reshape((raw_eval_x.shape[0], 1, raw_eval_x.shape[1]))
raw_est = pd.read_csv("RVESTfull.csv")
extract_columns = [154,155,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204]
col_names = ['SEASON','SUGARMONTH','Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu','ErrorRV']
dummie_cols = ['Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu']
y_pred_1 = model.predict(x_test)
x_test_1 = x_test.reshape((x_test.shape[0], x_test.shape[2]))
test_1 = concatenate((x_test_1,y_pred_1), axis=1 )
test_1_scaled = scaler.inverse_transform(test_1)
y_test = y_test.reshape(x_test.shape[0],1)
test_1_actual = concatenate((x_test_1,y_test), axis=1 )
test_1_actual_scaled = scaler.inverse_transform(test_1_actual)
y_test_pred = test_1_scaled[:, -1]
y_test_actual = test_1_actual_scaled[:, -1]
df_test_actual = pd.DataFrame(test_1_actual_scaled)
df_test_pred = pd.DataFrame(test_1_scaled)
mill_season_month_error_actual_test = df_test_actual[df_test_actual[155]>5][extract_columns]
mill_season_month_error_actual_test.columns = col_names
mill_col = mill_season_month_error_actual_test[dummie_cols].idxmax(axis=1)
mill_season_month_error_actual_test['mill'] = mill_col
mill_season_month_error_actual_test.drop(dummie_cols,axis='columns', inplace=True)
mill_season_month_error_actual_test
mill_season_month_error_pred_test = df_test_pred[df_test_pred[155]>5][extract_columns]
mill_season_month_error_pred_test.columns = col_names
mill_season_month_error_actual_test['pred_ErrorRv']=mill_season_month_error_pred_test['ErrorRV']
eval_1 = mill_season_month_error_actual_test
eval_1['SUGARMONTH'] = eval_1['SUGARMONTH'].round()
ev_1 = eval_1[eval_1['SUGARMONTH']<9.5].groupby(by=['mill','SUGARMONTH'])[['pred_ErrorRv','ErrorRV']].mean()
ev_1 = ev_1.reset_index(drop=False)
final_op_test = pd.merge(left= raw_est[(raw_est['SUGARMONTH']>6.5)&(raw_est['fa_SEASON']==2020)], right=ev_1[['mill','SUGARMONTH','pred_ErrorRv']], how='left', left_on=['cf_mill','SUGARMONTH'], right_on=['mill','SUGARMONTH'])
final_op_test['pred_rv'] = final_op_test['FCFORECAST'] + final_op_test['pred_ErrorRv']
final_op_test = final_op_test.dropna(how='any')
final_op_test.columns= ['SUGARMONTH', 'FC_FORECAST', 'Actual_RV', 'ErrorRV', 'cf_mill', 'fa_SEASON','mill', 'pred_ErrorRv', 'Prediction']
test_op = final_op_test[['fa_SEASON','SUGARMONTH','FC_FORECAST','Actual_RV','Prediction','mill']]
extract_columns = [154,155,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204]
col_names = ['SEASON','SUGARMONTH','Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu','ErrorRV']
dummie_cols = ['Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu']
y_pred_2 = model.predict(x_train)
x_train_2 = x_train.reshape((x_train.shape[0], x_train.shape[2]))
train_2 = concatenate((x_train_2,y_pred_2), axis=1 )
train_1_scaled = scaler.inverse_transform(train_2)
y_train = y_train.reshape(y_train.shape[0],1)
train_1_actual = concatenate((x_train_2,y_train), axis=1 )
train_1_actual_scaled = scaler.inverse_transform(train_1_actual)
y_train_pred = train_1_scaled[:, -1]
y_train_actual = train_1_actual_scaled[:, -1]
df_train_actual = pd.DataFrame(train_1_actual_scaled)
df_train_pred = pd.DataFrame(train_1_scaled)
mill_season_month_error_actual_train = df_train_actual[extract_columns].copy()
mill_season_month_error_actual_train.columns = col_names
mill_col = mill_season_month_error_actual_train[dummie_cols].idxmax(axis=1)
mill_season_month_error_actual_train['mill'] = mill_col
mill_season_month_error_actual_train.drop(dummie_cols,axis='columns', inplace=True)
mill_season_month_error_actual_train
mill_season_month_error_pred_train = df_train_pred[extract_columns]
mill_season_month_error_pred_train.columns = col_names
mill_season_month_error_actual_train['pred_ErrorRv']=mill_season_month_error_pred_train['ErrorRV']
eval_2 = mill_season_month_error_actual_train
eval_2['SUGARMONTH'] = eval_2['SUGARMONTH'].round()
ev_2 = eval_2[eval_2['SUGARMONTH']<9.5].groupby(by=["SEASON",'mill','SUGARMONTH'])[['pred_ErrorRv','ErrorRV']].mean()
ev_2 = ev_2.reset_index(drop=False)
ev_2
final_op_train = pd.merge(left= raw_est, right=ev_2[['mill','SEASON','SUGARMONTH','pred_ErrorRv']], how='left', left_on=['cf_mill','fa_SEASON','SUGARMONTH'], right_on=['mill','SEASON','SUGARMONTH'])
final_op_train = final_op_train.dropna(how='any')
final_op_train['pred_rv'] = final_op_train['FCFORECAST'] + final_op_train['pred_ErrorRv']
final_op_train.drop(['SEASON'],axis='columns', inplace=True)
final_op_train.columns= ['SUGARMONTH', 'FC_FORECAST', 'Actual_RV', 'ErrorRV', 'cf_mill', 'fa_SEASON','mill', 'pred_ErrorRv', 'Prediction']
train_op = final_op_train[['fa_SEASON','SUGARMONTH','FC_FORECAST','Actual_RV','Prediction','mill']]
extract_columns = [154,155,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204]
col_names = ['SEASON','SUGARMONTH','Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu','pred_ErrorRv']
dummie_cols = ['Amatikulu','Darnall','Eston','Felixton','Gledhow','Komati','Maidstone','Malelane','Noodsberg','Pongola','Sezela','UCL','Umfolozi','Umzimkulu']
y_pred_eval = model.predict(x_eval)
x_eval_1 = x_eval.reshape((x_eval.shape[0], x_eval.shape[2]))
eval = concatenate((x_eval_1,y_pred_eval), axis=1 )
eval_scaled = scaler.inverse_transform(eval)
eval_pred = eval_scaled[:, -1]
df_eval = pd.DataFrame(eval_scaled)
mill_season_month_error_actual_eval = df_eval[extract_columns].copy()
mill_season_month_error_actual_eval.columns = col_names
mill_col = mill_season_month_error_actual_eval[dummie_cols].idxmax(axis=1)
mill_season_month_error_actual_eval['mill'] = mill_col
mill_season_month_error_actual_eval.drop(dummie_cols,axis='columns', inplace=True)
eval_3 = mill_season_month_error_actual_eval
eval_3['SUGARMONTH'] = eval_3['SUGARMONTH'].round()
ev_3 = eval_3[eval_3['SUGARMONTH']<9.5].groupby(by=["SEASON",'mill','SUGARMONTH'])[['pred_ErrorRv']].mean()
ev_3 = ev_3.reset_index(drop=False)
ev_3
final_op_eval = pd.merge(left= raw_est[raw_est['fa_SEASON']==2021], right=ev_3[['mill','SEASON','SUGARMONTH','pred_ErrorRv']], how='left', left_on=['cf_mill','fa_SEASON','SUGARMONTH'], right_on=['mill','SEASON','SUGARMONTH'])
final_op_eval.drop(['SEASON','SRV','ErrorRV'],axis='columns', inplace=True)
final_op_eval = final_op_eval.dropna(how='any')
final_op_eval['pred_rv'] = final_op_eval['FCFORECAST'] + final_op_eval['pred_ErrorRv']
final_op_eval.columns= ['SUGARMONTH', 'FC_FORECAST', 'cf_mill', 'fa_SEASON','mill', 'pred_ErrorRv', 'Prediction']
eval_op = final_op_eval[['fa_SEASON','SUGARMONTH','FC_FORECAST','Prediction','mill']]
print(test_op.shape)
print(train_op.shape)
print(eval_op.shape)
mill = request.args.get('mill', type = str)
print(mill)
response = jsonify({'train': json.loads(train_op[train_op['mill']==mill].to_json(orient='index')) , 'test': json.loads(test_op[test_op['mill']==mill].to_json(orient='index')) ,'eval': json.loads(eval_op[eval_op['mill']==mill].to_json(orient='index'))})
response.headers.add("Access-Control-Allow-Origin", "*")
return response
if __name__ == '__main__':
model = load_model('./Model3')
app.run(debug=True, host='0.0.0.0')
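# Example request (assuming the server is running locally on the default Flask port):
#   curl 'http://localhost:5000/?mill=Eston'
# returns a JSON object with 'train', 'test' and 'eval' blocks filtered to the given mill.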
|
Francis-Walker/AI_api
|
api_deploy/model_api.py
|
model_api.py
|
py
| 9,209 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19053013321
|
from functools import wraps
import re
from collections import defaultdict
from datetime import datetime, timedelta, timezone
import humanize
import simplejson as json
from dateutil.tz import tzutc
from flask import Blueprint, g, redirect, request, url_for, current_app, jsonify
from flask_login import current_user, login_required, logout_user
from flask_themes2 import render_theme_template
from sdc.crypto.encrypter import encrypt
from jwcrypto.common import base64url_decode
from structlog import get_logger
from app.globals import get_session_store, get_completeness
from app.data_model.answer_store import Answer, AnswerStore
from app.data_model.app_models import SubmittedResponse
from app.globals import get_answer_store, get_completed_blocks, get_metadata, get_questionnaire_store
from app.helpers.form_helper import post_form_for_location
from app.helpers.path_finder_helper import path_finder, full_routing_path_required
from app.helpers.schema_helpers import with_schema
from app.helpers.session_helpers import with_answer_store, with_metadata
from app.helpers.template_helper import (with_session_timeout, with_metadata_context, with_analytics,
with_questionnaire_url_prefix, with_legal_basis, render_template)
from app.questionnaire.location import Location
from app.questionnaire.navigation import Navigation
from app.questionnaire.path_finder import PathFinder
from app.questionnaire.router import Router
from app.questionnaire.rules import get_answer_ids_on_routing_path
from app.questionnaire.rules import evaluate_skip_conditions
from app.keys import KEY_PURPOSE_SUBMISSION
from app.storage import data_access
from app.storage.storage_encryption import StorageEncryption
from app.submitter.converter import convert_answers
from app.submitter.submission_failed import SubmissionFailedException
from app.templating.metadata_context import build_metadata_context_for_survey_completed
from app.templating.schema_context import build_schema_context
from app.templating.summary_context import build_summary_rendering_context
from app.templating.template_renderer import renderer, TemplateRenderer
from app.templating.view_context import build_view_context
from app.templating.utils import get_question_title
from app.utilities.schema import load_schema_from_session_data
from app.views.errors import MultipleSurveyError
from app.authentication.no_token_exception import NoTokenException
END_BLOCKS = 'Summary', 'Confirmation'
logger = get_logger()
questionnaire_blueprint = Blueprint(name='questionnaire',
import_name=__name__,
url_prefix='/questionnaire/<eq_id>/<form_type>/<collection_id>/')
post_submission_blueprint = Blueprint(name='post_submission',
import_name=__name__,
url_prefix='/questionnaire/<eq_id>/<form_type>/')
@questionnaire_blueprint.before_request
def before_questionnaire_request():
metadata = get_metadata(current_user)
if not metadata:
raise NoTokenException(401)
logger.bind(tx_id=metadata['tx_id'])
values = request.view_args
logger.bind(eq_id=values['eq_id'], form_type=values['form_type'],
ce_id=values['collection_id'])
logger.info('questionnaire request', method=request.method, url_path=request.full_path)
_check_same_survey(url_eq_id=values['eq_id'],
url_form_type=values['form_type'],
url_collection_id=values['collection_id'],
session_eq_id=metadata['eq_id'],
session_form_type=metadata['form_type'],
session_collection_id=metadata['collection_exercise_sid'])
session_data = get_session_store().session_data
g.schema = load_schema_from_session_data(session_data)
@post_submission_blueprint.before_request
def before_post_submission_request():
session = get_session_store()
if not session or not session.session_data:
raise NoTokenException(401)
session_data = session.session_data
g.schema = load_schema_from_session_data(session_data)
logger.bind(tx_id=session_data.tx_id)
values = request.view_args
logger.bind(eq_id=values['eq_id'], form_type=values['form_type'])
logger.info('questionnaire request', method=request.method, url_path=request.full_path)
_check_same_survey(url_eq_id=values['eq_id'],
url_form_type=values['form_type'],
url_collection_id='',
session_eq_id=session_data.eq_id,
session_form_type=session_data.form_type,
session_collection_id='')
@questionnaire_blueprint.after_request
def add_cache_control(response):
response.cache_control.no_cache = True
return response
def save_questionnaire_store(func):
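    # Decorator: persist the current user's questionnaire store after the wrapped view returns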
@wraps(func)
def save_questionnaire_store_wrapper(*args, **kwargs):
response = func(*args, **kwargs)
if not current_user.is_anonymous:
questionnaire_store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
questionnaire_store.add_or_update()
return response
return save_questionnaire_store_wrapper
@questionnaire_blueprint.route('<group_id>/<int:group_instance>/<block_id>', methods=['GET'])
@login_required
@with_answer_store
@with_metadata
@with_schema
@full_routing_path_required
def get_block(routing_path, schema, metadata, answer_store, eq_id, form_type, collection_id, group_id, # pylint: disable=too-many-locals
group_instance, block_id):
current_location = Location(group_id, group_instance, block_id)
completeness = get_completeness(current_user)
router = Router(schema, routing_path, completeness, current_location)
if not router.can_access_location():
next_location = router.get_next_location()
return _redirect_to_location(collection_id, eq_id, form_type, next_location)
block = _get_block_json(current_location, schema, answer_store, metadata)
context = _get_context(routing_path, block, current_location, schema)
return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
@questionnaire_blueprint.route('<group_id>/<int:group_instance>/<block_id>', methods=['POST'])
@login_required
@with_answer_store
@with_metadata
@with_schema
@full_routing_path_required
def post_block(routing_path, schema, metadata, answer_store, eq_id, form_type, collection_id, group_id, # pylint: disable=too-many-locals
group_instance, block_id):
current_location = Location(group_id, group_instance, block_id)
completeness = get_completeness(current_user)
router = Router(schema, routing_path, completeness, current_location)
if not router.can_access_location():
next_location = router.get_next_location()
return _redirect_to_location(collection_id, eq_id, form_type, next_location)
block = _get_block_json(current_location, schema, answer_store, metadata)
schema_context = _get_schema_context(routing_path, current_location.group_instance, metadata, answer_store, schema)
rendered_block = renderer.render(block, **schema_context)
form = _generate_wtf_form(request.form, rendered_block, current_location, schema)
if 'action[save_sign_out]' in request.form:
return _save_sign_out(routing_path, current_location, form, schema, answer_store, metadata)
if form.validate():
_set_started_at_metadata_if_required(form, metadata)
_update_questionnaire_store(current_location, form, schema)
next_location = path_finder.get_next_location(current_location=current_location)
if _is_end_of_questionnaire(block, next_location):
return submit_answers(routing_path, eq_id, form_type, schema)
return redirect(_next_location_url(next_location))
context = build_view_context(block['type'], metadata, schema, answer_store, schema_context, rendered_block, current_location, form)
return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
@questionnaire_blueprint.route('<group_id>/0/household-composition', methods=['POST'])
@login_required
@with_answer_store
@with_metadata
@with_schema
@full_routing_path_required
def post_household_composition(routing_path, schema, metadata, answer_store, **kwargs):
group_id = kwargs['group_id']
if _household_answers_changed(answer_store, schema):
_remove_repeating_on_household_answers(answer_store, schema)
disable_mandatory = any(x in request.form for x in ['action[add_answer]', 'action[remove_answer]', 'action[save_sign_out]'])
current_location = Location(group_id, 0, 'household-composition')
block = _get_block_json(current_location, schema, answer_store, metadata)
form = post_form_for_location(schema, block, current_location, answer_store, metadata,
request.form, disable_mandatory=disable_mandatory)
form.validate() # call validate here to keep errors in the form object on the context
context = _get_context(routing_path, block, current_location, schema, form)
if 'action[add_answer]' in request.form:
form.household.append_entry()
return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
if 'action[remove_answer]' in request.form:
index_to_remove = int(request.form.get('action[remove_answer]'))
form.remove_person(index_to_remove)
return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
if 'action[save_sign_out]' in request.form:
response = _save_sign_out(routing_path, current_location, form, schema, answer_store, metadata)
remove_empty_household_members_from_answer_store(answer_store, schema)
return response
if form.validate():
questionnaire_store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
update_questionnaire_store_with_answer_data(questionnaire_store, current_location, form.serialise(), schema)
metadata = get_metadata(current_user)
next_location = path_finder.get_next_location(current_location=current_location)
return redirect(next_location.url(metadata))
return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
@post_submission_blueprint.route('thank-you', methods=['GET'])
@login_required
@with_metadata
@with_schema
def get_thank_you(schema, metadata, eq_id, form_type): # pylint: disable=unused-argument
session_data = get_session_store().session_data
completeness = get_completeness(current_user)
if session_data.submitted_time:
metadata_context = build_metadata_context_for_survey_completed(session_data)
view_submission_url = None
view_submission_duration = 0
if _is_submission_viewable(schema.json, session_data.submitted_time):
view_submission_url = url_for('.get_view_submission', eq_id=eq_id, form_type=form_type)
view_submission_duration = humanize.naturaldelta(timedelta(seconds=schema.json['view_submitted_response']['duration']))
return render_theme_template(schema.json['theme'],
template_name='thank-you.html',
metadata=metadata_context,
analytics_ua_id=current_app.config['EQ_UA_ID'],
survey_id=schema.json['survey_id'],
survey_title=TemplateRenderer.safe_content(schema.json['title']),
is_view_submitted_response_enabled=is_view_submitted_response_enabled(schema.json),
view_submission_url=view_submission_url,
view_submission_duration=view_submission_duration)
routing_path = path_finder.get_full_routing_path()
collection_id = metadata['collection_exercise_sid']
router = Router(schema, routing_path, completeness)
next_location = router.get_next_location()
return _redirect_to_location(collection_id, metadata.get('eq_id'), metadata.get('form_type'), next_location)
@post_submission_blueprint.route('view-submission', methods=['GET'])
@login_required
@with_schema
def get_view_submission(schema, eq_id, form_type): # pylint: disable=unused-argument
session_data = get_session_store().session_data
if _is_submission_viewable(schema.json, session_data.submitted_time):
submitted_data = data_access.get_by_key(SubmittedResponse, session_data.tx_id)
if submitted_data:
metadata_context = build_metadata_context_for_survey_completed(session_data)
pepper = current_app.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_ENCRYPTION_USER_PEPPER')
encrypter = StorageEncryption(current_user.user_id, current_user.user_ik, pepper)
submitted_data = encrypter.decrypt_data(submitted_data.data)
# for backwards compatibility
# submitted data used to be base64 encoded before encryption
try:
submitted_data = base64url_decode(submitted_data.decode()).decode()
except ValueError:
pass
submitted_data = json.loads(submitted_data)
answer_store = AnswerStore(existing_answers=submitted_data.get('answers'))
metadata = submitted_data.get('metadata')
routing_path = PathFinder(schema, answer_store, metadata, []).get_full_routing_path()
schema_context = _get_schema_context(routing_path, 0, metadata, answer_store, schema)
rendered_schema = renderer.render(schema.json, **schema_context)
summary_rendered_context = build_summary_rendering_context(schema, rendered_schema['sections'], answer_store, metadata)
context = {
'summary': {
'groups': summary_rendered_context,
'answers_are_editable': False,
'is_view_submission_response_enabled': is_view_submitted_response_enabled(schema.json),
},
'variables': None,
}
return render_theme_template(schema.json['theme'],
template_name='view-submission.html',
metadata=metadata_context,
analytics_ua_id=current_app.config['EQ_UA_ID'],
survey_id=schema.json['survey_id'],
survey_title=TemplateRenderer.safe_content(schema.json['title']),
content=context)
return redirect(url_for('post_submission.get_thank_you', eq_id=eq_id, form_type=form_type))
def _set_started_at_metadata_if_required(form, metadata):
questionnaire_store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
if not questionnaire_store.answer_store.answers and len(form.data) > 1:
started_at = datetime.now(timezone.utc).isoformat()
logger.info('first answer about to be stored. writing started_at time to metadata',
started_at=started_at)
metadata['started_at'] = started_at
def _render_page(block_type, context, current_location, schema, answer_store, metadata, routing_path):
if request_wants_json():
return jsonify(context)
return _build_template(
current_location,
context,
block_type,
schema,
answer_store,
metadata,
routing_path=routing_path)
def _generate_wtf_form(form, block, location, schema):
disable_mandatory = 'action[save_sign_out]' in form
wtf_form = post_form_for_location(
schema,
block,
location,
get_answer_store(current_user),
get_metadata(current_user),
request.form,
disable_mandatory)
return wtf_form
def _next_location_url(location):
metadata = get_metadata(current_user)
return location.url(metadata)
def _is_end_of_questionnaire(block, next_location):
return (
block['type'] in END_BLOCKS and
next_location is None
)
def submit_answers(routing_path, eq_id, form_type, schema):
metadata = get_metadata(current_user)
answer_store = get_answer_store(current_user)
message = json.dumps(convert_answers(
metadata,
schema,
answer_store,
routing_path,
))
encrypted_message = encrypt(message, current_app.eq['key_store'], KEY_PURPOSE_SUBMISSION)
sent = current_app.eq['submitter'].send_message(
encrypted_message,
current_app.config['EQ_RABBITMQ_QUEUE_NAME'],
metadata['tx_id'],
)
if not sent:
raise SubmissionFailedException()
submitted_time = datetime.utcnow()
_store_submitted_time_in_session(submitted_time)
if is_view_submitted_response_enabled(schema.json):
_store_viewable_submission(answer_store.answers, metadata, submitted_time)
get_questionnaire_store(current_user.user_id, current_user.user_ik).delete()
return redirect(url_for('post_submission.get_thank_you', eq_id=eq_id, form_type=form_type))
def _store_submitted_time_in_session(submitted_time):
session_store = get_session_store()
session_data = session_store.session_data
session_data.submitted_time = submitted_time.isoformat()
session_store.save()
def _store_viewable_submission(answers, metadata, submitted_time):
pepper = current_app.eq['secret_store'].get_secret_by_name('EQ_SERVER_SIDE_STORAGE_ENCRYPTION_USER_PEPPER')
encrypter = StorageEncryption(current_user.user_id, current_user.user_ik, pepper)
encrypted_data = encrypter.encrypt_data(
{
'answers': answers,
'metadata': metadata,
},
)
valid_until = submitted_time + timedelta(seconds=g.schema.json['view_submitted_response']['duration'])
item = SubmittedResponse(
tx_id=metadata['tx_id'],
data=encrypted_data,
valid_until=valid_until.replace(tzinfo=tzutc()),
)
data_access.put(item)
def is_view_submitted_response_enabled(schema):
view_submitted_response = schema.get('view_submitted_response')
if view_submitted_response:
return view_submitted_response['enabled']
return False
def _is_submission_viewable(schema, submitted_time):
if is_view_submitted_response_enabled(schema) and submitted_time:
submitted_time = datetime.strptime(submitted_time, '%Y-%m-%dT%H:%M:%S.%f')
submission_valid_until = submitted_time + timedelta(seconds=schema['view_submitted_response']['duration'])
return submission_valid_until > datetime.utcnow()
return False
def _save_sign_out(routing_path, current_location, form, schema, answer_store, metadata):
questionnaire_store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
block = _get_block_json(current_location, schema, answer_store, metadata)
if form.validate():
_update_questionnaire_store(current_location, form, schema)
if current_location in questionnaire_store.completed_blocks:
questionnaire_store.remove_completed_blocks(location=current_location)
questionnaire_store.add_or_update()
logout_user()
return redirect(url_for('session.get_sign_out'))
context = _get_context(routing_path, block, current_location, schema, form)
return _render_page(block['type'], context, current_location, schema, answer_store, metadata, routing_path)
def _household_answers_changed(answer_store, schema):
answer_ids = schema.get_answer_ids_for_block('household-composition')
household_answers = answer_store.filter(answer_ids)
stripped_form = request.form.copy()
del stripped_form['csrf_token']
remove = [k for k in stripped_form if 'action[' in k]
for k in remove:
del stripped_form[k]
if household_answers.count() != len(stripped_form):
return True
for answer in request.form:
answer_id, answer_index = extract_answer_id_and_instance(answer)
try:
stored_answer = household_answers.filter(
answer_ids=[answer_id],
answer_instance=answer_index)[0]
except IndexError:
stored_answer = None
if stored_answer and (stored_answer['value'] or '') != request.form[answer]:
return True
return False
def _remove_repeating_on_household_answers(answer_store, schema):
answer_ids = schema.get_answer_ids_for_block('household-composition')
answer_store.remove(answer_ids=answer_ids)
questionnaire_store = get_questionnaire_store(
current_user.user_id,
current_user.user_ik,
)
for answer in schema.get_answers_that_repeat_in_block('household-composition'):
groups_to_delete = schema.get_groups_that_repeat_with_answer_id(answer['id'])
for group in groups_to_delete:
answer_ids = schema.get_answer_ids_for_group(group['id'])
answer_store.remove(answer_ids=answer_ids)
questionnaire_store.completed_blocks[:] = [b for b in questionnaire_store.completed_blocks if
b.group_id != group['id']]
def remove_empty_household_members_from_answer_store(answer_store, schema):
answer_ids = schema.get_answer_ids_for_block('household-composition')
household_answers = answer_store.filter(answer_ids=answer_ids)
household_member_name = defaultdict(list)
for household_answer in household_answers:
if household_answer['answer_id'] == 'first-name' or household_answer['answer_id'] == 'last-name':
household_member_name[household_answer['answer_instance']].append(household_answer['value'])
to_be_removed = []
for k, v in household_member_name.items():
name_value = ''.join(v).strip()
if not name_value:
to_be_removed.append(k)
for instance_to_remove in to_be_removed:
answer_store.remove(answer_ids=answer_ids, answer_instance=instance_to_remove)
def _update_questionnaire_store(current_location, form, schema):
questionnaire_store = get_questionnaire_store(current_user.user_id, current_user.user_ik)
if current_location.block_id in ['relationships', 'household-relationships']:
update_questionnaire_store_with_answer_data(questionnaire_store, current_location,
form.serialise(), schema)
else:
update_questionnaire_store_with_form_data(questionnaire_store, current_location, form.data, schema)
@save_questionnaire_store
def update_questionnaire_store_with_form_data(questionnaire_store, location, answer_dict, schema):
survey_answer_ids = schema.get_answer_ids_for_block(location.block_id)
for answer_id, answer_value in answer_dict.items():
# If answer is not answered then check for a schema specified default
if answer_value is None:
answer_value = schema.get_answer(answer_id).get('default')
if answer_id in survey_answer_ids or location.block_id == 'household-composition':
if answer_value is not None:
answer = Answer(answer_id=answer_id,
value=answer_value,
group_instance=location.group_instance)
latest_answer_store_hash = questionnaire_store.answer_store.get_hash()
questionnaire_store.answer_store.add_or_update(answer)
if latest_answer_store_hash != questionnaire_store.answer_store.get_hash() and schema.dependencies[answer_id]:
_remove_dependent_answers_from_completed_blocks(answer_id, location.group_instance, questionnaire_store, schema)
else:
_remove_answer_from_questionnaire_store(
answer_id,
questionnaire_store,
group_instance=location.group_instance)
if location not in questionnaire_store.completed_blocks:
questionnaire_store.completed_blocks.append(location)
def _remove_dependent_answers_from_completed_blocks(answer_id, group_instance, questionnaire_store, schema):
"""
    Gets a list of answer ids that are dependent on the answer_id passed in.
    Then, for each dependent answer, removes its block from the completed blocks,
    forcing the respondent to revisit that block.
The dependent answers themselves remain untouched.
:param answer_id: the answer that has changed
:param questionnaire_store: holds the completed blocks
:return: None
"""
answer_in_repeating_group = schema.answer_is_in_repeating_group(answer_id)
dependencies = schema.dependencies[answer_id]
for dependency in dependencies:
dependency_in_repeating_group = schema.answer_is_in_repeating_group(dependency)
answer = schema.get_answer(dependency)
question = schema.get_question(answer['parent_id'])
block = schema.get_block(question['parent_id'])
if dependency_in_repeating_group and not answer_in_repeating_group:
questionnaire_store.remove_completed_blocks(group_id=block['parent_id'], block_id=block['id'])
else:
location = Location(block['parent_id'], group_instance, block['id'])
if location in questionnaire_store.completed_blocks:
questionnaire_store.remove_completed_blocks(location=location)
def _remove_answer_from_questionnaire_store(answer_id, questionnaire_store,
group_instance=0):
questionnaire_store.answer_store.remove(answer_ids=[answer_id],
group_instance=group_instance,
answer_instance=0)
@save_questionnaire_store
def update_questionnaire_store_with_answer_data(questionnaire_store, location, answers, schema):
survey_answer_ids = schema.get_answer_ids_for_block(location.block_id)
for answer in [a for a in answers if a.answer_id in survey_answer_ids]:
questionnaire_store.answer_store.add_or_update(answer)
if location not in questionnaire_store.completed_blocks:
questionnaire_store.completed_blocks.append(location)
def _check_same_survey(url_eq_id, url_form_type, url_collection_id, session_eq_id, session_form_type, session_collection_id):
if url_eq_id != session_eq_id \
or url_form_type != session_form_type \
or url_collection_id != session_collection_id:
raise MultipleSurveyError
def _evaluate_skip_conditions(block_json, location, schema, answer_store, metadata):
for question in schema.get_questions_for_block(block_json):
if 'skip_conditions' in question:
skip_question = evaluate_skip_conditions(question['skip_conditions'], schema, metadata, answer_store, location.group_instance)
question['skipped'] = skip_question
for answer in question['answers']:
if answer['mandatory'] and skip_question:
answer['mandatory'] = False
return block_json
def extract_answer_id_and_instance(answer_instance_id):
matches = re.match(r'^household-(\d+)-(first-name|middle-names|last-name)$', answer_instance_id)
if matches:
index, answer_id = matches.groups()
else:
answer_id = answer_instance_id
index = 0
return answer_id, int(index)
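# Illustrative behaviour of extract_answer_id_and_instance (example values, not part of the original module):
# extract_answer_id_and_instance('household-2-first-name') -> ('first-name', 2)
# extract_answer_id_and_instance('address-line-1') -> ('address-line-1', 0)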
def _redirect_to_location(collection_id, eq_id, form_type, location):
return redirect(url_for('questionnaire.get_block', eq_id=eq_id, form_type=form_type, collection_id=collection_id,
group_id=location.group_id,
group_instance=location.group_instance, block_id=location.block_id))
def _get_context(full_routing_path, block, current_location, schema, form=None):
metadata = get_metadata(current_user)
answer_store = get_answer_store(current_user)
schema_context = _get_schema_context(full_routing_path, current_location.group_instance, metadata, answer_store, schema)
rendered_block = renderer.render(block, **schema_context)
return build_view_context(block['type'], metadata, schema, answer_store, schema_context, rendered_block, current_location, form=form)
def _get_block_json(current_location, schema, answer_store, metadata):
block_json = schema.get_block(current_location.block_id)
return _evaluate_skip_conditions(block_json, current_location, schema, answer_store, metadata)
def _get_schema_context(full_routing_path, group_instance, metadata, answer_store, schema):
answer_ids_on_path = get_answer_ids_on_routing_path(schema, full_routing_path)
return build_schema_context(metadata=metadata,
schema=schema,
answer_store=answer_store,
group_instance=group_instance,
answer_ids_on_path=answer_ids_on_path)
def _get_front_end_navigation(answer_store, current_location, metadata, schema, routing_path=None):
completed_blocks = get_completed_blocks(current_user)
navigation = Navigation(schema, answer_store, metadata, completed_blocks,
routing_path, get_completeness(current_user))
block_json = schema.get_block(current_location.block_id)
if block_json['type'] != 'Introduction':
return navigation.build_navigation(current_location.group_id, current_location.group_instance)
return None
def get_page_title_for_location(schema, current_location, metadata, answer_store):
block = schema.get_block(current_location.block_id)
if block['type'] == 'Interstitial':
group = schema.get_group(current_location.group_id)
page_title = '{group_title} - {survey_title}'.format(group_title=group['title'], survey_title=schema.json['title'])
elif block['type'] == 'Question':
first_question = next(schema.get_questions_for_block(block))
question_title = get_question_title(first_question, answer_store, schema, metadata, current_location.group_instance)
page_title = '{question_title} - {survey_title}'.format(question_title=question_title, survey_title=schema.json['title'])
else:
page_title = schema.json['title']
return TemplateRenderer.safe_content(page_title)
def _build_template(current_location, context, template, schema, answer_store, metadata, routing_path=None):
front_end_navigation = _get_front_end_navigation(answer_store, current_location, metadata, schema, routing_path)
previous_location = path_finder.get_previous_location(current_location)
previous_url = previous_location.url(metadata) if previous_location is not None else None
return _render_template(context, current_location, template, front_end_navigation, previous_url, schema, metadata, answer_store)
@with_session_timeout
@with_questionnaire_url_prefix
@with_metadata_context
@with_analytics
@with_legal_basis
def _render_template(context, current_location, template, front_end_navigation, previous_url, schema, metadata, answer_store, **kwargs):
page_title = get_page_title_for_location(schema, current_location, metadata, answer_store)
return render_template(
template,
content=context,
current_location=current_location,
navigation=front_end_navigation,
previous_location=previous_url,
page_title=page_title,
metadata=kwargs.pop('metadata_context'), # `metadata_context` is used as `metadata` in the jinja templates
**kwargs,
)
def request_wants_json():
best = request.accept_mimetypes \
.best_match(['application/json', 'text/html'])
return best == 'application/json' and \
request.accept_mimetypes[best] > \
request.accept_mimetypes['text/html']
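# Illustrative outcomes, assuming Werkzeug's usual Accept-header matching (not part of the original module):
# Accept: application/json                  -> request_wants_json() is True
# Accept: text/html,application/json;q=0.9  -> request_wants_json() is False (HTML is preferred)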
|
ONSdigital/census-survey-runner
|
app/views/questionnaire.py
|
questionnaire.py
|
py
| 32,299 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26486257932
|
import dcos.config
import dcos.http
import dcos.package
import json
import logging
import os
import re
import requests
import s3
import shakedown
import subprocess
import urllib
def _init_logging():
logging.basicConfig(level=logging.INFO)
logging.getLogger('dcos').setLevel(logging.WARNING)
logging.getLogger('requests').setLevel(logging.WARNING)
_init_logging()
LOGGER = logging.getLogger(__name__)
DEFAULT_HDFS_TASK_COUNT=10
HDFS_PACKAGE_NAME='beta-hdfs'
HDFS_SERVICE_NAME='hdfs'
SPARK_PACKAGE_NAME='spark'
def hdfs_enabled():
return os.environ.get("HDFS_ENABLED") != "false"
def is_strict():
return os.environ.get('SECURITY') == 'strict'
def require_hdfs():
LOGGER.info("Ensuring HDFS is installed.")
    _require_package(HDFS_PACKAGE_NAME, options=_get_hdfs_options())
_wait_for_hdfs()
def require_spark(options=None, service_name=None):
LOGGER.info("Ensuring Spark is installed.")
_require_package(SPARK_PACKAGE_NAME, service_name, _get_spark_options(options))
_wait_for_spark(service_name)
_require_spark_cli()
# This should be in shakedown (DCOS_OSS-679)
def _require_package(pkg_name, service_name=None, options={}):
pkg_manager = dcos.package.get_package_manager()
installed_pkgs = dcos.package.installed_packages(
pkg_manager,
None,
None,
False)
pkg = next((pkg for pkg in installed_pkgs if pkg['name'] == pkg_name), None)
if (pkg is not None) and (service_name is None):
LOGGER.info("Package {} is already installed.".format(pkg_name))
elif (pkg is not None) and (service_name in pkg['apps']):
LOGGER.info("Package {} with app_id={} is already installed.".format(
pkg_name,
service_name))
else:
LOGGER.info("Installing package {}".format(pkg_name))
shakedown.install_package(
pkg_name,
options_json=options,
wait_for_completion=True)
def _wait_for_spark(service_name=None):
def pred():
dcos_url = dcos.config.get_config_val("core.dcos_url")
path = "/service{}".format(service_name) if service_name else "service/spark"
spark_url = urllib.parse.urljoin(dcos_url, path)
status_code = dcos.http.get(spark_url).status_code
return status_code == 200
shakedown.wait_for(pred)
def _require_spark_cli():
LOGGER.info("Ensuring Spark CLI is installed.")
installed_subcommands = dcos.package.installed_subcommands()
if any(sub.name == SPARK_PACKAGE_NAME for sub in installed_subcommands):
LOGGER.info("Spark CLI already installed.")
else:
LOGGER.info("Installing Spark CLI.")
shakedown.run_dcos_command('package install --cli {}'.format(
SPARK_PACKAGE_NAME))
def _get_hdfs_options():
if is_strict():
options = {'service': {'principal': 'service-acct', 'secret_name': 'secret'}}
else:
options = {"service": {}}
options["service"]["beta-optin"] = True
return options
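# Illustrative result of _get_hdfs_options() on a strict-mode cluster, derived from the branches above
# (not part of the original module):
# {'service': {'principal': 'service-acct', 'secret_name': 'secret', 'beta-optin': True}}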
def _wait_for_hdfs():
shakedown.wait_for(_is_hdfs_ready, ignore_exceptions=False, timeout_seconds=25 * 60)
def _is_hdfs_ready(expected_tasks = DEFAULT_HDFS_TASK_COUNT):
return is_service_ready(HDFS_SERVICE_NAME, expected_tasks)
def is_service_ready(service_name, expected_tasks):
running_tasks = [t for t in shakedown.get_service_tasks(service_name) \
if t['state'] == 'TASK_RUNNING']
LOGGER.info("Waiting for {n} tasks got {m} for service {s}".format(n=expected_tasks,
m=len(running_tasks),
s=service_name))
return len(running_tasks) >= expected_tasks
def no_spark_jobs(service_name):
driver_ips = shakedown.get_service_ips(service_name)
LOGGER.info("Waiting for drivers to finish or be killed, still seeing {}".format(len(driver_ips)))
return len(driver_ips) == 0
def _get_spark_options(options = None):
if options is None:
options = {}
if hdfs_enabled():
options["hdfs"] = options.get("hdfs", {})
options["hdfs"]["config-url"] = "http://api.hdfs.marathon.l4lb.thisdcos.directory/v1/endpoints"
if is_strict():
options["service"] = options.get("service", {})
options["service"]["principal"] = "service-acct"
options["security"] = options.get("security", {})
options["security"]["mesos"] = options["security"].get("mesos", {})
options["security"]["mesos"]["authentication"] = options["security"]["mesos"].get("authentication", {})
options["security"]["mesos"]["authentication"]["secret_name"] = "secret"
return options
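# Illustrative result of _get_spark_options({}) with HDFS enabled on a strict-mode cluster, derived from
# the branches above (not part of the original module):
# {
#     "hdfs": {"config-url": "http://api.hdfs.marathon.l4lb.thisdcos.directory/v1/endpoints"},
#     "service": {"principal": "service-acct"},
#     "security": {"mesos": {"authentication": {"secret_name": "secret"}}}
# }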
def run_tests(app_url, app_args, expected_output, app_name, args=[]):
task_id = submit_job(app_url=app_url,
app_args=app_args,
app_name=app_name,
args=args)
check_job_output(task_id, expected_output)
def check_job_output(task_id, expected_output):
LOGGER.info('Waiting for task id={} to complete'.format(task_id))
shakedown.wait_for_task_completion(task_id)
stdout = _task_log(task_id)
if expected_output not in stdout:
stderr = _task_log(task_id, "stderr")
LOGGER.error("task stdout: {}".format(stdout))
LOGGER.error("task stderr: {}".format(stderr))
raise Exception("{} not found in stdout".format(expected_output))
class SecretHandler():
def __init__(self, path, value):
self.payload = json.dumps({"value": value})
self.api_url = urllib.parse.urljoin(dcos.config.get_config_val("core.dcos_url"),
"secrets/v1/secret/default/{}".format(path))
self.token = dcos.config.get_config_val("core.dcos_acs_token")
self.headers = {"Content-Type": "application/json", "Authorization": "token={}".format(self.token)}
def create_secret(self):
return requests.put(self.api_url, data=self.payload, headers=self.headers, verify=False)
def delete_secret(self):
return requests.delete(self.api_url, headers=self.headers, verify=False)
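# Minimal usage sketch for SecretHandler (hypothetical secret path and value, not part of the original module):
# handler = SecretHandler("spark/service-acct-secret", "<service account private key>")
# response = handler.create_secret()   # expect a 2xx status on success
# ...run tests that need the secret...
# handler.delete_secret()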
def upload_file(file_path):
LOGGER.info("Uploading {} to s3://{}/{}".format(
file_path,
os.environ['S3_BUCKET'],
os.environ['S3_PREFIX']))
s3.upload_file(file_path)
basename = os.path.basename(file_path)
return s3.http_url(basename)
def submit_job(app_url, app_args, app_name="/spark", args=None):
    # copy the caller's list (or start fresh) so the strict-mode flags below never mutate a shared default
    args = [] if args is None else list(args)
    if is_strict():
args += ["--conf", 'spark.mesos.driverEnv.MESOS_MODULES=file:///opt/mesosphere/etc/mesos-scheduler-modules/dcos_authenticatee_module.json']
args += ["--conf", 'spark.mesos.driverEnv.MESOS_AUTHENTICATEE=com_mesosphere_dcos_ClassicRPCAuthenticatee']
args += ["--conf", 'spark.mesos.principal=service-acct']
args_str = ' '.join(args + ["--conf", "spark.driver.memory=2g"])
submit_args = ' '.join([args_str, app_url, app_args])
cmd = 'dcos spark --name={app_name} run --verbose --submit-args="{args}"'.format(app_name=app_name, args=submit_args)
LOGGER.info("Running {}".format(cmd))
stdout = subprocess.check_output(cmd, shell=True).decode('utf-8')
LOGGER.info("stdout: {}".format(stdout))
regex = r"Submission id: (\S+)"
match = re.search(regex, stdout)
return match.group(1)
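# The regex above assumes the CLI prints a line such as "Submission id: driver-20170101000000-0001";
# the exact id format is an assumption, only the "Submission id: " prefix is relied upon.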
def wait_for_executors_running(framework_name, num_executors, wait_time=600):
LOGGER.info("Waiting for executor task to be RUNNING...")
shakedown.wait_for(lambda: is_service_ready(framework_name, num_executors),
ignore_exceptions=False,
timeout_seconds=wait_time)
def kill_driver(driver_id, app_name):
LOGGER.info("Killing {}".format(driver_id))
cmd = "dcos spark --name={app_name} kill {driver_id}".format(app_name=app_name, driver_id=driver_id)
out = subprocess.check_output(cmd, shell=True).decode("utf-8")
return out
def _task_log(task_id, filename=None):
cmd = "dcos task log --completed --lines=1000 {}".format(task_id) + \
("" if filename is None else " {}".format(filename))
LOGGER.info("Running {}".format(cmd))
stdout = subprocess.check_output(cmd, shell=True).decode('utf-8')
return stdout
def is_framework_completed(fw_name):
# The framework is not Active or Inactive
return shakedown.get_service(fw_name, True) is None
|
KoddiDev/spark-streaming-mesos
|
tests/utils.py
|
utils.py
|
py
| 8,430 |
python
|
en
|
code
| 1 |
github-code
|
6
|
33975836628
|
from flask import Flask, g
from flask_login import LoginManager
import user_model
DEBUG = True
PORT = 8000
HOST = '0.0.0.0'
app = Flask(__name__)
app.secret_key = 'randomstuff'
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
@login_manager.user_loader
def load_user(userid):  # Flask-Login passes the user id stored in the session
try:
return user_model.User.get(user_model.User.id == userid)
except user_model.DoesNotExist:
return None
@app.before_request
def before_request():
"""Connect to the database before each request."""
g.db = user_model.DATABASE
g.db.connect()
@app.after_request
def after_request(response):
"""Close the databse function after each request"""
g.db.close()
return response
if __name__ == '__main__':
user_model.initialize()
    user_model.User.create_user(username="Kaka", email="[email protected]", password="password", admin=True)
    app.run(debug=DEBUG, port=PORT, host=HOST)
|
kaka21garuda/FlaskSocial
|
app.py
|
app.py
|
py
| 972 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35243133405
|
import sys
import os
import json
if "test_script" not in sys.modules:
from pyakaikkr.CompareCifKkr import CompareCifKkr
def get_kkr_struc_from_cif(ciffilepath: str, specx: str, displc: bool,
use_bravais=True, remove_temperaryfiles=True,
fmt="cif"):
"""get kkr structure parameter from cif file.
If specx is akaikkr, then displc is set to False.
If specx is akaikkr_cnd, then displc is set to True.
    If use_bravais is True, the bravais lattice type (bcc, fcc, ...) and a, b, c, alpha, beta, gamma are used.
    If use_bravais is False, aux and the lattice vectors are used.
    rmt, lmax, ..., displc are set to default values.
Args:
ciffilepath (str): cif file path
        specx (str): path to the specx executable.
        displc (bool): if True, the displc parameter is added.
use_bravais (bool, optional): use bravias lattice. Defaults to True.
remove_temperaryfiles (bool, optional): delete temporary files on exit. Defaults to True.
fmt (str, optional): file format. Defaults to "cif".
Returns:
dict: kkr structure parameters on success.
"""
comp = CompareCifKkr(ciffilepath, specx, displc=displc, fmt=fmt)
result = comp.convert_and_compare(use_bravais=use_bravais)
struc_param = None
if result == comp.SUCCESS:
struc_param = comp.get_structure_param(
remove_temperaryfiles=remove_temperaryfiles)
else:
print("failed to convert the cif file")
print("msg=", comp.msg)
print("result=", result)
sys.exit(10)
try:
os.rmdir(comp.parent_directory)
except OSError:
# ignore errors on removing output directory
pass
return struc_param
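# Minimal usage sketch (hypothetical cif path and specx location; main() below is the original driver):
# struc_param = get_kkr_struc_from_cif("Co_P63mmc.cif",
#                                      os.path.expanduser("~/kino/kit/AkaiKKRprogram.current.gfortran/akaikkr/specx"),
#                                      displc=False)
# print(json.dumps(struc_param))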
if __name__ == "__main__":
def main(path_prefix, ciffile_path, akaikkr_type="akaikkr",):
"""load data from path_prefix and convert to the PyAkaiKKR dict format.
The output is printed to stdout.
akaikkr_type can be akaikkr or akaikkr_cnd.
Args:
path_prefix (str): path prefix to AkaiKKR
            ciffile_path (str): the cif file name.
akaikkr_type (str, optional): type of AkaiKKR. Defaults to "akaikkr".
Raises:
ValueError: unknown fmt.
"""
if akaikkr_type == "akaikkr":
displc = False
elif akaikkr_type == "akaikkr_cnd":
displc = True
else:
raise ValueError("unknown akaikkr_type={}".format(akaikkr_type))
specx = os.path.join(path_prefix, akaikkr_type, "specx")
use_bravais = True
struc_param = get_kkr_struc_from_cif(ciffile_path, specx,
use_bravais=use_bravais,
displc=displc, fmt="cif")
print()
print("sturc_param")
if False:
for key, value in struc_param.items():
print(key, value)
else:
print(json.dumps(struc_param))
def define_and_get_parse():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--akaikkr", default= "kino/kit/AkaiKKRprogram.current.gfortran")
parser.add_argument("--prefix", default= "kino/kit/MaterialsLibrary")
parser.add_argument("--akaikkr_type", choices = ["akaikkr", "akaikkr_and"], default= "akaikkr")
parser.add_argument("structure_file")
# e.g. ="kino/kit/MaterialsLibrary/MaterialsLibrary/AtomWorkData/small_sites/made_by_kino/Co_P63mmc.cif"
args = parser.parse_args()
return args
homedir = os.path.expanduser("~")
args = define_and_get_parse()
main(os.path.join(homedir, args.akaikkr),
os.path.join(homedir, args.prefix, args.structure_file),
args.akaikkr_type)
|
AkaiKKRteam/AkaiKKRPythonUtil
|
util/cif2kkr_test_script/cif2kkr_convert_to_akaikkrparam_sample.py
|
cif2kkr_convert_to_akaikkrparam_sample.py
|
py
| 3,825 |
python
|
en
|
code
| 4 |
github-code
|
6
|
5308162380
|
N, K = map(int, input().split())
K = K - 5
def dfs(idx, cnt):
global answer
    # all K learnable letters have been chosen; count how many words are now readable
    if cnt == K:
        read_cnt = 0
for word in words:
for w in word:
                # stop scanning this word if it contains a letter that has not been learned
if not learn[ord(w)-ord('a')]:
break
            # for-else: reached only when the inner loop did not break
            # (this also covers an empty word, which is trivially readable)
else:
read_cnt += 1
answer = max(answer, read_cnt)
return
for i in range(idx, 26):
        # only consider letters that have not been learned yet
if not learn[i]:
learn[i] = True
            # recurse to pick the remaining learnable letters
dfs(i, cnt + 1)
            # backtrack: unlearn the letter after the recursive call
learn[i] = False
if K < 0:
print(0)
elif K == 26:
print(N)
else:
words = []
answer = 0
for _ in range(N):
        # use a set to drop duplicate letters in the word
word = set(input()[4:-4])
temp = set()
        # drop the mandatory letters (a, n, t, c, i), which are always taught
for w in word:
if w not in ['a', 'n', 't', 'c', 'i']:
temp.add(w)
        # the word is readable only if it needs at most K extra letters
if len(temp) <= K:
words.append(temp)
    # learned-letter flags for the 26 alphabet letters
learn = [False] * 26
    # mark the mandatory letters as already learned
for l in ['a', 'n', 't', 'c', 'i']:
learn[ord(l)-ord('a')] = True
dfs(0, 0)
print(answer)
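# Search-space note (not in the original solution): besides the 5 mandatory letters, the DFS picks the
# K extra letters out of the remaining 21, so it evaluates at most C(21, K) letter sets and scans every
# stored word once per set.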
|
louisuss/Algorithms-Code-Upload
|
Python/Baekjoon/BF/1062+.py
|
1062+.py
|
py
| 1,480 |
python
|
ko
|
code
| 0 |
github-code
|
6
|