seq_id (string, 7-11 chars) | text (string, 156-1.7M chars) | repo_name (string, 7-125 chars) | sub_path (string, 4-132 chars) | file_name (string, 4-77 chars) | file_ext (string, 6 classes) | file_size_in_byte (int64, 156-1.7M) | program_lang (string, 1 class) | lang (string, 38 classes) | doc_type (string, 1 class) | stars (int64, 0-24.2k, nulls present) | dataset (string, 1 class) | pt (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|---|
32102378059
|
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
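    # Iterative approach: walk the list once, detaching each node from the front
    # and pushing it onto the head of the already-reversed portion.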
def reverseList1(self, head: ListNode) -> ListNode:
new_head = None
while head is not None:
nex = head.next
head.next = new_head
new_head = head
head = nex
return new_head
def reverseList2(self, head: ListNode) -> ListNode:
if head is None or head.next is None:
return head
else:
            # Get the new head of the reversed remainder; treat everything from head.next onward as one already-reversed unit.
new_head = self.reverseList2(head.next)
            # Point head.next.next back at head, which flips the direction of the link between head and head.next.
head.next.next = head
            # Then set head.next to None and the reversal is complete.
head.next = None
return new_head
s = Solution()
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = None
new_head = s.reverseList2(head)
while new_head is not None:
print(new_head.val)
new_head = new_head.next
|
Eleanoryuyuyu/LeetCode
|
python/Linked List/206. Reverse Linked List.py
|
206. Reverse Linked List.py
|
py
| 1,238 |
python
|
en
|
code
| 3 |
github-code
|
6
|
28653090658
|
##### Native libraries
##### Python Libraries
import numpy as np
from IPython.core import debugger
breakpoint = debugger.set_trace
##### Local libraries
import Utils_Data
from Timer import Timer
##### NOTE: To download the full dataset (which will take about 30 hours on wifi, maybe less on ethernet),
##### set filename_urls to train.npy, set num_labels to 14951, and set the
##### for loop iterations to data_urls_train.size
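##### For example, a full-download configuration would look like this (values taken from the note above):
#####   filename_urls = 'train.npy'
#####   num_labels = 14951
#####   n_images = data_urls_train.size  # in the (currently commented-out) training-set loop below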
##### Path to datasets
path_urls = '../../data/image_retrieval/image_recognition/'
save_path = path_urls + 'images/'
filename_urls = 'train.npy' # Set to train.npy for the full dataset, or to one of the train_*.npy subsets below
##### Dataset format parameters
## Number of labels to use out of all the available ones
## For train.npy (max = 14951)
## For train_100.npy (max = 79)
## For train_1000.npy (max = 692)
## For train_10000.npy (max = 3487)
num_labels=50
## Percent of entries to place in train set
train_size=0.9
## Percent of entries to place in test set
test_size=0.1
# breakpoint()
##### Load dataset
dataset = np.load(path_urls+filename_urls)
##### Split dataset in train and test containing the specified number of classes
## The following function returns all entries sorted for both train and test sets.
(data_urls_train, labels_train, imgid_train, data_urls_test, labels_test, imgid_test) = Utils_Data.FormatDataset(dataset, num_labels=num_labels, train_size=train_size, test_size=test_size)
########## DOWNLOAD TRAINING SET ##############
#### UNCOMMENT THE FOLLOWING SNIPPET TO DOWNLOAD THE TRAIN SET
# n_images = data_urls_train.size
# ##### Downloads Train set
# for i in range(0,n_images):
# with Timer('Download Image Time'):
# print("Image {} out of {}".format(i, n_images))
# # image = Utils_Data.DownloadAndSaveImage(url=data_urls_train[i],out_dir=save_path,imgid=imgid_train[i])
# image = Utils_Data.DownloadResizeAndSave(url=data_urls_train[i],out_dir=save_path,imgid=imgid_train[i])
########## DOWNLOAD TEST SET ##############
#### UNCOMMENT THE FOLLOWING SNIPPET TO DOWNLOAD THE TEST SET
n_images = data_urls_test.size
##### Downloads Test set
for i in range(0,n_images):
with Timer('Download Image Time'):
print("Image {} out of {}".format(i, n_images))
# image = Utils_Data.DownloadAndSaveImage(url=data_urls_train[i],out_dir=save_path,imgid=imgid_train[i])
image = Utils_Data.DownloadResizeAndSave(url=data_urls_test[i],out_dir=save_path,imgid=imgid_test[i])
|
BradleyAllanDavis/760-project
|
data_download/Example_DownloadDataset.py
|
Example_DownloadDataset.py
|
py
| 2,412 |
python
|
en
|
code
| 4 |
github-code
|
6
|
17430806092
|
#!/usr/bin/python
# https://www.udemy.com/course/complete-python-developer-zero-to-mastery/
# 256. Building A Flask Server
# https://flask.palletsprojects.com/en/1.1.x/quickstart/
# https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/MIME_types
# https://swapi.dev/ - Star Wars API server
# http://www.mashup-template.com/templates.html - Free HTML templates
# https://html5up.net/ - Free HTML templates
# https://robohash.org/ - Robot generating API
# We need to run:
# $ source ./venv/bin/activate
# $ export FLASK_APP=server.py
# $ export FLASK_ENV=development
# $ flask run
import os
import datetime
import csv
from flask import Flask, render_template, request, send_from_directory, redirect
app = Flask(__name__)
@app.route('/')
def my_home():
print(render_template('index.html'))
return render_template('index.html')
@app.route('/<string:page_name>')
def html_page(page_name):
return render_template(page_name)
# https://flask.palletsprojects.com/en/1.1.x/quickstart/#accessing-request-data
@app.route('/submit_form', methods=['POST', 'GET'])
def submit_form():
if request.method == 'POST':
try:
data=request.form.to_dict()
write_to_csv(data)
return redirect('/thankyou.html')
        except Exception:
            return 'did not save to database'
else:
return 'something went wrong'
# https://flask.palletsprojects.com/en/1.1.x/patterns/favicon/
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static', 'assets'),
'favicon.ico', mimetype='image/vnd.microsoft.icon')
def write_to_file(data):
"""Write message to the database.txt"""
with open('database.txt', mode='a') as database:
date = str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
email = data["email"]
subject = data["subject"]
message = data["message"]
database.write(f'{date}, {email}, {subject}, {message}\n')
def write_to_csv(data):
"""Write message to the database.csv"""
with open('database.csv', newline='', mode='a') as database:
date = str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
email = data["email"]
subject = data["subject"]
message = data["message"]
message_writer = csv.writer(database, delimiter=',',
quotechar='"', quoting=csv.QUOTE_NONE)
message_writer.writerow([date, email, subject, message])
|
olexandrch/UdemyCompletePythonDeveloper
|
Sec.19 Web Development with Python/portfolio/server.py
|
server.py
|
py
| 2,518 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34864677253
|
name=input("please enter your name:")
age= input("please enter your age:")
print('My name is ',name, ' and my age is ', age)
first_name=input("Please enter your first name:")
last_name=input("Please enter your last name:")
full_name=first_name+" "+last_name
print('Hello, my name is {} and my age is {}'.format(full_name,age))
#Afternoon class
def print_lyrics():
print("Hey Jude, don't make it bad.")
print("Take a sad song and make it better.")
print_lyrics()
def repeat_lyrics():
print_lyrics()
print('Na- na- na- na- na- na- na- na- na- na\n'*5)
print_lyrics()
repeat_lyrics()
def print_twice(name):
print(name)
print(name)
print_twice("Amy")
def give_a_break():
s='break'
return s
print(give_a_break())
print_twice(give_a_break())
def cal(a,b):
return a + b
print(cal(1,2))
def my_abs(n):
if n>=0:
return n
else:
return -n
print("The absolute value is ", my_abs(-8), ".")
import math
def quadratic(a,b,c):
discriminant = b**2 - 4*a*c
if discriminant >=0:
x1= (-b + math.sqrt(discriminant))/(2*a)
x2= (-b - math.sqrt(discriminant))/(2*a)
return x1 , x2
else:
return None, None
input_a=float(input("Please enter a:"))
input_b=float(input("Please enter b:"))
input_c=float(input("Please enter c:"))
sol_1,sol_2=quadratic(input_a,input_b,input_c)
if sol_1 is not None:
print('Results are: {} and {}'.format(sol_1,sol_2))
else:
print("no solution.")
age=int(input("Enter your age"))
if age <=6:
print("kid")
elif age>=18:
print("Adult")
else:
print("teenager")
def fib(n):
if n==1 or n==2:
return 1
else:
return fib(n-2)+fib(n-1)
print(fib(5))
|
xzang1/Learning-Python--bootcamp-
|
Python day2.py
|
Python day2.py
|
py
| 1,708 |
python
|
en
|
code
| 0 |
github-code
|
6
|
19250585752
|
# Modules which need to be installed
import irc.bot
from dotenv import load_dotenv
load_dotenv()
# Setup / included imports
import os
import commands
import asyncio
prefix = os.getenv('COMMANDPREFIX')
# Make sure the Twitch credentials have been added to the .env file
if not os.getenv('TWITCHUSERNAME') or not os.getenv('TWITCHTOKEN'):
print("Please input your Twitch credentials in the .env file.")
exit(0)
# Login to IRC as the streamer and listen for commands in Twitch Chat
class TwitchListener(irc.bot.SingleServerIRCBot):
def __init__(self, username, token, channel):
self.token = token
self.channel = '#' + channel
server = 'irc.chat.twitch.tv'
port = 6667
# Login to Twitch IRC
print('Connecting to Twitch IRC: ' + server + ' on port ' + str(port))
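        # The OAuth token is used as the IRC server password, so it is the third
        # element of the (server, port, password) tuple passed below.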
irc.bot.SingleServerIRCBot.__init__(self, [(server, port, token)], username, username)
# Join this streamer's Twitch Chat channel
def on_welcome(self, c, e):
print('Joining ' + self.channel)
c.join(self.channel)
# Listen for messages, and if they start with the prefix, try to execute them as commands
def on_pubmsg(self, c, e):
if e.arguments[0][:1] == prefix:
cmd = e.arguments[0].split(' ')[0][1:]
commands.handleCommand(cmd)
return
# Load the Twitch login values from the .env file and run the IRC 'bot' above
bot = TwitchListener(str(os.getenv('TWITCHUSERNAME')), str(os.getenv('TWITCHTOKEN')), str(os.getenv('TWITCHUSERNAME')))
bot.start()
|
R2D2VaderBeef/SectorsEdgeStreamControl
|
main.py
|
main.py
|
py
| 1,555 |
python
|
en
|
code
| 1 |
github-code
|
6
|
18132740067
|
# Project Name: Rock Paper Scissors game
# Creator: Jay Chen
# Create Date: 2017/6/3
import random
options = ['rock','paper','scissors']
user_wins = 0
computer_wins = 0
while True:
user_choice = input("打rock/paper/scissors 或選擇q來退出遊戲:")
if user_choice == 'q':
print("遊戲退出!")
break
if user_choice not in options:
print("請在確認一次有沒有打錯! 然後再輸入一次!")
continue
random_number = random.randint(0,2)
computer_choice = options[random_number]
print("你的選擇:{} 敵方的選擇:{}".format(user_choice, computer_choice))
if user_choice == "rock" and computer_choice == "scissors":
print("你贏了!")
user_wins += 1
elif user_choice == 'scissors' and computer_choice == 'paper':
print("你贏了!")
user_wins += 1
elif user_choice == 'paper' and computer_choice == "rock":
print("你贏了!")
user_wins += 1
else:
print('你輸了!')
computer_wins += 1
print("你總共玩了{}局, 贏了{}次, 輸了{}次".format(user_wins + computer_wins, user_wins, computer_wins))
|
JayChen1060920909/Projects
|
Rock Paper Scissors.py
|
Rock Paper Scissors.py
|
py
| 1,178 |
python
|
en
|
code
| 1 |
github-code
|
6
|
3618688983
|
import graphviz as gv
from graphvizual import *
class Edge:
def __init__(self,node_0,node_1,weight):
self.node_0 = node_0
self.node_1 = node_1
self.weight= weight
class Graph_0:
def __init__(self):
self.list_edges =[]
def add_edge(self,start,end,weight):
self.list_edges.append(Edge(start,end,weight))
self.buble_sort()
return self
def list_nodes(self):
list=[]
for i in self.list_edges:
if i.node_0 not in list:
list.append(i.node_0)
if i.node_1 not in list:
list.append(i.node_1)
return list
def buble_sort(self):
length = len(self.list_edges) - 1
sorted = False
while not sorted:
sorted = True
for element in range(0, length):
if self.list_edges[element].weight > self.list_edges[element + 1].weight:
sorted = False
hold = self.list_edges[element + 1]
self.list_edges[element + 1] = self.list_edges[element]
self.list_edges[element] = hold
return self
def making_friends(self,node):
list=[]
for i in self.list_edges:
if i.node_0==node:
list.append(i)
return list
def print_list_edges(self):
list_e=[]
for i in self.list_edges:
list_e.append([i.node_0,i.node_1,i.weight])
print(list_e)
def sort_friends(self,friends):
length = len(friends) - 1
sorted = False
while not sorted:
sorted = True
for element in range(0,length):
# if friends[element][1] > friends[element + 1][1]:
if friends[element].weight > friends[element + 1].weight:
sorted = False
hold = friends[element + 1]
friends[element + 1] = friends[element]
friends[element] = hold
return friends
def creating_antecendents(self):
antecendents = {}
for i in self.list_nodes():
antecendents[str(i)]=0
return(antecendents)
def nodes_values(self):
nodes_values = {}
for i in self.list_nodes():
nodes_values[str(i)] = 100
return (nodes_values)
def dijkstra_alg(self,node_start,node_end):
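        # Dijkstra-style search: keep tentative distances in nodes_values and the best-known
        # predecessor of each node in antecendents, repeatedly relaxing the cheapest frontier
        # edges until node_end is reached, then walk the predecessors back to rebuild the path.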
nodes_values=self.nodes_values()
antecendents=self.creating_antecendents()
nodes_values[node_start]=0
list_visited_nodes=[str(node_start)]
list_visited_edges=[]
friends=[]
roar=1
while roar!=20:
for k in list_visited_nodes:
if roar==20:
break
friends_i=self.making_friends(k)
for i in friends_i:
if i.weight+nodes_values[str(i.node_0)]<nodes_values[str(i.node_1)]:
nodes_values[i.node_1]=nodes_values[i.node_0]+i.weight
antecendents[i.node_1]=i.node_0
for i in friends_i:
if i not in friends:
friends.append(i)
self.sort_friends(friends)
for k in friends:
if k not in list_visited_edges and k.node_1 not in list_visited_nodes and k.node_0 !=node_end:
list_visited_edges.append(k)
if k.node_0 not in list_visited_nodes:
list_visited_nodes.append(k.node_0)
if k.node_0==node_end:
roar=20
break
if k.node_1 not in list_visited_nodes:
list_visited_nodes.append(k.node_1)
if k.node_1==node_end:
roar=20
break
node=node_end
path_d=[]
while antecendents[node] != 0:
# path_d.append(antecendents[node])
node_ant=antecendents[node]
# node=antecendents[node]
# path_visible=[]
for i in list_visited_edges:
if i.node_0==node_ant and i.node_1==node:
# path_d.append([i.node_0,i.node_1,i.weight])
path_d.insert(0,[i.node_0,i.node_1,i.weight])
break
node=node_ant
# for i in list_visited_edges:
# path_visible.append([i.node_0,i.node_1,i.weight])
# path_visible.append(nodes_values[node_end])
return path_d
def drawing(self,path):
Drawing = gv.Digraph(format='png')
list_e = [['B', 'C', 1], ['C', 'E', 1], ['E', 'A', 2], ['A', 'B', 3], ['D', 'E', 3], ['A', 'D', 3], ['C', 'D', 5], ['B', 'D', 6]]
for item in list_e:
node_00 = str(item[0])
node_11 = str(item[1])
wei = str(item[2])
Drawing.edge(node_00, node_11, wei, color='black')
Drawing = apply_styles(Drawing, styles)
start = Drawing.render(filename=str(10))
Drawing.render(view=True)
list = []
Drawing = Graph(format='png')
for i in range(1, len(path) + 1):
print(path)
Drawing = gv.Digraph(format='png')
list.append([str(path[i - 1][0]), str(path[i - 1][1]), str(path[i - 1][2])])
for item in list_e:
node_00 = str(item[0])
node_11 = str(item[1])
wei = str(item[2])
if [node_00, node_11, wei] in list:
Drawing.edge(node_00, node_11, wei, color='red')
elif [node_11, node_00, wei] in list:
Drawing.edge(node_00, node_11, wei, color='red')
else:
Drawing.edge(node_00, node_11, wei, color='black')
Drawing = apply_styles(Drawing, styles)
i = Drawing.render(filename=str(i))
Drawing.render(view=True)
if __name__ == "__main__":
d=Graph_0()
d.add_edge('A', 'B', 3)
d.add_edge('B', 'C', 1)
d.add_edge('B', 'D', 6)
d.add_edge('C', 'E', 1)
d.add_edge('C', 'D', 5)
d.add_edge('D', 'E', 3)
d.add_edge('E', 'A', 2)
d.add_edge('A', 'D', 3)
# print(d.dijkstra_alg('C','B'))
path=d.dijkstra_alg('C','B')
d.drawing(path)
# print(d.list_nodes())
|
AnnaPiatek/Graph
|
Dijkstra.py
|
Dijkstra.py
|
py
| 6,638 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24923354544
|
from pytesseract import Output
import pytesseract
import argparse
import imutils
import cv2
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to input image")
ap.add_argument("-o", "--output", required=False, help="path to output image. override if not given.")
ap.add_argument("-a", "--angle", required=True, help="rotation angle", type=int)
args = vars(ap.parse_args())
original = cv2.imread(args["image"])
if original is None:
exit("Thats not an image =(")
# rotate the image and save to disk
angle = args["angle"]
rotated = imutils.rotate_bound(original, angle=args["angle"])
output = args.get("output")
if output:
text = f"Saving rotated image (by {angle} degrees) into: {output}"
else:
output = args["image"]
text = f"Overwriting rotated image (by {angle} degrees) into: {output}"
print(text)
cv2.imwrite(output, rotated)
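# Example invocation (hypothetical file names):
#   python rotate_image.py --image photo.jpg --angle 90 --output photo_rotated.jpg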
|
uborzz/images-playground
|
tools/rotate_image.py
|
rotate_image.py
|
py
| 887 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40428186364
|
# python3
def max_gold_knapsack(W, n, gold_bars):
weight = [[0 for i in range(n+1)] for j in range(W+1)]
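    # weight[w][i] holds the heaviest load achievable with capacity w using only the
    # first i bars (classic 0/1 knapsack where each bar's value equals its weight).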
for i in range(1, n+1):
for w in range(1, W+1):
weight[w][i] = weight[w][i-1]
if gold_bars[i-1] <= w:
wgt = weight[w - gold_bars[i-1]][i-1] + gold_bars[i-1]
if weight[w][i] < wgt:
weight[w][i] = wgt
return weight[W][n]
if __name__ == "__main__":
W, n = [int(i) for i in input().split()]
gold_bars = [int(i) for i in input().split()]
print(max_gold_knapsack(W, n, gold_bars))
|
probhakarroy/Algorithms-Data-Structures
|
Algorithmic Toolbox/week6/max_gold.py
|
max_gold.py
|
py
| 595 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10693657978
|
def crowstorm_lol(list_args: list) -> str:
from math import sqrt, pow
[xf, yf, xi, yi, vi, r1, r2] = list(map(int, list_args))
distance: float = sqrt(pow((xf - xi), 2) + pow((yf - yi), 2))
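    # The effect connects ('Y') when the combined radii r1 + r2 cover the center-to-center
    # distance plus 1.5 * vi, i.e. the ground the target covers in 1.5 s at speed vi.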
if r1 + r2 >= distance + 1.5 * vi:
return 'Y'
return 'N'
def main() -> None:
while True:
try:
list_args: list[str] = input().split()
print(crowstorm_lol(list_args))
except EOFError:
break
if __name__ == '__main__':
main()
|
pdaambrosio/python_uri
|
Beginner/uri2203.py
|
uri2203.py
|
py
| 502 |
python
|
en
|
code
| 0 |
github-code
|
6
|
6453212033
|
from django.conf.urls import url
from testuser import views
app_name = 'test'
urlpatterns = [
# url(r'^$',views.logout, name = 'logout'),
url(r'^$',views.loginIndex, name = 'loginIndex'),
url(r'^login/$',views.login, name = 'login'),
# url(r'^signUp/$',views.signup, name = 'signup'),
# url(r'^forgotPass/$',views.forgot, name = 'forgot'),
# url(r'^login/check/$',views.loginCheck, name = 'logincheck'),
# url(r'^signUp/check/$',views.signupCheck, name = 'signupcheck'),
]
|
baivarn-tjr/SYOT-python
|
SYOT/testuser/urls.py
|
urls.py
|
py
| 503 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40761579635
|
"""Module containing the Fetcher Class to get financial
data of given ticker using the yfinance package."""
from datetime import datetime, timedelta
import yfinance as yf
import pandas as pd
class Fetcher:
"""Class that fetches data about a given ticker
This class does a few things:
1. Checks for validity of arguments before instantiating
2. Pulls data and checks for whether it contains > 1 day.
Attributes:
ticker: String containing the inputted ticker name
start_date: Beginning day of retrieved financial data
end_date: Final day of retrieved financial data
"""
def __init__(self, args: dict) -> None:
fetcher_args = self._check_args_validity(args)
self._ticker = fetcher_args["ticker"]
self._start_date = fetcher_args["start_date"]
self._end_date = fetcher_args["end_date"]
def _check_args_validity(self, args: dict) -> dict:
"""Checks for the validity of the CLI arguments
This function checks for the validity of the input arguments
before initializing the class. Otherwise, it throws an exception
Args:
args: dictionary containing the input CLI Arguments
Returns:
dictionary of parsed arguments to be used in yfinance
"""
# Check for possible ticker errors
ticker = args["ticker"]
# Datetime automatically checks for datetime argument errors
start_date = datetime.strptime(args["b"], "%Y%m%d")
end_date = datetime.strptime(args["e"], "%Y%m%d") \
if args["e"] is not None else datetime.now()
# Compensate for yfinance bug, more specifically:
# API Given Tracks until 1 day before end
end_date += timedelta(days = 1)
# Start date cannot be later than current date
if start_date > datetime.now():
raise ValueError("Start date cannot be after current time")
# Start date cannot be later than the ending date
if start_date > end_date:
raise ValueError("End Date is earlier than Start Date")
fetcher_args = {
"ticker": ticker,
"start_date": start_date,
"end_date": end_date
}
return fetcher_args
def fetch_data(self) -> pd.DataFrame:
"""Function that fetches data from yfinance.
After checking, it checks for the data validity before proceeding.
Returns:
Dataframe with the columns representing financial data
of the given ticker, arranged from earliest to latest date.
"""
tracker = yf.Ticker(self._ticker)
try:
data: pd.DataFrame = tracker.history(
start = self._start_date,
end = self._end_date
)[self._start_date : self._end_date]
if len(data) == 0:
raise Exception("No data available for given ticker.")
if len(data) == 1:
raise Exception("Only 1 data point seen. Check time period.")
return data
# Error can be caused as raw date is converted to seconds (Line 150):
# https://github.com/ranaroussi/yfinance/blob/main/yfinance/base.py
# Best solution is to try a date that's more recent, within 50 years
except OverflowError as err:
raise ValueError(
"Start date too distant. Try a start date within 50 years."
) from err
except BaseException as err:
raise err
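# A minimal usage sketch (hypothetical values; the real CLI layer builds this dict):
#   fetcher = Fetcher({"ticker": "MSFT", "b": "20200101", "e": None})
#   prices = fetcher.fetch_data()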
|
webclinic017/YSC4228-QuantFin
|
scrape_mkt_data/tools/fetcher.py
|
fetcher.py
|
py
| 3,536 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17169060948
|
from django.urls import path
from . import views
urlpatterns=[
path('sub/',views.SubjectVW,name='sub'),
path('trainer/',views.TrainerVW,name='trainer'),
path('profile/',views.TranierDisplay,name='profile'),
path('batchvw/',views.BatchVW,name='batchvw'),
path('bdisplay/',views.BatchDisplay,name='bdisplay'),
path('trainerupdate/<pk>/',views.TrainerUP,name='trainerupdate'),
path('Home/',views.Home,name='Home'),
]
|
mithun-gowda/PyInstitute
|
Batch/urls.py
|
urls.py
|
py
| 444 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21729743444
|
class user():
def logged_in(self):
print('You have logged in')
class warden(user):
def __init__(self, name, age, specification, weapon, strength):
self.name = name
self.age = age
self.specification = specification
self.weapon = weapon
self.strength = strength
def attack(self):
print(f'The warden is attacking with: {self.strength}')
class witch():
def __init__(self, name, age, specification, weapon, magic_power):
self.name = name
self.age = age
self.specification = specification
self.weapon = weapon
self.magic_power = magic_power
def attack(self):
print(f'Morrigen is spelling with her {self.magic_power}')
warden_1 = warden('Roman', 22, 'Swordsman', 'Dual daggers', 37 )
print('The grey Warden: ', warden_1.name, '\n He is', warden_1.age, 'years old', '\n He is an experienced ', warden_1.specification, '\n At the moment you are using ', warden_1.weapon, '\n His current strength is ', warden_1.strength)
warden_1.attack()
def warden_attack_checker(warden_1):
while warden_1.strength >= 70:
print('The warden is causing 40% of damage')
break
else:
print('The warden is causing 20% of damage')
warden_attack_checker(warden_1)
witch_1 = witch('Morrigan', 25, 'Witch of the Wilds', 'Magic coal stuff', 74)
print('The wardens ally: ', witch_1.name, '\n She is', witch_1.age, 'years old', '\n She is an experienced ', witch_1.specification, '\n At the moment she is using ', witch_1.weapon, '\n her current power of magic is ', witch_1.magic_power)
witch_1.attack()
def witch_spell_checker(witch_1):
while witch_1.magic_power >= 60:
print('Morrigan hurts enemies with 40% of damage')
break
else:
print('Morrigan hurts enemies with 20% of damage')
witch_spell_checker(witch_1)
def player_attack(char):  ### Here the polymorphism principle is exercised: we define one function with a parameter 'char' and call attack(), a method common to both classes, so the same call works for instances of either class
char.attack()
player_attack(warden_1)
player_attack(witch_1)
## However, there is another way to achieve the same result
for char in [warden_1, witch_1]: ## we get the same result
char.attack()
|
TheGurtang/Python
|
Class_Inheritance_2.py
|
Class_Inheritance_2.py
|
py
| 2,402 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17549816996
|
from flask import Flask, render_template, request
from tensorflow.keras.layers import Dense, Embedding, Bidirectional, LSTM, Concatenate, Dropout
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import Input, Model
import gensim
import numpy as np
import BahdanauAttention  # load the model .py module
from konlpy.tag import Mecab
import pickle
import tensorflow as tf
import re
lstm_model = BahdanauAttention.BahdanauAttention(64)
app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False  # keep Korean characters from being garbled in JSON responses
wv_model = gensim.models.Word2Vec.load('model/aihub_review_6.model')
mecab = Mecab(dicpath=r"C:\mecab\mecab-ko-dic")  # Mecab dictionary path, configured for Windows
tokenizer = pickle.load(open('model/tokenizer.pickle','rb'))
############ Model section
max_len = 100
EMBEDDING_DIM = 100
sequence_input = Input(shape=(max_len,), dtype='int32')
VOCAB_SIZE = len(tokenizer.index_word) + 1
EMBEDDING_DIM = 100
embedding_matrix = np.zeros((VOCAB_SIZE, EMBEDDING_DIM))
# Walk through the word index in the tokenizer and fetch the 100-dimensional word2vec vector for each word
for word, idx in tokenizer.word_index.items():
embedding_vector = wv_model[word] if word in wv_model else None
if embedding_vector is not None:
embedding_matrix[idx] = embedding_vector
embedded_sequences = Embedding(VOCAB_SIZE,
EMBEDDING_DIM,
input_length=max_len,
                               weights=[embedding_matrix],  # weights come from the embedding_matrix built above
                               trainable=False  # training of the embedding layer must be disabled
)(sequence_input)
# embedded_sequences = Embedding(vocab_size, 128, input_length=max_len, mask_zero = True)(sequence_input)
lstm = Bidirectional(LSTM(64, dropout=0.5, return_sequences=True))(embedded_sequences)
lstm, forward_h, forward_c, backward_h, backward_c = Bidirectional(
LSTM(64, dropout=0.5, return_sequences=True, return_state=True))(lstm)
state_h = Concatenate()([forward_h, backward_h])  # hidden state
state_c = Concatenate()([forward_c, backward_c])  # cell state
attention = lstm_model  # defines the attention weight size
context_vector, attention_weights = attention(lstm, state_h)
dense1 = Dense(20, activation="relu")(context_vector)
dropout = Dropout(0.5)(dense1)
output = Dense(1, activation="sigmoid")(dropout)
model = Model(inputs=sequence_input, outputs=output)
model.load_weights('model/best_model.h5')
stopwords = ['도', '는', '다', '의', '가', '이', '은', '한', '에', '하', '고', '을', '를', '인', '듯', '과', '와', '네', '들', '듯', '지', '임', '게', '만', '게임', '겜', '되', '음', '면']
def sentiment_predict(new_sentence):
new_sentence = re.sub(r'[^ㄱ-ㅎㅏ-ㅣ가-힣 ]','', new_sentence)
    new_sentence = mecab.morphs(new_sentence)  # tokenize
    new_sentence = [word for word in new_sentence if word not in stopwords]  # remove stopwords
    encoded = tokenizer.texts_to_sequences([new_sentence])  # integer encoding
    pad_new = pad_sequences(encoded, maxlen = max_len,padding='post')  # padding
    score = float(model.predict(pad_new))  # predict
return round(score, 2)
    # if(score > 0.5):
    #     print("{:.2f}% probability of being abusive language.".format(score * 100))
    # else:
    #     print("{:.2f}% probability of not being abusive language.".format((1 - score) * 100))
@app.route('/', methods=['GET','POST'])
def test():
return render_template('user.html')
@app.route('/post', methods=['GET','POST'])
def post():
original_test = request.form['test']
score = sentiment_predict(original_test)
return render_template('post.html', score=score)
@app.route('/ajax_model', methods=['GET','POST'])
def ajax_model():
original_test = request.json['send_data']
score = sentiment_predict(original_test)
return str(score*100)
if __name__ == '__main__':
app.run()
|
rlagywns0213/korea_bad_comments_analysis
|
comment_confirm.py
|
comment_confirm.py
|
py
| 3,906 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29541301753
|
from django.contrib import admin, messages
from .models import Poll, Question, Choice
class ChoiceInline(admin.StackedInline):
model = Choice
extra = 0
class QuestionInline(admin.StackedInline):
model = Question
readonly_fields = ['question_type']
extra = 0
class PollAdmin(admin.ModelAdmin):
inlines = [QuestionInline]
fieldsets = [
(None, {'fields': ['name', 'slug', 'status', 'description']}),
('DATE INFO', {'fields': [('starting', 'finished')]})
]
prepopulated_fields = {'slug': ('name',)}
readonly_fields = [
'starting',
'finished',
]
def save_model(self, request, obj, form, change):
"""Save Model override for access control of the poll"""
import datetime
def get_message(msg, type):
messages.add_message(
request,
type,
f'{msg}!'
)
if not obj.starting and obj.status == 'IN_PROGRESS':
obj.starting = datetime.datetime.now()
            get_message('Poll has started', messages.SUCCESS)
obj.save()
if obj.starting and not obj.finished and obj.status == 'FINISHED':
obj.finished = datetime.datetime.now()
            get_message('Poll has finished', messages.SUCCESS)
obj.save()
if not (obj.starting or obj.finished) and obj.status != 'WAITING':
obj.status = 'WAITING'
get_message('Woo Wee Woo Waa! Error!', messages.ERROR)
obj.save()
if not obj.id:
obj.save()
class QuestionAdmin(admin.ModelAdmin):
inlines = [ChoiceInline]
# def save_model(self, request, obj, form, change):
# When Admin choose type of the question is text, answer choices are removing
# choices = Choice.objects.filter(question=obj)
# if obj.question_type == '1' and choices:
# choices.delete()
# obj.save()
admin.site.register(Poll, PollAdmin)
admin.site.register(Question, QuestionAdmin)
admin.site.register(Choice)
|
RamilPowers/poll_app
|
api/admin.py
|
admin.py
|
py
| 2,063 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42629975620
|
from unittest import TestCase
import os
from yapic_io.connector import io_connector
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from yapic_io import TiffConnector, Dataset, PredictionBatch
import pytest
from tifffile import memmap
base_path = os.path.dirname(__file__)
class TestPredictionBatch(TestCase):
@pytest.fixture(autouse=True)
def setup(self, tmpdir):
self.tmpdir = tmpdir.strpath
def test_computepos_1(self):
img_path = os.path.abspath(os.path.join(
base_path,
'../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
label_path = os.path.join(base_path, '/path/to/nowhere')
c = TiffConnector(img_path, label_path)
d = Dataset(c)
size = (1, 1, 1)
batch_size = 1
p = PredictionBatch(d, batch_size, size)
self.assertEqual(len(p._all_tile_positions), 6 * 4 * 3)
tilepos = [(p[0], tuple(p[1])) for p in p._all_tile_positions]
self.assertEqual(len(tilepos),
len(set(tilepos)))
def test_computepos_2(self):
img_path = os.path.abspath(os.path.join(
base_path,
'../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
label_path = os.path.join(base_path, '/path/to/nowhere')
c = TiffConnector(img_path, label_path)
d = Dataset(c)
size = (3, 6, 4)
batch_size = 1
p = PredictionBatch(d, batch_size, size)
val = [(0, (0, 0, 0))]
for pos, valpos in zip(p._all_tile_positions, val):
assert_array_equal(pos[1], np.array(valpos[1]))
self.assertEqual(pos[0], valpos[0])
def test_computepos_3(self):
img_path = os.path.abspath(os.path.join(
base_path,
'../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
label_path = os.path.join(base_path, '/path/to/nowhere')
c = TiffConnector(img_path, label_path)
d = Dataset(c)
size = (2, 6, 4)
batch_size = 1
p = PredictionBatch(d, batch_size, size)
val = [(0, (0, 0, 0)), (0, (1, 0, 0))]
for pos, valpos in zip(p._all_tile_positions, val):
assert_array_equal(pos[1], np.array(valpos[1]))
self.assertEqual(pos[0], valpos[0])
def test_getitem_1(self):
img_path = os.path.abspath(os.path.join(
base_path,
'../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
label_path = os.path.join(base_path, '/path/to/nowhere')
c = TiffConnector(img_path, label_path)
d = Dataset(c)
size = (1, 6, 4)
batch_size = 2
p = PredictionBatch(d, batch_size, size)
        # batch size is 2, so the first 2 tiles go into the first batch
        # (size two) and the third tile is in the second batch. The second
        # batch has only size 1 (smaller than the specified batch size)
        # because it holds the remainder.
self.assertEqual(len(p), 2)
self.assertEqual(p[0].pixels().shape, (2, 3, 1, 6, 4))
self.assertEqual(p[1].pixels().shape, (1, 3, 1, 6, 4))
def test_getitem_2(self):
img_path = os.path.abspath(os.path.join(
base_path,
'../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
label_path = os.path.join(base_path, '/path/to/nowhere')
c = TiffConnector(img_path, label_path)
d = Dataset(c)
size = (1, 6, 4)
batch_size = 3
p = PredictionBatch(d, batch_size, size)
        # batch size is 3, which means all 3 templates fit in one batch
self.assertEqual(len(p), 1)
self.assertEqual(p[0].pixels().shape, (3, 3, 1, 6, 4))
def test_current_tile_positions(self):
img_path = os.path.abspath(os.path.join(
base_path,
'../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
label_path = os.path.join(base_path, '/path/to/nowhere')
c = TiffConnector(img_path, label_path)
d = Dataset(c)
size = (1, 6, 4)
batch_size = 2
p = PredictionBatch(d, batch_size, size)
val = [(0, (0, 0, 0)), (0, (1, 0, 0))]
for pos, valpos in zip(p[0].current_tile_positions, val):
assert_array_equal(pos[1], np.array(valpos[1]))
self.assertEqual(pos[0], valpos[0])
val = [(0, (2, 0, 0))]
for pos, valpos in zip(p[1].current_tile_positions, val):
assert_array_equal(pos[1], np.array(valpos[1]))
self.assertEqual(pos[0], valpos[0])
def test_put_probmap_data(self):
img_path = os.path.abspath(os.path.join(
base_path,
'../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
label_path = os.path.join(base_path, '/path/to/nowhere')
c = TiffConnector(img_path, label_path, savepath=self.tmpdir)
d = Dataset(c)
size = (1, 6, 4)
batch_size = 1
p = PredictionBatch(d, batch_size, size)
data = np.ones((1, 2, 1, 6, 4))
p[0].put_probmap_data(data)
p[1].put_probmap_data(data)
p[2].put_probmap_data(data)
def test_put_probmap_data_2(self):
img_path = os.path.abspath(os.path.join(
base_path,
'../test_data/tiffconnector_1/im/6width4height3slices_rgb.tif'))
label_path = os.path.join(base_path, '/path/to/nowhere')
c = TiffConnector(img_path, label_path, savepath=self.tmpdir)
d = Dataset(c)
size = (1, 2, 2)
batch_size = 1
p = PredictionBatch(d, batch_size, size)
pixel_val = 0
for mb in p:
pixel_val += 10
data = np.ones((1, 2, 1, 2, 2)) * pixel_val
mb.put_probmap_data(data)
pixelmap = memmap(os.path.join(self.tmpdir,
'6width4height3slices_rgb_class_1.tif'))
# zslice 0
val_0 = np.array([[10., 10., 30., 30., 50., 50.],
[10., 10., 30., 30., 50., 50.],
[20., 20., 40., 40., 60., 60.],
[20., 20., 40., 40., 60., 60.]])
assert_array_almost_equal(pixelmap[0, :, :, 0], val_0)
# zslice 1
val_1 = np.array([[70., 70., 90., 90., 110., 110.],
[70., 70., 90., 90., 110., 110.],
[80., 80., 100., 100., 120., 120.],
[80., 80., 100., 100., 120., 120.]])
assert_array_almost_equal(pixelmap[1, :, :, 0], val_1)
# zslice 2
val_2 = np.array([[130., 130., 150., 150., 170., 170.],
[130., 130., 150., 150., 170., 170.],
[140., 140., 160., 160., 180., 180.],
[140., 140., 160., 160., 180., 180.]])
assert_array_almost_equal(pixelmap[2, :, :, 0], val_2)
def test_put_probmap_data_3(self):
img_path = os.path.abspath(os.path.join(
base_path, '../test_data/tiffconnector_1/im/*'))
label_path = os.path.abspath(os.path.join(
base_path, '../test_data/tiffconnector_1/labels/*'))
c = TiffConnector(img_path, label_path, savepath=self.tmpdir)
d = Dataset(c)
size = (1, 3, 4)
batch_size = 2
p = PredictionBatch(d, batch_size, size)
data = np.ones((2, 3, 1, 3, 4))
p[0].put_probmap_data(data)
data = np.ones((2, 3, 1, 3, 4))
p[1].put_probmap_data(data)
data = np.ones((2, 3, 1, 3, 4))
p[2].put_probmap_data(data)
def test_put_probmap_data_when_no_labels_available(self):
img_path = os.path.abspath(os.path.join(
base_path, '../test_data/tiffconnector_1/im/*'))
c = io_connector(img_path, '', savepath=self.tmpdir)
d = Dataset(c)
size = (1, 3, 4)
batch_size = 2
p = PredictionBatch(d, batch_size, size)
data = np.ones((2, 2, 1, 3, 4))
p[0].put_probmap_data(data)
data = np.ones((2, 2, 1, 3, 4))
p[1].put_probmap_data(data)
data = np.ones((2, 2, 1, 3, 4))
p[2].put_probmap_data(data)
val = ['40width26height3slices_rgb_class_1.tif',
'40width26height3slices_rgb_class_2.tif']
self.assertEqual(sorted(os.listdir(self.tmpdir)), val)
def test_put_probmap_data_multichannel_label(self):
img_path = os.path.abspath(os.path.join(
base_path, '../test_data/tiffconnector_1/im/*'))
label_path = os.path.abspath(os.path.join(
base_path, '../test_data/tiffconnector_1/labels_multichannel/*'))
c = TiffConnector(img_path, label_path, savepath=self.tmpdir)
d = Dataset(c)
original_labels = c.original_label_values_for_all_images()
res = c.calc_label_values_mapping(original_labels)
d = Dataset(c)
size = (1, 3, 4)
batch_size = 1
p = PredictionBatch(d, batch_size, size)
data = np.ones((1, 6, 1, 3, 4))
p[0].put_probmap_data(data)
def test_prediction_loop(self):
# mock classification function
def classify(pixels, value):
return np.ones(pixels.shape) * value
# define data locations
pixel_image_dir = os.path.join(
base_path, '../test_data/tiffconnector_1/im/*.tif')
label_image_dir = os.path.join(
base_path, '../test_data/tiffconnector_1/labels/*.tif')
tile_size = (1, 5, 4) # size of network output layer in zxy
        padding = (0, 0, 0)  # padding of network input layer in zxy,
        # with respect to the output layer
# Make training_batch mb and prediction interface p with
# TiffConnector binding.
c = TiffConnector(pixel_image_dir,
label_image_dir, savepath=self.tmpdir)
p = PredictionBatch(Dataset(c), 2, tile_size, padding_zxy=padding)
self.assertEqual(len(p), 255)
self.assertEqual(p.labels, {1, 2, 3})
# classify the whole bound dataset
for counter, item in enumerate(p):
pixels = item.pixels() # input for classifier
mock_classifier_result = classify(pixels, counter)
# pass classifier results for each class to data source
item.put_probmap_data(mock_classifier_result)
def test_pixel_dimensions(self):
img_path = os.path.abspath(os.path.join(
base_path, '../test_data/tiffconnector_1/im/*'))
c = io_connector(img_path, '', savepath=self.tmpdir)
d = Dataset(c)
size = (1, 5, 4)
batch_size = 2
p = PredictionBatch(d, batch_size, size)[0]
print(p.pixels().shape)
self.assertEqual((2, 3, 1, 5, 4), p.pixels().shape)
p.set_pixel_dimension_order('bzxyc')
self.assertEqual((2, 1, 5, 4, 3), p.pixels().shape)
|
yapic/yapic_io
|
yapic_io/tests/test_prediction_batch.py
|
test_prediction_batch.py
|
py
| 10,948 |
python
|
en
|
code
| 1 |
github-code
|
6
|
27560756973
|
# preprocessing data
data = open("input/day14.txt").read().splitlines()
data = [line.split("->") for line in data]
data = [[x.strip().split(",") for x in line] for line in data]
data = [[[int(num) for num in pair] for pair in line] for line in data]
balance_x = 450
data = [[[pair[0] - balance_x, pair[1]] for pair in line] for line in data]
# %% With small sample
# data = open("input/day14_small.txt").read().splitlines()
# data = [line.split("->") for line in data]
# data = [[x.strip().split(",") for x in line] for line in data]
# data = [[[int(num) for num in pair] for pair in line] for line in data]
# balance_x = 490
# data = [[[pair[0] - balance_x, pair[1]] for pair in line] for line in data]
# %%
# flatten data
only_x = [item for line in data for pair in line for item in pair[:1]]
only_y = [item for line in data for pair in line for item in pair[1:]]
min_x, max_x = min(only_x), max(only_x)
min_y, max_y = min(only_y), max(only_y)
# %%
def create_wall_of_rock(ins):
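    # Mark every cell on each straight (horizontal or vertical) segment between
    # consecutive points of one input line as rock (value 1) in the global grid `table`.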
global table
for index in range(len(ins)):
if index == len(ins) - 1:
break
first = ins[index]
next_ = ins[index + 1]
if first[1] == next_[1]:
# all point between with the same y
for x in range(min(first[0], next_[0]), max(first[0], next_[0]) + 1):
table[first[1]][x] = 1
if first[0] == next_[0]:
for y in range(min(first[1], next_[1]), max(first[1], next_[1]) + 1):
table[y][first[0]] = 1
# %% part 1
table = [[0 for x in range(max_x + 2)] for y in range(max_y + 2)]
for instruction in data:
create_wall_of_rock(instruction)
# %%
from copy import deepcopy
drop_point = [500 - balance_x, 0]
void_deep = max_y + 1
count_sand_before_go_void = 0
def model_sand_drop():
global table, drop_point, void_deep, count_sand_before_go_void
current = deepcopy(drop_point)
while True:
x_current = current[0]
y_current = current[1]
# if below still empty, continue drop down
if y_current >= void_deep:
break
if table[y_current + 1][x_current] == 0:
current[1] += 1
else:
# if below is wall, check the left first
if table[y_current + 1][x_current - 1] == 0:
current[0] -= 1
current[1] += 1
else:
# if the left is wall, check the right
if table[y_current + 1][x_current + 1] == 0:
current[0] += 1
current[1] += 1
else:
# if both left and right are wall, stop
count_sand_before_go_void += 1
break
    # return whether this grain of sand fell into the void
table[current[1]][current[0]] = 2
# print(current)
return current[1] >= void_deep
# %%
while True:
if model_sand_drop():
break
# %% part 2
from copy import deepcopy
drop_point = [500 - balance_x, 0]
real_void_deep = max_y + 2
def model_sand_drop2():
global table, drop_point, void_deep
current = deepcopy(drop_point)
while True:
x_current = current[0]
y_current = current[1]
# if below still empty, continue drop down
if table[drop_point[1]][drop_point[0]] == 2:
break
if current[1] == real_void_deep - 1:
break
if table[y_current + 1][x_current] == 0:
current[1] += 1
else:
# if below is wall, check the left first
if table[y_current + 1][x_current - 1] == 0:
current[0] -= 1
current[1] += 1
else:
# if the left is wall, check the right
if table[y_current + 1][x_current + 1] == 0:
current[0] += 1
current[1] += 1
else:
# if both left and right are wall, stop
break
table[current[1]][current[0]] = 2
# %% prepare table, the bottom maximum length is 2x of the (max_y + 1)
table = [[0 for x in range(2*(max_y + 2) + 1)] for y in range(max_y + 2)]
for instruction in data:
create_wall_of_rock(instruction)
#%%
while True:
if table[drop_point[1]][drop_point[0]] == 2:
break
model_sand_drop2()
#%%
sum([line.count(2) for line in table])
# %%
from pandas import DataFrame
import seaborn as sns
df = DataFrame(table)
sns.heatmap(df)
# %%
|
nhannht/aoc2022
|
day14.py
|
day14.py
|
py
| 4,428 |
python
|
en
|
code
| 0 |
github-code
|
6
|
34346333133
|
'''
Exercises of the book "Think python"
14.12.2 Exercise:
'''
# If you download my solution to Exercise 2 from
# http://thinkpython2.com/code/anagram_sets.py, you’ll see that it
# creates a dictionary that maps from a sorted string of letters
# to the list of words that can be spelled with those letters.
# For example, 'opst' maps to
# the list ['opts', 'post', 'pots', 'spot', 'stop', 'tops'].
#
# Write a module that imports anagram_sets and provides two new
# functions: store_anagrams should store the anagram dictionary
# in a “shelf”; read_anagrams should look up a word and return
# a list of its anagrams.
# Solution: http://thinkpython2.com/code/anagram_db.py.
import shelve
import os
from anagrams import all_anagrams
def store_anagrams(anagrams_dict):
"""Stores anagrams in a shelf"""
# Store the anagrams dict in 'shelf'
with shelve.open('saved_anagrams') as db:
db['anagrams_dict'] = anagrams_dict
def read_anagrams(word):
"""Looks for the list of anagrams for the word"""
# Read anagrams dictionary
with shelve.open('saved_anagrams') as db:
saved_anagrams = db.get('anagrams_dict')
# Get the list of anagrams for the word
key = ''.join(sorted(word))
    anagrams_list = saved_anagrams.get(key, "No such word in the dict")
    if isinstance(anagrams_list, list) and word in anagrams_list:
        anagrams_list.remove(word)
    return anagrams_list
# Test program
if __name__ == "__main__":
path = os.path.sep.join(["chapter14", "words.txt"])
anagrams = all_anagrams(path)
store_anagrams(anagrams)
result = read_anagrams("pots")
print(result)
|
LiliiaMykhaliuk/think-python
|
chapter14/14.12.2.py
|
14.12.2.py
|
py
| 1,587 |
python
|
en
|
code
| 0 |
github-code
|
6
|
29584667291
|
# -*- coding: utf-8 -*-
from datetime import datetime
import calendar
from openerp import models, fields, api, sql_db
from openerp.addons.avancys_orm import avancys_orm as orm
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DSDF, DEFAULT_SERVER_DATETIME_FORMAT as DSTF, float_compare
from openerp.exceptions import Warning
from dateutil.relativedelta import relativedelta
import unicodedata
import base64
import math
import calendar as cal
FORM_TYPES = [
('E', '[E] Planilla empleados empresas'),
# ('Y', '[Y] Planilla independientes empresas'),
# ('A', '[A] Planilla cotizantes con novedad de ingreso'),
# ('S', '[S] Planilla empleados de servicio domestico'),
# ('M', '[M] Planilla mora'),
# ('N', '[N] Planilla correcciones'),
# ('H', '[H] Planilla madres sustitutas'),
# ('T', '[T] Planilla empleados entidad beneficiaria del sistema general de participaciones'),
# ('F', '[F] Planilla pago aporte patronal faltante'),
# ('J', '[J] Planilla para pago seguridad social en cumplimiento de sentencia digital'),
# ('X', '[X] Planilla para pago empresa liquidada'),
# ('U', '[U] Planilla de uso UGPP para pagos por terceros'),
# ('K', '[K] Planilla estudiantes')
]
FORM_STATES = [
('draft', 'Borrador'),
('closed', 'Cerrada')
]
# According to Resolution 454
TYPE_WAGE = [
('X', 'Integral'),
('F', 'Fijo'),
('V', 'Variable'),
(' ', 'Aprendiz')
]
def monthrange(year=None, month=None):
today = datetime.today()
y = year or today.year
m = month or today.month
return y, m, cal.monthrange(y, m)[1]
def strip_accents(s):
new_string = ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
new_string = new_string.encode('ascii', 'replace').replace('?', ' ')
return new_string
def prep_field(s, align='left', size=0, fill=' ', date=False):
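    # Format one value as a fixed-width field for the PILA flat file: dates are rendered
    # as YYYY-MM-DD, then the string is truncated to `size` and padded with `fill` on the
    # requested side.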
if s in [False, None]:
s = ''
if date:
s = datetime.strftime(s, "%Y-%m-%d")
if align == 'right':
s = str(s)[0:size].rjust(size, str(fill))
elif align == 'left':
s = str(s)[0:size].ljust(size, str(fill))
return s
def rp(value):
if value % 100.0 >= 0.01:
val = int(math.ceil(value / 100.0)) * 100
else:
val = round(value, 0)
return val
def rp1(value):
if value - round(value) > 0.0001:
res = round(value) + 1
else:
res = round(value)
return res
class HrContributionFormLine(models.Model):
_name = 'hr.contribution.form.line'
contribution_id = fields.Many2one('hr.contribution.form', 'Autoliquidacion', ondelete="cascade")
employee_id = fields.Many2one('hr.employee', 'Empleado')
contract_id = fields.Many2one('hr.contract', 'Contrato')
leave_id = fields.Many2one('hr.holidays', 'Ausencia')
main = fields.Boolean('Linea principal')
# Campos PILA
ing = fields.Selection([('X', 'X'), ('R', 'R'), ('C', 'C')], 'ING', help='Ingreso')
ret = fields.Selection([('P', 'P'), ('R', 'R'), ('C', 'C'), ('X', 'X')], 'RET', help='Retiro')
tde = fields.Boolean('TDE', help='Traslado desde otra EPS o EOC')
tae = fields.Boolean('TAE', help='Traslado a otra EPS o EOC')
tdp = fields.Boolean('TDP', help='Traslado desde otra administradora de pensiones')
tap = fields.Boolean('TAP', help='Traslado a otra administradora de pensiones')
vsp = fields.Boolean('VSP', help='Variacion permanente de salario')
fixes = fields.Selection([('A', 'A'), ('C', 'C')], 'Correcciones')
vst = fields.Boolean('VST', help='Variacion transitoria de salario')
sln = fields.Boolean('SLN', help='Licencia no remunerada o suspension temporal del contrato')
ige = fields.Boolean('IGE', help='Incapacidad general')
lma = fields.Boolean('LMA', help='Licencia de maternidad o paternidad')
vac = fields.Selection([('X', 'X'), ('L', 'L')], 'VAC', help='Vacaciones/LR')
avp = fields.Boolean('AVP', help='Aporte voluntario de pension')
vct = fields.Boolean('VCT', help='Variacion de centros de trabajo')
irl = fields.Float('IRL', help='Dias de incapacidad por accidente de trabajo o enfermedad laboral')
afp_code = fields.Char('Codigo AFP')
afp_to_code = fields.Char('Codigo AFP a la cual se traslada')
eps_code = fields.Char('Codigo EPS')
eps_to_code = fields.Char('Codigo EPS a la cual se traslada')
ccf_code = fields.Char('Codigo CCF')
pens_days = fields.Integer('Dias cotizados pension')
eps_days = fields.Integer('Dias cotizados EPS')
arl_days = fields.Integer('Dias cotizados ARL')
ccf_days = fields.Integer('Dias cotizados CCF')
wage = fields.Integer('Salario basico')
int_wage = fields.Boolean('Salario integral')
wage_type = fields.Selection(string='Tipo de salario', selection=TYPE_WAGE)
pens_ibc = fields.Float('IBC pension')
eps_ibc = fields.Float('IBC EPS')
arl_ibc = fields.Float('IBC ARL')
ccf_ibc = fields.Float('IBC CCF')
global_ibc = fields.Float('IBC Global')
pens_rate = fields.Float('Tarifa pension')
pens_cot = fields.Float('Cotizacion pension')
ap_vol_contributor = fields.Float('Aportes voluntarios del afiliado')
ap_vol_company = fields.Float('Aportes voluntarios del aportante')
pens_total = fields.Float('Aportes totales de pension')
fsol = fields.Float('Aportes a fondo de solidaridad')
fsub = fields.Float('Aportes a fondo de subsistencia')
ret_cont_vol = fields.Float('Valor no retenido por aportes voluntarios')
eps_rate = fields.Float('Tarifa EPS')
eps_cot = fields.Float('Cotizacion EPS')
ups = fields.Float('Total UPS')
aus_auth = fields.Char('Numero de autorizacion de incapacidad')
gd_amount = fields.Float('Valor de la incapacidad EG')
mat_auth = fields.Char('Numero de autorizacion de licencia')
mat_amount = fields.Float('Valor de licencia')
arl_rate = fields.Float('Tarifa ARL')
work_center = fields.Char('Centro de trabajo')
arl_cot = fields.Float('Cotizacion ARL')
ccf_rate = fields.Float('Tarifa CCF')
ccf_cot = fields.Float('Cotizacion CCF')
sena_rate = fields.Float('Tarifa SENA')
sena_cot = fields.Float('Cotizacion SENA')
icbf_rate = fields.Float('Tarifa ICBF')
icbf_cot = fields.Float('Cotizacion ICBF')
esap_rate = fields.Float('Tarifa ESAP')
esap_cot = fields.Float('Cotizacion ESAP')
men_rate = fields.Float('Tarifa MEN')
men_cot = fields.Float('Cotizacion MEN')
exonerated = fields.Boolean('Exonerado de aportes')
arl_code = fields.Char('Codigo ARL')
arl_risk = fields.Char('Clase de riesgo')
k_start = fields.Date('Fecha de ingreso')
k_end = fields.Date('Fecha de retiro')
vsp_start = fields.Date('Fecha de inicio de VSP')
sln_start = fields.Date('Inicio licencia no remunerada')
sln_end = fields.Date('Fin licencia no remunerada')
ige_start = fields.Date('Inicio incapacidad EG')
ige_end = fields.Date('Fin incapacidad EG')
lma_start = fields.Date('Inicio licencia maternidad')
lma_end = fields.Date('Fin licencia maternidad')
vac_start = fields.Date('Inicio vacaciones')
vac_end = fields.Date('Fin vacaciones')
vct_start = fields.Date('Inicio cambio centro de trabajo')
vct_end = fields.Date('Fin cambio de centro de trabajo')
atep_start = fields.Date('Inicio ATEP')
atep_end = fields.Date('Fin ATEP')
other_ibc = fields.Float('IBC otros parafiscales')
w_hours = fields.Integer('Horas laboradas')
class HrContributionForm(models.Model):
_name = 'hr.contribution.form'
name = fields.Char('Nombre')
period_id = fields.Many2one('payslip.period', 'Periodo', domain=[('schedule_pay', '=', 'monthly')])
group_id = fields.Many2one('hr.contract.group', 'Grupo de contratos')
form_type = fields.Selection(FORM_TYPES, 'Tipo de planilla', default='E')
branch_code = fields.Char('Codigo de sucursal')
presentation = fields.Char('Presentacion', size=1, default='U')
contract_ids = fields.Many2many('hr.contract', 'pila_contract_rel', 'pila_id', 'contract_id')
state = fields.Selection(FORM_STATES, 'Estado', default='draft')
file = fields.Binary('Archivo plano', readonly=True)
journal_id = fields.Many2one('account.journal', "Diario contable")
move_id = fields.Many2one('account.move', 'Asiento')
move_id_name = fields.Char('Move Name')
form_line_ids = fields.One2many('hr.contribution.form.line', 'contribution_id', string='Detalle')
error_log = fields.Text('Reporte de errores')
@api.multi
def fix_o_rights(self, rights,start_p,end_p,contract):
""" Retorna el IBC basado en los ingresos por otros derechos del empleado """
if rights <= 0:
return 0
query="""select HH.id, HH.absence_id, HHD.sequence, HHS.gi_b2, HHS.gi_b90, HHS.gi_b180, HHS.gi_a180, HHS.sub_wd, HHS.no_payable
from hr_holidays_days as HHD
inner join hr_holidays as HH
on HH.id = HHD.holiday_id
inner join hr_holidays_status as HHS
on HHS.id = HHD.holiday_status_id and HHS.active and (HHS.general_illness or (sub_wd and no_payable))
where HHD.contract_id = {contrato} and
HHD.name BETWEEN '{s_p}' and '{e_p}'""".format(
contrato=contract,
s_p = start_p,
e_p=end_p)
holiday_days = orm.fetchall(self._cr, query)
        # Organize the number of days into the deduction ranges for general illness or its extension
        days_b2, days_b90, days_b180, days_a180, otros = [0,None],[0,None],[0,None],[0,None], 0  # [day count, percentage]
for day in holiday_days:
leave_id = self.env['hr.holidays'].browse(day[0])
if day[0] == day[1]:
raise Warning("La ausencia {aus} no puede tener una prorroga a si misma, se sugire borrar y crear una nueva ausencia".format(aus=leave_id.name))
            if day[7] and day[8]:  # first check whether this is a leave type that modifies the IBC
otros += 1
            elif day[2] <= 2:  # evaluate general-illness leaves
days_b2[0] += 1
if not days_b2[1]:
days_b2[1] = day[3]
if days_b2[1] and days_b2[1] != day[3]:
raise Warning("La ausencia {aus} tiene <Porcentaje a reconocer por enfermedad de 1 y 2 dias> diferente a otras ausencias reportadas en el periodo de {P}, revisar ausencias del contrato con id = {C}".format(aus=leave_id.name,P=start_p[:-2],C=contract))
elif 2 < day[2] <= 90:
days_b90[0] += 1
if not days_b90[1]:
days_b90[1] = day[4]
if days_b90[1] and days_b90[1] != day[4]:
raise Warning("La ausencia {aus} tiene <Porcentaje a reconocer por enfermedad de 3 a 90 dias> diferente a otras ausencias reportadas en el periodo de {P}, revisar ausencias del contrato con id = {C}".format(aus=leave_id.name,P=start_p[:-2],C=contract))
elif 90 < day[2] <= 180:
days_b180[0] += 1
if not days_b180[1]:
days_b180[1] = day[5]
if days_b180[1] and days_b180[1] != day[5]:
raise Warning("La ausencia {aus} tiene <Porcentaje a reconocer por enfermedad de 91 a 180 dias> diferente a otras ausencias reportadas en el periodo de {P}, revisar ausencias del contrato con id = {C}".format(aus=leave_id.name,P=start_p[:-2],C=contract))
else:
days_a180[0] += 1
if not days_a180[1]:
days_a180[1] = day[6]
if days_a180[1] and days_a180[1] != day[6]:
raise Warning("La ausencia {aus} tiene <Porcentaje a reconocer por enfermedad de 181 días en adelante> diferente a otras ausencias reportadas en el periodo de {P}, revisar ausencias del contrato con id = {C}".format(aus=leave_id.name,P=start_p[:-2],C=contract))
        # --------------------- Compute the IBC
        # Compute the number of days weighted by each percentage
        DiasPorcentaje = [days_b2, days_b90, days_b180, days_a180]
        total = [0,0]  # [weighted days, leave days]
for DP in DiasPorcentaje:
if not DP[1]:
continue
total[0] += float(DP[0] * DP [1])/100
total[1] += DP[0]
        # IBC calculation for other entitlements
rights = float(rights * total[1])/ total[0] if total[0] and total[0] else rights
rights += float(self.env['hr.contract'].browse(contract).wage)/30 * otros if otros > 0 else 0
return rights
@api.multi
def compute_ibc(self, contract, month, main):
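        # Build the monthly contribution base (IBC): sum the salary-type earnings, add only the
        # portion of non-salary earnings above the 40% exemption, take 70% of the base for
        # integral salaries, and cap the result at 25 minimum monthly wages (SMMLV).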
sdt = month + '-01'
edt = month + "-" + str(monthrange(int(month[0:4]), int(month[5:7]))[2])
plp = self.env['hr.payslip']
        earnings = plp.get_interval_category('earnings', sdt, edt, contract=contract.id)  # earned amounts
        o_salarial_earnings = plp.get_interval_category('o_salarial_earnings', sdt, edt, contract=contract.id)  # other salary earnings
        comp_earnings = plp.get_interval_category('comp_earnings', sdt, edt, contract=contract.id)  # complementary income
if main != 'main':
orig_exc = ('VAC_PAG', 'VAC_LIQ', 'PRIMA', 'PRIMA_LIQ')
o_rights = plp.get_interval_category('o_rights', sdt, edt, exclude=orig_exc, contract=contract.id)
if o_rights:
o_rights = self.fix_o_rights(o_rights[0][1], sdt, edt, contract.id)
else:
o_rights = 0
else:
o_rights = 0
sal_earnings_itv = earnings + o_salarial_earnings + comp_earnings
sal_earnings = sum([x[1] for x in sal_earnings_itv]) + o_rights
o_earnings_itv = plp.get_interval_category('o_earnings', sdt, edt, contract=contract.id)
o_earnings = sum([x[1] for x in o_earnings_itv])
top40 = (sal_earnings + o_earnings) * 0.4
if o_earnings > top40:
amount = sal_earnings + o_earnings - top40
sal_earnings += o_earnings - top40
else:
amount = sal_earnings
if contract.type_id.type_class == 'int':
amount = amount * 0.7
sal_earnings = sal_earnings * 0.7
e_v = self.env['variables.economicas']
smmlv = e_v.getValue('SMMLV', sdt + " 05:00:00") or 0.0
# TOP25
if amount > 25 * smmlv:
amount = 25 * smmlv
days = self.get_wd(contract, month=month, main=main)[0]
sal_days = plp.get_interval_concept_qty('BASICO', sdt, edt, contract=contract.id)
if sal_days:
sal_days = sal_days[0][2]
else:
sal_days = 0
days_to_add = days - sal_days if days != sal_days else 0
if main == 'main':
if amount < contract.wage and contract.wage != 0:
amount += contract.wage * days_to_add / 30
sal_earnings += contract.wage * days_to_add / 30
return [amount, days], sal_earnings
@api.multi
def get_wd(self, contract, period=False, month="", main=False):
if period:
start_period = period.start_period
end_period = period.end_period
else:
start_period = month + "-01"
max_day = monthrange(int(month[0:4]), int(month[5:7]))[2]
end_period = month + "-" + str(max_day)
        # Cap at 30 days or fewer and ignore disability leaves on day 31
max_day = int(end_period[8:10])
max_day = 30 if max_day > 30 else max_day
end_period = end_period[0:7] + "-" + str(max_day)
ld_query = ("SELECT hhd.name, hhd.holiday_id, hhs.code, hhd.sequence "
"FROM hr_holidays_days hhd "
"INNER JOIN hr_holidays_status hhs ON hhs.id = hhd.holiday_status_id "
"WHERE hhd.name BETWEEN '{sd}' AND '{ed}' "
"AND hhd.contract_id = {k} "
"AND hhd.state in ('paid','validate') ".format(
sd=start_period, ed=end_period, k=contract.id))
#Se debe mantener esto el estado en 'paid' y 'validate'
#Si un empleado se incapacita despues de causar la nomina
#Se debe pagar la autoliquidacion a lo real
ld_data = orm.fetchall(self._cr, ld_query)
year = int(end_period[:4])
month = int(end_period[5:7])
end_day_month = calendar.monthrange(year,month)[1]
day31 = end_day_month == 31
if day31:
query_day31 = """ select HHD.holiday_id
from hr_holidays_days as HHD
inner join hr_holidays_status as HHS
on HHS.id = HHD.holiday_status_id
where HHD.contract_id = {contrato}
and HHD.state in ('paid', 'validate')
and HHD.name = '{day31}' """.format(
contrato=contract.id,
day31=end_period[:-2] + '31')
day31 = orm.fetchall(self._cr, query_day31)
# Agrupacion por ausencia
leaves, total_leaves = {}, 0
for ld in ld_data:
if ld[1] not in leaves:
leaves[ld[1]] = [1, ld[2], ld[3], ld[3]]
else:
leaves[ld[1]][0] += 1
leaves[ld[1]][2] = ld[3] if leaves[ld[1]][2] > ld[3] else leaves[ld[1]][2]
leaves[ld[1]][3] = ld[3] if leaves[ld[1]][3] < ld[3] else leaves[ld[1]][3]
total_leaves += 1
if total_leaves > 30:
total_leaves = 30
w102 = 30 - total_leaves if main == 'main' else 30
# Date format
dt_sp = datetime.strptime(start_period, DSDF).date()
dt_ep = datetime.strptime(end_period, DSDF).date()
dt_ksd = datetime.strptime(contract.date_start, DSDF).date()
dt_ked = datetime.strptime(contract.date_end, DSDF).date() if contract.date_end else False
# Calculo de decuccion de contrato por inicio o fin
ded_start_days, ded_end_days = 0, 0
if dt_ksd > dt_sp:
if dt_ep >= dt_ksd:
ded_start_days = (dt_ksd - dt_sp).days
else:
ded_start_days = 30
if dt_ked and dt_ked <= dt_ep:
ded_end_days = (dt_ep - dt_ked).days
if dt_ep.day == 31 and ded_end_days:
ded_end_days -= 1
if dt_ked.month == 2:# Q hacer cuando el empeado se liquida el 28 o 29 de FEB
ded_end_days += 2 if end_day_month == 28 else 1
w102 -= ded_start_days
w102 -= ded_end_days
w102 = 0 if w102 < 0 else w102
return w102, leaves, day31
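# Illustrative note (added): get_wd returns (w102, leaves, day31), where w102 is the
# count of ordinary worked days capped at 30 (net of paid-leave days when main='main'
# and of days before the contract start or after its end), leaves maps each
# hr.holidays id to [day count, leave code, min sequence, max sequence], and day31
# flags a paid leave day falling on the 31st of a 31-day month.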
@api.multi
def calculate_pila(self):
self.get_contract_repeated()
error_log = ""
self._cr.execute("DELETE FROM hr_contribution_form_line where contribution_id = %s" % self.id)
emp_lsq = ("SELECT hc.employee_id, hc.id FROM pila_contract_rel rel "
"INNER JOIN hr_contract hc ON rel.contract_id = hc.id "
"WHERE rel.pila_id = {pila} "
"GROUP BY hc.employee_id, hc.id "
"ORDER BY hc.employee_id asc, hc.id asc".format(pila=self.id))
emp_ls = orm.fetchall(self._cr, emp_lsq)
payslip_obj = self.env['hr.payslip']
start_period = self.period_id.start_period
end_period = self.period_id.end_period
i, j = 0, len(emp_ls)
bar = orm.progress_bar(i, j)
lines = []
e_v = self.env['variables.economicas']
smmlv = e_v.getValue('SMMLV', end_period) or 0.0
for emp in emp_ls:
contract_id = self.env['hr.contract'].browse(emp[1])
cot_type = prep_field(contract_id.fiscal_type_id.code, size=2)
subcot_type = prep_field(contract_id.fiscal_subtype_id.code or '00', size=2)
retired = True if contract_id.fiscal_subtype_id.code not in ['00', False] \
or contract_id.fiscal_type_id.code in ('12', '19') else False
apr = contract_id.fiscal_type_id.code in ('12', '19')
apr_lect = contract_id.fiscal_type_id.code == '12'
# Consolidacion de dias de ausencia pagas del contrato en el periodo definido
w102, leaves, day31 = self.get_wd(contract_id, period=self.period_id, main='main')
# Generacion de lineas
fl = []
if w102:
fl.append(['main', w102, 'WORK102', 0, 0])
fl += [[k,
leaves[k][0] if leaves[k][0] <= 30 else 30,
leaves[k][1],
leaves[k][2],
leaves[k][3]]
for k in leaves]
total_days = sum([x[1] for x in fl])
if total_days > 30:
error_log += "Hay mas de 30 dias reportados en contrato {k} \n".format(k=contract_id.name)
# Asignacion de IBC GLOBAL en lineas
# ref_wage = contract_id.wage if contract_id.wage >= smmlv else smmlv
ref_wage = smmlv
for line in fl:
if line[0] == 'main':
current_comp_ibc, total_ingreso = self.compute_ibc(contract_id, self.period_id.start_period[0:7], line[0])
line_ibc = current_comp_ibc[0]
else:
leave_id = self.env['hr.holidays'].browse(line[0])
line_ibc, total_ingreso = 0, 0
if leave_id.holiday_status_id.general_illness:
#{code_concept: [start,end, vaue]}
concepts_to_eval = {'EG_B2':[1,2,0], 'EG_B90':[3,90,0], 'EG_B180':[91,180,0],'EG_A180':[181,-1,0]}
leave_days_ids = filter(lambda z: start_period <= z.name <= end_period, leave_id.line_ids)
for cte in concepts_to_eval:
gis = payslip_obj.get_interval_concept_qty(cte, start_period, end_period, contract_id.id)
leave_total, leave_qty = 0, 0
for gi in gis:
leave_total += gi[1] if gi[1] else 0
leave_qty += gi[2] if gi[2] else 0
unit_value = leave_total / leave_qty if leave_qty else 0
concepts_to_eval[cte][2] = unit_value
for leave_day in leave_days_ids:
if not leave_day.days_payslip:
continue
for dc in concepts_to_eval.values():
if dc[2] and dc[0] <= leave_day.sequence <= (dc[1] if dc[1] > 0 else leave_day.sequence):
line_ibc += dc[2]
total_ingreso += dc[2]
elif leave_id.holiday_status_id.maternal_lic:
ml = payslip_obj.get_interval_concept_qty('MAT_LIC', start_period, end_period, contract_id.id)
line_ibc = total_ingreso = sum([x[1] for x in ml])
elif leave_id.holiday_status_id.paternal_lic:
pl = payslip_obj.get_interval_concept_qty('PAT_LIC', start_period, end_period, contract_id.id)
line_ibc = total_ingreso = sum([x[1] for x in pl])
elif leave_id.holiday_status_id.atep:
atep = payslip_obj.get_interval_concept_qty('ATEP', start_period, end_period, contract_id.id)
atep_p2 = payslip_obj.get_interval_concept_qty('ATEP_P2', start_period, end_period, contract_id.id)
line_ibc = total_ingreso = sum([x[1] for x in atep + atep_p2])
else:
ref_date = datetime.strptime(leave_id.date_from[0:10], "%Y-%m-%d") - relativedelta(months=1)
month = datetime.strftime(ref_date, "%Y-%m")
leave_ibc, total_ingreso = self.compute_ibc(contract_id, month, line[0])
if leave_ibc[0] == 0 or leave_ibc[1] == 0:
line_ibc = total_ingreso = contract_id.wage * line[1] / 30
else:
line_ibc = leave_ibc[0] * line[1] / leave_ibc[1]
line.append(line_ibc)
line.append(total_ingreso) if total_ingreso else line.append(line_ibc)
total_ibc = sum([x[5] for x in fl])
ingreso = start_period <= contract_id.date_start <= end_period
retiro = (start_period <= contract_id.date_end <= end_period) and contract_id.state == 'done'
#Ajuste de tope minimo por linea, donde la sumatoria de lineas no debe ser menor a un SMMLV
if total_ibc < smmlv and not (retiro or ingreso):
for x in fl:
x[5] = float(smmlv * x[1])/30
x[6] = float(smmlv * x[1])/30
#Ajuste de tope maximo por linea, donde la sumatoria de lineas no debe ser mayor a 25 SMMLV
for x in fl:
x.append(x[5])
x.append(x[6])
if total_ibc > smmlv * 25:
for x in fl:
x[5] = (smmlv * 25 * x[1])/30
x[6] = (smmlv * 25 * x[1])/30
total_ibc = sum([x[5] for x in fl])
if total_days and total_ibc * 30 / total_days < ref_wage and not contract_id.type_id.type_class == 'int':
ibc_to_adj = ref_wage * total_days / 30 - total_ibc
else:
ibc_to_adj = 0
if ibc_to_adj:
fl[0][5] += ibc_to_adj
# ITERACION PRINCIPAL----
pay_vac_comp = True
apply_ret = True
wage_type_main_line = False
for line in fl:
if isinstance(line[0], basestring) and line[0] == 'main':
leave_id = False
main = True
else:
leave_id = self.env['hr.holidays'].browse(line[0])
leave_type = leave_id.holiday_status_id
lstart = leave_id.date_from[0:10]
if lstart < start_period:
lstart = start_period
lend = max([x.name for x in leave_id.line_ids])
if lend > end_period:
lend = end_period
main = False
# Novedad de ingreso
ing = "X" if start_period <= contract_id.date_start <= end_period and main else ''
# Novedad de retiro
wm = fl[0][0] == 'main'
ret = (start_period <= contract_id.date_end <= end_period) and contract_id.state == 'done'#((main and wm) or (not main and leave_type.vacaciones))
ret = ret and apply_ret
ret = 'X' if ret else ''
apply_ret = False
# Variacion salario permanente
wage_change_q = ("SELECT id, date "
"FROM hr_contract_salary_change "
"WHERE contract_id = {c} "
"AND date BETWEEN '{df}' AND '{dt}'".format(
c=contract_id.id, df=start_period, dt=end_period))
wage_change = orm.fetchall(self._cr, wage_change_q)
vsp = False
if wage_change:
for wc in wage_change:
if not ing:
vsp = True and main
vsp_date = wc[1]
# Variacion transitoria de salario
is_itv = payslip_obj.get_interval_category('earnings', start_period, end_period,
exclude=('BASICO',),
contract=contract_id.id)
comp_itv = payslip_obj.get_interval_category('comp_earnings', start_period, end_period,
contract=contract_id.id)
os_itv = payslip_obj.get_interval_category('o_salarial_earnings', start_period, end_period,
contract=contract_id.id)
devibc = line[5] * 30 / line[1] > contract_id.wage
if ((is_itv or comp_itv or os_itv or devibc) and main and not cot_type in ('12', '19')) or contract_id.part_time:
vst = True
else:
vst = False
# Indicador de licencia no remunerada
sln = not main and leave_type.no_payable
# Indicador novedad por incapacidad eg
ige = not main and not sln and leave_type.general_illness
# Indicador novedad por licencia de maternidad o paternidad
lma = not main and (leave_type.maternal_lic or leave_type.paternal_lic) and not sln
# Indicador por vacaciones
vac = 'X' if not main and leave_type.vacaciones and not sln \
else 'L' if not main and not leave_type.vacaciones \
and not (leave_type.maternal_lic or leave_type.paternal_lic) \
and not leave_type.general_illness and not leave_type.atep and not sln else ''
# Indicador aporte voluntario pension
avp_itv = payslip_obj.get_interval_avp(start_period, end_period, contract=contract_id.id)
if avp_itv and not retired:
avp = True
else:
avp = False
# Dias de incapacidad ATEP
if not main and leave_type.atep and not sln:
irl = leaves[line[0]][0]
else:
irl = 0
# Codigos administradoras
afp_code = contract_id.pensiones.codigo_afp if not retired else False
eps_code = contract_id.eps.codigo_eps
ccf_code = contract_id.cajacomp.codigo_ccf if not apr else False
# Validacion de ciudad de caja y ciudad de desempeño contrato
if contract_id.cajacomp and contract_id.cajacomp.city_id.provincia_id != contract_id.cuidad_desempeno.provincia_id:
error_log += u"La caja asignada en el contrato {k} " \
u"no corresponde al departamento de desempeño \n".format(k=contract_id.name)
# Dias de pension, siempre van full excepto si esta pensionado
pens_days = line[1] if not retired else 0
# Dias de EPS, ARL y CCF siempre van full excepto caja en aprendices
eps_days = line[1]
arl_days = line[1] if not (cot_type in ('12') and subcot_type in ('00')) else 0
ccf_days = line[1] if not apr else 0
# Salario
wage_actual_q = ("SELECT id, date "
"FROM hr_contract_salary_change "
"WHERE contract_id = {c} "
"AND date >= '{dt}'".format(
c=contract_id.id, dt=end_period))
wage_actual = orm.fetchall(self._cr, wage_actual_q)
if not wage_actual:
wage = contract_id.wage if contract_id.wage >= smmlv else smmlv
else:
wages = contract_id.wage_historic_ids.sorted(key=lambda r: r.date, reverse=True)
if len(wages) > 1:
wage = wages[-2].wage
else:
# Fallback so 'wage' is always bound even with a single historic record
wage = contract_id.wage if contract_id.wage >= smmlv else smmlv
int_wage = contract_id.type_id.type_class == 'int'
#Resolucion 454
if not main and wage_type_main_line:
wage_type = wage_type_main_line
elif int_wage:
wage_type = 'X'
elif vst:
wage_type = 'V'
elif apr:
wage_type = ' '
else:
wage_type = 'F'
if not wage_type_main_line:
wage_type_main_line = wage_type
# IBC
if (cot_type == '01' and subcot_type in ('01', '03', '06', '04')) or \
(cot_type in ('12', '19') and subcot_type in ('00')):
pens_ibc = 0
else:
pens_ibc = rp1(25 * smmlv if line[5] > 25 * smmlv else line[5])
eps_ibc = rp1(25 * smmlv if line[5] > 25 * smmlv else line[5])
if line[0] != 'main':
pens_ibc = rp1(25 * smmlv if line[5] > 25 * smmlv else line[5])
eps_ibc = rp1(25 * smmlv if line[5] > 25 * smmlv else line[5])
arl_ibc = rp1(line[5]) if not (cot_type in ('12') and subcot_type in ('00')) else 0
arl_ibc = rp1(arl_ibc if arl_ibc <= 25 * smmlv else 25 * smmlv)
vac_pag = payslip_obj.get_interval_concept('VAC_PAG', start_period, end_period, contract_id.id)
vac_disf_data = payslip_obj.get_interval_concept_qty('VAC_DISF', start_period, end_period, contract_id.id)
vac_liq = payslip_obj.get_interval_concept('VAC_LIQ', start_period, end_period, contract_id.id)
vac_money = sum([x[1] for x in vac_pag + vac_liq])
vac_disf = 0 if not vac_disf_data else vac_disf_data[0][1] if vac_disf_data[0][1] else 0
vac_dist_qty = 0 if not vac_disf_data else vac_disf_data[0][2] if vac_disf_data[0][2] else 0
ccf_ibc = 0
if main and not apr:
ccf_ibc = line[8]
if vac_money > 0:
ccf_ibc += vac_money
pay_vac_comp = False
else:
if not apr:
leave_id = self.env['hr.holidays'].browse(line[0])
if leave_id.holiday_status_id.vacaciones:
if self.env.user.company_id.fragment_vac:
leave_days_ids = len(filter(lambda z: start_period <= z.name <= end_period, leave_id.line_ids))
else:
leave_days_ids = leave_id.number_of_days_in_payslip
ccf_ibc += (vac_disf * leave_days_ids / vac_dist_qty) if vac_dist_qty else 0
elif leave_id.holiday_status_id.general_illness or leave_id.holiday_status_id.no_payable or leave_id.holiday_status_id.atep:
ccf_ibc = 0#Se pone para que no entre al else, como control de q configuren bien las ausencias
elif (leave_id.holiday_status_id.maternal_lic or leave_id.holiday_status_id.paternal_lic) and leave_id.holiday_status_id.ibc:
ccf_ibc += line[5]
else:
ccf_ibc += float(line[8]*line[1])/30
#Intenta arreglar el problema de las vacaciones liquidadas negativas
#Se debe poner en cero si definitivamente no hay como compensarlo
#Se debe intentar pagar con otras vacaciones disfrutadas
if pay_vac_comp and (ccf_ibc + vac_money) > 0:
ccf_ibc += vac_money
pay_vac_comp = False
else:
ccf_ibc = 0
ccf_ibc = rp1(ccf_ibc)
global_ibc = total_ibc
# Indicador de exonerabilidad
exonerated = global_ibc < 10 * smmlv and not int_wage and not apr
# IBC de otros parafiscales
other_ibc = ccf_ibc if not exonerated else 0
# Tarifa de pension va en cero solo si es pensionado y 12 si es no remunerado
pens_rate = self.env.user.company_id.percentage_total/100
if contract_id.high_risk:
pens_rate = 0.26
if not main and leave_type.no_payable:
if contract_id.high_risk:
pens_rate = 0.22
else:
percentage = 3.0 if self.env.user.company_id.percentage_total == 3.0 else self.env.user.company_id.percentage_employer
pens_rate = percentage/100
pens_rate = pens_rate if not retired and not apr else 0
# Cotizacion de pension
pens_cot = rp(pens_ibc * pens_rate)
# Aporte voluntario
if avp:
ap_vol_contributor = rp(sum([x[1] for x in avp_itv]) if not retired else 0)
else:
ap_vol_contributor = 0
# Total pensiones
pens_total = rp(pens_cot + ap_vol_contributor)
# Fondo de solidaridad
fsol = rp(pens_ibc * 0.005 if global_ibc >= 4 * smmlv and not retired and not sln else 0)
fsol = fsol if self.env.user.company_id.cal_fond_sol_sub else 0
# Fondo de subsistencia
fsrate = 0
if global_ibc > 4 * smmlv:
fsrate += 0.005
if 16 * smmlv <= global_ibc <= 17 * smmlv:
fsrate += 0.002
elif 17 * smmlv <= global_ibc <= 18 * smmlv:
fsrate += 0.004
elif 18 * smmlv <= global_ibc <= 19 * smmlv:
fsrate += 0.006
elif 19 * smmlv <= global_ibc <= 20 * smmlv:
fsrate += 0.008
elif global_ibc > 20 * smmlv:
fsrate += 0.01
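# Note (added): the brackets above build the subsistence-fund surcharge: 0.5% of the
# pension IBC above 4 SMMLV, plus an extra 0.2%/0.4%/0.6%/0.8% for bases between 16
# and 20 SMMLV and 1.0% above 20 SMMLV; the resulting rate is applied to pens_ibc below.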
fsub = rp(pens_ibc * fsrate if not retired and not sln else 0)
fsub = fsub if self.env.user.company_id.cal_fond_sol_sub else 0
ret_cont_vol_itv = payslip_obj.get_interval_concept('RET_CTG_DIF_FVP', start_period, end_period,
contract=contract_id.id)
ret_cont_vol = sum([x[1] for x in ret_cont_vol_itv]) if avp else 0
if ret_cont_vol < 0:
ret_cont_vol = 0
# Tarifa EPS Todas pagan
eps_rate = 0.04
if global_ibc >= 10 * smmlv or int_wage or apr:
eps_rate = 0.125
if not main and leave_type.no_payable:
eps_rate = 0
# Cotizacion EPS
eps_cot = rp(eps_ibc * eps_rate)
# Autorizacion de incapacidad
# aus_auth = line.no_incapacidad if not main and leave_type.general_illness else False
aus_auth, mat_auth = False, False # Campo exclusivo de aportes en linea.
# mat_auth = line.no_incapacidad if not main and (leave_type.maternal_lic or leave_type.paternal_lic) \
# else False
# Tarifa ARL
arl_rate = contract_id.pct_arp / 100 if main and not apr_lect else 0
# Cotizacion ARL
arl_cot = rp(arl_ibc * arl_rate)
work_center = contract_id.workcenter
# Tarifa CCF
if (main or (self.env.user.company_id.quote_rate_ibc_ccf_lics and (leave_type.paternal_lic or leave_type.maternal_lic)) or leave_type.vacaciones or (not main and ret == 'X')) and not apr and ccf_ibc:
ccf_rate = 0.04
else:
ccf_rate = 0
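# Note (added): the 4% CCF rate applies to the main line, to vacation lines, to
# maternity/paternity licences when the company option quote_rate_ibc_ccf_lics is set,
# and to non-main lines carrying the retirement novelty; apprentices and lines with a
# zero CCF base keep a 0% rate.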
# Cotizacion CCF
ccf_cot = rp(ccf_ibc * ccf_rate)
# Tarifa SENA
sena_rate = 0.02 if global_ibc >= 10 * smmlv or int_wage else 0
if sln:
sena_rate = 0
# Cotizacion SENA
sena_cot = rp(other_ibc * sena_rate)
# Tarifa ICBF
icbf_rate = 0.03 if global_ibc >= 10 * smmlv or int_wage else 0
if sln:
icbf_rate = 0
# Cotizacion ICBF
icbf_cot = rp(other_ibc * icbf_rate)
# Codigo ARL
arl_code = contract_id.arl.codigo_arl if not apr_lect else False
# Riesgo ARL
arl_risk = contract_id.riesgo.name if not apr_lect else False
# Datos de contrato
k_start = contract_id.date_start if ing else False
k_end = contract_id.date_end if ret else False
# Fechas de novedades
vsp_start = vsp_date if vsp else False
sln_start = lstart if not main and sln else False
sln_end = lend if not main and sln else False
ige_start = lstart if not main and ige else False
ige_end = lend if not main and ige else False
lma_start = lstart if not main and lma else False
lma_end = lend if not main and lma else False
vac_start = lstart if not main and vac else False
vac_end = lend if not main and vac else False
atep = leave_type.atep if not main else False
atep_start = lstart if not main and atep else False
atep_end = lend if not main and atep else False
w_hours = line[1] * 8
data = {
'contribution_id': self.id,
'employee_id': emp[0],
'contract_id': contract_id.id,
'leave_id': leave_id.id if leave_id else False,
'main': main,
'ing': ing,
'ret': ret,
'tde': False, # TODO
'tae': False, # TODO
'tdp': False, # TODO
'tap': False, # TODO
'vsp': vsp,
'fixes': False, # TODO
'vst': vst,
'sln': sln,
'ige': ige,
'lma': lma,
'vac': vac,
'avp': avp,
'vct': False, # TODO
'irl': irl,
'afp_code': afp_code,
'afp_to_code': False, # TODO
'eps_code': eps_code,
'eps_to_code': False, # TODO
'ccf_code': ccf_code,
'pens_days': pens_days,
'eps_days': eps_days,
'arl_days': arl_days,
'ccf_days': ccf_days,
'wage': wage,
'int_wage': int_wage,
'pens_ibc': pens_ibc,
'eps_ibc': eps_ibc,
'arl_ibc': arl_ibc,
'ccf_ibc': ccf_ibc,
'global_ibc': global_ibc,
'pens_rate': pens_rate,
'pens_cot': pens_cot,
'ap_vol_contributor': ap_vol_contributor,
'ap_vol_company': 0, # TODO
'pens_total': pens_total,
'fsol': fsol,
'fsub': fsub,
'ret_cont_vol': ret_cont_vol,
'eps_rate': eps_rate,
'eps_cot': eps_cot,
'ups': 0, # TODO
'aus_auth': aus_auth,
'gd_amount': False, # TODO
'mat_auth': mat_auth,
'arl_rate': arl_rate,
'work_center': work_center,
'arl_cot': arl_cot,
'ccf_rate': ccf_rate,
'ccf_cot': ccf_cot,
'sena_rate': sena_rate,
'sena_cot': sena_cot,
'icbf_rate': icbf_rate,
'icbf_cot': icbf_cot,
'esap_rate': 0, # TODO
'esap_cot': 0, # TODO
'men_rate': 0, # TODO
'men_cot': 0, # TODO
'exonerated': exonerated,
'arl_code': arl_code,
'arl_risk': arl_risk,
'k_start': k_start,
'k_end': k_end,
'vsp_start': vsp_start,
'sln_start': sln_start,
'sln_end': sln_end,
'ige_start': ige_start,
'ige_end': ige_end,
'lma_start': lma_start,
'lma_end': lma_end,
'vac_start': vac_start,
'vac_end': vac_end,
'vct_start': False, # TODO
'vct_end': False, # TODO
'atep_start': atep_start,
'atep_end': atep_end,
'other_ibc': other_ibc,
'w_hours': w_hours,
'wage_type':wage_type,
}
lines.append(data)
i += 1
bar = orm.progress_bar(i, j, bar, emp[0])
orm.direct_create(self._cr, self._uid, 'hr_contribution_form_line', lines)
self.error_log = error_log
@api.multi
def generate_pila(self):
total_text = ''
break_line = '\r\n'
# ----- HEADER ----- #
hl = [''] * (22 + 1)
# 1: Tipo de registro
hl[1] = '01'
# 2: Modalidad de la planilla
hl[2] = '1'
# 3: Secuencia # TODO Está generando el 0001 pero se debe validar que siempre sea el mismo
hl[3] = '0001'
# 4: Nombre o razon social del aportante
hl[4] = prep_field(self.env.user.company_id.partner_id.name, size=200)
# 5: Tipo de documento del aportante # TODO Asignado directamente tipo de documento NIT
hl[5] = 'NI'
# 6: Numero de identificacion del aportante
hl[6] = prep_field(self.env.user.company_id.partner_id.ref, size=16)
# 7: Digito de verificacion
hl[7] = str(self.env.user.company_id.partner_id.dev_ref)
# 8: Tipo de planilla
hl[8] = self.form_type
# 9: Numero de la planilla asociada a esta planilla # TODO revisar casos de planillas N y F
if self.form_type in ['E']:
hl[9] = prep_field(" ", size=10)
else:
raise Warning("Tipo de planilla no soportada temporalmente")
# 10: Fecha de planilla de pago asociada a esta planilla
if self.form_type not in ['N', 'F']:
hl[10] = prep_field(" ", size=10)
else:
raise Warning("Tipo de planilla no soportada temporalmente")
# 11: Forma de presentacion # TODO temporalmente forma de presentacion unica
hl[11] = prep_field(self.presentation, size=1)
# 12: Codigo de sucursal # TODO referente campo 11
hl[12] = prep_field(self.branch_code, size=10)
# 13: Nombre de la sucursal
hl[13] = prep_field(self.branch_code, size=40)
# 14: Código de la ARL a la cual el aportante se encuentra afiliado
hl[14] = prep_field(self.env.user.company_id.arl_id.codigo_arl, size=6)
# 15: Período de pago para los sistemas diferentes al de salud
hl[15] = prep_field(self.period_id.start_period[0:7], size=7)
# 16: Período de pago para el sistema de salud.
pay_ref_date = datetime.strptime(self.period_id.start_period, "%Y-%m-%d") + relativedelta(months=1)
pay_month = datetime.strftime(pay_ref_date, "%Y-%m")
hl[16] = prep_field(pay_month, size=7)
# 17: Número de radicación o de la Planilla Integrada de Liquidación de Aportes. (Asignado por el sistema)
hl[17] = prep_field(" ", size=10)
# 18: Fecha de pago (aaaa-mm-dd) (Asignado por el siustema)
hl[18] = prep_field(" ", size=10)
# 19: Numero total de empleados
emp_count_q = ("SELECT count(hc.employee_id) FROM pila_contract_rel rel "
"INNER JOIN hr_contract hc on hc.id = rel.contract_id "
"INNER JOIN hr_employee he on he.id = hc.employee_id "
"WHERE rel.pila_id = {pila} "
"GROUP by hc.employee_id".format(pila=self.id))
emp_count = orm.fetchall(self._cr, emp_count_q)
hl[19] = prep_field(len(emp_count), align='right', fill='0', size=5)
# 20: Valor total de la nomina
ibp_sum = sum([x.ccf_ibc for x in self.form_line_ids])
hl[20] = prep_field(int(ibp_sum), align='right', fill='0', size=12)
# 21: Tipo de aportante
hl[21] = prep_field("1", size=2)
# 22: Codigo de operador de informacion
hl[22] = prep_field(" ", size=2)
for x in hl:
total_text += x
total_text += break_line
# ----- BODY ----- #
i, j = 0, len(self.form_line_ids)
bar = orm.progress_bar(i, j)
seq = 0
for l in self.form_line_ids:
seq += 1
employee = l.employee_id
ref_type = employee.partner_id.ref_type.code
bl = [''] * (98 + 1)
# 1: Tipo de registro
bl[1] = '02'
# 2: Secuencia
bl[2] = prep_field(seq, align='right', fill='0', size=5)
# 3: Tipo de documento de cotizante
bl[3] = prep_field(ref_type, size=2)
# 4: Numero de identificacion cotizante
bl[4] = prep_field(employee.partner_id.ref, size=16)
# 5: Tipo de cotizante
bl[5] = prep_field(l.contract_id.fiscal_type_id.code if l.contract_id.fiscal_type_id.code != '51' else '01',
size=2)
# 6: Subtipo de cotizante
bl[6] = prep_field(l.contract_id.fiscal_subtype_id.code or '00', size=2)
# 7: Extranjero no obligado a cotizar pensiones
foreign = False
# foreign = employee.partner_id.country_id.code != 'CO' and ref_type in ('CE', 'PA', 'CD')
bl[7] = 'X' if foreign else ' '
# 8: Colombiano en el exterior
is_col = True if ref_type in ('CC', 'TI') and employee.partner_id.country_id.code == 'CO' else False
in_ext = False
if l.contract_id.cuidad_desempeno:
in_ext = True if l.contract_id.cuidad_desempeno.provincia_id.country_id.code != 'CO' else False
bl[8] = 'X' if is_col and in_ext else ' '
# 9: Código del departamento de la ubicación laboral
bl[9] = prep_field(l.contract_id.cuidad_desempeno.provincia_id.code, size=2)
# 10: Código del municipio de ubicación laboral
bl[10] = prep_field(l.contract_id.cuidad_desempeno.code, size=3)
# 11: Primer apellido
if employee.partner_id.primer_apellido:
pap = strip_accents(employee.partner_id.primer_apellido.upper()).replace(".", "")
bl[11] = prep_field(pap, size=20)
else:
bl[11] = prep_field(' ', size=20)
# 12: Segundo apellido
if employee.partner_id.segundo_apellido:
sap = strip_accents(employee.partner_id.segundo_apellido.upper()).replace(".", "")
bl[12] = prep_field(sap, size=30)
else:
bl[12] = prep_field(' ', size=30)
# 13: Primer nombre
if employee.partner_id.primer_nombre:
pno = strip_accents(employee.partner_id.primer_nombre.upper()).replace(".", "")
bl[13] = prep_field(pno, size=20)
else:
bl[13] = prep_field(' ', size=20)
# 14: Segundo nombre
if employee.partner_id.otros_nombres:
sno = strip_accents(employee.partner_id.otros_nombres.upper()).replace(".", "")
bl[14] = prep_field(sno, size=30)
else:
bl[14] = prep_field(' ', size=30)
# 15: Ingreso
bl[15] = 'X' if l.ing else ' '
# 16: Retiro
bl[16] = 'X' if l.ret else ' '
# 17: Traslado desde otra eps
bl[17] = 'X' if l.tde else ' '
# 18: Traslado a otra eps
bl[18] = 'X' if l.tae else ' '
# 19: Traslado desde otra administradora de pensiones
bl[19] = 'X' if l.tdp else ' '
# 20: Traslado a otra administradora de pensiones
bl[20] = 'X' if l.tap else ' '
# 21: Variacion permanente del salario
bl[21] = 'X' if l.vsp else ' '
# 22: Correcciones
bl[22] = 'X' if l.fixes else ' '
# 23: Variacion transitoria del salario
bl[23] = 'X' if l.vst else ' '
# 24: Suspension temporal del contrato
bl[24] = 'X' if l.sln else ' '
# 25: Incapacidad temporal por enfermedad general
bl[25] = 'X' if l.ige else ' '
# 26: Licencia de maternidad o paternidad
bl[26] = 'X' if l.lma else ' '
# 27: Vacaciones, licencia remunerada
bl[27] = l.vac if l.vac else ' '
# 28: Aporte voluntario
bl[28] = 'X' if l.avp else ' '
# 29: Variacion de centro de trabajo
bl[29] = 'X' if l.vct else ' '
# 30: Dias de incapacidad por enfermedad laboral
bl[30] = prep_field("{:02.0f}".format(l.irl), align='right', fill='0', size=2)
# 31: Codigo de la administradora de fondos de pensiones
bl[31] = prep_field(l.afp_code, size=6)
# 32: Codigo de administradora de pensiones a la cual se traslada el afiliado #TODO
bl[32] = prep_field(l.afp_to_code, size=6)
# 33: Codigo de EPS a la cual pertenece el afiliado
bl[33] = prep_field(l.eps_code, size=6)
# 34: Codigo de eps a la cual se traslada el afiliado
bl[34] = prep_field(l.eps_to_code, size=6)
# 35: Código CCF a la cual pertenece el afiliado
bl[35] = prep_field(l.ccf_code, size=6)
# 36: Numero de dias cotizados a pension
bl[36] = prep_field("{:02.0f}".format(l.pens_days), align='right', fill='0', size=2)
# 37: Numero de dias cotizados a salud
bl[37] = prep_field("{:02.0f}".format(l.eps_days), align='right', fill='0', size=2)
# 38: Numero de dias cotizados a ARL
bl[38] = prep_field("{:02.0f}".format(l.arl_days), align='right', fill='0', size=2)
# 39: Numero de dias cotizados a CCF
bl[39] = prep_field("{:02.0f}".format(l.ccf_days), align='right', fill='0', size=2)
# 40: Salario basico
bl[40] = prep_field("{:09.0f}".format(l.wage), align='right', fill='0', size=9)
# 41: Salario integral, resolucion 454
bl[41] = l.wage_type
# 42: IBC pension
bl[42] = prep_field("{:09.0f}".format(l.pens_ibc), align='right', fill='0', size=9)
# 43: IBC salud
bl[43] = prep_field("{:09.0f}".format(l.eps_ibc), align='right', fill='0', size=9)
# 44: IBC arl
bl[44] = prep_field("{:09.0f}".format(l.arl_ibc), align='right', fill='0', size=9)
# 45: IBC CCF
bl[45] = prep_field("{:09.0f}".format(l.ccf_ibc), align='right', fill='0', size=9)
# 46: Tarifa de aporte a pensiones
bl[46] = prep_field("{:01.5f}".format(l.pens_rate), align='right', fill='0', size=7)
# 47: Cotizacion pension
bl[47] = prep_field("{:09.0f}".format(l.pens_cot), align='right', fill='0', size=9)
# 48: Aportes voluntarios del afiliado
bl[48] = prep_field("{:09.0f}".format(l.ap_vol_contributor), align='right', fill='0', size=9)
# 49: Aportes voluntarios del aportante
bl[49] = prep_field("{:09.0f}".format(l.ap_vol_company), align='right', fill='0', size=9)
# 50: Total cotizacion pensiones
bl[50] = prep_field("{:09.0f}".format(l.pens_total), align='right', fill='0', size=9)
# 51: Aportes a fondo solidaridad
bl[51] = prep_field("{:09.0f}".format(l.fsol), align='right', fill='0', size=9)
# 52: Aportes a fondo subsistencia
bl[52] = prep_field("{:09.0f}".format(l.fsub), align='right', fill='0', size=9)
# 53: Valor no retenido por aportes voluntarios
bl[53] = prep_field("{:09.0f}".format(l.ret_cont_vol), align='right', fill='0', size=9)
# 54: Tarifa de aportes salud
bl[54] = prep_field("{:01.5f}".format(l.eps_rate), align='right', fill='0', size=7)
# 55: Aportes salud
bl[55] = prep_field("{:09.0f}".format(l.eps_cot), align='right', fill='0', size=9)
# 56: Total UPS adicional
bl[56] = prep_field("{:09.0f}".format(l.ups), align='right', fill='0', size=9)
# 57: Numero de autorizacion de incapacidad
bl[57] = prep_field(l.aus_auth, size=15)
# 58: Valor de la incapacidad por enf general
bl[58] = prep_field("{:09.0f}".format(l.gd_amount), align='right', fill='0', size=9)
# 59: Numero de autorizacion por licencia de maternidad
bl[59] = prep_field(l.mat_auth, size=15)
# 60: Valor de licencia de maternidad
bl[60] = prep_field("{:09.0f}".format(l.mat_amount), align='right', fill='0', size=9)
# 61: Tarifa de aportes a riesgos laborales
bl[61] = prep_field("{:01.5f}".format(l.arl_rate), align='right', fill='0', size=9)
# 62: Centro de trabajo
bl[62] = prep_field(l.work_center, align='right', fill='0', size=9)
# 63: Cotizacion obligatoria a riesgos laborales
bl[63] = prep_field("{:09.0f}".format(l.arl_cot), align='right', fill='0', size=9)
# 64: Tarifa de aportes CCF
bl[64] = prep_field("{:01.5f}".format(l.ccf_rate), align='right', fill='0', size=7)
# 65: Aportes CCF
bl[65] = prep_field("{:09.0f}".format(l.ccf_cot), align='right', fill='0', size=9)
# 66: Tarifa SENA
bl[66] = prep_field("{:01.5f}".format(l.sena_rate), align='right', fill='0', size=7)
# 67: Aportes SENA
bl[67] = prep_field("{:09.0f}".format(l.sena_cot), align='right', fill='0', size=9)
# 68: Tarifa ICBF
bl[68] = prep_field("{:01.5f}".format(l.icbf_rate), align='right', fill='0', size=7)
# 69: Aportes ICBF
bl[69] = prep_field("{:09.0f}".format(l.icbf_cot), align='right', fill='0', size=9)
# 70: Tarifa ESAP
bl[70] = prep_field("{:01.5f}".format(l.esap_rate), align='right', fill='0', size=7)
# 71: Aportes ESAP
bl[71] = prep_field("{:09.0f}".format(l.esap_cot), align='right', fill='0', size=9)
# 72: Tarifa MEN
bl[72] = prep_field("{:01.5f}".format(l.men_rate), align='right', fill='0', size=7)
# 73: Aportes MEN
bl[73] = prep_field("{:09.0f}".format(l.men_cot), align='right', fill='0', size=9)
# 74: Tipo de documento del cotizante principal
bl[74] = prep_field(' ', size=2)
# 75: Numero de documento de cotizante principal
bl[75] = prep_field(' ', size=16)
# 76: Exonerado de aportes a paraficales y salud
bl[76] = 'S' if l.exonerated else 'N'
# 77: Codigo de la administradora de riesgos laborales
bl[77] = prep_field(l.arl_code, size=6)
# 78: Clase de riesgo en la cual se encuentra el afiliado
bl[78] = prep_field(l.arl_risk, size=1)
# 79: Indicador de tarifa especial de pensiones
bl[79] = prep_field(' ', size=1)
# 80: Fecha de ingreso
bl[80] = prep_field(l.k_start, size=10)
# 81: Fecha de retiro
bl[81] = prep_field(l.k_end, size=10)
# 82: Fecha de inicio de VSP
bl[82] = prep_field(l.vsp_start, size=10)
# 83: Fecha de inicio SLN
bl[83] = prep_field(l.sln_start, size=10)
# 84: Fecha de fin SLN
bl[84] = prep_field(l.sln_end, size=10)
# 85: Fecha de inicio IGE
bl[85] = prep_field(l.ige_start, size=10)
# 86: Fecha de fin IGE
bl[86] = prep_field(l.ige_end, size=10)
# 87: Fecha de inicio LMA
bl[87] = prep_field(l.lma_start, size=10)
# 88: Fecha de fin LMA
bl[88] = prep_field(l.lma_end, size=10)
# 89: Fecha de inicio VAC
bl[89] = prep_field(l.vac_start, size=10)
# 90: Fecha de fin VAC
bl[90] = prep_field(l.vac_end, size=10)
bl[91] = prep_field(l.vct_start, size=10)
bl[92] = prep_field(l.vct_end, size=10)
# 93: Fecha de inicio ATEP
bl[93] = prep_field(l.atep_start, size=10)
# 94: Fecha de fin ATEP
bl[94] = prep_field(l.atep_end, size=10)
# 95: IBC otros parafiscales
bl[95] = prep_field("{:09.0f}".format(l.other_ibc), align='right', fill='0', size=9)
# 96: Numero de horas laboradas
bl[96] = prep_field("{:03.0f}".format(l.w_hours), align='right', fill='0', size=3)
bl[97] = prep_field('', size=10)
i += 1
bar = orm.progress_bar(i, j, bar)
for x in bl:
total_text += x
total_text += break_line
# decode and generate txt
final_content = strip_accents(total_text.encode('utf-8', 'replace').decode('utf-8'))
file_text = base64.b64encode(final_content)
self.write({'file': file_text})
return total_text
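# Illustrative note (added; the helper's exact behaviour is inferred from its calls
# above): prep_field pads each value to a fixed width so every record of the PILA flat
# file keeps its column positions, e.g. prep_field(7, align='right', fill='0', size=5)
# would be expected to yield "00007" and prep_field("ABC", size=5) "ABC  ".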
@api.multi
def load_contract(self):
self._cr.execute("DELETE FROM pila_contract_rel where pila_id = %s" % self.id)
if self.group_id:
groupwh = " AND hc.group_id = {group} ".format(group=self.group_id.id)
else:
groupwh = " "
active = """
SELECT hc.id FROM hr_contract hc
INNER JOIN hr_payslip hp ON hp.contract_id = hc.id
WHERE hp.liquidacion_date BETWEEN '{date_from}' AND '{date_to}'
{groupwh}
and hc.id not in (
select contract_id
from pila_contract_rel
where pila_id in (select id from hr_contribution_form where period_id = {periodo}) )
GROUP BY hc.id""".format(date_from=self.period_id.start_period,
date_to=self.period_id.end_period,
groupwh=groupwh,
periodo=self.period_id.id)
ca = [x[0] for x in orm.fetchall(self._cr, active)]
for contract in ca:
self._cr.execute("INSERT into pila_contract_rel (pila_id, contract_id) VALUES ({pila}, {contract})".format(
pila=self.id, contract=contract))
return True
@api.multi
def load_pending(self):
self._cr.execute("DELETE FROM pila_contract_rel where pila_id = %s" % self.id)
if self.group_id:
groupwh = " AND hc.group_id = {group} ".format(group=self.group_id.id)
else:
groupwh = " "
calculated = ("SELECT hcfl.contract_id from hr_contribution_form_line hcfl "
"LEFT JOIN hr_contribution_form hcf on hcf.id = hcfl.contribution_id "
"WHERE hcf.period_id = {period} "
"group by hcfl.contract_id".format(period=self.period_id.id))
clc = tuple([x[0] for x in orm.fetchall(self._cr, calculated)] + [0])
active = ("SELECT hc.id FROM hr_contract hc "
"INNER JOIN hr_payslip hp ON hp.contract_id = hc.id "
"WHERE hp.liquidacion_date BETWEEN '{date_from}' AND '{date_to}' "
"AND hc.id not in {clc} "
"{groupwh} GROUP BY hc.id".format(date_from=self.period_id.start_period,
date_to=self.period_id.end_period,
clc=clc,
groupwh=groupwh))
ca = [x[0] for x in orm.fetchall(self._cr, active)]
for contract in ca:
self._cr.execute("INSERT into pila_contract_rel (pila_id, contract_id) VALUES ({pila}, {contract})".format(
pila=self.id, contract=contract))
return True
@api.multi
def get_acc_type(self, contract_id):
kt = contract_id.type_id
acc = kt.type_class + "_" + kt.section[0:3]
return acc
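# Note (added): get_acc_type builds keys such as 'reg_adm', 'int_ope' or 'apr_com'
# from the contract type's class and the first three letters of its section; they are
# suffixed with '_debit' / '_credit' in close_contform to pick the accounts configured
# on each hr.concept.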
@api.multi
def draft_contform(self):
self.state = 'draft'
if self.move_id:
account_move_line_sel ="""
select id from account_move_line where move_id = {asiento}
""".format(asiento=self.move_id.id)
account_move_line = [x[0] for x in orm.fetchall(self._cr, account_move_line_sel)]
if account_move_line:
account_move_line_tuple = tuple(account_move_line if len(account_move_line) > 1 else [account_move_line[0],0])
analytic_lines_sel = """
select id from account_analytic_line where move_id in {moves}
""".format(moves=account_move_line_tuple)
analytic_lines = [x[0] for x in orm.fetchall(self._cr, analytic_lines_sel)]
if analytic_lines:
orm.fast_delete(self._cr, 'account_analytic_line', ('id', analytic_lines))
orm.fast_delete(self._cr, 'account_move_line', ('id', account_move_line))
orm.fast_delete(self._cr, 'account_move', ('id', self.move_id.id))
self._cr.execute('update hr_contribution_form set move_id = null where id = {pila}'.format(pila=self.id))
@api.multi
def close_contform(self):
liquid_date = self.period_id.end_period
start_date = self.period_id.start_period
start_date_tmp = datetime.strftime(
datetime.strptime(start_date, "%Y-%m-%d") - relativedelta(months=1),
"%Y-%m-%d")
account_period = self.env['account.period'].find(liquid_date)[0]
po = self.env['hr.payslip']
smmlv = self.env['variables.economicas'].getValue('SMMLV', liquid_date) or 0.0
if not self.move_id_name:
journal_seq = self.journal_id.sequence_id
name = self.env['ir.sequence'].next_by_id(journal_seq.id)
self.move_id_name = name
else:
name = self.move_id_name
move_data = {
'narration': "APORTES {p}".format(p=self.period_id.name),
'date': liquid_date,
'name': name,
'ref': self.name,
'journal_id': self.journal_id.id,
'period_id': account_period.id,
'partner_id': self.env.user.company_id.partner_id.id,
'state': 'posted'
}
move_id = orm.direct_create(self._cr, self._uid, 'account_move', [move_data], company=True)[0][0]
self.move_id = self.env['account.move'].browse(move_id)
p_query = ("SELECT contract_id, "
"sum(pens_total) + sum(fsol) + sum(fsub) - sum(ap_vol_contributor) - sum(ap_vol_company) as pens, sum(eps_cot) as eps, sum(arl_cot) as arl, "
"sum(ccf_cot) as ccf, sum(sena_cot) as sena, sum(icbf_cot) as icbf "
"from hr_contribution_form_line "
"WHERE contribution_id = {cont} "
"GROUP BY contract_id".format(cont=self.id))
hcfl = orm.fetchall(self._cr, p_query)
ap_template = {
'reg_adm_debit': False, 'reg_com_debit': False, 'reg_ope_debit': False,
'int_adm_debit': False, 'int_com_debit': False, 'int_ope_debit': False,
'apr_adm_debit': False, 'apr_com_debit': False, 'apr_ope_debit': False,
'reg_adm_credit': False, 'reg_com_credit': False, 'reg_ope_credit': False,
'int_adm_credit': False, 'int_com_credit': False, 'int_ope_credit': False,
'apr_adm_credit': False, 'apr_com_credit': False, 'apr_ope_credit': False,
'partner_type': False,
}
ap_concepts = {'AP_PENS': ap_template.copy(),
'AP_EPS': ap_template.copy(),
'AP_ARL': ap_template.copy(),
'AP_CCF': ap_template.copy(),
'AP_SENA': ap_template.copy(),
'AP_ICBF': ap_template.copy()
}
for apc in ap_concepts:
concept_id = self.env['hr.concept'].search([('code', '=', apc)])
if not concept_id:
raise Warning("No se ha encontrado el concepto {c} necesario para "
"la consulta de cuentas para la causacion de aportes".format(c=apc))
for acc in ap_concepts[apc]:
ap_concepts[apc][acc] = getattr(concept_id, '{a}'.format(a=acc))
ap_concepts[apc]['concept_id'] = concept_id
aml_data = []
for kdata in hcfl:
index = 1
contract_id = self.env['hr.contract'].browse(kdata[0])
aa_id = contract_id.analytic_account_id
employee_id = contract_id.employee_id
for apc in ap_concepts:
partner_type = ap_concepts[apc]['partner_type']
if partner_type == 'eps':
c_partner = contract_id.eps
elif partner_type == 'arl':
c_partner = contract_id.arl
elif partner_type == 'caja':
c_partner = contract_id.cajacomp
elif partner_type == 'cesantias':
c_partner = contract_id.cesantias
elif partner_type == 'pensiones':
c_partner = contract_id.pensiones
elif partner_type == 'other':
c_partner = ap_concepts[apc]['concept_id'].partner_other
else:
c_partner = employee_id.partner_id
apc_amount = kdata[index]
acc_type = self.get_acc_type(contract_id)
debit_account = ap_concepts[apc][acc_type+'_debit']
credit_account = ap_concepts[apc][acc_type+'_credit']
pyg = ['4', '5', '6', '7', '8']  # first digits of account codes that take an analytic account; account codes are strings
tot_ded = 0
if index == 1: # PENSION
ded_pens = po.get_interval_concept('DED_PENS', start_date, liquid_date, contract_id.id)
fsol = po.get_interval_concept('FOND_SOL', start_date, liquid_date, contract_id.id)
fsub = po.get_interval_concept('FOND_SUB', start_date, liquid_date, contract_id.id)
tot_pens = ded_pens + fsol + fsub
tot_ded = sum([x[1] for x in tot_pens])
elif index == 2: # EPS
ded_eps = po.get_interval_concept('DED_EPS', start_date, liquid_date, contract_id.id)
tot_ded = sum([x[1] for x in ded_eps])
if apc_amount and apc_amount - rp(tot_ded) > 0:
global_ibc = orm.fetchall(self._cr, "select global_ibc from hr_contribution_form_line where contract_id = {contract} and contribution_id = {contribution} limit 1".format(
contract=contract_id.id, contribution=self.id))
if not global_ibc:
raise Warning("Como putas el contrato {contract} en esta PILA no tiene ibc global ????? Sea serio calcule primero y luego cause".format(contract=contract_id.name))
if not (global_ibc[0][0] >= 10 * smmlv or contract_id.type_id.type_class == 'int' or contract_id.fiscal_type_id.code in ('12', '19')):
ded_eps = po.get_interval_concept('DED_EPS', start_date_tmp, liquid_date, contract_id.id)
tot_ded_previos = sum([rp(x[1]) for x in ded_eps])
ap_previos = """
select sum(HCFL.eps_cot) from hr_contribution_form_line as HCFL
inner join hr_contribution_form as HCF on HCF.id = HCFL.contribution_id
inner join payslip_period as PP on PP.id = HCF.period_id
where HCFL.contract_id = {contract}
and (HCF.state = 'closed' or HCF.id = {HCF_id})
and PP.start_period >= '{sp}' and PP.end_period <= '{ep}'
""".format(contract=contract_id.id, HCF_id=self.id,sp=start_date_tmp, ep=liquid_date)
ap_previos = sum([rp(x[0]) for x in orm.fetchall(self._cr, ap_previos) if x[0]])
if tot_ded_previos == ap_previos:
apc_amount, tot_ded = tot_ded_previos, tot_ded_previos
else:
apc_amount, tot_ded = ap_previos, tot_ded_previos
amount = apc_amount - tot_ded
if amount > 0:
# DEBIT - GASTOS
if not debit_account:
raise Warning(u"No se ha definido una cuenta debito para el "
u"concepto {c}".format(c=ap_concepts[apc]['concept_id'].name))
aml_data.append({
'name': ap_concepts[apc]['concept_id'].name,
'ref1': ap_concepts[apc]['concept_id'].code,
'date': liquid_date,
'ref': employee_id.partner_id.ref,
'partner_id': c_partner.id,
'account_id': debit_account.id,
'journal_id': self.journal_id.id,
'period_id': account_period.id,
'debit': amount,
'credit': 0,
'analytic_account_id': aa_id.id if debit_account.code[0] in pyg else False,
'tax_code_id': False,
'tax_amount': 0,
'move_id': self.move_id.id,
'state': 'valid',
'date_maturity': liquid_date,
'contract_id': contract_id.id
})
# CREDIT CxC 23
if not credit_account:
raise Warning(u"No se ha definido una cuenta credito para el "
u"concepto {c}".format(c=ap_concepts[apc]['concept_id'].name))
aml_data.append({
'name': ap_concepts[apc]['concept_id'].name,
'ref1': ap_concepts[apc]['concept_id'].code,
'date': liquid_date,
'ref': employee_id.partner_id.ref,
'partner_id': c_partner.id,
'account_id': credit_account.id,
'journal_id': self.journal_id.id,
'period_id': account_period.id,
'debit': 0,
'credit': amount,
'analytic_account_id': aa_id.id if credit_account.code[0] in pyg else False,
'tax_code_id': False,
'tax_amount': 0,
'move_id': self.move_id.id,
'state': 'valid',
'date_maturity': liquid_date,
'contract_id': contract_id.id
})
index += 1
orm.direct_create(self._cr, self._uid, 'account_move_line', aml_data, company=True, progress=True)
self.state = 'closed'
self.create_distribition_analytic(self.move_id.id)
return
def create_distribition_analytic(self, move_id):
move_line_ids = self.env['account.move.line'].search([('move_id','=',move_id)])
is_hr_roster = orm.fetchall(self._cr,"select id from ir_module_module where state = 'installed' and name = 'hr_roster'")
is_analytic_cvs = orm.fetchall(self._cr,"select id from ir_module_module where state = 'installed' and name = 'account_analytic_cvs'")
distribucion_analitica = self.env['hr.roster.close.distribution'] if is_hr_roster else False
partner_aaa = orm.fetchall(self._cr, "SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'account_analytic_line' and column_name = 'partner_aaa'")
analytic_lines_data = []
for move_line in move_line_ids:
if int(move_line.account_id.code[0]) <= 3 and not self.env.user.company_id.config_analytic_global:
continue
if not move_line.contract_id:
raise Warning("El movimieto < {m} > no tiene un contrato asociado".format(m=move_line.name))
contrato = move_line.contract_id
if not contrato.employee_id:
raise Warning("El contrato < {c} > no tiene un empleado asociado".format(c=move_line.contract_id.name))
employee_id = contrato.employee_id
if distribucion_analitica:
distri_employee = distribucion_analitica.search([('employee_id','=',employee_id.id), ('date', '>=', move_line.period_id.date_start),('date', '<=', move_line.period_id.date_stop)])
else:
distri_employee = [] # se deja [] por el for que itera distri_employee
if not distri_employee:
if not contrato.analytic_account_id:
raise Warning("El contrato < {c} > no tiene una cuenta analitica asociada".format(c=contrato.name))
self._cr.execute('update account_move_line set analytic_account_id = {AA} where id = {AML}'.format(
AA=contrato.analytic_account_id.id, AML=move_line.id))
analytic_line = {
'name': move_line.name,
'account_id': contrato.analytic_account_id.id,
'journal_id': move_line.journal_id.analytic_journal_id.id,
'user_id': self._uid,
'date': move_line.date,
'ref': move_line.ref,
'amount': (move_line.credit - move_line.debit),
'general_account_id': move_line.account_id.id,
'move_id': move_line.id,
'cc1': contrato.analytic_account_id.cc1 if not is_analytic_cvs else contrato.analytic_account_id.regional_id.name,
'cc2': contrato.analytic_account_id.cc2 if not is_analytic_cvs else contrato.analytic_account_id.city_id.name,
'cc3': contrato.analytic_account_id.cc3 if not is_analytic_cvs else contrato.analytic_account_id.linea_servicio_id.name,
'cc4': contrato.analytic_account_id.cc4 if not is_analytic_cvs else contrato.analytic_account_id.sede,
'cc5': contrato.analytic_account_id.cc5 if not is_analytic_cvs else contrato.analytic_account_id.puesto,
}
if partner_aaa:
analytic_line['partner_aaa'] = contrato.analytic_account_id.partner_id.id
analytic_lines_data.append(analytic_line)
for dis_emp in distri_employee:
analytic_line = {
'name': move_line.name,
'account_id': dis_emp.analytic_account_id.id,
'journal_id': move_line.journal_id.analytic_journal_id.id,
'user_id': self._uid,
'date': move_line.date,
'ref': move_line.ref,
'amount': (move_line.credit - move_line.debit)*dis_emp.rate/100,
'general_account_id': move_line.account_id.id,
'move_id': move_line.id,
'cc1': dis_emp.analytic_account_id.cc1 if not is_analytic_cvs else dis_emp.analytic_account_id.regional_id.name,
'cc2': dis_emp.analytic_account_id.cc2 if not is_analytic_cvs else dis_emp.analytic_account_id.city_id.name,
'cc3': dis_emp.analytic_account_id.cc3 if not is_analytic_cvs else dis_emp.analytic_account_id.linea_servicio_id.name,
'cc4': dis_emp.analytic_account_id.cc4 if not is_analytic_cvs else dis_emp.analytic_account_id.sede,
'cc5': dis_emp.analytic_account_id.cc5 if not is_analytic_cvs else dis_emp.analytic_account_id.puesto,
}
if partner_aaa:
analytic_line['partner_aaa'] = dis_emp.analytic_account_id.partner_id.id
analytic_lines_data.append(analytic_line)
orm.direct_create(self._cr, self._uid, 'account_analytic_line', analytic_lines_data, company=True)
return True
def get_contract_repeated(self):
if self.contract_ids.ids:
contracts_ids = tuple( self.contract_ids.ids if len(self.contract_ids.ids) > 1 else [self.contract_ids.ids[0],0])
contracts_ids = 'and contract_id in ' + str(contracts_ids)
else:
contracts_ids = ""
get_contract_repeated_sel = """
select name
from hr_contract
where id in (
select contract_id from pila_contract_rel where pila_id in (select id from hr_contribution_form where period_id = {periodo})
{contracts}
group by contract_id
having count(pila_id) > 1) """.format(periodo=self.period_id.id, pila=self.id, contracts=contracts_ids)
contract_repeated = [str(x[0]) for x in orm.fetchall(self._cr, get_contract_repeated_sel)]
if contract_repeated:
raise Warning ('Error, hay contratos que estan en varias autoliquidaciones en el mismo periodo, por favor validar los siguientes nombres de contratos: {ids}'.format(ids=contract_repeated))
| odoopruebasmp/Odoo_08 | v8_llevatelo/hr_payroll_extended/models/hr_contribution_form.py | hr_contribution_form.py | py | 82,426 | python | en | code | 0 | github-code | 6 |
40155982512
|
# -*- coding: utf-8 -*-
"""
This module contains functions for losses of various types: soiling, mismatch,
snow cover, etc.
"""
import numpy as np
import pandas as pd
from pvlib.tools import cosd
def soiling_hsu(rainfall, cleaning_threshold, tilt, pm2_5, pm10,
depo_veloc={'2_5': 0.004, '10': 0.0009},
rain_accum_period=pd.Timedelta('1h')):
"""
Calculates soiling ratio given particulate and rain data using the model
from Humboldt State University [1]_.
Parameters
----------
rainfall : Series
Rain accumulated in each time period. [mm]
cleaning_threshold : float
Amount of rain in an accumulation period needed to clean the PV
modules. [mm]
tilt : float
Tilt of the PV panels from horizontal. [degree]
pm2_5 : numeric
Concentration of airborne particulate matter (PM) with
aerodynamic diameter less than 2.5 microns. [g/m^3]
pm10 : numeric
Concentration of airborne particulate matter (PM) with
aerodynamic diameter less than 10 microns. [g/m^3]
depo_veloc : dict, default {'2_5': 0.004, '10': 0.0009}
Deposition or settling velocity of particulates. [m/s]
rain_accum_period : Timedelta, default 1 hour
Period for accumulating rainfall to check against `cleaning_threshold`
It is recommended that `rain_accum_period` be between 1 hour and
24 hours.
Returns
-------
soiling_ratio : Series
Values between 0 and 1. Equal to 1 - transmission loss.
References
-----------
.. [1] M. Coello and L. Boyle, "Simple Model For Predicting Time Series
Soiling of Photovoltaic Panels," in IEEE Journal of Photovoltaics.
doi: 10.1109/JPHOTOV.2019.2919628
.. [2] Atmospheric Chemistry and Physics: From Air Pollution to Climate
Change. J. Seinfeld and S. Pandis. Wiley and Sons 2001.
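Examples
--------
Illustrative only; assumes ``rainfall`` is an hourly Series indexed by a
DatetimeIndex and that the particulate concentrations are given in g/m^3.

>>> sr = soiling_hsu(rainfall, cleaning_threshold=0.5, tilt=20,
...                  pm2_5=5.0e-6, pm10=1.0e-5)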
"""
try:
from scipy.special import erf
except ImportError:
raise ImportError("The soiling_hsu function requires scipy.")
# accumulate rainfall into periods for comparison with threshold
accum_rain = rainfall.rolling(rain_accum_period, closed='right').sum()
# cleaning is True for intervals with rainfall greater than threshold
cleaning_times = accum_rain.index[accum_rain >= cleaning_threshold]
horiz_mass_rate = pm2_5 * depo_veloc['2_5']\
+ np.maximum(pm10 - pm2_5, 0.) * depo_veloc['10']
tilted_mass_rate = horiz_mass_rate * cosd(tilt) # assuming no rain
# tms -> tilt_mass_rate
tms_cumsum = np.cumsum(tilted_mass_rate * np.ones(rainfall.shape))
mass_no_cleaning = pd.Series(index=rainfall.index, data=tms_cumsum)
mass_removed = pd.Series(index=rainfall.index)
mass_removed[0] = 0.
mass_removed[cleaning_times] = mass_no_cleaning[cleaning_times]
accum_mass = mass_no_cleaning - mass_removed.ffill()
soiling_ratio = 1 - 0.3437 * erf(0.17 * accum_mass**0.8473)
return soiling_ratio
| Samuel-psa/pvlib-python | pvlib/losses.py | losses.py | py | 2,997 | python | en | code | null | github-code | 6 |
34670410766
|
#!/usr/bin/python3
import mysql.connector
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.stem import WordNetLemmatizer
from lib.constants import brand_name_list, device_type_list, cwe_to_exp_type
from vul_scanner import query_iot_cve_from_cvetable
from lib.query_mysql import write_to_vul_analysis_table, query_cve_from_cvetable_given_cveid
def parse_description(desc):
"""
Convert the string of descriptions to a list of lemmas.
:param desc: a string of vulnerability description consisting of one or more sentences
:return: a tuple of (lemma_list, lemma_list_raw, desc_lower)
"""
# create a lemmatizer for word standardization
wordnet_lemmatizer = WordNetLemmatizer()
desc_lower = desc.lower() # a string of original CVE description in lower case
sent_list = sent_tokenize(desc_lower)
sent_list_raw = sent_tokenize(desc)
lemma_list = [] # a list of lemmatized words for one description, in lower case
for sent in sent_list:
sentence_words = word_tokenize(sent)
for word in sentence_words:
lemma_list.append(wordnet_lemmatizer.lemmatize(word, pos='v'))
lemma_list_raw = [] # a list of lemmatized words for one description, in raw form
for sent in sent_list_raw:
sentence_words_raw = word_tokenize(sent)
for word in sentence_words_raw:
lemma_list_raw.append(wordnet_lemmatizer.lemmatize(word, pos='v'))
return lemma_list, lemma_list_raw, desc_lower
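# Example (added, illustrative): for the description "Remote attackers can execute
# arbitrary commands", parse_description returns the lemmatized lower-case tokens
# (e.g. 'command' for 'commands'), the lemmatized tokens keeping their original
# casing, and the whole description lower-cased as a single string.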
def get_protocol(lemma_list):
"""
Get the wireless protocol type based on vulnerability description.
:param lemma_list: a list of lemmatized words from vulnerability description
:return: a string of wireless protocol type
"""
if 'wifi' in lemma_list or 'wi-fi' in lemma_list or 'tcp' in lemma_list or 'udp' in lemma_list or 'http' in lemma_list or 'dns' in lemma_list or 'telnet' in lemma_list or 'mqtt' in lemma_list:
return 'wifi'
if 'bluetooth' in lemma_list or 'ble' in lemma_list:
return 'bluetooth'
if 'zigbee' in lemma_list:
return 'zigbee'
if 'zwave' in lemma_list or 'z-wave' in lemma_list:
return 'zwave'
return 'undecided'
def full_fledged(lemma_list, device_type):
"""
Decide if the device is full-fledged.
:param lemma_list: a list of lemmatized words from vulnerability description
:param device_type: a string of device type
:return: a boolean indicating whether a device is full-fledged or not
"""
return 'camera' in lemma_list or 'router' in lemma_list or 'hub' in lemma_list or 'tv' in lemma_list or 'printer' in lemma_list or 'basestation' in lemma_list or 'thermostat' in lemma_list or \
device_type == 'camera' or device_type == 'router' or device_type == 'hub' or device_type == 'tv' or device_type == 'printer' or device_type == 'basestation' or device_type == 'thermostat'
def is_dos(lemma_list, desc_lower, C, I, A):
"""
Decide if the exploit type is DoS.
:return: a boolean value
"""
return 'dos' in lemma_list or 'denial of service' in desc_lower or 'denial-of-service' in desc_lower or 'crash' in lemma_list or C == 0 and I == 0 and A == 2
def is_buffer_overflow(desc_lower):
"""
Decide if the exploit type is buffer overflow.
:return: a boolean value
"""
return 'buffer overflow' in desc_lower or 'buffer overrun' in desc_lower or 'stack overflow' in desc_lower
def is_man_in_the_middle(lemma_list, lemma_list_raw, desc_lower):
"""
Decide if the exploit type is man in the middle.
:return: a boolean value
"""
return 'man-in-the-middle' in lemma_list or 'man in the middle' in desc_lower or 'MITM' in lemma_list_raw
def is_xss(lemma_list_raw, desc_lower):
"""
Decide if the exploit type is XSS.
:return: a boolean value
"""
return 'XSS' in lemma_list_raw or 'cross-site scripting' in desc_lower or 'cross site scripting' in desc_lower
def is_csrf(lemma_list_raw, desc_lower):
"""
Decide if the exploit type is CSRF.
:return: a boolean value
"""
return 'CSRF' in lemma_list_raw or 'XSRF' in lemma_list_raw or 'cross-site request forgery' in desc_lower or 'cross site request forgery' in desc_lower
def decide_exploit_precondition(exploit_range, desc, device_type):
"""
Decide the precondition of an exploit based on its exploit range and natural language description. NOTICE: Original
`Network` attack vector can be misleading as CVSS does not have enough information to decide its actual range.
Original `Adjacent` attack vector is ambiguous about physically adjacent and logically adjacent.
:param exploit_range: the exploit range field of its CVSS, including Network, Adjacent, Local, Physical
:param desc: a string of one or multiple sentences for vulnerability description
:param device_type: a string of device_type
:return: a string indicating the exploit precondition
"""
lemma_list, lemma_list_raw, desc_lower = parse_description(desc)
if exploit_range == 'PHYSICAL':
return 'physical'
if exploit_range == 'LOCAL':
return 'local'
# Decide the protocol based on vulnerability descriptions
protocol = get_protocol(lemma_list)
# If the exploit range is `ADJACENT_NETWORK`, then we identify whether it is physically or logically adjacent
if exploit_range == 'ADJACENT_NETWORK':
return decide_precondition_for_original_adjacent(protocol, lemma_list)
# If the exploit range is `NETWORK`, we should check if it is the correct range
return decide_precondition_for_original_network(device_type, protocol, lemma_list, lemma_list_raw, desc_lower)
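# Note (added): overall, PHYSICAL/LOCAL CVSS vectors map directly to 'physical'/'local';
# ADJACENT_NETWORK is split into logically vs physically adjacent per protocol; and
# NETWORK is kept only for descriptions mentioning 'remote' or for full-fledged devices,
# otherwise it is downgraded to a protocol-specific adjacent precondition.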
def decide_precondition_for_original_adjacent(protocol, lemma_list):
# If the exploit is about wifi network, then attacker has to join the wifi network first
if protocol == 'wifi':
return 'wifi:adjacent_logically'
if protocol == 'bluetooth' or protocol == 'zigbee' or protocol == 'zwave':
return protocol + ':' + decide_precondition_low_power_protocol(lemma_list)
# for other undecided adjacent types, we set precondition as `wifi:adjacent_logically`
return 'wifi:adjacent_logically'
def decide_precondition_for_original_network(device_type, protocol, lemma_list, lemma_list_raw, desc_lower):
if 'remote' in lemma_list:
return 'network'
if (is_xss(lemma_list_raw, desc_lower) or is_csrf(lemma_list_raw, desc_lower) or 'dns rebinding' in desc_lower) and full_fledged(lemma_list, device_type):
return 'network'
# if a device is not full-fledged, and there is no `remote` keyword, then set precondition as `PROTOCOL:adjacent_XXX`
if not full_fledged(lemma_list, device_type):
if protocol == 'bluetooth' or protocol == 'zigbee' or protocol == 'zwave':
return protocol + ':' + decide_precondition_low_power_protocol(lemma_list)
return 'wifi:adjacent_logically'
return 'network'
def decide_precondition_low_power_protocol(lemma_list):
if 'sniff' in lemma_list or 'decrypt' in lemma_list or 'eavesdrop' in lemma_list or 'intercept' in lemma_list:
return 'adjacent_physically'
return 'adjacent_logically'
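# Illustrative precondition mapping (assumes get_protocol(), not shown here, returns
# 'zigbee' when the description lemmas mention zigbee):
#   exploit_range='LOCAL'                                               -> 'local'
#   exploit_range='ADJACENT_NETWORK', desc ~ "... sniff zigbee traffic ..."
#       -> decide_precondition_for_original_adjacent -> 'zigbee:adjacent_physically'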
def decide_exploit_effect(desc, device_type, C, I, A):
"""
Decide the effect of an exploit based on its natural language description.
:param desc: a string of one or multiple sentences for vulnerability description
:param device_type: a string of device_type
:param C: confidentiality, 2: COMPLETE, 1: PARTIAL, 0: NONE
:param I: integrity, 2: COMPLETE, 1: PARTIAL, 0: NONE
:param A: availability, 2: COMPLETE, 1: PARTIAL, 0: NONE
:return: a string indicating the exploit effect
"""
lemma_list, lemma_list_raw, desc_lower = parse_description(desc)
# Here are some rules based on keywords in the descriptions
if 'root' in lemma_list or 'arbitrary' in lemma_list:
if full_fledged(lemma_list, device_type):
return 'rootPrivilege'
else:
return 'commandInjection'
if 'control' in lemma_list or 'take over' in desc_lower:
return 'deviceControl'
if (('inject' in lemma_list or 'insert' in lemma_list or 'execute' in lemma_list) and 'command' in lemma_list) or (
'hijack' in lemma_list and 'request' in lemma_list):
return 'commandInjection'
    if ('inject' in lemma_list or 'insert' in lemma_list or 'obtain' in lemma_list) and (
            'data' in lemma_list or 'event' in lemma_list):
        return 'eventAccess'
if ('steal' in lemma_list or 'obtain' in lemma_list or 'retrieve' in lemma_list) and (
'wifi' in lemma_list or 'wi-fi' in lemma_list):
return 'wifiAccess'
if is_dos(lemma_list, desc_lower, C, I, A):
return 'DoS'
# Here are some customized rules based on CIA triad
# if the device has CIA all high, and it is a full-fledged device, then it is root, otherwise, we return deviceControl
if C == 2 and I == 2 and A == 2:
if full_fledged(lemma_list, device_type):
return 'rootPrivilege'
return 'deviceControl'
# Now we need to construct more complicated rules
# rule for door lock
if 'unlock' in lemma_list and 'lock' in lemma_list:
return 'commandInjection'
# rule for light bulb
if 'turn on' in desc_lower and ('light' in lemma_list or 'bulb' in lemma_list):
return 'commandInjection'
# rule for buffer overflow
if is_buffer_overflow(desc_lower):
if 'inject' in lemma_list or 'hijack' in lemma_list or 'hijacking' in lemma_list:
if full_fledged(lemma_list, device_type):
return 'rootPrivilege'
else:
return 'commandInjection'
else:
return 'DoS'
return 'unknown_exploit_effect'
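# Illustrative effect decisions (assumes full_fledged(), whose tail is shown above,
# treats 'camera' as full-fledged and a door lock as not):
#   "... execute arbitrary code ..." on a camera    -> 'rootPrivilege'
#   "... execute arbitrary code ..." on a door lock -> 'commandInjection'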
def decide_exploit_type(cwe, cwe_to_exp_type, desc, C, I, A):
"""
Decide the type of an exploit based on its CWE and natural language description.
:param cwe: a string of the CWE-ID of the NVD-CVE entry
:param cwe_to_exp_type: a dictionary mapping CWE-ID to exploit types
:param desc: a string of one or multiple sentences for vulnerability description
:param C: confidentiality, 2: COMPLETE, 1: PARTIAL, 0: NONE
:param I: integrity, 2: COMPLETE, 1: PARTIAL, 0: NONE
:param A: availability, 2: COMPLETE, 1: PARTIAL, 0: NONE
:return: a string of exploit types
"""
lemma_list, lemma_list_raw, desc_lower = parse_description(desc)
if is_dos(lemma_list, desc_lower, C, I, A):
return 'Denial of Service'
if is_buffer_overflow(desc_lower):
return 'Buffer Overflow'
if is_man_in_the_middle(lemma_list, lemma_list_raw, desc_lower):
return 'Man in the Middle'
if cwe in cwe_to_exp_type:
return cwe_to_exp_type[cwe]
return 'unknown_exploit_type'
def vul_analyzer(cve_id, device_type):
"""
Analyze the given CVE ID and turn the exploit model.
:param cve_id: a string of CVE ID
:param device_type: device type can help to decide exploit precondition and effect
:return: a tuple of exploit model (in Prolog terminology)
"""
# Create a MySQL connect object and cursor object.
db = mysql.connector.connect(host='localhost', user='YOUR_USERNAME_HERE', password='YOUR_PASSWORD_HERE', database='cve')
cursor = db.cursor()
# Query MySQL database to get the cve_tuple
cve_id, cwe, probability, impact_score, exploit_range, desc, C, I, A = query_cve_from_cvetable_given_cveid(cursor, cve_id)
precondition = decide_exploit_precondition(exploit_range, desc, device_type)
effect = decide_exploit_effect(desc, device_type, C, I, A)
# exploit_type = decide_exploit_type(cwe, cwe_to_exp_type, desc, C, I, A)
return cve_id, precondition, effect, probability, impact_score
def main():
# Create a MySQL connect object and cursor object.
db = mysql.connector.connect(host='localhost', user='YOUR_USERNAME_HERE', password='YOUR_PASSWORD_HERE', database='cve')
cursor = db.cursor()
# Create the dictionary to store queried CVEs for IoT devices
iot_cve_dict = query_iot_cve_from_cvetable(cursor, brand_name_list, device_type_list)
# Parse CVE descriptions to decide the effect type of each exploit
for (brand_name, device_type) in iot_cve_dict:
# print(brand_name, device_type)
cve_tuple_list = iot_cve_dict[(brand_name, device_type)]
for (cveid, cwe, probability, impact_score, exploit_range, desc, C, I, A) in cve_tuple_list:
precondition = decide_exploit_precondition(exploit_range, desc, device_type)
effect = decide_exploit_effect(desc, device_type, C, I, A)
exploit_type = decide_exploit_type(cwe, cwe_to_exp_type, desc, C, I, A)
cve_exploit_model = (cveid, exploit_type, precondition, effect, probability, impact_score, desc)
write_to_vul_analysis_table(db, cursor, cve_exploit_model)
cursor.close()
db.close()
def test_vul_analyzer():
return vul_analyzer('CVE-2019-3949', 'base station')
    # should return a 5-tuple like: ('CVE-2019-3949', 'network', 'rootPrivilege', 0.98, <impact_score>)
if __name__ == '__main__':
print(test_vul_analyzer())
|
pmlab-ucd/IOTA
|
python/vul_analyzer.py
|
vul_analyzer.py
|
py
| 13,241 |
python
|
en
|
code
| 1 |
github-code
|
6
|
10250262704
|
# Nothing too high-powered. Simply run a DFS from each non-visited node.
# Append the node to a list as each call returns (post-order). Repeat until all
# nodes are visited. The reversed list will be the topological order.
class Graph():
    def __init__(self, size):
        self.nds = [[] for i in range(size)]
        self.visited = [False for i in range(size)]
def printGraph(self):
print(self.nds)
def addEdge(self, nd, nxt):
self.nds[nd].append(nxt)
test = Graph(13)
test.printGraph()
edges = [
(0, 3), (1, 3), (2, 0), (2, 1), (3, 6), (3, 7), (4, 0),
(4, 3), (4, 5), (5, 9), (5, 10), (6, 8), (7, 8), (7, 9),
(8, 11), (9, 11), (9, 12), (10, 9)
]
for i, j in edges:
test.addEdge(i, j)
test.printGraph()
order = []
def dfs(index):
global order
for i in test.nds[index]:
if test.visited[i] == False:
test.visited[i] = True
dfs(i)
order.append(i)
if test.visited[index] == False:
test.visited[index] = True
order.append(index)
for i in range(len(test.visited)):
if test.visited[i] == False:
dfs(i)
print(test.visited)
print(order[::-1])
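# Sanity check: a valid topological order must place u before v for every edge (u, v).
topo = order[::-1]
pos = {node: i for i, node in enumerate(topo)}
assert all(pos[u] < pos[v] for u, v in edges), "not a valid topological order"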
|
Ocinom/Stuff
|
Random/TopSort.py
|
TopSort.py
|
py
| 1,245 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38474579179
|
import argparse
import regex as re
from pathlib import Path
from textwrap import dedent
import yaml
from .validator import run_sigma_validator
from clint.textui import colored, puts
import logging
STANDARD_YAML_PATH = Path(__file__).resolve().parent.parent / Path('CCCS_SIGMA.yml')
SIGMA_FILENAME_REGEX = r'(\.yaml|\.yml)$'
SIGMA_VALID_PREFIX = r'valid_'
SIGMA_VALID_PREFIX_REG = re.compile(r'^' + SIGMA_VALID_PREFIX)
logger = logging.getLogger(__file__)
parser = argparse.ArgumentParser(description='CCCS SIGMA script to run the CCCS SIGMA validator, '
'use the -i or -c flags to generate the id, fingerprint, version, '
'first_imported, or last_modified (if not already present) and add them '
'to the file.')
parser.add_argument('paths', nargs='+', type=str, default=[],
help='A list of files or folders to be analyzed.')
parser.add_argument('-r', '--recursive', action='store_true', default=False, dest='recursive',
help='Recursively search folders provided.')
parser.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbose',
help='Verbose mode, will print why a rule was invalid.')
parser.add_argument('-vv', '--very-verbose', action='store_true', default=False, dest='veryverbose',
help='Very-verbose mode, will printout what rule is about to be processed, '
'the invalid rules, the reasons they are invalid and all contents of the rule.')
parser.add_argument('-f', '--fail', action='store_true', default=False, dest='fail',
help='Fail mode, only prints messages about invalid rules.')
parser.add_argument('-w', '--warnings', action='store_true', default=False, dest='warnings',
help='This mode will ignore warnings and proceed with other behaviors if the rule is valid.')
parser.add_argument('-s', '--standard', action='store_true', default=False, dest='standard',
help='This prints the SIGMA standard to the screen.')
parser.add_argument('-st', '--strict', action='store_true', default=False, dest='strict',
help='This causes the cli to return a non-zero exit code for warnings.')
parser_group = parser.add_mutually_exclusive_group()
parser_group.add_argument('-i', '--in-place', action='store_true', default=False, dest='inplace', # removes comments
help='Modifies valid files in place, mutually exclusive with -c.') # and indentation
parser_group.add_argument('-c', '--create-files', action='store_true', default=False, dest='createfile',
help='Writes a new file for each valid file, mutually exclusive with -i.')
def parse_args(custom_args=None):
if isinstance(custom_args, list):
options = parser.parse_args(custom_args)
else:
options = parser.parse_args()
return options
def get_sigma_paths_from_dir(directory, recursive):
""" Recursively get SIGMA rules from a directory """
if directory.is_file() and re.fullmatch(SIGMA_FILENAME_REGEX, directory.suffix):
yield directory
elif directory.is_dir():
for path in list(directory.iterdir()):
if path.is_file() and re.fullmatch(SIGMA_FILENAME_REGEX, path.suffix):
yield path
elif path.is_dir() and recursive:
for sub_dir_path in get_sigma_paths_from_dir(path, recursive):
yield sub_dir_path
def get_paths_to_validate(options_paths, recursive):
""" Returns a set of pathlib.Path objects for all
SIGMA rules that will be validated """
paths_to_validate = set()
for path in [Path(path_name) for path_name in options_paths]:
if path.exists():
if path.is_dir():
paths_to_validate.update(get_sigma_paths_from_dir(path, recursive))
elif re.match(SIGMA_FILENAME_REGEX, path.suffix):
paths_to_validate.add(path)
else:
print('{message:40}{path}'.format(message='Path does not exist:', path=str(path)))
return sorted(paths_to_validate)
def get_sigma_file_new_path(path):
""" takes a path in argument, and return the same path with the
filename prefixed with SIGMA_VALID_PREFIX.
if the file already has the prefix, returns the path unchanged.
"""
if SIGMA_VALID_PREFIX_REG.match(path.name):
return path
else:
new_name = SIGMA_VALID_PREFIX + path.name
return path.parent / new_name
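# e.g. Path('rules/my_rule.yml') -> Path('rules/valid_my_rule.yml'); a path whose
# name already starts with 'valid_' is returned unchanged (illustrative paths).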
def overwrite_file(path, content):
# convert sigma rule from dict to str and write contents to disk
with open(path, 'w', encoding='utf-8') as f:
f.write(yaml.dump(content, sort_keys=False) + '\n')
def print_errors(sigma_file_processor, options):
if sigma_file_processor.return_file_error_state():
print(colored.red('{indent:>7}{message}'.format(indent='- ', message='Errors:')))
print(colored.white(sigma_file_processor.return_rule_errors_for_cmlt()))
def print_warnings(sigma_file_processor, options):
if sigma_file_processor.return_file_warning_state() and not options.warnings:
print(colored.yellow('{indent:>7}{message}'.format(indent='- ', message='Warnings:')))
print(colored.white(sigma_file_processor.return_rule_warnings_for_cmlt()))
def print_standard():
# TODO fix entries in standard
print('Printing the CCCS SIGMA Standard:')
with open(STANDARD_YAML_PATH, 'r') as yaml_file:
standard = yaml.safe_load(yaml_file)
for standard_key in standard:
standard_entry_name = standard_key
standard_entry_description = standard[standard_key]['description']
standard_entry_unique = standard[standard_key]['unique']
standard_entry_optional = standard[standard_key]['optional']
standard_entry_format = standard[standard_key]['format']
print('{se_name}{message}'.format(message=':',
se_name=standard_entry_name))
print('{preface:20}{se_text}'.format(preface=' - Description:',
se_text=standard_entry_description))
print('{preface:20}{se_text}'.format(preface=' - Format:',
se_text=standard_entry_format))
print('{preface:20}{se_text}'.format(preface=' - Unique:',
se_text=standard_entry_unique))
print('{preface:20}{se_text}'.format(preface=' - Optional:',
se_text=standard_entry_optional))
if 'validator' in standard[standard_key]:
standard_entry_validator = standard[standard_key]['validator']
print('{preface:20}{se_text}'.format(preface=' - Validator:',
se_text=standard_entry_validator))
if 'argument' in standard[standard_key]:
standard_entry_argument = standard[standard_key]['argument']
print('{preface:20}{se_text}'.format(preface=' - Argument:',
se_text=''))
for param in standard_entry_argument:
print('{preface:20}{se_text}'.format(preface=' - ' + param + ': ',
se_text=standard_entry_argument[param]))
print()
def _call_validator(options):
paths_to_validate = get_paths_to_validate(options.paths,
options.recursive)
all_invalid_rule_returns = []
all_warning_rule_returns = []
# if options.standard:
# print_standard()
# main loop : will iterate over every file the program has to validate,
# validate them and then print the output
for sigma_rule_path in list(paths_to_validate):
if options.veryverbose:
print('{message:40}{y_file}'.format(
message='Validating Rule file:',
y_file=sigma_rule_path,
))
# handle if we want to overwrite or create new files
if options.createfile:
generate_values = True
sigma_file_output = get_sigma_file_new_path(sigma_rule_path)
what_will_be_done = 'create a new file with the {} preface.'.format(SIGMA_VALID_PREFIX)
elif options.inplace:
generate_values = True
sigma_file_output = sigma_rule_path
what_will_be_done = 'modify the file in place.'
else:
generate_values = False
what_will_be_done = 'make no changes'
sigma_file_output = None
sigma_validator = run_sigma_validator(sigma_rule_path, generate_values)
# Prints the output of the validator.
file_message = '{message:39}{y_file}'
if sigma_validator.return_file_error_state():
# The rule is invalid
all_invalid_rule_returns.append((sigma_rule_path, sigma_validator))
puts(colored.red(file_message.format(
message='🍅 Invalid Rule File:',
y_file=sigma_rule_path)))
if options.inplace or options.createfile:
# TODO add these methods to SigmaValidator
sigma_validator.modify_values()
if sigma_validator.return_edited_file_string():
print('modifying file ', sigma_file_output)
overwrite_file(sigma_file_output, sigma_validator.return_edited_file_string())
else:
print('No fields were edited ')
if options.verbose or options.veryverbose:
print_errors(sigma_validator, options)
print_warnings(sigma_validator, options)
elif sigma_validator.return_file_warning_state() and not options.warnings:
# The rule is valid, has warnings and warning are turned on
all_warning_rule_returns.append((sigma_rule_path, sigma_validator))
puts(colored.yellow(file_message.format(
message=' Warnings in Rule File:',
y_file=sigma_rule_path
)))
if options.verbose or options.veryverbose:
print_warnings(sigma_validator, options)
elif not sigma_validator.return_file_error_state():
# The rule is valid with no warnings or has warnings and warnings are turned off
if not options.fail:
print(file_message.format(
message="🥦 Valid Rule File:",
y_file=sigma_rule_path
))
else:
print('Invalid Code Execution Block')
if options.veryverbose:
for invalid_rule_path, invalid_rule_return in all_invalid_rule_returns:
print(dedent('''
----------------------------------------------------------------------------
Invalid rule file:{invalid_rule_path}
Warnings:
{rule_warnings}
Errors:
{rule_errors}
{original_rule}
----------------------------------------------------------------------------
''').format(rule_warnings=invalid_rule_return.return_rule_warnings_for_cmlt(),
rule_errors=invalid_rule_return.return_rule_errors_for_cmlt(),
original_rule=invalid_rule_return.return_original_rule(),
invalid_rule_path=invalid_rule_path))
total_sigma_rule_paths = len(paths_to_validate)
total_invalid_sigma_rule_paths = len(all_invalid_rule_returns)
total_warning_sigma_rule_paths = len(all_warning_rule_returns)
total_valid_sigma_rule_paths = (total_sigma_rule_paths
- total_invalid_sigma_rule_paths
- total_warning_sigma_rule_paths)
print(dedent('''
----------------------------------------------------------------------------
All .yaml Rule files found have been passed through the CCCS Sigma Validator:
Total Sigma Rule Files to Analyze: {total_sigma_rule_paths}
Total Valid CCCS Sigma Rule Files: {total_valid_sigma_rule_paths}
Total Warning CCCS Sigma Rule Files: {total_warning_sigma_rule_paths}
Total Invalid CCCS Sigma Rule Files: {total_invalid_sigma_rule_paths}
---------------------------------------------------------------------------
''').format(total_sigma_rule_paths=str(total_sigma_rule_paths),
total_valid_sigma_rule_paths=colored.green(str(total_valid_sigma_rule_paths)),
total_warning_sigma_rule_paths=colored.yellow(str(total_warning_sigma_rule_paths)),
total_invalid_sigma_rule_paths=colored.red(str(total_invalid_sigma_rule_paths))))
if total_invalid_sigma_rule_paths >= 1:
exit(99)
elif total_warning_sigma_rule_paths >= 1 and options.strict:
exit(49)
def git_ci(changed_file_paths):
options = parser.parse_args(changed_file_paths)
_call_validator(options)
def main():
print('Sigma Rule Validator')
options = parse_args()
_call_validator(options)
if __name__ == '__main__':
main()
|
CybercentreCanada/pysigma
|
pysigma/validator_cli.py
|
validator_cli.py
|
py
| 13,374 |
python
|
en
|
code
| 7 |
github-code
|
6
|
21928738877
|
# Instance & Class variables
class Student:
school = "Sherpur Government Victoria Academy"
Alex = Student()
John = Student()
Alex.name = "Alex"
Alex.cls = 7
John.name = "John"
John.cls = 8
print(Alex.name)
print(Alex.school)
# Student.school = "SGVA"
# print(Alex.school)
Alex.school = "SGVA"
print(Alex.school)
print(Student.school)
print(Alex.__dict__)
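# Note: the assignment Alex.school = "SGVA" creates an *instance* attribute that
# shadows the class attribute, so Student.school is unchanged and Alex.__dict__
# now contains {'name': 'Alex', 'cls': 7, 'school': 'SGVA'}.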
|
MahbinAhmed/Learning
|
Python/Python Learning/Revision/35. Instance & Class variables.py
|
35. Instance & Class variables.py
|
py
| 362 |
python
|
en
|
code
| 0 |
github-code
|
6
|
35712406484
|
from global_variables import stop_event
from hatch_controller import hc
from beamer.mqtt import mqtt_client, fsmQueue, TRAPPE_TOPIC, HDMI_TOPIC
from beamer.hdmi import hdmi_relay
import logging
import time
MQTT_OPEN = b"OPEN"
MQTT_CLOSE = b"CLOSE"
MQTT_STOP = b"STOP"
class State():
def __init__(self):
self.enter_time = time.time()
logging.info(f'COVER: Current state: {str(self)}')
self.on_enter()
def on_enter(self) -> None:
pass
def update(self, mqtt_command=""):
return self
def __repr__(self):
return self.__str__()
def __str__(self):
return self.__class__.__name__
class Open(State):
def on_enter(self):
mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "open")
def update(self, mqtt_command=""):
if mqtt_command == MQTT_CLOSE:
return Closing()
return self
class Closed(State):
def on_enter(self):
mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "closed")
def update(self, mqtt_command=""):
if mqtt_command == MQTT_OPEN:
return Opening()
return self
class Stopped(State):
def on_enter(self):
hc.stop()
mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "stopped")
logging.info(f"Stopped at {hc.get_position()}")
def update(self, mqtt_command=""):
if mqtt_command == MQTT_CLOSE:
return Closing()
elif mqtt_command == MQTT_OPEN:
return Opening()
return self
class Opening(State):
def on_enter(self):
mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "opening")
hc.set_target_position(hc.opened_position)
def update(self, mqtt_command=""):
if mqtt_command == MQTT_CLOSE:
return Closing()
elif mqtt_command == MQTT_STOP:
return Stopped()
return self
class Closing(State):
def on_enter(self) -> None:
mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "closing")
hc.enable_control()
hc.set_target_position(hc.closed_position)
return
def update(self, mqtt_command=""):
if mqtt_command == MQTT_OPEN:
return Opening()
elif mqtt_command == MQTT_STOP:
# stop hc
mqtt_client.publish(f"{TRAPPE_TOPIC}/state", "stopped")
return Stopped()
return self
class CoverStateMachine():
def __init__(self) -> None:
self.state = Closed()
def control_loop(self):
while not stop_event.is_set():
if hc.target_position_reached():
if hc.get_position() <= hc.closed_position + 1:
self.state = Closed()
elif hc.get_position() >= hc.opened_position - 10:
self.state = Open()
mqtt_command = ""
            if not fsmQueue.empty():
mqtt_msg = fsmQueue.get()
if mqtt_msg.topic == f"{HDMI_TOPIC}/set":
if mqtt_msg.payload == b"ON":
hdmi_relay.enable()
mqtt_client.publish(f"{HDMI_TOPIC}/state", b"ON")
elif mqtt_msg.payload == b"OFF":
hdmi_relay.disable()
mqtt_client.publish(f"{HDMI_TOPIC}/state", b"OFF")
elif mqtt_msg.topic == f"{TRAPPE_TOPIC}/set":
mqtt_command = mqtt_msg.payload
logging.info(f"command: {mqtt_command}")
self.state = self.state.update(mqtt_command)
time.sleep(50 * 1e-3) # 50 ms loop
coverFSM = CoverStateMachine()
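# Usage sketch (hypothetical wiring; the real project may start this differently):
#   import threading
#   threading.Thread(target=coverFSM.control_loop, daemon=True).start()
# The loop exits once stop_event is set.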
|
clementnuss/hatch_controller
|
beamer/beamer_state_machine.py
|
beamer_state_machine.py
|
py
| 3,618 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11121080147
|
import typing as tp
from datetime import datetime, date
from uuid import uuid4
import pytest
from sqlalchemy import text
from librarius.domain.models import Publication
from librarius.service.uow.implementation import GenericUnitOfWork
if tp.TYPE_CHECKING:
from sqlalchemy.orm import Session
from sqlalchemy.sql.expression import TextClause
from librarius.types import Reference
pytestmark = pytest.mark.usefixtures("mappers")
def insert_publications(
session: "Session",
uuid: "Reference",
title: str,
date_added: datetime,
date_modified: datetime,
date_published: date,
):
expression: "TextClause" = text(
"INSERT INTO publications (uuid, title, date_added, date_modified, date_published) VALUES (:uuid, :title, :date_added, :date_modified, :date_published)"
)
expression: "TextClause" = expression.bindparams(
uuid=uuid,
title=title,
date_added=date_added,
date_modified=date_modified,
date_published=date_published,
)
session.execute(expression)
def retrieve(query, uow):
with uow:
return uow.session.query(Publication).all()
def test_uow_can_retrieve_a_publication(sqlite_session_factory):
session: "Session" = sqlite_session_factory()
pub_uuid = str(uuid4())
insert_publications(
session, pub_uuid, "Cerbulan Book", datetime.now(), datetime.now(), date.today()
)
session.commit()
uow = GenericUnitOfWork(sqlite_session_factory)
# with uow:
# results = uow.session.query(Publication).all()
# results = retrieve_all_publications(AllPublications(), uow)
# print(results[0].__dict__)
# def test_1(sqlite_session_factory):
# session: Session = sqlite_session_factory()
# uu = str(uuid.uuid4())
# title = "Cerbulan"
# date_added = datetime.now()
# date_modified = datetime.now()
# date_published = datetime.now()
# #session.execute("INSERT INTO publications (uuid, title, date_added, date_modified, date_published VALUES (:uuid, :title, :date_added, :date_modified, :date_published)),",
# # dict(uuid=uu, title=title, date_added=date_added, date_modified=date_modified, date_published=date_published))
# #insert_publications(session, uu, title, date_added, date_modified, date_published)
# expression: TextClause = text(
# "INSERT INTO publications (uuid, title, date_added, date_modified, date_published) VALUES (:uuid, :title, :date_added, :date_modified, :date_published)"
# )
# expression: TextClause = expression.bindparams(
# uuid=uu, title=title, date_added=date_added, date_modified=date_modified, date_published=date_published
# )
# session.execute(expression)
# from sqlalchemy.engine.cursor import CursorResult
# session.commit()
# result: CursorResult = session.execute("SELECT * FROM publications")
# [berba] = result
# #print(berba)
# p1: Publication = session.query(Publication).filter_by(uuid=uu).first()
# assert p1.uuid == uu
#
|
adriangabura/vega
|
tests/integration/test_uow.py
|
test_uow.py
|
py
| 3,023 |
python
|
en
|
code
| 1 |
github-code
|
6
|
71861329469
|
"""将CHANGELOG.MD中的本次更新信息提取出来,供github release流程使用"""
from __future__ import annotations
import os.path
from log import logger
from util import make_sure_dir_exists
def gen_changelog():
update_message_list: list[str] = []
    # Parse the changelog file
version_list: list[str] = []
version_to_update_message_list: dict[str, list[str]] = {}
with open("CHANGELOG.MD", encoding="utf-8") as changelog_file:
version = ""
for line in changelog_file:
# # v20.0.1 2022.8.22
if line.startswith("# v"):
version = line.split(" ")[1][1:]
version_list.append(version)
continue
if version != "":
if version not in version_to_update_message_list:
version_to_update_message_list[version] = []
version_to_update_message_list[version].append(line.strip())
    # Get the update messages for the latest version
latest_version = version_list[0]
update_message_list.extend(version_to_update_message_list[latest_version])
    # Export the text
github_release_dir = os.path.realpath("./releases/_github_action_artifact")
make_sure_dir_exists(github_release_dir)
github_change_path = os.path.join(github_release_dir, "changelog-github.txt")
logger.info(f"将更新信息写入临时文件,供github release使用: {github_change_path}")
with open(github_change_path, "w", encoding="utf-8") as output_file:
output_file.write("\n".join(update_message_list))
if __name__ == "__main__":
gen_changelog()
|
fzls/djc_helper
|
_gen_changelog_for_github_release.py
|
_gen_changelog_for_github_release.py
|
py
| 1,606 |
python
|
en
|
code
| 319 |
github-code
|
6
|
1541207425
|
'''This module should be used to test the parameter and return types of your
functions. Before submitting your assignment, run this type-checker. This
typechecker expects to find files cipher_functions.py, secret1.txt, and
deck1.txt in the same folder.
If errors occur when you run this typechecker, fix them before you submit
your assignment.
If no errors occur when you run this typechecker, then the type checks passed.
This means that the function parameters and return types match the assignment
specification, but it does not mean that your code works correctly in all
situations. Be sure to test your code thoroughly before submitting.
'''
import builtins
# Check for use of functions print and input.
our_print = print
def disable_print(*args):
raise Exception("You must not call built-in function print!")
def disable_input(*args):
raise Exception("You must not call built-in function input!")
builtins.print = disable_print
builtins.input = disable_input
import cipher_functions
sample_deck = [1, 4, 7, 10, 13, 16, 19, 22, 25, 28, 3, 6, 9, 12, 15,
18, 21, 24, 27, 2, 5, 8, 11, 14, 17, 20, 23, 26]
# typecheck the cipher_functions.py functions
# Type check cipher_functions.clean_message
result = cipher_functions.clean_message('abc')
assert isinstance(result, str), \
'''clean_message should return a str, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.encrypt_letter
result = cipher_functions.encrypt_letter('A', 1)
assert isinstance(result, str) and len(result) == 1, \
'''encrypt_letter should return a single character, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.decrypt_letter
result = cipher_functions.decrypt_letter('B', 1)
assert isinstance(result, str) and len(result) == 1, \
'''decrypt_letter should return a single character, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.swap_cards
result = cipher_functions.swap_cards(sample_deck, 1)
assert result is None, \
'''swap_cards should return None, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.get_small_joker_value
result = cipher_functions.get_small_joker_value(sample_deck)
assert isinstance(result, int), \
'''get_small_joker_value should return int, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.get_big_joker_value
result = cipher_functions.get_big_joker_value(sample_deck)
assert isinstance(result, int), \
'''get_big_joker_value should return int, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.move_small_joker
result = cipher_functions.move_small_joker(sample_deck)
assert result is None, \
'''move_small_joker should return None, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.move_big_joker
result = cipher_functions.move_big_joker(sample_deck)
assert result is None, \
'''move_big_joker should return None, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.triple_cut
result = cipher_functions.triple_cut(sample_deck)
assert result is None, \
'''triple_cut should return None, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.insert_top_to_bottom
result = cipher_functions.insert_top_to_bottom(sample_deck)
assert result is None, \
'''insert_top_to_bottom should return None, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.get_card_at_top_index
result = cipher_functions.get_card_at_top_index(sample_deck)
assert isinstance(result, int), \
'''get_card_at_top_index should return an int, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.get_next_keystream_value
result = cipher_functions.get_next_keystream_value(sample_deck)
assert isinstance(result, int), \
'''get_next_keystream_value should return an int, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.process_messages
result = cipher_functions.process_messages(sample_deck, ['A', 'B', 'C'], 'd')
assert isinstance(result, list), \
'''process_messages should return a list, but returned {0}''' \
.format(type(result))
for item in result:
assert isinstance(item, str), \
'''process_messages should return a list of str, but returned a list of {0}'''\
.format(type(item))
# Type check cipher_functions.read_messages
result = cipher_functions.read_messages(open('secret1.txt'))
assert isinstance(result, list), \
'''read_messages should return a list, but returned {0}''' \
.format(type(result))
for item in result:
assert isinstance(item, str), \
'''read_messages should return a list of str, but returned a list of {0}'''\
.format(type(item))
# Type check cipher_functions.is_valid_deck
result = cipher_functions.is_valid_deck([1, 2, 3])
assert isinstance(result, bool), \
'''is_valid_deck should return a bool, but returned {0}''' \
.format(type(result))
# Type check cipher_functions.read_deck
result = cipher_functions.read_deck(open('deck1.txt'))
assert isinstance(result, list), \
'''read_deck should return a list, but returned {0}''' \
.format(type(result))
for item in result:
assert isinstance(item, int), \
'''read_deck should return a list of int, but returned a list of {0}'''\
.format(type(item))
our_print("""
Yippee! The type checker program completed without error.
This means that the functions in cipher_functions.py:
- are named correctly,
- take the correct number of arguments, and
- return the correct types
This does NOT mean that the functions are correct!
Be sure to thoroughly test your functions yourself before submitting.""")
|
monkeykingg/projects
|
1st_year/csc108/a2starter/a2_type_checker.py
|
a2_type_checker.py
|
py
| 5,773 |
python
|
en
|
code
| 2 |
github-code
|
6
|
13954441013
|
'''Ask the user to enter their email address. Print a message indicating
whether the address is valid or not, using a function to decide.
An address is considered valid if it contains the "@" symbol.'''
def evaluaMail1(correo):
indice = correo.find('@')
mensa = 'CORRECTO'
if indice == -1:
mensa = 'INCORRECTO'
return mensa
def evaluaMail2(correo):
cont = correo.count('@')
mensa = 'INCORRECTO'
if cont == 1:
mensa = 'CORRECTO'
return mensa
correo = input("Mail: ")
estado = evaluaMail1(correo)
print(estado)
estado = evaluaMail2(correo)
print(estado)
|
eSwayyy/UCM-projects
|
python/catedra/lab_funciones/ejercicio1.py
|
ejercicio1.py
|
py
| 638 |
python
|
es
|
code
| 1 |
github-code
|
6
|
40370569254
|
def bonificacion(n):
    if n <= 1000000:
        bonificacion = float(0)
    elif n <= 2500000:
        bonificacion = float(n * 0.04)
    else:
        bonificacion = float(n * 0.08)
    return bonificacion
print('\n')
numero1=float(input("Diga las ventas realizadas: "))
x=bonificacion(numero1)
print('la bonificacion es: $ {:,.0f}'.format(x))
print('\n')
|
Natacha7/Python
|
Condicional/Venta_vendedor1.py
|
Venta_vendedor1.py
|
py
| 393 |
python
|
es
|
code
| 0 |
github-code
|
6
|
23265542447
|
class Solution:
def checkPermutation(self, str1, str2):
if len(str1) != len(str2):
return False
        mp = {}
for char in str1: #O(n)
if char not in mp:
mp[char] = 1
else:
mp[char] += 1
for char in str2: #O(n)
if char not in mp:
return False
else:
mp[char] -= 1
if mp[char] == 0:
del mp[char]
return True
# O(n) time
# O(n) space due to hash map
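# Example usage (illustrative):
#   Solution().checkPermutation("abc", "cab")  # True  - same character counts
#   Solution().checkPermutation("abc", "abd")  # False - 'd' never appears in str1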
|
anhtuanle2101/Data_Algo
|
Python/Problems/checkPermutation.py
|
checkPermutation.py
|
py
| 548 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41196412710
|
import os
import sys
def run(inputs, output):
exe = os.path.dirname(sys.executable)
gdalwarp = os.path.join(exe, 'Library', 'bin', 'gdalwarp.exe')
args = [ "--config", "GDAL_CACHEMAX", "3000", "-wm", "3000", *inputs, output ]
os.system(gdalwarp + ' ' + ' '.join(args))
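# Usage sketch (hypothetical file names; assumes a conda-style layout where
# gdalwarp.exe sits in Library/bin next to the Python interpreter):
#   run(["tile_a.tif", "tile_b.tif"], "merged.tif")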
|
w-copper/misc-tools
|
gdaltools/batch_merge.py
|
batch_merge.py
|
py
| 308 |
python
|
en
|
code
| 3 |
github-code
|
6
|
3971463654
|
from flask import Flask, request, jsonify, render_template, send_file
import os
import csv
import json
import base64
import pickle
import logging
from utils import (set_license_key_in_config, get_license_key_from_config,
get_dynamodb_table, license_key_is_valid)
# Configure the logging level
logging.basicConfig(level=logging.INFO)
# Get the logger for the current module
logger = logging.getLogger(__name__)
# Create a handler that writes log messages to a file
handler = logging.FileHandler('error.log')
# Create a formatter that specifies the format of the log messages
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
PORT = 8323
# For Production
# app = Flask(__name__,
# static_folder='frontend/static/', static_url_path='/static/',
# template_folder='frontend/templates')
# For Development
app = Flask(__name__,
static_folder='../frontend/static/', static_url_path='/static/',
template_folder='../frontend/templates')
base_path = os.path.join(app.root_path, 'frontend', 'media')
def remove_uploaded_background_images():
import glob
pattern = os.path.join(base_path, 'Background-Image*.png')
# Using glob to find all matching files
for file_path in glob.glob(pattern):
if os.path.exists(file_path):
os.remove(file_path)
print(f"Removed: {file_path}")
@app.route('/admin/')
def view_admin_page():
try:
license_key = get_license_key_from_config()
csv_file_path = os.path.join(app.root_path, 'participants.csv')
if os.path.exists(csv_file_path):
os.remove(csv_file_path)
# Remove uploaded images
remove_uploaded_background_images()
return render_template('adminPage.html', license_key=license_key)
except Exception as e:
logger.error(e)
print(e)
# raise(e)
return "An Error Occurred!"
@app.route('/home/')
def view_home_screen():
try:
pickle.dump(dict(request.args), open('admin_conf.pkl', 'wb'))
return render_template('main.html')
except Exception as e:
print(e)
logger.error(e)
return 'An error occurred'
@app.route('/saveCSVData/', methods=['POST'])
def save_csv():
try:
if request.method == 'POST':
csv_data = request.get_data()
# Decode the bytes to string
csv_data_str = csv_data.decode('utf-8').replace('\\n', '\n').replace('\\r', '').strip('"').replace('\\', '').replace('\\\\','').replace('X', '')
# Remove extra quotes
csv_data_str = csv_data_str.replace('\"', '')
# Split the string into a list of lines
csv_data_lines = csv_data_str.splitlines()
numbers = [line.split(',')[0] for line in csv_data_lines[1:]]
names = [f"{line.split(',')[1]} {line.split(',')[2]}" for line in csv_data_lines[1:]]
if len(set(numbers)) != len(numbers):
return jsonify({"error": "All numbers provided in the table must be unique."}), 400
# Numbers input handling
for idx, number in enumerate(numbers):
if not number:
return jsonify({"error": f"The number at [ROW # {idx + 1}] cannot be empty."}), 400
if len(number) > 4:
return jsonify({"error": f"The length of {number} at [ROW # {idx + 1}] cannot be more than 4 letters."}), 400
if not number.isdigit():
return jsonify({"error": f"{number} at [ROW # {idx + 1}] is not a valid digit/number."}), 400
# NAMES must not be empty, input handling
for idx, name in enumerate(names):
if name == ' ':
return jsonify({"error": f"The name at [ROW # {idx + 1}] cannot be empty."}), 400
new_csv_data_lines = []
for line in csv_data_lines[1:]:
line = line.rstrip(',')
if line:
cell = line.split(',')[0]
if not cell.isdigit():
continue
new_csv_data_lines.append(line)
csv_data_lines = new_csv_data_lines
if len(csv_data_lines) < 50:
return jsonify({"error": "Participants cannot be less than 50"}), 400
if len(csv_data_lines) > 300:
return jsonify({"error": "Participants cannot be more than 300"}), 400
# Open a file in write mode
with open('participants.csv', newline='', mode='w') as file:
writer = csv.writer(file)
# Write each line to the CSV file
for line in csv_data_lines:
writer.writerow(line.split(','))
return jsonify({"success": f"File has been saved at: participants.csv"})
else:
return jsonify({"error": "POST request required."}), 400
except Exception as e:
logger.error(e)
return jsonify({'error': 'An error occurred'}), 500
@app.route('/getCSVData/')
def view_saved_csv():
try:
file_path = 'participants.csv'
if os.path.exists(file_path):
data_list = []
with open(file_path, newline='') as f:
csv_data = csv.reader(f)
# headers = next(csv_data, None) # returns the headers or `None` if the input is empty
headers = ['assign-number', 'first-name', 'last-name', 'date-added']
if headers:
for row in csv_data:
data_list.append({headers[i]: value for i, value in enumerate(row)})
return jsonify({"data": data_list})
else:
return jsonify({"error": "File not found"}), 404
except Exception as e:
logger.error(e)
return jsonify({'error': 'An error occurred'}), 500
@app.route('/getAdminConf/')
def get_admin_conf():
try:
obj = pickle.load(open('admin_conf.pkl', 'rb'))
return jsonify(obj)
except Exception as e:
logger.error(e)
return jsonify({'error': 'An error occurred'}), 500
@app.route('/saveImage/', methods=['POST'])
def save_image():
try:
if request.method == 'POST':
data = json.loads(request.get_data(as_text=True))
image_name = data.get('image_name', None)
img_data = data['image'].split(',')[1] # Split off the header, keep only the actual image content
img_data = base64.b64decode(img_data)
            file_path = os.path.join(base_path, f'{image_name}.png')  # base_path already points at frontend/media
if image_name == 'Logo':
file_path = os.path.join(fr'media\{image_name} Uploaded.png') # Or where you want to save it
elif image_name == None:
# Background Image Uploaded
file_path = get_image_path_name()
with open(file_path, 'wb') as f:
f.write(img_data)
return jsonify({"message": "Image saved successfully.", 'file_path': file_path})
else:
return jsonify({"error": "Wrong method type."})
except Exception as e:
logger.error(e)
return jsonify({'error': 'An error occurred'}), 500
@app.route('/validateLicenseKey/<string:licenseKey>/', methods=['POST'])
def view_validate_license_key(licenseKey):
try:
table = get_dynamodb_table()
licenseCreatedDate = license_key_is_valid(licenseKey, table)
# If license exists, write/update it to config file
if licenseCreatedDate:
set_license_key_in_config(licenseKey, licenseCreatedDate)
return jsonify({'success': 'License Key successfully validated'}), 200
return jsonify({'error': 'License Key couldn\'t be validated'}), 404
except Exception as e:
from traceback import print_exc
logger.error(e)
print_exc()
return jsonify({'error': str(e)}), 500
@app.route('/licenseKeyIsValid/')
def view_license_key_validated():
license_key = get_license_key_from_config()
if license_key:
return jsonify({'success': 'License Key is validated!'}), 200
return jsonify({'error': 'Please enter a valid License Key in order to use this software.'}), 400
@app.route('/media/<filename>')
def get_media_file(filename):
return send_file(os.path.join('../frontend', 'media', filename))
def get_image_path_name():
# Background Image Uploaded
file_name = 'Background-Image'
extension = '.png'
counter = 0
# Loop to find the next available file name
while True:
if counter == 0:
file_path = os.path.join(base_path, f'{file_name}{extension}')
else:
file_path = os.path.join(base_path, f'{file_name} {counter}{extension}')
# Check if file already exists
if not os.path.exists(file_path):
break # Exit loop if file does not exist
counter += 1
return file_path
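# Illustrative behaviour: with no existing files this returns
# <media>/Background-Image.png; if that exists it tries
# <media>/Background-Image 1.png, then Background-Image 2.png, and so on.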
if __name__ == "__main__":
app.run(port=PORT)
|
TahirAlauddin/KonnectedReverseRaffle
|
mac_server/konnected-server.py
|
konnected-server.py
|
py
| 9,148 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18781100050
|
from pathlib import Path
from environs import Env
env = Env()
env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
PROJECT_DIR = BASE_DIR / "project"
SECRET_KEY = env.str("SECRET_KEY", default="something-very-secret")
DEBUG = env.bool("DEBUG", default=False)
ALLOWED_HOSTS = env.list("ALLOWED_HOSTS", default=["*"])
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.sites",
"debug_toolbar",
"allauth",
"allauth.account",
"utils",
"accounting",
"membership",
]
DATABASES = {"default": env.dj_db_url("DATABASE_URL")}
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"whitenoise.middleware.WhiteNoiseMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware",
]
ROOT_URLCONF = "project.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [PROJECT_DIR / "templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
WSGI_APPLICATION = "project.wsgi.application"
AUTH_PASSWORD_VALIDATORS = []
LANGUAGE_CODE = "da-dk"
TIME_ZONE = "Europe/Copenhagen"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = "/static/"
STATICFILES_DIRS = [PROJECT_DIR / "static"]
STATIC_ROOT = BASE_DIR / "static"
SITE_ID = 1
LOGIN_REDIRECT_URL = "/"
EMAIL_BACKEND = env.str(
"EMAIL_BACKEND", default="django.core.mail.backends.console.EmailBackend"
)
DEFAULT_FROM_EMAIL = env.str("DEFAULT_FROM_EMAIL", default="")
# Parse email URLs, e.g. "smtp://"
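# e.g. EMAIL_URL="smtp+tls://user:password@mail.example.com:587" (hypothetical host and credentials)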
email = env.dj_email_url("EMAIL_URL", default="smtp://")
EMAIL_HOST = email["EMAIL_HOST"]
EMAIL_PORT = email["EMAIL_PORT"]
EMAIL_HOST_PASSWORD = email["EMAIL_HOST_PASSWORD"]
EMAIL_HOST_USER = email["EMAIL_HOST_USER"]
EMAIL_USE_TLS = email["EMAIL_USE_TLS"]
# Always show DDT in development for any IP, not just 127.0.0.1 or
# settings.INTERNAL_IPS. This is useful in a docker setup where the
# requesting IP isn't static.
DEBUG_TOOLBAR_CONFIG = {
"SHOW_TOOLBAR_CALLBACK": lambda _x: DEBUG,
}
# We store all translations in one location
LOCALE_PATHS = [PROJECT_DIR / "locale"]
# Allauth configuration
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False
|
valberg/django_project_template
|
src/config/settings.py
|
settings.py
|
py
| 3,358 |
python
|
en
|
code
| 0 |
github-code
|
6
|
25273285890
|
def perm(k):
if k == N-1:
candidate.append(field + [0])
else:
for i in range(k, N):
field[k], field[i] = field[i], field[k]
perm(k+1)
field[k], field[i] = field[i], field[k]
T = int(input())
for t in range(1, T+1):
N = int(input())
golfmap = [list(map(int, input().split())) for _ in range(N)]
field = list(range(N))
candidate = []
perm(1)
min_battery = 100 * N ** 2
for arr in candidate:
battery = 0
for i in range(N):
start, end = arr[i], arr[i+1]
battery += golfmap[start][end]
if battery < min_battery:
min_battery = battery
print('#{} {}'.format(t, min_battery))
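# Input format implied by the reads above (illustrative):
#   T                      number of test cases
#   N                      number of stops, including the depot 0
#   N rows of N integers   battery-cost matrix golfmap[i][j]
# The answer is the cheapest tour 0 -> some order of 1..N-1 -> 0, found by
# brute-force enumeration of all (N-1)! visiting orders via perm().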
|
powerticket/algorithm
|
Practice/실습/201029/전자카트_전원표.py
|
전자카트_전원표.py
|
py
| 722 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21325441820
|
import os
from setuptools import setup
basedir = os.path.dirname(__file__)
def readme():
with open(os.path.join(basedir, "README.rst")) as f:
return f.read()
about = {}
with open(os.path.join(basedir, "pysyncgateway", "__about__.py")) as f:
exec(f.read(), about)
setup(
name=about["__name__"],
version=about["__version__"],
description=about["__description__"],
long_description=readme(),
url="https://github.com/constructpm/pysyncgateway",
author=about["__author__"],
author_email=about["__email__"],
license="Apache License 2.0",
install_requires=["requests>=2.23.0", "six>=1.13"],
packages=["pysyncgateway"],
python_requires=">=3.5, <4",
zip_safe=False,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python",
],
)
|
constructpm/pysyncgateway
|
setup.py
|
setup.py
|
py
| 1,004 |
python
|
en
|
code
| 1 |
github-code
|
6
|
69976456507
|
import logging
from typing import Callable, List
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .coordinator import UpdateCoordinator
from homeassistant.helpers.entity import DeviceInfo, async_generate_entity_id
from .const import (
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 1
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up Dolphin switch based on a config entry."""
coordinator: UpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
switches = []
for device in coordinator.data.keys():
switches.append(ShabbatSwitch(hass=hass, coordinator=coordinator, device=device))
switches.append(FixedTemperature(hass=hass, coordinator=coordinator, device=device))
for switch in range(1, 7):
switches.append(DropSwitch(hass=hass, coordinator=coordinator, index=switch, device=device))
async_add_entities(switches)
class DropSwitch(CoordinatorEntity, SwitchEntity):
def __init__(self, hass, coordinator, index, device):
CoordinatorEntity.__init__(self, coordinator)
self._hass = hass
self._id = index
self._coordinator = coordinator
self._device = device
self._is_on = False
self.entity_id = async_generate_entity_id(DOMAIN + ".{}", None or f"{device}_drop{index}", hass=hass)
@property
def unique_id(self):
return self.entity_id
@property
def name(self):
if self._coordinator.data[self._device].showerTemperature != None:
showerTemperature = self._coordinator.data[self._device].showerTemperature[self._id - 1]['temp'] if len(
self._coordinator.data[self._device].showerTemperature) > self._id - 1 else None
else:
showerTemperature = None
return f"{self._id} Shower - {showerTemperature}°C" if self._id == 1 else f"{self._id} Showers - {showerTemperature}°C"
@property
def icon(self):
return "mdi:shower"
@property
def available(self):
"""Return availability."""
if self._coordinator.data[self._device].shabbat:
return False
if self._coordinator.data[self._device].power and not self._is_on:
return False
if self._coordinator.data[self._device].fixedTemperature:
return False
if self._coordinator.data[self._device].showerTemperature != None:
if len(self._coordinator.data[self._device].showerTemperature) > self._id - 1:
return True
return False
@property
def is_on(self):
if not self._coordinator.data[self._device].power:
self._is_on = False
return self._is_on
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return DeviceInfo(
identifiers={
(DOMAIN, self._device)
},
)
async def async_turn_on(self):
current_temp = self._coordinator.data[self._device].temperature
drop_temperature = self._coordinator.data[self._device].showerTemperature[self._id - 1]['temp']
if current_temp <= drop_temperature and self._coordinator.data[self._device].power == False:
await self._coordinator.dolphin.turnOnManually(self._coordinator.dolphin._user, drop_temperature,
self._device)
self._is_on = True
await self.coordinator.async_request_refresh()
self.async_write_ha_state()
async def async_turn_off(self):
await self._coordinator.dolphin.turnOffManually(self._coordinator.dolphin._user, self._device)
self._is_on = False
await self.coordinator.async_request_refresh()
self.async_write_ha_state()
class ShabbatSwitch(CoordinatorEntity, SwitchEntity):
def __init__(self, hass, coordinator, device):
CoordinatorEntity.__init__(self, coordinator)
self._hass = hass
self._coordinator = coordinator
self._device = device
self.entity_id = async_generate_entity_id(DOMAIN + ".{}", None or f"{device}_sabbath_mode", hass=hass)
@property
def unique_id(self):
return self.entity_id
@property
def name(self):
return "Sabbath mode"
@property
def icon(self):
return "mdi:star-david"
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return DeviceInfo(
identifiers={
(DOMAIN, self._device)
},
name=self.name,
)
@property
def is_on(self):
return self._coordinator.data[self._device].shabbat
async def async_turn_on(self):
await self._coordinator.dolphin.enableShabbat(self._coordinator.dolphin._user, self._device)
self._coordinator.data[self._device].shabbat = True
self.async_write_ha_state()
async def async_turn_off(self):
await self._coordinator.dolphin.disableShabbat(self._coordinator.dolphin._user, self._device)
self._coordinator.data[self._device].shabbat = False
self.async_write_ha_state()
class FixedTemperature(CoordinatorEntity, SwitchEntity):
def __init__(self, hass, coordinator, device):
CoordinatorEntity.__init__(self, coordinator)
self._hass = hass
self._coordinator = coordinator
self._device = device
self.entity_id = async_generate_entity_id(DOMAIN + ".{}", None or f"{device}_fixed_temperature", hass=hass)
@property
def unique_id(self):
return self.entity_id
@property
def name(self):
return "Fixed temperature"
@property
def icon(self):
return "mdi:home-thermometer-outline"
@property
def device_info(self) -> DeviceInfo:
"""Return the device info."""
return DeviceInfo(
identifiers={
(DOMAIN, self._device)
},
name=self.name,
)
@property
def is_on(self):
return self._coordinator.data[self._device].fixedTemperature
async def async_turn_on(self):
await self._coordinator.dolphin.turnOnFixedTemperature(self._coordinator.dolphin._user, self._device,
self._coordinator.data[self._device].targetTemperature)
self._coordinator.data[self._device].fixedTemperature = True
await self.coordinator.async_request_refresh()
self.async_write_ha_state()
async def async_turn_off(self):
await self._coordinator.dolphin.turnOffFixedTemperature(self._coordinator.dolphin._user, self._device)
self._coordinator.data[self._device].fixedTemperature = False
await self.coordinator.async_request_refresh()
self.async_write_ha_state()
|
0xAlon/dolphin
|
custom_components/dolphin/switch.py
|
switch.py
|
py
| 7,201 |
python
|
en
|
code
| 6 |
github-code
|
6
|
28845108173
|
from django.views.generic import View
from .forms import CorrectionSendingForm
from apps.article_review.models import Review
from django.contrib import messages
from django.shortcuts import redirect
# Create your views here.
from apps.correction_reception.models import ArticleCorrection
# * Import the models
class CorrectionSendingView(View):
def post(self, request, *args, **kwargs):
# * get review
review = Review.objects.get(pk=kwargs['pk'])
form = CorrectionSendingForm(request.POST)
if form.is_valid():
            # * Do something with the form
val = form.cleaned_data.get('btn')
if val == 'Si':
review.enviado = True
review.save()
assignment = review.assignment
# * if all reviews are sent then change the status of assignment to completed
# * get all reviews of the assignment
reviews = Review.objects.filter(assignment=assignment)
# * check if all reviews are sent
for review in reviews:
if review.enviado == False:
messages.success(
request, 'Se ha cargado la corrección. Se notificará al autor cuando se hayan cargado todas las correcciones pendientes por los otros arbitros.')
return redirect('core_dashboard:dashboard')
else:
assignment.completed = True
assignment.save()
ArticleCorrection.objects.get_or_create(article=assignment.article)
messages.success(
request, 'Se ha enviado la corrección y se ha notificado al autor.')
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMessage
from django.urls import reverse
email = EmailMessage(
subject='Artículo arbitrado',
body=f'Estimado(a) {review.assignment.article.author.user.get_full_name()},\n\n'
f'Le informamos que el artículo {review.assignment.article.title} ha sido arbitrado y tiene correciones pendientes por realizar.\n\n'
f'Para acceder al artículo puede verlo en su tablero de actividades, por favor ingrese a la siguiente dirección:\n\n'
f'{get_current_site(request).domain + reverse("core_dashboard:dashboard")}\n\n'
f'Atentamente,\n\n'
f'Comité Editorial de Ciencia y Tecnología',
from_email='[email protected]',
to=[review.assignment.article.author.user.email]
)
email.send()
return redirect('core_dashboard:dashboard')
else:
return redirect('core_dashboard:dashboard')
|
HetairoiElite/cienciatec
|
apps/correction_sending/views.py
|
views.py
|
py
| 3,096 |
python
|
es
|
code
| 0 |
github-code
|
6
|
38650731253
|
from ehrqc.standardise import Config
from ehrqc.standardise import Utils
import logging
log = logging.getLogger("EHR-QC")
def importPatients(con, sourceSchemaName, filePath, fileSeparator, overwrite=True):
if overwrite:
log.info("Creating table: " + sourceSchemaName + ".patients")
dropQuery = """DROP TABLE IF EXISTS """ + sourceSchemaName + """.patients CASCADE"""
createQuery = """CREATE TABLE """ + sourceSchemaName + """.patients
(
patient_id VARCHAR(50) NOT NULL,
gender VARCHAR(50),
age VARCHAR(10),
dob TIMESTAMP(0),
dod TIMESTAMP(0)
)
;
"""
with con:
with con.cursor() as cursor:
cursor.execute(dropQuery)
cursor.execute(createQuery)
import pandas as pd
import numpy as np
df = pd.read_csv(filePath, sep=fileSeparator)
dfColumns = []
columns = []
if(Config.patients['column_mapping']['patient_id']):
dfColumns.append(Config.patients['column_mapping']['patient_id'])
columns.append('patient_id')
if(Config.patients['column_mapping']['gender']):
dfColumns.append(Config.patients['column_mapping']['gender'])
columns.append('gender')
if(Config.patients['column_mapping']['age']):
dfColumns.append(Config.patients['column_mapping']['age'])
columns.append('age')
if(Config.patients['column_mapping']['dod']):
df[Config.patients['column_mapping']['dod']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.patients['column_mapping']['dod'])
columns.append('dod')
if(Config.patients['column_mapping']['dob']):
df[Config.patients['column_mapping']['dob']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.patients['column_mapping']['dob'])
columns.append('dob')
Utils.saveDataframe(con=con, destinationSchemaName=sourceSchemaName, destinationTableName='patients', columns=columns, df=df, dfColumns=dfColumns)
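# Illustrative shape of Config.patients assumed by the mapping above
# (hypothetical source column names):
#   patients = {'column_mapping': {'patient_id': 'subject_id', 'gender': 'gender',
#                                  'age': 'anchor_age', 'dob': None, 'dod': 'dod'}}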
def importAdmissions(con, sourceSchemaName, filePath, fileSeparator, overwrite=True):
if overwrite:
log.info("Creating table: " + sourceSchemaName + ".admissions")
dropQuery = """DROP TABLE IF EXISTS """ + sourceSchemaName + """.admissions CASCADE"""
createQuery = """CREATE TABLE """ + sourceSchemaName + """.admissions
(
patient_id VARCHAR(50),
episode_id VARCHAR(50),
admittime VARCHAR(50),
dischtime VARCHAR(50),
deathtime VARCHAR(50),
admission_type VARCHAR(50),
admission_location VARCHAR(50),
discharge_location VARCHAR(50),
insurance VARCHAR(255),
language VARCHAR(10),
marital_status VARCHAR(50),
ethnicity VARCHAR(200),
edregtime VARCHAR(50),
edouttime VARCHAR(50),
hospital_expire_flag VARCHAR(50)
)
;
"""
with con:
with con.cursor() as cursor:
cursor.execute(dropQuery)
cursor.execute(createQuery)
import pandas as pd
import numpy as np
df = pd.read_csv(filePath, sep=fileSeparator)
dfColumns = []
columns = []
if(Config.admissions['column_mapping']['patient_id']):
dfColumns.append(Config.admissions['column_mapping']['patient_id'])
columns.append('patient_id')
if(Config.admissions['column_mapping']['episode_id']):
dfColumns.append(Config.admissions['column_mapping']['episode_id'])
columns.append('episode_id')
if(Config.admissions['column_mapping']['admittime']):
df[Config.admissions['column_mapping']['admittime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.admissions['column_mapping']['admittime'])
columns.append('admittime')
if(Config.admissions['column_mapping']['dischtime']):
df[Config.admissions['column_mapping']['dischtime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.admissions['column_mapping']['dischtime'])
columns.append('dischtime')
if(Config.admissions['column_mapping']['deathtime']):
df[Config.admissions['column_mapping']['deathtime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.admissions['column_mapping']['deathtime'])
columns.append('deathtime')
if(Config.admissions['column_mapping']['admission_type']):
dfColumns.append(Config.admissions['column_mapping']['admission_type'])
columns.append('admission_type')
if(Config.admissions['column_mapping']['admission_location']):
dfColumns.append(Config.admissions['column_mapping']['admission_location'])
columns.append('admission_location')
if(Config.admissions['column_mapping']['discharge_location']):
dfColumns.append(Config.admissions['column_mapping']['discharge_location'])
columns.append('discharge_location')
if(Config.admissions['column_mapping']['insurance']):
dfColumns.append(Config.admissions['column_mapping']['insurance'])
columns.append('insurance')
if(Config.admissions['column_mapping']['language']):
dfColumns.append(Config.admissions['column_mapping']['language'])
columns.append('language')
if(Config.admissions['column_mapping']['marital_status']):
dfColumns.append(Config.admissions['column_mapping']['marital_status'])
columns.append('marital_status')
if(Config.admissions['column_mapping']['ethnicity']):
dfColumns.append(Config.admissions['column_mapping']['ethnicity'])
columns.append('ethnicity')
if(Config.admissions['column_mapping']['edregtime']):
dfColumns.append(Config.admissions['column_mapping']['edregtime'])
columns.append('edregtime')
if(Config.admissions['column_mapping']['edouttime']):
dfColumns.append(Config.admissions['column_mapping']['edouttime'])
columns.append('edouttime')
if(Config.admissions['column_mapping']['hospital_expire_flag']):
dfColumns.append(Config.admissions['column_mapping']['hospital_expire_flag'])
columns.append('hospital_expire_flag')
Utils.saveDataframe(con=con, destinationSchemaName=sourceSchemaName, destinationTableName='admissions', columns=columns, df=df, dfColumns=dfColumns)
def importChartEvents(con, sourceSchemaName, filePath, fileSeparator, overwrite=True):
if overwrite:
log.info("Creating table: " + sourceSchemaName + ".chartevents")
dropQuery = """DROP TABLE IF EXISTS """ + sourceSchemaName + """.chartevents CASCADE"""
createQuery = """CREATE TABLE """ + sourceSchemaName + """.chartevents
(
patient_id VARCHAR(50),
episode_id VARCHAR(50),
vital_id VARCHAR(50),
charttime VARCHAR(50),
storetime VARCHAR(50),
itemid VARCHAR(160),
value VARCHAR(160),
valuenum VARCHAR(160),
valueuom VARCHAR(20),
warning VARCHAR(10)
)
;
"""
with con:
with con.cursor() as cursor:
cursor.execute(dropQuery)
cursor.execute(createQuery)
import pandas as pd
import numpy as np
log.info("Reading file: " + str(filePath))
df = pd.read_csv(filePath, sep=fileSeparator)
dfColumns = []
columns = []
if(Config.chartevents['column_mapping']['patient_id']):
dfColumns.append(Config.chartevents['column_mapping']['patient_id'])
columns.append('patient_id')
if(Config.chartevents['column_mapping']['episode_id']):
dfColumns.append(Config.chartevents['column_mapping']['episode_id'])
columns.append('episode_id')
if(Config.chartevents['column_mapping']['vital_id']):
dfColumns.append(Config.chartevents['column_mapping']['vital_id'])
columns.append('vital_id')
if(Config.chartevents['column_mapping']['charttime']):
df[Config.chartevents['column_mapping']['charttime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.chartevents['column_mapping']['charttime'])
columns.append('charttime')
if(Config.chartevents['column_mapping']['storetime']):
df[Config.chartevents['column_mapping']['storetime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.chartevents['column_mapping']['storetime'])
columns.append('storetime')
if(Config.chartevents['column_mapping']['itemid']):
dfColumns.append(Config.chartevents['column_mapping']['itemid'])
columns.append('itemid')
if(Config.chartevents['column_mapping']['value']):
# df = df[df[Config.chartevents['column_mapping']['value']].str.strip() != '']
dfColumns.append(Config.chartevents['column_mapping']['value'])
columns.append('value')
if(Config.chartevents['column_mapping']['valuenum']):
dfColumns.append(Config.chartevents['column_mapping']['valuenum'])
columns.append('valuenum')
if(Config.chartevents['column_mapping']['valueuom']):
dfColumns.append(Config.chartevents['column_mapping']['valueuom'])
columns.append('valueuom')
if(Config.chartevents['column_mapping']['warning']):
dfColumns.append(Config.chartevents['column_mapping']['warning'])
columns.append('warning')
Utils.saveDataframe(con=con, destinationSchemaName=sourceSchemaName, destinationTableName='chartevents', columns=columns, df=df, dfColumns=dfColumns)
def importLabEvents(con, sourceSchemaName, filePath, fileSeparator, overwrite=True):
if overwrite:
log.info("Creating table: " + sourceSchemaName + ".labevents")
dropQuery = """DROP TABLE IF EXISTS """ + sourceSchemaName + """.labevents CASCADE"""
createQuery = """CREATE TABLE """ + sourceSchemaName + """.labevents
(
labevent_id VARCHAR(50),
patient_id VARCHAR(50),
episode_id VARCHAR(50),
specimen_id VARCHAR(20),
itemid VARCHAR(200),
charttime VARCHAR(50),
storetime VARCHAR(50),
value VARCHAR(200),
valuenum VARCHAR(200),
valueuom VARCHAR(20),
ref_range_lower VARCHAR(200),
ref_range_upper VARCHAR(200),
flag VARCHAR(10),
priority VARCHAR(7),
comments VARCHAR(620)
)
;
"""
with con:
with con.cursor() as cursor:
cursor.execute(dropQuery)
cursor.execute(createQuery)
import pandas as pd
import numpy as np
df = pd.read_csv(filePath, sep=fileSeparator)
dfColumns = []
columns = []
if(Config.labevents['column_mapping']['labevent_id']):
dfColumns.append(Config.labevents['column_mapping']['labevent_id'])
columns.append('labevent_id')
if(Config.labevents['column_mapping']['patient_id']):
dfColumns.append(Config.labevents['column_mapping']['patient_id'])
columns.append('patient_id')
if(Config.labevents['column_mapping']['episode_id']):
dfColumns.append(Config.labevents['column_mapping']['episode_id'])
columns.append('episode_id')
if(Config.labevents['column_mapping']['specimen_id']):
dfColumns.append(Config.labevents['column_mapping']['specimen_id'])
columns.append('specimen_id')
if(Config.labevents['column_mapping']['itemid']):
dfColumns.append(Config.labevents['column_mapping']['itemid'])
columns.append('itemid')
if(Config.labevents['column_mapping']['charttime']):
df[Config.labevents['column_mapping']['charttime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.labevents['column_mapping']['charttime'])
columns.append('charttime')
if(Config.labevents['column_mapping']['storetime']):
df[Config.labevents['column_mapping']['storetime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.labevents['column_mapping']['storetime'])
columns.append('storetime')
if(Config.labevents['column_mapping']['value']):
# df = df[df[Config.labevents['column_mapping']['value']].str.strip() != '']
dfColumns.append(Config.labevents['column_mapping']['value'])
columns.append('value')
if(Config.labevents['column_mapping']['valuenum']):
dfColumns.append(Config.labevents['column_mapping']['valuenum'])
columns.append('valuenum')
if(Config.labevents['column_mapping']['valueuom']):
dfColumns.append(Config.labevents['column_mapping']['valueuom'])
columns.append('valueuom')
if(Config.labevents['column_mapping']['ref_range_lower']):
dfColumns.append(Config.labevents['column_mapping']['ref_range_lower'])
columns.append('ref_range_lower')
if(Config.labevents['column_mapping']['ref_range_upper']):
dfColumns.append(Config.labevents['column_mapping']['ref_range_upper'])
columns.append('ref_range_upper')
if(Config.labevents['column_mapping']['flag']):
dfColumns.append(Config.labevents['column_mapping']['flag'])
columns.append('flag')
if(Config.labevents['column_mapping']['priority']):
dfColumns.append(Config.labevents['column_mapping']['priority'])
columns.append('priority')
if(Config.labevents['column_mapping']['comments']):
dfColumns.append(Config.labevents['column_mapping']['comments'])
columns.append('comments')
Utils.saveDataframe(con=con, destinationSchemaName=sourceSchemaName, destinationTableName='labevents', columns=columns, df=df, dfColumns=dfColumns)
def importDiagnosis(con, sourceSchemaName, filePath, fileSeparator, overwrite=True):
log.info("Creating table: " + sourceSchemaName + ".diagnosis")
if overwrite:
dropQuery = """DROP TABLE IF EXISTS """ + sourceSchemaName + """.diagnosis CASCADE"""
createQuery = """CREATE TABLE """ + sourceSchemaName + """.diagnosis
(
diagnosis_id VARCHAR(50),
episode_id VARCHAR(50),
patient_id VARCHAR(50),
charttime VARCHAR(50),
diagnosis VARCHAR(50),
diagnosis_description VARCHAR(250)
)
;
"""
with con:
with con.cursor() as cursor:
cursor.execute(dropQuery)
cursor.execute(createQuery)
import pandas as pd
import numpy as np
df = pd.read_csv(filePath, sep=fileSeparator)
dfColumns = []
columns = []
if(Config.diagnosis['column_mapping']['diagnosis_id']):
dfColumns.append(Config.diagnosis['column_mapping']['diagnosis_id'])
columns.append('diagnosis_id')
if(Config.diagnosis['column_mapping']['patient_id']):
dfColumns.append(Config.diagnosis['column_mapping']['patient_id'])
columns.append('patient_id')
if(Config.diagnosis['column_mapping']['episode_id']):
dfColumns.append(Config.diagnosis['column_mapping']['episode_id'])
columns.append('episode_id')
if(Config.diagnosis['column_mapping']['charttime']):
df[Config.diagnosis['column_mapping']['charttime']].replace({np.nan: None}, inplace=True)
dfColumns.append(Config.diagnosis['column_mapping']['charttime'])
columns.append('charttime')
if(Config.diagnosis['column_mapping']['diagnosis']):
dfColumns.append(Config.diagnosis['column_mapping']['diagnosis'])
columns.append('diagnosis')
if(Config.diagnosis['column_mapping']['diagnosis_description']):
dfColumns.append(Config.diagnosis['column_mapping']['diagnosis_description'])
columns.append('diagnosis_description')
Utils.saveDataframe(con=con, destinationSchemaName=sourceSchemaName, destinationTableName='diagnosis', columns=columns, df=df, dfColumns=dfColumns)
def importDataCsv(con, sourceSchemaName):
if(hasattr(Config, 'patients') and 'file_name' in Config.patients and Config.patients['file_name']):
importPatients(
con=con,
sourceSchemaName=sourceSchemaName,
filePath = Config.patients['file_name'],
fileSeparator=Config.patients['file_separator'],
overwrite=Config.patients['overwrite'],
)
if(hasattr(Config, 'admissions') and 'file_name' in Config.admissions and Config.admissions['file_name']):
importAdmissions(
con=con,
sourceSchemaName=sourceSchemaName,
filePath = Config.admissions['file_name'],
fileSeparator=Config.admissions['file_separator'],
overwrite=Config.admissions['overwrite'],
)
if(hasattr(Config, 'chartevents') and 'file_name' in Config.chartevents and Config.chartevents['file_name']):
importChartEvents(
con=con,
sourceSchemaName=sourceSchemaName,
filePath = Config.chartevents['file_name'],
fileSeparator=Config.chartevents['file_separator'],
overwrite=Config.chartevents['overwrite'],
)
if(hasattr(Config, 'labevents') and 'file_name' in Config.labevents and Config.labevents['file_name']):
importLabEvents(
con=con,
sourceSchemaName=sourceSchemaName,
filePath = Config.labevents['file_name'],
fileSeparator=Config.labevents['file_separator'],
overwrite=Config.labevents['overwrite'],
)
if(hasattr(Config, 'diagnosis') and 'file_name' in Config.diagnosis and Config.diagnosis['file_name']):
importDiagnosis(
con=con,
sourceSchemaName=sourceSchemaName,
filePath = Config.diagnosis['file_name'],
fileSeparator=Config.diagnosis['file_separator'],
overwrite=Config.diagnosis['overwrite'],
)
|
ryashpal/EHR-QC-Standardise
|
ehrqc/standardise/Import.py
|
Import.py
|
py
| 18,197 |
python
|
en
|
code
| 0 |
github-code
|
6
|
75163509946
|
from flask import Blueprint, render_template, redirect, url_for, flash
from flask_security import current_user
from flask_babel import gettext
from . import route
from dxc.app.models.job.forms import JobForm, JobReportForm
from dxc.services import api_job, api_report
bp = Blueprint('job', __name__, template_folder='templates', static_folder='static', url_prefix='/job')
@route(bp, '/new', methods=['GET', 'POST'])
def create_job():
form = JobForm()
if form.validate_on_submit():
user = None
if current_user.get_id() is not None:
user = current_user
job = api_job.create(user=user, **form.data)
return redirect(url_for('.detail_job', job_id=job.id))
return render_template('job/create.html', form=form)
#----------------------------------------------------------------------
@bp.route('/<int:job_id>', methods=['GET'])
def detail_job(job_id):
""""""
job = api_job.get_or_404(job_id)
api_job.update(job, read_count = job.read_count + 1)
return render_template('job/detail.html', job=job)
#----------------------------------------------------------------------
@bp.route('/jobs/<int:page>', methods=['GET'])
@bp.route('/jobs/', methods=['GET'])
def list_job(page=None):
""""""
if page == None or page <= 0:
page = 1
jobs = api_job.get_latest_page_filterby(page, status=1)
return render_template('job/list.html', jobs = jobs)
#----------------------------------------------------------------------
@bp.route('/report/<int:job_id>', methods=['GET', 'POST'])
def report_job(job_id):
"""Report a job
"""
report_form = JobReportForm()
if report_form.validate_on_submit():
api_report.create(job_id=job_id, **report_form.data)
flash(gettext(u'Thanks for your report. We will check it soon.'))
return redirect(url_for('.list_job'))
return render_template('job/report.html', job_id=job_id, report_form=report_form)
#----------------------------------------------------------------------
@bp.route('/reports/<int:job_id>', methods=['GET'])
def list_report(job_id):
""""""
job = api_job.get(job_id)
return render_template('job/report_list.html', job=job, reports=job.reports)
@route(bp, '/profile/published_jobs/','/profile/published_jobs/<int:status>/','/profile/published_jobs/<int:status>/<int:page>', methods=['GET'])
def list_publisedjobs(status=1, page=1):
"""List jobs published by me."""
jobs = api_job.get_latest_page_filterby(page=page, per_page=2, status=status, user_id=current_user.id)
return render_template('job/profile_publishedjobs.html', jobs=jobs, status=status)
|
cash2one/Luyasi-Flask
|
dxc/app/frontend/job.py
|
job.py
|
py
| 2,646 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10721085289
|
'''
Collection of helper function for the EDA notebooks
'''
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pycountry
''' Returns the pairs of variables sorted according to their correlation '''
def getCorrPairs(corr):
mask = np.zeros_like(corr, dtype=bool)
mask[np.triu_indices_from(mask)] = True
corr[mask] = np.nan
pairs = corr.abs().unstack()
pairs = pairs.sort_values(ascending = False)
return pairs
''' Imputes a predictor timeSeries'''
def imputeTS(timeSeries):
if 'capacity' in timeSeries.name:
res = _imputeCapacity(timeSeries)
else:
res = _imputeGeneric(timeSeries)
return res
''' Imputes a generic time-series by interpolation or year-ahead, year-prior values '''
def _imputeGeneric(timeSeries,
hoursInWeek = 24 * 1,
hoursInYear = 24 * 364):
    # Interpolate at most `hoursInWeek` hours forwards/backwards in time (default 24 h)
timeSeries = timeSeries.interpolate(
method = 'time',
limit = hoursInWeek,
limit_area = 'inside',
limit_direction = 'both')
# Roll-back one year and impute remaining blocks (fills in gaps mostly at the beginning of the time-series)
timeSeries = timeSeries.combine_first(timeSeries.shift(-hoursInYear))
# Roll-forward one year and impute (fills in gaps mostly at the end of the time-series)
timeSeries = timeSeries.combine_first(timeSeries.shift(hoursInYear))
# Re-interpolate any nans remaining
timeSeries = timeSeries.interpolate(
method = 'time',
limit_area = 'inside',
limit_direction = 'both')
return timeSeries
''' Imputes capacity timeseries by padding'''
def _imputeCapacity(timeSeries):
return timeSeries.fillna(method = 'pad')
''' Plots original / imputed time-series'''
def plotImputation(originalTS, imputedTS, withMean = False, hoursInMonth = 24 * 7 * 4):
imputedTS[~originalTS.isnull()] = np.nan
plt.figure(figsize = (15, 3))
plt.plot(originalTS, linewidth = 0.5)
plt.plot(imputedTS, linewidth = 0.5)
if withMean:
monthMean = imputedTS.rolling(hoursInMonth).mean()
plt.plot(monthMean, color = 'k')
plt.legend(['Original', 'Imputed', 'Monthly avg. (rolling)'], ncol = 3);
else:
plt.legend(['Original', 'Imputed'], ncol = 2);
plt.title(originalTS.name + ' Imputed');
return
''' Fixes information for the areas.csv dataframe '''
def makeAreaMetadata(df):
df = df.where(pd.notnull(df), None)
countries, a2Codes, mapCodes, pAreas, bZones, cAreas, mAreas = [], [], [], [], [], [], []
for _, row in df.iterrows():
a2code = row['area ID'].split('_')[0]
if a2code == 'CS': country = 'SerbiaMontenegro' # Does not exist in pycountry
else: country = pycountry.countries.get(alpha_2 = a2code).name
mapcode = a2code
primary_area = country + '_default'
bidZone = country + '_default'
control_area = country + '_default'
market_area = country + '_default'
if row['country'] is None: countries.append(country)
else: countries.append(row['country'])
if row['ISO 3166-1 alpha-2'] is None: a2Codes.append(a2code)
else: a2Codes.append(row['ISO 3166-1 alpha-2'])
if row['MapCode ENTSO-E'] is None: mapCodes.append(mapcode)
else: mapCodes.append(row['MapCode ENTSO-E'])
if row['primary AreaName ENTSO-E'] is None: pAreas.append(primary_area)
else: pAreas.append(row['primary AreaName ENTSO-E'])
if row['bidding zone'] is None: bZones.append(bidZone)
else: bZones.append(row['bidding zone'])
if row['control area'] is None: cAreas.append(control_area)
else: cAreas.append(row['control area'])
if row['market balance area'] is None: mAreas.append(market_area)
else: mAreas.append(row['market balance area'])
df['country'] = countries
df['ISO 3166-1 alpha-2'] = a2Codes
df['MapCode ENTSO-E'] = mapCodes
df['primary AreaName ENTSO-E'] = pAreas
df['bidding zone'] = bZones
df['control area'] = cAreas
df['market balance area'] = mAreas
return df
''' Returns areaIDs per concept type'''
def _getAreas(primaryConcept, df):
return df[df['primary concept'] == primaryConcept]['area ID'].unique().tolist()
''' Checks if a column name appears in a list of area codes and returns area code'''
def areaID(fieldName, conceptType, df):
for area in _getAreas(conceptType, df):
if isinstance(area, str):
if area in fieldName:
return area
return None
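# Hedged usage sketch (the column name below is illustrative, not from this module):
# imputeTS dispatches on the series name, so 'capacity' columns are forward-padded
# while all other predictors are time-interpolated and back/forward-filled by one year, e.g.
#   df['DE_load_actual'] = imputeTS(df['DE_load_actual'])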
|
Miltos-90/EU_Electricity_Price_Forecasting
|
src/eda_utils.py
|
eda_utils.py
|
py
| 5,183 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38794444506
|
n, m, q = map(int, input().split())
grid = [[0] * m for _ in range(n)]
def explore(i, j, seen):
if i-1 >= 0 and (i-1, j) not in seen and grid[i-1][j] == 0:
seen.add((i-1, j))
explore(i-1, j, seen)
if i+1 < len(grid) and (i+1, j) not in seen and grid[i+1][j] == 0:
seen.add((i+1, j))
explore(i+1, j, seen)
if j-1 >= 0 and (i, j-1) not in seen and grid[i][j-1] == 0:
seen.add((i, j-1))
explore(i, j-1, seen)
if j+1 < len(grid[0]) and (i, j+1) not in seen and grid[i][j+1] == 0:
seen.add((i, j+1))
explore(i, j+1, seen)
def regions():
seen = set()
ans = 0
for i in range(len(grid)):
for j in range(len(grid[0])):
if (i, j) not in seen and grid[i][j] == 0:
seen.add((i, j))
ans += 1
explore(i, j, seen)
return ans
for i in range(q):
x1, y1, x2, y2 = map(lambda x: x-1, map(int, input().split()))
while x1 != x2:
grid[x1][y1] = 1
x1 += 1 if x2 > x1 else -1
while y1 != y2:
grid[x1][y1] = 1
y1 += 1 if y2 > y1 else -1
grid[x1][y1] = 1
print(regions())
|
MaxwellGraves/ICPC-Practice
|
Practice3/Artwork/soln.py
|
soln.py
|
py
| 1,207 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40206920414
|
#!/usr/bin/python3
""" A function that finds the biggest integer in a
list, and if empty return none and assume the
list contains only integers. """
def max_integer(my_list=[]):
if len(my_list) == 0:
return None
else:
my_list.sort()
largestInt = my_list[-1]
return largestInt
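# A minimal usage sketch (values are illustrative, not part of the task file):
#   max_integer([1, 90, 2, 13, 34])  ->  90
#   max_integer([])                  ->  None
# Note that the input list is sorted in place as a side effect of this approach.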
|
omondistanley/seo-higher_level_programming
|
python-data_structures/9-max_integer.py
|
9-max_integer.py
|
py
| 326 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21666620014
|
#https://leetcode.com/problems/rotate-array/
class Solution:
def rotate(self, nums: list[int], k: int) -> None: #Not returning anything, since we'll modify it in-place
solutionList = [0] * len(nums)
for i in range(0,len(nums)):
newIndex = (i+k) % len(nums)
solutionList[newIndex] = nums[i]
for i in range(0,len(nums)):
nums[i] = solutionList[i]
#More concise implementation using slicing
def rotate2(self, nums: list[int], k: int) -> None:
shift = k % len(nums) #This is needed if k > len(list)
newStartIndex = len(nums) - shift #The starting index of our new list
nums[:] = nums[newStartIndex:] + nums[:newStartIndex] #The [:]= operator copies values to the list in-place instead of doing reassignment
def main():
solution = Solution()
list1 = [1,2,3,4,5,6,7]
solution.rotate(list1, 3)
print(list1) #[5,6,7,1,2,3,4]
if __name__ == "__main__": #Entry point
main() #Calling main method
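# Hedged note: rotate2 gives the same in-place result via slicing, e.g.
#   list2 = [1, 2, 3, 4, 5, 6, 7]; solution.rotate2(list2, 3)  ->  [5, 6, 7, 1, 2, 3, 4]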
|
Adam-1776/Practice
|
DSA/rotateArray/solution.py
|
solution.py
|
py
| 1,004 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16822782747
|
from math import comb
def compute(m,n):
combs = comb(m*(m-1)*(m-2),n)
power = pow(m-3, 2*n)
poly = n*m*m+2*m*m+n*m
return combs*power*poly
def compute2(m,n):
combs = comb(m*(m-1)*(m-2)-1,n-1)
power = pow(m-3,n)*pow(m-4,n)
poly = n*m*m+2*m*m+n*m
return combs*power*poly
# for m in range(4, 7):
# print(m)
# for n in range(1,5):
# print(str(n)+" %e" % compute(m,n))
# for n in range(5, m*(m-1)*(m-2), 10):
# print(str(n)+" %e" % compute(m,n))
# print(str(m*(m-1)*(m-2))+" %e" % compute(m,m*(m-1)*(m-2)))
# print()
# print("%e" % compute2(6,3))
# print("%e" % compute2(6,4))
# print("%e" % compute2(6,5))
# print("%e" % compute2(6,120))
print("%e" % compute2(6,6))
|
NoahW314/Python-Testing
|
Testing/src/root/nested/math/Cayley Tables/assoc/assoc_testing_complexity.py
|
assoc_testing_complexity.py
|
py
| 735 |
python
|
en
|
code
| 0 |
github-code
|
6
|
7940432285
|
length = 12
summary = [0] * length
mask = '1' * length
with open('/Users/eilidh/PycharmProjects/advent_of_code/src/day3/input.txt') as file:
for line in file:
line = line.rstrip()
if len(line) != length:
raise Exception('wrong length')
for i, char in enumerate(line.rstrip()):
if int(char):
summary[i] += 1
else:
summary[i] -= 1
print(summary)
bin_str = ''
for value in summary:
if value > 0:
bin_str += '1'
elif value < 0:
bin_str += '0'
else:
raise Exception('behaviour unspecified')
print(bin_str)
gamma = int(bin_str, 2)
mask = int(mask, 2)
epsilon = gamma^mask
print(gamma)
print(epsilon)
print(f'final value: {gamma * epsilon}')
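# Hedged note: epsilon is gamma with every bit flipped, obtained by XOR-ing gamma with an
# all-ones mask of the same width, e.g. for 5 bits 0b10110 ^ 0b11111 == 0b01001.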
|
eilidht/advent_of_code
|
src/day3/day3.py
|
day3.py
|
py
| 772 |
python
|
en
|
code
| 0 |
github-code
|
6
|
16478653737
|
import mxnet as mx
import time
import gluoncv as gcv
from gluoncv.utils import try_import_cv2
cv2 = try_import_cv2()
net = gcv.model_zoo.get_model(
# good, fast
'ssd_512_mobilenet1.0_coco',
# 'ssd_512_mobilenet1.0_voc',
# 'ssd_512_mobilenet1.0_voc_int8',
#
# 'yolo3_mobilenet1.0_coco',
# 'yolo3_mobilenet1.0_voc',
# too slow...
# 'faster_rcnn_resnet50_v1b_voc', # too slow...
# 'faster_rcnn_fpn_syncbn_resnest50_coco', # too slow...
pretrained=True)
net.hybridize()
cap = cv2.VideoCapture(0)
time.sleep(1)
while(True):
ret, frame = cap.read()
k = cv2.waitKey(1)
if k == ord('q'):
break
frame = mx.nd.array(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).astype('uint8')
rgb_nd, frame = gcv.data.transforms.presets.ssd.transform_test(
frame, short=512, max_size=700
)
# rgb_nd, frame = gcv.data.transforms.presets.yolo.transform_test(
# frame, short=512, max_size=700
# )
# rgb_nd, frame = gcv.data.transforms.presets.rcnn.transform_test(
# frame, short=512, max_size=700
# )
class_IDs, scores, bounding_boxes = net(rgb_nd)
img = gcv.utils.viz.cv_plot_bbox(frame,
bounding_boxes[0],
scores[0],
class_IDs[0],
class_names=net.classes)
gcv.utils.viz.cv_plot_image(img)
cv2.waitKey(1)
cap.release()
cv2.destroyAllWindows()
|
ODN418/progmates_works
|
glouncv/detect.py
|
detect.py
|
py
| 1,506 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26524937441
|
class SinglyLinkedList:
def __init__(self):
self.head = None
self.length = 0
# Methods
# Add to Back
def addToBack(self,val):
newNode = ListNode(val)
        if self.head is None:
            self.head = newNode
            return self
        runner = self.head
        while runner.next:
            runner = runner.next
        runner.next = newNode
return self
# Add to front
def addToFront(self,val):
newNode = ListNode(val)
tempHead = self.head
self.head = newNode
self.head.next = tempHead
# Remove from Front
# Remove from Back
class ListNode:
def __init__(self,val):
self.value = val
self.next = None
SLL = SinglyLinkedList()
# Add and Remove Your nodes
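# A minimal usage sketch (values are illustrative, not part of the assignment),
# exercising the two implemented methods on the SLL instance created above:
SLL.addToBack(1)
SLL.addToBack(2)
SLL.addToFront(0)
runner = SLL.head
while runner:
    print(runner.value)  # prints 0, 1, 2 on separate lines
    runner = runner.next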
|
CoreyM-Dojo/projects-and-algorithms-october-2023
|
week2/day1/SLL.py
|
SLL.py
|
py
| 802 |
python
|
en
|
code
| 0 |
github-code
|
6
|
41202613200
|
from sense_hat import SenseHat
sense = SenseHat()
sense.clear()
temp = sense.get_temperature()
humidity = sense.get_humidity()
humidity = round(humidity, 1)
calc = 100-humidity
calc2 = calc/5
dewpoint = temp - calc2
dewpoint = (str(dewpoint))
print(dewpoint + " degrees C")
|
Meteodeep/sandbox
|
Summer-2017/RPi-DewPoint.py
|
RPi-DewPoint.py
|
py
| 283 |
python
|
en
|
code
| 4 |
github-code
|
6
|
7901768963
|
from collections import Counter
import logging
def find(list, value):
try:
return list.index(value)
except ValueError:
return None
class DefaultSorter(object):
def __init__(self, langs='all', weight=1):
logging.info("Available languages: {}".format(langs))
self.langs = langs.split(',')
def bestfn(self, subentry):
idx = find(self.langs, subentry['SubLanguageID'])
value = idx if idx is not None else len(self.langs)
return value
def _similarity(a, b):
    make_pairs = lambda l: (l[i:i+2] for i in range(len(l)-1))
tc = lambda counter: sum(count for count in counter.values())
sa = Counter(make_pairs(a))
sb = Counter(make_pairs(b))
return 2.0 * tc(sa & sb) / (tc(sa) + tc(sb))
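# Hedged worked example (not from the original module): with character bigrams,
# _similarity("night", "nacht") shares only the pair "ht", giving 2*1/(4+4) = 0.25.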
class SimilaritySorter(DefaultSorter):
def __init__(self, langs='all'):
super(SimilaritySorter, self).__init__(langs)
self.movie = ''
def bestfn(self, subentry):
value = super(SimilaritySorter, self).bestfn(subentry)
sn = subentry['SubFileName']
similarity = _similarity(sn[:sn.rindex('.')], self.movie)
logging.info("{}: Similarity is {}, lang {}".format(
subentry['SubFileName'], similarity, subentry['SubLanguageID']))
return 1.1 * value + 1 - similarity
|
luisguilherme/framboise
|
framboise/sorting.py
|
sorting.py
|
py
| 1,318 |
python
|
en
|
code
| 2 |
github-code
|
6
|
17493539514
|
#This file "drives" the car by calling all the required files
#outputs plots of the dynamic/vibration models
import Beeman, car_2014, chassis_2014, driver_sally, ff_2014_5, ff_2014_7, get_DM, get_FF, get_Jx, get_Jy, get_LR, get_MM, get_SD, get_SM, get_cg, motor_2014, speed_bump, suspension_front_2014, suspension_rear_2014, trajectory, wheel_front_2014, wheel_rear_2014
import numpy as np, math
import matplotlib.pyplot as plt
#creating arguments into Beeman
ff = ff_2014_7.ff_data
ffmatrix, ffobject = get_FF.get_FF(ff['t_in'],ff)
X0 = get_SD.get_SD(ff['model'],ff['car'])
DOF = X0.shape[0]
V0 = np.zeros((DOF,1))
A0 = np.zeros((DOF,3))
M = get_MM.get_MM(ff['model'],ff['car'])
C = get_DM.get_DM(ff['model'],ff['car'])
K = get_SM.get_SM(ff['model'],ff['car'])
#create data
T7, X7, V7, A7 = Beeman.Beeman(X0,V0,A0,M,C,K,get_FF.get_FF,ffobject)
#Heave
plt.plot(T7,X7[:,0])
plt.show()
#Pitch
|
brandontran14/CarSimulation
|
driving.py
|
driving.py
|
py
| 897 |
python
|
en
|
code
| 0 |
github-code
|
6
|
23255212962
|
from Config import Calculator
from Config import Condition
from Utils import Utils
import json
import insertUser
import os
# # file_path = os.path.join(BASE_DIR, 'Test_Data')
# elements = BASE_DIR.split("/")
# # elements.pop()
# path = "/".join(elements)
# print(path)
if __name__ == '__main__':
# BASE_DIR = os.path.dirname(__file__)
verify = insertUser.verify()
if verify:
BASE_DIR = './File/'
        json_file = open(BASE_DIR+"config.json")
conf = json.load(json_file)
allTestValue = Utils.get_test_data(BASE_DIR+conf["file1"])
allTestValue.extend(Utils.get_test_data(BASE_DIR + conf["file2"]))
standardList = Utils.get_standard_data(BASE_DIR + conf["standard"])
conditions = []
for key, value in conf["con"].items():
conditions.append(Condition(key, value))
condition3 = Condition("M", conf["M"]/conf["amount"])
condition3.set_amount(conf["M"])
conditions.append(condition3)
nm = conf["nm"]
config = Calculator(allTestValue, standardList, conf["amount"], nm, 1000, conditions)
config.calculate()
Utils.save(config.result_list)
# x = raw_input("please enter")
|
LJJ/py_parseExcel
|
ParseExcel.py
|
ParseExcel.py
|
py
| 1,219 |
python
|
en
|
code
| 0 |
github-code
|
6
|
4495978796
|
class Funcionarios:
def __init__(self):
pass
def DadosFuncionarios(self):
arq = open('Funcionarios.txt','r')
s = arq.readlines()
lis = []
for x in s:
dados = x.split('{/')
lis.append(dados)
arq.close()
return lis
def ArquivoVazioouNao(self):
criar = open('Funcionarios.txt','a')
criar.close()
arq = open('Funcionarios.txt','r')
s = arq.readlines()
lis = []
for x in s:
dados = x.split('{/')
lis.append(dados)
        arq.close()
        if len(lis) == 0:
            return 0
        else:
            return 1
def Matricula_CadastradoOuNao(self,UserMat):
arq = open('Funcionarios.txt','r')
s = arq.readlines()
lis = []
for x in s:
dados = x.split('{/')
lis.append(dados)
limite = 0
for p in range(len(lis)):
if str(UserMat) != lis[p][0]:
limite += 1
        arq.close()
        if limite == len(lis):
            resposta = "not"
            return resposta
        else:
            resposta = "yes"
            return resposta
def AdicionarFuncionarios(self):
mat = int(input("Matricula (Numero com 6 digitos): "))
while len(str(mat)) < 6 or len(str(mat)) > 6:
mat = int(input("Informe um valor de 6 digitos\nMatricula: "))
ListaVazia = Funcionarios.ArquivoVazioouNao(self)
if ListaVazia == 0 or ListaVazia == 1:
arq = open('Funcionarios.txt','a')
decisao = Funcionarios.Matricula_CadastradoOuNao(self,mat)
if decisao == 'yes':
print("Matricula cadastrada, favor escolher uma nova opcao no Menu\n")
Funcionarios.ChamadaMenuFuncionarios(self)
else:
nom = input("Nome: ")
sen = input("Senha para login: ")
car = input("Cargo: ")
nas = input("Data de nascimento: ")
sex = input("Sexo: ")
end = input("Endereco: ")
bai = input("Bairro: ")
cid = input("Cidade: ")
cep = int(input("CEP: "))
while len(str(cep)) < 8 or len(str(cep)) > 8:
cep = int(input("Informe um valor de 8 digitos\nCEP: "))
est = input("Estado: ")
tel = int(input("Telefone (Digite 0, caso nao possua): "))
if tel == 0:
tel = 'Nao possui'
cel = int(input("Celular (Digite 0, caso nao possua): "))
if cel == 0:
cel = 'Nao possui'
arq.write(str(mat) + '{/' + nom + '{/' + sen + '{/' + car + '{/' + nas + '{/' + sex + '{/' + end + '{/' + bai + '{/' + cid + '{/' + str(cep) + '{/' + est + '{/' + str(tel) + '{/' + str(cel) + '\n')
arq.close()
def MAT_Editar_Remover(self,MATUser,lis):
limite = 0
for p in range(len(lis)):
if MATUser != lis[p][0]:
limite += 1
if limite == len(lis):
d = "Matricula nao cadastrada"
return d
else:
d = p
return d
def ConsultarFuncionarios(self,MatConsulta,lis):
limite = 0
for p in range(len(lis)):
if MatConsulta != lis[p][0]:
limite += 1
if limite == len(lis):
return("Matricula nao cadastrada")
if MatConsulta == lis[p][0]:
d = p
return("\nMatricula: %s\nNome: %s\nCargo: %s\nData de nascimento: %s\nSexo: %s\nEndereco: %s\nBairro: %s\nCidade: %s\nCEP: %s\nEstado: %s\nTelefone: %s\nCelular: %s " % (lis[d][0],lis[d][1],lis[d][3],lis[d][4],lis[d][5],lis[d][6],lis[d][7],lis[d][8],lis[d][9],lis[d][10],lis[d][11],lis[d][12]))
def EditarRemoverFuncionarios(self,decisao,d,lis):
criar = open('Funcionarios.txt','a')
        criar.close()
if decisao.lower() == "remover":
if d == "Matricula nao cadastrada":
print(d)
else:
del lis[d]
elif decisao.lower() == 'editar':
if d == "Matricula nao cadastrada":
print(d)
else:
nnom = input("Nome: ")
ncar = input("Cargo: ")
nnas = input("Data de nascimento: ")
nsex = input("Sexo: ")
nend = input("Endereco: ")
nbai = input("Bairro: ")
ncid = input("Cidade:")
ncep = input('CEP: ')
while len(str(ncep)) < 8 or len(str(ncep)) > 8:
ncep = int(input("Informe um valor de 8 digitos\nCEP: "))
nest = input("Estado: ")
ntel = int(input("Telefone (Digite 0, caso nao possua): "))
if ntel == 0:
ntel = "Nao possui"
ncel = int(input("Celular (Digite 0, caso nao possua): "))
if ncel == 0:
ncel = "Nao possui"
lis[d][1] = nnom
lis[d][3] = ncar
lis[d][4] = nnas
lis[d][5] = nsex
lis[d][6] = nend
lis[d][7] = nbai
lis[d][8] = ncid
lis[d][9] = ncep
lis[d][10] = nest
lis[d][11] = str(ntel)
lis[d][12] = str(ncel)
return lis
def SalvarDadosEditados(self,lis):
arq = open('Funcionarios.txt','w')
lista = lis
for x in lista:
p = '{/'.join(x)
arq.write(p)
arq.close()
def ChamadaMenuFuncionarios(self):
print("\nMENU DE FUNCIONARIOS\n\n1 - Adicionar\n2 - Consultar\n3 - Editar\n4 - Remover\n")
user = int(input("Escolha sua opcao: "))
if user == 1:
Funcionarios.AdicionarFuncionarios(self)
Funcionarios.ChamadaMenuFuncionarios(self)
elif user == 2:
MAT = input("Informe o Matricula que deseja consultar: ")
resultado = Funcionarios.ArquivoVazioouNao(self)
if resultado == 0:
print("Nao existem dados de nenhum funcionario, adicione um funcionario no menu de funcionarios")
Funcionarios.ChamadaMenuFuncionarios(self)
else:
print(Funcionarios.ConsultarFuncionarios(self,MAT, Funcionarios.DadosFuncionarios(self)))
Funcionarios.ChamadaMenuFuncionarios(self)
elif user == 3:
Mateditar = input("Informe o matricula que deseja editar: ")
resultado = Funcionarios.ArquivoVazioouNao(self)
if resultado == 0:
print("Nao existem dados de nenhum fornecedores, adicione um fornecedor no menu de fornecedores")
Funcionarios.ChamadaMenuFuncionarios(self)
else:
decisao = Funcionarios.EditarRemoverFuncionarios(self,'editar',Funcionarios.MAT_Editar_Remover(self,Mateditar,Funcionarios.DadosFuncionarios(self)),Funcionarios.DadosFuncionarios(self))
Funcionarios.SalvarDadosEditados(self,decisao)
Funcionarios.ChamadaMenuFuncionarios(self)
elif user == 4:
user = input("Informe qual MATRICULA deseja remover: ")
resultado = Funcionarios.ArquivoVazioouNao(self)
if resultado == 0:
print("Nao existem dados de nenhum funcionarios, adicione um funcionario no menu de funcionarios")
Funcionarios.ChamadaMenuFuncionarios(self)
else:
decisao = Funcionarios.EditarRemoverFuncionarios(self,'remover',Funcionarios.MAT_Editar_Remover(self,user,Funcionarios.DadosFuncionarios(self)),Funcionarios.DadosFuncionarios(self))
Funcionarios.SalvarDadosEditados(self,decisao)
Funcionarios.ChamadaMenuFuncionarios(self)
else:
print("\nOPCAO INVALIDA\n")
Funcionarios.ChamadaMenuFuncionarios(self)
|
Ander20n/Codigos-Faculdade
|
Projeto IP/Funcionarios.py
|
Funcionarios.py
|
py
| 8,276 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
33225318672
|
import torch
import torch.nn as nn
from torch.utils.data import Dataset
import h5py
import numpy as np
import utils.io as io
from datasets.hico_constants import HicoConstants
from datasets import metadata
import sys
import random
class HicoDataset(Dataset):
'''
Args:
subset: ['train', 'val', 'train_val', 'test']
'''
data_sample_count = 0 # record how many times to process data sampling
def __init__(self, data_const=HicoConstants(), subset='train', data_aug=False, sampler=None, test=False):
super(HicoDataset, self).__init__()
self.data_aug = data_aug
self.data_const = data_const
self.test = test
self.subset_ids = self._load_subset_ids(subset, sampler)
self.sub_app_data = self._load_subset_app_data(subset)
self.sub_spatial_data = self._load_subset_spatial_data(subset)
self.word2vec = h5py.File(self.data_const.word2vec, 'r')
self.sub_pose_feat = self._load_subset_pose_data(subset)
def _load_subset_ids(self, subset, sampler):
global_ids = io.load_json_object(self.data_const.split_ids_json)
bad_det_ids = io.load_json_object(self.data_const.bad_faster_rcnn_det_ids)
# skip bad instance detection image with 0-1 det
# !NOTE: How to reduce the number of bad instance detection images
subset_ids = [id for id in global_ids[subset] if id not in bad_det_ids['0']+bad_det_ids["1"]]
if sampler:
# import ipdb; ipdb.set_trace()
''' when changing the model, use sub-dataset to quickly show if there is something wrong '''
subset_ids = random.sample(subset_ids, int(len(subset_ids)*sampler))
return subset_ids
def _load_subset_app_data(self, subset):
print(f'Using {self.data_const.feat_type} feature...')
if subset == 'train' or subset == 'val' or subset == 'train_val':
return h5py.File(self.data_const.hico_trainval_data, 'r')
elif subset == 'test':
return h5py.File(self.data_const.hico_test_data, 'r')
else:
print('Please double check the name of subset!!!')
sys.exit(1)
def _load_subset_spatial_data(self, subset):
if subset == 'train' or subset == 'val' or subset == 'train_val':
return h5py.File(self.data_const.trainval_spatial_feat, 'r')
elif subset == 'test':
return h5py.File(self.data_const.test_spatial_feat, 'r')
else:
print('Please double check the name of subset!!!')
sys.exit(1)
def _load_subset_pose_data(self, subset):
if subset == 'train' or subset == 'val' or subset == 'train_val':
return h5py.File(self.data_const.trainval_keypoints_feat, 'r')
elif subset == 'test':
return h5py.File(self.data_const.test_keypoints_feat, 'r')
else:
print('Please double check the name of subset!!!')
sys.exit(1)
def _get_obj_one_hot(self,node_ids):
num_cand = len(node_ids)
obj_one_hot = np.zeros([num_cand,80])
for i, node_id in enumerate(node_ids):
obj_idx = int(node_id)-1
obj_one_hot[i,obj_idx] = 1.0
return obj_one_hot
def _get_word2vec(self,node_ids):
word2vec = np.empty((0,300))
for node_id in node_ids:
vec = self.word2vec[metadata.coco_classes[node_id]]
word2vec = np.vstack((word2vec, vec))
return word2vec
def _get_interactive_label(self, edge_label):
interactive_label = np.zeros(edge_label.shape[0])
interactive_label = interactive_label[:, None]
valid_idxs = list(set(np.where(edge_label==1)[0]))
if len(valid_idxs) > 0:
# import ipdb; ipdb.set_trace()
interactive_label[valid_idxs,:] = 1
return interactive_label
@staticmethod
def displaycount():
print("total times to process data sampling:", HicoDataset.data_sample_count)
# def get_verb_one_hot(self,hoi_ids):
# num_cand = len(hoi_ids)
# verb_one_hot = np.zeros([num_cand,len(self.verb_to_id)])
# for i, hoi_id in enumerate(hoi_ids):
# verb_id = self.verb_to_id[self.hoi_dict[hoi_id]['verb']]
# verb_idx = int(verb_id)-1
# verb_one_hot[i,verb_idx] = 1.0
# return verb_one_hot
def __len__(self):
return len(self.subset_ids)
def __getitem__(self, idx):
global_id = self.subset_ids[idx]
data = {}
single_app_data = self.sub_app_data[global_id]
single_spatial_data = self.sub_spatial_data[global_id]
single_pose_data = self.sub_pose_feat[str(global_id)]
data['roi_labels'] = single_app_data['classes'][:]
data['node_num'] = single_app_data['node_num'].value
data['edge_labels'] = single_app_data['edge_labels'][:]
data['features'] = single_app_data['feature'][:]
data['spatial_feat'] = single_spatial_data[:]
data['word2vec'] = self._get_word2vec(data['roi_labels'])
# data['pose_feat'] = single_pose_data[:]
data['pose_to_human'] = single_pose_data['pose_to_human'][:]
data['pose_to_obj_offset'] = single_pose_data['pose_to_obj_offset'][:]
if self.test:
data['global_id'] = global_id
data['img_name'] = global_id + '.jpg'
data['det_boxes'] = single_app_data['boxes'][:]
data['roi_scores'] = single_app_data['scores'][:]
# import ipdb; ipdb.set_trace()
if self.data_aug:
thresh = random.random()
if thresh > 0.5:
data = self._data_sampler(data)
return data
# for inference
def sample_date(self, global_id):
data = {}
single_app_data = self.sub_app_data[global_id]
single_spatial_data = self.sub_spatial_data[global_id]
single_pose_data = self.sub_pose_feat[str(global_id)]
data['global_id'] = global_id
data['img_name'] = global_id + '.jpg'
data['det_boxes'] = single_app_data['boxes'][:]
data['roi_labels'] = single_app_data['classes'][:]
data['roi_scores'] = single_app_data['scores'][:]
data['node_num'] = single_app_data['node_num'].value
# data['node_labels'] = single_app_data['node_labels'][:]
data['edge_labels'] = single_app_data['edge_labels'][:]
data['features'] = single_app_data['feature'][:]
data['spatial_feat'] = single_spatial_data[:]
data['word2vec'] = self._get_word2vec(data['roi_labels'])
data['pose_to_human'] = single_pose_data['pose_to_human'][:]
data['pose_to_obj_offset'] = single_pose_data['pose_to_obj_offset'][:]
data['keypoints'] = single_app_data['keypoints'][:]
return data
# for DatasetLoader
def collate_fn(batch):
'''
Default collate_fn(): https://github.com/pytorch/pytorch/blob/1d53d0756668ce641e4f109200d9c65b003d05fa/torch/utils/data/_utils/collate.py#L43
'''
batch_data = {}
batch_data['global_id'] = []
batch_data['img_name'] = []
batch_data['det_boxes'] = []
batch_data['roi_labels'] = []
batch_data['roi_scores'] = []
batch_data['node_num'] = []
batch_data['edge_labels'] = []
batch_data['features'] = []
batch_data['spatial_feat'] = []
batch_data['word2vec'] = []
# batch_data['pose_feat'] = []
batch_data['pose_to_human'] = []
batch_data['pose_to_obj_offset'] = []
batch_data['keypoints'] = []
for data in batch:
batch_data['roi_labels'].append(data['roi_labels'])
batch_data['node_num'].append(data['node_num'])
batch_data['edge_labels'].append(data['edge_labels'])
batch_data['features'].append(data['features'])
batch_data['spatial_feat'].append(data['spatial_feat'])
batch_data['word2vec'].append(data['word2vec'])
# batch_data["pose_feat"].append(data["pose_feat"])
batch_data["pose_to_human"].append(data["pose_to_human"])
batch_data["pose_to_obj_offset"].append(data["pose_to_obj_offset"])
if 'global_id' in data.keys():
batch_data['global_id'].append(data['global_id'])
batch_data['img_name'].append(data['img_name'])
batch_data['det_boxes'].append(data['det_boxes'])
batch_data['roi_scores'].append(data['roi_scores'])
if 'keypoints' in data.keys():
batch_data['keypoints'].append(data['keypoints'])
# import ipdb; ipdb.set_trace()
batch_data['edge_labels'] = torch.FloatTensor(np.concatenate(batch_data['edge_labels'], axis=0))
batch_data['features'] = torch.FloatTensor(np.concatenate(batch_data['features'], axis=0))
batch_data['spatial_feat'] = torch.FloatTensor(np.concatenate(batch_data['spatial_feat'], axis=0))
batch_data['word2vec'] = torch.FloatTensor(np.concatenate(batch_data['word2vec'], axis=0))
# batch_data['pose_feat'] = torch.FloatTensor(np.concatenate(batch_data['pose_feat'], axis=0))
batch_data['pose_to_human'] = torch.FloatTensor(np.concatenate(batch_data['pose_to_human'], axis=0))
batch_data['pose_to_obj_offset'] = torch.FloatTensor(np.concatenate(batch_data['pose_to_obj_offset'], axis=0))
return batch_data
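# Hedged usage sketch (batch size and subset are assumptions): collate_fn is meant to be
# passed to a PyTorch DataLoader so per-image fields stay as Python lists while the
# edge/appearance features are concatenated across the batch, e.g.
#   from torch.utils.data import DataLoader
#   loader = DataLoader(HicoDataset(subset='train'), batch_size=8,
#                       shuffle=True, collate_fn=collate_fn)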
|
birlrobotics/PMN
|
datasets/hico_dataset.py
|
hico_dataset.py
|
py
| 9,279 |
python
|
en
|
code
| 7 |
github-code
|
6
|
34197476486
|
#!/bin/python3
import sys
import os
import mysql.connector
import datetime
from sys import argv
import requests
import json
from requests.exceptions import HTTPError
from slack import WebClient
from slack.errors import SlackApiError
import logging
logging.basicConfig(level=logging.DEBUG)
database_conf = "/var/lib/jenkins/engine.cnf"
operator_name_list = argv[1].split(",")
start_payment_date = argv[2]
finish_payment_date = argv[3]
game_cycle_file = "rounds.txt"
default_round_log = "round-close.log"
operator_id_list = []
search_list = ["| closed", "| not closed", "| game cycle is already in completed game cycle table"]
slack_channel = "#customer_support"
def collect_operator_id(operator_name: str) -> int:
sql_operator_id = ("select operator_id from core_operator where operator_name='{}'".format(operator_name))
cursor.execute(sql_operator_id)
operator_results = cursor.fetchall()
for op_id in operator_results:
operator_id = op_id[0]
return operator_id
def collect_game_cycle(operator_data: str):
sql_game_cycle = """
SELECT distinct(game_cycle_id) FROM tx_payment_journal a
left join tx_completed_game_cycle b on a.game_cycle_id=b.payment_reference
left join tx_player c on a.from_player_id=c.player_id
where a.transaction_id>=(SELECT transaction_id FROM tx_payment_journal where payment_date >= '{0}' limit 1)
and a.transaction_id<(SELECT transaction_id FROM tx_payment_journal where payment_date >= '{1}' limit 1)
and a.to_player_id=1 and a.complete=1 and a.cancelled=0 and a.current_balance>0 and b.completed_tx_id is null
and c.operator_id={2};""".format(start_payment_date, finish_payment_date, operator_data)
print(sql_game_cycle)
cleanup(game_cycle_file)
cursor.execute(sql_game_cycle)
result_table = cursor.fetchall()
for collumn in result_table:
game_cycle = collumn[0]
with open(game_cycle_file, "a") as rounds_list:
rounds_list.write("{}\n".format(game_cycle))
def close_rounds(operator_id_close: int):
try:
if os.path.exists(game_cycle_file):
print("*** Closing game rounds")
cleanup(default_round_log)
os.system("cp /var/lib/jenkins/devops-prod/scripts/close_rounds/application.properties .")
os.system("java -jar /var/lib/jenkins/devops-prod/scripts/close_rounds/close-round.jar {0} {1}".format(game_cycle_file, operator_id_close))
else:
print("*** No rounds were collected from database, please check data.")
open(default_round_log, "a").close()
except OSError as e:
print("*** Error occurs: {}".format(sys.exc_info()[1]))
exit()
def notify_slack(operator_data: str, prev_date: str, now_date: str, pattern: str, pattert_count: str):
slack_token = "xoxp-229522615970"
client = WebClient(token=slack_token)
user="jenkins-bot"
try:
if pattern == "game cycle is already in completed game cycle table":
completed_pattern = "already closed"
response = client.chat_postMessage(
channel = slack_channel,
text = """Finished processing issued rounds for {0} operator:
Period: {1} - {2}
Rounds {3}: {4}
""".format(operator_data.replace(" ", ""), prev_date, now_date, completed_pattern, pattert_count)
)
else:
response = client.chat_postMessage(
channel = slack_channel,
text = """Finished processing issued rounds for {0} operator:
Period: {1} - {2}
Rounds {3}: {4}
""".format(operator_data.replace(" ", ""), prev_date, now_date, pattern, pattert_count)
)
if os.path.exists(filename):
response = client.files_upload(
channels = slack_channel,
file = filename,
title = custom_pattern
)
except SlackApiError as e:
# You will get a SlackApiError if "ok" is False
assert e.response["error"] # str like 'invalid_auth', 'channel_not_found'
except FileNotFoundError as e:
print("*** Pattern for search was not found: {}".format(sys.exc_info()[1]))
def parse_log(message: str, operatorname: str):
global total_pattert_count
global custom_pattern
global filename
custom_pattern = message.replace("| ", "")
if message == "| closed":
filename = "Rounds_closed.log"
elif message == "| not closed":
filename = "Rounds_not_closed.log"
elif message == "| game cycle is already in completed game cycle table":
filename = "Rounds_already_closed.log"
total_pattert_count = 0
with open(default_round_log, "r") as log_file:
for line in log_file:
if message in line:
total_pattert_count += 1
with open(filename, "a") as closed_rounds:
closed_rounds.write(line)
print("File was created: {}".format(filename))
notify_slack(operatorname, start_payment_date, finish_payment_date, custom_pattern, total_pattert_count)
cleanup(filename)
def cleanup(item: str):
try:
if os.path.exists(item):
os.system("rm -rf {}".format(item))
print("*** {} was successfully removed from workspace".format(item))
except OSError as e:
print("*** Error occurs: {}".format(sys.exc_info()[1]))
exit()
def main():
    # The helper functions above reference `cursor` at module scope, so the
    # connection objects are declared global before being assigned here.
    global db_connection, cursor
    try:
        db_connection = mysql.connector.connect(option_files=database_conf, option_groups="client")
        cursor = db_connection.cursor()
for operator in operator_name_list:
print("Processing {} operator:".format(operator))
collect_game_cycle(collect_operator_id(operator))
close_rounds(collect_operator_id(operator))
for search_pattern in search_list:
parse_log(search_pattern, operator)
except mysql.connector.Error as e:
print("*** ERROR: {}".format(e.msg))
exit()
finally:
if (db_connection.is_connected()):
db_connection.close()
cursor.close()
cleanup(game_cycle_file)
cleanup(default_round_log)
if __name__ == '__main__':
main()
|
vlad-solomai/viam_automation
|
automation_gambling/game_round_management/close_rounds_slack.py
|
close_rounds_slack.py
|
py
| 6,354 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7798026425
|
import os.path
import xml.dom.minidom
import xml.xpath
import logging
import edef
from edef.dev import Config
import fnmatch
from Tools import getModuleName
class Model:
def __init__(self):
self._logger = logging.getLogger("edef.dev")
self._base_path = Config().getBasePath()
self._module_list = dict()
imp = edef.Importer()
xmlfile_list = list()
mod_list = imp.listModules()
for mod in mod_list:
xmlfile_list.append(imp._find_module_meta(mod))
for path in xmlfile_list:
self._logger.debug("Found xml file %s"%path)
#path = os.path.abspath( os.path.join(self._base_path, filename) )
try:
module = eDevModelModule(path)
except:
self._logger.exception("Exception while load xml %s"%path)
continue
self._module_list[module.GetURI()] = module
def openURI(self, uri):
if uri == "mod://":
return self._module_list.keys()
try: mod = self._module_list[uri]
except: raise Exception("Unknown module %s"%uri)
return mod.getText()
def saveURI(self, uri, txt=None):
if not uri in self._module_list.keys():
# create module...
mod_name = getModuleName(uri)+".xml"
mod_path = os.path.join(self._base_path, mod_name)
if os.path.isfile(mod_path): raise Exception("File %s allready exists!"%mod_path)
f = open(mod_path,"w")
f.write(txt)
f.close()
mod = eDevModelModule(mod_path)
self._module_list[uri] = mod
return
# save module
mod = self._module_list[uri]
mod.setText(txt)
def checkURI(self, uri):
return uri in self._module_list.keys()
def deleteURI(self, uri):
if not uri in self._module_list.keys():
raise Exception("Module %s not known"%uri)
os.unlink(self._module_list[uri].getPath())
del self._module_list[uri]
def isURIWriteable(self, uri):
if uri == "mod://": return False
if not uri in self._module_list.keys():
raise Exception("Module %s not known"%uri)
return self._module_list[uri].isWriteable()
def isURIEditable(self, uri):
if uri == "mod://": return False
if not uri in self._module_list.keys():
raise Exception("Module %s not known"%uri)
return self._module_list[uri].isEditable()
class eDevModelModule:
_d_name = None
def __init__(self, path):
self._d_full_path = path
if not os.path.isfile(path):
raise Exception("%s doesn't point to a file!"%path)
(tmp, name) = os.path.split(path)
(name, tmp) = os.path.splitext(name)
(tmp, self._d_name) = os.path.splitext(name)
if self._d_name == "": self._d_name = tmp
self._d_uri = "mod://"+"/".join(name.split("."))
self._editable = False
self._writeable = False
# FIXME replace by TREX
dom = xml.dom.minidom.parse(path)
# if module:
if len(xml.xpath.Evaluate("/Module", dom))==1:
self._editable = True
if os.access(path, os.W_OK): self._writeable = True
# if assembly
elif len(xml.xpath.Evaluate("/Assembly", dom))==1:
self._editable = False
self._writeable = False
else:
raise Exception("Invalid module description: %s"%path)
def GetURI(self): return self._d_uri
def getName(self): return self._d_name
def getPath(self): return self._d_full_path
def getText(self):
f = open(self._d_full_path,"r")
txt = f.read()
f.close()
return txt
def setText(self, xml_txt):
# FIXME check xml_txt
f = open(self._d_full_path, "w")
f.write(xml_txt)
f.close()
def isEditable(self): return self._editable
def isWriteable(self): return self._writeable
|
BackupTheBerlios/pplt-svn
|
trunk/edef/edef-dev/modeditor/ModelModule.py
|
ModelModule.py
|
py
| 4,039 |
python
|
en
|
code
| 0 |
github-code
|
6
|
24673967273
|
# Jumpy! - Platform game
# KidsCanCode - Game Development with python
# Art from Kenney.nl
import pygame as pg
import random
from settings import *
from sprites import *
from os import path
class Game:
def __init__(self):
# Initialize game window
pg.init()
pg.mixer.init()
self.screen = pg.display.set_mode((WIDTH, HEIGHT))
pg.display.set_caption(TITLE)
self.clock = pg.time.Clock()
self.running = True
self.font_name = pg.font.match_font(FONT_NAME)
self.load_data()
def load_data(self):
# load high score
self.dir = path.dirname(__file__)
        try:
            with open(path.join(self.dir, HS_FILE), 'r') as f:
                self.highscore = int(f.read())
        except (IOError, ValueError):
            self.highscore = 0
# load spritesheet image
img_dir = path.join(self.dir, 'img')
self.spritesheet = Spritesheet(path.join(img_dir, SPRITESHEET))
self.cloud_images = []
for i in range(1, 4):
self.cloud_images.append(pg.image.load(path.join(img_dir, 'cloud{}.png'.format(i))).convert())
# load sounds
self.snd_dir = path.join(self.dir, 'snd')
self.jump_sound = pg.mixer.Sound(path.join(self.snd_dir, 'Jump33.wav'))
self.boost_sound = pg.mixer.Sound(path.join(self.snd_dir, 'powerup16.wav'))
def new(self):
        # Initializing the game
self.score = 0
# initializing all the sprites groups
self.all_sprites = pg.sprite.LayeredUpdates()
self.platforms = pg.sprite.Group()
self.powerups = pg.sprite.Group()
self.mobs = pg.sprite.Group()
self.clouds = pg.sprite.Group()
self.mob_timer = pg.time.get_ticks()
# Add a player
self.player = Player(self)
# Create platforms
for plat in PLATFORM_LIST:
Platform(self, *plat)
# Spawn some clouds
for i in range(8):
c = Cloud(self)
c.rect.y += 500
# loading the game music
pg.mixer.music.load(path.join(self.snd_dir, 'Happy Tune.ogg'))
pg.mixer.music.set_volume(VOLUME)
self.run()
def run(self):
# Game loop
pg.mixer.music.play(loops=-1)
self.playing = True
while self.playing:
            # Keep the loop running at the right speed
            self.clock.tick(FPS)
            self.events()
self.update()
self.draw()
pg.mixer.music.fadeout(500)
def update(self):
# Game loop update
# update Sprites
self.all_sprites.update()
# Spawn a mob
now = pg.time.get_ticks()
if now - self.mob_timer > MOB_FREQ + random.choice([1000, -500, 250, -1000]):
self.mob_timer = now
Mob(self)
# Check if the player hits any platform - only if falling
if self.player.vel.y > 0:
hits = pg.sprite.spritecollide(self.player, self.platforms, False)
if hits:
lowest = hits[0]
for hit in hits:
if hit.rect.bottom > lowest.rect.bottom:
lowest = hit
if self.player.pos.x > lowest.rect.left and self.player.pos.x < lowest.rect.right:
if self.player.pos.y < lowest.rect.centery:
self.player.pos.y = lowest.rect.top # puts the player on top of the platform
self.player.vel.y = 0 # set the y acceleration to 0
self.player.jumping = False
# Check is player hit a powerup
hits_pow = pg.sprite.spritecollide(self.player, self.powerups, True)
if hits_pow:
for hit in hits_pow:
if hit.type == 'boost':
self.player.vel.y = -BOOST_POWER
self.player.jumping = False
# Check is player hit a mob
hits_mob = pg.sprite.spritecollide(self.player, self.mobs, False, pg.sprite.collide_mask)
if hits_mob:
for hit in hits_mob:
self.playing = False
# if player reaches top 1/4 of screen
if self.player.rect.top < HEIGHT / 4:
            # spawn a cloud - 5% chance
if random.randrange(100) < 5:
Cloud(self)
# move the player down
self.player.pos.y += max(abs(self.player.vel.y), 2)
# move the platforms down - scrolling up
for plat in self.platforms:
plat.rect.y += max(abs(self.player.vel.y), 2)
if plat.rect.top > HEIGHT:
plat.kill()
self.score += 10
# move the mobs down when scrolling up
for mob in self.mobs:
mob.rect.y += max(abs(self.player.vel.y), 2)
if mob.rect.top > HEIGHT:
mob.kill()
# move the mobs down when scrolling up
for cloud in self.clouds:
cloud.rect.y += max(abs(self.player.vel.y / random.randrange(1, 4)), 1)
if cloud.rect.top > HEIGHT:
cloud.kill()
# if we die
if self.player.rect.top > HEIGHT:
for sprite in self.all_sprites:
sprite.rect.y -= max(self.player.vel.y, 10)
if sprite.rect.bottom < 0:
sprite.kill()
if len(self.platforms) == 0:
self.playing = False
# spawn new platforms to keep average number
while len(self.platforms) < 6:
width = random.randrange(50, 100)
Platform(self, random.randrange(0, WIDTH - width),
random.randrange(-70, -35))
    def events(self):
# Game events
# process input (events)
for event in pg.event.get():
# check for closing window
if event.type == pg.QUIT:
if self.playing:
self.playing = False
self.running = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_SPACE:
self.player.jump()
if event.key == pg.K_ESCAPE:
if self.playing:
self.playing = False
self.running = False
if event.type == pg.KEYUP:
if event.key == pg.K_SPACE:
self.player.jump_cut()
def draw(self):
# Game loop - draw
        # Draw / render
self.screen.fill(BGCOLOR)
self.all_sprites.draw(self.screen)
self.draw_text('Your score: ' + str(self.score), 22, WHITE, WIDTH / 2, 15)
# *After* drawing everything, flip the display
pg.display.flip()
def show_start_screen(self):
# Game splash/Start screen
pg.mixer.music.load(path.join(self.snd_dir, 'Yippee.ogg'))
pg.mixer.music.set_volume(VOLUME)
pg.mixer.music.play(loops=-1)
self.screen.fill(BGCOLOR)
self.draw_text(TITLE, 48, WHITE, WIDTH / 2, HEIGHT / 4)
self.draw_text('Arrows to move, Space to jump', 22, WHITE, WIDTH / 2, HEIGHT / 2)
self.draw_text('Press a key to play', 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
self.draw_text('High score: ' + str(self.highscore), 22, WHITE, WIDTH / 2, 15)
pg.display.flip()
self.wait_for_key()
pg.mixer.music.fadeout(500)
def show_go_screen(self):
pg.mixer.music.load(path.join(self.snd_dir, 'Yippee.ogg'))
pg.mixer.music.set_volume(VOLUME)
pg.mixer.music.play(loops=-1)
if self.running:
# Game over screen
self.screen.fill(BGCOLOR)
self.draw_text("GAME OVER", 48, WHITE, WIDTH / 2, HEIGHT / 4)
self.draw_text('Score: ' + str(self.score), 22, WHITE, WIDTH / 2, HEIGHT / 2)
self.draw_text('Press a key to play again', 22, WHITE, WIDTH / 2, HEIGHT * 3 / 4)
if self.score > self.highscore:
self.draw_text('NEW HIGH SCORE!', 22, WHITE, WIDTH / 2, HEIGHT / 2 + 40)
self.highscore = self.score
with open(path.join(self.dir, HS_FILE), 'w') as f:
f.write(str(self.score))
else:
self.draw_text('High score: ' + str(self.highscore), 22, WHITE, WIDTH / 2, HEIGHT / 2 + 40)
pg.display.flip()
self.wait_for_key()
pg.mixer.music.fadeout(500)
def wait_for_key(self):
waiting = True
while waiting:
self.clock.tick(FPS)
for event in pg.event.get():
if event.type == pg.QUIT:
waiting = False
self.running = False
if event.type == pg.KEYDOWN:
if event.key == pg.K_ESCAPE:
if self.playing:
self.playing = False
self.running = False
if event.type == pg.KEYUP:
waiting = False
def draw_text(self, text, size, color, x, y):
font = pg.font.Font(self.font_name, size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
self.screen.blit(text_surface, text_rect)
def main():
# main function for this app
g = Game()
g.show_start_screen()
while g.running:
g.new()
g.show_go_screen()
pg.quit()
if __name__ == '__main__':
main()
|
guychaimy/jumpy
|
main.py
|
main.py
|
py
| 9,548 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42972033220
|
from subprocess import Popen, PIPE
import sys
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from config import config
from theme import theme
class PluginGui:
_route = None
_button = None
_target = None
def __init__(self, parent, route):
        self._parent = parent  # stored so _to_clipboard() can reach the clipboard
        self._route = route
self._button = tk.Frame(parent)
g = {'row': 1, 'column': 1, 'sticky': tk.NSEW}
self._button_open = ttk.Button(self._button)
self._button_open.grid(g)
self._button_open.configure(command=self._load_route)
self._button_open.bind('<Double-Button-1>', self._clear_route)
self._button_theme = tk.Label(self._button)
self._button_theme.grid(g)
self._button_theme.bind('<Double-Button-1>', self._clear_route)
theme.register_alternate((self._button_open, self._button_theme), g)
theme.button_bind(self._button_theme, self._load_route)
self._target = tk.Label(parent, text='', anchor=tk.W)
self._target.bind('<Button-1>', self._to_clipboard)
self.update_ui()
def get_ui(self):
return (self._button, self._target)
def update_ui(self):
waypoints = len(self._route)
if waypoints == 0:
self._button_open['text'] = ' Open '
self._target['text'] = 'no waypoints'
else:
self._button_open['text'] = f'{waypoints}'
self._target['text'] = self._route.next()
self._button_theme['text'] = self._button_open['text']
self._to_clipboard()
def _to_clipboard(self, event=None):
if len(self._route) == 0:
return
target = self._route.next()
if sys.platform == "linux" or sys.platform == "linux2":
command = Popen(["xclip", "-selection", "c"], stdin=PIPE)
command.communicate(input=target.encode(), timeout=1)
else:
self._parent.clipboard_clear()
self._parent.clipboard_append(target)
self._parent.update()
def _clear_route(self, event=None):
self._route.clear()
self.update_ui()
def _load_route(self, event=None):
if len(self._route) > 0:
return
ftypes = [
('All supported files', '*.csv *.txt'),
('CSV files', '*.csv'),
('Text files', '*.txt'),
]
logdir = config.get_str('journaldir',
default=config.default_journal_dir)
filename = filedialog.askopenfilename(initialdir=logdir,
filetypes=ftypes)
if self._route.load(filename):
self.update_ui()
|
pwerken/EDMC_Waypoints
|
plugin_gui.py
|
plugin_gui.py
|
py
| 2,681 |
python
|
en
|
code
| 1 |
github-code
|
6
|
36011235958
|
# encoding: utf8
# Import local files:
import colors as COLORS
import margins as MARGINS
import roi as ROI
# External:
external = ROI.ROI('External', 'External', COLORS.external)
body = ROI.ROI('Body', 'Organ', COLORS.external)
# Support:
couch = ROI.ROI('Couch', 'Support', COLORS.couch)
# Target volumes:
gtv = ROI.ROI('GTV', 'Gtv', COLORS.gtv)
ctv = ROI.ROIExpanded('CTV', 'Ctv', COLORS.ctv, gtv, margins = MARGINS.uniform_5mm_expansion)
ptv = ROI.ROIExpanded('PTV', 'Ptv', COLORS.ptv, ctv, margins = MARGINS.uniform_5mm_expansion)
ctv_ext = ROI.ROIAlgebra('CTV', 'Ctv', COLORS.ctv, sourcesA = [gtv], sourcesB = [external], operator = 'Intersection', marginsA = MARGINS.uniform_5mm_expansion, marginsB = MARGINS.uniform_5mm_contraction)
ctv_underived = ROI.ROI('CTV', 'Ctv', COLORS.ctv)
igtv = ROI.ROI('IGTV', 'Gtv', COLORS.gtv)
igtv1 = ROI.ROI('IGTV1', 'Gtv', COLORS.gtv)
igtv2 = ROI.ROI('IGTV2', 'Gtv', COLORS.gtv)
igtv3 = ROI.ROI('IGTV3', 'Gtv', COLORS.gtv)
ictv = ROI.ROIExpanded('ICTV', 'Ctv', COLORS.ctv, igtv, margins = MARGINS.uniform_5mm_expansion)
ictv1 = ROI.ROIExpanded('ICTV1', 'Ctv', COLORS.ctv, igtv1, margins = MARGINS.uniform_5mm_expansion)
ictv2 = ROI.ROIExpanded('ICTV2', 'Ctv', COLORS.ctv, igtv2, margins = MARGINS.uniform_5mm_expansion)
ictv3 = ROI.ROIExpanded('ICTV3', 'Ctv', COLORS.ctv, igtv3, margins = MARGINS.uniform_5mm_expansion)
iptv = ROI.ROIExpanded('PTV', 'Ptv', COLORS.ptv, ictv, margins = MARGINS.uniform_5mm_expansion)
iptv_gtv = ROI.ROIExpanded('PTV', 'Ptv', COLORS.ptv, igtv, margins = MARGINS.uniform_5mm_expansion)
iptv1 = ROI.ROIExpanded('PTV1', 'Ptv', COLORS.ptv, ictv1, margins = MARGINS.uniform_5mm_expansion)
iptv2 = ROI.ROIExpanded('PTV2', 'Ptv', COLORS.ptv, ictv2, margins = MARGINS.uniform_5mm_expansion)
iptv3 = ROI.ROIExpanded('PTV3', 'Ptv', COLORS.ptv, ictv3, margins = MARGINS.uniform_5mm_expansion)
gtv_p = ROI.ROI('GTVp','Gtv', COLORS.gtv)
gtv_n = ROI.ROI('GTVn','Gtv', COLORS.gtv)
gtv_n1 = ROI.ROI('GTVn','Gtv', COLORS.gtv)
ctv_e = ROI.ROI('CTVe','Ctv', COLORS.ctv_med)
ptv_e = ROI.ROI('PTVe','Ptv', COLORS.ptv)
gtv_groin_l = ROI.ROI('GTV_Groin_L','Gtv', COLORS.gtv)
gtv_groin_r = ROI.ROI('GTV_Groin_R','Gtv', COLORS.gtv)
ctv_groin_l = ROI.ROI('CTV_Groin_L','Ctv', COLORS.ctv_med)
ctv_groin_r = ROI.ROI('CTV_Groin_R','Ctv', COLORS.ctv_med)
ptv_groin_l = ROI.ROI('PTV_Groin_L','Ptv', COLORS.ptv)
ptv_groin_r = ROI.ROI('PTV_Groin_R','Ptv', COLORS.ptv)
ctv_p = ROI.ROI('CTVp', 'Ctv', COLORS.ctv)
ctv_n = ROI.ROI('CTVn','Ctv', COLORS.ctv)
igtv_p = ROI.ROI('IGTVp','Gtv', COLORS.gtv)
igtv_n = ROI.ROI('IGTVn','Gtv', COLORS.gtv)
ictv_p = ROI.ROI('ICTVp', 'Ctv', COLORS.ctv)
ictv_n = ROI.ROI('ICTVn', 'Ctv', COLORS.ctv)
gtv1 = ROI.ROI('GTV1', 'Gtv', COLORS.gtv)
gtv2 = ROI.ROI('GTV2', 'Gtv', COLORS.gtv)
gtv3 = ROI.ROI('GTV3', 'Gtv', COLORS.gtv)
gtv4 = ROI.ROI('GTV4', 'Gtv', COLORS.gtv)
ctv1 = ROI.ROI('CTV1', 'Ctv', COLORS.ctv)
ctv2 = ROI.ROI('CTV2', 'Ctv', COLORS.ctv)
ctv3 = ROI.ROI('CTV3', 'Ctv', COLORS.ctv)
ctv4 = ROI.ROI('CTV4', 'Ctv', COLORS.ctv)
gtv_sb = ROI.ROI('GTVsb', 'Gtv', COLORS.gtv)
ctv_sb = ROI.ROI('CTVsb', 'Ctv', COLORS.ctv)
vb = ROI.ROI('VB','Ctv', COLORS.ctv_med)
ctv_l = ROI.ROI('CTV_L', 'Ctv', COLORS.ctv)
ctv_r = ROI.ROI('CTV_R', 'Ctv', COLORS.ctv)
ptvc_l = ROI.ROI('PTVc_L', 'Ptv', COLORS.ptv)
ptvc_r = ROI.ROI('PTVc_R', 'Ptv', COLORS.ptv)
# OARs: Empty (will be delineated manually):
# Head:
eye_l = ROI.ROI('Eye_L', 'Organ', COLORS.eye)
eye_r = ROI.ROI('Eye_R', 'Organ', COLORS.eye)
lens_l = ROI.ROI('Lens_L', 'Organ', COLORS.lens)
lens_r = ROI.ROI('Lens_R', 'Organ', COLORS.lens)
optic_nrv_l = ROI.ROI('OpticNerve_L', 'Organ', COLORS.optic_nrv)
optic_nrv_r = ROI.ROI('OpticNerve_R', 'Organ', COLORS.optic_nrv)
optic_chiasm = ROI.ROI('OpticChiasm','Organ', COLORS.chiasma)
lacrimal_l =ROI.ROI('LacrimalGland_L', 'Organ', COLORS.lacrimal)
lacrimal_r =ROI.ROI('LacrimalGland_R', 'Organ', COLORS.lacrimal)
cochlea_l = ROI.ROI('Cochlea_L','Organ', COLORS.cochlea)
cochlea_r = ROI.ROI('Cochlea_R','Organ', COLORS.cochlea)
hippocampus_l = ROI.ROI('Hippocampus_L','Organ', COLORS.hippocampus)
hippocampus_r = ROI.ROI('Hippocampus_R','Organ', COLORS.hippocampus)
brainstem = ROI.ROI('Brainstem', 'Organ', COLORS.brainstem)
nasal_cavity = ROI.ROI('NasalCavity', 'Organ', COLORS.nasal_cavity)
oral_cavity = ROI.ROI('OralCavity', 'Organ', COLORS.oral_cavity)
pituitary = ROI.ROI('Pituitary', 'Organ', COLORS.pituitary)
submand_l = ROI.ROI('SubmandGland_L', 'Organ', COLORS.submand)
submand_r = ROI.ROI('SubmandGland_R', 'Organ', COLORS.submand)
cornea_l = ROI.ROI('Cornea_L', 'Organ', COLORS.cornea)
cornea_r = ROI.ROI('Cornea_R', 'Organ', COLORS.cornea)
retina_l = ROI.ROI('Retina_L', 'Organ', COLORS.retina)
retina_r = ROI.ROI('Retina_R', 'Organ', COLORS.retina)
brainstem_core = ROI.ROIExpanded('BrainstemCore', 'Organ', COLORS.brainstem_core, brainstem, margins = MARGINS.uniform_2mm_contraction)
brainstem_surface = ROI.ROIAlgebra('BrainstemSurface', 'Organ', COLORS.brainstem_surface, sourcesA = [brainstem], sourcesB = [brainstem_core], operator = 'Subtraction')
# Thorax:
esophagus = ROI.ROI('Esophagus', 'Organ', COLORS.esophagus)
spinal_cord = ROI.ROI('SpinalCord', 'Organ', COLORS.spinal_cord)
heart = ROI.ROI('Heart', 'Organ', COLORS.heart)
# Breast:
thyroid = ROI.ROI('ThyroidGland','Organ', COLORS.thyroid)
a_lad = ROI.ROI('A_LAD','Organ', COLORS.lad)
# Lung, stereotactic:
chestwall = ROI.ROI('Chestwall', 'Organ', COLORS.chestwall)
greatves = ROI.ROI('GreatVessel','Organ', COLORS.heart)
trachea = ROI.ROI('Trachea','Organ', COLORS.trachea)
spleen = ROI.ROI('Spleen','Organ', COLORS.spleen)
stomach = ROI.ROI('Stomach','Organ', COLORS.stomach)
liver = ROI.ROI('Liver','Organ', COLORS.liver)
rib_x_l = ROI.ROI('Ribx_L','Organ', COLORS.rib)
rib_x_r = ROI.ROI('Ribx_R','Organ', COLORS.rib)
rib_y_l = ROI.ROI('Riby_L','Organ', COLORS.rib)
rib_y_r = ROI.ROI('Riby_R','Organ', COLORS.rib)
ribs = ROI.ROI('Ribs','Organ', COLORS.ribs)
main_bronchus_l = ROI.ROI('BronchusMain_L','Organ', COLORS.main_bronchus)
main_bronchus_r = ROI.ROI('BronchusMain_R','Organ', COLORS.main_bronchus)
# Spine SBRT:
cauda_equina = ROI.ROI('CaudaEquina','Organ', COLORS.cauda)
small_bowel = ROI.ROI('BowelSmall','Organ', COLORS.small_bowel)
colon = ROI.ROI('Colon','Organ', COLORS.colon)
brachial = ROI.ROI('BrachialPlexus','Organ', COLORS.brachial)
# Pelvis, prostate:
bowel_space = ROI.ROI('BowelBag', 'Organ', COLORS.bowel_space)
rectum = ROI.ROI('Rectum', 'Organ', COLORS.rectum)
pelvic_nodes = ROI.ROI('LN_Iliac', 'Ctv', COLORS.pelvic_nodes)
prostate = ROI.ROI('Prostate', 'Ctv', COLORS.prostate)
prostate_bed = ROI.ROI('SurgicalBed', 'Ctv', COLORS.prostate_bed)
urethra = ROI.ROI('Urethra', 'Organ', COLORS.urethra)
vesicles = ROI.ROI('SeminalVes', 'Ctv', COLORS.vesicles)
penile_bulb = ROI.ROI('PenileBulb', 'Organ', COLORS.penile_bulb)
anal_canal = ROI.ROI('AnalCanal','Organ', COLORS.anal_canal)
levator_ani = ROI.ROI('LevatorAni', 'Organ', COLORS.levator_ani)
# Bone ROIs:
humeral_l = ROI.ROI('HumeralHead_L', 'Organ', COLORS.bone_color1)
humeral_r = ROI.ROI('HumeralHead_R', 'Organ', COLORS.bone_color1)
sternum = ROI.ROI('Sternum', 'Organ', COLORS.bone_color3)
l2 = ROI.ROI('L2', 'Organ', COLORS.bone_color1)
l3 = ROI.ROI('L3', 'Organ', COLORS.bone_color2)
l4 = ROI.ROI('L4', 'Organ', COLORS.bone_color1)
l5 = ROI.ROI('L5', 'Organ', COLORS.bone_color2)
sacrum = ROI.ROI('Sacrum', 'Organ', COLORS.bone_color1)
coccyx = ROI.ROI('Coccyx', 'Organ', COLORS.bone_color2)
pelvic_girdle_l = ROI.ROI('PelvicGirdle_L', 'Organ', COLORS.bone_color3)
pelvic_girdle_r = ROI.ROI('PelvicGirdle_R', 'Organ', COLORS.bone_color3)
femur_head_neck_l = ROI.ROI('FemurHeadNeck_L', 'Organ', COLORS.bone_color1)
femur_head_neck_r = ROI.ROI('FemurHeadNeck_R', 'Organ', COLORS.bone_color1)
# Vessels:
a_descending_aorta = ROI.ROI('A_DescendingAorta', 'Organ', COLORS.artery_color1)
a_common_iliac_l = ROI.ROI('A_CommonIliac_L', 'Organ', COLORS.artery_color2)
a_common_iliac_r = ROI.ROI('A_CommonIliac_R', 'Organ', COLORS.artery_color2)
a_internal_iliac_l = ROI.ROI('A_InternalIliac_L', 'Organ', COLORS.artery_color3)
a_internal_iliac_r = ROI.ROI('A_InternalIliac_R', 'Organ', COLORS.artery_color3)
a_external_iliac_l = ROI.ROI('A_ExternalIliac_L', 'Organ', COLORS.artery_color4)
a_external_iliac_r = ROI.ROI('A_ExternalIliac_R', 'Organ', COLORS.artery_color4)
v_inferior_vena_cava = ROI.ROI('V_InferiorVenaCava', 'Organ', COLORS.vein_color1)
v_common_iliac_l = ROI.ROI('V_CommonIliac_L', 'Organ', COLORS.vein_color2)
v_common_iliac_r = ROI.ROI('V_CommonIliac_R', 'Organ', COLORS.vein_color2)
v_internal_iliac_l = ROI.ROI('V_InternalIliac_L', 'Organ', COLORS.vein_color3)
v_internal_iliac_r = ROI.ROI('V_InternalIliac_R', 'Organ', COLORS.vein_color3)
v_external_iliac_l = ROI.ROI('V_ExternalIliac_L', 'Organ', COLORS.vein_color4)
v_external_iliac_r = ROI.ROI('V_ExternalIliac_R', 'Organ', COLORS.vein_color4)
# Undefined / Other ROIs
# Breast organs:
surgical_bed_l = ROI.ROI('SurgicalBed_L','Undefined', COLORS.breast_draft)
surgical_bed_r = ROI.ROI('SurgicalBed_R','Undefined', COLORS.breast_draft)
imn_l = ROI.ROI('LN_IMN_L', 'Undefined', COLORS.imn)
imn_r = ROI.ROI('LN_IMN_R', 'Undefined', COLORS.imn)
breast_l_draft = ROI.ROI('Breast_L_Draft', 'Undefined', COLORS.contralat_draft)
breast_r_draft = ROI.ROI('Breast_R_Draft', 'Undefined', COLORS.contralat_draft)
level4_l = ROI.ROI('LN_Ax_L4_L', 'Undefined', COLORS.level4)
level3_l = ROI.ROI('LN_Ax_L3_L', 'Undefined', COLORS.level3)
level2_l = ROI.ROI('LN_Ax_L2_L', 'Undefined', COLORS.level2)
level1_l = ROI.ROI('LN_Ax_L1_L', 'Undefined', COLORS.level1)
level_l = ROI.ROI('LN_Ax_Pectoral_L', 'Undefined', COLORS.level)
level4_r = ROI.ROI('LN_Ax_L4_R', 'Undefined', COLORS.level4)
level3_r = ROI.ROI('LN_Ax_L3_R', 'Undefined', COLORS.level3)
level2_r = ROI.ROI('LN_Ax_L2_R', 'Undefined', COLORS.level2)
level1_r = ROI.ROI('LN_Ax_L1_R', 'Undefined', COLORS.level1)
level_r = ROI.ROI('LN_Ax_Pectoral_R', 'Undefined', COLORS.level)
artery1_l = ROI.ROI('A_Subclavian_L+A_Axillary_L', 'Undefined', COLORS.artery2)
artery2_l = ROI.ROI('A_Carotid_L', 'Undefined', COLORS.artery2)
vein1_l = ROI.ROI('V_Brachioceph', 'Undefined', COLORS.vein2)
vein2_l = ROI.ROI('V_Subclavian_L+V_Axillary_L','Undefined', COLORS.vein2)
vein3_l = ROI.ROI('V_Jugular_L','Undefined', COLORS.vein2)
scalene_muscle_l = ROI.ROI('ScaleneMusc_Ant_L', 'Undefined', COLORS.muscle)
scalene_muscle_r = ROI.ROI('ScaleneMusc_Ant_R', 'Undefined', COLORS.muscle)
artery1_r = ROI.ROI('A_Brachioceph', 'Undefined', COLORS.artery2)
artery2_r = ROI.ROI('A_Subclavian_R+A_Axillary_R', 'Undefined', COLORS.artery2)
artery3_r = ROI.ROI('A_Carotid_R', 'Undefined', COLORS.artery2)
vein1_r = ROI.ROI('V_Brachioceph_R', 'Undefined', COLORS.vein2)
vein2_r = ROI.ROI('V_Subclavian_R+V_Axillary_R', 'Undefined', COLORS.vein2)
vein3_r = ROI.ROI('V_Jugular_R','Undefined', COLORS.vein2)
prosthesis = ROI.ROI('Prosthesis','Undefined', COLORS.prosthesis)
prosthesis_l = ROI.ROI('Prosthesis_L','Undefined', COLORS.prosthesis)
prosthesis_r = ROI.ROI('Prosthesis_R','Undefined', COLORS.prosthesis)
# Markers:
markers = ROI.ROI('Markers', 'Marker', COLORS.clips)
seed1 = ROI.ROI('Marker1', 'Marker', COLORS.seed)
seed2 = ROI.ROI('Marker2', 'Marker', COLORS.seed)
seed3 = ROI.ROI('Marker3', 'Marker', COLORS.seed)
seed4 = ROI.ROI('Marker4', 'Marker', COLORS.seed)
marker1 = ROI.ROI('Marker1', 'Marker', COLORS.seed)
marker2 = ROI.ROI('Marker2', 'Marker', COLORS.seed)
marker3 = ROI.ROI('Marker3', 'Marker', COLORS.seed)
marker4 = ROI.ROI('Marker4', 'Marker', COLORS.seed)
# OARs: MBS (delineated by model based segmentation):
brain = ROI.ROI('Brain', 'Organ', COLORS.brain, case ='HeadNeck', model = 'Brain')
#brainstem = ROI.ROI('Brainstem', 'Organ', COLORS.brainstem, case ='HeadNeck', model = 'Brainstem')
spinal_canal = ROI.ROI('SpinalCanal', 'Organ', COLORS.spinal_canal, case ='Thorax', model = 'SpinalCord (Thorax)')
spinal_canal_head = ROI.ROI('SpinalCanal', 'Organ', COLORS.spinal_canal, case ='HeadNeck', model = 'SpinalCord')
parotid_l = ROI.ROI('Parotid_L', 'Organ', COLORS.parotid, case ='HeadNeck', model = 'ParotidGland (Left)')
parotid_r = ROI.ROI('Parotid_R', 'Organ', COLORS.parotid, case ='HeadNeck', model = 'ParotidGland (Right)')
lung_l = ROI.ROI('Lung_L', 'Organ', COLORS.lung, case ='Thorax', model = 'Lung (Left)')
lung_r = ROI.ROI('Lung_R', 'Organ', COLORS.lung, case ='Thorax', model = 'Lung (Right)')
kidney_l = ROI.ROI('Kidney_L', 'Organ', COLORS.kidney, case ='Abdomen', model = 'Kidney (Left)')
kidney_r = ROI.ROI('Kidney_R', 'Organ', COLORS.kidney, case ='Abdomen', model = 'Kidney (Right)')
bladder = ROI.ROI('Bladder', 'Organ', COLORS.bladder, case ='PelvicMale', model = 'Bladder')
femoral_l = ROI.ROI('FemoralHead_L', 'Organ', COLORS.femoral, case = 'PelvicMale', model = 'FemoralHead (Left)')
femoral_r = ROI.ROI('FemoralHead_R', 'Organ', COLORS.femoral, case = 'PelvicMale', model = 'FemoralHead (Right)')
# OARs: Unions:
parotids = ROI.ROIAlgebra('Parotids', 'Organ', COLORS.parotid, sourcesA=[parotid_l], sourcesB=[parotid_r])
submands = ROI.ROIAlgebra('SubmandGlands', 'Organ', COLORS.submand, sourcesA=[submand_l], sourcesB=[submand_r])
lungs = ROI.ROIAlgebra('Lungs', 'Organ', COLORS.lungs, sourcesA=[lung_l], sourcesB=[lung_r])
kidneys = ROI.ROIAlgebra('Kidneys', 'Organ', COLORS.kidneys, sourcesA=[kidney_l], sourcesB=[kidney_r])
ribs_l = ROI.ROIAlgebra('Ribs_L','Organ', COLORS.ribs, sourcesA=[rib_x_l], sourcesB=[rib_y_l])
ribs_r = ROI.ROIAlgebra('Ribs_R','Organ', COLORS.ribs, sourcesA=[rib_x_r], sourcesB=[rib_y_r])
lungs_igtv = ROI.ROIAlgebra('Lungs-IGTV', 'Organ', COLORS.lungs, sourcesA=[lungs], sourcesB=[igtv], operator = 'Subtraction')
breast_l = ROI.ROIAlgebra('Breast_L', 'Organ', COLORS.contralat, sourcesA = [breast_l_draft], sourcesB = [external], operator = 'Intersection', marginsA = MARGINS.zero, marginsB = MARGINS.uniform_5mm_contraction)
breast_r = ROI.ROIAlgebra('Breast_R', 'Organ', COLORS.contralat, sourcesA = [breast_r_draft], sourcesB = [external], operator = 'Intersection', marginsA = MARGINS.zero, marginsB = MARGINS.uniform_5mm_contraction)
# OARs: Target subtracted
# Other:
other_ptv = ROI.ROI('Other_PTV', 'Organ', COLORS.other_ptv)
# PRVs:
spinal_cord_prv = ROI.ROIExpanded('SpinalCord_PRV', 'Avoidance', COLORS.prv, source=spinal_cord, margins=MARGINS.uniform_2mm_expansion)
# Walls:
skin_srt = ROI.ROIWall('Skin','Organ', COLORS.skin, body, 0, 0.3)
skin = ROI.ROIWall('Skin','Organ', COLORS.skin, external, 0, 0.3)
skin_brain_5 = ROI.ROIWall('Skin','Organ', COLORS.skin, body, 0, 0.5)
skin_brain = ROI.ROIWall('Skin','Organ', COLORS.skin, external, 0, 0.5)
wall_ptv = ROI.ROIWall('zPTV_Wall', 'Undefined', COLORS.wall, iptv, 1, 0)
wall_ptv1 = ROI.ROIWall('zPTV1_Wall', 'Undefined', COLORS.wall, iptv1, 1, 0)
wall_ptv2 = ROI.ROIWall('zPTV2_Wall', 'Undefined', COLORS.wall, iptv2, 1, 0)
wall_ptv3 = ROI.ROIWall('zPTV3_Wall', 'Undefined', COLORS.wall, iptv3, 1, 0)
# ROIs for optimization:
z_water = ROI.ROI('zWater', 'Undefined', COLORS.other_ptv)
z_heart = ROI.ROI('zHeart', 'Undefined', COLORS.heart)
z_esophagus = ROI.ROI('zEsophagus', 'Undefined', COLORS.esophagus)
z_bladder = ROI.ROI('zBladder','Undefined', COLORS.bladder)
z_spc_bowel = ROI.ROI('zBowelBag','Undefined', COLORS.bowel_space)
z_rectum = ROI.ROI('zRectum', 'Undefined', COLORS.rectum)
dorso_rectum = ROI.ROI('zRectum_P', 'Undefined', COLORS.dorso_rectum)
z_rectum_p = ROI.ROI('zRectum_P', 'Undefined', COLORS.dorso_rectum)
z_ptv_77_wall = ROI.ROI('zPTV_77_Wall', 'Undefined', COLORS.wall)
z_ptv_70_77_wall = ROI.ROI('zPTV_70+77_Wall', 'Undefined', COLORS.wall)
z_ptv_67_5_wall = ROI.ROI('zPTV_67.5_Wall', 'Undefined', COLORS.wall)
z_ptv_62_5_67_5_wall = ROI.ROI('zPTV_62.5+67.5_Wall', 'Undefined', COLORS.wall)
z_ptv_50_62_5_67_5_wall = ROI.ROI('zPTV_50+62.5+67.5_Wall', 'Undefined', COLORS.wall)
z_ptv_60_wall = ROI.ROI('zPTV_60_Wall', 'Undefined', COLORS.wall)
z_ptv_50_wall = ROI.ROI('zPTV_50_Wall', 'Undefined', COLORS.wall)
z_ptv_47_50_wall = ROI.ROI('zPTV_47+50_Wall', 'Undefined', COLORS.wall)
z_ptv_57_60_wall = ROI.ROI('zPTV_57+60_Wall', 'Undefined', COLORS.wall)
z_ptv_70_wall = ROI.ROI('zPTV_70_Wall', 'Undefined', COLORS.wall)
z_ptv_62_5_wall = ROI.ROI('zPTV_62.5_Wall', 'Undefined', COLORS.wall)
z_ptv_56_temp = ROI.ROI('zPTV_56_Temp', 'Undefined', COLORS.wall)
z_ptv_56_wall = ROI.ROI('zPTV_56_Wall', 'Undefined', COLORS.wall)
z_ptv_50_temp = ROI.ROI('zPTV_50_Temp', 'Undefined', COLORS.wall)
z_ptv_50_wall = ROI.ROI('zPTV_50_Wall', 'Undefined', COLORS.wall)
z_ptv_wall = ROI.ROI('zPTV_Wall', 'Undefined', COLORS.wall)
z_ptv1_wall = ROI.ROI('zPTV1_Wall', 'Undefined', COLORS.wall)
z_ptv2_wall = ROI.ROI('zPTV2_Wall', 'Undefined', COLORS.wall)
z_ptv3_wall = ROI.ROI('zPTV3_Wall', 'Undefined', COLORS.wall)
z_ptv4_wall = ROI.ROI('zPTV4_Wall', 'Undefined', COLORS.wall)
ctv_oars = ROI.ROI('zCTV-OARs', 'Ctv', COLORS.ctv)
ptv_oars = ROI.ROI('zPTV-OARs', 'Ptv', COLORS.ptv)
ptv_and_oars = ROI.ROI('zPTV_AND_OARs', 'Ptv', COLORS.other_ptv)
z_eye_l = ROI.ROIWall('zEye_L','Undefined', COLORS.wall, eye_l, 0, 0.2)
z_eye_r = ROI.ROIWall('zEye_R','Undefined', COLORS.wall, eye_r, 0, 0.2)
# Substitute ROI objects (only used for naming):
# Targets:
ptv_77 = ROI.ROI('PTV_77', 'Ptv', COLORS.ptv_high)
ptv_70 = ROI.ROI('PTV_70', 'Ptv', COLORS.ptv_med)
ptv_70_sib = ROI.ROI('PTV!_70', 'Ptv', COLORS.ptv_med)
ctv_77 = ROI.ROI('CTV_77', 'Ctv', COLORS.ctv_high)
ctv_70 = ROI.ROI('CTV_70', 'Ctv', COLORS.ctv_med)
ctv_70_sib = ROI.ROI('CTV!_70', 'Ctv', COLORS.ctv_med)
ptv_56 = ROI.ROI('PTV!_56', 'Ptv', COLORS.ptv_low)
ctv_56 = ROI.ROI('CTV!_56', 'Ctv', COLORS.ctv_low)
ptv_70_77 = ROI.ROI('PTV_70+77', 'Ptv', COLORS.ptv_low)
ctv_70_77 = ROI.ROI('CTV_70+77', 'Ctv', COLORS.ctv_low)
ptv_56_70_77 = ROI.ROI('PTV_56+70+77', 'Ptv', COLORS.ptv_low)
ptv_56_70 = ROI.ROI('PTV_56+70', 'Ptv', COLORS.ptv_low)
ptv_67_5 = ROI.ROI('PTV_67.5', 'Ptv', COLORS.ptv_high)
ptv_62_5 = ROI.ROI('PTV_62.5', 'Ptv', COLORS.ptv_med)
ptv_62_5_sib = ROI.ROI('PTV!_62.5', 'Ptv', COLORS.ptv_med)
ctv_67_5 = ROI.ROI('CTV_67.5', 'Ctv', COLORS.ctv_high)
ctv_62_5 = ROI.ROI('CTV_62.5', 'Ctv', COLORS.ctv_med)
ctv_62_5_sib = ROI.ROI('CTV!_62.5', 'Ctv', COLORS.ctv_med)
ptv__50 = ROI.ROI('PTV!_50', 'Ptv', COLORS.ptv_low)
ctv__50 = ROI.ROI('CTV!_50', 'Ctv', COLORS.ctv_low)
ptv_62_5_67_5 = ROI.ROI('PTV_62.5+67.5', 'Ptv', COLORS.ptv_low)
ctv_62_5_67_5 = ROI.ROI('CTV_62.5+67.5', 'Ctv', COLORS.ctv_low)
ptv_50_62_5_67_5 = ROI.ROI('PTV_50+62.5+67.5', 'Ptv', COLORS.ptv_low)
ptv_50_62_5 = ROI.ROI('PTV_50+62.5', 'Ptv', COLORS.ptv_low)
ptv_57_60 = ROI.ROI('PTV_57+60', 'Ptv', COLORS.ptv_low)
ctv_57_60 = ROI.ROI('CTV_57+60', 'Ctv', COLORS.ctv_low)
ptv_60 = ROI.ROI('PTV_60', 'Ptv', COLORS.ptv_high)
ctv_60 = ROI.ROI('CTV_60', 'Ctv', COLORS.ctv_high)
ptv_57 = ROI.ROI('PTV!_57', 'Ptv', COLORS.ptv_med)
ctv_57 = ROI.ROI('CTV!_57', 'Ctv', COLORS.ctv_med)
ptv_semves = ROI.ROI('PTV_SeminalVes', 'Ptv', COLORS.ptv_med)
ptv_pc = ROI.ROI('PTVpc', 'Ptv', COLORS.ptv)
ptv_pc_l = ROI.ROI('PTVpc_L', 'Ptv', COLORS.ptv)
ptv_pc_r = ROI.ROI('PTVpc_R', 'Ptv', COLORS.ptv)
ptv_p = ROI.ROI('PTVp', 'Ptv', COLORS.ptv)
ptv_n = ROI.ROI('PTVn', 'Ptv', COLORS.ptv)
ptv_nc = ROI.ROI('PTVnc', 'Ptv', COLORS.ptv)
ptv1 = ROI.ROI('PTV1', 'Ptv', COLORS.ptv)
ptv2 = ROI.ROI('PTV2', 'Ptv', COLORS.ptv)
ptv3 = ROI.ROI('PTV3', 'Ptv', COLORS.ptv)
ptv4 = ROI.ROI('PTV4', 'Ptv', COLORS.ptv)
ptv_c = ROI.ROI('PTVc', 'Ptv', COLORS.ptv)
ptv_sb = ROI.ROI('PTVsb', 'Ptv', COLORS.ptv)
ptv_sbc = ROI.ROI('PTVsbc', 'Ptv', COLORS.ptv)
ctv_47_50 = ROI.ROI('CTV_47+50', 'Ctv', COLORS.ctv_low)
ptv_47_50 = ROI.ROI('PTV_47+50', 'Ptv', COLORS.ptv_low)
ptv_50 = ROI.ROI('PTV_50', 'Ptv', COLORS.ptv_high)
ptv_50c = ROI.ROI('PTV_50c', 'Ptv', COLORS.ptv_high)
ctv_50 = ROI.ROI('CTV_50', 'Ctv', COLORS.ctv_high)
ctv_47 = ROI.ROI('CTV!_47', 'Ctv', COLORS.ctv_low)
ctv_47_tot = ROI.ROI('CTV_47', 'Ctv', COLORS.ctv_low)
ptv_47 = ROI.ROI('PTV!_47', 'Ptv', COLORS.ptv_med)
ptv_47_tot = ROI.ROI('PTV_47', 'Ptv', COLORS.ptv_med)
ptv_47c = ROI.ROI('PTV!_47c', 'Ptv', COLORS.ptv_med)
# Miscellaneous:
brain_gtv = ROI.ROI('Brain-GTV','Organ', COLORS.brain)
brain_ptv = ROI.ROI('Brain-PTV','Organ', COLORS.other_ptv)
lungs_gtv = ROI.ROI('Lungs-GTV', 'Organ', COLORS.lungs)
lungs_ctv = ROI.ROI('Lungs-CTV', 'Organ', COLORS.lungs)
ctv_p_ctv_sb = ROI.ROI('CTVp-CTVsb', 'Ctv', COLORS.ctv)
ctv_ctv_sb = ROI.ROI('CTV-CTVsb', 'Ctv', COLORS.ctv)
ptv_pc_ptv_sbc = ROI.ROI('PTVpc-PTVsbc', 'Ptv', COLORS.ptv)
ptv_c_ptv_sbc = ROI.ROI('PTVc-PTVsbc', 'Ptv', COLORS.ptv)
ptv_gtv = ROI.ROI('PTV-GTV', 'Ptv', COLORS.ptv_med)
ptv_spinal = ROI.ROI('PTV-SpinalCord_PRV', 'Ptv', COLORS.ptv_med)
mask_ptv = ROI.ROI('Mask_PTV','Undefined', COLORS.mask_ptv)
mask_ptv1 = ROI.ROI('Mask_PTV1','Undefined', COLORS.mask_ptv)
mask_ptv2 = ROI.ROI('Mask_PTV2','Undefined', COLORS.mask_ptv)
mask_ptv3 = ROI.ROI('Mask_PTV3','Undefined', COLORS.mask_ptv)
box = ROI.ROI('zBox','Undefined', COLORS.mask_ptv)
box1 = ROI.ROI('zBox1','Undefined', COLORS.mask_ptv)
box_l = ROI.ROI('zBox_L','Undefined', COLORS.mask_ptv)
box_r = ROI.ROI('zBox_R','Undefined', COLORS.mask_ptv)
box3 = ROI.ROI('zBox3','Undefined', COLORS.mask_ptv)
box4 = ROI.ROI('zBox4','Undefined', COLORS.mask_ptv)
|
dicom/raystation-scripts
|
settings/rois.py
|
rois.py
|
py
| 21,183 |
python
|
en
|
code
| 40 |
github-code
|
6
|
72168147708
|
# -*- coding: utf-8 -*-
import os
import re
import sys
import time
import json
import datetime
_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, '{}/../../libs/src'.format(_dir))
from roc_date_converter import RocDateConverter
class Inventory():
def __init__(self):
self._dir = os.path.dirname(os.path.abspath(__file__))
self.tse_data_dir = '{}/../../tse_crawler/data'.format(self._dir)
self.reset_securities()
def reset_securities(self):
self.securities = {
'normal': {},
'margin': {},
'short': {},
}
def store(self, inv_type, stock_code, date_obj, quantity):
date_str = date_obj.strftime('%Y-%m-%d')
buying_price = self.get_buying_price(stock_code, date_obj)
if buying_price is None:
raise InventoryException('找不到 {} 的價格資訊'.format(date_str))
number = 0
while quantity > number:
number += 1
securities_id = '{}{}{}'.format(stock_code, date_obj.strftime('%Y%m%d'), number)
securities_data = {
'id': securities_id,
'date': date_str,
'buying_price': buying_price,
}
try:
self.securities[inv_type][stock_code]
except KeyError:
self.securities[inv_type][stock_code] = []
self.securities[inv_type][stock_code].append(securities_data)
def out(self, inv_type, stock_code, data_obj, quantity):
secus = []
i = 0
while quantity > i:
i += 1
try:
secu = self.securities[inv_type][stock_code].pop(0)
except IndexError:
raise InventoryException('庫存不足: {} - {}'.format(inv_type, stock_code))
secus.append(secu)
return secus
def get_buying_price(self, stock_code, date_obj):
        # Build the price file name
file_name = '{}/{}.csv'.format(self.tse_data_dir, stock_code)
        # Read the file contents
f = open(file_name, 'r')
file_content = f.read()
f.close()
        # Find the row for the requested date
converter = RocDateConverter()
roc_date_str = converter.get_roc_date_by_datetime(date_obj)
search_re = '{},[^,]*,[^,]*,([^,]*),[^,]*,[^,]*,[^,]*,[^,]*,[^,]*'.format(roc_date_str)
search_result = re.search(search_re, file_content)
        # Extract the buying price
try:
buying_price = float(search_result.group(1))
except AttributeError:
return None
return buying_price
class InventoryException(Exception):
pass
def main():
inv = Inventory()
if __name__ == '__main__':
main()
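# ---------------------------------------------------------------------------
# Usage sketch (illustration only): assumes a crawled TWSE price CSV such as
# tse_crawler/data/2330.csv exists and covers the requested dates.
# ---------------------------------------------------------------------------
# inv = Inventory()
# inv.store('normal', '2330', datetime.datetime(2021, 1, 4), 2)        # buy 2 lots
# secus = inv.out('normal', '2330', datetime.datetime(2021, 1, 5), 1)  # sell 1 lot (FIFO)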
|
greenseedyo/stock_scripts
|
simulators/src/inventory.py
|
inventory.py
|
py
| 2,740 |
python
|
en
|
code
| 0 |
github-code
|
6
|
40128872754
|
#!/usr/bin/env python3
import sys
sys.setrecursionlimit(10**6)
INF = 10 ** 9 + 1 # sys.maxsize # float("inf")
MOD = 10 ** 9 + 7
def debug(*x):
print(*x, file=sys.stderr)
def solve(N, AS):
sum = 0
sumSq = 0
for i in range(N):
sum += AS[i]
sum %= MOD
sumSq += AS[i] * AS[i]
sumSq %= MOD
ret = (sum * sum - sumSq) % MOD
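    # The answer is (sum^2 - sumSq) / 2.  To halve ret under the odd modulus MOD:
    # if ret is even, divide directly; otherwise ret + MOD is even, and
    # (ret + MOD) // 2 is congruent to ret * 2^(-1) (mod MOD).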
if ret % 2 == 0:
return ret // 2
else:
return (ret + MOD) // 2
def main():
# parse input
N = int(input())
AS = list(map(int, input().split()))
print(solve(N, AS))
# tests
T1 = """
3
1 2 3
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
11
"""
T2 = """
4
141421356 17320508 22360679 244949
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
437235829
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
|
nishio/atcoder
|
abc177/c.py
|
c.py
|
py
| 1,341 |
python
|
en
|
code
| 1 |
github-code
|
6
|
30084867415
|
from .models import AdminUser
from django.shortcuts import render
from django.http import JsonResponse
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
import json
from datetime import date, datetime
# Create your views here.
def jsons(data = None, errorCode = 0, cookies = '', days = 0):
if data is None:
data = []
return JsonResponse({'errorCode': errorCode, 'data': data, 'cookies': cookies, 'days': days})
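# For reference, jsons([{'id': 1}], 0, {'user_id': 1}, 7) yields a JsonResponse
# whose body is the envelope:
#   {"errorCode": 0, "data": [{"id": 1}], "cookies": {"user_id": 1}, "days": 7}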
def adminLogin(request):
data = json.loads(request.body)
username = data['username']
password = data['password']
try:
admin = AdminUser.objects.get(username = username)
except AdminUser.DoesNotExist:
return jsons([], 404)
admin = authenticate(request, username=username, password=password)
if admin is not None:
# authenticated
login(request, admin)
admin = AdminUser.objects.get(username = username)
return jsons([dict(admin.body())], 0, {'user_id': admin.id, 'username': username})
else:
# not authenticated
return jsons([], 403)
# Logout
def adminLogout(request):
if request.user.is_authenticated:
logout(request)
return jsons([], 0)
return jsons([], 403)
@login_required
def adminEdit(request, pk):
try:
admin = AdminUser.objects.get(id = pk)
except AdminUser.DoesNotExist:
return jsons([], 404)
# change password
if request.method == 'PUT':
if request.user.id != admin.id:
return jsons([], 403)
data = json.loads(request.body)
admin.username = admin.username
admin.set_password(data['newpass'])
admin.save()
login(request, admin)
return jsons([dict(admin.body())])
def adminGetByUsername(request, username):
try:
admin = AdminUser.objects.get(username = username)
year = int(admin.joinDate.strftime("%Y"))
month = int(admin.joinDate.strftime("%m"))
day = int(admin.joinDate.strftime("%d"))
nowYear = int(datetime.now().strftime("%Y"))
nowMonth = int(datetime.now().strftime("%m"))
nowDay = int(datetime.now().strftime("%d"))
date1 = date(year, month, day)
date2 = date(nowYear, nowMonth, nowDay)
days = (date2 - date1).days
except AdminUser.DoesNotExist:
return jsons([], 404)
return jsons([dict(admin.body())], 0, '', days)
|
jeremyytann/BUAA-SE-LetStudy
|
Code/backend/admin_user/views.py
|
views.py
|
py
| 2,482 |
python
|
en
|
code
| 0 |
github-code
|
6
|
21934463311
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Text Emotion Detection."""
from dataclasses import dataclass
from transformers import AutoTokenizer, AutoModelWithLMHead
from transformers import pipeline
__all__ = (
"Emotion",
"EmotionDetectorT5",
"EmotionDetectorRoberta",
)
@dataclass
class Emotion:
"""Emotion."""
tag: str
emoji: str
def get_emotion_emoji(tag: str) -> str:
# Define the emojis corresponding to each sentiment
emoji_mapping = {
"disappointment": "😞",
"sadness": "😢",
"annoyance": "😠",
"neutral": "😐",
"disapproval": "👎",
"realization": "😮",
"nervousness": "😬",
"approval": "👍",
"joy": "😄",
"anger": "😡",
"embarrassment": "😳",
"caring": "🤗",
"remorse": "😔",
"disgust": "🤢",
"grief": "😥",
"confusion": "😕",
"relief": "😌",
"desire": "😍",
"admiration": "😌",
"optimism": "😊",
"fear": "😨",
"love": "❤️",
"excitement": "🎉",
"curiosity": "🤔",
"amusement": "😄",
"surprise": "😲",
"gratitude": "🙏",
"pride": "🦁"
}
return emoji_mapping.get(tag, "")
class EmotionDetectorT5:
"""Emotion Detector from T5 model."""
# https://huggingface.co/mrm8488/t5-base-finetuned-emotion/
# emotions = ["joy", "sad", "dis", "sup", "fea", "ang"]
# emotions = ["sadness", "joy", "love", "anger", "fear", "surprise"]
def __init__(self) -> None:
"""Init Sentiment Analysis."""
self.model_name = "mrm8488/t5-base-finetuned-emotion"
self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
self.model = AutoModelWithLMHead.from_pretrained(self.model_name)
def get(self, text: str) -> Emotion:
"""Check emotion from text string."""
input_ids = self.tokenizer.encode(text + '</s>', return_tensors='pt')
output = self.model.generate(input_ids=input_ids,
max_length=2)
dec = [self.tokenizer.decode(ids) for ids in output]
emo = dec[0].replace("<pad>", "").strip()
return Emotion(tag=emo, emoji=get_emotion_emoji(emo))
class EmotionDetectorRoberta:
"""Emotion Detector from Roberta."""
# https://huggingface.co/SamLowe/roberta-base-go_emotions
# emotions = [
# "admiration",
# "amusement",
# "anger",
# "annoyance",
# "approval",
# "caring",
# "confusion",
# "curiosity",
# "desire",
# "disappointment",
# "disapproval",
# "disgust",
# "embarrassment",
# "excitement",
# "fear",
# "gratitude",
# "grief",
# "joy",
# "love",
# "nervousness",
# "optimism",
# "pride",
# "realization",
# "relief",
# "remorse",
# "sadness",
# "surprise",
# "neutral",
# ]
def __init__(self) -> None:
"""Init."""
self.model_name = "SamLowe/roberta-base-go_emotions"
self.nlp = pipeline("sentiment-analysis", framework="pt", model=self.model_name)
def get(self, text: str) -> Emotion:
"""Get Emotion from text str."""
try:
results = self.nlp(text)
except RuntimeError as err:
print(f"len(text) = {len(text)}")
print(f"text: {text}")
raise(err)
data = {result['label']: result['score'] for result in results}
tag, score = "", 0
for key, value in data.items():
if value > score:
tag = key
score = value
return Emotion(
tag=tag,
emoji=get_emotion_emoji(tag=tag),
)
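# ---------------------------------------------------------------------------
# Usage sketch (illustration only): instantiating either detector downloads the
# corresponding HuggingFace model on first use.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    detector = EmotionDetectorRoberta()  # or EmotionDetectorT5()
    emotion = detector.get("I finally fixed that bug, what a relief!")
    print(emotion.tag, emotion.emoji)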
|
br8km/pynlp
|
core/emotion.py
|
emotion.py
|
py
| 3,869 |
python
|
en
|
code
| 0 |
github-code
|
6
|
179588656
|
# Guessing Game Code
# A guessing game; the original version had a problem with its return statement
def Guessing_Game(guess):
    # Secret number of Guessing_Game
    secret_number = 5
    guess_count = 0
    # The guess count starts at 0
    guess_limit = 3
    # The game lets the user guess 3 times
    while guess_count < guess_limit:
        guess_count += 1
        if guess == secret_number:
            print('You Have Won The Guess...')
            # After the user guesses correctly the program stops asking for more guesses
            return True
        else:
            print('You Have Failed The Guess...')
            # If attempts remain, ask the user for another guess
            if guess_count < guess_limit:
                guess = int(input('Guess: '))
    # The user spent all 3 attempts without finding the secret number
    return False
try:
    # If the user enters invalid input, the except block below prevents a crash
    guess = int(input('Guess: '))
    # The main guess input
    print(Guessing_Game(guess))
except ValueError:
    # If the user enters alphabetical words the program reports it as an error
    print('Invalid Error')
|
Amdesew/Guessing-Game
|
Guessing Game.py
|
Guessing Game.py
|
py
| 1,277 |
python
|
en
|
code
| 1 |
github-code
|
6
|
73630578109
|
"""# Node"""
import numpy as np
import random
def sort_nodes(nodes, shuffle=False, presort=False, reverse=False):
"""
Sorts nodes in an order in which they should be estimated. i.e. all of
node N's in-nodes should be estimated before estimating node N.
Parameters
----------
nodes : list of `Node`s
Nodes to sort.
shuffle : bool, default=False
Indicates that nodes should be shuffled before presorting.
presort : bool, default=False
Indicates that nodes should be presorted by number of in-nodes.
reverse : bool, default=False
Indicates that nodes should be presorted by number of in-nodes from
greatest to least. If `False`, presorting will sort nodes by in-nodes
from least to greatest.
Returns
-------
sorted_nodes : list of `Node`s
"""
def presort_nodes(nodes):
nodes = [node for node in nodes if not node.added]
if shuffle:
random.shuffle(nodes)
if presort:
            # Parenthesize each lambda so the conditional chooses between two
            # key functions; without the parentheses the whole expression parses
            # as a single lambda and sorted() would compare function objects.
            key = (
                (lambda node: -len(node.in_nodes)) if reverse
                else (lambda node: len(node.in_nodes))
            )
nodes = sorted(nodes, key=key)
return nodes
def sort_nodes_(nodes):
nodes = presort_nodes(nodes)
sorted_nodes = []
for node in nodes:
if not node.added:
if not all([n.added for n in node.in_nodes]):
sorted_nodes += sort_nodes_(node.in_nodes)
node.added = True
sorted_nodes.append(node)
return sorted_nodes
for node in nodes:
node.added = False
sorted_nodes = sort_nodes_(nodes)
for node in nodes:
del node.added
return sorted_nodes
class Node():
"""Node in Bayesian network
Parameters and attributes
-------------------------
in_nodes : list of `Node`s
Nodes on which this node depends
distribution : distribution, default=None
Distribution of the variable associated with this node. If this node
has `in_nodes`, this should be a `ConditionalDistribution` where the
given features correspond to the `in_nodes`. You may also fix this
node's value by setting `distribution` to a `float` or `int`.
name : str or None, default=None
For debugging.
Additional attributes
---------------------
frozen_rvs : np.array
Frozen sampling of random values from this node's distribution.
"""
def __init__(self, in_nodes=[], distribution=None, name=None):
self.in_nodes = in_nodes
self.distribution = distribution
self.name = name
self.frozen_rvs = None
def rvs(self, size=1):
"""
Sample random values from this node's distribution. If this node has
`frozen_rvs`, `frozen_rvs` will be returned. If this node has
`in_nodes`, it will sample from these to draw from conditional
distributions.
Parameters
----------
size : int, default=1
Number of random values to sample.
Returns
-------
random_values : (size,) np.array
"""
if isinstance(self.distribution, (int, float)):
self.frozen_rvs = np.array([self.distribution]*size)
if self.frozen_rvs is not None:
return self.frozen_rvs
assert self.distribution is not None
if self.in_nodes:
given = self.given_rvs(size)
conditional_dists = self.distribution.predict(given)
self.frozen_rvs = np.array([d.rvs() for d in conditional_dists])
else:
self.frozen_rvs = self.distribution.rvs(size)
return self.frozen_rvs
def given_rvs(self, size=1):
"""
Sample random values from the distributions of this node's `in_nodes`.
Parameters
----------
size : int, default=1
Returns
-------
random_values : (size x # in nodes) np.array
"""
assert self.in_nodes
return np.array([node.rvs(size) for node in self.in_nodes]).T
def clear_rvs(self):
"""
Clear `frozen_rvs`.
Returns
-------
self
"""
if self.frozen_rvs is not None:
self.frozen_rvs = None
[node.clear_rvs() for node in self.in_nodes]
return self
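# ---------------------------------------------------------------------------
# Minimal usage sketch (illustration only, not part of the library). The toy
# distribution classes below stand in for the distribution objects the package
# normally supplies: a root node needs an object with rvs(size); a child node
# needs an object whose predict(given) returns one distribution per row.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class ToyNormal:
        def __init__(self, loc=0.0, scale=1.0):
            self.loc, self.scale = loc, scale
        def rvs(self, size=None):
            # size=None returns a scalar, matching scipy's frozen distributions
            return np.random.normal(self.loc, self.scale, size)
    class ToyConditional:
        """Child distribution: Normal(mean = sum of parent values, sd = 1)."""
        def predict(self, given):
            return [ToyNormal(loc=row.sum()) for row in given]
    x = Node(distribution=ToyNormal(), name="x")               # root node
    y = Node(in_nodes=[x], distribution=ToyConditional(), name="y")
    for node in sort_nodes([y, x]):                            # parents first
        print(node.name, node.rvs(size=3))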
|
dsbowen/smoother
|
smoother/node.py
|
node.py
|
py
| 4,428 |
python
|
en
|
code
| 1 |
github-code
|
6
|
696671101
|
import tkinter as tk
import atten
import face_recognition
import cv2
import numpy as np
import csv
import os
from datetime import datetime
# Create the root window
root = tk.Tk()
root.overrideredirect(True)
# Set the window size and position
width = 700
height = root.winfo_screenheight()-100 # Get the screen height
# Calculate the x- and y-coordinates to center the window
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
x = int((screen_width/2) - (width/2))
y = int((screen_height/2) - (height/2))
root.geometry(f"{width}x{height}+{x}+{y}")
x_cord = 75
y_cord = 20
checker = 0
video_capture = cv2.VideoCapture(0)
Abhinav_image = face_recognition.load_image_file("Abhinav.jpg")
Abhinav_encoding = face_recognition.face_encodings(Abhinav_image)[0]
Khushi_image = face_recognition.load_image_file("Khushi.jpeg")
Khushi_encoding = face_recognition.face_encodings(Khushi_image)[0]
Yashika_image = face_recognition.load_image_file("Yashika.jpeg")
Yashika_encoding = face_recognition.face_encodings(Yashika_image)[0]
Jyotiraditya_image = face_recognition.load_image_file("Jyotiraditya.jpeg")
Jyotiraditya_encoding = face_recognition.face_encodings(Jyotiraditya_image)[0]
Alok_image = face_recognition.load_image_file("Alok.jpeg")
Alok_encoding = face_recognition.face_encodings(Alok_image)[0]
Shrey_image = face_recognition.load_image_file("Shrey.jpeg")
Shrey_encoding = face_recognition.face_encodings(Shrey_image)[0]
known_face_encoding = [
Abhinav_encoding,
Khushi_encoding,
Yashika_encoding,
Jyotiraditya_encoding,
Alok_encoding,
Shrey_encoding
]
known_faces_names = [
"Abhinav Maheshwari",
"Khushi Arora",
"Yashika",
"Jyotiraditya",
"Alok Raj",
"Shrey"
]
students = known_faces_names.copy()
face_locations = []
face_encodings = []
face_names = []
s=True
now = datetime.now()
current_date = now.strftime("%Y-%m-%d")
def mark_attendance():
atten.run(video_capture, s, known_face_encoding, known_faces_names, students,message2)
# Open the CSV file in read mode
# Set the background color to white
root.configure(bg="white")
# Add logo to the top left corner
logo_img = tk.PhotoImage(file="logo.png")
logo_img = logo_img.subsample(1)
# def run_jjcopy():
# root.destroy()
# os.system('python jjcopy.py')
# Create a label widget for the logo and pack it in the top left corner
logo_label = tk.Label(root, image=logo_img, bd=0)
logo_label.pack(side="left", anchor="nw", padx=10, pady=10)
# Add text to the right of the logo
text_label= tk.Label(root, text="ATTENDANCE RECOGNITION SYSTEM" ,bg="white" ,fg="blue" ,width=35 ,height=1,font=('Sitka Text Semibold', 18, 'bold underline'))
text_label.pack(pady=30, anchor="n")
line_canvas = tk.Canvas(root, height=1, width = 700,bg="black", highlightthickness=0)
line_canvas.create_line(0, 0, width, 0, fill="black")
line_canvas.place(x=75-x_cord,y=130-y_cord)
button = tk.Button(root, text="MARK ATTENDANCE", command=mark_attendance, width=40 ,height=1 ,fg="white" ,bg="black" ,font=('Sitka Text Semibold', 18, ' bold ') )
button.place(x=120-x_cord, y=150-y_cord)
lbl = tk.Label(root, text="Attendance list:", width=12 ,height=1 ,fg="green" ,bg="white" ,font=('Sitka Text Semibold', 18, ' bold ') )
lbl.place(x=120-x_cord, y=250-y_cord)
# # Add a line below the "Attendance list:" line
# line2_canvas = tk.Canvas(root, height=1, bg="black", highlightthickness=0)
# line2_canvas.create_line(0, 0, width, 0, fill="black")
# line2_canvas.place(x=120-x_cord, y=150-y_cord)
# message2 = tk.Label(root, height=screen_height*0.025, width=67, bg="#f0f4f9", fg="black", font=("Helvetica", 12), wrap="word", state="disabled")
# message2.place(x=120-x_cord, y=290-y_cord)
message2 = tk.Label(root, height=20, width=67, font=("Helvetica", 12))
message2.place(x=120-x_cord, y=290-y_cord)
# Add an exit button in the bottom left corner
exit_button = tk.Button(root, text="EXIT", width=10, height=1, bg="black", fg="white", font=('Sitka Text Semibold', 15, 'bold'), command=root.destroy)
exit_button.place(x=20, y=height-70)
root.mainloop()
|
khushiarora1793/attendancemanagement
|
temp.py
|
temp.py
|
py
| 4,274 |
python
|
en
|
code
| 0 |
github-code
|
6
|
26922931124
|
"""
Calls the entos executable.
"""
import string
from typing import Any, Dict, List, Optional, Tuple
from qcelemental.models import Result
from qcelemental.util import parse_version, safe_version, which
from ..exceptions import UnknownError
from ..util import execute, popen
from .model import ProgramHarness
class EntosHarness(ProgramHarness):
_defaults = {
"name": "entos",
"scratch": True,
"thread_safe": False,
"thread_parallel": True,
"node_parallel": False,
"managed_memory": True,
}
version_cache: Dict[str, str] = {}
class Config(ProgramHarness.Config):
pass
def found(self, raise_error: bool = False) -> bool:
return which('entos', return_bool=True, raise_error=raise_error, raise_msg='Please install via XXX')
def get_version(self) -> str:
self.found(raise_error=True)
which_prog = which('entos')
if which_prog not in self.version_cache:
with popen([which_prog, '--version']) as exc:
exc["proc"].wait(timeout=15)
self.version_cache[which_prog] = safe_version(exc["stdout"].split()[2])
return self.version_cache[which_prog]
def compute(self, input_data: 'ResultInput', config: 'JobConfig') -> 'Result':
"""
Run entos
"""
# Check if entos executable is found
self.found(raise_error=True)
# Check entos version
if parse_version(self.get_version()) < parse_version("0.5"):
raise TypeError("entos version '{}' not supported".format(self.get_version()))
# Setup the job
job_inputs = self.build_input(input_data, config)
# Run entos
exe_success, proc = self.execute(job_inputs)
# Determine whether the calculation succeeded
if exe_success:
# If execution succeeded, collect results
result = self.parse_output(proc["outfiles"], input_data)
return result
else:
# Return UnknownError for error propagation
return UnknownError(proc["stderr"])
def execute(self,
inputs: Dict[str, Any],
extra_infiles: Optional[Dict[str, str]] = None,
extra_outfiles: Optional[List[str]] = None,
extra_commands: Optional[List[str]] = None,
scratch_name: Optional[str] = None,
scratch_messy: bool = False,
timeout: Optional[int] = None) -> Tuple[bool, Dict[str, Any]]:
"""
For option documentation go look at qcengine/util.execute
"""
# Collect all input files and update with extra_infiles
infiles = inputs["infiles"]
if extra_infiles is not None:
infiles.update(extra_infiles)
# Collect all output files and extend with with extra_outfiles
outfiles = ["dispatch.out"]
if extra_outfiles is not None:
outfiles.extend(extra_outfiles)
# Replace commands with extra_commands if present
commands = inputs["commands"]
if extra_commands is not None:
commands = extra_commands
# Run the entos program
exe_success, proc = execute(commands,
infiles=infiles,
outfiles=outfiles,
scratch_name=scratch_name,
scratch_directory=inputs["scratch_directory"],
scratch_messy=scratch_messy,
timeout=timeout)
# Entos does not create an output file and only prints to stdout
proc["outfiles"]["dispatch.out"] = proc["stdout"]
return exe_success, proc
def build_input(self, input_model: 'ResultInput', config: 'JobConfig',
template: Optional[str] = None) -> Dict[str, Any]:
# Write the geom xyz file with unit au
xyz_file = input_model.molecule.to_string(dtype='xyz', units='Angstrom')
# Create input dictionary
if template is None:
structure = {'structure': {'file': 'geometry.xyz'}}
dft_info = {
'xc': input_model.model.method,
'ao': input_model.model.basis.upper(),
'df_basis': input_model.keywords["df_basis"].upper(),
'charge': input_model.molecule.molecular_charge
}
print_results = {'print': {'results': True}}
if input_model.driver == 'energy':
input_dict = {'dft': {**structure, **dft_info}, **print_results}
# Write gradient call if asked for
elif input_model.driver == 'gradient':
input_dict = {'gradient': {**structure, 'dft': {**dft_info}}, **print_results}
else:
raise NotImplementedError('Driver {} not implemented for entos.'.format(input_model.driver))
# Write input file
input_file = self.write_input_recursive(input_dict)
input_file = "\n".join(input_file)
else:
# Some of the potential different template options
# (A) ordinary build_input (need to define a base template)
# (B) user wants to add stuff after normal template (A)
# (C) user knows their domain language (doesn't use any QCSchema quantities)
# # Build dictionary for substitute
# sub_dict = {
# "method": input_model.model.method,
# "basis": input_model.model.basis,
# "df_basis": input_model.keywords["df_basis"].upper(),
# "charge": input_model.molecule.molecular_charge
# }
# Perform substitution to create input file
str_template = string.Template(template)
input_file = str_template.substitute()
return {
"commands": ["entos", "-n", str(config.ncores), "dispatch.in"],
"infiles": {
"dispatch.in": input_file,
"geometry.xyz": xyz_file
},
"scratch_directory": config.scratch_directory,
"input_result": input_model.copy(deep=True)
}
def write_input_recursive(self, d: Dict[str, Any]) -> List:
input_file = []
for key, value in d.items():
if isinstance(value, dict):
input_file.append(key + '(')
rec_input = self.write_input_recursive(value)
indented_line = map(lambda x: " " + x, rec_input)
input_file.extend(indented_line)
input_file.append(')')
else:
if isinstance(value, str):
input_file.append("{0} = '{1}'".format(key, value))
elif isinstance(value, bool):
input_file.append("{0} = {1}".format(key, str(value).lower()))
else:
input_file.append("{0} = {1}".format(key, value))
return input_file
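    # For illustration, a nested dictionary such as
    #   {'dft': {'xc': 'pbe', 'ao': '6-31G'}, 'print': {'results': True}}
    # is rendered by write_input_recursive (joined with newlines) as:
    #   dft(
    #    xc = 'pbe'
    #    ao = '6-31G'
    #   )
    #   print(
    #    results = true
    #   )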
def parse_output(self, outfiles: Dict[str, str], input_model: 'ResultInput') -> 'Result':
output_data = {}
properties = {}
# Parse the output file, collect properties and gradient
output_lines = outfiles["dispatch.out"].split('\n')
gradients = []
natom = len(input_model.molecule.symbols)
for idx, line in enumerate(output_lines):
fields = line.split()
if fields[:1] == ["energy:"]:
properties["scf_total_energy"] = float(fields[-1])
elif fields[:2] == ["Molecular", "Dipole:"]:
properties["scf_dipole_moment"] = [float(x) for x in fields[2:5]]
elif fields[:3] == ["SCF", "converged", "in"]:
properties["scf_iterations"] = int(fields[3])
elif fields == ["Gradient", "(hartree/bohr):"]:
# Gradient is stored as (dE/dx1,dE/dy1,dE/dz1,dE/dx2,dE/dy2,...)
for i in range(idx + 2, idx + 2 + natom):
grad = output_lines[i].strip('\n').split()[1:]
gradients.extend([float(x) for x in grad])
if input_model.driver == 'gradient':
if len(gradients) == 0:
raise ValueError('Gradient not found.')
else:
output_data["return_result"] = gradients
# Replace return_result with final_energy if gradient wasn't called
if "return_result" not in output_data:
if "scf_total_energy" in properties:
output_data["return_result"] = properties["scf_total_energy"]
else:
raise KeyError("Could not find SCF total energy")
output_data["properties"] = properties
output_data['schema_name'] = 'qcschema_output'
output_data['success'] = True
return Result(**{**input_model.dict(), **output_data})
|
ChemRacer/QCEngine
|
qcengine/programs/entos.py
|
entos.py
|
py
| 8,943 |
python
|
en
|
code
| null |
github-code
|
6
|
71746011387
|
from gui import GUI, run
from threading import Thread
from PyQt4.QtCore import QObject, SIGNAL
import socket
import pickle
'''
Host and port for easy access
'''
HOST = "192.168.1.181"
PORT = 12336
class Client(QObject):
    '''Based on class material
'''
def __init__(self, port, host, username, gui):
print("Inicializando cliente...")
super().__init__()
        # Initialize the client's main socket
self.host = host
self.port = port
self.username = username
self.gui = gui
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.connect_to_server()
self.listen()
except:
print("Conexion terminada")
self.client_socket.close()
exit()
def connect_to_server(self):
self.client_socket.connect((self.host, self.port))
print("Cliente conectado exitosamente al servidor...")
    # The listen() method starts the thread that will listen for messages from
    # the server. Using a separate thread for this is useful because it allows
    # asynchronous communication: the server can send us messages without the
    # client having to initiate a request.
def listen(self):
thread = Thread(target=self.listen_thread, daemon=True)
thread.start()
    # The send() method sends messages to the server. It implements the same
    # communication protocol mentioned above, i.e. prepending 4 bytes to each
    # message that indicate the length of the message being sent.
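    # For example, send("hola") transmits b"\x00\x00\x00\x04" + b"hola": a 4-byte
    # big-endian length prefix followed by the encoded message.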
def send(self, msg):
msg_bytes = msg.encode()
msg_length = len(msg_bytes).to_bytes(4, byteorder="big")
self.client_socket.send(msg_length + msg_bytes)
    # listen_thread() is launched as the thread in charge of listening to the
    # server. It first receives the 4 bytes that indicate the message length and
    # then reads the rest of the message in 256-byte blocks until it has been
    # received in full.
def listen_thread(self):
while True:
response_bytes_length = self.client_socket.recv(4)
response_length = int.from_bytes(response_bytes_length,
byteorder="big")
response = b""
            # Keep receiving data until we reach the total length announced in
            # the first 4 bytes received.
while len(response) < response_length:
response += self.client_socket.recv(256)
try:
self.handle_command(response.decode())
            except UnicodeDecodeError:  # pickled payload (a list)
decoded = pickle.loads(response)
if decoded[0] == "friends":
self.handle_command("gui;start", extra=decoded[1:])
elif decoded[0] == "chat":
self.handle_command("chat;start", extra=decoded[1:])
def handle_command(self, message, extra=None):
aux = message.split(";")
if aux[0] == "CHAT":
chat = aux[1]
participants = aux[2:]
if self.username in participants:
self.emit(SIGNAL("send_chat"), participants, chat)
if aux[0] == "chat":
if aux[1] == "start":
print("Comenzando chat...")
i = extra.index("friends")
messages = extra[:i]
participants = extra[i + 1:]
                # skip if this user does not belong to the chat
if self.username not in participants:
return
self.emit(SIGNAL("start_chat"), messages, participants)
elif aux[1] == "close":
participants = ";".join(aux[2:])
self.emit(SIGNAL("close_chat"), participants)
elif aux[0] == "error":
if "signup" in message:
self.gui.login.set_message("Username not available")
self.gui.login.button_signup_done.hide()
self.gui.login.button_aux.hide()
elif "login" in message:
self.gui.login.set_message("Incorrect username / password")
self.gui.login.button_login_done.hide()
self.gui.login.button_aux.hide()
elif "friend" in message: # usuario no encontrado
self.gui.programillo.set_message("Usuario no encontrado")
self.gui.programillo.label_find.setText("")
elif aux[1] == "game":
if aux[2] == "start":
self.emit(SIGNAL("start_game"), False)
elif aux[0] == "success":
if "signup" in message:
username = message.split(";")[2]
self.gui.username = username
self.send("info;{};friends".format(self.gui.username))
elif "login" in message:
username = message.split(";")[2]
self.gui.username = username
self.send("info;{};friends".format(self.gui.username))
elif aux[1] == "friend":
self.emit(SIGNAL("add_friend"))
elif aux[1] == "game":
if aux[2] == "start":
participants = aux[3:]
self.emit(SIGNAL("start_game"), True, participants)
elif aux[0] == "added":
friend = message.split(";")[1]
new_friend = message.split(";")[2]
            if friend == self.username: # new_friend added this user
self.gui.programillo.label_find.setText(new_friend)
self.gui.programillo.add_friend()
elif aux[0] == "gui":
if "start" in message:
i = extra.index("games")
friends = extra[:i]
games = extra[i + 1:]
self.gui.friends = friends
self.gui.games = games
elif aux[0] == "draw":
if aux[1] == "line":
x1 = float(aux[2])
y1 = float(aux[3])
x2 = float(aux[4])
y2 = float(aux[5])
r = int(aux[6])
g = int(aux[7])
b = int(aux[8])
linewidth = int(aux[9])
self.emit(SIGNAL("draw_line"), x1, y1, x2, y2, r, g, b, linewidth)
elif aux[1] == "free":
x1 = float(aux[2])
y1 = float(aux[3])
x2 = float(aux[4])
y2 = float(aux[5])
r = int(aux[6])
g = int(aux[7])
b = int(aux[8])
linewidth = int(aux[9])
self.emit(SIGNAL("draw_free"), x1, y1, x2, y2, r, g, b, linewidth)
elif aux[1] == "curve":
x1 = float(aux[2])
y1 = float(aux[3])
x2 = float(aux[4])
y2 = float(aux[5])
x3 = float(aux[6])
y3 = float(aux[7])
r = int(aux[8])
g = int(aux[9])
b = int(aux[10])
linewidth = int(aux[11])
self.emit(SIGNAL("draw_curve"), x1, y1, x2, y2, x3, y3, r, g, b, linewidth)
elif aux[1] == "polygon":
template = aux[2]
x = float(aux[3])
y = float(aux[4])
r = int(aux[5])
g = int(aux[6])
b = int(aux[7])
linewidth = int(aux[8])
self.emit(SIGNAL("draw_polygon"), template, x, y, r, g, b, linewidth)
elif aux[0] == "game":
if aux[1] == "add":
participants = aux[2:]
self.emit(SIGNAL("add_game"), ";".join(participants))
elif aux[1] == "close":
self.emit(SIGNAL("no_game"))
elif aux[1] == "offline":
user = aux[2]
self.emit(SIGNAL("user_offline"), user)
elif aux[1] == "send":
chat = aux[2:]
for c in chat:
self.emit(SIGNAL("send_game_chat"), c)
elif aux[1] == "start_round":
self.emit(SIGNAL("start_round"), aux[-1], aux[-2])
elif aux[1] == "guess":
self.emit(SIGNAL("game_guess"), aux[2])
elif aux[1] == "save_image":
self.emit(SIGNAL("everybody_guessed"))
class MiGUI(GUI):
def __init__(self):
super().__init__()
def connect_login(self, username, password): # password encoded
self.login.button_aux.show()
if self.client is None:
self.client = Client(PORT, HOST, username, self)
self.client.send("login;{};{}".format(username, password))
def connect_signup(self, username, password):
self.login.button_aux.show()
if self.client is None:
self.client = Client(PORT, HOST, username, self)
self.client.send("signup;{};{}".format(username, password))
if __name__ == "__main__":
run(MiGUI)
|
isidoravs/iic2233-2016-2
|
Tareas/T06/client/client.py
|
client.py
|
py
| 9,169 |
python
|
en
|
code
| 0 |
github-code
|
6
|
70732574587
|
import os
from importlib.machinery import SourceFileLoader
from setuptools import find_packages, setup
from typing import List
module_name = 'juldate'
module = SourceFileLoader(
module_name,
os.path.join(module_name, '__init__.py'),
).load_module()
def parse_requirements(filename: str) -> List[str]:
requirements = list()
with open(filename) as file:
for line in file:
requirements.append(line.rstrip())
return requirements
setup(
name=module_name,
version=module.__version__,
author=module.__author__,
author_email=module.__email__,
url='https://github.com/churilov-ns/juldate.git',
license=module.__license__,
description=module.__doc__,
long_description=open('README.md').read(),
classifiers=[
'Intended Audience :: Science/Research',
'Natural Language :: Russian',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Astronomy',
],
platforms='all',
python_requires='>=3.8',
packages=find_packages(exclude=['tests']),
install_requires=parse_requirements('requirements.txt'),
extras_require={'dev': parse_requirements('requirements.dev.txt')},
include_package_data=True,
)
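# Typical local usage (illustrative, not part of the original file):
#     pip install -e .     # editable install for development
#     python -m build      # build an sdist/wheel (requires the "build" package)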
|
churilov-ns/juldate
|
setup.py
|
setup.py
|
py
| 1,333 |
python
|
en
|
code
| 0 |
github-code
|
6
|
32467456793
|
from ting_file_management.file_management import txt_importer
import sys
def process(path_file, instance):
"""Aqui irá sua implementação"""
current_path = None
for item in range(len(instance)):
if instance.search(item)["nome_do_arquivo"] == path_file:
current_path = instance.search(item)
if current_path is None:
data = {
"nome_do_arquivo": path_file,
"qtd_linhas": len(txt_importer(path_file)),
"linhas_do_arquivo": txt_importer(path_file)
}
instance.enqueue(data)
sys.stdout.write(f"{data}")
def remove(instance):
"""Aqui irá sua implementação"""
if len(instance):
path_file = instance.dequeue()["nome_do_arquivo"]
sys.stdout.write(f"Arquivo {path_file} removido com sucesso\n")
sys.stdout.write("Não há elementos\n")
def file_metadata(instance, position):
"""Aqui irá sua implementação"""
try:
sys.stdout.write(f"{instance.search(position)}\n")
except IndexError:
sys.stderr.write("Posição inválida\n")
|
janaolive/estrutura_de_dados
|
ting_file_management/file_process.py
|
file_process.py
|
py
| 1,087 |
python
|
pt
|
code
| 0 |
github-code
|
6
|
18917415762
|
from typing import Any, Callable, TypeVar, cast
import pluggy
F = TypeVar("F", bound=Callable[..., Any])
hookimpl = cast(Callable[[F], F], pluggy.HookimplMarker("ape"))
hookspec = pluggy.HookspecMarker("ape")
plugin_manager = pluggy.PluginManager("ape")
"""A manager responsible for registering and accessing plugins (singleton)."""
class PluginType:
"""
The base plugin class in ape. There are several types of plugins available in ape, such
as the :class:`~ape.plugins.config.Config` or :class:`~ape.plugins.network.EcosystemPlugin`.
Each one of them subclass this class. It is used to namespace the plugin hooks for the
registration process, and to ensure overall conformance to type interfaces as much as possible.
"""
|
ApeWorX/ape
|
src/ape/plugins/pluggy_patch.py
|
pluggy_patch.py
|
py
| 752 |
python
|
en
|
code
| 736 |
github-code
|
6
|
26058478061
|
from django.contrib.auth.views import LogoutView
from django.urls import path
from .views import *
urlpatterns = [
path('login/', CustomLoginView.as_view(), name='login'),
    path('logout/', LogoutView.as_view(next_page='login'), name='logout'),  # next_page redirects the user to the "login" page after logging out
path('', TaskList.as_view(), name='tasks'),
path('register/', RegisterPage.as_view(), name='register'),
path('task/<int:pk>/', TaskDetail.as_view(), name='task'),
path('task-create/', TaskCreate.as_view(), name='task-create'),
path('task-update/<int:pk>', TaskUpdate.as_view(), name='task-update'),
path('task-delete/<int:pk>', DeleteView.as_view(), name='task-delete'),
]
|
ianvv/todo-app-django
|
todo_list/base/urls.py
|
urls.py
|
py
| 812 |
python
|
uk
|
code
| 0 |
github-code
|
6
|
38586042024
|
from flask import Flask,render_template,json,flash,request,session,redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
with open('config.json', 'r') as c:
parameter = json.load(c)["parameter"]
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = parameter['local_uri']
app.secret_key = 'super-secret-key'
db = SQLAlchemy(app)
class Contact(db.Model):
sno = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(80), nullable=False)
email = db.Column(db.String(20), nullable=False)
phone = db.Column(db.String(12), nullable=False)
message = db.Column(db.String(120), nullable=False)
date = db.Column(db.String(12), nullable=True)
@app.route('/')
def home():
return render_template('index.html',parameter=parameter)
@app.route("/contact", methods = ['GET', 'POST'])
def contact():
if(request.method=='POST'):
name = request.form.get('name')
email = request.form.get('email')
phone = request.form.get('phone')
message = request.form.get('message')
entry = Contact(name=name, email = email, phone = phone, message = message, date= datetime.now())
db.session.add(entry)
db.session.commit()
flash("Thank You We will get back to you soon...","success")
return render_template('index.html',parameter=parameter)
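# Minimal run block for local testing (a sketch, not from the original file);
# the app may just as well be launched with the "flask run" CLI.
if __name__ == "__main__":
    app.run(debug=True)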
|
199-cmd/FlaskDemo
|
FlaskDemo/main.py
|
main.py
|
py
| 1,402 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73510601787
|
from typing import List
class Solution:
def findRightInterval(self, intervals: List[List[int]]) -> List[int]:
ans = [-1] * len(intervals)
start = []
for i,arr in enumerate(intervals):
start.append([arr[0],i])
start.sort()
for i,interval in enumerate(intervals):
low = 0
high = len(intervals)
while low < high:
mid = low + (high - low) // 2
if start[mid][0] >= interval[1]:
high = mid
ans[i] = start[mid][1]
else:
low = mid + 1
return ans
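# Worked example (illustrative): for intervals = [[3, 4], [2, 3], [1, 2]] the
# sorted start list becomes [[1, 2], [2, 1], [3, 0]], and the binary search
# yields ans = [-1, 0, 1] (no start >= 4; start 3 has index 0; start 2 has index 1).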
|
yonaSisay/a2sv-competitive-programming
|
find-right-interval.py
|
find-right-interval.py
|
py
| 643 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36007020776
|
from bs4 import BeautifulSoup
import requests
import pandas as pd
# Downloading IMDB feature film and MyAnimeList popularity data
headers = {'Accept-Language': 'en-US,en;q=0.8'}
url1 = 'https://www.imdb.com/search/title/?title_type=feature&sort=num_votes,desc'
url2 = 'https://myanimelist.net/topanime.php?type=bypopularity'
response1 = requests.get(url1,headers=headers)
response2 = requests.get(url2,headers=headers)
soup1 = BeautifulSoup(response1.text, "html.parser")
soup2 = BeautifulSoup(response2.text, "html.parser")
movie_title = []
link = []
year = []
certificate = []
movie_runtime = []
genre = []
anime_title = []
anime_link = []
type = []
anime_runtime = []
members = []
for t in soup1.select('h3.lister-item-header a'):
movie_title.append(t.get_text())
link.append("https://www.imdb.com" + t.attrs.get('href') + "?ref_=adv_li_tt")
for t in soup1.select('h3.lister-item-header span.lister-item-year'):
year.append(t.get_text().replace("(","").replace(")",""))
for t in soup1.select('p.text-muted span.certificate'):
certificate.append(t.get_text())
for t in soup1.select('p.text-muted span.runtime'):
movie_runtime.append(t.get_text())
for t in soup1.select('p.text-muted span.genre'):
genre.append(t.get_text().replace("\n","").replace(" ",""))
for t in soup2.select('h3.anime_ranking_h3 a.hoverinfo_trigger'):
anime_title.append(t.get_text())
anime_link.append(t.attrs.get('href'))
for t in soup2.select('div.information'):
info = t.get_text().strip().split('\n')
type.append(info[0].strip())
anime_runtime.append(info[1].strip())
members.append(info[2].strip())
df1 = pd.DataFrame(
{'movie title': movie_title,
'link': link,
'year': year,
'certificate': certificate,
'runtime': movie_runtime,
'genre': genre}
)
df2 = pd.DataFrame(
{'anime title': anime_title,
'anime link': anime_link,
'type': type,
'anime runtime': anime_runtime,
'members': members}
)
print(df1.head())
print(df2.head())
df1.to_csv('moviesrating.csv', index=False)
df2.to_csv('animerating.csv', index=False)
|
ilovegaming42069/DataScienceExercise
|
datascience.py
|
datascience.py
|
py
| 2,188 |
python
|
en
|
code
| 0 |
github-code
|
6
|
73363574908
|
class ObjList:
def __init__(self, data):
self.__next = None
self.__prev = None
self.__data = data
def set_next(self, obj):
self.__next = obj
def set_prev(self, obj):
self.__prev = obj
def get_next(self):
return self.__next
def get_prev(self):
return self.__prev
def set_data(self, data):
self.__data = data
def get_data(self):
return self.__data
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def add_obj(self, obj):
"""добавление нового объекта obj класса ObjList в конец связного списка"""
if self.head is None and self.tail is None:
self.head = self.tail = obj
else:
self.tail.set_next(obj)
obj.set_prev(self.tail)
self.tail = obj
def remove_obj(self):
"""удаление крайнего объекта из связного списка"""
if self.head == self.tail:
self.head = None
self.tail = None
else:
self.tail = self.tail.get_prev()
self.tail.set_next(None)
def get_data(self) -> list:
"""получение списка из строк локального свойства __data всех объектов связного списка"""
pointer, result = self.head, []
while pointer:
result.append(pointer.get_data())
pointer = pointer.get_next()
return result
lst = LinkedList()
lst.add_obj(ObjList("данные 1"))
lst.add_obj(ObjList("данные 2"))
lst.add_obj(ObjList("данные 3"))
res = lst.get_data()
print(res)
lst = LinkedList()
lst.add_obj(ObjList("данные 1"))
lst.add_obj(ObjList("данные 2"))
lst.remove_obj()
res = lst.get_data()
print(res)
lst.add_obj(ObjList(12))
res = lst.get_data()
print(res)
lst.remove_obj()
lst.remove_obj()
lst.remove_obj()
res = lst.get_data()
print(res)
|
expo-lux/stepik_balakirev_python_OOP
|
task_2_1_10_access.py
|
task_2_1_10_access.py
|
py
| 2,051 |
python
|
ru
|
code
| 0 |
github-code
|
6
|
37683950634
|
import math
#import numbertheory
from numbertheory import *
#import multiprocessing
from multiprocessing import Pool
import gc
import numpy as np  # needed by the polynomial helpers below (make_bin_poly, mulpoly, addpoly)
#from numpy.polynomial import polynomial as poly
####### HELPER FUNCTIONS #######
# performs the extended euclidean algorithm
# returns info to help calculate inverses
def egcd(a, b):
x, y, u, v = 0, 1, 1, 0
while a != 0:
q, r = b // a, b % a
m, n, = x - u*q, y-v*q
b, a, x, y, u, v = a, r, u, v, m, n
gcd = b
return gcd, x, y
# returns the modular inverse of a mod m if a is coprime to m
# returns the gcd of a and m if a is not coprime to m
def modinv(a, m):
a = a%m
gcd, x, y = egcd(a, m)
if gcd != 1: return False, gcd
else: return True, x % m
# returns whether a is a quadratic residue mod something
def isQR(a, mod):
squareList = list()
for i in range(0, mod):
squareList.append(i**2 % mod)
return a in squareList
# returns a list of the quadratic residues mod something
def listQRs(mod):
squareList = list()
for i in range(0, mod):
squareList.append(i**2 % mod)
return squareList
# returns the modular square root of a number if it exists
def sqrtMod(a, mod):
if not isQR(a, mod): return []
answerList = list()
singleList = list(range(0, mod))
squareList = listQRs(mod)
for i in range(0, mod):
if squareList[i] == a:
answerList.append(singleList[i])
return answerList
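# Worked example (illustrative): mod 7 the squares are [0, 1, 4, 2, 2, 4, 1],
# so isQR(4, 7) is True and sqrtMod(4, 7) returns [2, 5].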
# credit to martin-thoma.com
def legendre_symbol(a, p):
if a >= p or a < 0:
return legendre_symbol(a % p, p)
elif a == 0 or a == 1:
return a
elif a == 2:
if p%8 == 1 or p%8 == 7:
return 1
else:
return -1
elif a == p-1:
if p%4 == 1:
return 1
else:
return -1
elif not isPrime(a):
factors = primeFactors(a)
product = 1
for pi in factors:
product *= legendre_symbol(pi, p)
return product
else:
if ((p-1)/2)%2==0 or ((a-1)/2)%2==0:
return legendre_symbol(p, a)
else:
return (-1)*legendre_symbol(p, a)
# returns a list of prime factors
# credit to stackoverflow.com/questions/16996217/prime-factorization-list
def primeFactors(n):
primes = list()
d = 2
while d*d <= n:
while (n%d) == 0:
primes.append(d)
n//=d
d+=1
if n>1:
primes.append(n)
return primes
# creates a proper set of primes involved in the prime factorization of n
# each member is a double: (base, power)
def groupPrimes(n):
groups = list()
primes = primeFactors(n)
distincts = list(set(primes))
distincts.sort()
for i in distincts:
temp = 0
for j in primes:
if j == i:
temp += 1
groups.append((i, temp))
return groups
# to solve systems of congruences - credit to rosetta code
def chinese_remainder(mods, exes, lena):
    prod = 1
    sm = 0
    for i in range(lena): prod *= mods[i]
    for i in range(lena):
        p = prod // mods[i]  # integer division keeps the arithmetic exact for large moduli
        sm += exes[i] * modinv(p, mods[i])[1] * p
    return sm % prod
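# Worked example (illustrative): x = 2 (mod 3), x = 3 (mod 5), x = 2 (mod 7)
# gives chinese_remainder([3, 5, 7], [2, 3, 2], 3) == 23.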
# Fermat primality test - credit to codeproject.com
def isPrime(number):
import random
''' if number != 1 '''
if (number > 1):
''' repeat the test few times '''
for time in range(3):
''' Draw a RANDOM number in range of number ( Z_number ) '''
randomNumber = random.randint(2, number)-1
''' Test if a^(n-1) = 1 mod n '''
if ( pow(randomNumber, number-1, number) != 1 ):
return False
return True
else:
''' case number == 1 '''
return False
homework = 495960937377360604920383605744987602701101399399359259262820733407167
def multE_Factor(n):
# point = (1, 2)
# jobs = []
# for i in range(15):
# factors = list()
# print("Curve", i, "\n")
# p = multiprocessing.Process(target = E_Factor(factors, i, n))
# jobs.append(p)
# p.start()
# del factors[:]
outcomes = list()
for i in range(15): outcomes.append([])
pool = Pool(processes = 15)
results = [pool.apply(E_Factor, args = (outcomes[i], i, n)) for i in range(5)]
print(results)
print(outcomes)
# Executes multiple factoring processes simultaneously
def Mult_E_Factor(n):
    pool = Pool(processes=20)
    # try several different curve parameters in parallel
    results = pool.starmap(E_Factor_Manager, [(a, n) for a in range(20)])
    pool.close()
    pool.join()
    print(results)
# checks the list generated by E_Factor and reruns it as necessary
def E_Factor_Manager(a, n):
factors = []
E_Factor(factors, a, n)
#print(factors)
finalFactors = []
for i in range(len(factors)):
if not isPrime(factors[i]):
if factors[i] > 100:
                finalFactors.extend(E_Factor_Manager(a + 1, factors[i]))  # retry large composites with a different curve parameter
else: finalFactors.extend(primeFactors(factors[i]))
else: finalFactors.append(factors[i])
finalFactors.sort()
#print(finalFactors)
return finalFactors
# creates a list of elliptic curve generated factors of an number
def E_Factor(factors, a, n):
gc.collect()
print("Factor of", n)
if isPrime(n):
factors.append(n)
return
point = (1,3,1)
curve = findB(point, a, n)
factor = curve.factor(point, math.ceil(math.log(n)))
if factor != False:
factors.append(factor)
E_Factor(factors, a, n//factor)
if factor == False:
factors.append(n)
#print(n)
# finds value b and creates a curve, given a point, a mod, and an a
def findB(point, a, mod):
b = 0
while True:
testCurve = EllipticCurve(a, b, mod)
if testCurve.onCurve(point):
testCurve.printme()
return testCurve
b += 1
####### ELLIPTIC CURVE CLASS #######
class EllipticCurve:
def __init__(self, a, b, mod):
self.a = a
self.b = b
self.mod = mod
def printme(self):
print("E: y^2 = x^3 +", self.a, "x +", self.b, "( mod", self.mod, ")")
def neg(self, point):
if point == (0, 1, 0): return (0, 1, 0)
return point[0], (-1 * point[1]) % self.mod, 1
def onCurve(self, point):
if len(point) < 3:
print("Point must be a triple.")
return
if point[2] == 0: return True
x, y = point[0], point[1]
if y in sqrt_mod_m(x**3 + self.a*x + self.b, self.mod):
return True
return False
def add(self, point1, point2):
if len(point1) < 3 or len(point2) < 3:
print("Point must be a triple.")
return
# anything times the identity is itself
if point1[2] == 0: return point2
if point2[2] == 0: return point1
# the identity times the identity is itself
if point1[2] == 0 and point2[2] == 0: return (0, 1, 0)
if point1 != point2:
if modinv(point1[0] - point2[0], self.mod)[0] == False:
return (0, modinv(point2[0] - point1[0], self.mod)[1], 2)
if point1[0] != point2[0]:
slope = (point2[1] - point1[1]) * modinv(point2[0] - point1[0], self.mod)[1]
else: return (0, 1, 0)
if point1 == point2:
if modinv((2*point1[1])%self.mod, self.mod)[0] == False:
return (0, modinv(2*point1[1], self.mod)[1], 2)
slope = (3*(point1[0]**2) + self.a) * modinv(2*point1[1], self.mod)[1]
x3 = (slope**2 - point1[0] - point2[0]) % self.mod
y3 = (slope * (point1[0] - x3) - point1[1]) % self.mod
return (x3, y3, 1)
def mult(self, point, k):
if k == 1: return point
sum = (0, 1, 0)
for i in range(k):
sum = self.add(sum, point)
return sum
# recursive repeated addition via doubling
# doubles until next doubling would exceed k
# then calls itself on the difference until 1 left
def multP(self, point, k):
if k == 0: return (0, 1, 0)
if k == 1: return point
else:
temp = point
doubles = 0
while True:
doubles += 1
if 2**doubles >= k:
doubles -= 1
break
temp = self.add(temp, temp)
if temp[2] == 2: return temp
leftovers = k - 2**doubles
temp = self.add(temp, self.multP(point, leftovers))
if temp[2] == 2: return temp
return temp
# this works, slowly
def pointOrder(self, point):
answer = (0, 1, 0)
count = 0
while True:
answer = self.add(answer, point)
#print(count, answer, test.onCurve(answer))
count += 1
if answer == (0, 1, 0): break
return count
def bsgsGroupOrder(self, point):
p = self.mod # set the constants
m = p + 1 - math.ceil(2*(p**(1/2)))
z = math.ceil(2*(p**(1/4)))
m, z = int(m), int(z)
mP = self.multP(point,m)
babyList = list()
giantList = list()
answerList = list()
matchList = list()
for i in range(z): # create the lists
babyList.append(self.multP(point,i))
giantList.append(self.neg(self.add(mP, self.multP(point,i*z))))
for i in babyList: # find the matches
for j in giantList:
if i == j:
answerList.append(m + babyList.index(i) + giantList.index(j)*z)
matchList.append((i, j))
for i in range(len(babyList)): print(babyList[i], "\t", giantList[i])
print("ANSWER:")
for i in matchList: print(i) # print results
return answerList
def pohlig_hellman(self, P, Q):
originalQ = Q
N = self.pointOrder(P)
factors = groupPrimes(N) # groupPrimes() returns a list of doubles where
# the first element of each double is the base
mods = list() # and the second is the exponent, so we can
exes = list() # refer to each as necessary
for n in factors:
mods.append(n[0]**n[1])
for q in factors: # for each component of the modulus N
print("\n***********************")
T = list()
Ks = list()
Q = originalQ # reset Q
e = q[1] # the power of the prime factor
for j in range(q[0]):
                T.append(self.multP(P, j * (N // q[0])))  # create T list (integer multiplier)
print("T:", T)
for i in range(1, e+1): # for all elements of the base-k
# expansion of current q
                candidate = self.multP(Q, N // (q[0]**i))
K = T.index(candidate) # find the candidate in T
Ks.append(K) # add to the list of ks ()
# then update Q
Q = self.add(Q, self.neg(self.multP(P, K*q[0]**(i-1))))
print("Q", i, " is", Q, "-", K, "*",q[0], "^", i-1, "*", P)
sum = 0
            for i, k in enumerate(Ks):             # evaluate the base-q expansion
                sum += k * q[0]**i
sum %= q[0]**q[1]
print(sum, "mod ", q[0]**q[1], "=", sum)
exes.append(sum) # add it to the list
print("\n***********************")
print("SYSTEM:")
print("X VALUES:\t", exes)
print("MOD VALUES:\t", mods)
print("ANSWER:\t\t", chinese_remainder(mods, exes, len(exes)))
def factor(self, point, b):
for i in range(2, b):
#print(i)
#print(math.factorial(i))
temp = self.multP(point, math.factorial(i))
#print(temp)
if temp[2] == 2:
if temp[1] != self.mod:
return temp[1]
break
# return(temp[1])
#new = EllipticCurve(self.a, self.b, self.mod / temp[1])
if isPrime(temp[1]):
if temp[1] == self.mod:
print(temp[1], "is a trivial factor.")
return False
else: return temp[1]
return False
#print("Nothing broken")
def bitwise_xor(a, b):
c = list()
for i in range(len(a)):
c.append((a[i] + b[i])%2)
return c
def bitwise_and(a, b):
c = list()
for i in range(len(a)):
c.append((a[i] & b[i]))
return c
def bitwise_or(a, b):
c = list()
for i in range(len(a)):
c.append((a[i] | b[i]))
return c
def linear_complexity(sequence, polynomial, debug):
Bx = make_bin_poly([0]) # make these into polynomials
Cx = make_bin_poly([0])
Tx = make_bin_poly([0])
L, N = 0, 0 # complexity and test length start at 0
s = sequence
m = -1
n = len(sequence)
d = 0 # discrepancy
while N < n:
if debug: print("----------------\nN =", N, "\n")
if N==0:
d = s[0]
if debug: print("s 0 (", s[0], ")")
else:
d = 0
for i in range(0, L+1):
if debug: print("s", N-i,"(", s[N-i], ") * ", "c", (i), "(", Cx[i], ") = ", s[N-i] * Cx[i])
d += s[N-i] * Cx[i] # calculate the discrepancy
d%=2
if debug: print('\nd = ', d)
if d==1:
x = make_bin_poly([N-m]) # create x**(N-m)
Tx = Cx
Cx = addpoly(Cx, mulpoly(Bx,x))
if debug: print('\nC(x) = \n', Cx, '\n')
if L <= N/2:
L = N + 1 - L
m = N
Bx = Tx
N += 1
print("\nCOMPLEXITY = ", L)
if polynomial: print("TAP POLYNOMIAL = \n", Cx)
def make_bin_poly(terms):
poly = [0]*(terms[0]+1) # length is the degree + 1
#print(poly)
for i in terms:
poly[len(poly)-(i+1)] = 1
#print(poly)
realpoly = np.poly1d(poly)
return realpoly
def mulpoly(a, b):
c = np.polymul(a, b)
return np.poly1d(c.coeffs % 2)
def addpoly(a, b):
c = a + b
return np.poly1d(c.coeffs % 2)
def xorStreams(a, b, debug):
#print(len(a))
#print(len(b))
#if len(a) != len(b): print("Sizes not equal")
C = list()
for i in range(0, len(a)):
C.append( (a[i]+b[i]) % 2 )
if debug: print("*********************")
if debug: print('A:\t', a)
if debug: print('B:\t', b)
if debug: print('C:\t', C)
return C
def testC(A, B, length, printSequences):
    stream = xorStreams(A.putout(length, False, False), B.putout(length, False, False), printSequences)
    linear_complexity(stream, False, False)
class LFSR:
def __init__(self, fill, taps):
self.fill = list(fill)
self.register = list(fill)
self.taps = list(taps)
for i in range(0, len(self.taps)):
self.taps[i] -= 1
def printtaps(self):
print(self.taps)
def printfill(self):
print(self.register)
def printregister(self):
print(self.register)
def newregister(self, sequence):
self.register = sequence
def newtaps(self, taps):
self.taps = taps
#print(self.taps)
def reset(self):
self.register = list(self.fill)
def tick(self):
return (self.putout(1, False, False))[0]
def putout(self, bits, reset, printRegisters):
if reset: self.reset()
self.output = []
next = 0
for i in range(bits):
#print(i)
if printRegisters: print(self.register)
next = self.xor(self.register, self.taps)
self.output.append(self.register[0])
self.register.pop(0)
self.register.append(next)
return self.output
def xor(self, fill, taps):
sum = 0
for i in taps:
sum += fill[len(fill)-i-1]
sum %= 2
return sum
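if __name__ == "__main__":
    # Illustrative self-test (a sketch, not from the original file): build two
    # small registers with arbitrary example fills/taps, XOR their output
    # streams and measure the linear complexity of the combined stream.
    reg_a = LFSR([1, 0, 0, 1], [4, 1])
    reg_b = LFSR([1, 1, 0, 1], [4, 3])
    testC(reg_a, reg_b, 30, False)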
|
CSAlexWhite/Cryptography
|
crypto.py
|
crypto.py
|
py
| 16,518 |
python
|
en
|
code
| 0 |
github-code
|
6
|
36258745480
|
#!/usr/bin/env python
# _*_ coding:utf-8 _*_
# Author: JiaChen
import traceback
from src.plugins.base import BasePlugin
from lib.response import BaseResponse
from config import settings
class CpuPlugin(BasePlugin):
def run(self):
response = BaseResponse()
try:
response.data = {'cpu_model': None, 'cpu_physical_count': 0, 'cpu_count': 0}
temp = self.exec_shell_cmd('snmpwalk -v 2c -c %s %s .1.3.6.1.4.1.674.10892.5.4.1100.30.1.23.1' % (settings.community_name, self.manager_ip))
cpu_model = temp.split('"')[1]
response.data['cpu_model'] = cpu_model
temp = self.exec_shell_cmd('snmpwalk -v 2c -c %s %s .1.3.6.1.4.1.674.10892.5.4.1100.30.1.23.1|wc -l' % (settings.community_name, self.manager_ip))
cpu_physical_count = int(temp)
response.data['cpu_physical_count'] = cpu_physical_count
temp = self.exec_shell_cmd('snmpwalk -v 2c -c %s %s .1.3.6.1.4.1.674.10892.5.4.1100.30.1.18.1' % (settings.community_name, self.manager_ip))
cpu_count = 0
for line in temp.split('\n'):
cpu_count += int(line.split(':')[-1])
response.data['cpu_count'] = cpu_count
except Exception as e:
msg = "%s dell cpu plugin error: %s"
self.logger.log(msg % (self.hostname, traceback.format_exc()), False)
response.status = False
response.error = msg % (self.hostname, traceback.format_exc())
return response
|
jcdiy0601/EasyCmdbClient
|
src/plugins/snmp/dell/server/cpu.py
|
cpu.py
|
py
| 1,514 |
python
|
en
|
code
| 0 |
github-code
|
6
|
38090331621
|
from .base_page import BasePage
from .locators import ProductPageLocators
from selenium.common.exceptions import NoAlertPresentException
from selenium.webdriver.common.by import By
import math
import webbrowser
class ProductPage(BasePage):
def go_product_basket_add(self):
self.browser.find_element(*ProductPageLocators.BTN_ADD_BASKET).click()
def solve_quiz_and_get_code(self):
alert = self.browser.switch_to.alert
x = alert.text.split(" ")[2]
answer = str(math.log(abs((12 * math.sin(float(x))))))
alert.send_keys(answer)
alert.accept()
try:
alert = self.browser.switch_to.alert
alert_text = alert.text
print(f"Your code: {alert_text}")
alert.accept()
except NoAlertPresentException:
print("No second alert presented")
    def should_be_name_product(self):
        product_name = self.browser.find_element(*ProductPageLocators.PRODUCT_NAME).text
        message = self.browser.find_element(*ProductPageLocators.CONFIRM_MESSAGE).text
        assert product_name == message, "Product name is missing from the basket"
def should_be_price_product(self):
product_price = self.browser.find_element(*ProductPageLocators.PRODUCT_PRICE).text
message_price = self.browser.find_element(*ProductPageLocators.PRICE_BASKET).text
        assert product_price in message_price, "Product price does not match the basket price"
def should_not_be_success_message(self):
assert self.is_not_element_present(*ProductPageLocators.SUCCESS_MESSAGE), \
"Success message is presented, but should not be"
def should_not_be_success_message_disappeared(self):
assert self.is_disappeared(*ProductPageLocators.SUCCESS_MESSAGE), \
"Success message is presented, but should not be"
|
Pavel-OG/project_selenium_course_final_block
|
pages/product_page.py
|
product_page.py
|
py
| 1,916 |
python
|
en
|
code
| 0 |
github-code
|
6
|
10711597654
|
from youtubesearchpython import VideosSearch
import os
import glob
# __ _ _
# / \ | | | |
# / \ | | /\ | | /\ _ _
# / /\ \ | |/ / | |/ / | | | |
# / ____ \ | |\ \ | |\ \ | |_| |
#/__/ \__\ |_| \_\ |_| \_\ \___/
#
# Copyright of Akash, 2021
# https://www.github.com/akkupy
# https://t.me/akkupy
def yt_music(song_name, chat_id, msg_id, bot):
try:
videosSearch = VideosSearch(song_name, limit=1)
result = videosSearch.result()
first_result = result["result"]
yt_url = first_result[0]["link"]
yt_title = first_result[0]["title"]
yt_pub_time = first_result[0]["publishedTime"]
yt_id = first_result[0]["id"]
yt_duration = first_result[0]["duration"]
if not os.path.isdir("./music/"):
os.makedirs("./music/")
yt_song = (
f'youtube-dl --force-ipv4 -q -o "./music/{yt_title}.%(ext)s" --extract-audio --audio-format mp3 --audio-quality 128k '
+ yt_url
)
os.system(yt_song)
try:
a = glob.glob("./music/*.webm")
b = a[0]
c = b[8:]
except:
a = glob.glob("./music/*.mp3")
b = a[0]
c = b[8:]
dir = f"./music/{c}"
dir1 = f"./music/{c}"
capy = f"**Song Name ➠** `{yt_title}` \n**Published On ➠** `{yt_pub_time}` \n**Duration ➠** `{yt_duration}` \n**Link ➠** `{yt_url}`"
if os.path.exists(dir):
try:
bot.sendChatAction(chat_id=chat_id, action="upload_audio")
bot.send_audio(audio=open(dir, 'rb'), caption=capy, chat_id=chat_id, reply_to_message_id=msg_id)
os.remove(dir)
except:
bot.sendMessage(chat_id=chat_id, text="Audio Size is too large,Check the link below",
reply_to_message_id=msg_id)
bot.sendMessage(chat_id=chat_id, text=yt_url, reply_to_message_id=msg_id)
os.remove(dir)
elif os.path.exists(dir1):
try:
bot.sendChatAction(chat_id=chat_id, action="upload_audio")
bot.send_audio(audio=open(dir1, 'rb'), caption=capy, chat_id=chat_id, reply_to_message_id=msg_id)
os.remove(dir1)
except:
bot.sendMessage(chat_id=chat_id, text="Audio Size is too large,Check the link below",
reply_to_message_id=msg_id)
bot.sendMessage(chat_id=chat_id, text=yt_url, reply_to_message_id=msg_id)
os.remove(dir1)
else:
bot.sendChatAction(chat_id=chat_id, action="typing")
bot.sendMessage(chat_id=chat_id, text="Song Not Found!", reply_to_message_id=msg_id)
except:
bot.sendChatAction(chat_id=chat_id, action="typing")
bot.sendMessage(chat_id=chat_id, text="Unable to retreive the Song :( Check out the link", reply_to_message_id=msg_id)
        if yt_url != "":
bot.sendMessage(chat_id=chat_id, text=yt_url, reply_to_message_id=msg_id)
|
akkupy/Sara-Bot
|
Modules/Yt_music.py
|
Yt_music.py
|
py
| 3,160 |
python
|
en
|
code
| 2 |
github-code
|
6
|
70384673149
|
from collections import deque
from dataclasses import dataclass, field, replace
from typing import Type
import copy
import numpy as np
import pandas as pd
import re
# little helper class
class ldf_dict(dict):
def __init__(self):
self = dict()
def add(self, key, value):
self[key] = value
@dataclass
class Nodes:
master: str
timer_base_ms: float
jitter_ms: float
slaves: list = field(default_factory=list)
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class Signal:
size: int
init_val: int
publisher: str
subscriber: str
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class Frame:
identifier: int
publisher: str
response_length: int
signals: ldf_dict()
@dataclass(init=True, repr=True, eq=True, order=False, unsafe_hash=False, frozen=False)
class Diagnostic_signal:
size: int
init_val: int
@dataclass
class Node_attribute:
lin_protocol: float
configure_NAD: str
product_id: list
response_error: str
P2_min_ms: int
ST_min_ms: int
configure_frames: ldf_dict()
class LDFParser:
"""
Wording: every element of the ldf e.g. Nodes {} or Signals {} is
called attribute.
"""
__closed_curly: np.ndarray
__opened_curly: np.ndarray
__ldf_data: np.ndarray
__ldf_header: np.ndarray
__start_of_attribute: np.ndarray
__start_of_frames: np.ndarray
# frames: key=frame_name, value=frame data
frames = ldf_dict()
node_attributes = ldf_dict()
schedule_tables = ldf_dict()
signals = ldf_dict()
diagnostic_signals = ldf_dict()
signal_encoding_types = ldf_dict()
signal_representation = ldf_dict()
nodes = Nodes
bus_name = ""
def __init__(self, ldf_path):
self.__ldf_data = pd.read_csv(ldf_path, sep="\n", encoding='latin-1')
self.__ldf_data = self.__ldf_data.values
self.__remove_header_info()
self.__get_bus_name()
self.__analyse_ldf_elements()
def parse_all(self):
for (line_number, axis), value in np.ndenumerate(self.__start_of_attribute):
if value and self.__ldf_data[line_number] == "Nodes {":
self.get_nodes(line_number)
elif value and self.__ldf_data[line_number] == "Signals {":
self.get_signals(line_number)
elif value and self.__ldf_data[line_number] == "Diagnostic_signals {":
self.get_dignostic_signals(line_number)
elif value and self.__ldf_data[line_number] == "Frames {":
self.get_frames()
elif value and self.__ldf_data[line_number] == "Node_attributes {":
self.get_node_attributes(line_number)
elif value and self.__ldf_data[line_number] == "Schedule_tables {":
self.get_schedule_table(line_number)
elif value and self.__ldf_data[line_number] == "Signal_encoding_types {":
self.get_signal_encoding_types(line_number)
elif value and self.__ldf_data[line_number] == "Signal_representation {":
self.get_signal_representation(line_number)
del self.__ldf_data, self.__closed_curly, self.__start_of_frames, self.__start_of_attribute
def get_nodes(self, line_number=-1):
nodes = Nodes
if line_number == -1:
line_number = int(np.where(self.__ldf_data == "Nodes {")[0]) + 1
end_of_nodes = self.__get_index_of_next_closed_curly(line_number)
while line_number < end_of_nodes:
line_number = line_number + 1
current_line_value = self.__ldf_data[line_number][0]
current_line_value = self.__remove_unwanted(current_line_value).split(':')
if current_line_value[0] == "Master":
master_values = current_line_value[1].split(',')
nodes.master = master_values[0]
nodes.timer_base_ms = float(self.__remove_all_but_num(master_values[1]))
nodes.jitter_ms = float(self.__remove_all_but_num(master_values[2]))
elif current_line_value[0] == "Slaves":
nodes.slaves = current_line_value[1].split(',')
self.nodes = nodes
def get_frames(self):
        # self.__start_of_frames contains all starting positions of the frame elements
start_frame_indizes = np.where(self.__start_of_frames[:, 0])[0]
end_frame_indizes = np.where(self.__closed_curly[:, 0])[0]
end_frame_indizes = deque(end_frame_indizes)
# remove not needed closing curly braces
while end_frame_indizes[0] < start_frame_indizes[0]:
end_frame_indizes.popleft()
end_frames_index = self.__get_end_of_attribute(start_frame_indizes[0])
start_frame_indizes = deque(start_frame_indizes)
current_line_number = start_frame_indizes.popleft()
while current_line_number < end_frames_index:
# first parse the frame header ..
frame = Frame(identifier=0, publisher="", response_length=0, signals=ldf_dict())
frame_header = self.__raw_line_to_list(self.__ldf_data[current_line_number][0])
frame.identifier = frame_header[1]
frame.publisher = frame_header[2]
frame.response_length = int(frame_header[3])
current_line_number = current_line_number + 1
# .. and then the signals
end_of_frame_signals = self.__get_end_of_attribute(current_line_number, 1)
signals = ldf_dict()
while current_line_number < end_of_frame_signals:
signal = ldf_dict()
signal_line = self.__remove_unwanted(self.__ldf_data[current_line_number][0]).split(",")
signal_name = signal_line[0]
signal_offset = signal_line[1]
signal.add("Offset", signal_offset)
signals.add(signal_name, signal)
current_line_number = current_line_number + 1
frame.signals = signals
self.frames.add(frame_header[0], frame)
current_line_number = current_line_number + 1
def get_node_attributes(self, line_number):
end_of_node_attr = self.__get_end_of_attribute(line_number, 3)
line_number = line_number + 1
while line_number < end_of_node_attr:
node_attribute = Node_attribute(lin_protocol=0.0, configure_NAD="", product_id=[], response_error="",
P2_min_ms=0, ST_min_ms=0, configure_frames=ldf_dict())
node_attribute_name = self.__remove_unwanted(self.__ldf_data[line_number][0])
line_number = line_number + 1
node_attribute.lin_protocol = float(self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1])
line_number = line_number + 1
node_attribute.configure_NAD = self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1]
line_number = line_number + 1
if node_attribute_name == "DS":
self.node_attributes.add(node_attribute_name, node_attribute)
line_number = self.__get_end_of_attribute(line_number, 1) + 1
else:
node_attribute.product_id = self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1].split(",")
line_number = line_number + 1
node_attribute.response_error = self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1]
line_number = line_number + 1
node_attribute.P2_min_ms = int(re.sub(r'[^0-9]', '', self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1]))
line_number = line_number + 1
node_attribute.ST_min_ms = int(re.sub(r'[^0-9]', '', self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")[1]))
line_number = line_number + 2
end_of_configurable_frames = self.__get_end_of_attribute(line_number, 1)
conf_frame_dict = ldf_dict()
while line_number < end_of_configurable_frames:
conf_frame = self.__remove_unwanted(self.__ldf_data[line_number][0]).split("=")
conf_frame_dict.add(conf_frame[0], conf_frame[1])
line_number = line_number + 1
node_attribute.configure_frames = conf_frame_dict
self.node_attributes.add(node_attribute_name, node_attribute)
line_number = self.__get_end_of_attribute(line_number, 2) + 2
def get_signal_representation(self, current_line_number):
current_line_number = current_line_number + 1
end_of_signal_representation = self.__get_index_of_next_closed_curly(current_line_number)
while current_line_number < end_of_signal_representation:
signal_representation_list = self.__remove_unwanted(self.__ldf_data[current_line_number][0]).split(":")
signal_repre_key = signal_representation_list[0]
signal_repre_val = signal_representation_list[1].split(",")
current_line_number = current_line_number + 1
self.signal_representation.add(signal_repre_key, signal_repre_val)
def get_signal_encoding_types(self, current_line_number):
current_line_number = current_line_number + 1
end_of_signal_enc_types = self.__get_end_of_attribute(current_line_number, 2)
while current_line_number < end_of_signal_enc_types:
signal_encoding_name = self.__remove_unwanted(self.__ldf_data[current_line_number][0])
current_line_number = current_line_number + 1
end_of_current_sign_enc_type = self.__get_index_of_next_closed_curly(current_line_number)
encoding_list = []
while current_line_number < end_of_current_sign_enc_type:
val_list = self.__ldf_data[current_line_number][0].split(",")
for i in range(0, len(val_list)):
val_list[i] = re.sub(r"^[\s]*|[\";]", "", val_list[i])
encoding_list.append(val_list)
current_line_number = current_line_number + 1
self.signal_encoding_types.add(signal_encoding_name, encoding_list)
current_line_number = current_line_number + 1
def get_schedule_table(self, current_line_number):
current_line_number = current_line_number + 1
end_of_schedule_tables = self.__get_end_of_attribute(current_line_number, 2)
while current_line_number < end_of_schedule_tables:
schedule_table_name = self.__remove_unwanted(self.__ldf_data[current_line_number][0])
current_line_number = current_line_number + 1
end_of_current_schedule_table = self.__get_index_of_next_closed_curly(current_line_number)
frame_slots = ldf_dict()
while current_line_number < end_of_current_schedule_table:
#schedule_table = Schedule_table(frame_slot_name="", frame_slot_duration_ms=0)
current_line_list = re.sub(r"[\t]", "", self.__ldf_data[current_line_number][0]).split(" ")
frame_slot_name = current_line_list[0]
frame_slot_duration_ms = current_line_list[2]
frame_slots.add(frame_slot_name, int(frame_slot_duration_ms))
current_line_number = current_line_number + 1
self.schedule_tables.add(schedule_table_name, frame_slots)
current_line_number = current_line_number + 1
def get_signals(self, current_line_number):
current_line_number = current_line_number + 1
end_of_signals = self.__get_index_of_next_closed_curly(current_line_number)
while current_line_number < end_of_signals:
signal = Signal(size=0, init_val=0, publisher="", subscriber="")
raw_line = self.__ldf_data[current_line_number][0]
line_as_list = self.__raw_line_to_list(raw_line)
signal.size = line_as_list[1]
signal.init_val = line_as_list[2]
signal.publisher = line_as_list[3]
signal.subscriber = line_as_list[4]
current_line_number = current_line_number + 1
self.signals.add(line_as_list[0], signal)
def get_dignostic_signals(self, current_line_number):
current_line_number = current_line_number + 1
end_of_diagnostic_signals = self.__get_index_of_next_closed_curly(current_line_number)
while current_line_number < end_of_diagnostic_signals:
diagnostic_signal = Diagnostic_signal(size=0, init_val=0)
raw_line = self.__ldf_data[current_line_number][0]
line_as_list = self.__raw_line_to_list(raw_line)
diagnostic_signal.size = line_as_list[1]
diagnostic_signal.init_val = line_as_list[2]
self.diagnostic_signals.add(line_as_list[0], diagnostic_signal)
current_line_number = current_line_number + 1
def __get_bus_name(self):
for (line_number, axis), value in np.ndenumerate(self.__ldf_header):
if value.find("Network") != -1:
self.bus_name = self.__remove_unwanted(value).split(":")[1]
def __remove_unwanted(self, string: str) -> str:
"""
:param string: string that contains commas, semicols, whitespace, tabspace or closed curly
:return: cleaned string
"""
string = re.sub(r'[\s\t;{}"*/]*', '', string, flags=re.M)
return string
def __analyse_ldf_elements(self):
        # TODO: optimizable, since it runs over the file three times
start_pattern = re.compile(r'\b\w+\s{$')
start_vmatch = np.vectorize(lambda x: bool(start_pattern.match(x)))
self.__start_of_attribute = start_vmatch(self.__ldf_data)
# find all closed curlys
close_curly_pattern = re.compile(r'\s*}$')
end_vmatch = np.vectorize(lambda x: bool(close_curly_pattern.match(x)))
self.__closed_curly = end_vmatch(self.__ldf_data)
open_curly_pattern = re.compile(r'.*{$')
open_curly_vmatch = np.vectorize(lambda x: bool(open_curly_pattern.match(x)))
self.__opened_curly = open_curly_vmatch(self.__ldf_data)
frames_pattern = re.compile(r'\s*[A-Za-z0-9_]+:[\d\sA-Za-z,_]+{$')
# example: AQSe_01: 10, Klima_LIN1, 6 {
frames_vmatch = np.vectorize(lambda x: bool(frames_pattern.match(x)))
self.__start_of_frames = frames_vmatch(self.__ldf_data)
def __remove_all_but_num(self, string: str) -> str:
return re.sub(r'[^0-9.]', '', string, flags=re.M)
def __raw_line_to_list(self, line):
line = self.__remove_unwanted(line).split(":")
line = line[:1] + line[1].split(",")
return line
def __remove_header_info(self):
counter = 0
for line in self.__ldf_data:
if "/*" in line[0]:
counter = counter + 1
if counter != 0:
self.__ldf_header = copy.deepcopy(self.__ldf_data[:counter])
self.__ldf_data = self.__ldf_data[counter:]
def __get_index_of_next_closed_curly(self, index):
index_ = index + 1
while not self.__closed_curly[index_]:
index_ = index_ + 1
return index_
def __write_to_arr_till_closed_curly(self, index, np_arr):
index_ = index + 1
while not self.__closed_curly[index_]:
np_arr = np.append(np_arr, self.__ldf_data[index_][0])
index_ = index_ + 1
return np_arr
def __get_end_of_attribute(self, index, successive_closed_curly=2):
# find end of block by double or tripple closed curly braces
i = index
if successive_closed_curly == 1:
while not self.__closed_curly[i]:
i = i + 1
elif successive_closed_curly == 2:
while not self.__closed_curly[i] or not self.__closed_curly[i + 1]:
i = i + 1
elif successive_closed_curly == 3:
while not self.__closed_curly[i] or not self.__closed_curly[i + 1] or not self.__closed_curly[i + 2]:
i = i + 1
else:
print("Number of curly not supported")
return i
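if __name__ == "__main__":
    # Illustrative usage sketch (not part of the original module); "network.ldf"
    # is a placeholder path to a LIN description file.
    parser = LDFParser("network.ldf")
    parser.parse_all()
    print("Bus:", parser.bus_name)
    print("Master:", parser.nodes.master, "Slaves:", parser.nodes.slaves)
    for frame_name, frame in parser.frames.items():
        print(frame_name, frame.identifier, frame.publisher, frame.response_length)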
|
makreft/lin_ldf_parser
|
lin_ldf_parser/lin_ldf_parser.py
|
lin_ldf_parser.py
|
py
| 16,228 |
python
|
en
|
code
| 1 |
github-code
|
6
|
42977090533
|
while True:
    A, B, C = map(int, input().split())
    if A == B == C == 0:
        break
    # square of the longest side vs. sum of the squares of the two shorter sides
    longest = max(A, B, C) ** 2
    shortSide = A ** 2 + B ** 2 + C ** 2 - max(A, B, C) ** 2
    if longest == shortSide:
        print("right")
    else:
        print('wrong')
|
jinhyo-dev/BOJ
|
직각삼각형.py
|
직각삼각형.py
|
py
| 404 |
python
|
en
|
code
| 1 |
github-code
|
6
|
41073049349
|
from mymodule_2nd_exercise import *
try:
a = int(input("Input A: "))
b = int(input("Input B: "))
operation = input("What operation to be done: ")
    if b == 0 and operation in ("/", "division"):
raise ZeroDivisionError
if operation == "add" or operation == "+":
print(add(a, b))
elif operation == "minus" or operation == "-":
print(substract(a, b))
elif operation == "multiplication" or operation == "*":
print(multiplication(a, b))
elif operation == "division" or operation == "/":
print(division(a, b))
else:
raise WrongOperation
except ValueError:
print("Invalid number")
except WrongOperation:
print("Wrong operation")
except ZeroDivisionError:
print("Cant divide by 0")
|
Marto03/Python-homeworks
|
lab_8_15.12.2022/2nd_exercise.py
|
2nd_exercise.py
|
py
| 782 |
python
|
en
|
code
| 0 |
github-code
|
6
|
17609793311
|
from django import http
import six
from django.db.models import ProtectedError
from rest_framework import views, exceptions, status
from rest_framework.exceptions import UnsupportedMediaType
from rest_framework.response import Response
from backpack.serializers_bcv1 import BadgeConnectErrorSerializer
from entity.serializers import V2ErrorSerializer, Rfc7591ErrorSerializer
from entity.authentication import CSRFPermissionDenied
def exception_handler(exc, context):
version = context.get('kwargs', {}).get('version', 'v1')
if version in ['v2', 'rfc7591']:
description = 'miscellaneous error'
field_errors = {}
validation_errors = []
if isinstance(exc, exceptions.ParseError):
description = 'bad request'
validation_errors = [exc.detail]
response_code = status.HTTP_400_BAD_REQUEST
elif isinstance(exc, exceptions.ValidationError):
description = 'bad request'
if isinstance(exc.detail, list):
validation_errors = exc.detail
elif isinstance(exc.detail, dict):
field_errors = exc.detail
elif isinstance(exc.detail, six.string_types):
validation_errors = [exc.detail]
response_code = status.HTTP_400_BAD_REQUEST
elif isinstance(exc, (exceptions.AuthenticationFailed, exceptions.NotAuthenticated)):
description = 'no valid auth token found'
response_code = status.HTTP_401_UNAUTHORIZED
elif isinstance(exc, CSRFPermissionDenied):
description = 'no valid csrf token found'
response_code = status.HTTP_401_UNAUTHORIZED
elif isinstance(exc, (http.Http404, exceptions.PermissionDenied)):
description = 'entity not found or insufficient privileges'
response_code = status.HTTP_404_NOT_FOUND
elif isinstance(exc, ProtectedError):
description, protected_objects = exc.args
response_code = status.HTTP_400_BAD_REQUEST
elif isinstance(exc, UnsupportedMediaType):
description = exc.detail
validation_errors = [exc.detail]
response_code = status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
elif isinstance(exc, exceptions.APIException):
field_errors = exc.detail
response_code = exc.status_code
else:
# Unrecognized exception, return 500 error
return None
if version == 'v2':
serializer = V2ErrorSerializer(
instance={}, success=False, description=description,
field_errors=field_errors, validation_errors=validation_errors
)
else:
serializer = Rfc7591ErrorSerializer(
instance={}, field_errors=field_errors, validation_errors=validation_errors
)
return Response(serializer.data, status=response_code)
elif version == 'bcv1':
# Badge Connect errors
error = None
status_code = status.HTTP_400_BAD_REQUEST
status_text = 'BAD_REQUEST'
if isinstance(exc, exceptions.ParseError):
error = exc.detail
elif isinstance(exc, exceptions.ValidationError):
error = exc.detail
status_text = 'REQUEST_VALIDATION_ERROR'
elif isinstance(exc, exceptions.PermissionDenied):
status_code = status.HTTP_401_UNAUTHORIZED
status_text = 'PERMISSION_DENIED'
elif isinstance(exc, (exceptions.AuthenticationFailed, exceptions.NotAuthenticated)):
status_code = status.HTTP_401_UNAUTHORIZED
status_text = 'UNAUTHENTICATED'
elif isinstance(exc, exceptions.MethodNotAllowed):
status_code = status.HTTP_405_METHOD_NOT_ALLOWED
status_text = 'METHOD_NOT_ALLOWED'
serializer = BadgeConnectErrorSerializer(instance={},
error=error,
status_text=status_text,
status_code=status_code)
return Response(serializer.data, status=status_code)
else:
# Use the default exception-handling logic for v1
if isinstance(exc, ProtectedError):
description, protected_objects = exc.args
return Response(description, status=status.HTTP_400_BAD_REQUEST)
return views.exception_handler(exc, context)
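# Illustrative wiring sketch (an assumption, not taken from this repository's
# settings): a custom DRF exception handler is normally enabled through the
# EXCEPTION_HANDLER key, e.g.
#
#     REST_FRAMEWORK = {
#         "EXCEPTION_HANDLER": "entity.views.exception_handler",
#     }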
|
reedu-reengineering-education/badgr-server
|
apps/entity/views.py
|
views.py
|
py
| 4,487 |
python
|
en
|
code
| 2 |
github-code
|
6
|
9485584474
|
###
###
### note: had to install webpack, webpack-cli, web-ext globally
from OpenWPM.automation import CommandSequence,TaskManager
import pandas as pd
NUM_BROWSERS = 2
NUM_SITES = 100
vanilla_path = './data_vanilla'
adblock_path = './data_adblock'
sites_path = 'top-1m.csv'
data = pd.read_csv(sites_path,header=None)
sites = data.iloc[:NUM_SITES,1]
def crawl(mode):
# Loads the default manager preferences and copies of the default browser dictionaries
manager_params, browser_params = TaskManager.load_default_params(NUM_BROWSERS)
adblock = (mode == 'adblock')
# Update browser configuration (use this for per-browser settings)
for i in range(NUM_BROWSERS):
browser_params[i]['http_instrument'] = True #Record http requests and responses, saved to http_requests table
browser_params[i]['cookie_instrument'] = True #Record cookie data, saved to javascript_cookies table
browser_params[i]['js_instrument'] = True #Record JS calls, saved to javascript table
        browser_params[i]['disable_flash'] = False  # Enable Flash for all browsers
        browser_params[i]['display_mode'] = 'native'  # Launch all browsers with a visible (native) display
browser_params[i]['ublock-origin'] = adblock # True for adblock mode, False for vanilla mode
# Update TaskManager configuration (use this for crawl-wide settings)
if adblock:
manager_params['data_directory'] = adblock_path
manager_params['log_directory'] = adblock_path
else:
manager_params['data_directory'] = vanilla_path
manager_params['log_directory'] = vanilla_path
# Instantiates the measurement platform
# Commands time out by default after 60 seconds
manager = TaskManager.TaskManager(manager_params, browser_params)
# Visits the sites with all browsers simultaneously
for site in sites:
site = 'http://' + site
command_sequence = CommandSequence.CommandSequence(site,reset=True)
command_sequence.get(sleep=10, timeout=60)
#command_sequence.dump_profile_cookies(120)
manager.execute_command_sequence(command_sequence) # ** = synchronized browsers
# Shuts down the browsers and waits for the data to finish logging
manager.close()
return
# run crawl in vanilla and ad-blocking modes
crawl('vanilla')
crawl('adblock')
|
hwtrost/OpenWPM_crawling_project
|
webcrawler.py
|
webcrawler.py
|
py
| 2,329 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18769228974
|
'''
Read and write Olympia state files.
'''
import os
import os.path
import sys
from contextlib import redirect_stdout
from .oid import to_oid
from .formatters import print_one_thing, read_oly_file
def fixup_ms(data):
'''
For whatever reason, the value in IM/ms needs to have a trailing space
'''
for box in data:
if 'IM' in data[box]:
if 'ms' in data[box]['IM']:
value = data[box]['IM']['ms']
value[0] = value[0].strip() + ' '
data[box]['IM']['ms'] = value
def write_oly_file(data, kind=False, verbose=False):
'''
The main function that drives outputting a file
'''
fixup_ms(data)
order = sorted([int(box) for box in data.keys()])
count = 0
for box in order:
box = str(box)
if kind:
if ' '+kind+' ' not in data[box].get('firstline', '')[0]:
continue
print_one_thing(data[box])
del data[box]
count += 1
if verbose:
print('wrote', count, verbose, 'boxes.', file=sys.stderr)
def write_player(data, box, verbose=False):
player_box = box
boxlist = data[box].get('PL', {}).get('un', {})
print_one_thing(data[box])
del data[box]
count = 0
for box in boxlist:
print_one_thing(data[box])
del data[box]
count += 1
if verbose:
print('wrote', count, 'characters for player', to_oid(int(player_box)), file=sys.stderr)
def read_players(dir, verbose=False):
'''
    read every file in dir whose name is an integer
'''
ret = {}
files = os.listdir(dir)
for name in files:
if name.isdigit():
data = read_oly_file(os.path.join(dir, name), verbose='player ' + name)
ret.update(data)
return ret
def write_players(data, dir, verbose=False):
boxlist = list(data.keys()) # we're deleting as we go
for box in boxlist:
if data.get(box) is None:
continue
if ' player ' in data[box]['firstline'][0]:
fact = os.path.join(dir, 'fact')
if not os.path.isdir(fact):
os.mkdir(fact)
filename = os.path.join(dir, 'fact', box)
with open(filename, 'w') as f:
with redirect_stdout(f):
write_player(data, box, verbose=verbose)
def write_system_file(data):
fr = None
lt = 1
tr = None
ur = None
hr = None
hp = None
nr = None
nl = None
cr = None
for k, v in data.items():
fl = v['firstline'][0]
try:
na = v['na'][0]
except KeyError:
na = ''
if ' player pl_regular' in fl:
lt = max(lt, int(v['PL']['lt'][0]))
if fr is None and ' loc region' in fl and na == 'Faery':
if v.get('LI', {}).get('hl'):
fr = k
else:
fr = 0
if tr is None and ' loc region' in fl and na == 'Undercity':
if data[k].get('LI', {}).get('hl'):
tr = k
else:
tr = 0
if ur is None and ' loc region' in fl and na == 'Subworld':
if data[k].get('LI', {}).get('hl'):
ur = k
else:
ur = 0
if hr is None and ' loc region' in fl and na == 'Hades':
if data[k].get('LI', {}).get('hl'):
hr = k
else:
hr = 0
if hp is None and fl.endswith(' loc pit'): # normal pits are 'pits'
hp = k
if nr is None and ' loc region' in fl and na == 'Nowhere':
nr = k
nl = v['LI']['hl'][0]
if cr is None and ' loc region' in fl and na == 'Cloudlands':
if data[k].get('LI', {}).get('hl'):
cr = k
else:
cr = 0
if hp is None:
# not surprising for a player sim
# if I wanted to do this right I have to also create City of the Dead in a provinces.
# fake it.
hp = hr
days_per_month = 30
days_since_epoch = lt * days_per_month
system = '''sysclock: {} {} {}
indep_player=100
gm_player=200
skill_player=202
[email protected]
[email protected]
game_title=SIMULATION
post=1
init=1
fr={}
tr={}
ur={}
fp=204
hr={}
hp={}
hl=205
nr={}
nl={}
np=206
cr={}
cp=210
'''.format(lt, days_per_month, days_since_epoch, fr, tr, ur, hr, hp, nr, nl, cr)
if 'None' in system:
raise ValueError('failed to find some stuff for system:\n' + system)
print(system)
def read_lib(libdir):
if not os.path.isdir(libdir):
raise ValueError('libdir {} is not a directory'.format(libdir))
data = read_oly_file(os.path.join(libdir, 'loc'), verbose='loc')
data.update(read_oly_file(os.path.join(libdir, 'item'), verbose='item'))
data.update(read_oly_file(os.path.join(libdir, 'skill'), verbose='skill'))
data.update(read_oly_file(os.path.join(libdir, 'gate'), verbose='gate'))
data.update(read_oly_file(os.path.join(libdir, 'road'), verbose='road'))
data.update(read_oly_file(os.path.join(libdir, 'ship'), verbose='ship'))
data.update(read_oly_file(os.path.join(libdir, 'unform'), verbose='unform'))
data.update(read_oly_file(os.path.join(libdir, 'misc'), verbose='misc'))
data.update(read_players(os.path.join(libdir, 'fact'), verbose=True))
return data
def write_lib(data, libdir):
if os.path.exists(libdir):
if not os.path.isdir(libdir):
raise ValueError('libdir {} is not a directory'.format(libdir))
else:
os.mkdir(libdir)
with open(os.path.join(libdir, 'system'), 'w') as f:
with redirect_stdout(f):
write_system_file(data)
with open(os.path.join(libdir, 'loc'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='loc', verbose='loc')
with open(os.path.join(libdir, 'item'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='item', verbose='item')
with open(os.path.join(libdir, 'skill'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='skill', verbose='skill')
with open(os.path.join(libdir, 'gate'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='gate', verbose='gate')
with open(os.path.join(libdir, 'road'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='road', verbose='road')
with open(os.path.join(libdir, 'ship'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='ship', verbose='ship')
with open(os.path.join(libdir, 'unform'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, kind='unform', verbose='unform')
write_players(data, libdir, verbose=True)
with open(os.path.join(libdir, 'misc'), 'w') as f:
with redirect_stdout(f):
write_oly_file(data, verbose='misc') # catchall
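# Illustrative usage sketch (not from the original module). Because this file
# uses relative imports it is normally used through the olypy package, e.g.:
#
#     from olypy.oio import read_lib, write_lib
#     data = read_lib('lib')        # 'lib' is a placeholder directory name
#     write_lib(data, 'lib-copy')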
|
olympiag3/olypy
|
olypy/oio.py
|
oio.py
|
py
| 6,980 |
python
|
en
|
code
| 0 |
github-code
|
6
|
5855348658
|
types = {
"root_list": {
1: ("day_session"),
},
"day_session": {
1: ("string", "session_uuid"),
2: ("int32"),
3: ("sfixed64", "main_timestamp"),
4: ("string", "location_name"),
5: ("location_list"),
},
"location_list": {
2: ("gps_location"),
},
"gps_location": {
1: ("sfixed64", "timestamp"),
2: ("double", "x"),
3: ("double", "y"),
4: ("double", "z")
}
}
|
jaredkaczynski/SnowDuckToGPX
|
protobuf_config.py
|
protobuf_config.py
|
py
| 476 |
python
|
en
|
code
| 0 |
github-code
|
6
|
18834133291
|
# -*- coding: utf-8 -*-
# Author:sen
# Date:2020/3/4 19:09
from typing import List
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def buildListNodeA():
n0 = ListNode(1)
n1 = ListNode(2)
n2 = ListNode(3)
n3 = ListNode(2)
n4 = ListNode(1)
n0.next = n1
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = None
return n0
def buildListNodeB():
n0 = ListNode(1)
n1 = ListNode(2)
n2 = ListNode(3)
n2_2 = ListNode(3)
n3 = ListNode(2)
n4 = ListNode(1)
n0.next = n1
n1.next = n2
n2.next = n2_2
n2_2.next = n3
n3.next = n4
n4.next = None
return n0
class Solution:
def isPalindrome(self, head: ListNode) -> bool:
if head is None:
return True
if head.next is None:
return True
mid = self.get_mid(head)
part2 = mid.next
reverse_part2 = self.reverseList(part2)
p1, p2 = head, reverse_part2
while p2:
if p1.val != p2.val:
return False
p1 = p1.next
p2 = p2.next
return True
def reverseList(self, head: ListNode) -> ListNode:
        # reverse the linked list
prev, curr = None, head
while curr:
            temp = curr.next  # save the next node
            curr.next = prev  # point curr back to the previous node (the list is split here)
            prev = curr  # prev advances to the current node
            curr = temp  # curr advances to the saved next node
return prev
def get_mid(self, head):
        # Find the middle node of the linked list
if head is None:
return head
fast = slow = head
while fast.next and fast.next.next:
slow = slow.next
fast = fast.next.next
return slow
if __name__ == '__main__':
so = Solution()
headA = buildListNodeA()
headB = buildListNodeB()
print(so.isPalindrome(headA))
print(so.isPalindrome(headB))
|
PandoraLS/CodingInterview
|
ProgrammingOJ/LeetCode_python/234_回文链表.py
|
234_回文链表.py
|
py
| 2,003 |
python
|
en
|
code
| 2 |
github-code
|
6
|
11161578103
|
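# Fade all detected Aura RGB devices from black up to white and back down,
# repeating the cycle five times.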
import time
import aura_sdk as aura
import atexit
atexit.register(aura.close)
print("Devices found:")
for dev in aura.get_devices():
print(" " + dev.Name)
for j in range(5):
for i in range(255):
aura.set_all_to_color(aura.rgb_to_color(i, i, i))
time.sleep(0.005)
for i in range(255):
aura.set_all_to_color(aura.rgb_to_color(255 - i, 255 - i, 255 - i))
time.sleep(0.005)
|
obfuscatedgenerated/asus-aura-control
|
fade_white.py
|
fade_white.py
|
py
| 418 |
python
|
en
|
code
| 1 |
github-code
|
6
|
7437698122
|
"""
Script that trains an NFC bounding interval annotator.
To use tensorboard during or after model training, open a terminal and say:
conda activate vesper-dev-tf2
tensorboard --logdir "/Users/Harold/Desktop/NFC/Data/Vesper ML/
NFC Bounding Interval Annotator 1.0/Logs/<training log dir path>"
and then visit:
127.0.0.1:6006
in Chrome.
"""
from collections import defaultdict
import math
import time
from matplotlib.backends.backend_pdf import PdfPages
from tensorflow.keras.layers import (
BatchNormalization, Conv2D, Dense, Flatten, MaxPooling2D)
# from tensorflow.keras.layers import Dropout
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.inferrer \
import Inferrer
from vesper.util.settings import Settings
import vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.annotator_utils \
as annotator_utils
import vesper.mpg_ranch.nfc_bounding_interval_annotator_1_0.dataset_utils \
as dataset_utils
import vesper.util.yaml_utils as yaml_utils
TSEEP_SETTINGS = Settings(
clip_type='Tseep',
bound_type='Start',
waveform_sample_rate=24000,
positive_example_probability=.5,
positive_example_call_start_offset=.025,
waveform_slice_duration=.080,
# `True` if and only if the waveform amplitude scaling data
# augmentation is enabled. This augmentation scales each waveform
# randomly to distribute the waveform log RMS amplitudes uniformly
# within a roughly 48 dB window.
waveform_amplitude_scaling_data_augmentation_enabled=False,
# spectrogram settings
spectrogram_window_size=.005,
spectrogram_hop_size=20,
spectrogram_log_epsilon=1e-10,
# spectrogram frequency axis slicing settings
spectrogram_start_freq=4000,
spectrogram_end_freq=10500,
# The maximum spectrogram frequency shift for data augmentation,
# in bins. Set this to zero to disable this augmentation.
max_spectrogram_frequency_shift=2,
spectrogram_background_normalization_percentile_rank=30,
# training settings
training_batch_size=128,
training_epoch_step_count=100, # epoch size is batch size times step count
training_epoch_count=30,
model_save_period=5, # epochs
dropout_rate=.25,
# validation settings
validation_batch_size=1,
validation_step_count=1000,
# evaluation plot settings
max_evaluation_inlier_diff=20,
# offsets for converting inference value to spectrogram index
call_start_index_offset=23,
call_end_index_offset=22,
)
def main():
settings = TSEEP_SETTINGS
train_annotator(settings)
# evaluate_annotator('2020-07-06_09.33.54')
# show_model_summary('start_2020-06-10_12.13.39', 20)
# test_get_spectrogram_percentiles()
# test_create_waveform_dataset_from_tensors()
# test_create_waveform_dataset_from_tfrecord_files('Training', settings)
# test_create_training_dataset('Training', settings)
# test_create_inference_dataset(settings)
# show_dataset_sizes(settings)
def train_annotator(settings):
s = settings
training_name = annotator_utils.create_training_name(s)
training_dataset = get_dataset('Training', s).batch(s.training_batch_size)
validation_dataset = \
get_dataset('Validation', s).batch(s.validation_batch_size)
input_shape = dataset_utils.get_spectrogram_slice_shape(settings)
model = Sequential([
Conv2D(32, (3, 3), activation='relu', input_shape=input_shape),
# Dropout(s.dropout_rate),
BatchNormalization(),
MaxPooling2D((1, 2)),
# Conv2D(16, (1, 1), activation='relu'),
# BatchNormalization(),
Conv2D(32, (3, 3), activation='relu'),
# Dropout(s.dropout_rate),
BatchNormalization(),
MaxPooling2D((1, 2)),
# Conv2D(16, (1, 1), activation='relu'),
# BatchNormalization(),
Flatten(),
# Dense(32, activation='relu'),
# BatchNormalization(),
Dense(32, activation='relu'),
# Dropout(s.dropout_rate),
BatchNormalization(),
Dense(1, activation='sigmoid')
])
model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
model.summary()
log_dir_path = annotator_utils.get_training_log_dir_path(training_name)
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=log_dir_path, histogram_freq=1)
model_save_callback = ModelSaveCallback(training_name, settings)
model.fit(
training_dataset, epochs=s.training_epoch_count,
steps_per_epoch=s.training_epoch_step_count, verbose=2,
validation_data=validation_dataset,
validation_steps=s.validation_step_count,
callbacks=[tensorboard_callback, model_save_callback])
class ModelSaveCallback(tf.keras.callbacks.Callback):
def __init__(self, training_name, settings):
self._training_name = training_name
self._settings = settings
def on_epoch_end(self, epoch, logs=None):
epoch_num = epoch + 1
if epoch_num % self._settings.model_save_period == 0:
model_dir_path = \
annotator_utils.get_tensorflow_saved_model_dir_path(
self._training_name, epoch_num)
self.model.save(model_dir_path)
save_training_settings(self._settings, self._training_name)
print(f'Saved model at end of epoch {epoch_num}.')
print('Evaluating model...')
evaluate_annotator(self._training_name, epoch_num)
def get_dataset(name, settings):
dir_path = annotator_utils.get_dataset_dir_path(settings.clip_type, name)
return dataset_utils.create_training_dataset(dir_path, settings)
def save_training_settings(settings, training_name):
file_path = annotator_utils.get_training_settings_file_path(training_name)
text = yaml_utils.dump(settings.__dict__, default_flow_style=False)
file_path.write_text(text)
def evaluate_annotator(training_name, epoch_num):
_, settings = annotator_utils.load_model_and_settings(
training_name, epoch_num)
dir_path = annotator_utils.get_dataset_dir_path(
settings.clip_type, 'Validation')
dataset = dataset_utils.create_validation_dataset(dir_path, settings)
dataset = dataset.take(settings.validation_step_count)
inferrer = Inferrer((training_name, epoch_num))
bounds = inferrer.get_call_bounds(dataset)
start_diff_counts = defaultdict(int)
end_diff_counts = defaultdict(int)
for (inferred_start_index, inferred_end_index, dataset_start_index,
dataset_end_index) in bounds:
dataset_start_index = dataset_start_index.numpy()
dataset_end_index = dataset_end_index.numpy()
sample_rate = settings.waveform_sample_rate
start_diff = _get_diff(
inferred_start_index, dataset_start_index, sample_rate)
end_diff = _get_diff(
inferred_end_index, dataset_end_index, sample_rate)
if start_diff is not None:
start_diff_counts[start_diff] += 1
end_diff_counts[end_diff] += 1
# print(
# start_diff, end_diff,
# inferred_start_index, inferred_end_index,
# dataset_start_index, dataset_end_index)
_show_diff_counts('Start', start_diff_counts, settings)
_show_diff_counts('End', end_diff_counts, settings)
_plot_diff_counts(
training_name, epoch_num, start_diff_counts, end_diff_counts, settings)
def _get_diff(inferred_index, dataset_index, sample_rate):
if inferred_index is None:
return None
else:
sample_count = inferred_index - dataset_index
return int(round(1000 * sample_count / sample_rate))
def _show_diff_counts(name, counts, settings):
diffs = sorted(counts.keys())
# Calculate error mean and standard deviation, excluding outliers.
diff_sum = 0
diff_sum_2 = 0
inlier_count = 0
outlier_count = 0
for diff in diffs:
count = counts[diff]
if diff <= settings.max_evaluation_inlier_diff:
diff_sum += count * diff
diff_sum_2 += count * diff * diff
inlier_count += count
else:
outlier_count += count
diff_mean = diff_sum / inlier_count
diff_std = math.sqrt(diff_sum_2 / inlier_count - diff_mean * diff_mean)
print(f'{name} {inlier_count} {diff_mean} {diff_std} {outlier_count}')
def _plot_diff_counts(
training_name, epoch_num, start_diff_counts, end_diff_counts,
settings):
file_path = annotator_utils.get_evaluation_plot_file_path(
training_name, epoch_num)
with PdfPages(file_path) as pdf:
_, (start_axes, end_axes) = plt.subplots(2)
title = f'{training_name} Epoch {epoch_num} Call Start Errors'
_plot_diff_counts_aux(start_axes, title, start_diff_counts, settings)
title = f'{training_name} Epoch {epoch_num} Call End Errors'
_plot_diff_counts_aux(end_axes, title, end_diff_counts, settings)
plt.tight_layout()
pdf.savefig()
plt.close()
def _plot_diff_counts_aux(axes, title, counts, settings):
limit = settings.max_evaluation_inlier_diff
x = np.arange(-limit, limit + 1)
total_count = sum(counts.values())
y = np.array([counts[d] for d in x]) / total_count
axes.bar(x, y)
axes.set_title(title)
axes.set_xlabel('diff (ms)')
axes.set_ylabel('fraction')
def show_model_summary(training_name, epoch_num):
model_dir_path = annotator_utils.get_tensorflow_saved_model_dir_path(
training_name, epoch_num)
model = tf.keras.models.load_model(model_dir_path)
model.summary()
def test_get_spectrogram_percentiles():
# For convenience of specification, here first dimension is frequency,
# second is time. This tensor is transposed below, though, preceding
# the call to `_get_spectrogram_percentiles`.
gram = tf.constant([
[1.1, 0, 0, 89.9], # 0, 0, 1, 90
[80, 60, 40, 20], # 20, 40, 60, 80
[40, 80, 130, -10] # 0, 40, 80, 120
])
print('gram:')
print(gram)
# Transpose gram so it's a sequence of spectra (i.e. so that first
# dimension is time and second is frequency), as expected by
# `_get_spectrogram_percentiles`.
gram = tf.transpose(gram)
ranks = tf.constant([25, 50, 75, 100])
percentiles = dataset_utils._get_spectrogram_percentiles(gram, ranks)
print('gram percentiles:')
print(percentiles)
def test_create_waveform_dataset_from_tensors():
waveforms = [
np.array([0, 16384]),
np.array([0, 16384, 32768])]
dataset = dataset_utils.create_waveform_dataset_from_tensors(waveforms)
for waveform in dataset:
print(waveform)
def test_create_waveform_dataset_from_tfrecord_files(dataset_name, settings):
dir_path = annotator_utils.get_dataset_dir_path(
settings.clip_type, dataset_name)
dataset = dataset_utils.create_waveform_dataset_from_tfrecord_files(
dir_path)
show_waveform_dataset_stats(dataset, settings.waveform_sample_rate)
def show_waveform_dataset_stats(dataset, sample_rate):
example_count = 10000
dataset = dataset.take(example_count)
min_start_time = 1000000
max_start_time = 0
min_end_time = 1000000
max_end_time = 0
min_duration = 1000000
max_duration = 0
start_time = time.time()
for _, clip_start_index, clip_end_index, call_start_index, \
call_end_index, clip_id in dataset:
clip_start_index = clip_start_index.numpy()
clip_end_index = clip_end_index.numpy()
call_start_index = call_start_index.numpy()
call_end_index = call_end_index.numpy()
clip_id = clip_id.numpy()
call_start_time = int(round(1000 * call_start_index / sample_rate))
min_start_time = min(min_start_time, call_start_time)
max_start_time = max(max_start_time, call_start_time)
call_end_time = int(round(1000 * call_end_index / sample_rate))
min_end_time = min(min_end_time, call_end_time)
max_end_time = max(max_end_time, call_end_time)
call_duration = call_end_time - call_start_time
min_duration = min(min_duration, call_duration)
max_duration = max(max_duration, call_duration)
# print(
# clip_id, len(waveform), clip_start_index, clip_end_index,
# call_start_index, call_end_index, call_start_time, call_end_time,
# call_duration)
end_time = time.time()
delta_time = end_time - start_time
rate = example_count / delta_time
print(
f'Generated {example_count} examples in {delta_time} seconds, '
f'a rate of {rate} examples per second.')
print(f'call start time range ({min_start_time}, {max_start_time})')
print(f'call end time range ({min_end_time}, {max_end_time})')
print(f'call duration range ({min_duration}, {max_duration})')
def test_create_training_dataset(dataset_name, settings):
dir_path = annotator_utils.get_dataset_dir_path(
settings.clip_type, dataset_name)
dataset = dataset_utils.create_training_dataset(dir_path, settings)
show_training_dataset_stats(dataset)
def show_training_dataset_stats(dataset):
example_count = 10000
dataset = dataset.take(example_count)
start_time = time.time()
positive_count = 0
for _, label in dataset:
# print(f'gram {gram.shape} {label}')
if label == 1:
positive_count += 1
end_time = time.time()
delta_time = end_time - start_time
rate = example_count / delta_time
print(
f'Generated {example_count} examples in {delta_time} seconds, '
f'a rate of {rate} examples per second.')
percent = 100 * positive_count / example_count
print(f'{positive_count} examples, or {percent} percent, were positives.')
def test_create_inference_dataset(settings):
waveform_durations = [.5, .6]
sample_rate = settings.waveform_sample_rate
waveforms = [
_create_random_waveform(d, sample_rate)
for d in waveform_durations
]
dataset = dataset_utils.create_waveform_dataset_from_tensors(waveforms)
dataset = dataset_utils.create_inference_dataset(dataset, settings)
for forward_slices, backward_slices in dataset:
slice_count = forward_slices.shape[0]
assert(backward_slices.shape[0] == slice_count)
for i in range(slice_count):
forward_slice = forward_slices[i]
backward_slice = backward_slices[slice_count - 1 - i]
_compare_tensors(forward_slice, backward_slice)
def _compare_tensors(x, y):
"""
Checks that tensor x is the same as tensor y but with the first axis
reversed.
"""
assert(tf.reduce_all(x == tf.reverse(y, (0,))))
def _create_random_waveform(duration, sample_rate):
length = int(round(duration * sample_rate))
return np.random.randint(-32768, 32768, length)
def show_dataset_sizes(settings):
from tensorflow.data import TFRecordDataset
for dataset_name in ('Training', 'Validation'):
total_size = 0
print(f'Sizes of files in dataset "{dataset_name}":')
dir_path = annotator_utils.get_dataset_dir_path(
settings.clip_type, dataset_name)
file_paths = sorted(dir_path.glob('*.tfrecords'))
for file_path in file_paths:
dataset = TFRecordDataset([str(file_path)])
size = 0
for _ in dataset:
size += 1
print(f' {file_path.name}: {size}')
total_size += size
print(f'Total size of dataset "{dataset_name}": {total_size}')
if __name__ == '__main__':
main()
|
HaroldMills/Vesper
|
vesper/mpg_ranch/nfc_bounding_interval_annotator_1_0/train_bounding_interval_annotator.py
|
train_bounding_interval_annotator.py
|
py
| 16,756 |
python
|
en
|
code
| 47 |
github-code
|
6
|
36011157948
|
# encoding: utf8
# A class for reading task data from the Mosaiq database.
#
# Authors:
# Christoffer Lervåg
# Helse Møre og Romsdal HF
#
# Python 3.6
# Used for GUI debugging:
#from tkinter import *
#from tkinter import messagebox
from .database import Database
class Task:
# Returns a single task matching the given database id (TSK_ID) (or None if no match).
@classmethod
def find(cls, id):
instance = None
row = Database.fetch_one("SELECT * FROM QCLTask WHERE TSK_ID = '{}'".format(str(id)))
if row != None:
instance = cls(row)
return instance
# Creates a Task instance from a task database row.
def __init__(self, row):
# Database attributes:
self.tsk_id = row['TSK_ID']
self.created_date = row['Create_DtTm']
self.created_by_id = row['Create_ID']
self.edited_date = row['Edit_DtTm']
self.edited_by_id = row['Edit_ID']
self.description = row['Description'].rstrip() # If this crashes sometimes, we have to test if the string exists.
self.inactive = row['Inactive']
self.due_date = row['Due_DtTm']
self.responsible_id = row['Responsible_Staff_ID']
self.estimated_duration = row['Est_Dur']
self.elapsed_action = row['Elpsd_Action']
# Convenience attributes:
self.id = self.tsk_id
# Cache attributes:
self.instance_created_by = None
self.instance_edited_by = None
# The staff who created the task.
def created_by(self):
if not self.instance_created_by:
self.instance_created_by = Location.find(self.created_by_id)
return self.instance_created_by
# The staff who last edited the task.
def edited_by(self):
if not self.instance_edited_by:
self.instance_edited_by = Location.find(self.edited_by_id)
return self.instance_edited_by
|
dicom/raystation-scripts
|
mosaiq/task.py
|
task.py
|
py
| 1,787 |
python
|
en
|
code
| 40 |
github-code
|
6
|
30309719132
|
# Flask-Netpad
# version 1.0-alpha
# (C) Abstergo 2018
## netpad.py [MongoDB logic]
from flask_netpad.models import db, Note
# Custom Error
def errorCode(code=404, msg='Object Not Found :( '):
"""
Returns a custom error code in a dictionary
:param code: Error Code
:param msg: Message to return
:return: error
"""
data = dict()
data['code'] = code
data['error'] = msg
return data
# ==
def createDB(*args, **kwargs):
    try:
        return 'create db'
    except:
        return errorCode()
# == List Notes
def listNote(**kwargs):
try:
note = Note.objects(**kwargs)
return note
except:
return errorCode()
# == Pagination Note
def pageNote(page=1, per_page=40, **kwargs):
try:
note = Note.objects(deleted=False).paginate(page=page, per_page=per_page)
data = dict()
data['page_current'] = note.page
data['page_total'] = note.pages
data['per_page'] = note.per_page
data['total_items'] = note.total
data['data'] = note.items
return data
except:
note = errorCode()
return note
# == Read Note
def readNote(nid):
try:
note = Note.objects(id=nid)
return note
except:
return errorCode()
# == New / Create Note
def newNote(slug, content, title=None, **kwargs):
try:
note = Note(slug=slug, title=title, content=content, fat={**kwargs})
note.save()
return note
except:
return errorCode(404, 'Note not Created!')
# Update Note
def updateNote(nid, noteData):
try:
# BlogPost.objects(id=post.id).update(title='Example Post')
note=Note.objects(id=nid).get()
note.content= noteData.content
note.title= noteData.title
note.slug= noteData.slug
note.save()
return note
except:
return errorCode(404, 'Note not Found!')
# Delete / Remove Note
def delNote(nid):
# Soft Delete note
try:
note = Note.objects(id=nid).update(deleted=True)
data = dict()
data['total'] = note
data['id'] = nid
return data
except:
return errorCode()
|
dommert/Flask-Netpad
|
flask_netpad/netpad.py
|
netpad.py
|
py
| 2,175 |
python
|
en
|
code
| 0 |
github-code
|
6
|
11408466413
|
#!/usr/bin/env python
import sys
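# Strip a leading C-style /* ... */ comment block (typically a license header)
# from each file passed on the command line. Written for Python 2 (uses xrange).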
def main ():
if len(sys.argv) < 2:
return
for filepath in sys.argv[1:]:
process_file(filepath)
def process_file (filepath):
file_in = open(filepath, "rb")
content = file_in.read()
file_in.close()
if content[0] != '/' or content[1] != '*':
return
# Find the position of "*/".
n = len(content)
has_star = False
for i in xrange(2, n):
c = content[i]
if has_star and c == '/':
# Dump all after this position.
if i == n-2:
return
file_out = open(filepath, "wb")
file_out.write(content[i+2:])
file_out.close()
return
has_star = c == '*'
if __name__ == '__main__':
main()
|
kohei-us/ixion
|
misc/strip-license.py
|
strip-license.py
|
py
| 788 |
python
|
en
|
code
| 10 |
github-code
|
6
|
19606128436
|
import json
from typing import Dict, Generic, TypeVar, cast
import attr
import yaml
from psqlgml.types import GmlData
__all__ = [
"load_resource",
"load_by_resource",
"ResourceFile",
]
T = TypeVar("T")
def load_by_resource(resource_dir: str, resource_name: str) -> Dict[str, GmlData]:
"""Loads all resources reference within the input resource and returns a mapping
with each resource having an entry.
For example, if the main resource extends another resource which does not extend
anything, this function will return two entries, one for each resource
"""
schema_data: Dict[str, GmlData] = {}
resource_names = {resource_name}
while resource_names:
name = resource_names.pop()
f = ResourceFile[GmlData](f"{resource_dir}/{name}")
obj = f.read()
schema_data[name] = obj
sub_resource = obj.get("extends")
if sub_resource:
resource_names.add(sub_resource)
return schema_data
def load_resource(resource_folder: str, resource_name: str) -> GmlData:
"""Loads all data resource files into a single Gml Data instance"""
file_name = f"{resource_folder}/{resource_name}"
f = ResourceFile[GmlData](file_name)
rss: GmlData = f.read()
extended_resource = rss.pop("extends", None)
if not extended_resource:
return rss
extended = load_resource(resource_folder, extended_resource)
# merge
rss["nodes"] += extended["nodes"]
rss["edges"] += extended["edges"]
if "summary" not in rss:
rss["summary"] = extended.get("summary", {})
return rss
for summary in extended.get("summary", {}):
if summary in rss["summary"]:
rss["summary"][summary] += extended["summary"][summary]
else:
rss["summary"][summary] = extended["summary"][summary]
return rss
@attr.s(frozen=True, auto_attribs=True)
class ResourceFile(Generic[T]):
absolute_name: str
@property
def extension(self) -> str:
return self.absolute_name.split(".")[-1]
def read(self) -> T:
loaded: T
with open(self.absolute_name, "r") as r:
if self.extension == "json":
loaded = cast(T, json.loads(r.read()))
if self.extension in ["yml", "yaml"]:
loaded = cast(T, yaml.safe_load(r))
return loaded
|
kulgan/psqlgml
|
src/psqlgml/resources.py
|
resources.py
|
py
| 2,372 |
python
|
en
|
code
| 0 |
github-code
|
6
|
42572778156
|
from distutils.core import setup
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='django-view-extractor',
version='0.1.0',
packages=setuptools.find_packages(),
url='https://www.quickrelease.co.uk',
license='GNU GPLv3',
author='Nick Solly',
author_email='[email protected]',
description='Extract Django views, urls and permissions',
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=[
'tabulate==0.8.6',
],
)
|
QuickRelease/django-view-extractor
|
setup.py
|
setup.py
|
py
| 577 |
python
|
en
|
code
| 1 |
github-code
|
6
|
40466761016
|
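# BOJ 2606 "virus": given n computers and m network links, count how many
# computers can be infected starting from computer 1. Uses an iterative DFS
# over an adjacency list; the starting computer is excluded, hence cnt - 1.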
n = int(input())
m = int(input())
graph = [[] for _ in range(n + 1)]
cnt = 0
visited = [0] * (n + 1)
for i in range(m):
a,b = map(int, input().split())
graph[a].append(b)
graph[b].append(a)
for i in graph:
i.sort(reverse = True)
def dfs(v,cnt):
stack = [v]
while stack:
v = stack.pop()
if visited[v] == 0:
visited[v] = 1
cnt += 1
stack += graph[v]
return cnt-1
print(dfs(1,cnt))
|
Cho-El/coding-test-practice
|
백준 문제/DFS/2606_바이러스.py
|
2606_바이러스.py
|
py
| 474 |
python
|
en
|
code
| 0 |
github-code
|
6
|
13109821356
|
import spacy
nlp = spacy.load('en_core_web_sm')
example1 = nlp("Animals")
for token in example1:
print(token.lemma_)
print()
example2 = nlp("I am god")
for token in example2:
print(token.lemma_)
|
39xdgy/Interactive_chatbots
|
03_lemmatization.py
|
03_lemmatization.py
|
py
| 207 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27925991770
|
"""
-*- coding: utf-8 -*-
@author: socratio
@inspiration: drew original inspiration from cleartonic twitchtriviabot. Almost nothing left in this code from that project.
"""
import json
from twitchio import websocket
from twitchio.ext import commands
import yaml
import asyncio
import os
import random
class ChatBot(commands.Bot):
def __init__(self):
#load the auth and connect to twitch
with open(os.path.join(os.getcwd(),'config','auth_config.yml')) as auth:
self.auth = yaml.safe_load(auth)
super().__init__(irc_token=f"{self.auth['pass']}", client_id='...', nick=f"{self.auth['nick']}", prefix='!',initial_channels=[f"{self.auth['chan']}"])
#load the trivia configuration
with open(os.path.join(os.getcwd(),'config','trivia_config.yml')) as config:
self.trivia_config = yaml.safe_load(config)
#create admins array, empty players and questions arrays, boolean variables, and empty answer messages object
self.admins = [i.strip() for i in self.trivia_config['admins'].split(",")]
self.players = []
self.questionlist = []
self.active_game = False
self.questionisactive = False
self.active_question = False
self.scoringopen = False
self.answermessages = {}
#load the scoreboard, set the list of past winners, increment the game number
self.refresh_scores()
try:
self.pastwinners = self.scores[f'Season {self.trivia_config["season"]}']['shirtwinners']
except:
self.scores[f'Season {self.trivia_config["season"]}'] = {"gamesplayed":0, "shirtwinners":[], "scoreboard":{}}
self.pastwinners = self.scores[f'Season {self.trivia_config["season"]}']['shirtwinners']
self.game_number = self.scores[f'Season {self.trivia_config["season"]}']['gamesplayed']+1
#load the questions and populate the questions array
with open(os.path.join(os.getcwd(),'config','triviaset.json')) as self.questions:
self.questions = json.load(self.questions)
for question in self.questions.items():
self.questionlist.append(Question(question))
#populate the players array
for player in self.scores[f'Season {self.trivia_config["season"]}']['scoreboard'].items():
self.players.append(Player(player))
#updates the scoreboard dict object
def refresh_scores(self):
with open(os.path.join(os.getcwd(),'config','scores',"scoreboard.json")) as scores:
self.scores = json.load(scores)
#clears json of scores for this game, sorts and adds scores back to json, resulting in sorted scores every time. Also saves scores to scoreboard file
def commit_scores(self):
self.scores[f'Season {self.trivia_config["season"]}'][f'Game {self.game_number}'] = {}
self.scores[f'Season {self.trivia_config["season"]}']['scoreboard'] = {}
for player in sorted(self.players, key=lambda player:player.seasonpoints, reverse=True):
self.scores[f'Season {self.trivia_config["season"]}']['scoreboard'][player.name] = player.seasonpoints
for player in sorted(self.players, key=lambda player:player.gamepoints, reverse=True):
self.scores[f'Season {self.trivia_config["season"]}'][f'Game {self.game_number}'][player.name] = player.gamepoints
with open(os.path.join(os.getcwd(),'config','scores',"scoreboard.json"),'w') as outfile:
json.dump(self.scores, outfile, indent=4)
#Broadcast ready state to twitch channel
async def event_ready(self):
print(f'Ready | {self.nick}')
ws = bot._ws
await ws.send_privmsg(self.initial_channels[0],"I have indeed been uploaded, sir.")
#major message reading function
async def event_message(self, message):
if message.author != self.nick:
print(f'{message.author.name}: {message.content}')
await self.handle_commands(message)
if self.scoringopen == True and not message.content.startswith('!'):
if message.author.name in self.answermessages:
del self.answermessages[message.author.name]
self.answermessages[message.author.name] = message.content
@commands.command(name='test')
async def test(self, ctx):
await ctx.send(f'Hello {ctx.author.name}!')
#TRIVIA COMMANDS AND PROCEDURES
@commands.command(name='start')
#!Start command starts the trivia game
async def start(self, ctx):
if ctx.author.name in self.admins and not self.active_game:
self.active_game = True
print('Starting Game.')
await ctx.send("Game starts in 15 seconds. Watch the chat for the question. Good luck!")
await asyncio.sleep(15)
if self.active_game:
await self.callquestion()
@commands.command(name='next')
#!next starts the process of asking the next question after 10 seconds and scoring after 20 seconds
async def nextq(self, ctx):
if ctx.author.name in self.admins and not self.questionisactive:
self.questionisactive = True
print('Received call for next question.')
await ctx.send("Next question coming in 10 seconds. Keep an eye on the chat!")
await asyncio.sleep(10)
if self.active_game:
await self.callquestion()
else:
print('Received call for next question, but an active question exists or it is not an admin. Ignoring call.')
@commands.command(name='end')
#!end ends this game of trivia, commits scores to json, and refreshes the scores
async def endtrivia(self, ctx):
if ctx.author.name in self.admins and self.active_game:
print("Ending game.")
self.scoringopen = False
self.active_game = False
self.active_question = False
if any(i.gamepoints > 0 for i in self.players):
for player in sorted(self.players, key=lambda x:x.gamepoints, reverse=True):
if player.name not in self.scores[f'Season {self.trivia_config["season"]}']['shirtwinners']:
self.scores[f'Season {self.trivia_config["season"]}']['shirtwinners'].append(player.name)
self.pastwinners.append(player.name)
break
self.scores[f'Season {self.trivia_config["season"]}']['gamesplayed'] = self.game_number
await ctx.send(f"Ending this game of trivia. Congratulations to {self.pastwinners[-1]} on the new shirt! I hope everyone had fun!")
self.commit_scores()
self.refresh_scores()
@commands.command(name='bonus')
#!bonus reads the message, finds the user targeted for bonus points, finds the point value of the bonus, assigns the extra points if the player exists or creates them if not, and refreshes the scores
async def bonus(self, ctx):
if ctx.author.name in self.admins:
print(f"Received call for bonus points from {ctx.author.name}.")
bonustarget = ctx.message.content.split()[1].lower()
bonuspoints = int(ctx.message.content.split()[2])
if any(bonustarget == player.name for player in self.players):
for player in self.players:
if player.name == bonustarget:
player.gamepoints += int(bonuspoints)
returnstr = player.gamepoints
else:
print(f'Player {bonustarget} does not exist. Creating.')
user = Player(bonustarget,bonuspoints)
self.players.append(user)
self.commit_scores()
self.refresh_scores()
await ctx.send(f'Player {bonustarget} received {bonuspoints} bonus points. Their new total is {returnstr} points.')
@commands.command(name='lasttop5')
#!lasttop5 calls the top 5 scores from the last game played
async def lasttop5(self, ctx):
if ctx.author.name in self.admins:
returnstr = "TOP 5 SCORES FOR THE LAST GAME:\t"
lastgameno = self.scores[f'Season {self.trivia_config["season"]}']['gamesplayed']
lastgamescores = self.scores[f'Season {self.trivia_config["season"]}'][f'Game {lastgameno}']
for score in sorted(lastgamescores.items(), key=lambda x:x[1], reverse=True)[:5]:
returnstr += f"{score[0]}: {score[1]} "
await ctx.send(returnstr)
async def callquestion(self):
self.active_question = self.questionlist.pop(0)
self.scoringopen = True
self.answermessages = {}
ws = bot._ws
await ws.send_privmsg(self.initial_channels[0],f"Question {self.active_question.questionno}: {self.active_question.question}")
await asyncio.sleep(20)
self.scoringopen = False
await self.scorequestion()
self.questionisactive = False
async def scorequestion(self):
self.scoringopen = False
ws = bot._ws
self.point_dict = {}
returnstr = f"The answer was **{self.active_question.answers[0]}**.\t"
#check that all players that answered exist as Player objects
for name in self.answermessages.keys():
if not any(player.name == name for player in self.players):
print(f'Player {name} does not exist. Creating.')
user = Player(name)
self.players.append(user)
#find all the correct answers, building the list of points as it goes
for answer in self.answermessages.items():
for proof in self.active_question.answers:
if answer[1].lower() == proof.lower():
self.point_dict[answer[0]] = 0
break
else:
with open(os.path.join(os.getcwd(),"config","aliases.json")) as aliases:
aliases = json.load(aliases)
for name in aliases.items():
if answer[1].lower() in name[1] and name[0] == self.active_question.answers[0]:
self.point_dict[answer[0]] = 0
for proof in self.active_question.deepcut:
if answer[1].lower() == proof.lower():
self.point_dict[answer[0]] = 3
#check if only 1 person answered, if so, award 3 bonus points
for name,points in self.point_dict.items():
if len(self.point_dict) == 1:
self.point_dict[name] += 3
if 1 < len(self.point_dict) < 4:
self.point_dict[name] += 1
#award 1 point for everyone, an extra point for the first 14, and another point for the first 6
idx = 0
for name,points in self.point_dict.items():
if idx == 0:
returnstr += f"{name} was the first to answer correctly."
if idx < 6:
self.point_dict[name] += 1
if idx < 20:
self.point_dict[name] += 1
self.point_dict[name] += 1
idx += 1
#update the player object with the new points
for player in self.players:
if player.name == name:
player.gamepoints += self.point_dict[name]
player.seasonpoints += self.point_dict[name]
self.commit_scores()
await ws.send_privmsg(self.initial_channels[0],returnstr)
#CHAT RESPONSES AND COMMAND FUNCTIONS
@commands.command(name='score')
#!score finds the score of the user sending the message and sends it in chat
async def score(self, ctx):
print(f'Received a score check for {ctx.author.name}')
if any(player.name == ctx.author.name for player in self.players):
for player in self.players:
if player.name == ctx.author.name:
print(f'Found player {player.name} with {player.gamepoints} game points and {player.seasonpoints} season points.')
user = player
if self.active_game:
scorestr = f"User {player.name} has {player.gamepoints} points in this game and {player.seasonpoints} for the season."
else:
scorestr = f"User {player.name} has {player.seasonpoints} points in this season."
break
else:
print(f'Player {ctx.author.name} does not exist. Creating.')
user = Player(ctx.author.name)
self.players.append(user)
scorestr = f"User {user.name} has 0 points. Welcome to trivia!"
await ctx.send(scorestr)
@commands.command(name='raffle')
#!raffle finds the raffle ticket count of the user sending the message and sends it in chat
async def raffle(self, ctx):
print(f'Received a raffle check for {ctx.author.name}')
if any(player.name == ctx.author.name for player in self.players):
for player in self.players:
if player.name == ctx.author.name:
rafflecount = int(player.seasonpoints/30)
print(f'Found player {player.name} with {player.gamepoints} game points, {player.seasonpoints} season points, and {rafflecount} raffle tickets.')
user = player
if not self.active_game:
scorestr = f"User {player.name} has {player.seasonpoints} for the season resulting in {rafflecount} raffle tickets."
break
else:
print(f'Player {ctx.author.name} does not exist. Creating.')
user = Player(ctx.author.name)
self.players.append(user)
scorestr = f"User {user.name} has 0 points and no raffle tickets. Welcome to trivia!"
await ctx.send(scorestr)
@commands.command(name='top5')
#!top5 returns the top5 scores for the game if a game is active or for the season if a game is not active
async def top5(self, ctx):
if ctx.author.name in self.admins:
returnstr = 'TOP 5: '
print(f'Received top 5 check from {ctx.author.name}.')
if self.active_game:
self.refresh_scores()
for i in sorted(self.players, key=lambda player:player.gamepoints, reverse=True)[:5]:
returnstr += (f'{i.name}: {i.gamepoints}\t')
else:
returnstr = "THIS SEASON'S TOP 5: "
for i in sorted(self.players, key=lambda player:player.seasonpoints, reverse=True)[:5]:
returnstr += (f'{i.name}: {i.seasonpoints}\t')
await ctx.send(returnstr)
@commands.command(name='topless')
#!topless returns the top 5 player scores for players who have not yet won a shirt as defined in pastwinners
async def topless(self, ctx):
if ctx.author.name in self.admins:
returnstr = 'TOP 5 SHIRTLESS THIS '
self.topless = []
print(f'Received top 5 shirtless check from {ctx.author.name}.')
self.refresh_scores()
if self.active_game:
returnstr += 'GAME: '
for player in sorted(self.players, key=lambda x:x.gamepoints, reverse=True):
if player.name not in self.pastwinners and len(self.topless) < 5:
self.topless.append(player)
returnstr += f'{player.name}: {player.gamepoints} '
else:
continue
else:
returnstr += 'SEASON: '
for player in self.scores[f'Season {self.trivia_config["season"]}']['scoreboard'].items():
if player[0] not in self.pastwinners and len(self.topless) < 5:
self.topless.append(player[0])
returnstr += f'{player[0]}: {player[1]} '
else:
continue
await ctx.send(returnstr)
@commands.command(name='stop')
#!stop forces the chatbot to shut down
async def stop(self, ctx):
if ctx.author.name in self.admins:
print(f'Received stop command from {ctx.author.name}.')
if self.active_game:
self.active_game = False
await ctx.send('I have been commanded to stop. The Vision trivia bot is shutting down. See you next time!')
await bot._ws.teardown()
@commands.command(name='rafflewinner')
#!rafflewinner generates a list of raffle tickets based on a person's total points/30 and selects a random winner
async def rafflewinner(self, ctx):
if ctx.author.name in self.admins:
await ctx.send('This is the moment you have ALL been waiting for. The winner of the biggest prize in Stranded Panda Trivia history is...*shuffles raffle tickets for 10 seconds*')
await asyncio.sleep(10)
self.refresh_scores()
with open(os.path.join(os.getcwd(),'config','scores',"scoreboard.json")) as scoreboard:
scoreboard = json.load(scoreboard)
scoreboard = scoreboard[f'Season {self.trivia_config["season"]}']['scoreboard']
rafflelist = []
for player in scoreboard.items():
ticketcount = int(player[1]/30)
for count in range(0,ticketcount):
rafflelist.append(player[0])
drawingwinner = random.choice(rafflelist)
await ctx.send("The hosts now have the raffle winner in their debatably capable hands...")
print(f'The raffle winner is {drawingwinner}')
@commands.command(name='seasonwinner')
#!seasonwinner takes the top 14 scores for the season, adds them together, and produces the top 10
async def seasonwinner(self, ctx):
if ctx.author.name in self.admins:
returnstr = "This season's top 10: "
scorelists = {}
sortedlists = {}
finalscores = {}
with open(os.path.join(os.getcwd(),'config','scores',"scoreboard.json")) as scoreboard:
scoreboard = json.load(scoreboard)
for game in scoreboard[f'Season {self.trivia_config["season"]}'].items():
if (game[0].startswith("Game ")):
for player in game[1].items():
if player[0] not in scorelists:
scorelists[f'{player[0]}'] = []
scorelists[f'{player[0]}'].append(player[1])
for scores in scorelists.items():
sortedlists[f'{scores[0]}'] = sorted(scores[1],reverse=True)
for player in sortedlists.items():
finalscores[f'{player[0]}'] = sum(player[1][0:14])
scoreboard = {}
for player in sorted(finalscores.items(), key=lambda player:player[1], reverse=True):
scoreboard[player[0]] = player[1]
for score in sorted(scoreboard.items(), key=lambda x:x[1], reverse=True)[:10]:
returnstr += f"{score[0]}: {score[1]} "
overallwinner = sorted(scoreboard.items(), key=lambda x:x[1], reverse=True)[0]
await ctx.send("Calculating the season's winner...removing the bottom 2 scores...swapping the bonus Halloween week...")
await asyncio.sleep(5)
await ctx.send(f'The winner of this season of Stranded Panda Twitch Trivia is... {overallwinner[0]} with {overallwinner[1]} points!!! Congratulations {overallwinner[0]}!!!')
await asyncio.sleep(5)
await ctx.send(returnstr)
@commands.command(name='rescore')
#!rescore removes the most recently awarded points and rescores using the most recently submitted answer list.
async def rescore(self, ctx):
if ctx.author.name in self.admins and not self.questionisactive and not self.scoringopen:
print(f"Received call for rescore from {ctx.author.name}.")
#update the player objects with the new points
for name,points in self.point_dict.items():
for player in self.players:
if player.name == name:
player.gamepoints -= points
player.seasonpoints -= points
self.commit_scores()
await self.scorequestion()
await ctx.send("Rescoring complete.")
class Question(object):
#Each question will be an object to be added to a list of objects
def __init__(self, question):
badap = '’'
str_ap = "'"
self.question = str(question[1]['Question'].replace(badap,str_ap))
self.answers = question[1]['Answers']
self.deepcut = question[1]['DeepCut']
self.questionno = question[0]
class Player(object):
#This establishes players in the current game
def __init__(self,playername, pointstart=0):
#if the playername variable is not a string, it's going to be a dictionary object with existing points totals.
#The playername variable will be a string if coming from a !score command and a dictionary object if coming from bot initialization
if not isinstance(playername, str):
self.seasonpoints = playername[1]
self.name = playername[0]
else:
self.seasonpoints = 0
self.name = playername
self.gamepoints = pointstart
if __name__ == '__main__':
bot = ChatBot()
bot.run()
|
Socratia/StrandedPandaTrivia
|
strandedpandatriviabot.py
|
strandedpandatriviabot.py
|
py
| 21,771 |
python
|
en
|
code
| 0 |
github-code
|
6
|
27919143032
|
# Snake water and gun game
import random
def game(comp, User):
if comp == User:
print("Draw, both chossed same!!")
elif comp == 's':
if User == 'w':
print("Snake driked water, Comp Win!!")
elif User == 'g':
print("You Killed Snake, Win!!")
elif comp == 'w':
if User == 's':
print("Snake driked water,You Win!!")
elif User == 'g':
print("Gun Sinked in Water,Comp Win!!")
elif comp == 'g':
if User == 's':
print("You were killed, Comp Win!!")
elif User == 'w':
print("Gun Sinked in Water,You Win!!")
randNo = random.randint(1, 3) # between 1-3 numbers will be generated
print("Comp Turn: Snake(s) Water(w) or Gun(g)?: ")
if randNo == 1:
    comp = 's'
elif randNo == 2:
    comp = 'w'
elif randNo == 3:
    comp = 'g'
User = input("Your's Turn: Snake(s) Water(w) or Gun(g)?: ")
print(f"You choosed: {User}\n")
print(f"Comp choosed: {comp}\n")
game(comp, User)
|
satyam756/Snke-Water-Gun-Game
|
main.py
|
main.py
|
py
| 1,010 |
python
|
en
|
code
| 0 |
github-code
|
6
|