| index (int64) | repo_name (string) | branch_name (string) | path (string) | content (string) | import_graph (string) |
|---|---|---|---|---|---|
36
|
Yuliashka/Snake-Game
|
refs/heads/main
|
/food.py
|
from turtle import Turtle
import random
# we want this Food class to inherit from the Turtle class, so it has all the capabilities of
# the Turtle class plus the specific behaviour we add here
class Food(Turtle):
# creating initializer for this class
def __init__(self):
# we inherit things from the super class:
super().__init__()
# below we are using methods from Turtle class:
self.shape("circle")
self.penup()
# the normal size is 20x20; stretching length and width by 0.5 gives a 10x10 circle
self.shapesize(stretch_len=0.5, stretch_wid=0.5)
self.color("blue")
self.speed("fastest")
# call refresh() so the food starts at a random location
self.refresh()
def refresh(self):
# our screen is 600x600, so coordinates run from -300 to 300;
# we place the food between -280 and 280 to keep it fully on screen:
random_x = random.randint(-280, 280)
random_y = random.randint(-280, 280)
# telling our food to go to random_y and random_x:
self.goto(random_x, random_y)
# All of these methods run as soon as we create a new object
# The Food object is instantiated in main.py
|
{"/main.py": ["/snake.py", "/food.py", "/scoreboard.py"]}
|
37
|
Yuliashka/Snake-Game
|
refs/heads/main
|
/snake.py
|
from turtle import Turtle
STARTING_POSITIONS = [(0, 0), (-20, 0), (-40, 0)]
MOVE_DISTANCE = 20
UP = 90
DOWN = 270
RIGHT = 0
LEFT = 180
class Snake:
# The code here is going to determine what should happen when we initialize a new snake object
def __init__(self):
# below we create a new attribute for our class
self.segments = []
# We create a snake:
self.create_snake()
self.head = self.segments[0]
# CREATING SNAKE (2 functions)
def create_snake(self):
for position in STARTING_POSITIONS:
# call add_segment() with each starting position as we loop through them
self.add_segment(position)
def add_segment(self, position):
new_segment = Turtle("square")
new_segment.color("white")
new_segment.penup()
new_segment.goto(position)
self.segments.append(new_segment)
# Extending the snake by one segment
def extend(self):
# segments[-1] is the last segment of the snake; its position() (a Turtle
# method) gives its coordinates, and we add the new segment at that spot
self.add_segment(self.segments[-1].position())
# Moving the snake forward by one step
def move(self):
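# Work backwards from the tail: each segment moves to where the segment
# in front of it currently is, and finally the head moves forward.
# Iterating tail-first means a segment's old position is read before it
# gets overwritten.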
for seg_num in range(len(self.segments)-1, 0, -1):
new_x = self.segments[seg_num - 1].xcor()
new_y = self.segments[seg_num - 1].ycor()
self.segments[seg_num].goto(new_x, new_y)
self.head.forward(MOVE_DISTANCE)
def up(self):
# if the current heading points down, the snake can't turn up,
# because the snake can't go backwards into itself
if self.head.heading() != DOWN:
self.head.setheading(UP)
def down(self):
if self.head.heading() != UP:
self.head.setheading(DOWN)
def left(self):
if self.head.heading() != RIGHT:
self.head.setheading(LEFT)
def right(self):
if self.head.heading() != LEFT:
self.head.setheading(RIGHT)
|
{"/main.py": ["/snake.py", "/food.py", "/scoreboard.py"]}
|
38
|
Yuliashka/Snake-Game
|
refs/heads/main
|
/main.py
|
from turtle import Screen
import time
from snake import Snake
from food import Food
from scoreboard import Score
# SETTING UP THE SCREEN:
screen = Screen()
screen.setup(width=600, height=600)
screen.bgcolor("black")
screen.title("My Snake Game")
# turn off the screen tracer so the screen only redraws when we call update()
screen.tracer(0)
# CREATING A SNAKE OBJECT:
snake = Snake()
# CREATING A FOOD OBJECT:
food = Food()
# CREATING A SCORE OBJECT:
score = Score()
# CREATING A KEY CONTROL:
screen.listen()
# these handlers (snake.up, snake.down, snake.left, snake.right) are defined in the Snake class (UP = 90, DOWN = 270, LEFT = 180, RIGHT = 0)
screen.onkey(key="Up", fun=snake.up)
screen.onkey(key="Down", fun=snake.down)
screen.onkey(key="Left", fun=snake.left)
screen.onkey(key="Right", fun=snake.right)
game_is_on = True
while game_is_on:
# while the game is on, redraw the screen and then pause for 0.1 seconds
# before the next frame:
screen.update()
time.sleep(0.1)
# every time the screen refreshes we get the snake to move forwards by one step
snake.move()
# DETECT COLLISION WITH THE FOOD
# if the snake head is closer than 15 px to the food, they have collided
if snake.head.distance(food) < 15:
food.refresh()
snake.extend()
print("nom nom nom")
# when the snake collides with the food we increase the score:
score.increase_score()
# # DETECT COLLISION WITH THE TAIL METHOD 1:
# # we can loop through our list of segments in the snake
# for segment in snake.segments:
# # if head has distance from any segment in segments list less than 10 px - that a collision
# # if the head collides with any segment in the tail: trigger GAME OVER
# # the first segment is the head so we should exclude it from the list of segments
# if segment == snake.head:
# pass
# elif snake.head.distance(segment) < 10:
# game_is_on = False
# score.game_over()
# DETECT COLLISION WITH THE TAIL METHOD 2 SLICING:
# loop through the snake's segments using Python slicing:
# segments[1:] takes every segment except the first one, which is the head
for segment in snake.segments[1:]:
# if the head is closer than 10 px to any tail segment, that's a collision
# if the head collides with any segment in the tail: trigger GAME OVER
if snake.head.distance(segment) < 10:
game_is_on = False
score.game_over()
# DETECT COLLISION WITH THE WALL
if snake.head.xcor() >280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:
score.game_over()
game_is_on = False
screen.exitonclick()
|
{"/main.py": ["/snake.py", "/food.py", "/scoreboard.py"]}
|
39
|
Yuliashka/Snake-Game
|
refs/heads/main
|
/scoreboard.py
|
from turtle import Turtle
ALIGNMENT = "center"
FONT = ("Arial", 18, "normal")
class Score(Turtle):
def __init__(self):
super().__init__()
self.score = 0
self.color("white")
self.penup()
self.goto(0, 270)
self.write(f"Current score: {self.score}", align="center", font=("Arial", 18, "normal"))
self.hideturtle()
self.update_score()
def update_score(self):
self.write(f"Current score: {self.score}", align="center", font=("Arial", 18, "normal"))
def game_over(self):
self.goto(0, 0)
self.write("GAME OVER", align=ALIGMENT, font=FONT)
def increase_score(self):
self.score += 1
# to clear the previous score before we update:
self.clear()
self.update_score()
|
{"/main.py": ["/snake.py", "/food.py", "/scoreboard.py"]}
|
46
|
marcin-mulawa/Water-Sort-Puzzle-Bot
|
refs/heads/main
|
/loading_phone.py
|
import numpy as np
import cv2
import imutils
picture = 'puzzle.jpg'
def load_transform_img(picture):
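# Pipeline: mask the region of the screenshot that contains the tubes,
# convert to grayscale and blur, detect edges with Canny, fill the detected
# contours and run Canny again to get one clean outline per tube, then sample
# small rectangles inside each tube to read the liquid colours as grayscale
# intensities.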
image = cv2.imread(picture)
image = imutils.resize(image, height=800)
org = image.copy()
#cv2.imshow('orginal', image)
mask = np.zeros(image.shape[:2], dtype = "uint8")
cv2.rectangle(mask, (15, 150), (440, 700), 255, -1)
#cv2.imshow("Mask", mask)
image = cv2.bitwise_and(image, image, mask = mask)
#cv2.imshow("Applying the Mask", image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#cv2.imshow('image', image)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
edged = cv2.Canny(blurred, 140, 230)
#cv2.imshow("Canny", edged)
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print(len(cnts))
cv2.fillPoly(edged, pts =cnts, color=(255,255,255))
#cv2.imshow('filled', edged)
fedged = cv2.Canny(edged, 140, 230)
#cv2.imshow("fedged", fedged)
(cnts, _) = cv2.findContours(fedged.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
boxes = fedged.copy()
#cv2.drawContours(boxes, cnts, 10, (100 , 200, 100), 2)
#cv2.imshow("Boxes", boxes)
image = cv2.bitwise_and(org, org, mask = edged)
#cv2.imshow("Applying the Mask2", image)
puzzlelist = []
for (i, c) in enumerate(cnts):
(x, y, w, h) = cv2.boundingRect(c)
print("Box #{}".format(i + 1))
box = org[y:y + h, x:x + w]
cv2.imwrite(f'temp/box{i+1}.jpg',box)
#cv2.imshow("Box", box)
gray = cv2.cvtColor(box, cv2.COLOR_BGR2GRAY)
#cv2.imshow("gray", gray)
mask = np.zeros(gray.shape[:2], dtype = "uint8")
y1,y2 = 35, 50
for i in range(4):
cv2.rectangle(mask, (15, y1), (37, y2), 255, -1)
y1,y2 = y1+40, y2+40
#cv2.imshow("Mask2 ", mask)
masked = cv2.bitwise_and(gray, gray, mask = mask)
y1,y2 = 35, 50
temp = []
for i in range(4):
value = masked[y1:y2,15:37]
#cv2.imshow(f'val{i}',value)
max_val = max(value.flatten())
if max_val >= 45:
temp.append(max_val)
y1,y2 = y1+40, y2+40
puzzlelist.append(temp[::-1])
#cv2.waitKey(0)
return puzzlelist[::-1] , len(cnts)
|
{"/auto_puzzle.py": ["/solver.py"], "/solver.py": ["/loading_pc.py"]}
|
47
|
marcin-mulawa/Water-Sort-Puzzle-Bot
|
refs/heads/main
|
/loading_pc.py
|
import numpy as np
import cv2
import imutils
picture = 'puzzle.jpg'
def load_transform_img(picture):
image = cv2.imread(picture)
#image = imutils.resize(image, height=800)
org = image.copy()
#cv2.imshow('orginal', image)
mask = np.zeros(image.shape[:2], dtype = "uint8")
cv2.rectangle(mask, (680, 260), (1160, 910), 255, -1)
#cv2.imshow("Mask", mask)
image = cv2.bitwise_and(image, image, mask = mask)
#cv2.imshow("Applying the Mask", image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#cv2.imshow('image', image)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
edged = cv2.Canny(blurred, 140, 230)
#cv2.imshow("Canny", edged)
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#print(len(cnts))
cv2.fillPoly(edged, pts =cnts, color=(255,255,255))
#cv2.imshow('filled', edged)
fedged = cv2.Canny(edged, 140, 230)
#cv2.imshow("fedged", fedged)
(cnts, _) = cv2.findContours(fedged.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# boxes = fedged.copy()
# cv2.drawContours(boxes, cnts, 10, (100 , 200, 100), 2)
# cv2.imshow("Boxes", boxes)
image = cv2.bitwise_and(org, org, mask = edged)
#cv2.imshow("Applying the Mask2", image)
puzzlelist = []
boxes_positon = []
for (i, c) in enumerate(cnts):
(x, y, w, h) = cv2.boundingRect(c)
#print("Box #{}".format(i + 1))
box = org[y:y + h, x:x + w]
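# centre of this tube's bounding box; since the input is a full-screen
# screenshot, these are screen coordinates that auto_puzzle.py later clicks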
boxes_positon.append( ( (x+x+w)/2, (y+y+h)/2 ) )
cv2.imwrite(f'temp/box{i+1}.jpg',box)
#cv2.imshow("Box", box)
gray = cv2.cvtColor(box, cv2.COLOR_BGR2GRAY)
#cv2.imshow("gray", gray)
mask = np.zeros(gray.shape[:2], dtype = "uint8")
y1,y2 = 45, 60
for i in range(4):
cv2.rectangle(mask, (15, y1), (37, y2), 255, -1)
y1,y2 = y1+45, y2+45
#cv2.imshow("Mask2 ", mask)
masked = cv2.bitwise_and(gray, gray, mask = mask)
#cv2.imshow('Masked', masked)
y1,y2 = 45, 60
temp = []
for i in range(4):
value = masked[y1:y2,15:37]
#cv2.imshow(f'val{i}',value)
max_val = max(value.flatten())
if max_val >= 45:
temp.append(max_val)
y1,y2 = y1+45, y2+45
puzzlelist.append(temp[::-1])
#cv2.waitKey(0)
print(f'Starting position: {puzzlelist[::-1]}\n')
print(f'Box positions: {boxes_positon[::-1]}\n')
return puzzlelist[::-1], boxes_positon[::-1], len(cnts)
if __name__ == '__main__':
answer, boxes_positon, boxes = load_transform_img('level/screen.jpg')
print(answer)
|
{"/auto_puzzle.py": ["/solver.py"], "/solver.py": ["/loading_pc.py"]}
|
48
|
marcin-mulawa/Water-Sort-Puzzle-Bot
|
refs/heads/main
|
/auto_puzzle.py
|
import pyautogui as pya
import solver
import time
import glob
import os
import numpy as np
import cv2
import shutil
path = os.getcwd()
path1 = path + r'/temp'
path2 = path +r'/level'
try:
shutil.rmtree(path1)
except:
pass
try:
os.mkdir('temp')
except:
pass
try:
os.mkdir('level')
except:
pass
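# Automation flow: locate the BlueStacks window via template matching (the
# static/*.jpg files are assumed to be screenshots of the relevant buttons),
# open the Water Sort Puzzle game, then for each level: take a screenshot,
# ask the solver for a move list, and click the tube positions it returns.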
bluestacks = pya.locateCenterOnScreen('static/bluestacks.jpg', confidence=.9)
print(bluestacks)
pya.click(bluestacks)
time.sleep(3)
full = pya.locateCenterOnScreen('static/full.jpg', confidence=.8)
pya.click(full)
time.sleep(15)
mojeGry = pya.locateCenterOnScreen('static/mojegry.jpg', confidence=.8)
print(mojeGry)
if mojeGry:
pya.click(mojeGry)
time.sleep(2)
game = pya.locateCenterOnScreen('static/watersort.jpg', confidence=.5)
print(game)
if game:
pya.click(game)
time.sleep(6)
record = pya.locateCenterOnScreen('static/record.jpg', confidence=.8)
for m in range(4):
pya.click(record)
time.sleep(4.5)
for k in range(10):
screenshoot = pya.screenshot()
screenshoot = cv2.cvtColor(np.array(screenshoot), cv2.COLOR_RGB2BGR)
cv2.imwrite("level/screen.jpg", screenshoot)
moves, boxes_position = solver.game_loop(solver.random_agent, "level/screen.jpg") # game_loop expects an agent; use the random agent defined in solver.py
print(f'Steps to solve level: {len(moves)}')
print(moves)
for i,j in moves:
pya.click(boxes_position[i])
time.sleep(0.3)
pya.click(boxes_position[j])
pya.sleep(2.5)
next_level = pya.locateCenterOnScreen('static/next.jpg', confidence=.7)
pya.click(next_level)
time.sleep(3)
x_location = pya.locateCenterOnScreen('static/x.jpg', confidence=.7)
if x_location:
pya.click(x_location)
time.sleep(2)
x_location = pya.locateCenterOnScreen('static/x.jpg', confidence=.7)
if x_location:
pya.click(x_location)
time.sleep(2)
pya.click(record)
time.sleep(2)
|
{"/auto_puzzle.py": ["/solver.py"], "/solver.py": ["/loading_pc.py"]}
|
49
|
marcin-mulawa/Water-Sort-Puzzle-Bot
|
refs/heads/main
|
/solver.py
|
from collections import deque
import random
import copy
import sys
import loading_pc
import os
def move(new_list, from_, to):
temp = new_list[from_].pop()
for _i in range(0,4):
if len(new_list[from_])>0 and abs(int(temp) - int(new_list[from_][-1]))<3 and len(new_list[to])<3:
temp = new_list[from_].pop()
new_list[to].append(temp)
new_list[to].append(temp)
return new_list
def possible_moves(table, boxes):
pos=[]
for i in range(0, boxes):
for j in range(0, boxes):
pos.append((i,j))
possible = []
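# A pour (from_, to) is allowed when:
# - the source tube is non-empty and the target has room (fewer than 4),
# - the target is empty or its top colour is close to the source's top
#   colour (grayscale values differing by less than 3),
# - the source is not an already completed tube (4 identical values),
# - and we don't pour a single-colour tube into an empty tube (a wasted move).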
for from_, to in pos:
if (len(table[from_])>=1 and len(table[to])<4 and to != from_
and (len(table[to]) == 0 or (abs(int(table[from_][-1]) - int(table[to][-1]))<3))
and not (len(table[from_])==4 and len(set(table[from_]))==1)
and not (len(set(table[from_]))==1 and len(table[to]) ==0)):
possible.append((from_,to))
return possible
def check_win(table):
temp = []
not_full =[]
for i in table:
temp.append(len(set(i)))
if len(i)<4:
not_full.append(i)
if len(not_full)>2:
return False
for i in temp:
if i>1:
return False
print(table)
return True
def game_loop(agent, picture):
table, boxes_position, boxes = loading_pc.load_transform_img(picture)
print(len(boxes_position))
answer = agent(table, boxes)
return answer, boxes_position
def random_agent(table, boxes):
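# Random-restart search: repeatedly deep-copy the board and play up to
# boxes*k random legal moves; if a rollout runs out of legal moves and the
# board is solved, return that move list. k grows every 1000 rollouts so
# longer move sequences can be tried on harder levels.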
k=5
l=0
while True:
print(l)
table_copy = copy.deepcopy(table)
if l%1000 == 0:
k+=1
correct_moves = []
for i in range(boxes*k):
pmove = possible_moves(table_copy, boxes)
if len(pmove) == 0:
win = check_win(table_copy)
if win:
return correct_moves
else:
break
x, y = random.choice(pmove)
table_copy = move(table_copy, x, y)
correct_moves.append((x,y))
l+=1
if __name__ == '__main__':
answer, boxes_position = game_loop(random_agent, 'level/screen.jpg')
print('answer', answer)
|
{"/auto_puzzle.py": ["/solver.py"], "/solver.py": ["/loading_pc.py"]}
|
54
|
TheDinner22/lightning-sim
|
refs/heads/main
|
/lib/board.py
|
# represent the "board" in code
# dependencies
import random
class Board:
def __init__(self, width=10):
self.width = width
self.height = width * 2
self.WALL_CHANCE = .25
self.FLOOR_CHANCE = .15
# create the grid
self.create_random_grid()
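# The grid is a flat list of width*height cell dicts; cell i sits at
# row i // self.width, column i % self.width, and its "left"/"right"/
# "roof"/"floor" flags mark walls on each side of the cell.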
def create_random_grid(self):
# reset old grid
self.grid = []
# generate cells for new grid
for i in range(self.width * self.height):
# is the cell at the left, right, top, or bottom?
is_left = True if i % self.width == 0 else False
is_right = True if i % self.width == self.width-1 else False
is_top = True if i < self.width else False
is_bottom = True if i > (self.width * self.height - self.width) else False
# create the cell
cell = {
"left" : is_left,
"right" : is_right,
"roof" : is_top,
"floor" : is_bottom,
"ID" : i
}
# append to grid
self.grid.append(cell)
# randomly generate walls
total = self.width * self.height
horizontal_amount = int(total * self.FLOOR_CHANCE)
verticle_amount = int(total * self.WALL_CHANCE)
# generate the walls
for _i in range(verticle_amount):
random_index = random.randrange(0, total)
adding_num = -1 if random_index == total - 1 else 1
first = "right" if adding_num == 1 else "left"
second = "right" if first == "left" else "left"
self.grid[random_index][first] = True
self.grid[random_index + adding_num][second] = True
# generate the floors
for _i in range(horizontal_amount):
random_index = random.randrange(0, total)
adding_num = self.width * -1 if random_index > (total - self.width) else self.width
first = "floor" if adding_num == self.width else "roof"
second = "floor" if first == "roof" else "roof"
self.grid[random_index][first] = True
self.grid[random_index + adding_num - 1][second] = True
def can_move_from(self, cell_index):
# TODO: this works, but it's a lot of repeated code. Can it be made better?
# can you move left
can_move_left = False
is_left = True if cell_index % self.width == 0 else False
if not is_left and self.grid[cell_index]["left"] == False:
left_cell = self.grid[cell_index - 1]
is_wall_left = True if left_cell["right"] == True else False
can_move_left = True if not is_wall_left else False
# can you move right
can_move_right = False
is_right = True if cell_index % self.width == self.width-1 else False
if not is_right and self.grid[cell_index]["right"] == False:
right_cell = self.grid[cell_index + 1]
is_wall_right = True if right_cell["left"] == True else False
can_move_right = True if not is_wall_right else False
# can you move up
can_move_up = False
is_top = True if cell_index < self.width else False
if not is_top and self.grid[cell_index]["roof"] == False:
top_cell = self.grid[cell_index - self.width]
is_wall_top = True if top_cell["floor"] == True else False
can_move_up = True if not is_wall_top else False
# can you move down
can_move_down = False
is_bottom = True if cell_index > (self.width * self.height - self.width) else False
if not is_bottom and self.grid[cell_index]["floor"] == False:
bottom_cell = self.grid[cell_index + self.width]
is_wall_bottom = True if bottom_cell["roof"] == True else False
can_move_down = True if not is_wall_bottom else False
# return the results
return can_move_left, can_move_right, can_move_up, can_move_down
def BFS(self):
"""breadth first search to find the quickest way to the bottom"""
start_i = random.randrange(0,self.width)
paths = [ [start_i] ]
solved = False
dead_ends = []
while not solved:
for path in paths:
# find all possibles moves from path
if len(dead_ends) >= len(paths) or len(paths) > 10000: # TODO this solution sucks
return False, False
# NOTE order is left right up down
if path[-1] >= (self.width * self.height - self.width):
solved = True
return paths, paths.index(path)
possible_moves = self.can_move_from(path[-1])
if True in possible_moves:
move_order = [-1, 1, (self.width) * -1, self.width]
first_append_flag = False
origonal_path = path.copy()
for i in range(4):
possible_move = possible_moves[i]
if possible_move:
move = move_order[i]
next_index = origonal_path[-1] + move
if not next_index in origonal_path:
if not first_append_flag:
path.append(next_index)
first_append_flag = True
else:
new_path = origonal_path.copy()
new_path.append(next_index)
paths.append(new_path)
if not first_append_flag:
dead_ends.append(paths.index(path))
else:
dead_ends.append(paths.index(path))
def pretty_print_BFS(self, path):
for i in range(self.width * self.height):
cell = self.grid[i]
in_path = True if cell["ID"] in path else False
number_str = str(i)
if len(number_str) == 1:
number_str += " "
elif len(number_str) == 2:
number_str += " "
end_str = "\n" if i % self.width == self.width-1 else " "
if in_path:
print('\033[92m' + number_str + '\033[0m', end=end_str)
else:
print(number_str, end=end_str)
print(path)
if __name__ == "__main__":
b = Board(10)
paths, index = b.BFS()
if paths is not False:
# index can legitimately be 0, so don't truth-test it
b.pretty_print_BFS(paths[index])
else:
print("no path found")
# can_move_left, can_move_right, can_move_up, can_move_down = b.can_move_from(0)
# print("can_move_left ", can_move_left)
# print("can_move_right ", can_move_right)
# print("can_move_up ", can_move_up)
# print("can_move_down ", can_move_down)
|
{"/main.py": ["/lib/board.py", "/lib/window.py"]}
|
55
|
TheDinner22/lightning-sim
|
refs/heads/main
|
/lib/window.py
|
# use pygame to show the board on a window
# dependencies
import pygame, random
class Window:
def __init__(self, board):
# init py game
pygame.init()
# width height
self.WIDTH = 600
self.HEIGHT = 600
# different display modes
self.display_one = False
self.display_all = False
# place holder
self.solution = []
self.display_all_c = 0
# the board to display on the window
self.board = board
# define the dimensions of the cells of the board
self.cell_width = self.WIDTH // self.board.width
# define the left padding for the grid
total_width = self.cell_width * self.board.width
self.left_padding = (self.WIDTH - total_width) // 2
# colors
self.COLORS = {
"BLACK" : (255, 255, 255),
"GREY" : (230, 230, 230),
"BLUE" : (0, 0, 255),
"RED" : (255, 0, 0),
"YELLOW" : (212, 175, 55)
}
def create_random_color(self):
return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def create_window(self):
# define window
self.WIN = pygame.display.set_mode( (self.WIDTH, self.HEIGHT) )
# name window
pygame.display.set_caption("LIGHT NING")
# logo/icon for window
logo = pygame.image.load("images/logo.png")
pygame.display.set_icon(logo)
def get_BFS(self):
solved = False
while not solved:
self.board.create_random_grid()
paths, index = self.board.BFS()
if paths is not False: # index can be 0, which is falsy, so only check paths
self.solution = paths[index]
solved = True
self.paths = paths
self.solution_i = index
def draw_grid_solution(self):
fflag = True
for i in range(self.board.width * self.board.height):
if not i in self.solution: continue
# map the flat cell index to a grid column and row
col_num = i % self.board.width
row_num = i // self.board.width
x_pos = self.left_padding + (col_num * self.cell_width)
y_pos = row_num * self.cell_width
# define rect
r = pygame.Rect(x_pos, y_pos, self.cell_width, self.cell_width)
# draw the rectangle
pygame.draw.rect(self.WIN, self.COLORS["YELLOW"], r)
def draw_BFS(self):
if self.display_all_c >= len(self.paths):
self.display_all_c = 0
# generate a color for each path
path_colors = []
for path in self.paths:
path_colors.append(self.create_random_color())
path_colors[-1] = (0, 0 ,0)
temp = self.paths.pop(self.display_all_c)
self.paths.append(temp)
for path in self.paths:
for i in path:
# map the flat cell index to a grid column and row
col_num = i % self.board.width
row_num = i // self.board.width
x_pos = self.left_padding + (col_num * self.cell_width)
y_pos = row_num * self.cell_width
# define rect
r = pygame.Rect(x_pos, y_pos, self.cell_width, self.cell_width)
# draw the rectangle
pygame.draw.rect(self.WIN, path_colors[self.paths.index(path)], r)
self.display_all_c += 1
def draw_window(self):
self.WIN.fill(self.COLORS["GREY"])
if self.display_one:
self.draw_grid_solution()
elif self.display_all:
self.draw_BFS()
pygame.display.update()
def main(self):
# create window
self.create_window()
self.running = True
while self.running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_0:
self.get_BFS()
elif event.key == pygame.K_1:
# toggle display one
self.display_one = not self.display_one
if self.display_one:
self.display_all = False
elif event.key == pygame.K_2:
# toggle display all
self.display_all = not self.display_all
if self.display_all:
self.display_all_c = 0
self.display_one = False
self.draw_window()
if __name__ == "__main__":
# Window needs a Board to display; assuming the project root is importable
# (as main.py arranges), pull Board in from lib:
from lib.board import Board
win = Window(Board())
win.main()
|
{"/main.py": ["/lib/board.py", "/lib/window.py"]}
|
56
|
TheDinner22/lightning-sim
|
refs/heads/main
|
/main.py
|
# This could and will be better; I just needed to get it working here as a
# proof of concept. It will be online and improved later.
import os, sys
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # adds project dir to places it looks for the modules
sys.path.append(BASE_PATH)
from lib.board import Board
from lib.window import Window
b = Board()
win = Window(b)
win.main()
|
{"/main.py": ["/lib/board.py", "/lib/window.py"]}
|
79
|
igoryuha/wct
|
refs/heads/master
|
/eval.py
|
import torch
from models import NormalisedVGG, Decoder
from utils import load_image, preprocess, deprocess, extract_image_names
from ops import style_decorator, wct
import argparse
import os
parser = argparse.ArgumentParser(description='WCT')
parser.add_argument('--content-path', type=str, help='path to the content image')
parser.add_argument('--style-path', type=str, help='path to the style image')
parser.add_argument('--content-dir', type=str, help='path to the content image folder')
parser.add_argument('--style-dir', type=str, help='path to the style image folder')
parser.add_argument('--style-decorator', type=int, default=1)
parser.add_argument('--kernel-size', type=int, default=12)
parser.add_argument('--stride', type=int, default=1)
parser.add_argument('--alpha', type=float, default=0.8)
parser.add_argument('--ss-alpha', type=float, default=0.6)
parser.add_argument('--synthesis', type=int, default=0, help='0-transfer, 1-synthesis')
parser.add_argument('--encoder-path', type=str, default='encoder/vgg_normalised_conv5_1.pth')
parser.add_argument('--decoders-dir', type=str, default='decoders')
parser.add_argument('--save-dir', type=str, default='./results')
parser.add_argument('--save-name', type=str, default='result', help='save name for single output image')
parser.add_argument('--save-ext', type=str, default='jpg', help='The extension name of the output image')
parser.add_argument('--content-size', type=int, default=768, help='New (minimum) size for the content image')
parser.add_argument('--style-size', type=int, default=768, help='New (minimum) size for the style image')
parser.add_argument('--gpu', type=int, default=0, help='ID of the GPU to use; for CPU mode set --gpu = -1')
args = parser.parse_args()
assert args.content_path is not None or args.content_dir is not None, \
'Either --content-path or --content-dir should be given.'
assert args.style_path is not None or args.style_dir is not None, \
'Either --style-path or --style-dir should be given.'
device = torch.device('cuda:%s' % args.gpu if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
encoder = NormalisedVGG(pretrained_path=args.encoder_path).to(device)
d5 = Decoder('relu5_1', pretrained_path=os.path.join(args.decoders_dir, 'd5.pth')).to(device)
d4 = Decoder('relu4_1', pretrained_path=os.path.join(args.decoders_dir, 'd4.pth')).to(device)
d3 = Decoder('relu3_1', pretrained_path=os.path.join(args.decoders_dir, 'd3.pth')).to(device)
d2 = Decoder('relu2_1', pretrained_path=os.path.join(args.decoders_dir, 'd2.pth')).to(device)
d1 = Decoder('relu1_1', pretrained_path=os.path.join(args.decoders_dir, 'd1.pth')).to(device)
def style_transfer(content, style):
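# Coarse-to-fine stylisation: transform features at relu5_1 first (with the
# style decorator or plain WCT), decode back to image space, then repeat the
# WCT + decode step at relu4_1, relu3_1, relu2_1 and relu1_1, re-encoding the
# intermediate reconstruction each time.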
if args.style_decorator:
relu5_1_cf = encoder(content, 'relu5_1')
relu5_1_sf = encoder(style, 'relu5_1')
relu5_1_scf = style_decorator(relu5_1_cf, relu5_1_sf, args.kernel_size, args.stride, args.ss_alpha)
relu5_1_recons = d5(relu5_1_scf)
else:
relu5_1_cf = encoder(content, 'relu5_1')
relu5_1_sf = encoder(style, 'relu5_1')
relu5_1_scf = wct(relu5_1_cf, relu5_1_sf, args.alpha)
relu5_1_recons = d5(relu5_1_scf)
relu4_1_cf = encoder(relu5_1_recons, 'relu4_1')
relu4_1_sf = encoder(style, 'relu4_1')
relu4_1_scf = wct(relu4_1_cf, relu4_1_sf, args.alpha)
relu4_1_recons = d4(relu4_1_scf)
relu3_1_cf = encoder(relu4_1_recons, 'relu3_1')
relu3_1_sf = encoder(style, 'relu3_1')
relu3_1_scf = wct(relu3_1_cf, relu3_1_sf, args.alpha)
relu3_1_recons = d3(relu3_1_scf)
relu2_1_cf = encoder(relu3_1_recons, 'relu2_1')
relu2_1_sf = encoder(style, 'relu2_1')
relu2_1_scf = wct(relu2_1_cf, relu2_1_sf, args.alpha)
relu2_1_recons = d2(relu2_1_scf)
relu1_1_cf = encoder(relu2_1_recons, 'relu1_1')
relu1_1_sf = encoder(style, 'relu1_1')
relu1_1_scf = wct(relu1_1_cf, relu1_1_sf, args.alpha)
relu1_1_recons = d1(relu1_1_scf)
return relu1_1_recons
if not os.path.exists(args.save_dir):
print('Creating save folder at', args.save_dir)
os.mkdir(args.save_dir)
content_paths = []
style_paths = []
if args.content_dir:
# use a batch of content images
content_paths = extract_image_names(args.content_dir)
else:
# use a single content image
content_paths.append(args.content_path)
if args.style_dir:
# use a batch of style images
style_paths = extract_image_names(args.style_dir)
else:
# use a single style image
style_paths.append(args.style_path)
print('Number content images:', len(content_paths))
print('Number style images:', len(style_paths))
with torch.no_grad():
for i in range(len(content_paths)):
content = load_image(content_paths[i])
content = preprocess(content, args.content_size)
content = content.to(device)
for j in range(len(style_paths)):
style = load_image(style_paths[j])
style = preprocess(style, args.style_size)
style = style.to(device)
if args.synthesis == 0:
output = style_transfer(content, style)
output = deprocess(output)
if len(content_paths) == 1 and len(style_paths) == 1:
# used a single content and style image
save_path = '%s/%s.%s' % (args.save_dir, args.save_name, args.save_ext)
else:
# used a batch of content and style images
save_path = '%s/%s_%s.%s' % (args.save_dir, i, j, args.save_ext)
print('Output image saved at:', save_path)
output.save(save_path)
else:
content = torch.rand(*content.shape).uniform_(0, 1).to(device)
for iteration in range(3):
output = style_transfer(content, style)
content = output
output = deprocess(output)
if len(content_paths) == 1 and len(style_paths) == 1:
# used a single content and style image
save_path = '%s/%s_%s.%s' % (args.save_dir, args.save_name, iteration, args.save_ext)
else:
# used a batch of content and style images
save_path = '%s/%s_%s_%s.%s' % (args.save_dir, i, j, iteration, args.save_ext)
print('Output image saved at:', save_path)
output.save(save_path)
|
{"/eval.py": ["/models.py", "/utils.py", "/ops.py"], "/utils.py": ["/ops.py"]}
|
80
|
igoryuha/wct
|
refs/heads/master
|
/models.py
|
import torch
import torch.nn as nn
import copy
normalised_vgg_relu5_1 = nn.Sequential(
nn.Conv2d(3, 3, 1),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(3, 64, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, 3),
nn.ReLU(),
nn.MaxPool2d(2, ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 128, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, 3),
nn.ReLU(),
nn.MaxPool2d(2, ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.MaxPool2d(2, ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.MaxPool2d(2, ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU()
)
class NormalisedVGG(nn.Module):
def __init__(self, pretrained_path=None):
super().__init__()
self.net = normalised_vgg_relu5_1
if pretrained_path is not None:
self.net.load_state_dict(torch.load(pretrained_path, map_location=lambda storage, loc: storage))
def forward(self, x, target):
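# the slice end indices select the sub-network up to and including each
# reluX_1 activation: net[:4] ends at relu1_1, net[:11] at relu2_1,
# net[:18] at relu3_1 and net[:31] at relu4_1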
if target == 'relu1_1':
return self.net[:4](x)
elif target == 'relu2_1':
return self.net[:11](x)
elif target == 'relu3_1':
return self.net[:18](x)
elif target == 'relu4_1':
return self.net[:31](x)
elif target == 'relu5_1':
return self.net(x)
vgg_decoder_relu5_1 = nn.Sequential(
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.Upsample(scale_factor=2),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 256, 3),
nn.ReLU(),
nn.Upsample(scale_factor=2),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 128, 3),
nn.ReLU(),
nn.Upsample(scale_factor=2),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 64, 3),
nn.ReLU(),
nn.Upsample(scale_factor=2),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 3, 3)
)
class Decoder(nn.Module):
def __init__(self, target, pretrained_path=None):
super().__init__()
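# each shallower decoder reuses the tail of the full relu5_1 decoder defined
# above: the last 5 layers invert relu1_1 features, the last 9 invert
# relu2_1, the last 16 invert relu3_1 and the last 29 invert relu4_1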
if target == 'relu1_1':
self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-5:])) # current -2
elif target == 'relu2_1':
self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-9:]))
elif target == 'relu3_1':
self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-16:]))
elif target == 'relu4_1':
self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-29:]))
elif target == 'relu5_1':
self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())))
if pretrained_path is not None:
self.net.load_state_dict(torch.load(pretrained_path, map_location=lambda storage, loc: storage))
def forward(self, x):
return self.net(x)
|
{"/eval.py": ["/models.py", "/utils.py", "/ops.py"], "/utils.py": ["/ops.py"]}
|
81
|
igoryuha/wct
|
refs/heads/master
|
/ops.py
|
import torch
import torch.nn.functional as F
def extract_image_patches_(image, kernel_size, strides):
kh, kw = kernel_size
sh, sw = strides
patches = image.unfold(2, kh, sh).unfold(3, kw, sw)
patches = patches.permute(0, 2, 3, 1, 4, 5)
patches = patches.reshape(-1, *patches.shape[-3:]) # (patch_numbers, C, kh, kw)
return patches
def style_swap(c_features, s_features, kernel_size, stride=1):
s_patches = extract_image_patches_(s_features, [kernel_size, kernel_size], [stride, stride])
s_patches_matrix = s_patches.reshape(s_patches.shape[0], -1)
s_patch_wise_norm = torch.norm(s_patches_matrix, dim=1)
s_patch_wise_norm = s_patch_wise_norm.reshape(-1, 1, 1, 1)
s_patches_normalized = s_patches / (s_patch_wise_norm + 1e-8)
# Computes the normalized cross-correlations.
# At each spatial location, "K" is a vector of cross-correlations
# between a content activation patch and all style activation patches.
K = F.conv2d(c_features, s_patches_normalized, stride=stride)
# Replace each vector "K" by a one-hot vector corresponding
# to the best matching style activation patch.
best_matching_idx = K.argmax(1, keepdim=True)
one_hot = torch.zeros_like(K)
one_hot.scatter_(1, best_matching_idx, 1)
# At each spatial location, only the best matching style
# activation patch is in the output, as the other patches
# are multiplied by zero.
F_ss = F.conv_transpose2d(one_hot, s_patches, stride=stride)
overlap = F.conv_transpose2d(one_hot, torch.ones_like(s_patches), stride=stride)
F_ss = F_ss / overlap
return F_ss
def relu_x_1_transform(c, s, encoder, decoder, relu_target, alpha=1):
c_latent = encoder(c, relu_target)
s_latent = encoder(s, relu_target)
t_features = wct(c_latent, s_latent, alpha)
return decoder(t_features)
def relu_x_1_style_decorator_transform(c, s, encoder, decoder, relu_target, kernel_size, stride=1, alpha=1):
c_latent = encoder(c, relu_target)
s_latent = encoder(s, relu_target)
t_features = style_decorator(c_latent, s_latent, kernel_size, stride, alpha)
return decoder(t_features)
def style_decorator(cf, sf, kernel_size, stride=1, alpha=1):
cf_shape = cf.shape
sf_shape = sf.shape
b, c, h, w = cf_shape
cf_vectorized = cf.reshape(c, h * w)
b, c, h, w = sf.shape
sf_vectorized = sf.reshape(c, h * w)
# map features to normalized domain
cf_whiten = whitening(cf_vectorized)
sf_whiten = whitening(sf_vectorized)
# in this normalized domain, we want to align
# any element in cf with the nearest element in sf
reassembling_f = style_swap(
cf_whiten.reshape(cf_shape),
sf_whiten.reshape(sf_shape),
kernel_size, stride
)
b, c, h, w = reassembling_f.shape
reassembling_vectorized = reassembling_f.reshape(c, h*w)
# reconstruct reassembling features into the
# domain of the style features
result = coloring(reassembling_vectorized, sf_vectorized)
result = result.reshape(cf_shape)
bland = alpha * result + (1 - alpha) * cf
return bland
def wct(cf, sf, alpha=1):
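# Whitening-and-colouring transform. With content features f_c and style
# features f_s (each C x HW), whitening removes the content correlations,
#   f_c_hat = E_c diag(d_c^-1/2) E_c^T (f_c - mu_c),
# and colouring re-applies the style covariance and mean,
#   f_cs = E_s diag(d_s^1/2) E_s^T f_c_hat + mu_s,
# where E and d come from the eigendecomposition of each feature covariance
# (computed via SVD in feature_decomposition). alpha blends the result with
# the original content features.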
cf_shape = cf.shape
b, c, h, w = cf_shape
cf_vectorized = cf.reshape(c, h*w)
b, c, h, w = sf.shape
sf_vectorized = sf.reshape(c, h*w)
cf_transformed = whitening(cf_vectorized)
cf_transformed = coloring(cf_transformed, sf_vectorized)
cf_transformed = cf_transformed.reshape(cf_shape)
bland = alpha * cf_transformed + (1 - alpha) * cf
return bland
def feature_decomposition(x):
x_mean = x.mean(1, keepdims=True)
x_center = x - x_mean
x_cov = x_center.mm(x_center.t()) / (x_center.size(1) - 1)
e, d, _ = torch.svd(x_cov)
d = d[d > 0]
e = e[:, :d.size(0)]
return e, d, x_center, x_mean
def whitening(x):
e, d, x_center, _ = feature_decomposition(x)
transform_matrix = e.mm(torch.diag(d ** -0.5)).mm(e.t())
return transform_matrix.mm(x_center)
def coloring(x, y):
e, d, _, y_mean = feature_decomposition(y)
transform_matrix = e.mm(torch.diag(d ** 0.5)).mm(e.t())
return transform_matrix.mm(x) + y_mean
|
{"/eval.py": ["/models.py", "/utils.py", "/ops.py"], "/utils.py": ["/ops.py"]}
|
82
|
igoryuha/wct
|
refs/heads/master
|
/utils.py
|
import torch
from torchvision import transforms
from ops import relu_x_1_style_decorator_transform, relu_x_1_transform
from PIL import Image
import os
def eval_transform(size):
return transforms.Compose([
transforms.Resize(size),
transforms.ToTensor()
])
def load_image(path):
return Image.open(path).convert('RGB')
def preprocess(img, size):
transform = eval_transform(size)
return transform(img).unsqueeze(0)
def deprocess(tensor):
tensor = tensor.cpu()
tensor = tensor.squeeze(0)
tensor = torch.clamp(tensor, 0, 1)
return transforms.ToPILImage()(tensor)
def extract_image_names(path):
r_ = []
valid_ext = ['.jpg', '.png']
items = os.listdir(path)
for item in items:
item_path = os.path.join(path, item)
_, ext = os.path.splitext(item_path)
if ext not in valid_ext:
continue
r_.append(item_path)
return r_
|
{"/eval.py": ["/models.py", "/utils.py", "/ops.py"], "/utils.py": ["/ops.py"]}
|
89
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
/tattle_helper.py
|
import os
import json
import boto3
import requests
from logger import log, logError
from dotenv import load_dotenv
load_dotenv()
s3 = boto3.client("s3",aws_access_key_id=os.environ.get('S3_ACCESS_KEY'),aws_secret_access_key=os.environ.get('S3_SECRET_ACCESS_KEY'))
API_BASE_URL = "https://archive-server.tattle.co.in"
# API_BASE_URL = "https://postman-echo.com/post"
ARCHIVE_TOKEN = os.environ.get('ARCHIVE_TOKEN')
def register_post(data):
"""
registers a post on archive server
"""
url_to_post_to = API_BASE_URL+"/api/posts"
payload = json.dumps(data)
headers = {
'token': ARCHIVE_TOKEN,
'Content-Type': "application/json",
'cache-control': "no-cache",
}
try:
r = requests.post(url_to_post_to, data=payload, headers=headers)
if r.status_code==200:
log('STATUS CODE 200 \n'+json.dumps(r.json(), indent=2))
else:
log('STATUS CODE '+str(r.status_code)+'\n '+r.text)
except:
log('error with API call')
def upload_file(file_name, s3=s3 ,acl="public-read"):
bucket_name = os.environ.get('TGM_BUCKET_NAME')
#opens file, reads it, and uploads it to the S3 bucket.
try:
with open(file_name, 'rb') as data:
s3.upload_fileobj(data,bucket_name,file_name,ExtraArgs={"ACL": acl,"ContentType": file_name.split(".")[-1]})
except:
logError('ERROR_S3_UPLOAD of '+file_name)
file_url = "https://s3.ap-south-1.amazonaws.com/"+bucket_name+"/"+file_name
return file_url
|
{"/tattle_helper.py": ["/logger.py"], "/test.py": ["/tattle_helper.py"], "/prototype.py": ["/logger.py", "/tattle_helper.py"]}
|
90
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
/post_request.py
|
token = "78a6fc20-fa83-11e9-a4ad-d1866a9a3c7b" # add your token here
url = "<base-api-url>/api/posts"
try:
payload = d
payload = json.dumps(payload)
headers = {
'token': token,
'Content-Type': "application/json",
'cache-control': "no-cache",
}
r = requests.post(url, data=payload, headers=headers)
if r.ok:
print ('success')
else:
print ('something went wrong')
except:
logging.exception('error in POST request')
raise
{
"type" : "image", # can be image, text, video
"data" : "",
"filename": "4bf4b1cc-516b-469d-aa38-be6762d417a5", #filename you put on s3
"userId" : 169 # for telegram_bot this should be 169
}
|
{"/tattle_helper.py": ["/logger.py"], "/test.py": ["/tattle_helper.py"], "/prototype.py": ["/logger.py", "/tattle_helper.py"]}
|
91
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
/test.py
|
from tattle_helper import register_post, upload_file
data = {
"type" : "image",
"data" : "",
"filename": "asdf",
"userId" : 169
}
response = upload_file(file_name='denny.txt')
print(response)
# register_post(data)
|
{"/tattle_helper.py": ["/logger.py"], "/test.py": ["/tattle_helper.py"], "/prototype.py": ["/logger.py", "/tattle_helper.py"]}
|
92
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
/prototype.py
|
import os
import sys
import json
import requests
import telegram
import logging
import re
from threading import Thread
from telegram.ext import CommandHandler, MessageHandler, Updater, Filters, InlineQueryHandler
from telegram import InlineQueryResultArticle, InputTextMessageContent
from telegram.ext.dispatcher import run_async
from dotenv import load_dotenv
from pymongo import MongoClient
from logger import log, logError
from tattle_helper import upload_file
# loads all environment variables
load_dotenv()
log('STARTING APP v1')
TOKEN = os.environ.get('ACCESS_TOKEN')
PORT = int(os.environ.get('PORT', '8443'))
print(TOKEN)
# logging.basicConfig(filename='telegram_bot_log.log',filemode='a',format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
# Calls for Database modification and reads start
def insert_document(document, required_collection):
return db[required_collection].insert_one(document)
def find_document(query, required_collection):
return db[required_collection].find_one(query)
def update_document(find_query, update_query, required_collection, upsert=False):
return db[required_collection].update_one(find_query, update_query, upsert)
def delete_document(find_query, required_collection):
return db[required_collection].delete_one(find_query)
# Calls for Database modification and reads end
@run_async
def start(update, context):
# start message
context.bot.send_message(chat_id=update.effective_chat.id, text="Hey! \n\nI'm the Tattle Bot. Here are some instructions to use me:\n\n1. You can send whatever content to me that you'd like. All mediums : Text, Video, and Photos are allowed.\n2. You can tag your content using hashtags. When uploading photos or videos you can mention the tags in the caption, with text you can just tag it at the end or in the beginning(anywhere else in the text will also work).\n3. You can edit your messages after you've sent them, we'll update them in our database accordingly.\n 4. In case you miss tagging a message, you can reply to that message and insert the tags required. Only tags will be extracted, so please don't write text while replying to messages.")
def determine_type(message_json):
# checks what type of content is being passed, and returns the type
type_of_content = ''
if(message_json.text):
type_of_content = 'text'
elif(message_json.photo):
type_of_content = 'photo'
elif(message_json.video):
type_of_content = 'video'
elif(message_json.document):
type_of_content = 'document'
return type_of_content
def entity_extraction(all_entities, message_content):
# entity extraction, which basically extracts all the hashtags out of the message
list_of_tags = []
if(bool(all_entities)):
# checks if there are any entities, and if so loops over them
for each_entity in all_entities:
if(each_entity['type'] == 'hashtag'):
# string slicing based on offset and length values
tag = message_content[each_entity['offset']:(
each_entity['offset']+each_entity['length'])]
list_of_tags.append(tag)
if(bool(list_of_tags)):
# converts to set to remove duplicates
return list(set(list_of_tags))
else:
return None
def new_tags(message_json, current_document, all_tags):
# adds or replaces tags in messages that had no tags or in case of edits
new_tags = all_tags
update_document({'message_id': message_json.reply_to_message.message_id}, {
"$set": {"reply_tags": new_tags}}, 'messages')
def error_message(message_json):
# standard error message
context.bot.send_message(chat_id=message_json.chat.id,
text="Something went wrong with registering these tags, apologies for the same.")
def reply_to_messages(message_json, edit_flag):
all_tags = entity_extraction(message_json.entities, message_json.text)
if(all_tags is not None):
# first finds the document that the reply is being done to
current_document = find_document(
{'message_id': message_json.reply_to_message.message_id}, 'messages')
try:
# add reply tags with a new key called reply_tags
new_tags(message_json, current_document, all_tags)
except:
# or, throw an error message and log
error_message()
raise
def edit_message(message_json, final_dict, content_type, context):
tags = []
# check content type before processing the data
if(content_type == 'text'):
# In case of edits, we need to replace file on S3. Replacing happens automatically as long as file name is same.
file_name = str(message_json.message_id) + '.txt'
with open(file_name, 'w') as open_file:
open_file.write(message_json['text'])
upload_file(file_name)
os.remove(file_name)
final_dict = process_text(
message_json, final_dict, message_json['text'], False)
else:
final_dict = process_media(
message_json, final_dict, content_type, context, False)
# in case message is being edited, we first find the document being edited
current_document = find_document(
{'message_id': message_json.message_id}, 'messages')
# we check if the document had any existing tags, if so we store them before deleting the document
# FLAW IN CODE : If existing tags are being edited, it doesn't reflect this way. NEED TO FIX.
try:
tags = current_document['tags']
except KeyError:
tags = None
try:
reply_tags = current_document['reply_tags']
except KeyError:
reply_tags = None
if(reply_tags is not None):
final_dict['reply_tags'] = reply_tags
# add tags to final dict for new, edited document
if(tags is not None):
final_dict['tags'] = tags
# delete the document
delete_document({'message_id': message_json.message_id}, 'messages')
# insert edited document
insert_document(final_dict, 'messages')
def process_text(message_json, final_dict, message_content, caption_flag):
# check if we're processing a caption or a text message
if(caption_flag):
all_tags = entity_extraction(
message_json['caption_entities'], message_content)
else:
all_tags = entity_extraction(message_json['entities'], message_content)
# check if any tags are present
if(all_tags is not None):
final_dict['tags'] = all_tags
if(bool(message_content)):
# cleans out the hashtags
modified_message = re.sub(r'#\w+', '', message_content)
# removes all excessive spacing
cleaned_message = re.sub(' +', ' ', modified_message)
# changes key based on whether it is a caption or not
if(caption_flag):
# removing leading and trailing spaces
final_dict['caption'] = cleaned_message.strip()
else:
final_dict['text'] = cleaned_message.strip()
return final_dict
# just for testing
# BASE_URL = "http://archive-telegram-bot.tattle.co.in.s3.amazonaws.com/"
# print("{}{}".format(BASE_URL, file_name))
def make_post_request(dict_to_post):
log('***')
log(dict_to_post)
API_BASE_URL = "https://archive-server.tattle.co.in"
access_token = os.environ.get('ARCHIVE_TOKEN')
url_to_post_to = API_BASE_URL+"/api/posts"
payload = json.dumps(dict_to_post)
headers = {
'token': access_token,
'Content-Type': "application/json",
'cache-control': "no-cache",
}
r = requests.post(url_to_post_to, data=payload, headers=headers)
print('API response')
print(r)
# print(r.json())
def construct_dict(file_name, file_type):
return {"type": file_type, "data": "", "filename": file_name, "userId": 169}
def process_media(message_json, final_dict, content_type, context, creation_flag):
# check if content type is photo, and constructs dict and file_name appropriately
if(content_type == 'photo'):
final_dict['photo'] = [{'file_id': each_photo.file_id, 'width': each_photo.width,
'height': each_photo.height, 'file_size': each_photo.file_size} for each_photo in message_json.photo]
file_id = message_json.photo[-1].file_id
file_name = str(message_json.message_id)+'.jpeg'
post_request_type = 'image'
# same with video as above
elif(content_type == 'video'):
final_dict['video'] = {'file_id': message_json.video.file_id, 'width': message_json.video.width, 'height': message_json.video.height, 'duration': message_json.video.duration, 'thumb': {'file_id': message_json.video.thumb.file_id,
'width': message_json.video.thumb.width, 'height': message_json.video.thumb.height, 'file_size': message_json.video.thumb.file_size}, 'mime_type': message_json.video.mime_type, 'file_size': message_json.video.file_size}
file_id = message_json.video.file_id
file_type = str(message_json.video.mime_type).split("/")[-1]
file_name = str(message_json.message_id)+"."+file_type
post_request_type = 'video'
# process_media is only called from two places, one of which is when message is edited. Since we don't want duplicates, we set a flag to differentiate.
if(creation_flag):
try:
new_file = context.bot.get_file(file_id)
new_file.download(file_name) # downloads the file
final_dict['file_name'] = file_name
file_url = upload_file(file_name) # uploads to S3
final_dict['s3_url'] = file_url
os.remove(file_name) # removes it from local runtime
request_dict = construct_dict(file_name, post_request_type)
make_post_request(request_dict)
except:
logging.exception(
"The file_name when the error happened is: {}".format(file_name))
# process any caption or text found
final_dict = process_text(message_json, final_dict,
message_json.caption, True)
return final_dict
@run_async
def storing_data(update, context):
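# Flow: determine the content type of the incoming message, handle edits and
# replies as special cases, otherwise write the content to a local file,
# upload it to S3, register the post with the archive server, and store the
# message metadata in MongoDB.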
log(update)
final_dict = {}
# print(update)
# selects just the effective_message part
relevant_section = update.effective_message
# some general data appended to each dict
final_dict['message_id'] = relevant_section['message_id']
final_dict['date'] = relevant_section['date']
# final_dict['from'] = {'id':relevant_section.from_user.id,'type':relevant_section.chat.type,'first_name':relevant_section.from_user.first_name,'last_name':relevant_section.from_user.last_name,'username':relevant_section.from_user.username,'is_bot':relevant_section.from_user.is_bot}
content_type = determine_type(relevant_section)
final_dict['content_type'] = content_type
# checks if the request is an edit of an earlier message
if(relevant_section.edit_date):
# if yes, checks if the edited message was replying to another message
if(relevant_section.reply_to_message):
# if yes, then deals with it by setting edit flag to True
reply_to_messages(relevant_section, True)
return
else:
# else, just edits the message normally
edit_message(relevant_section, final_dict, content_type, context)
return
# if the message is a reply, then respond appropriately
if(relevant_section.reply_to_message):
# edit flag is set to false because we're just handling simple reply
reply_to_messages(relevant_section, False)
return
if(content_type == 'text'):
# creates file with message ID, then writes the text into the file and uploads it to S3
try:
file_name = str(relevant_section.message_id) + '.txt'
with open(file_name, 'w') as open_file:
open_file.write(relevant_section['text'])
file_url = upload_file(file_name)
final_dict['s3_url'] = file_url
os.remove(file_name)
request_dict = construct_dict(file_name, content_type)
r = make_post_request(request_dict)
except Exception as e:
logging.exception(
"The file_name when the error happened is: {}".format(file_name))
logging.exception(e)
# if new text message, process it and then insert it in the database
final_dict = process_text(
relevant_section, final_dict, relevant_section['text'], False)
insert_document(final_dict, 'messages')
else:
final_dict = process_media(
relevant_section, final_dict, content_type, context, True)
insert_document(final_dict, 'messages')
context.bot.send_message(
chat_id=update.effective_chat.id, text='message archived')
# context.bot.send_message(chat_id=update.effective_chat.id, text=update.message.text)
def stop_and_restart():
"""Gracefully stop the Updater and replace the current process with a new one"""
updater.stop()
os.execl(sys.executable, sys.executable, *sys.argv)
def restart(update, context):
update.message.reply_text('Bot is restarting...')
Thread(target=stop_and_restart).start()
try:
client = MongoClient("mongodb+srv://"+os.environ.get("TGM_DB_USERNAME")+":"+os.environ.get("TGM_DB_PASSWORD") +
"@tattle-data-fkpmg.mongodb.net/test?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE")
db = client[os.environ.get("TGM_DB_NAME")]
except Exception as db_error:
print('error connecting to db')
print(db_error)
updater = Updater(token=TOKEN, use_context=True, workers=32)
dispatcher = updater.dispatcher
start_handler = CommandHandler('start', start)
storing_data_handler = MessageHandler(Filters.all, storing_data)
restart_handler = CommandHandler(
'r', restart, filters=Filters.user(username='@thenerdyouknow'))
dispatcher.add_handler(restart_handler)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(storing_data_handler)
# updater.start_webhook(listen="0.0.0.0",
# port=PORT,
# url_path=TOKEN)
# updater.bot.set_webhook("https://services-server.tattle.co.in/" + TOKEN)
updater.start_polling()
updater.idle()
log('STARTING SERVER v1.0')
|
{"/tattle_helper.py": ["/logger.py"], "/test.py": ["/tattle_helper.py"], "/prototype.py": ["/logger.py", "/tattle_helper.py"]}
|
93
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
/logger.py
|
from datetime import datetime
def log(data):
print('----', datetime.now(), '----')
print(data)
def logError(error):
print('****', datetime.now(), '****')
print(error)
|
{"/tattle_helper.py": ["/logger.py"], "/test.py": ["/tattle_helper.py"], "/prototype.py": ["/logger.py", "/tattle_helper.py"]}
|
143
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/main.py
|
import data_helper
import time
import datetime
import os
import tensorflow as tf
import numpy as np
import evaluation
# QA_CNN_extend is used below but was never imported here; assuming it is
# exported by the models package (see models/__init__.py):
from models import QA_CNN_extend
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
timeDay = time.strftime("%Y%m%d", timeArray)
print (timeStamp)
def main(args):
args._parse_flags()
print("\nParameters:")
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
log_dir = 'log/'+ timeDay
if not os.path.exists(log_dir):
os.makedirs(log_dir)
data_file = log_dir + '/test_' + args.data + timeStamp
precision = data_file + 'precise'
print('load data ...........')
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev])
print('the number of words',len(alphabet))
print('get embedding')
if args.data=="quora":
embedding = data_helper.get_embedding(alphabet,language="cn")
else:
embedding = data_helper.get_embedding(alphabet)
with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model = QA_CNN_extend(max_input_left = q_max_sent_length,
max_input_right = a_max_sent_length,
batch_size = args.batch_size,
vocab_size = len(alphabet),
embedding_size = args.embedding_dim,
filter_sizes = list(map(int, args.filter_sizes.split(","))),
num_filters = args.num_filters,
hidden_size = args.hidden_size,
dropout_keep_prob = args.dropout_keep_prob,
embeddings = embedding,
l2_reg_lambda = args.l2_reg_lambda,
trainable = args.trainable,
pooling = args.pooling,
conv = args.conv)
model.build_graph()
sess.run(tf.global_variables_initializer())
def train_step(model,sess,batch):
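# Each batch element packs (question, positive answer, negative answer,
# q_mask, a_mask, a_neg_mask). score12 is the model's score for the
# question/positive-answer pair and score13 for the question/negative-answer
# pair (logged below as "positive" and "negative").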
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.answer_negative:data[2],
model.q_mask:data[3],
model.a_mask:data[4],
model.a_neg_mask:data[5]
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[model.train_op, model.merged,model.global_step,model.loss, model.accuracy,model.score12,model.score13, model.see],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
def predict(model,sess,batch,test):
scores = []
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.q_mask:data[2],
model.a_mask:data[3]
}
score = sess.run(
model.score12,
feed_dict)
scores.extend(score)
return np.array(scores[:len(test)])
for i in range(args.num_epoches):
datas = data_helper.get_mini_batch(train,alphabet,args.batch_size)
train_step(model,sess,datas)
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
print(len(predicted_test))
print(len(test))
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
print('map_mrr test',map_mrr_test)
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
144
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/config.py
|
class Singleton(object):
__instance=None
def __init__(self):
pass
def getInstance(self):
if Singleton.__instance is None:
# Singleton.__instance=object.__new__(cls,*args,**kwd)
Singleton.__instance=self.get_test_flag()
print("build FLAGS over")
return Singleton.__instance
def get_test_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_rnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
# flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('data','trec','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_cnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_qcnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "qcnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','mean','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_8008_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
if __name__=="__main__":
args=Singleton().get_test_flag()
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
145
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/run.py
|
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime,os
import models
import numpy as np
import evaluation
import sys
import logging
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_qcnn_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
# ckpt = tf.train.get_checkpoint_state("checkpoint")
# if ckpt and ckpt.model_checkpoint_path:
# # Restores from checkpoint
# saver.restore(sess, ckpt.model_checkpoint_path)
# if os.path.exists("model") :
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
# variable_averages = tf.train.ExponentialMovingAverage( model)
# variables_to_restore = variable_averages.variables_to_restore()
# saver = tf.train.Saver(variables_to_restore)
# for name in variables_to_restore:
# print(name)
sess.run(tf.global_variables_initializer())
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
best_p1=0
for i in range(args.num_epoches):
for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess):
# for data in data_helper.getBatch48008(train,alphabet,args.batch_size):
_, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
#<<<<<<< HEAD
#
#
# if i>0 and i % 5 ==0:
# test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
#
# predicted_test = predict(model,sess,test_datas,test)
# map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
#
# logger.info('map_mrr test' +str(map_mrr_test))
# print('map_mrr test' +str(map_mrr_test))
#
# test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size)
# predicted_test = predict(model,sess,test_datas,dev)
# map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test)
#
# logger.info('map_mrr dev' +str(map_mrr_test))
# print('map_mrr dev' +str(map_mrr_test))
# map,mrr,p1 = map_mrr_test
# if p1>best_p1:
# best_p1=p1
# filename= "checkpoint/"+args.data+"_"+str(p1)+".model"
# save_path = saver.save(sess, filename)
# # load_path = saver.restore(sess, model_path)
#
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
#
#
#=======
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
logger.info('map_mrr test' +str(map_mrr_test))
print('epoch '+ str(i) + 'map_mrr test' +str(map_mrr_test))
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
146
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/models/QA_CNN_pairwise.py
|
#coding:utf-8
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import models.blocks as blocks
# model_type :apn or qacnn
class QA_CNN_extend(object):
# def __init__(self,max_input_left,max_input_right,batch_size,vocab_size,embedding_size,filter_sizes,num_filters,hidden_size,
# dropout_keep_prob = 1,learning_rate = 0.001,embeddings = None,l2_reg_lambda = 0.0,trainable = True,pooling = 'attentive',conv = 'narrow'):
#
# """
# QA_RNN model for question answering
#
# Args:
# self.dropout_keep_prob: dropout rate
# self.num_filters : number of filters
# self.para : parameter list
# self.extend_feature_dim : my extend feature dimension
# self.max_input_left : the length of question
# self.max_input_right : the length of answer
# self.pooling : pooling strategy :max pooling or attentive pooling
#
# """
# self.dropout_keep_prob = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
# self.num_filters = num_filters
# self.embeddings = embeddings
# self.embedding_size = embedding_size
# self.batch_size = batch_size
# self.filter_sizes = filter_sizes
# self.l2_reg_lambda = l2_reg_lambda
# self.para = []
#
# self.max_input_left = max_input_left
# self.max_input_right = max_input_right
# self.trainable = trainable
# self.vocab_size = vocab_size
# self.pooling = pooling
# self.total_num_filter = len(self.filter_sizes) * self.num_filters
#
# self.conv = conv
# self.pooling = 'traditional'
# self.learning_rate = learning_rate
#
# self.hidden_size = hidden_size
#
# self.attention_size = 100
def __init__(self,opt):
for key,value in opt.items():
self.__setattr__(key,value)
self.attention_size = 100
self.pooling = 'mean'
self.total_num_filter = len(self.filter_sizes) * self.num_filters
self.para = []
self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
def create_placeholder(self):
print(('Create placeholders'))
        # the length of the sentence varies with the batch, so the shapes are (None, None)
self.question = tf.placeholder(tf.int32,[None,None],name = 'input_question')
self.max_input_left = tf.shape(self.question)[1]
self.batch_size = tf.shape(self.question)[0]
self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer')
self.max_input_right = tf.shape(self.answer)[1]
self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right')
# self.q_mask = tf.placeholder(tf.int32,[None,None],name = 'q_mask')
# self.a_mask = tf.placeholder(tf.int32,[None,None],name = 'a_mask')
# self.a_neg_mask = tf.placeholder(tf.int32,[None,None],name = 'a_neg_mask')
def add_embeddings(self):
print( 'add embeddings')
if self.embeddings is not None:
print( "load embedding")
W = tf.Variable(np.array(self.embeddings),name = "W" ,dtype="float32",trainable = self.trainable)
else:
print( "random embedding")
W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name="W",trainable = self.trainable)
self.embedding_W = W
# self.overlap_W = tf.Variable(a,name="W",trainable = True)
self.para.append(self.embedding_W)
self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question)
self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer)
self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative)
#real length
self.q_len,self.q_mask = blocks.length(self.question)
self.a_len,self.a_mask = blocks.length(self.answer)
self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative)
def convolution(self):
print( 'convolution:wide_convolution')
self.kernels = []
for i,filter_size in enumerate(self.filter_sizes):
with tf.name_scope('conv-max-pool-%s' % filter_size):
filter_shape = [filter_size,self.embedding_size,1,self.num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name="W")
b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name="b")
self.kernels.append((W,b))
self.para.append(W)
self.para.append(b)
embeddings = [self.q_embedding,self.a_embedding,self.a_neg_embedding]
self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings]
#convolution
def pooling_graph(self):
if self.pooling == 'mean':
self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask)
self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask)
elif self.pooling == 'attentive':
self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'position':
self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'traditional':
print( self.pooling)
print(self.q_cnn)
self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
def para_initial(self):
# print(("---------"))
# self.W_qp = tf.Variable(tf.truncated_normal(shape = [self.hidden_size * 2,1],stddev = 0.01,name = 'W_qp'))
self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U'))
self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm'))
self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm'))
self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms'))
self.M_qi = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi'))
def mean_pooling(self,conv,mask):
conv = tf.squeeze(conv,2)
print( tf.expand_dims(tf.cast(mask,tf.float32),-1))
# conv_mask = tf.multiply(conv,tf.expand_dims(tf.cast(mask,tf.float32),-1))
# self.see = conv_mask
# print( conv_mask)
        return tf.reduce_mean(conv,axis = 1)
def attentive_pooling(self,input_left,input_right,q_mask,a_mask):
Q = tf.squeeze(input_left,axis = 2)
A = tf.squeeze(input_right,axis = 2)
print( Q)
print( A)
# Q = tf.reshape(input_left,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,len(self.filter_sizes) * self.num_filters],name = 'A')
# G = tf.tanh(tf.matmul(tf.matmul(Q,self.U),\
# A,transpose_b = True),name = 'G')
first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U)
second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters])
result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1]))
print( second_step)
print( tf.transpose(A,perm = [0,2,1]))
# print( 'result',result)
G = tf.tanh(result)
# G = result
# column-wise pooling ,row-wise pooling
row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling')
col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling')
self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q')
self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1))
self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a')
self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1))
self.see = G
R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q')
R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a')
return R_q,R_a
def traditional_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2))
Q = tf.reduce_mean(input_left_mask,1)
a_shape = tf.shape(input_right)
A = tf.reshape(input_right,[-1,self.total_num_filter])
m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1))
f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1]))
self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2))
self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True))
self.see = self.f_attention_norm
a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1)
return Q,a_attention
def position_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
# Q = tf.reshape(input_left,[-1,self.max_input_left,self.hidden_size*2],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,self.hidden_size*2],name = 'A')
Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
QU = tf.matmul(Q,self.U)
QUA = tf.multiply(tf.expand_dims(QU,1),input_right)
        self.attention_a = tf.cast(tf.argmax(QUA,2),tf.float32)
# q_shape = tf.shape(input_left)
# Q_1 = tf.reshape(input_left,[-1,self.total_num_filter])
# QU = tf.matmul(Q_1,self.U)
# QU_1 = tf.reshape(QU,[-1,q_shape[1],self.total_num_filter])
# A_1 = tf.transpose(input_right,[0,2,1])
# QUA = tf.matmul(QU_1,A_1)
# QUA = tf.nn.l2_normalize(QUA,1)
# G = tf.tanh(QUA)
# Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
# # self.Q_mask = tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2))
# row_pooling = tf.reduce_max(G,1,name="row_pooling")
# col_pooling = tf.reduce_max(G,2,name="col_pooling")
# self.attention_a = tf.nn.softmax(row_pooling,1,name = "attention_a")
self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32))
self.see = self.attention_a
self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True))
self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter])
return Q ,self.r_a
def create_loss(self):
with tf.name_scope('score'):
self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn)
self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn)
l2_loss = tf.constant(0.0)
for p in self.para:
l2_loss += tf.nn.l2_loss(p)
with tf.name_scope("loss"):
self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13)))
self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss
tf.summary.scalar('loss', self.loss)
# Accuracy
with tf.name_scope("accuracy"):
self.correct = tf.equal(0.0, self.losses)
self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy")
tf.summary.scalar('accuracy', self.accuracy)
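    # Illustration (added comment, not in the original): the loss above is a pairwise hinge
    # loss with margin 0.05: loss = max(0, 0.05 - (cos(q, a+) - cos(q, a-))).
    # E.g. cos(q, a+) = 0.80, cos(q, a-) = 0.70 gives a margin of 0.10 >= 0.05, so loss 0
    # (counted as "correct" by the accuracy above); 0.76 vs 0.74 gives loss 0.03.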
def create_op(self):
self.global_step = tf.Variable(0, name = "global_step", trainable = False)
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step)
def max_pooling(self,conv,input_length):
pooled = tf.nn.max_pool(
conv,
ksize = [1, input_length, 1, 1],
strides = [1, 1, 1, 1],
padding = 'VALID',
name="pool")
return pooled
def getCosine(self,q,a):
pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder)
pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder)
pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1))
pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1))
pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1)
score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name="scores")
return score
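    # Illustration (added comment, not in the original): getCosine is a row-wise cosine
    # similarity, score_i = <q_i, a_i> / (||q_i|| * ||a_i||), computed after dropout.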
def wide_convolution(self,embedding):
cnn_outputs = []
for i,filter_size in enumerate(self.filter_sizes):
conv = tf.nn.conv2d(
embedding,
self.kernels[i][0],
strides=[1, 1, self.embedding_size, 1],
padding='SAME',
name="conv-1"
)
h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name="relu-1")
cnn_outputs.append(h)
cnn_reshaped = tf.concat(cnn_outputs,3)
return cnn_reshaped
def variable_summaries(self,var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def build_graph(self):
self.create_placeholder()
self.add_embeddings()
self.para_initial()
self.convolution()
self.pooling_graph()
self.create_loss()
self.create_op()
self.merged = tf.summary.merge_all()
def train(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
self.answer_negative:data[2],
# self.q_mask:data[3],
# self.a_mask:data[4],
# self.a_neg_mask:data[5],
self.dropout_keep_prob_holder:self.dropout_keep_prob
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see],
feed_dict)
return _, summary, step, loss, accuracy,score12, score13, see
def predict(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
# self.q_mask:data[2],
# self.a_mask:data[3],
self.dropout_keep_prob_holder:1.0
}
score = sess.run( self.score12, feed_dict)
return score
if __name__ == '__main__':
    # __init__ now takes a single option dict (the keyword-argument constructor above is
    # commented out); learning_rate is added here because create_op() reads it
    opt = {
        'max_input_left': 33,
        'max_input_right': 40,
        'batch_size': 3,
        'vocab_size': 5000,
        'embedding_size': 100,
        'filter_sizes': [3, 4, 5],
        'num_filters': 64,
        'hidden_size': 100,
        'dropout_keep_prob': 1.0,
        'embeddings': None,
        'l2_reg_lambda': 0.0,
        'learning_rate': 0.001,
        'trainable': True,
        'pooling': 'max',
        'conv': 'wide'
    }
    cnn = QA_CNN_extend(opt)
cnn.build_graph()
input_x_1 = np.reshape(np.arange(3 * 33),[3,33])
input_x_2 = np.reshape(np.arange(3 * 40),[3,40])
input_x_3 = np.reshape(np.arange(3 * 40),[3,40])
q_mask = np.ones((3,33))
a_mask = np.ones((3,40))
a_neg_mask = np.ones((3,40))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {
cnn.question:input_x_1,
cnn.answer:input_x_2,
# cnn.answer_negative:input_x_3,
cnn.q_mask:q_mask,
cnn.a_mask:a_mask,
            cnn.dropout_keep_prob_holder:cnn.dropout_keep_prob
# cnn.a_neg_mask:a_neg_mask
# cnn.q_pos_overlap:q_pos_embedding,
# cnn.q_neg_overlap:q_neg_embedding,
# cnn.a_pos_overlap:a_pos_embedding,
# cnn.a_neg_overlap:a_neg_embedding,
# cnn.q_position:q_position,
# cnn.a_pos_position:a_pos_position,
# cnn.a_neg_position:a_neg_position
}
question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict)
print( question.shape,answer.shape)
print( score)
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
147
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/models/my/nn.py
|
from my.general import flatten, reconstruct, add_wd, exp_mask
import numpy as np
import tensorflow as tf
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
is_train=None):#, name_w='', name_b=''
# if args is None or (nest.is_sequence(args) and not args):
# raise ValueError("`args` must be specified")
# if not nest.is_sequence(args):
# args = [args]
flat_args = [flatten(arg, 1) for arg in args]#[210,20]
# if input_keep_prob < 1.0:
# assert is_train is not None
flat_args = [tf.nn.dropout(arg, input_keep_prob) for arg in flat_args]
total_arg_size = 0#[60]
shapes = [a.get_shape() for a in flat_args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
# print(total_arg_size)
# exit()
dtype = [a.dtype for a in flat_args][0]
# scope = tf.get_variable_scope()
with tf.variable_scope(scope) as outer_scope:
weights = tf.get_variable(_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype)
if len(flat_args) == 1:
res = tf.matmul(flat_args[0], weights)
else:
res = tf.matmul(tf.concat(flat_args, 1), weights)
if not bias:
flat_out = res
else:
with tf.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
biases = tf.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=tf.constant_initializer(bias_start, dtype=dtype))
flat_out = tf.nn.bias_add(res, biases)
out = reconstruct(flat_out, args[0], 1)
if squeeze:
out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
if wd:
add_wd(wd)
return out
def softmax(logits, mask=None, scope=None):
with tf.name_scope(scope or "Softmax"):
if mask is not None:
logits = exp_mask(logits, mask)
flat_logits = flatten(logits, 1)
flat_out = tf.nn.softmax(flat_logits)
out = reconstruct(flat_out, logits, 1)
return out
def softsel(target, logits, mask=None, scope=None):
"""
:param target: [ ..., J, d] dtype=float
:param logits: [ ..., J], dtype=float
:param mask: [ ..., J], dtype=bool
:param scope:
:return: [..., d], dtype=float
"""
with tf.name_scope(scope or "Softsel"):
a = softmax(logits, mask = mask)
target_rank = len(target.get_shape().as_list())
out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)
return out
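# Illustration (added comment, not in the original): softsel ("soft selection") attends over
# the J axis, e.g. target of shape [N, J, d] with logits [N, J] yields a weighted sum of shape [N, d].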
def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
with tf.variable_scope(scope or "highway_layer"):
d = arg.get_shape()[-1]
trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob)
trans = tf.nn.relu(trans)
gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob)
gate = tf.nn.sigmoid(gate)
out = gate * trans + (1 - gate) * arg
return out
def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
with tf.variable_scope(scope or "highway_network"):
prev = arg
cur = None
for layer_idx in range(num_layers):
cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx), wd=wd,
input_keep_prob=input_keep_prob)
prev = cur
return cur
def conv1d(in_, filter_size, height, padding, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "conv1d"):
num_channels = in_.get_shape()[-1]
filter_ = tf.get_variable("filter", shape=[1, height, num_channels, filter_size], dtype='float')
bias = tf.get_variable("bias", shape=[filter_size], dtype='float')
strides = [1, 1, 1, 1]
in_ = tf.nn.dropout(in_, keep_prob)
xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias # [N*M, JX, W/filter_stride, d]
out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d]
return out
def multi_conv1d(in_, filter_sizes, heights, padding, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "multi_conv1d"):
assert len(filter_sizes) == len(heights)
outs = []
for filter_size, height in zip(filter_sizes, heights):
if filter_size == 0:
continue
out = conv1d(in_, filter_size, height, padding, keep_prob=keep_prob, scope="conv1d_{}".format(height))
outs.append(out)
concat_out = tf.concat(outs, axis=2)
return concat_out
if __name__ == '__main__':
a = tf.Variable(np.random.random(size=(2,2,4)))
b = tf.Variable(np.random.random(size=(2,3,4)))
c = tf.tile(tf.expand_dims(a, 2), [1, 1, 3, 1])
test = flatten(c,1)
out = reconstruct(test, c, 1)
d = tf.tile(tf.expand_dims(b, 1), [1, 2, 1, 1])
e = linear([c,d,c*d],1,bias = False,scope = "test",)
# f = softsel(d, e)
with tf.Session() as sess:
tf.global_variables_initializer().run()
print(sess.run(test))
print(sess.run(tf.shape(out)))
exit()
print(sess.run(tf.shape(a)))
print(sess.run(a))
print(sess.run(tf.shape(b)))
print(sess.run(b))
print(sess.run(tf.shape(c)))
print(sess.run(c))
print(sess.run(tf.shape(d)))
print(sess.run(d))
print(sess.run(tf.shape(e)))
print(sess.run(e))
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
148
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/models/__init__.py
|
from .QA_CNN_pairwise import QA_CNN_extend as CNN
from .QA_RNN_pairwise import QA_RNN_extend as RNN
from .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN
def setup(opt):
if opt["model_name"]=="cnn":
model=CNN(opt)
elif opt["model_name"]=="rnn":
model=RNN(opt)
elif opt['model_name']=='qcnn':
model=QCNN(opt)
else:
print("no model")
exit(0)
return model
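# Illustration (added comment, not part of the original file): run.py builds the option
# dict from the tf flags and then calls, e.g.,
#   model = setup({'model_name': 'cnn', 'embeddings': embedding, ...})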
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
149
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/test.py
|
# -*- coding: utf-8 -*-
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime
import os
import models
import numpy as np
import evaluation
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_rnn_flag()
#args = Singleton().get_8008_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer()) # fun first than print or save
ckpt = tf.train.get_checkpoint_state("checkpoint")
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
print(sess.run(model.position_embedding)[0])
if os.path.exists("model") :
import shutil
shutil.rmtree("model")
builder = tf.saved_model.builder.SavedModelBuilder("./model")
builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
builder.save(True)
variable_averages = tf.train.ExponentialMovingAverage( model)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
for name in variables_to_restore:
print(name)
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
text = "怎么 提取 公积金 ?"
splited_text=data_helper.encode_to_split(text,alphabet)
mb_q,mb_q_mask = data_helper.prepare_data([splited_text])
mb_a,mb_a_mask = data_helper.prepare_data([splited_text])
data = (mb_q,mb_a,mb_q_mask,mb_a_mask)
score = model.predict(sess,data)
print(score)
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.q_mask:data[2],
model.a_mask:data[3],
model.dropout_keep_prob_holder:1.0
}
sess.run(model.position_embedding,feed_dict=feed_dict)[0]
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
150
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/data_helper.py
|
#-*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
from functools import wraps
import time
import pickle
def log_time_delta(func):
@wraps(func)
def _deco(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
end = time.time()
delta = end - start
print( "%s runed %.2f seconds"% (func.__name__,delta))
return ret
return _deco
from nltk.corpus import stopwords
OVERLAP = 237
class Alphabet(dict):
def __init__(self, start_feature_id = 1):
self.fid = start_feature_id
def add(self, item):
idx = self.get(item, None)
if idx is None:
idx = self.fid
self[item] = idx
# self[idx] = item
self.fid += 1
return idx
def dump(self, fname):
with open(fname, "w") as out:
for k in sorted(self.keys()):
out.write("{}\t{}\n".format(k, self[k]))
def cut(sentence):
tokens = sentence.lower().split()
# tokens = [w for w in tokens if w not in stopwords.words('english')]
return tokens
@log_time_delta
def load(dataset, filter = False):
data_dir = "data/" + dataset
datas = []
for data_name in ['train.txt','test.txt','dev.txt']:
data_file = os.path.join(data_dir,data_name)
data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0')
# data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0')
if filter == True:
datas.append(removeUnanswerdQuestion(data))
else:
datas.append(data)
# sub_file = os.path.join(data_dir,'submit.txt')
# submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3)
# datas.append(submit)
return tuple(datas)
@log_time_delta
def removeUnanswerdQuestion(df):
counter= df.groupby("question").apply(lambda group: sum(group["flag"]))
questions_have_correct=counter[counter>0].index
counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0))
questions_have_uncorrect=counter[counter>0].index
counter=df.groupby("question").apply(lambda group: len(group["flag"]))
questions_multi=counter[counter>1].index
return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_uncorrect)].reset_index()
@log_time_delta
def get_alphabet(corpuses=None,dataset=""):
pkl_name="temp/"+dataset+".alphabet.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
alphabet = Alphabet(start_feature_id = 0)
alphabet.add('[UNK]')
alphabet.add('END')
count = 0
for corpus in corpuses:
for texts in [corpus["question"].unique(),corpus["answer"]]:
for sentence in texts:
tokens = cut(sentence)
for token in set(tokens):
alphabet.add(token)
print("alphabet size %d" % len(alphabet.keys()) )
if not os.path.exists("temp"):
os.mkdir("temp")
pickle.dump( alphabet,open(pkl_name,"wb"))
return alphabet
@log_time_delta
def getSubVectorsFromDict(vectors,vocab,dim = 300):
embedding = np.zeros((len(vocab),dim))
count = 1
for word in vocab:
if word in vectors:
count += 1
embedding[vocab[word]]= vectors[word]
else:
embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist()
print( 'word in embedding',count)
return embedding
def encode_to_split(sentence,alphabet):
indices = []
tokens = cut(sentence)
seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens]
return seq
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size = 100):
vectors = {}
with open(filename,encoding='utf-8') as f:
i = 0
for line in f:
i += 1
if i % 100000 == 0:
                print( 'epoch %d' % i)
items = line.strip().split(' ')
if len(items) == 2:
vocab_size, embedding_size= items[0],items[1]
print( ( vocab_size, embedding_size))
else:
word = items[0]
if word in alphabet:
vectors[word] = items[1:]
print( 'embedding_size',embedding_size)
print( 'done')
    print( 'words found in word2vec embedding ',len(vectors.keys()))
return vectors
@log_time_delta
def get_embedding(alphabet,dim = 300,language ="en",dataset=""):
pkl_name="temp/"+dataset+".subembedding.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
if language=="en":
fname = 'embedding/glove.6B/glove.6B.300d.txt'
else:
fname= "embedding/embedding.200.header_txt"
embeddings = load_text_vec(alphabet,fname,embedding_size = dim)
sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim)
pickle.dump( sub_embeddings,open(pkl_name,"wb"))
return sub_embeddings
@log_time_delta
def get_mini_batch_test(df,alphabet,batch_size):
q = []
a = []
pos_overlap = []
for index,row in df.iterrows():
question = encode_to_split(row["question"],alphabet)
answer = encode_to_split(row["answer"],alphabet)
overlap_pos = overlap_index(row['question'],row['answer'])
q.append(question)
a.append(answer)
pos_overlap.append(overlap_pos)
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in mini_batches:
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_pos_overlap = [pos_overlap[t] for t in mini_batch]
mb_q,mb_q_mask = prepare_data(mb_q)
mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap)
yield(mb_q,mb_a)
# calculate the overlap_index
def overlap_index(question,answer,stopwords = []):
ans_token = cut(answer)
qset = set(cut(question))
aset = set(ans_token)
a_len = len(ans_token)
# q_index = np.arange(1,q_len)
a_index = np.arange(1,a_len + 1)
overlap = qset.intersection(aset)
# for i,q in enumerate(cut(question)[:q_len]):
# value = 1
# if q in overlap:
# value = 2
# q_index[i] = value
for i,a in enumerate(ans_token):
if a in overlap:
a_index[i] = OVERLAP
return a_index
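# Illustration (added comment, not in the original): answer tokens that also occur in the
# question are flagged with the OVERLAP id (237), e.g.
#   overlap_index("what is the capital", "paris is the capital of france")
#   -> array([1, 237, 237, 237, 5, 6])   ("is", "the", "capital" overlap)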
def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False):
q,a,neg_a=[],[],[]
answers=df["answer"][:250]
ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict()
for question in tqdm(df['question'].unique()):
index= ground_truth[question]
        candidates = [i for i in range(250)]
        candidates.remove(index)
        a_neg_index = random.choice(candidates)
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(answers[index],alphabet)
seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)
q.append(seq_q)
a.append( seq_a)
neg_a.append(seq_neg_a )
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):
    # sort by question length so that padding inside a mini-batch stays small
    if sort_by_len:
        sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)
        q = [q[i] for i in sorted_index]
        a = [a[i] for i in sorted_index]
        neg_a = [neg_a[i] for i in sorted_index]
    #get batch
    m = 0
    n = len(q)
    idx_list = np.arange(m,n,batch_size)
    if shuffle:
        np.random.shuffle(idx_list)
    mini_batches = []
    for idx in idx_list:
        mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
    for mini_batch in tqdm(mini_batches):
        mb_q = [q[t] for t in mini_batch]
        mb_a = [a[t] for t in mini_batch]
        mb_neg_a = [neg_a[t] for t in mini_batch]
        # the original referred to pos_overlap/neg_overlap, which are never passed into
        # this function; the pairwise models only need the padded ids and masks, so
        # prepare_data is called without the overlap argument here
        mb_q,mb_q_mask = prepare_data(mb_q)
        mb_a,mb_a_mask = prepare_data(mb_a)
        mb_neg_a,mb_a_neg_mask = prepare_data(mb_neg_a)
        yield(mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask)
def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
q = []
a = []
neg_a = []
for question in df['question'].unique():
# group = df[df["question"]==question]
# pos_answers = group[df["flag"] == 1]["answer"]
# neg_answers = group[df["flag"] == 0]["answer"].reset_index()
group = df[df["question"]==question]
pos_answers = group[group["flag"] == 1]["answer"]
neg_answers = group[group["flag"] == 0]["answer"]#.reset_index()
for pos in pos_answers:
if model is not None and sess is not None:
pos_sent= encode_to_split(pos,alphabet)
q_sent,q_mask= prepare_data([pos_sent])
neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers]
a_sent,a_mask= prepare_data(neg_sents)
scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask))
neg_index = scores.argmax()
else:
if len(neg_answers.index) > 0:
neg_index = np.random.choice(neg_answers.index)
neg = neg_answers.reset_index().loc[neg_index,]["answer"]
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(pos,alphabet)
seq_neg_a = encode_to_split(neg,alphabet)
q.append(seq_q)
a.append(seq_a)
neg_a.append(seq_neg_a)
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def prepare_data(seqs,overlap = None):
lengths = [len(seq) for seq in seqs]
n_samples = len(seqs)
max_len = np.max(lengths)
x = np.zeros((n_samples,max_len)).astype('int32')
if overlap is not None:
overlap_position = np.zeros((n_samples,max_len)).astype('float')
for idx ,seq in enumerate(seqs):
x[idx,:lengths[idx]] = seq
overlap_position[idx,:lengths[idx]] = overlap[idx]
return x,overlap_position
else:
x_mask = np.zeros((n_samples, max_len)).astype('float')
for idx, seq in enumerate(seqs):
x[idx, :lengths[idx]] = seq
x_mask[idx, :lengths[idx]] = 1.0
# print( x, x_mask)
return x, x_mask
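# Illustration (added comment, not in the original): prepare_data pads every sequence to the
# longest one in the batch and returns a 0/1 mask marking the real tokens, e.g.
#   prepare_data([[3, 7, 9], [4]])
#   -> x    = [[3, 7, 9], [4, 0, 0]]
#   -> mask = [[1., 1., 1.], [1., 0., 0.]]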
# def prepare_data(seqs):
# lengths = [len(seq) for seq in seqs]
# n_samples = len(seqs)
# max_len = np.max(lengths)
# x = np.zeros((n_samples, max_len)).astype('int32')
# x_mask = np.zeros((n_samples, max_len)).astype('float')
# for idx, seq in enumerate(seqs):
# x[idx, :lengths[idx]] = seq
# x_mask[idx, :lengths[idx]] = 1.0
# # print( x, x_mask)
# return x, x_mask
def getLogger():
import sys
import logging
import os
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
        os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
return logger
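# --- Minimal end-to-end sketch (added for illustration; not part of the original file). ---
# Shows how Alphabet, encode_to_split and prepare_data fit together on toy sentences.
if __name__ == '__main__':
    toy_alphabet = Alphabet(start_feature_id=0)
    toy_alphabet.add('[UNK]')
    for sentence in ["what is the capital of france", "paris is the capital of france"]:
        for token in cut(sentence):
            toy_alphabet.add(token)
    seqs = [encode_to_split(s, toy_alphabet) for s in ["what is the capital", "paris"]]
    x, mask = prepare_data(seqs)
    print(x)      # padded id matrix, shape (2, 4)
    print(mask)   # 1.0 where a real token is present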
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
158
|
pedromeldola/Desafio
|
refs/heads/master
|
/core/models.py
|
from django.db import models
# model class with the game's score attributes
class Jogo(models.Model):
idJogo = models.AutoField(primary_key=True)
placar = models.IntegerField()
placarMin = models.IntegerField()
placarMax = models.IntegerField()
quebraRecMin = models.IntegerField()
quebraRecMax = models.IntegerField()
def __str__(self):
return str(self.idJogo)
|
{"/core/views.py": ["/core/models.py"]}
|
159
|
pedromeldola/Desafio
|
refs/heads/master
|
/core/views.py
|
from django.shortcuts import render,redirect
from .models import Jogo
from django.views.decorators.csrf import csrf_protect
# view that fetches every Jogo object when the home page is opened
def home_page(request):
jogo = Jogo.objects.all()
return render (request,'home.html',{'jogo':jogo})
# view that inserts the submitted score into the table when the button is clicked
def inserir(request):
placar = request.POST.get('nPlacar')
    # fetch the values stored by the previous records
try:
placarMin = int(Jogo.objects.earliest('placarMin').placarMin)
placarMax = int(Jogo.objects.latest('placarMax').placarMax)
quebraRecMin = int(Jogo.objects.latest('quebraRecMin').quebraRecMin)
quebraRecMax = int(Jogo.objects.latest('quebraRecMax').quebraRecMax)
except:
placarMin = False
placarMax = False
quebraRecMin = False
quebraRecMax = False
placar = int(placar)
    # decide how the new score affects the min/max values and the record-break counters
if placarMin is False:
placarMin = placar
placarMax = placar
elif placar < placarMin:
placarMin = placar
quebraRecMin += 1
elif placar > placarMax or placarMax is False:
placarMax = placar
quebraRecMax += 1
else:
        quebraRecMin = quebraRecMin + 0
        quebraRecMax = quebraRecMax + 0
    # create the object with all attributes already populated
jogo = Jogo.objects.create(placarMin=placarMin,placar=placar,placarMax=placarMax,quebraRecMin=quebraRecMin,quebraRecMax=quebraRecMax)
    return redirect('/')  # return to the home page after the score is inserted
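# Illustration (added comment, not part of the original file): inserting the scores
# 10, 5, 12 in that order creates the rows
#   placar=10  placarMin=10 placarMax=10 quebraRecMin=0 quebraRecMax=0   (first game)
#   placar=5   placarMin=5  placarMax=10 quebraRecMin=1 quebraRecMax=0   (new minimum record)
#   placar=12  placarMin=5  placarMax=12 quebraRecMin=1 quebraRecMax=1   (new maximum record)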
|
{"/core/views.py": ["/core/models.py"]}
|
160
|
pedromeldola/Desafio
|
refs/heads/master
|
/core/migrations/0002_auto_20200930_2254.py
|
# Generated by Django 3.1 on 2020-10-01 01:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='jogo',
name='id',
),
migrations.AlterField(
model_name='jogo',
name='idJogo',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='jogo',
name='placar',
field=models.IntegerField(),
),
]
|
{"/core/views.py": ["/core/models.py"]}
|
161
|
pedromeldola/Desafio
|
refs/heads/master
|
/core/migrations/0001_initial.py
|
# Generated by Django 3.1.1 on 2020-09-28 18:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Jogo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idJogo', models.IntegerField()),
('placar', models.IntegerField(max_length=3)),
('placarMin', models.IntegerField()),
('placarMax', models.IntegerField()),
('quebraRecMin', models.IntegerField()),
('quebraRecMax', models.IntegerField()),
],
),
]
|
{"/core/views.py": ["/core/models.py"]}
|
185
|
andrewjschuang/Turing
|
refs/heads/master
|
/turing.py
|
import time
from datetime import datetime
from flask import (Flask, abort, flash, redirect, render_template, request,
session, url_for)
from sqlalchemy.exc import IntegrityError
from wtforms import (Form, RadioField, StringField, SubmitField, TextAreaField, TextField,
validators)
from models.model import User, Project, Task, Questionnaire, Question, Response
from models.shared import db
class SignUp(Form):
name = TextField('Name:', validators=[validators.required()])
email = TextField('Email:', validators=[
validators.required(), validators.Length(min=6, max=35)])
password = TextField('Password:', validators=[
validators.required(), validators.Length(min=3, max=35)])
class Login(Form):
email = TextField('Email:', validators=[
validators.required(), validators.Length(min=6, max=35)])
password = TextField('Password:', validators=[
validators.required(), validators.Length(min=3, max=35)])
def create_app(config=None):
app = Flask(__name__)
if config:
app.config.from_mapping(config)
else:
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///prod.db'
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db.init_app(app)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignUp(request.form)
if request.method == 'POST':
if form.validate():
name = request.form['name']
password = request.form['password']
email = request.form['email']
u = User(email=email, name=name, password=password)
db.session.add(u)
db.session.commit()
session['auth'] = {'name': name,
'email': email, 'timestamp': time.time()}
return redirect(url_for('index'))
else:
flash('All the form fields are required.', category='error')
return render_template('signup.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = Login(request.form)
if request.method == 'POST':
if form.validate():
password = request.form['password']
email = request.form['email']
user = User.query.filter_by(email=email).first()
print(user)
if user:
print(user)
if user.password == password:
session['auth'] = {'name': user.name,
'email': user.email,
'timestamp': time.time()
}
return redirect(url_for('index'))
else:
flash('Authentication failed', category='error')
else:
flash('Authentication failed', category='error')
else:
flash('All the form fields are required', category='error')
return render_template('login.html', form=form)
@app.route('/', methods=['GET'])
def index():
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
info = user.get_index_data()
print(info)
return render_template('index.html', **info)
return redirect('/login')
@app.route('/responses')
def responses():
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
quests = Questionnaire.query.all()
return render_template('responses.html', quests=quests)
@app.route('/respond/<int:ref>', methods=['GET', 'POST'])
def respond(ref):
quest = Questionnaire.query.get(ref)
if not quest:
print('no questionnaire found with id %s' % ref)
return abort(404)
if request.method == 'GET':
return render_template('feedback.html', name=quest.name, questions=quest.questions)
elif request.method == 'POST':
for question_id in request.form:
question = Question.query.get(question_id)
resp = Response(question=question.id, rating=request.form.get(question_id))
db.session.add(resp)
db.session.commit()
return render_template('feedback_received.html')
@app.route('/projects', methods=['GET', 'POST'])
def projects():
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
if request.method == 'POST':
name = request.form['projectName']
description = request.form['projectDescription']
pro = Project(name=name,description=description)
db.session.add(pro)
user.project.append(pro)
db.session.commit()
grid = user.get_project_grid(3)
return render_template('projects.html', projectgrid=grid)
return redirect('/login')
@app.route('/tasks/user')
@app.route('/tasks/user/<int:ref>', methods=['GET', 'POST'])
def user_tasks(ref=None):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
if ref:
user: User = User.query.filter_by(id=ref).first()
if not user:
return abort(404)
if request.method == 'POST':
name = request.form.get('taskName')
description = request.form.get('taskDescription')
t_time = request.form.get('taskTime')
if not all((name, description, t_time)):
abort(404)
t_time = datetime.strptime(t_time,'%Y-%m-%dT%H:%M:%S.%fZ')
n_task: Task = Task(name=name, description=description, end_time=t_time)
user.tasks.append(n_task)
db.session.commit()
                return ('', 200)
else:
return render_template('tasks.html', data=user)
@app.route('/tasks/project/<int:ref>', methods=['GET', 'POST'])
def proj_tasks(ref):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
project:Project = Project.query.filter_by(id=ref).first()
if not project:
return abort(404)
if request.method == 'POST':
name = request.form.get('taskName')
description = request.form.get('taskDescription')
t_time = request.form.get('taskDate')
if not all((name, description, t_time)):
abort(404)
t_time = datetime.strptime(t_time,'%Y-%m-%dT%H:%M:%S.%fZ')
n_task: Task = Task(name=name, description=description, end_time=t_time)
project.tasks.append(n_task)
user.tasks.append(n_task)
db.session.commit()
return ('' ,200)
else:
return render_template('tasks.html', data=project)
@app.route('/tasks/task/<int:ref>', methods=['GET', 'POST'])
def task_tasks(ref):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
task:Task = Task.query.filter_by(id=ref).first()
if not task:
return abort(404)
if request.method == 'POST':
name = request.form.get('taskName')
description = request.form.get('taskDescription')
t_time = request.form.get('taskDate')
if not all((name, description, t_time)):
abort(404)
t_time = datetime.strptime(t_time,'%Y-%m-%dT%H:%M:%S.%fZ')
n_task: Task = Task(name=name, description=description, end_time=t_time)
db.session.add(n_task)
task.tasks.append(n_task)
db.session.commit()
user.tasks.append(n_task)
db.session.commit()
print(task, task.tasks)
print(n_task, n_task.tasks)
return ('' ,200)
else:
print(task, task.tasks)
return render_template('tasks.html', data=task)
@app.route('/test', methods=['GET'])
def test():
return render_template('newQuestionnaire.html')
@app.route('/questionnaire/<int:ref>', methods=['GET', 'POST'])
def questionnaire(ref):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
task:Task = Task.query.filter_by(id=ref).first()
if not task:
return abort(404)
if request.method == 'POST':
name = request.form.get('name')
if not name:
return abort(404)
quest = Questionnaire(name=name,task=task)
task.questionnaires.append(quest)
for key, value in request.form.items():
if not value or key == 'name':
continue
else:
quest.questions.append(Question(text=value,questionnaire=quest))
db.session.commit()
return render_template('newQuestionnaire.html')
@app.route('/logout', methods=['GET'])
def logout():
        session.pop('auth', None)
return redirect(url_for('index'))
return app
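# Running this module directly starts the development server; test.py instead
# builds the app through create_app() with a separate test database configuration.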
if __name__ == '__main__':
app = create_app()
db.create_all(app=app)
app.run(host='localhost', port=3000, debug=True)
|
{"/test.py": ["/turing.py"]}
|
186
|
andrewjschuang/Turing
|
refs/heads/master
|
/test.py
|
from flask_testing import TestCase
from models.shared import db
from models.model import User, Task, Project, Question, Response, Questionnaire
from turing import create_app
import unittest
class MyTest(TestCase):
def create_app(self):
config = {
'SQLALCHEMY_DATABASE_URI': 'sqlite:///test.db',
'TESTING': True,
'SECRET_KEY': 'secret',
'SQLALCHEMY_TRACK_MODIFICATIONS': True
}
return create_app(config)
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_nothing(self):
assert True
def test_user(self):
user = User(email='em', name='us', password='pass')
db.session.add(user)
db.session.commit()
assert user in db.session
def test_project(self):
project = Project(name='n',description='desc')
db.session.add(project)
db.session.commit()
assert project in db.session
def test_task(self):
task = Task(name='n', description='desc')
db.session.add(task)
db.session.commit()
assert task in db.session
def test_usr_add_tsk2_prj(self):
user = User(email='em', name='us', password='pass')
db.session.add(user)
db.session.commit()
project = Project(name='n',description='desc')
db.session.add(project)
user.project.append(project)
db.session.commit()
project: Project= User.query.filter_by(email='em').first().project[0]
task = Task(name='n', description='desc')
db.session.add(task)
project.tasks.append(task)
db.session.commit()
assert user.project[0].tasks[0] == task
def test_sub_tasks(self):
task = Task(name='n', description='desc')
db.session.add(task)
assert task in db.session
s_task = Task(name='n', description='desc')
db.session.add(s_task)
        assert s_task in db.session
db.session.commit()
task.tasks.append(s_task)
db.session.commit()
assert task.tasks[0] == s_task
def test_questionnaire(self):
questionnaire = Questionnaire(name='Questions')
db.session.add(questionnaire)
question0 = Question(text="ola ?", questionnaire=questionnaire)
question1 = Question(text="tudo bem ?", questionnaire=questionnaire)
questionnaire.questions.append(question0)
questionnaire.questions.append(question1)
for i in range(10):
question0.responses.append(Response(rating=5,question=question0))
for i in range(10):
question1.responses.append(Response(rating=5,question=question1))
rs = [x.rating for x in questionnaire.questions[0].responses]
assert sum(rs)/len(rs) == 5
rs = [x.rating for x in questionnaire.questions[1].responses]
assert sum(rs)/len(rs) == 5
if __name__ == '__main__':
unittest.main()
|
{"/test.py": ["/turing.py"]}
|
187
|
andrewjschuang/Turing
|
refs/heads/master
|
/functionalities.py
|
functionalities = {
'Login': 'Login page',
'Feedback': 'This feedback form',
'Todo': 'To do module',
'Projects': 'Anything related to projects',
'Code': 'Code editor',
'Forum': 'The forum',
'Profile': 'Your profile page',
}
|
{"/test.py": ["/turing.py"]}
|
188
|
yywang0514/dsnre
|
refs/heads/master
|
/train.py
|
import sys
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
import argparse
import logging
from lib import *
from model import *
def train(options):
if not os.path.exists(options.folder):
os.mkdir(options.folder)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s: %(name)s: %(levelname)s: %(message)s")
hdlr = logging.FileHandler(os.path.join(options.folder, options.file_log), mode = "w")
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.info("python %s" %(" ".join(sys.argv)))
#################################################################################
start_time = time.time()
msg = "Loading dicts from %s..." %(options.file_dic)
display(msg)
vocab = dicfold(options.file_dic)
word2idx, pre_train_emb, part_point = build_word2idx(vocab, options.file_emb)
msg = "Loading data from %s..." %(options.file_train)
display(msg)
train = datafold(options.file_train)
msg = "Loading data from %s..." %(options.file_test)
display(msg)
test = datafold(options.file_test)
end_time = time.time()
msg = "Loading data time: %f seconds" %(end_time - start_time)
display(msg)
options.size_vocab = len(word2idx)
if options.devFreq == -1:
options.devFreq = (len(train) + options.batch_size - 1) // options.batch_size
msg = "#inst in train: %d" %(len(train))
display(msg)
msg = "#inst in test %d" %(len(test))
display(msg)
msg = "#word vocab: %d" %(options.size_vocab)
display(msg)
msg = "=" * 30 + "Hyperparameter:" + "=" * 30
display(msg)
for attr, value in sorted(vars(options).items(), key = lambda x: x[0]):
msg = "{}={}".format(attr.upper(), value)
display(msg)
#################################################################################
msg = "=" * 30 + "model:" + "=" * 30
display(msg)
os.environ["CUDA_VISIBLE_DEVICES"] = options.gpus
if options.seed is not None:
torch.manual_seed(options.seed)
np.random.seed(options.seed)
model = Model(options.fine_tune,
pre_train_emb,
part_point,
options.size_vocab,
options.dim_emb,
options.dim_proj,
options.head_count,
options.dim_FNN,
options.act_str,
options.num_layer,
options.num_class,
options.dropout_rate).cuda()
if os.path.exists("{}.pt".format(options.reload_model)):
model.load_state_dict(torch.load("{}.pt".format(options.reload_model)))
    parameters = list(filter(lambda param: param.requires_grad, model.parameters()))  # materialize so the same list can be reused for gradient clipping below
optimizer = optimizer_wrapper(options.optimizer, options.lr, parameters)
msg = "\n{}".format(model)
display(msg)
#################################################################################
checkpoint_dir = os.path.join(options.folder, "checkpoints")
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
best_path = os.path.join(checkpoint_dir, options.saveto)
#################################################################################
msg = "=" * 30 + "Optimizing:" + "=" * 30
display(msg)
[train_rels, train_nums, train_sents, train_poss, train_eposs] = bags_decompose(train)
[test_rels, test_nums, test_sents, test_poss, test_eposs] = bags_decompose(test)
# batch_index = [0, 1, 2]
# batch_rels = [train_rels[m][0] for m in batch_index]
# batch_nums = [train_nums[m] for m in batch_index]
# batch_sents = [train_sents[m] for m in batch_index]
# batch_poss = [train_poss[m] for m in batch_index]
# batch_eposs = [train_eposs[m] for m in batch_index]
# batch_data = select_instance(batch_rels,
# batch_nums,
# batch_sents,
# batch_poss,
# batch_eposs,
# model)
# for sent in batch_data[0]:
# print(sent)
# print(batch_data[1])
# print(batch_data[2])
# print(batch_data[3])
train_idx_list = np.arange(len(train))
steps_per_epoch = (len(train) + options.batch_size - 1) // options.batch_size
n_updates = 0
for e in range(options.nepochs):
np.random.shuffle(train_idx_list)
for step in range(steps_per_epoch):
batch_index = train_idx_list[step * options.batch_size: (step + 1) * options.batch_size]
batch_rels = [train_rels[m][0] for m in batch_index]
batch_nums = [train_nums[m] for m in batch_index]
batch_sents = [train_sents[m] for m in batch_index]
batch_poss = [train_poss[m] for m in batch_index]
batch_eposs = [train_eposs[m] for m in batch_index]
batch_data = select_instance(batch_rels,
batch_nums,
batch_sents,
batch_poss,
batch_eposs,
model)
disp_start = time.time()
model.train()
n_updates += 1
optimizer.zero_grad()
logit = model(batch_data[0], batch_data[1], batch_data[2])
loss = F.cross_entropy(logit, batch_data[3])
loss.backward()
if options.clip_c != 0:
total_norm = torch.nn.utils.clip_grad_norm_(parameters, options.clip_c)
optimizer.step()
disp_end = time.time()
if np.mod(n_updates, options.dispFreq) == 0:
msg = "Epoch: %d, Step: %d, Loss: %f, Time: %.2f sec" %(e, n_updates, loss.cpu().item(), disp_end - disp_start)
display(msg)
if np.mod(n_updates, options.devFreq) == 0:
msg = "=" * 30 + "Evaluating" + "=" * 30
display(msg)
model.eval()
test_predict = predict(test_rels, test_nums, test_sents, test_poss, test_eposs, model)
test_pr = positive_evaluation(test_predict)
msg = 'test set PR = [' + str(test_pr[0][-1]) + ' ' + str(test_pr[1][-1]) + ']'
display(msg)
msg = "Saving model..."
display(msg)
torch.save(model.state_dict(), "{}_step_{}.pt".format(best_path, n_updates))
msg = "Model checkpoint has been saved to {}_step_{}.pt".format(best_path, n_updates)
display(msg)
end_time = time.time()
msg = "Optimizing time: %f seconds" %(end_time - start_time)
display(msg)
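# predict() scores every sentence in a bag and applies the at-least-one heuristic:
# if any instance is classified as a positive relation, the bag keeps the most
# confident positive label; otherwise it falls back to the best NA score.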
def predict(rels, nums, sents, poss, eposs, model):
numBags = len(rels)
predict_y = np.zeros((numBags), dtype=np.int32)
predict_y_prob = np.zeros((numBags), dtype=np.float32)
y = np.asarray(rels, dtype='int32')
for bagIndex, insRel in enumerate(rels):
insNum = nums[bagIndex]
maxP = -1
pred_rel_type = 0
max_pos_p = -1
positive_flag = False
for m in range(insNum):
insX = sents[bagIndex][m]
epos = eposs[bagIndex][m]
sel_x, sel_len, sel_epos = prepare_data([insX], [epos])
results = model(sel_x, sel_len, sel_epos)
rel_type = results.argmax()
if positive_flag and rel_type == 0:
continue
else:
# at least one instance is positive
tmpMax = results.max()
if rel_type > 0:
positive_flag = True
if tmpMax > max_pos_p:
max_pos_p = tmpMax
pred_rel_type = rel_type
else:
if tmpMax > maxP:
maxP = tmpMax
if positive_flag:
predict_y_prob[bagIndex] = max_pos_p
else:
predict_y_prob[bagIndex] = maxP
predict_y[bagIndex] = pred_rel_type
return [predict_y, predict_y_prob, y]
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--folder", help = "the dir of model", default = "workshop")
parser.add_argument("--file_dic", help = "the file of vocabulary", default = "./data/50/dict.txt")
parser.add_argument("--file_train", help = "the file of training data", default = "./data/gap_40_len_80/train_filtered.data")
parser.add_argument("--file_test", help = "the file of testing data", default = "./data/gap_40_len_80/test_filtered.data")
# parser.add_argument("--file_emb", help = "the file of embedding", default = "./data/50/dict_emb.txt")
parser.add_argument("--file_emb", help = "the file of embedding", default = "")
parser.add_argument("--file_log", help = "the log file", default = "train.log")
parser.add_argument("--reload_model", help = "the pretrained model", default = "")
parser.add_argument("--saveto", help = "the file to save the parameter", default = "model")
parser.add_argument("--seed", help = "the random seed", default = 1234, type = int)
parser.add_argument("--size_vocab", help = "the size of vocabulary", default = 10000, type = int)
parser.add_argument("--dim_emb", help = "the dimension of the word embedding", default = 256, type = int)
parser.add_argument("--dim_proj", help = "the dimension of the hidden state", default = 256, type = int)
parser.add_argument("--head_count", help = "the num of head in multi head attention", default = 8, type = int)
parser.add_argument("--dim_FNN", help = "the dimension of the positionwise FNN", default = 256, type = int)
parser.add_argument("--act_str", help = "the activation function of the positionwise FNN", default = "relu")
parser.add_argument("--num_layer", help = "the num of layers", default = 6, type = int)
parser.add_argument("--num_class", help = "the number of labels", default = 27, type = int)
parser.add_argument("--position_emb", help = "if true, the position embedding will be used", default = False, action = "store_true")
parser.add_argument("--fine_tune", help = "if true, the pretrained embedding will be fine tuned", default = False, action = "store_true")
parser.add_argument("--optimizer", help = "optimization algorithm", default = "adam")
parser.add_argument("--lr", help = "learning rate", default = 0.0004, type = float)
parser.add_argument("--dropout_rate", help = "dropout rate", default = 0.5, type = float)
parser.add_argument("--clip_c", help = "grad clip", default = 10.0, type = float)
parser.add_argument("--nepochs", help = "the max epoch", default = 30, type = int)
parser.add_argument("--batch_size", help = "batch size", default = 32, type = int)
parser.add_argument("--dispFreq", help = "the frequence of display", default = 100, type = int)
parser.add_argument("--devFreq", help = "the frequence of evaluation", default = -1, type = int)
parser.add_argument("--wait_N", help = "use to early stop", default = 1, type = int)
parser.add_argument("--patience", help = "use to early stop", default = 7, type = int)
parser.add_argument("--maxlen", help = "max length of sentence", default = 100, type = int)
parser.add_argument("--gpus", help = "specify the GPU IDs", default = "0")
options = parser.parse_args(argv)
train(options)
if "__main__" == __name__:
main(sys.argv[1:])
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
189
|
yywang0514/dsnre
|
refs/heads/master
|
/lib/module.py
|
import torch
import torch.nn as nn
import numpy as np
import math
class LayerNorm(nn.Module):
"""Layer Normalization class"""
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class MLP(nn.Module):
def __init__(self, dim_in, dim_out):
super(MLP, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self._init_params()
def _init_params(self):
self.mlp = nn.Linear(in_features = self.dim_in,
out_features = self.dim_out)
def forward(self, inp):
proj_inp = self.mlp(inp)
return proj_inp
class BiLstm(nn.Module):
def __init__(self, dim_in, dim_out):
super(BiLstm, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self._init_params()
def _init_params(self):
self.bilstm = nn.LSTM(input_size = self.dim_in,
hidden_size = self.dim_out,
bidirectional = True)
def forward(self, inp, inp_len):
sorted_inp_len, sorted_idx = torch.sort(inp_len, dim = 0, descending=True)
sorted_inp = torch.index_select(inp, dim = 1, index = sorted_idx)
pack_inp = torch.nn.utils.rnn.pack_padded_sequence(sorted_inp, sorted_inp_len)
proj_inp, _ = self.bilstm(pack_inp)
proj_inp = torch.nn.utils.rnn.pad_packed_sequence(proj_inp)
unsorted_idx = torch.zeros(sorted_idx.size()).long().cuda().scatter_(0, sorted_idx, torch.arange(inp.size()[1]).long().cuda())
unsorted_proj_inp = torch.index_select(proj_inp[0], dim = 1, index = unsorted_idx)
return unsorted_proj_inp
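# Word_Emb keeps two embedding tables when frozen pre-trained vectors are used:
# indices below part_point are looked up in a trainable table, the rest in the
# frozen pre-trained table (see inp2mask / get_emb below).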
class Word_Emb(nn.Module):
def __init__(self,
fine_tune,
pre_train_emb,
part_point,
size_vocab,
dim_emb):
super(Word_Emb, self).__init__()
self.fine_tune = fine_tune
self.pre_train_emb = pre_train_emb
self.part_point = part_point
self.size_vocab = size_vocab
self.dim_emb = dim_emb
self._init_params()
def _init_params(self):
self.embedding = torch.nn.ModuleList()
if (not self.fine_tune) and self.pre_train_emb:
self.embedding.append(nn.Embedding(self.part_point, self.dim_emb))
self.embedding.append(nn.Embedding.from_pretrained(torch.Tensor(self.pre_train_emb), freeze = True))
elif self.fine_tune and self.pre_train_emb:
init_embedding = 0.01 * np.random.randn(self.size_vocab, self.dim_emb).astype(np.float32)
init_embedding[self.part_point: ] = self.pre_train_emb
self.embedding.append(nn.Embedding.from_pretrained(torch.Tensor(init_embedding), freeze = False))
else:
self.embedding.append(nn.Embedding(self.size_vocab, self.dim_emb))
def forward(self, inp):
if (not self.fine_tune) and self.pre_train_emb:
def get_emb(inp):
mask = self.inp2mask(inp)
inp_1 = inp * mask
emb_1 = self.embedding[0](inp_1) * mask[:, :, None].float()
inp_2 = (inp - self.part_point) * (1 - mask)
emb_2 = self.embedding[1](inp_2) * (1 - mask)[:, :, None].float()
emb = emb_1 + emb_2
return emb
emb_inp = get_emb(inp)
else:
emb_inp = self.embedding[0](inp)
return emb_inp
def inp2mask(self, inp):
mask = (inp < self.part_point).long()
return mask
class Position_Emb(nn.Module):
def __init__(self, dim_emb):
super(Position_Emb, self).__init__()
self.dim_emb = dim_emb
self._init_params()
def _init_params(self):
pass
def forward(self, inp):
pass
class Wemb(nn.Module):
"""docstring for Wemb"""
def __init__(self,
fine_tune,
pre_train_emb,
part_point,
size_vocab,
dim_emb,
position_emb,
dropout_rate):
super(Wemb, self).__init__()
self.fine_tune = fine_tune
self.pre_train_emb = pre_train_emb
self.part_point = part_point
self.size_vocab = size_vocab
self.dim_emb = dim_emb
self.position_emb = position_emb
self.dropout_rate = dropout_rate
self._init_params()
def _init_params(self):
self.wembs = torch.nn.ModuleList()
self.wembs.append(Word_Emb(self.fine_tune, self.pre_train_emb, self.part_point, self.size_vocab, self.dim_emb))
if self.position_emb:
self.wembs.append(Position_Emb(self.dim_emb))
self.layer_norm = LayerNorm(self.dim_emb)
self.dropout = nn.Dropout(self.dropout_rate)
def forward(self, inp):
def add_n(inps):
rval = inps[0] * 0
for inp in inps:
rval += inp
return rval
emb_inps = []
for wemb in self.wembs:
emb_inps.append(wemb(inp))
emb_inp = add_n(emb_inps)
emb_inp = self.layer_norm(emb_inp)
emb_inp = self.dropout(emb_inp)
return emb_inp
class Multi_Head_Attention(nn.Module):
def __init__(self,
dim_proj,
head_count,
dropout_rate):
super(Multi_Head_Attention, self).__init__()
self.dim_proj = dim_proj
self.head_count = head_count
self.dim_per_head = self.dim_proj // self.head_count
self.dropout_rate = dropout_rate
self._init_params()
def _init_params(self):
self.linear_key = nn.Linear(self.dim_proj, self.head_count * self.dim_per_head)
self.linear_value = nn.Linear(self.dim_proj, self.head_count * self.dim_per_head)
self.linear_query = nn.Linear(self.dim_proj, self.head_count * self.dim_per_head)
self.dropout = nn.Dropout(self.dropout_rate)
self.softmax = nn.Softmax(dim=-1)
def forward(self, key, value, query, mask = None):
# key: batch X key_len X hidden
# value: batch X value_len X hidden
# query: batch X query_len X hidden
# mask: batch X query_len X key_len
batch_size = key.size()[0]
key_ = self.linear_key(key)
value_ = self.linear_value(value)
query_ = self.linear_query(query)
key_ = key_.reshape(batch_size, -1, self.head_count, self.dim_per_head).transpose(1, 2)
value_ = value_.reshape(batch_size, -1, self.head_count, self.dim_per_head).transpose(1, 2)
query_ = query_.reshape(batch_size, -1, self.head_count, self.dim_per_head).transpose(1, 2)
attention_scores = torch.matmul(query_, key_.transpose(2, 3))
attention_scores = attention_scores / math.sqrt(float(self.dim_per_head))
if mask is not None:
mask = mask.unsqueeze(1).expand_as(attention_scores)
attention_scores = attention_scores.masked_fill(1 - mask, -1e18)
attention_probs = self.softmax(attention_scores)
attention_probs = self.dropout(attention_probs)
context = torch.matmul(attention_probs, value_)
context = context.transpose(1, 2).reshape(batch_size, -1, self.head_count * self.dim_per_head)
return context
class TransformerEncoderBlock(nn.Module):
def __init__(self,
dim_proj,
head_count,
dim_FNN,
act_fn,
dropout_rate):
super(TransformerEncoderBlock, self).__init__()
self.dim_proj = dim_proj
self.head_count = head_count
self.dim_FNN = dim_FNN
self.act_fn = act_fn
self.dropout_rate = dropout_rate
self._init_params()
def _init_params(self):
self.multi_head_attention = Multi_Head_Attention(self.dim_proj, self.head_count, self.dropout_rate)
self.linear_proj_context = MLP(self.dim_proj, self.dim_proj)
self.layer_norm_context = LayerNorm(self.dim_proj)
self.position_wise_fnn = MLP(self.dim_proj, self.dim_FNN)
self.linear_proj_intermediate = MLP(self.dim_FNN, self.dim_proj)
self.layer_norm_intermediate = LayerNorm(self.dim_proj)
self.dropout = nn.Dropout(self.dropout_rate)
def forward(self, inp, mask):
context = self.multi_head_attention(inp, inp, inp, mask = mask)
context = self.linear_proj_context(context)
context = self.dropout(context)
res_inp = self.layer_norm_context(inp + context)
rval = self.act_fn(self.position_wise_fnn(res_inp))
rval = self.linear_proj_intermediate(rval)
rval = self.dropout(rval)
res_rval = self.layer_norm_intermediate(rval + res_inp)
return res_rval
def get_activation(act_str):
if act_str == "relu":
return torch.nn.ReLU()
elif act_str == "tanh":
return torch.nn.Tanh()
elif act_str == "sigmoid":
return torch.nn.Sigmoid()
class TransformerEncoder(nn.Module):
def __init__(self,
dim_proj,
head_count,
dim_FNN,
act_str,
num_layers,
dropout_rate):
super(TransformerEncoder, self).__init__()
self.dim_proj = dim_proj
self.head_count = head_count
self.dim_FNN = dim_FNN
self.act_fn = get_activation(act_str)
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self._init_params()
def _init_params(self):
self.transformer = torch.nn.ModuleList([TransformerEncoderBlock(self.dim_proj, self.head_count, self.dim_FNN, self.act_fn, self.dropout_rate) for _ in range(self.num_layers)])
def forward(self, inp, mask = None):
rval = []
pre_output = inp
for i in range(self.num_layers):
cur_output = self.transformer[i](pre_output, mask)
rval.append(cur_output)
pre_output = cur_output
return pre_output, rval
def optimizer_wrapper(optimizer, lr, parameters):
if optimizer == "adam":
opt = torch.optim.Adam(params = parameters, lr = lr)
return opt
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
190
|
yywang0514/dsnre
|
refs/heads/master
|
/model.py
|
import torch
import torch.nn as nn
from lib import *
class Model(nn.Module):
def __init__(self,
fine_tune,
pre_train_emb,
part_point,
size_vocab,
dim_emb,
dim_proj,
head_count,
dim_FNN,
act_str,
num_layer,
num_class,
dropout_rate):
super(Model, self).__init__()
self.fine_tune = fine_tune
self.pre_train_emb = pre_train_emb
self.part_point = part_point
self.size_vocab = size_vocab
self.dim_emb = dim_emb
self.dim_proj = dim_proj
self.head_count = head_count
self.dim_FNN = dim_FNN
self.act_str = act_str
self.num_layer = num_layer
self.num_class = num_class
self.dropout_rate = dropout_rate
self._init_params()
def _init_params(self):
self.wemb = Word_Emb(self.fine_tune,
self.pre_train_emb,
self.part_point,
self.size_vocab,
self.dim_emb)
self.encoder = TransformerEncoder(self.dim_proj,
self.head_count,
self.dim_FNN,
self.act_str,
self.num_layer,
self.dropout_rate)
self.dense = MLP(self.dim_proj * 3, self.dim_proj)
self.relu = torch.nn.ReLU()
self.classifier = MLP(self.dim_proj, self.num_class)
self.dropout = nn.Dropout(self.dropout_rate)
def forward(self, inp, lengths, epos):
mask, mask_l, mask_m, mask_r = self.pos2mask(epos, lengths)
emb_inp = self.wemb(inp)
emb_inp = self.dropout(emb_inp)
proj_inp, _ = self.encoder(emb_inp, self.create_attention_mask(mask, mask))
proj_inp = proj_inp * mask[:, :, None]
pool_inp_l = torch.sum(proj_inp * mask_l[:, :, None], dim = 1) / torch.sum(mask_l, dim = 1)[:, None]
pool_inp_m = torch.sum(proj_inp * mask_m[:, :, None], dim = 1) / torch.sum(mask_m, dim = 1)[:, None]
pool_inp_r = torch.sum(proj_inp * mask_r[:, :, None], dim = 1) / torch.sum(mask_r, dim = 1)[:, None]
pool_inp = torch.cat([pool_inp_l, pool_inp_m, pool_inp_r], dim = 1)
pool_inp = self.dropout(pool_inp)
logit = self.relu(self.dense(pool_inp))
logit = self.dropout(logit)
logit = self.classifier(logit)
return logit
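    # pos2mask builds piecewise-pooling masks from the (sorted) entity positions:
    # tokens left of the first entity, between the two entities, and right of the
    # second entity are averaged separately and concatenated in forward().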
def pos2mask(self, epos, lengths):
mask = self.len2mask(lengths)
nsample = lengths.size()[0]
max_len = torch.max(lengths)
idxes = torch.arange(0, max_len).cuda()
mask_l = (idxes < epos[:, 0].unsqueeze(1)).float()
mask_r = mask - (idxes < epos[:, 1].unsqueeze(1)).float()
mask_m = torch.ones([nsample, max_len]).float().cuda() - mask_l - mask_r
return mask, mask_l, mask_m, mask_r
def len2mask(self, lengths):
max_len = torch.max(lengths)
idxes = torch.arange(0, max_len).cuda()
mask = (idxes < lengths.unsqueeze(1)).float()
return mask
def create_attention_mask(self, query_mask, key_mask):
return torch.matmul(query_mask[:, :, None], key_mask[:, None, :]).byte()
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
191
|
yywang0514/dsnre
|
refs/heads/master
|
/lib/__init__.py
|
from module import *
from util import *
from data_iterator import *
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
192
|
yywang0514/dsnre
|
refs/heads/master
|
/format.py
|
import sys
import codecs
class InstanceBag(object):
def __init__(self, entities, rel, num, sentences, positions, entitiesPos):
self.entities = entities
self.rel = rel
self.num = num
self.sentences = sentences
self.positions = positions
self.entitiesPos = entitiesPos
def bags_decompose(data_bags):
bag_sent = [data_bag.sentences for data_bag in data_bags]
bag_pos = [data_bag.positions for data_bag in data_bags]
bag_num = [data_bag.num for data_bag in data_bags]
bag_rel = [data_bag.rel for data_bag in data_bags]
bag_epos = [data_bag.entitiesPos for data_bag in data_bags]
return [bag_rel, bag_num, bag_sent, bag_pos, bag_epos]
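# datafold reads the bag-formatted text files: one line with the entity pair ids,
# one line with the bag labels followed by the instance count, then that many
# sentence lines, each starting with the two entity positions before the word ids.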
def datafold(filename):
f = open(filename, 'r')
data = []
while 1:
line = f.readline()
if not line:
break
entities = map(int, line.split(' '))
line = f.readline()
bagLabel = line.split(' ')
rel = map(int, bagLabel[0:-1])
num = int(bagLabel[-1])
positions = []
sentences = []
entitiesPos = []
for i in range(0, num):
sent = f.readline().split(' ')
positions.append(map(int, sent[0:2]))
epos = map(int, sent[0:2])
epos.sort()
entitiesPos.append(epos)
sentences.append(map(int, sent[2:-1]))
ins = InstanceBag(entities, rel, num, sentences, positions, entitiesPos)
data += [ins]
f.close()
return data
def change_word_idx(data):
new_data = []
for inst in data:
entities = inst.entities
rel = inst.rel
num = inst.num
sentences = inst.sentences
positions = inst.positions
entitiesPos = inst.entitiesPos
new_sentences = []
for sent in sentences:
new_sent = []
for word in sent:
if word == 160696:
new_sent.append(1)
elif word == 0:
new_sent.append(0)
else:
new_sent.append(word + 1)
new_sentences.append(new_sent)
new_inst = InstanceBag(entities, rel, num, new_sentences, positions, entitiesPos)
new_data.append(new_inst)
return new_data
def save_data(data, textfile):
with codecs.open(textfile, "w", encoding = "utf8") as f:
for inst in data:
f.write("%s\n" %(" ".join(map(str, inst.entities))))
f.write("%s %s\n" %(" ".join(map(str, inst.rel)), str(inst.num)))
for pos, sent in zip(inst.positions, inst.sentences):
f.write("%s %s\n" %(" ".join(map(str, pos)), " ".join(map(str, sent))))
def main(argv):
data = datafold(argv[0])
new_data = change_word_idx(data)
save_data(new_data, argv[1])
if "__main__" == __name__:
main(sys.argv[1:])
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
193
|
yywang0514/dsnre
|
refs/heads/master
|
/lib/util.py
|
import sys
import re
import numpy as np
import cPickle as pkl
import codecs
import logging
from data_iterator import *
logger = logging.getLogger()
extra_token = ["<PAD>", "<UNK>"]
def display(msg):
print(msg)
logger.info(msg)
def datafold(filename):
f = open(filename, 'r')
data = []
while 1:
line = f.readline()
if not line:
break
entities = map(int, line.split(' '))
line = f.readline()
bagLabel = line.split(' ')
rel = map(int, bagLabel[0:-1])
num = int(bagLabel[-1])
positions = []
sentences = []
entitiesPos = []
for i in range(0, num):
sent = f.readline().split(' ')
positions.append(map(int, sent[0:2]))
epos = map(int, sent[0:2])
epos.sort()
entitiesPos.append(epos)
sentences.append(map(int, sent[2:-1]))
ins = InstanceBag(entities, rel, num, sentences, positions, entitiesPos)
data += [ins]
f.close()
return data
def dicfold(textfile):
vocab = []
with codecs.open(textfile, "r", encoding = "utf8") as f:
for line in f:
line = line.strip()
if line:
vocab.append(line)
return vocab
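# build_word2idx reorders the vocabulary so that words without a pre-trained
# vector come first; part_point marks the boundary (shifted by 2 for the
# <PAD>/<UNK> tokens) and the pre-trained vectors follow the order of the tail words.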
def build_word2idx(vocab, textFile):
msg = "Building word2idx..."
display(msg)
pre_train_emb = []
part_point = len(vocab)
if textFile:
word2emb = load_emb(vocab, textFile)
pre_train_vocab = []
un_pre_train_vocab = []
for word in vocab:
if word in word2emb:
pre_train_vocab.append(word)
pre_train_emb.append(word2emb[word])
else:
un_pre_train_vocab.append(word)
part_point = len(un_pre_train_vocab)
un_pre_train_vocab.extend(pre_train_vocab)
vocab = un_pre_train_vocab
word2idx = {}
for v, k in enumerate(extra_token):
word2idx[k] = v
for v, k in enumerate(vocab):
word2idx[k] = v + 2
part_point += 2
return word2idx, pre_train_emb, part_point
def load_emb(vocab, textFile):
msg = 'load emb from ' + textFile
display(msg)
vocab_set = set(vocab)
word2emb = {}
emb_p = re.compile(r" |\t")
count = 0
with codecs.open(textFile, "r", "utf8") as filein:
for line in filein:
count += 1
array = emb_p.split(line.strip())
word = array[0]
if word in vocab_set:
vector = [float(array[i]) for i in range(1, len(array))]
word2emb[word] = vector
del vocab_set
msg = "find %d words in %s" %(count, textFile)
display(msg)
msg = "Summary: %d words in the vocabulary and %d of them appear in the %s" %(len(vocab), len(word2emb), textFile)
display(msg)
return word2emb
def positive_evaluation(predict_results):
predict_y = predict_results[0]
predict_y_prob = predict_results[1]
y_given = predict_results[2]
positive_num = 0
#find the number of positive examples
for yi in range(y_given.shape[0]):
if y_given[yi, 0] > 0:
positive_num += 1
# if positive_num == 0:
# positive_num = 1
# sort prob
index = np.argsort(predict_y_prob)[::-1]
all_pre = [0]
all_rec = [0]
p_n = 0
p_p = 0
n_p = 0
# print y_given.shape[0]
for i in range(y_given.shape[0]):
labels = y_given[index[i],:] # key given labels
py = predict_y[index[i]] # answer
if labels[0] == 0:
# NA bag
if py > 0:
n_p += 1
else:
# positive bag
if py == 0:
p_n += 1
else:
flag = False
for j in range(y_given.shape[1]):
if j == -1:
break
if py == labels[j]:
flag = True # true positive
break
if flag:
p_p += 1
if (p_p+n_p) == 0:
precision = 1
else:
precision = float(p_p)/(p_p+n_p)
recall = float(p_p)/positive_num
if precision != all_pre[-1] or recall != all_rec[-1]:
all_pre.append(precision)
all_rec.append(recall)
return [all_pre[1:], all_rec[1:]]
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
194
|
yywang0514/dsnre
|
refs/heads/master
|
/lib/data_iterator.py
|
import time
import cPickle
import numpy as np
import torch
class InstanceBag(object):
def __init__(self, entities, rel, num, sentences, positions, entitiesPos):
self.entities = entities
self.rel = rel
self.num = num
self.sentences = sentences
self.positions = positions
self.entitiesPos = entitiesPos
def bags_decompose(data_bags):
bag_sent = [data_bag.sentences for data_bag in data_bags]
bag_pos = [data_bag.positions for data_bag in data_bags]
bag_num = [data_bag.num for data_bag in data_bags]
bag_rel = [data_bag.rel for data_bag in data_bags]
bag_epos = [data_bag.entitiesPos for data_bag in data_bags]
return [bag_rel, bag_num, bag_sent, bag_pos, bag_epos]
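# select_instance implements the instance-selection step of distant supervision:
# for every bag the current model scores each sentence and only the highest
# scoring one is kept for the training batch.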
def select_instance(rels, nums, sents, poss, eposs, model):
batch_x = []
batch_len = []
batch_epos = []
batch_y = []
for bagIndex, insNum in enumerate(nums):
maxIns = 0
maxP = -1
if insNum > 1:
for m in range(insNum):
insX = sents[bagIndex][m]
epos = eposs[bagIndex][m]
sel_x, sel_len, sel_epos = prepare_data([insX], [epos])
results = model(sel_x, sel_len, sel_epos)
tmpMax = results.max()
if tmpMax > maxP:
maxIns = m
maxP=tmpMax
batch_x.append(sents[bagIndex][maxIns])
batch_epos.append(eposs[bagIndex][maxIns])
batch_y.append(rels[bagIndex])
batch_x, batch_len, batch_epos = prepare_data(batch_x, batch_epos)
batch_y = torch.LongTensor(np.array(batch_y).astype("int32")).cuda()
return [batch_x, batch_len, batch_epos, batch_y]
def prepare_data(sents, epos):
lens = [len(sent) for sent in sents]
n_samples = len(lens)
max_len = max(lens)
batch_x = np.zeros((n_samples, max_len)).astype("int32")
for idx, s in enumerate(sents):
batch_x[idx, :lens[idx]] = s
batch_len = np.array(lens).astype("int32")
batch_epos = np.array(epos).astype("int32")
return torch.LongTensor(batch_x).cuda(), torch.LongTensor(batch_len).cuda(), torch.LongTensor(batch_epos).cuda()
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
210
|
rodelrod/pomodoro-report
|
refs/heads/master
|
/test_notebook_parser.py
|
#!/usr/bin/env python
import unittest
from notebook_parser import *
import os
import errno
from datetime import datetime
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
class TestParser(unittest.TestCase):
"""Tests the RedNotebook monthly files parser."""
def setUp(self):
self.nb_path = '/tmp/test_pomodoro_report'
mkdir_p(self.nb_path)
f = open(os.path.join(self.nb_path, '2012-10.txt'), 'w')
f.write(
"21: {text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 org desk'}\n"
"25:\n"
" Cat3: {Some other shit: null}\n"
" text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 org desk'\n"
"27:\n"
" Cat1: {Some shit: null}\n"
" text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 report incongruencias sewan pdf/cdr\n"
" 1/1 fix b''illing db and run\n"
" 0/2 guide entretien prestataire\n"
" 0/1 org desk'\n")
f.close()
self.p = Parser(self.nb_path)
def test_get_nb_filename(self):
self.assertEqual(
self.p._get_nb_filename(datetime(2012, 10, 14)),
os.path.join(self.nb_path,'2012-10.txt'))
def test_parse_day_block(self):
block = ['', '5', 'some stuff', '26', 'some other stuff']
expected = {5: 'some stuff', 26: 'some other stuff'}
self.assertEqual(self.p._parse_day_block(block), expected)
def test_get_day_with_categories(self):
"""Get day 27."""
expected = (
"\n"
" Cat1: {Some shit: null}\n"
" text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 report incongruencias sewan pdf/cdr\n"
" 1/1 fix b''illing db and run\n"
" 0/2 guide entretien prestataire\n"
" 0/1 org desk'\n")
actual = self.p._get_day(datetime(2012, 10, 27))
self.assertEqual(actual, expected)
def test_get_day_without_categories(self):
"""Get day 21."""
expected = (
" {text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 org desk'}\n")
actual = self.p._get_day(datetime(2012, 10, 21))
self.assertEqual(actual, expected)
def test_get_inexistant_day(self):
"""Get 14/10."""
with self.assertRaises(EmptyDayException):
self.p._get_day(datetime(2012, 10, 14))
def test_get_inexistant_month(self):
"""Get 14/04."""
with self.assertRaises(EmptyDayException):
self.p._get_day(datetime(2012, 4, 14))
def test_get_text_with_categories(self):
block = (
"\n"
" Cat1: {Some shit: null}\n"
" text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 report incongruencias sewan pdf/cdr\n"
" 1/1 fix b''illing db and run\n"
" 0/2 guide entretien prestataire\n"
" 0/1 org desk'\n")
expected = (
"1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 report incongruencias sewan pdf/cdr\n"
" 1/1 fix b'illing db and run\n"
" 0/2 guide entretien prestataire\n"
" 0/1 org desk")
self.assertEqual(self.p._get_text(block), expected)
def test_get_text_without_categories(self):
block = (
" {text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 org desk'}\n")
expected = (
"1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 org desk")
self.assertEqual(self.p._get_text(block), expected)
def test_get_pomodoros(self):
# TODO
pass
def tearDown(self):
os.remove(os.path.join(self.nb_path, '2012-10.txt'))
if __name__ == '__main__':
unittest.main()
|
{"/test_notebook_parser.py": ["/notebook_parser.py"]}
|
211
|
rodelrod/pomodoro-report
|
refs/heads/master
|
/notebook_parser.py
|
#!/usr/bin/env python
import re
import os
NOTEBOOK_PATH = '/home/rrodrigues/.rednotebook/data'
class EmptyDayException(Exception):
"""No info was entered for this date."""
class Parser(object):
"""Parses RedNotebook monthly files.
This is a very basic parser used to extract Pomodoro references for each
day. It has the following limitations:
- Basically assumes there is nothing but the Pomodoro references in the
day's text.
- Ignores any Tags.
- Ignores any Categories.
- In the fancy cases where the text field ends up surrounded by double
quotes instead of single quotes, it breaks.
"""
def __init__(self, nb_path=NOTEBOOK_PATH):
self.nb_path = nb_path
def _get_nb_filename(self, date):
return os.path.join(self.nb_path, date.strftime('%Y-%m.txt'))
@staticmethod
def _parse_day_block(day_block_list):
day_blocks = {}
is_content = False
for index, token in enumerate(day_block_list):
if token.isdigit() and not is_content:
day = int(token)
is_content = True
elif is_content:
day_blocks[day] = token
is_content = False
else:
pass
return day_blocks
def _get_day(self, date):
day_filename = self._get_nb_filename(date)
if not os.path.isfile(day_filename):
raise EmptyDayException
with open(day_filename, 'r') as nb_file:
file_contents = nb_file.read()
        day_blocks_list = re.split(r'^(\d+):', file_contents, flags=re.MULTILINE)
day_blocks = self._parse_day_block(day_blocks_list)
try:
return day_blocks[date.day]
except KeyError:
raise EmptyDayException
def _get_text(self, block):
        after_text = re.split(r'\Wtext:', block)[1]
quote_set = False
started_text = False
ended_text = False
text = []
for token in after_text:
if token == "'":
if not started_text:
#first quote, text starts
started_text = True
elif quote_set and started_text:
#second quote
text.append("'")
quote_set = False
elif not quote_set and started_text:
# quote in the middle of text, maybe the end or first of an
# escape sequence
quote_set = True
else:
if quote_set:
# First character after a quote is not a quote, so this
# must be the end
break
elif started_text:
# Normal text, add it to the output
text.append(token)
else:
# Text hasn't started yet, discard token
continue
return ''.join(text)
def get_pomodoros(self):
# TODO
pass
|
{"/test_notebook_parser.py": ["/notebook_parser.py"]}
|
220
|
grizzlypeaksoftware/tankbot
|
refs/heads/master
|
/tankbot.py
|
import RPi.GPIO as GPIO
from time import sleep
def Init():
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(3,GPIO.OUT,initial=GPIO.LOW) #blue
GPIO.setup(5,GPIO.OUT,initial=GPIO.LOW) #green
GPIO.setup(16,GPIO.OUT,initial=GPIO.LOW) #yellow
GPIO.setup(18,GPIO.OUT,initial=GPIO.LOW) #orange
Welcome()
def Welcome():
Stop()
Forward()
sleep(.5)
Reverse()
sleep(.5)
Right()
sleep(1)
Left()
sleep(1)
Stop()
def No():
Stop()
Right()
sleep(.25)
Left()
sleep(.25)
Right()
sleep(.25)
Left()
sleep(.25)
Stop()
def Yes():
Stop()
Forward()
sleep(.25)
Reverse()
sleep(.25)
Forward()
sleep(.25)
Reverse()
sleep(.25)
Stop()
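# Pin pairs 3/5 and 16/18 are assumed to drive the two motor driver channels
# (one per track); each movement below sets exactly one pin of each pair HIGH.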
def Forward():
GPIO.output(3,GPIO.LOW)
GPIO.output(5,GPIO.HIGH)
GPIO.output(16,GPIO.LOW)
GPIO.output(18,GPIO.HIGH)
def Reverse():
GPIO.output(3,GPIO.HIGH)
GPIO.output(5,GPIO.LOW)
GPIO.output(16,GPIO.HIGH)
GPIO.output(18,GPIO.LOW)
def Left():
GPIO.output(3,GPIO.LOW)
GPIO.output(5,GPIO.HIGH)
GPIO.output(16,GPIO.HIGH)
GPIO.output(18,GPIO.LOW)
def Right():
GPIO.output(3,GPIO.HIGH)
GPIO.output(5,GPIO.LOW)
GPIO.output(16,GPIO.LOW)
GPIO.output(18,GPIO.HIGH)
def Stop():
#print("Stop Tankbot")
GPIO.output(3,GPIO.LOW)
GPIO.output(5,GPIO.LOW)
GPIO.output(16,GPIO.LOW)
GPIO.output(18,GPIO.LOW)
def Close():
GPIO.cleanup()
|
{"/bot.py": ["/tankbot.py"]}
|
221
|
grizzlypeaksoftware/tankbot
|
refs/heads/master
|
/bot.py
|
import tankbot
import keyboard
import time as _time
tankbot.Init()
recorded = []
recording_started = False
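# Key map: arrows drive the tank, "s" stops, "1"/"2"/"3" play the No/Yes/Welcome
# gestures, F1 starts recording keystrokes, F2 replays them, "q" stops and quits.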
def ControlSwitch(key, event):
global recording_started
#print(key)
#print(event.event_type)
if key == "s":
tankbot.Stop()
if key == "up":
tankbot.Forward()
if key == "down":
tankbot.Reverse()
if key == "right":
tankbot.Right()
if key == "left":
tankbot.Left()
if key == "1":
tankbot.No()
if key == "2":
tankbot.Yes()
if key == "3":
tankbot.Welcome()
if key == "f1":
keyboard.start_recording()
recording_started = True
if key == "f2":
try:
if recording_started == True:
recording_started = False
Playback(keyboard.stop_recording())
else:
Playback(recorded)
except Exception as inst:
print(type(inst)) # the exception instance
print(inst.args) # arguments stored in .args
print(inst) # __str__ allows args to be printed directly,
if key == "q":
tankbot.Stop()
return False
return True
def Playback(rec):
last_time = None
global recorded
recorded = rec
for event in rec:
if last_time is not None:
_time.sleep(event.time - last_time)
last_time = event.time
key = event.scan_code or event.name
if event.name != "f2":
check = ControlSwitch(event.name, event)
continueLoop = True
while continueLoop:
try:
key = keyboard.read_key()
event = keyboard.read_event()
if event.event_type == "up":
continueLoop = ControlSwitch(key, event)
except Exception as inst:
print(type(inst)) # the exception instance
print(inst.args) # arguments stored in .args
print(inst) # __str__ allows args to be printed directly,
tankbot.Close()
|
{"/bot.py": ["/tankbot.py"]}
|
223
|
jeespinozam/bomberman-ai
|
refs/heads/master
|
/plugins/SerpentBombermanGameAgentPlugin/files/helpers/ppo.py
|
from tensorforce.agents import PPOAgent
from serpent.utilities import SerpentError
import numpy as np
import os
# This file is borrowed from SerpentAIsaacGameAgentPlugin:
# https://github.com/SerpentAI/SerpentAIsaacGameAgentPlugin/blob/master/files/helpers/ppo.py
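# Usage sketch (hypothetical shapes/inputs): agent = SerpentPPO(frame_shape=(120, 137, 4),
#     game_inputs={"MoveUp": [KeyboardKey.KEY_UP]}); per frame buffer call
#     action, label, keys = agent.generate_action(game_frame_buffer) and agent.observe(reward, terminal).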
class SerpentPPO:
def __init__(self, frame_shape=None, game_inputs=None):
if frame_shape is None:
raise SerpentError("A 'frame_shape' tuple kwarg is required...")
states_spec = {"type": "float", "shape": frame_shape}
if game_inputs is None:
raise SerpentError("A 'game_inputs' dict kwarg is required...")
self.game_inputs = game_inputs
self.game_inputs_mapping = self._generate_game_inputs_mapping()
actions_spec = {"type": "int", "num_actions": len(self.game_inputs)}
network_spec = [
{"type": "conv2d", "size": 1, "window": 2, "stride": 1},
{"type": "flatten"},
# {"type": "dense", "size": 64},
{"type": "dense", "size": 6}
]
self.agent = PPOAgent(
states=states_spec,
actions=actions_spec,
network=network_spec,
batched_observe=256,
batching_capacity=1000,
# BatchAgent
#keep_last_timestep=True,
# PPOAgent
step_optimizer=dict(
type='adam',
learning_rate=1e-4
),
optimization_steps=10,
# Model
scope='ppo'
#discount=0.97,
# DistributionModel
#distributions=None,
#entropy_regularization=0.01,
# PGModel
#baseline_mode=None,
#baseline=None,
#baseline_optimizer=None,
#gae_lambda=None,
# PGLRModel
#likelihood_ratio_clipping=None,
#summary_spec=summary_spec,
#distributed_spec=None,
# More info
#device=None,
#session_config=None,
#saver=None,
#variable_noise=None,
#states_preprocessing_spec=None,
#explorations_spec=None,
#reward_preprocessing_spec=None,
#execution=None,
#actions_exploration=None,
#update_mode=None,
#memory=None,
#subsampling_fraction=0.1
)
def generate_action(self, game_frame_buffer):
states = np.stack(
game_frame_buffer,
axis=2
)
# Get prediction from agent, execute
action = self.agent.act(states)
label = self.game_inputs_mapping[action]
return action, label, self.game_inputs[label]
def observe(self, reward=0, terminal=False):
self.agent.observe(reward=reward, terminal=terminal)
def _generate_game_inputs_mapping(self):
mapping = dict()
for index, key in enumerate(self.game_inputs):
mapping[index] = key
return mapping
def save_model(self):
self.agent.save_model(directory=os.path.join(os.getcwd(), "datasets", "bomberman", "ppo_model"), append_timestep=False)
def restore_model(self):
self.agent.restore_model(directory=os.path.join(os.getcwd(), "datasets", "bomberman"))
|
{"/plugins/SerpentBombermanGameAgentPlugin/files/serpent_Bomberman_game_agent.py": ["/plugins/SerpentBombermanGameAgentPlugin/files/helpers/game_status.py", "/plugins/SerpentBombermanGameAgentPlugin/files/helpers/ppo.py", "/plugins/SerpentBombermanGameAgentPlugin/files/helpers/dqn.py"]}
|
224
|
jeespinozam/bomberman-ai
|
refs/heads/master
|
/plugins/SerpentBombermanGameAgentPlugin/files/serpent_Bomberman_game_agent.py
|
# import time
# import os
# import pickle
# import serpent.cv
#
# import numpy as np
# import collections
#
# from datetime import datetime
#
#
# from serpent.frame_transformer import FrameTransformer
# from serpent.frame_grabber import FrameGrabber
# from serpent.game_agent import GameAgent
# from serpent.input_controller import KeyboardKey
# from serpent.sprite import Sprite
# from serpent.sprite_locator import SpriteLocator
# from serpent.sprite_identifier import SpriteIdentifier
#
# # from .helpers.game_status import Game
# from .helpers.terminal_printer import TerminalPrinter
# from .helpers.ppo import SerpentPPO
#
#
# import random
#
# class SerpentBombermanGameAgent(GameAgent):
#
# def __init__(self, **kwargs):
# super().__init__(**kwargs)
#
# self.frame_handlers["PLAY"] = self.handle_play
#
# self.frame_handler_setups["PLAY"] = self.setup_play
#
# self.value = None
# print("Sprites")
# print(type(self.game.sprites))
# print("game")
# print(self.game)
# print("game type")
# print(type(self.game))
# for i,value in enumerate(self.game.sprites):
# if(i==13):
# print(value)
# self.value = value
# self.spriteGO = self.game.sprites.get("SPRITE_GAME_OVER")
# self.spriteWO = self.game.sprites.get("SPRITE_GAME_WON")
# #self.sprite.image_data
# self.printer = TerminalPrinter()
#
# def setup_play(self):
# game_inputs = {
# "Move Up": [KeyboardKey.KEY_UP],
# "Move Down": [KeyboardKey.KEY_DOWN],
# "Move Left": [KeyboardKey.KEY_LEFT],
# "Move Right": [KeyboardKey.KEY_RIGHT],
# "Leave Bomb": [KeyboardKey.KEY_SPACE]
# }
# self.game_inputs = game_inputs
#
# # self.ppo_agent = SerpentPPO(
# # frame_shape=(480, 549, 4),
# # game_inputs=game_inputs
# # )
#
# self.first_run = True
# self.game_over = False
# self.current_attempts = 0
# self.run_reward = 0
# self.started_at = datetime.utcnow().isoformat()
# self.paused_at = None
#
# print("Enter - Auto Save")
# self.input_controller.tap_key(KeyboardKey.KEY_ENTER)
# time.sleep(2)
#
# return
#
# def extract_game_area(self, frame_buffer):
# game_area_buffer = []
#
# for game_frame in frame_buffer.frames:
# game_area = serpent.cv.extract_region_from_image(
# game_frame.grayscale_frame,
# self.game.screen_regions["GAME_REGION"]
# )
#
# frame = FrameTransformer.rescale(game_area, 0.25)
# game_area_buffer.append(frame)
#
# return game_area_buffer
#
# def handle_play(self, game_frame):
# if self.first_run:
# self.current_attempts += 1
# self.first_run = False
# return None
#
# self.printer.add("")
# self.printer.add("BombermanAI")
# self.printer.add("Reinforcement Learning: Training a PPO Agent")
# self.printer.add("")
# self.printer.add(f"Stage Started At: {self.started_at}")
# self.printer.add(f"Current Run: #{self.current_attempts}")
# self.printer.add("")
#
# inputs = [KeyboardKey.KEY_UP,
# KeyboardKey.KEY_DOWN,
# KeyboardKey.KEY_LEFT,
# KeyboardKey.KEY_RIGHT,
# KeyboardKey.KEY_SPACE]
#
# #game over?
# sprite_to_locate = Sprite("QUERY", image_data=self.spriteGO.image_data)
#
# sprite_locator = SpriteLocator()
# locationGO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
# print(locationGO)
#
# #won game?
# sprite_to_locate = Sprite("QUERY", image_data=self.spriteWO.image_data)
# sprite_locator = SpriteLocator()
# locationWO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
# print(locationWO)
#
# print(type(game_frame))
#
# if(locationGO!= None or locationWO!= None):
# #enter clic in both cases
# self.input_controller.tap_key(KeyboardKey.KEY_ENTER)
# else:
# game_frame_buffer = FrameGrabber.get_frames([0, 1, 2, 3], frame_type="PIPELINE")
# game_frame_buffer = self.extract_game_area(game_frame_buffer)
# action, label, value = self.ppo_agent.generate_action(game_frame_buffer)
#
# print(action, label, value)
# self.input_controller.tap_key(value)
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import time
import os
import pickle
import serpent.cv
import numpy as np
import collections
from datetime import datetime
from serpent.frame_transformer import FrameTransformer
from serpent.frame_grabber import FrameGrabber
from serpent.game_agent import GameAgent
from serpent.input_controller import KeyboardKey
from serpent.sprite import Sprite
from serpent.sprite_locator import SpriteLocator
from serpent.sprite_identifier import SpriteIdentifier
import skimage.io
from serpent.visual_debugger.visual_debugger import VisualDebugger
from .helpers.game_status import Game
from .helpers.terminal_printer import TerminalPrinter
from .helpers.ppo import SerpentPPO
from .helpers.dqn import KerasAgent
import random
class MyFrame:
def __init__ (self, frame):
self.frame = frame
class SerpentBombermanGameAgent(GameAgent):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.frame_handlers['PLAY'] = self.handle_play
self.frame_handler_setups['PLAY'] = self.setup_play
self.value = None
#print('Sprites')
#print(type(self.game.sprites))
#print('game')
#print(self.game)
#print('game type')
#print(type(self.game))
self.spriteGO = self.game.sprites.get('SPRITE_GAME_OVER')
self.spriteWO = self.game.sprites.get('SPRITE_GAME_WON')
self.spriteGirl = self.game.sprites.get('SPRITE_BETTY_0')
self.printer = TerminalPrinter()
self.visual_debugger = VisualDebugger()
self.gamestate = Game()
def setup_play(self):
game_inputs = {
"MoveUp": [KeyboardKey.KEY_UP],
"MoveDown": [KeyboardKey.KEY_DOWN],
"MoveLeft": [KeyboardKey.KEY_LEFT],
"MoveRight": [KeyboardKey.KEY_RIGHT],
"LeaveBomb": [KeyboardKey.KEY_SPACE],
"None": [0]
}
self.game_inputs = game_inputs
self.game_actions = [
KeyboardKey.KEY_UP,
KeyboardKey.KEY_DOWN,
KeyboardKey.KEY_LEFT,
KeyboardKey.KEY_RIGHT,
KeyboardKey.KEY_SPACE,
None]
##120, 137
self.dqn_agent = KerasAgent(shape=(104, 136, 1), action_size=len(self.game_actions))
#load model
#self.ppo_agent.restore_model()
self.first_run = True
##states trainning
self.epoch = 1
self.total_reward = 0
##state & action
self.prev_state = None
self.prev_action = None
self.prev_reward = 0
print("Enter - Auto Save")
self.input_controller.tap_key(KeyboardKey.KEY_ENTER)
self.gamestate.restartState()
time.sleep(2)
def extract_game_area(self, frame_buffer):
game_area_buffer = []
for game_frame in frame_buffer.frames:
game_area = \
serpent.cv.extract_region_from_image(game_frame.grayscale_frame,self.game.screen_regions['GAME_REGION'])
frame = FrameTransformer.rescale(game_area, 0.25)
game_area_buffer.append(frame)
print(np.array(game_area_buffer).shape)
return np.array(game_area_buffer)
def convert_to_rgba(self, matrix):
#print(matrix)
new_matrix = []
for x in range(0,len(matrix)):
line = []
for y in range(0,len(matrix[x])):
#pixel
pixel = matrix[x][y]
new_pixel = [pixel[0],pixel[1],pixel[2], 255]
line.append(new_pixel)
new_matrix.append(line)
return np.array(new_matrix)
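# Editor's note: a minimal vectorized sketch of the same RGB -> RGBA conversion,
# assuming `matrix` is an H x W x 3 NumPy array (hypothetical helper, not part of
# the original agent; shown only to clarify what convert_to_rgba produces):
#
#   def convert_to_rgba_fast(matrix):
#       alpha = np.full(matrix.shape[:2] + (1,), 255, dtype=matrix.dtype)
#       return np.concatenate([matrix[..., :3], alpha], axis=-1)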
def update_game_state(self, frame):
game_area = \
serpent.cv.extract_region_from_image(frame,self.game.screen_regions['GAME_REGION'])
#game ...
# 0,0
# 32,32
game_squares = [[None for j in range(0,11)] for i in range(0,15)]
const_offset = 8
const = 32
#game variables
self.gamestate.bombs = [] #{x, y}
self.gamestate.enemies = [] #{x,y}
#force girl to die if not found
girl_found = False
for i in range(0,15):
for j in range(0, 11):
izq = ((j+1)*const - const_offset, (i+1)*const - const_offset)
der = ((j+2)*const + const_offset, (i+2)*const + const_offset)
reg = (izq[0], izq[1], der[0], der[1])
square = serpent.cv.extract_region_from_image(game_area, reg)
square = self.convert_to_rgba(square)
sprite_to_locate = Sprite("QUERY", image_data=square[..., np.newaxis])
sprite = self.sprite_identifier.identify(sprite_to_locate, mode="SIGNATURE_COLORS")
game_squares[i][j] = sprite
if("SPRITE_BETTY" in sprite):
self.girl = {"x": i, "y": j}
girl_found = True
elif("SPRITE_GEORGE" in sprite):
self.gamestate.enemies.append({"x": i, "y": j})
elif("SPRITE_BOMB" in sprite):
self.gamestate.bombs.append({"x": i, "y": j})
self.gamestate.girl_alive = girl_found
self.gamestate.done = not girl_found
return game_squares
def handle_play(self, game_frame):
#self.printer.add("")
#self.printer.add("BombermanAI")
#self.printer.add("Reinforcement Learning: Training a PPO Agent")
#self.printer.add("")
#self.printer.add(f"Stage Started At: {self.started_at}")
#self.printer.add(f"Current Run: #{self.current_attempts}")
#self.printer.add("")
#self.check_game_state(game_frame)
#####################CHECK STATE###########################
#game over?
locationGO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteGO.image_data)
sprite_locator = SpriteLocator()
locationGO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
#print("Location Game over:",locationGO)
#won game?
locationWO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteWO.image_data)
sprite_locator = SpriteLocator()
locationWO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
#print("Location Game won:",locationWO)
self.gamestate.victory = locationWO!= None
self.gamestate.lose = locationGO!=None
self.gamestate.girl_alive = (locationGO== None and locationWO== None)
self.gamestate.done = not self.gamestate.girl_alive
print(f"Is alive? {self.gamestate.girl_alive}")
print(f"Game over? {self.gamestate.lose}")
print(f"Won? {self.gamestate.victory}")
#####################VISUAL DEBUGGER###########################
for i, game_frame in enumerate(self.game_frame_buffer.frames):
self.visual_debugger.store_image_data(
game_frame.frame,
game_frame.frame.shape,
str(i)
)
#####################MODEL###########################
#get buffer
frame_buffer = FrameGrabber.get_frames([0, 1, 2, 3], frame_type="PIPELINE")
game_frame_buffer = self.extract_game_area(frame_buffer)
state = game_frame_buffer.reshape(4, 104, 136, 1)
if(self.gamestate.done):
print(f"Game over, attemp {self.epoch}")
if (self.epoch % 10)== 0:
print("saving model")
self.dqn_agent.save_model(f"bombergirl_epoch_{self.epoch}.model")
self.printer.save_file()
self.printer.add(f"{self.gamestate.victory},{self.gamestate.lose},{self.epoch},{self.gamestate.time},{self.total_reward}")
self.total_reward = 0
self.dqn_agent.remember(self.prev_state, self.prev_action, self.prev_reward, state, True)
self.dqn_agent.replay()
self.input_controller.tap_key(KeyboardKey.KEY_ENTER)
self.epoch += 1
self.total_reward = 0
self.gamestate.restartState()
self.prev_state = None
self.prev_action = None
else:
#update time
self.gamestate.updateTime()
#print(np.stack(game_frame_buffer,axis=1).shape)
#print(game_frame_buffer.shape)
#print(state.shape)
if(not (self.prev_state is None) and not (self.prev_action is None)):
self.dqn_agent.remember(self.prev_state, self.prev_action, self.prev_reward, state, False)
#do something
action_index = self.dqn_agent.act(state)
#get key
action = self.game_actions[action_index]
#get random frame from buffer
game_frame_rand = random.choice(frame_buffer.frames).frame
#update environment according to frame
###################FUN UPDATE STATE#########################################
game_area = \
serpent.cv.extract_region_from_image(game_frame_rand,self.game.screen_regions['GAME_REGION'])
#game ...
# 0,0
# 32,32
game_squares = [[None for j in range(0,11)] for i in range(0,15)]
const_offset = 8
const = 32
#game variables
self.gamestate.bombs = [] #{x, y}
self.gamestate.enemies = [] #{x,y}
#force girl to die if not found
girl_found = False
for i in range(0,15):
for j in range(0, 11):
izq = ((j+1)*const - const_offset, (i+1)*const - const_offset)
der = ((j+2)*const + const_offset, (i+2)*const + const_offset)
reg = (izq[0], izq[1], der[0], der[1])
square = serpent.cv.extract_region_from_image(game_area, reg)
square = self.convert_to_rgba(square)
sprite_to_locate = Sprite("QUERY", image_data=square[..., np.newaxis])
sprite = self.sprite_identifier.identify(sprite_to_locate, mode="SIGNATURE_COLORS")
game_squares[i][j] = sprite
if("SPRITE_BETTY" in sprite):
self.girl = {"x": i, "y": j}
girl_found = True
elif("SPRITE_GEORGE" in sprite):
self.gamestate.enemies.append({"x": i, "y": j})
elif("SPRITE_BOMB" in sprite):
self.gamestate.bombs.append({"x": i, "y": j})
elif("SPRITE_BONUSES" in sprite):
self.gamestate.bonus.append({"x": i, "y": j})
#####################CHECK STATE###########################
#game over?
locationGO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteGO.image_data)
sprite_locator = SpriteLocator()
locationGO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
#print("Location Game over:",locationGO)
#won game?
locationWO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteWO.image_data)
sprite_locator = SpriteLocator()
locationWO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
#print("Location Game won:",locationWO)
self.gamestate.lose = locationGO!=None
self.gamestate.victory = locationWO!= None
self.gamestate.girl_alive = (locationGO== None and locationWO== None)
self.gamestate.done = not self.gamestate.girl_alive
print(f"Is alive? {self.gamestate.girl_alive}")
print(f"Game over? {self.gamestate.lose}")
print(f"Won? {self.gamestate.victory}")
###################REWARD#########################################
#get reward
reward = self.gamestate.getReward(action_index)
self.total_reward += reward
self.prev_state = state
self.prev_action = action_index
self.prev_reward = reward
if(action):
self.input_controller.tap_key(action, 0.15 if action_index < 4 else 0.01)
print(f"Action: {self.gamestate.game_inputs[action_index]}, reward: {reward}, total_reward: {self.total_reward}")
#action, label, value = self.ppo_agent.generate_action(game_frame_buffer)
#print(action, label, value)
#key, value = random.choice(list(self.game_inputs.items()))
#if(value[0]):
# self.input_controller.tap_key(value[0])
#game_squares = self.extract_game_squares(game_frame.frame)
def check_game_state(self, game_frame):
#game over?
locationGO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteGO.image_data)
sprite_locator = SpriteLocator()
locationGO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
print("Location Game over:",locationGO)
#won game?
locationWO = None
sprite_to_locate = Sprite("QUERY", image_data=self.spriteWO.image_data)
sprite_locator = SpriteLocator()
locationWO = sprite_locator.locate(sprite=sprite_to_locate, game_frame=game_frame)
print("Location Game won:",locationWO)
self.gamestate.girl_alive = (locationGO== None and locationWO== None)
self.gamestate.done = not self.gamestate.girl_alive
self.gamestate.victory = locationWO!= None
print(f"Is alive? {self.gamestate.girl_alive}")
print(f"Game over? {self.gamestate.lose}")
print(f"Won? {self.gamestate.victory}")
|
{"/plugins/SerpentBombermanGameAgentPlugin/files/serpent_Bomberman_game_agent.py": ["/plugins/SerpentBombermanGameAgentPlugin/files/helpers/game_status.py", "/plugins/SerpentBombermanGameAgentPlugin/files/helpers/ppo.py", "/plugins/SerpentBombermanGameAgentPlugin/files/helpers/dqn.py"]}
|
225
|
jeespinozam/bomberman-ai
|
refs/heads/master
|
/plugins/SerpentBombermanGameAgentPlugin/files/helpers/dqn.py
|
import json
import sys
import random
import os
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import *
from keras.optimizers import *
class KerasAgent:
def __init__(self, shape, action_size):
self.weight_backup = "bombergirl_weight.model"
self.shape = shape
self.action_size = action_size
self.memory = deque(maxlen=2000)
self.learning_rate = 0.001
self.gamma = 0.95
self.exploration_rate = 1.0
self.exploration_min = 0.01
self.exploration_decay = 0.995
self.model = self._build_model()
def _build_model(self):
model = Sequential()
# Convolutions.
model.add(Conv2D(
16,
kernel_size=(3, 3),
strides=(1, 1),
#data_format='channels_first',
input_shape=self.shape
))
model.add(Activation('relu'))
model.add(Conv2D(
32,
kernel_size=(3, 3),
strides=(1, 1),
data_format='channels_first'
))
model.add(Activation('relu'))
# Dense layers.
model.add(Flatten())
model.add(Dense(256))
model.add(Activation('relu'))
model.add(Dense(self.action_size))
model.summary()
model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
#model.compile(RMSprop(), 'MSE')
if os.path.isfile(self.weight_backup):
model.load_weights(self.weight_backup)
self.exploration_rate = self.exploration_min
return model
def save_model(self, name):
self.model.save(self.weight_backup)
self.model.save(name)
def act(self, state):
if np.random.rand() <= self.exploration_rate:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0])
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def replay(self, sample_batch_size=256):
if len(self.memory) < sample_batch_size:
sample_batch_size=len(self.memory)
sample_batch = random.sample(self.memory, sample_batch_size)
for state, action, reward, next_state, done in sample_batch:
target = reward
if not done:
target = (reward + self.gamma *
np.amax(self.model.predict(next_state)[0]))
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.exploration_rate > self.exploration_min:
self.exploration_rate *= self.exploration_decay
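# Editor's note: a minimal usage sketch of the act/remember/replay cycle defined above.
# `env_step` and `initial_state` are hypothetical helpers (not part of this repo);
# states are assumed to already have the batch shape expected by the Keras model.
#
#   agent = KerasAgent(shape=(104, 136, 1), action_size=6)
#   state = initial_state()
#   for step in range(1000):
#       action = agent.act(state)                 # epsilon-greedy over model.predict
#       next_state, reward, done = env_step(action)
#       agent.remember(state, action, reward, next_state, done)
#       state = next_state
#       if done:
#           agent.replay()                        # fit on a sampled mini-batch
#           break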
|
{"/plugins/SerpentBombermanGameAgentPlugin/files/serpent_Bomberman_game_agent.py": ["/plugins/SerpentBombermanGameAgentPlugin/files/helpers/game_status.py", "/plugins/SerpentBombermanGameAgentPlugin/files/helpers/ppo.py", "/plugins/SerpentBombermanGameAgentPlugin/files/helpers/dqn.py"]}
|
226
|
jeespinozam/bomberman-ai
|
refs/heads/master
|
/plugins/SerpentBombermanGamePlugin/files/serpent_Bomberman_game.py
|
from serpent.game import Game
from .api.api import BombermanAPI
from serpent.utilities import Singleton
from serpent.game_launchers.web_browser_game_launcher import WebBrowser
class SerpentBombermanGame(Game, metaclass=Singleton):
def __init__(self, **kwargs):
kwargs["platform"] = "web_browser"
kwargs["window_name"] = "Safari"
kwargs["url"] = "http://0.0.0.0:8000"
kwargs["browser"] = WebBrowser.DEFAULT
super().__init__(**kwargs)
self.api_class = BombermanAPI
self.api_instance = None
@property
def screen_regions(self):
#t
dic_offset = {
"WINDOWS_CHROME": {
# "top": 81,
# "left": 5
"top": 0,
"left": 0
}
}
offset = dic_offset["WINDOWS_CHROME"]
regions = {
"GAME_REGION": (offset["top"], offset["left"], 416 + offset["top"], 544 + offset["left"]), #544x416
"GAME_OVER_REGION": (118 + offset["top"], 163 + offset["left"], 151 + offset["top"], 383 + offset["left"]), #220x33 - 163,118
"WIN_REGION": (118 + offset["top"], 171 + offset["left"], 149 + offset["top"], 372 + offset["left"]), # 201x31 - 171,118
}
return regions
@property
def ocr_presets(self):
presets = {
"SAMPLE_PRESET": {
"extract": {
"gradient_size": 1,
"closing_size": 1
},
"perform": {
"scale": 10,
"order": 1,
"horizontal_closing": 1,
"vertical_closing": 1
}
}
}
return presets
# from serpent.game import Game
#
# from .api.api import BombermanAPI
#
# from serpent.utilities import Singleton
#
# from serpent.game_launchers.web_browser_game_launcher import WebBrowser
#
#
# class SerpentBombermanGame(Game, metaclass=Singleton):
#
# def __init__(self, **kwargs):
# kwargs["platform"] = "web_browser"
#
# kwargs["window_name"] = "Safari"
#
# kwargs["url"] = "http://0.0.0.0:8000"
# kwargs["browser"] = WebBrowser.DEFAULT
#
# super().__init__(**kwargs)
#
# self.api_class = BombermanAPI
# self.api_instance = None
#
# @property
# def screen_regions(self):
# regions = {
# "GAME_REGION": (0, 0, 480, 549), ##545x416
# "GAME_OVER_REGION": (160,160, 225, 404),
# "WIN_REGION": (175,130, 220, 421),
# }
#
# return regions
#
# @property
# def ocr_presets(self):
# presets = {
# "SAMPLE_PRESET": {
# "extract": {
# "gradient_size": 1,
# "closing_size": 1
# },
# "perform": {
# "scale": 10,
# "order": 1,
# "horizontal_closing": 1,
# "vertical_closing": 1
# }
# }
# }
#
# return presets
|
{"/plugins/SerpentBombermanGameAgentPlugin/files/serpent_Bomberman_game_agent.py": ["/plugins/SerpentBombermanGameAgentPlugin/files/helpers/game_status.py", "/plugins/SerpentBombermanGameAgentPlugin/files/helpers/ppo.py", "/plugins/SerpentBombermanGameAgentPlugin/files/helpers/dqn.py"]}
|
227
|
jeespinozam/bomberman-ai
|
refs/heads/master
|
/plugins/SerpentBombermanGameAgentPlugin/files/helpers/game_status.py
|
#from .memreader import MemoryReader
import time
class Game:
enemies = [] #{x,y}
bombs = [] #{x,y}
bonus = []
girl = {"x": 0, "y": 0}
start_time = 0
time = 0
game_inputs = {
0: "MoveUp",
1: "MoveDown",
2: "MoveLeft",
3: "MoveRight",
4: "LeaveBomb",
5: "None"
}
girl_alive = True
done = False
lose = False
victory = False
##const
TIME_NORM = 10
MOVEMENT_RW = 5
BONUS_RW = 10
ALIVE_RW = 20
ENEMIES_NORM = 5
REWARD_BOMB = 25
REWARD_VICTORY = 100
REWARD_LOSE = 50
MAX_DISTANCE = 8
def restartState(self):
self.girl_alive = True
self.done = False
self.lose = False
self.victory = False
self.time = 0
self.start_time = time.time()
def getCurrentTimeNormalized(self):
return self.time / self.TIME_NORM
def getDistanceNormalized(self, elem1, elem2):
return abs(elem1['x'] - elem2['x']) + abs(elem1['y'] - elem2['y'])
def updateTime(self):
self.time = time.time() - self.start_time
def getReward(self, action):
reward = 0
# Penalize based on the number of enemies
reward -= self.ENEMIES_NORM*len(self.enemies)
# Penalize as time passes
reward -= self.getCurrentTimeNormalized()
# Penalize/reward the girl for being close to/far from a bomb
for bomb in self.bombs:
distance = self.getDistanceNormalized(bomb, self.girl)
if distance < self.MAX_DISTANCE:
reward -= distance
else:
reward += distance
if(action == 4):
# Reward placing a bomb
reward += self.REWARD_BOMB
for enemy in self.enemies:
# Reward placing the bomb closer to an enemy
distance = self.getDistanceNormalized(enemy, self.girl)
if distance < self.MAX_DISTANCE:
reward += self.REWARD_BOMB/distance
if(action < 4):
# Reward moving
reward += self.MOVEMENT_RW
# Reward being closer to a bonus
for bonus in self.bonus:
reward += self.BONUS_RW / self.getDistanceNormalized(bonus, self.girl)
# Reward staying alive and playing
if(self.girl_alive):
reward += self.ALIVE_RW
# Penalize losing
if self.lose:
reward -= self.REWARD_LOSE
# Reward winning
if self.victory:
reward += self.REWARD_VICTORY
return reward
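# Editor's note: a small worked example of getReward under assumed values (not taken
# from a real run): one enemy at Manhattan distance 3, no bombs, no bonuses, girl
# alive, time ~0, action = 4 (LeaveBomb):
#   -ENEMIES_NORM * 1        = -5      (one enemy on screen)
#   +REWARD_BOMB             = +25     (placing a bomb)
#   +REWARD_BOMB / 3         ~ +8.33   (enemy within MAX_DISTANCE)
#   +ALIVE_RW                = +20     (still alive)
#   total                    ~ 48.33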
|
{"/plugins/SerpentBombermanGameAgentPlugin/files/serpent_Bomberman_game_agent.py": ["/plugins/SerpentBombermanGameAgentPlugin/files/helpers/game_status.py", "/plugins/SerpentBombermanGameAgentPlugin/files/helpers/ppo.py", "/plugins/SerpentBombermanGameAgentPlugin/files/helpers/dqn.py"]}
|
230
|
martkins/images_exif_viewer
|
refs/heads/master
|
/labelmodel.py
|
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.lang import Builder
from kivy.event import EventDispatcher
class LabelModel(Label):
def __init__(self, **kwargs):
super().__init__(**kwargs)
|
{"/main.py": ["/imagemodel.py", "/buttonmodel.py", "/labelmodel.py"]}
|
231
|
martkins/images_exif_viewer
|
refs/heads/master
|
/imagemodel.py
|
from kivy.uix.image import Image
from kivy.properties import NumericProperty
class ImageModel(Image):
ang = NumericProperty()
def __init__(self, **kwargs):
super().__init__(**kwargs)
def rotate_right(self):
self.ang += 90
def rotate_left(self):
self.ang -= 90
def reset_angle(self):
self.ang = 0
|
{"/main.py": ["/imagemodel.py", "/buttonmodel.py", "/labelmodel.py"]}
|
232
|
martkins/images_exif_viewer
|
refs/heads/master
|
/main.py
|
from kivy.app import App
from kivy.uix.image import Image
from kivy.properties import ObjectProperty
from kivy.uix.listview import ListView, SimpleListAdapter
from kivy.uix.label import Label
from imagemodel import ImageModel
from kivy.uix.button import Button
from kivy.factory import Factory
from buttonmodel import ButtonModel
from labelmodel import LabelModel
from kivy.core.window import Window
class ButtonWithModel(Button):
def __init__(self,model, **kwargs):
self.model = model
super().__init__(**kwargs)
class LabelWithModel(Label):
def __init__(self,model, **kwargs):
self.model = model
super().__init__(**kwargs)
class ImageWithModel(Image):
def __init__(self,model, **kwargs):
self.model = model
super().__init__(**kwargs)
class MainApp(App):
image = ObjectProperty()
exif = ObjectProperty()
def build(self):
Window.bind(on_keyboard=self.on_keyboard)
self.start_app()
def on_keyboard(self, window, key, scancode, codepoint, modifier):
if modifier == ['ctrl'] and codepoint == 'r':
self.image.model.rotate_right()
if modifier == ['ctrl'] and codepoint == 'l':
self.image.model.rotate_left()
if modifier == ['ctrl'] and codepoint == 'o':
self.exif.model.open_image()
if modifier == ['ctrl'] and codepoint == 'e':
self.exif.model.get_exif_data()
if modifier == ['ctrl'] and codepoint == 'n':
self.exif.model.next_image()
if modifier == ['ctrl'] and codepoint == 'p':
self.exif.model.previous_image()
if modifier == ['ctrl'] and codepoint == 'g':
self.exif.model.get_location()
def start_app(self):
labels = [LabelModel() for _ in range(100)]
self.image = Factory.MainImage(ImageModel())
self.root.ids.image_box.add_widget(self.image)
self.exif = Factory.GetExifData(ButtonModel(image=self.image, labels=labels))
self.root.ids.button_box.add_widget(self.exif)
right = Factory.RotateRight(self.exif.model)
self.root.ids.button_box.add_widget(right)
left = Factory.RotateLeft(self.exif.model)
self.root.ids.button_box.add_widget(left)
loc = Factory.GetLocation(self.exif.model)
self.root.ids.button_box.add_widget(loc)
next = Factory.NextImage(self.exif.model)
self.root.ids.cycle_box.add_widget(next)
prev = Factory.PreviousImage(self.exif.model)
self.root.ids.cycle_box.add_widget(prev)
get = Factory.OpenImage(self.exif.model)
self.root.ids.button_box.add_widget(get)
lab = Factory.ExifLabel(LabelModel())
self.root.ids.exif_container.add_widget(lab)
list_adapter = SimpleListAdapter(
data=labels,
args_converter=lambda row, model: {'model': model,
'size_hint_y': None,
'height':100},
cls=Factory.ExifTags)
self.root.ids.exif_container.add_widget(ListView(adapter=list_adapter))
if __name__ == "__main__":
MainApp().run()
|
{"/main.py": ["/imagemodel.py", "/buttonmodel.py", "/labelmodel.py"]}
|
233
|
martkins/images_exif_viewer
|
refs/heads/master
|
/buttonmodel.py
|
import exifread
from kivy.uix.button import Button
from kivy.lang import Builder
from tkinter.filedialog import askopenfilenames
from kivy.properties import DictProperty, ListProperty, NumericProperty
import webbrowser
from tkinter import Tk
root = Tk()
root.withdraw()
Builder.load_file('./actionbutton.kv')
def _convert(value):
d = float(str(value[0]))
m = float(str(value[1]))
s1 = (str(value[2])).split('/')
s = float((s1[0])) / float((s1[1]))
return d + (m / 60.0) + (s / 3600.0)
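# Editor's note: a worked example of _convert above, with illustrative EXIF rationals
# (not from a real photo). For values that print as 48, 51 and 27/2, i.e. 48 degrees,
# 51 minutes, 13.5 seconds:
#   48 + 51/60.0 + 13.5/3600.0 = 48.85375 decimal degrees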
class ButtonModel(Button):
tags = DictProperty()
images = ListProperty()
count = NumericProperty(0)
def __init__(self,image='', labels='', **kwargs):
self.image = image
self.labels = labels
super().__init__(**kwargs)
def rotate_right(self):
self.image.model.rotate_right()
def rotate_left(self):
self.image.model.rotate_left()
def open_image(self):
try:
self.images = askopenfilenames(initialdir="/", title="Select file",
filetypes=(("jpeg files", "*.jpg"),("png files","*png"), ("all files", "*.*")))
self.reset_labels()
self.image.source = self.images[0]
self.image.model.reset_angle()
except:
pass
def get_exif_data(self):
print(self.image.source)
f = open(self.image.source, 'rb')
self.tags = exifread.process_file(f)
i = 0
for tag in self.tags.keys():
if tag not in ('EXIF MakerNote','User Comment','JPEGThumbnail', 'EXIF UserComment'):
self.labels[i].text = str(tag.split()[1])+' : '+str(self.tags[tag])
i = i+1
def get_location(self):
lat = None
lon = None
try:
gps_latitude = self.tags['GPS GPSLatitude'].values
gps_latitude_ref = self.tags['GPS GPSLatitudeRef'].values
gps_longitude = self.tags['GPS GPSLongitude'].values
gps_longitude_ref = self.tags['GPS GPSLongitudeRef'].values
if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:
lat = _convert(gps_latitude)
if gps_latitude_ref != 'N':
lat = 0 - lat
lon = _convert(gps_longitude)
if gps_longitude_ref != 'E':
lon = 0 - lon
webbrowser.open('https://www.google.com/maps/search/?api=1&query='+str(lat)+','+str(lon))
except:
pass
def next_image(self):
if len(self.images) > 1:
self.count = self.count + 1
if self.count >= len(self.images):
self.count = 0
self.image.model.reset_angle()
self.reset_labels()
self.image.source = self.images[self.count]
def previous_image(self):
if len(self.images) > 1:
self.count = self.count - 1
if self.count < 0:
self.count = len(self.images)-1
self.image.model.reset_angle()
self.reset_labels()
self.image.source = self.images[self.count]
def reset_labels(self):
self.tags.clear()
for i in range(0,len(self.labels)):
self.labels[i].text = ''
|
{"/main.py": ["/imagemodel.py", "/buttonmodel.py", "/labelmodel.py"]}
|
234
|
aejontargaryen/conceal-bot
|
refs/heads/master
|
/poolSetup.py
|
import requests
import json
import time
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from poolModels import pool, poolBase
engine = create_engine('sqlite:///poolData.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
poolBase.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Insert a Person in the person table
new_pool = pool(url='http://pool.conceal.network/api/live_stats', name='Official Pool', type="normal", poolurl='https://pool.conceal.network')
session.add(new_pool)
new_pool = pool(url='https://ccx.scecf.org:21001/live_stats', name='SCECF', type="normal", poolurl='https://ccx.scecf.org')
session.add(new_pool)
new_pool = pool(url='https://ccx.bluerockpools.net:8119/live_stats', name='Blue Rock Pool', type="normal", poolurl='https://ccx.bluerockpools.net')
session.add(new_pool)
new_pool = pool(url='http://minexmr24.ru:8124/live_stats', name='CCX Майнинг пул', type="normal", poolurl='http://ccx.minexmr24.ru')
session.add(new_pool)
new_pool = pool(url='https://ccx.go-mine.it/api/pool/stats', name='go mine it!', type="node", poolurl='https://ccx.go-mine.it')
session.add(new_pool)
new_pool = pool(url='https://api.ccx.heigh-ho.funkypenguin.co.nz/live_stats', name='Funky Penguin', type="normal", poolurl='https://ccx.heigh-ho.funkypenguin.co.nz')
session.add(new_pool)
new_pool = pool(url='https://conceal.herominers.com/api/stats', name='herominers', type="normal", poolurl='https://conceal.herominers.com')
session.add(new_pool)
new_pool = pool(url='https://ccx.thepiratemine.nl:2890/live_stats', name='ThePirateMine', type="normal", poolurl='https://ccx.thepiratemine.nl')
session.add(new_pool)
session.commit()
|
{"/bot.py": ["/utils.py"]}
|
235
|
aejontargaryen/conceal-bot
|
refs/heads/master
|
/utils.py
|
import random
import requests
import sys
import discord
import binascii
import json
from collections import deque
from jsonrpc_requests import Server
from models import Transaction, TipJar
config = json.load(open('config.json'))
class CCXServer(Server):
def dumps(self, data):
data['password'] = config['rpc_password']
return json.dumps(data)
rpc = CCXServer("http://{}:{}/json_rpc".format(config['rpc_host'], config['rpc_port']))
daemon = CCXServer("http://{}:{}/json_rpc".format(config['daemon_host'], config['daemon_port']))
CONFIRMED_TXS = []
def get_supply():
lastblock = daemon.getlastblockheader()
bo = daemon.f_block_json(hash=lastblock["block_header"]["hash"])
return float(bo["block"]["alreadyGeneratedCoins"])/1000000
def format_hash(hashrate):
i = 0
byteUnits = [" H", " KH", " MH", " GH", " TH", " PH"]
while (hashrate > 1000):
hashrate = hashrate / 1000
i = i+1
return "{0:,.2f} {1}".format(hashrate, byteUnits[i])
def gen_paymentid(address):
rng = random.Random(address+config['token'])
length = 32
chunk_size = 65535
chunks = []
while length >= chunk_size:
chunks.append(rng.getrandbits(chunk_size * 8).to_bytes(chunk_size, sys.byteorder))
length -= chunk_size
if length:
chunks.append(rng.getrandbits(length * 8).to_bytes(length, sys.byteorder))
result = b''.join(chunks)
return "".join(map(chr, binascii.hexlify(result)))
def get_deposits(session):
# get the current block height
# we only want to insert tx after 10 blocks from the tx
data = daemon.getlastblockheader()
height = int(data["block_header"]["height"])
print("INFO: Current blockchain height is {}".format(height))
# scan for deposits
print("scanning the blockchain for deposits")
print("getting list of payment id's in the tipjar database")
allPID = session.query(TipJar).all()
thePID = 0
totalPID = len(allPID)
for thePID in range(0,totalPID):
currentPID = allPID[thePID].paymentid
print("INFO: checking PID {}".format(currentPID))
params = {"payment_id": currentPID}
data = rpc.get_payments(params)
#go through each transaction and them to the confirmed transactions array
for tx in data['payments']:
unlockWindow = int(tx["block_height"]) + 10
if tx['tx_hash'] in CONFIRMED_TXS: # if its already there, ignore it
continue
if unlockWindow < height: # its a confirmed and unlocked transaction
CONFIRMED_TXS.append({'transactionHash': tx['tx_hash'],'amount': tx['amount'], 'ready':True, 'pid':currentPID})
print("CONF: confirmed tx {} for {} ccx at block {}".format(tx['tx_hash'],tx['amount'],tx['block_height']))
else :
toUnlock = unlockWindow - height
print("UNCF: unconfirmed tx {} for {} ccx will unlock in {} blocks".format(tx['tx_hash'],tx['amount'],toUnlock))
for i,trs in enumerate(CONFIRMED_TXS): #now we go through the array of all transactions from our registered users
processed = session.query(Transaction).filter(Transaction.tx == trs['transactionHash']).first()
amount = 0
print("INFO: looking at tx: " + trs['transactionHash'])
if processed: # done already, lets ignore and remove it from the array
print("INFO: already processed: " + trs['transactionHash'])
CONFIRMED_TXS.pop(i)
continue
likestring = trs['pid']
balance = session.query(TipJar).filter(TipJar.paymentid.contains(likestring)).first() #get the balance from that PID
print("INFO: Balance for pid {} is: {}".format(likestring,balance))
if not balance:
print("user does not exist!")
continue
amount = trs['amount']
change = 0
if trs['pid']==balance.paymentid: # money entering tipjar, add to user balance
print("UPDATE: deposit of {} to PID {}".format(amount,balance.paymentid))
change += amount
try:
balance.amount += change
except:
print("no balance, setting balance to: {}".format(change))
balance.amount = change
print("new balance: {}".format(balance.amount))
session.commit()
if balance:
nt = Transaction(trs['transactionHash'], change, trs['pid'])
CONFIRMED_TXS.pop(i)
yield nt
def get_fee(amount):
return 100
def build_transfer(amount, transfers, balance):
print("SEND PID: {}".format(balance.paymentid[0:58] + balance.withdraw))
params = {
'fee': get_fee(amount),
'paymentId': balance.paymentid[0:58] + balance.withdraw,
'mixin': 3,
'destinations': transfers
}
return params
REACTION_AMP_CACHE = deque([], 500)
def reaction_tip_lookup(message):
for x in REACTION_AMP_CACHE:
if x['msg'] == message:
return x
def reaction_tip_register(message, user):
msg = reaction_tip_lookup(message)
if not msg:
msg = {'msg': message, 'tips': []}
REACTION_AMP_CACHE.append(msg)
msg['tips'].append(user)
return msg
def reaction_tipped_already(message, user):
msg = reaction_tip_lookup(message)
if msg:
return user in msg['tips']
|
{"/bot.py": ["/utils.py"]}
|
236
|
aejontargaryen/conceal-bot
|
refs/heads/master
|
/bot.py
|
import asyncio
import discord
from discord.ext.commands import Bot, Context
import requests
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from poolModels import pool, poolBase
from models import Wallet, TipJar, Base, Transaction
from utils import config, format_hash, gen_paymentid, rpc, daemon, \
get_deposits, get_fee, build_transfer, get_supply, \
reaction_tip_register, reaction_tipped_already
HEADERS = {'Content-Type': 'application/json'}
### DATABASE SETUP ###
engine = create_engine('sqlite:///ccxbot.db')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
### POOL DATABASE SETUP ###
poolEngine = create_engine('sqlite:///poolData.db')
poolBase.metadata.create_all(poolEngine)
poolSession = sessionmaker(bind=poolEngine)
session2 = poolSession()
client = Bot(
description="{} Discord Bot".format(config['symbol']),
command_prefix=config['prefix'],
pm_help=False)
@client.event
async def on_member_join(member):
await send_join_pm(member, client)
async def wallet_watcher():
await client.wait_until_ready()
while not client.is_closed:
for tx in get_deposits(session):
session.add(tx)
try:
session.commit()
except:
session.rollback()
balance = session.query(TipJar).filter(TipJar.paymentid == tx.paymentid).first()
if not balance: # don't do for withdrawals from jar (basically tips)
return
good_embed = discord.Embed(title="Deposit Recieved!",colour=discord.Colour(0xD4AF37))
good_embed.description = "Your deposit of {} {} has now been credited.".format(tx.amount/config['units'], config['symbol'])
print("TRANSACTION PID IS: " + tx.paymentid)
good_embed.add_field(name="New Balance", value="{0:,.2f}".format(balance.amount/config['units']))
user = await client.get_user_info(str(balance.userid))
try:
await client.send_message(user, embed=good_embed)
except:
continue
await asyncio.sleep(119) # just less than the block time
client.loop.create_task(wallet_watcher())
@client.event
async def on_ready():
print("Bot online!")
### TEST COMMANDS ###
# test to see if we can get a list of users online
# and then test a rain function which sends money from a different wallet to everyone
# and to test getting a welcome dm
async def send_join_pm(member, client):
"""
Sends a welcome private message to joined members.
"""
if member.bot:
return
currently_online = ""
for m in member.server.members:
if not m.status.__str__() == "offline":
if m.roles.__contains__(discord.utils.get(m.server.roles, name="core team")):
currently_online += ":white_small_square: " + m.mention + "\n"
await client.send_message(member,
"**Hey, " + member.name + "! Welcome to the Conceal Discord! :)**\n\n"
"If you're new here and have some questions head over to the **#faq** channel for an introduction to the project and answers to common questions.\n"
"You can also head over to the **#annoucements##** channel and see the latest news on where we are and what we are doing.\n"
"If you have more questions, look for one the admins or devs\n\n"
"**Devs currently online:**\n\n%s\n\n"
"You can also use this bot to get more information:\n"
"Use the command `.help` to get list of commands.\n"
"You can also see current network information with `.stats` or other specific commands like `.hashrate`, `.height`, `.difficulty`, and `.supply`\n"
"Don't forget to register your wallet address with the bot with the command `.registerwallet` so you can recieve tips.\n"
"If you want to send tips then type `.deposit` after you register your wallet address and transfer some funds to your TipJar.\n"
% currently_online)
@client.command(pass_context=True)
async def price(ctx, exchange=None):
err_embed = discord.Embed(title=":x:Error:x:", colour=discord.Colour(0xf44242))
coindata = requests.get("https://maplechange.com/api/v2/tickers/ccxbtc.json")
btc = requests.get("https://www.bitstamp.net/api/ticker/")
try:
to_json = coindata.json()
except ValueError:
err_embed.description = "The MapleChange API is down"
await client.say(embed = err_embed)
return
coindata_embed = discord.Embed(title="Conceal: MapleChange", url="https://maplechange.com/markets/ccxbtc", description="Current pricing of CCX", color=0x7F7FFF)
coindata_embed.set_thumbnail(url=config['logo_url'])
url = 'https://maplechange.com/api/v2/tickers/ccxbtc.json'
coindata_embed.add_field(name="Sell", value="{0:,.0f} sats".format(round(float(coindata.json()['ticker']['sell'])*100000000)), inline=True)
coindata_embed.add_field(name="Current", value="{0:,.0f} sats".format(round(float(coindata.json()['ticker']['last'])*100000000)), inline=True)
coindata_embed.add_field(name="High", value="{0:,.0f} sats".format(round(float(coindata.json()['ticker']['high'])*100000000)), inline=True)
coindata_embed.add_field(name="{}-USD".format(config['symbol']),
value="${0:,.4f} USD".format(float(coindata.json()['ticker']['sell'])*float(btc.json()['last'])), inline=True)
coindata_embed.add_field(name="BTC-USD", value="${0:,.2f} USD".format(float(btc.json()['last'])), inline=True)
await client.say(embed=coindata_embed)
### NETWORK COMMANDS ###
@client.command()
async def hashrate():
""" .hashrate - Returns network hashrate """
data = daemon.getlastblockheader()
hashrate = format_hash(float(data["block_header"]["difficulty"]) / 120)
await client.say("The current global hashrate is **{}/s**".format(hashrate))
@client.command()
async def difficulty():
""" .difficulty - Returns network difficulty """
data = daemon.getlastblockheader()
difficulty = float(data["block_header"]["difficulty"])
await client.say("The current difficulty is **{0:,.0f}**".format(difficulty))
@client.command()
async def height():
""" .height - Returns the current blockchain height """
data = daemon.getlastblockheader()
height = int(data["block_header"]["height"])
await client.say("The current block height is **{:,}**".format(height))
@client.command()
async def supply():
""" .supply - Returns the current circulating supply """
supply = get_supply()
await client.say("The current circulating supply is **{:0,.2f}** {}".format(supply, config['symbol']))
@client.command()
async def stats():
""" .stats - Returns all network stats """
data = daemon.getlastblockheader()
hashrate = format_hash(float(data["block_header"]["difficulty"]) / 120)
data = daemon.getlastblockheader()
height = int(data["block_header"]["height"])
deposits = int(data["block_header"]["deposits"]) / 1000000
supply = get_supply()
data = daemon.getlastblockheader()
difficulty = float(data["block_header"]["difficulty"])
stats_embed=discord.Embed(title="Conceal", url="https://github.com/TheCircleFoundation/", description="Complete Network Stats", color=0x7F7FFF)
stats_embed.set_thumbnail(url=config['logo_url'])
hashFromPools = 0
allPools = session2.query(pool).all()
totalPools = len(allPools)
for poolNumber in range(0,totalPools):
poolHash = allPools[poolNumber].hashrate
hashFromPools = hashFromPools + poolHash
stats_embed.add_field(name="Hashrate (from Pools)", value="{}KH/s".format(hashFromPools/1000))
stats_embed.add_field(name="Hashrate (from Difficulty)", value="{}/s".format(hashrate))
stats_embed.add_field(name="Height", value="{:,}".format(height))
stats_embed.add_field(name="Difficulty", value="{0:,.0f}".format(difficulty))
stats_embed.add_field(name="Circulating Supply", value="{:0,.2f} CCX".format(supply))
stats_embed.add_field(name="Deposits", value="{:0,.2f}".format(deposits))
stats_embed.set_footer(text="Powered by the Conceal Discord bot. Message @katz for any issues.")
await client.say(embed=stats_embed)
@client.command()
async def pools():
""" .pools - Get a list of pools and current stats """
stats_embed=discord.Embed(title="Conceal", url="https://github.com/TheCircleFoundation/", description="Mining Pool Stats", color=0x7F7FFF)
stats_embed.set_thumbnail(url=config['logo_url'])
hashFromPools = 0
allPools = session2.query(pool).all()
totalPools = len(allPools)
for poolNumber in range(0,totalPools):
poolName = allPools[poolNumber].name
poolSiteURL = allPools[poolNumber].poolurl
poolHash = allPools[poolNumber].hashrate
hashFromPools = hashFromPools + poolHash
poolMiners = allPools[poolNumber].miners
stats_embed.add_field(name=poolName, value=poolSiteURL, inline=False)
stats_embed.add_field(name="Hashrate", value="{} KH/s".format(poolHash/1000))
stats_embed.add_field(name="Miners", value="{:,}".format(poolMiners))
stats_embed.add_field(name="Hashrate (from Pools)", value="{}KH/s".format(hashFromPools/1000))
stats_embed.set_footer(text="Powered by the Conceal Discord bot. Message @katz for any issues.")
await client.say(embed=stats_embed)
### WALLET COMMANDS ###
@client.command(pass_context=True)
async def members(ctx):
members = ""
allID = session.query(Wallet).all()
theID = 0
totalID = len(allID)
await client.say("List of members:")
for theID in range(0,totalID):
currentID = allID[theID].userid
memberName = discord.utils.get(client.get_all_members(), id=str(currentID))
members = members + " @" + str(memberName)
await client.say(members)
@client.command(pass_context=True)
async def registerwallet(ctx, address):
""" .registerwallet <addr> - Register your wallet in the database """
err_embed = discord.Embed(title="Error", colour=discord.Colour(0xf44242))
good_embed = discord.Embed(title="{}'s Wallet".format(ctx.message.author.name),colour=discord.Colour(0xD4AF37))
if address is None:
err_embed.description = "Please provide an address"
await client.send_message(ctx.message.author, embed = err_embed)
return
exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()
addr_exists = session.query(Wallet).filter(Wallet.address == address).first()
if exists:
good_embed.title = "Your wallet exists!".format(exists.address)
good_embed.description = "```{}``` use `{}updatewallet <addr>` to change".format(exists.address, config['prefix'])
await client.send_message(ctx.message.author, embed = good_embed)
return
if addr_exists:
err_embed.description = "Address already registered by another user!"
await client.send_message(ctx.message.author, embed = err_embed)
return
elif not exists and len(address) == 98:
w = Wallet(address, ctx.message.author.id,ctx.message.id)
session.add(w)
session.commit()
good_embed.title = "Successfully registered your wallet"
good_embed.description = "```{}```".format(address)
await client.send_message(ctx.message.author, embed = good_embed)
pid = gen_paymentid(address)
balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()
if not balance:
t = TipJar(pid, ctx.message.author.id, 0)
session.add(t)
else:
balance.paymentid = pid
session.commit()
tipjar_addr = "ccx7Wga6b232eSVfy8KQmBjho5TRXxX8rZ2zoCTyixfvEBQTj1g2Ysz1hZKxQtw874W3w6BZkMFSn5h3gUenQemZ2xiyyjxBR7"
good_embed.title = "Your Tipjar Info"
good_embed.description = "Deposit {} to start tipping! ```transfer 3 {} <amount> -p {}```".format(config['symbol'], tipjar_addr, pid)
balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()
await client.send_message(ctx.message.author, embed = good_embed)
return
elif len(address) > 98:
err_embed.description = "Your wallet must be 98 characeters long, your entry was too long"
elif len(address) < 98:
err_embed.description = "Your wallet must be 98 characeters long, your entry was too short"
await client.say(embed = err_embed)
@registerwallet.error
async def registerwallet_error(error, ctx):
await client.say("Please provide an address: .registerwallet <addr>.")
@client.command(pass_context=True)
async def updatewallet(ctx, address):
""" .updatewallet <addr> - Changes your registred wallet address """
err_embed = discord.Embed(title="Error", colour=discord.Colour(0xf44242))
if address == None:
err_embed.description = "Please provide an address!"
await client.send_message(ctx.message.author, embed=err_embed)
return
address = address.strip()
good_embed = discord.Embed(title="{}'s Updated Wallet".format(ctx.message.author.name),colour=discord.Colour(0xD4AF37))
exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()
if not exists:
err_embed.description = "You haven't registered a wallet!"
addr_exists = session.query(Wallet).filter(Wallet.address == address).first()
if addr_exists:
err_embed.description = "Address already registered by another user!"
await client.send_message(ctx.message.author, embed = err_embed)
return
elif exists and len(address) == 98:
old_pid = gen_paymentid(exists.address)
old_balance = session.query(TipJar).filter(TipJar.paymentid == old_pid).first()
exists.address = address
pid = gen_paymentid(address)
old_balance.paymentid = pid
good_embed.title = "Successfully updated your wallet"
good_embed.description = "```{}```".format(address)
session.commit()
await client.send_message(ctx.message.author, embed = good_embed)
tipjar_addr = "ccx7Wga6b232eSVfy8KQmBjho5TRXxX8rZ2zoCTyixfvEBQTj1g2Ysz1hZKxQtw874W3w6BZkMFSn5h3gUenQemZ2xiyyjxBR7"
good_embed.title = "Your Tipjar Info"
good_embed.description = "Deposit {} to start tipping! ```transfer 3 {} <amount> -p {}```".format(config['symbol'], tipjar_addr, pid)
await client.send_message(ctx.message.author, embed = good_embed)
good_embed.title = "Balance Update"
good_embed.url = ""
good_embed.description = "New Balance: `{:0,.2f}` {1}".format(old_balance.amount / config['units'], config['symbol'])
await client.send_message(ctx.message.author, embed = good_embed)
return
elif len(address) > 98:
err_embed.description = "Your wallet must be 98 characeters long, your entry was too long"
elif len(address) < 98:
err_embed.description = "Your wallet must be 98 characeters long, your entry was too short"
await client.say(embed=err_embed)
@updatewallet.error
async def updatewallet_error(error, ctx):
await client.say("Please provide an address: .updatewallet <addr>")
@client.command(pass_context=True)
async def wallet(ctx, user: discord.User=None):
""" .wallet - Returns your registered wallet address """
err_embed = discord.Embed(title=":x:Error:x:", colour=discord.Colour(0xf44242))
good_embed = discord.Embed(colour=discord.Colour(0xD4AF37))
if not user:
exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()
if not exists:
err_embed.description = "You haven't registered a wallet or specified a user!"
else:
good_embed.title = "Your wallet"
good_embed.description = "Here's your wallet {}! ```{}```".format(ctx.message.author.mention, exists.address)
await client.send_message(ctx.message.author, embed = good_embed)
return
else:
exists = session.query(Wallet).filter(Wallet.userid == user.id).first()
if not exists:
err_embed.description = "{} hasn't registered a wallet!".format(user.name)
else:
good_embed.title = "{}'s wallet".format(user.name)
good_embed.description = "```{}```".format(exists.address)
await client.send_message(ctx.message.author, embed = good_embed)
return
await client.send_message(ctx.message.author, embed = err_embed)
@client.command(pass_context=True)
async def deposit(ctx, user: discord.User=None):
""" .deposit - Get deposit information so you can start tipping """
err_embed = discord.Embed(title=":x:Error:x:", colour=discord.Colour(0xf44242))
good_embed = discord.Embed(title="Your Tipjar Info")
tipjar_addr = "ccx7Wga6b232eSVfy8KQmBjho5TRXxX8rZ2zoCTyixfvEBQTj1g2Ysz1hZKxQtw874W3w6BZkMFSn5h3gUenQemZ2xiyyjxBR7"
exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()
if exists:
pid = gen_paymentid(exists.address)
good_embed.description = "Deposit {} to start tipping! ,Send the funds you want to deposit to the address: ``{}`` (Pay to: in the GUI) and put ``{}`` in the Payment ID field. CLI users just send a transfer to the same address and payment ID.".format(config['symbol'], tipjar_addr, pid)
balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()
if not balance:
t = TipJar(pid, ctx.message.author.id, 0)
session.add(t)
session.commit()
await client.send_message(ctx.message.author, embed = good_embed)
else:
err_embed.description = "You haven't registered a wallet!"
err_embed.add_field(name="Help", value="Use `{}registerwallet <addr>` before trying to tip!".format(config['prefix']))
await client.say(embed=err_embed)
@client.command(pass_context=True)
async def balance(ctx, user: discord.User=None):
""" .balance - PMs your tipjar balance """
err_embed = discord.Embed(title=":x:Error:x:", colour=discord.Colour(0xf44242))
good_embed = discord.Embed(title="Your Tipjar Balance is")
exists = session.query(Wallet).filter(Wallet.userid == ctx.message.author.id).first()
if exists:
pid = gen_paymentid(exists.address)
balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()
if not balance:
t = TipJar(pid, ctx.message.author.id, 0)
session.add(t)
session.commit()
else:
good_embed.description = "`{0:,.2f}` {1}".format(balance.amount / config['units'], config['symbol'])
good_embed.add_field(name="Widthrawal", value="You can tip yourself to widthraw CCX to your wallet")
await client.send_message(ctx.message.author, embed=good_embed)
else:
err_embed.description = "You haven't registered a wallet!"
err_embed.add_field(name="Help", value="Use `{}registerwallet <addr>` before trying to tip!".format(config['prefix']))
await client.say(embed=err_embed)
EMOJI_MONEYBAGS = "\U0001F4B8"
EMOJI_SOS = "\U0001F198"
EMOJI_ERROR = "\u274C"
@client.command(pass_context=True)
async def tip(ctx, amount, sender):
""" .tip <amount> <username> - Tips a user the specified amount """
await _tip(ctx, amount, None, None)
async def _tip(ctx, amount,
sender: discord.User=None,
receiver: discord.User=None):
err_embed = discord.Embed(title=":x:Error:x:", colour=discord.Colour(0xf44242))
good_embed = discord.Embed(title="You were tipped!", colour=discord.Colour(0xD4AF37))
request_desc = "Register with `{}registerwallet <youraddress>` to get started!".format(config['prefix'])
request_embed = discord.Embed(title="{} wants to tip you".format(ctx.message.author.name), description=request_desc)
if not sender: # regular tip
sender = ctx.message.author
if not receiver:
tipees = ctx.message.mentions
else:
tipees = [receiver, ]
try:
amount = int(round(float(amount)*config['units']))
except:
await client.say("Amount must be a number equal or greater than {}".format(10000 / config['units']))
return False
if amount <= 9999:
err_embed.description = "`amount` must be equal or greater than {}".format(10000 / config['units'])
await client.say(embed=err_embed)
return False
fee = get_fee(amount)
self_exists = session.query(Wallet).filter(Wallet.userid == sender.id).first()
if not self_exists:
err_embed.description = "You haven't registered a wallet!"
err_embed.add_field(name="Help", value="Use `{}registerwallet <addr>` before trying to tip!".format(config['prefix']))
await client.send_message(sender, embed=err_embed)
return False
pid = gen_paymentid(self_exists.address)
balance = session.query(TipJar).filter(TipJar.paymentid == pid).first()
if not balance:
t = TipJar(pid, sender.id, 0)
session.add(t)
session.commit()
err_embed.description = "You are not registered, please `{}deposit` to tip".format(config['prefix'])
await client.send_message(sender, embed=err_embed)
return False
if balance.amount < 0:
balance.amount = 0
session.commit()
err_embed.description = "Your balance was negative!"
await client.send_message(sender, embed=err_embed)
katz = discord.utils.get(client.get_all_members(), id='408875878328827916')
err_embed.title = "{} had a negative balance!!".format(sender.name)
err_embed.description = "PID: {}".format(pid)
await client.send_message(katz, embed=err_embed)
return False
if ((len(tipees)*(amount))+fee) > balance.amount:
err_embed.description = "Your balance is too low! Amount + Fee = `{}` {}".format(((len(tipees)*(amount))+fee) / config['units'], config['symbol'])
await client.add_reaction(ctx.message, "\u274C")
await client.send_message(sender, embed=err_embed)
return False
destinations = []
actual_users = []
failed = 0
for user in tipees:
user_exists = session.query(Wallet).filter(Wallet.userid == user.id).first()
if user_exists:
destinations.append({'amount': amount, 'address': user_exists.address})
if user_exists.userid != sender.id: # multitip shouldn't tip self.
actual_users.append(user)
else:
failed = failed+1
await client.add_reaction(ctx.message, EMOJI_SOS)
try:
await client.send_message(user, embed = request_embed)
except:
continue
if len(destinations) == 0:
await client.add_reaction(ctx.message, EMOJI_SOS)
return False
transfer = build_transfer(amount, destinations, balance)
print(transfer)
result = rpc.transfer(transfer)
print(result)
await client.add_reaction(ctx.message, EMOJI_MONEYBAGS)
balance.amount -= ((len(actual_users)*amount)+fee)
tx = Transaction(result['tx_hash'], (len(actual_users)*amount)+fee, balance.paymentid)
session.add(tx)
session.commit()
good_embed.title = "Tip Sent!"
good_embed.description = (
"Sent `{0:,.2f}` {1} to {2} users! With Transaction Hash ```{3}```"
.format(amount / config['units'],
config['symbol'],
len(actual_users),
result['tx_hash']))
good_embed.url = (
"http://www.example.com/#?hash={}#blockchain_transaction"
.format(result['tx_hash']))
good_embed.add_field(name="New Balance", value="`{:0,.2f}` {}".format(balance.amount / config['units'], config['symbol']))
good_embed.add_field(name="Transfer Info", value="Successfully sent to {0} users. {1} failed.".format(len(actual_users), failed))
try:
await client.send_message(sender, embed=good_embed)
except:
pass
for user in actual_users:
good_embed = discord.Embed(title="You were tipped!", colour=discord.Colour(0xD4AF37))
good_embed.description = (
"{0} sent you `{1:,.2f}` {2} with Transaction Hash ```{3}```"
.format(sender.mention,
amount / config['units'],
config['symbol'],
result['tx_hash']))
good_embed.url = (
"http://www.example.com/#?hash={}#blockchain_transaction"
.format(result['tx_hash']))
try:
await client.send_message(user, embed=good_embed)
except:
continue
return True
client.run(config['token'])
|
{"/bot.py": ["/utils.py"]}
|
237
|
CENSOREDd/test_fk
|
refs/heads/master
|
/fk.py
|
#!/usr/bin/python3
from time import sleep
print("what the fuck???")
if __name__ == "__main__":
print("here is python code!!!")
print("Executing code...")
sleep(2)
|
{"/test.py": ["/fk.py"]}
|
238
|
CENSOREDd/test_fk
|
refs/heads/master
|
/test.py
|
#!/usr/bin/python3
import fk
print("here is test")
|
{"/test.py": ["/fk.py"]}
|
239
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/autoc/utils/corrplot.py
|
import seaborn as sns
import matplotlib.pyplot as plt
def plot_corrmatrix(df, square=True, linewidths=0.1, annot=True,
size=None, figsize=(12, 9), *args, **kwargs):
"""
Plot correlation matrix of the dataset
see doc at https://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.heatmap.html#seaborn.heatmap
"""
sns.set(context="paper", font="monospace")
f, ax = plt.subplots(figsize=figsize)
sns.heatmap(df.corr(), vmax=1, square=square, linewidths=linewidths,
annot=annot, annot_kws={"size": size}, *args, **kwargs)
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
240
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/setup.py
|
from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
setup(name='autoc',
version="0.1",
description='autoc is a package for data cleaning exploration and modelling in pandas',
long_description=readme(),
author=['Eric Fourrier'],
author_email='ericfourrier0@gmail.com',
license='MIT',
url='https://github.com/ericfourrier/auto-cl',
packages=find_packages(),
test_suite='test',
keywords=['cleaning', 'preprocessing', 'pandas'],
install_requires=[
'numpy>=1.7.0',
'pandas>=0.15.0',
'seaborn>=0.5',
'scipy>=0.14']
)
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
241
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/autoc/utils/getdata.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Get data from https://github.com/ericfourrier/autoc-datasets
"""
import pandas as pd
def get_dataset(name, *args, **kwargs):
"""Get a dataset from the online repo
https://github.com/ericfourrier/autoc-datasets (requires internet).
Parameters
----------
name : str
Name of the dataset 'name.csv'
"""
path = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv".format(name)
return pd.read_csv(path, *args, **kwargs)
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
242
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/test.py
|
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Automated test suites with unittest
run "python -m unittest -v test" in the module directory to run the tests
The clock decorator in utils will measure the run time of the test
"""
#########################################################
# Import Packages and helpers
#########################################################
import unittest
# internal helpers
# from autoc.utils.helpers import clock, create_test_df, removena_numpy, cserie
from autoc.utils.helpers import random_pmf, clock, create_test_df, cserie, simu, removena_numpy
from autoc.utils.getdata import get_dataset
from autoc.explorer import DataExploration
from autoc.naimputer import NaImputer
from autoc.outliersdetection import OutliersDetection
import pandas as pd
import numpy as np
flatten_list = lambda x: [y for l in x for y in flatten_list(
l)] if isinstance(x, list) else [x]
# flatten_list = lambda x: [y for l in x for y in flatten_list(l)] if isinstance(x,list) else [x]
#########################################################
# Writing the tests
#########################################################
class TestDataExploration(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" creating test data set for the test module """
cls._test_df = create_test_df()
cls._test_dc = DataExploration(data=cls._test_df)
@clock
def test_to_lowercase(self):
df_lower = self._test_dc.to_lowercase()
self.assertNotEqual(id(df_lower), id(self._test_dc.data))
self.assertTrue((pd.Series(['a'] * 500 + ['b'] * 200 + ['c'] * 300)==
df_lower.loc[:, 'character_variable_up1']).all())
self.assertTrue((pd.Series(['a'] * 500 + ['b'] * 200 + ['d'] * 300)==
df_lower.loc[:, 'character_variable_up2']).all())
@clock
def test_copy(self):
exploration_copy = DataExploration(data=create_test_df(), copy=True)
self.assertEqual(id(self._test_df), id(self._test_dc.data))
self.assertNotEqual(id(self._test_df), id(exploration_copy.data))
@clock
def test_cserie(self):
char_var = cserie(self._test_dc.data.dtypes == "object")
self.assertIsInstance(char_var, list)
self.assertIn('character_variable', char_var)
@clock
def test_removena_numpy(self):
test_array = np.array([np.nan, 1, 2, np.nan])
self.assertTrue((removena_numpy(test_array) == np.array([1, 2])).all())
@clock
def test_sample_df(self):
self.assertEqual(len(self._test_dc.sample_df(pct=0.061)),
0.061 * float(self._test_dc.data.shape[0]))
@clock
def test_nrow(self):
self.assertEqual(self._test_dc._nrow, self._test_dc.data.shape[0])
@clock
def test_col(self):
self.assertEqual(self._test_dc._ncol, self._test_dc.data.shape[1])
@clock
def test_is_numeric(self):
self.assertTrue(self._test_dc.is_numeric("num_variable"))
self.assertTrue(self._test_dc.is_numeric("many_missing_70"))
self.assertFalse(self._test_dc.is_numeric("character_variable"))
@clock
def test_is_int_factor(self):
self.assertFalse(self._test_dc.is_int_factor("num_variable"))
self.assertTrue(self._test_dc.is_int_factor("int_factor_10", 0.01))
self.assertTrue(self._test_dc.is_int_factor("int_factor_10", 0.1))
self.assertFalse(self._test_dc.is_int_factor("int_factor_10", 0.005))
self.assertFalse(self._test_dc.is_int_factor("character_variable"))
@clock
def test_where_numeric(self):
self.assertEqual(cserie(self._test_dc.where_numeric().all()), self._test_dc._dfnum)
@clock
def test_total_missing(self):
self.assertEqual(self._test_dc.total_missing,
self._test_dc.data.isnull().sum().sum())
@clock
def test_None_count(self):
nacolcount = self._test_dc.nacolcount()
self.assertEqual(nacolcount.loc['None_100', 'Napercentage'], 0.1)
self.assertEqual(nacolcount.loc['None_100', 'Nanumber'], 100)
self.assertEqual(nacolcount.loc['None_na_200', 'Napercentage'], 0.2)
self.assertEqual(nacolcount.loc['None_na_200', 'Nanumber'], 200)
@clock
def test_nacolcount_capture_na(self):
nacolcount = self._test_dc.nacolcount()
self.assertEqual(nacolcount.loc['na_col', 'Napercentage'], 1.0)
self.assertEqual(nacolcount.loc['many_missing_70', 'Napercentage'], 0.7)
@clock
def test_nacolcount_is_type_dataframe(self):
self.assertIsInstance(self._test_dc.nacolcount(),
pd.core.frame.DataFrame)
@clock
def test_narowcount_capture_na(self):
narowcount = self._test_dc.narowcount()
self.assertEqual(sum(narowcount['Nanumber'] > 0), self._test_dc._nrow)
#
# @clock
# def test_detect_other_na(self):
# other_na = self._test_dc.detect_other_na()
# self.assertIsInstance(narowcount, pd.core.frame.DataFrame)
@clock
def test_narowcount_is_type_dataframe(self):
narowcount = self._test_dc.narowcount()
self.assertIsInstance(narowcount, pd.core.frame.DataFrame)
@clock
def test_manymissing_capture(self):
manymissing = self._test_dc.manymissing(0.7)
self.assertIsInstance(manymissing, list)
self.assertIn('many_missing_70', manymissing)
self.assertIn('na_col', manymissing)
@clock
def test_nacols_full(self):
nacols_full = self._test_dc.nacols_full
self.assertIsInstance(nacols_full, list)
self.assertIn('na_col',nacols_full )
@clock
def test_narows_full(self):
test_df = pd.DataFrame(np.random.randint(0,100,size=(100, 4)), columns=list('ABCD'))
test_df.loc[99, :] = np.nan
self.assertIn(99, DataExploration(test_df).narows_full)
self.assertNotIn(1, test_df)
@clock
def test_constant_col_capture(self):
constantcol = self._test_dc.constantcol()
self.assertIsInstance(constantcol, list)
self.assertIn('constant_col', constantcol)
self.assertIn('constant_col_num', constantcol)
self.assertIn('na_col', constantcol)
@clock
def test_count_unique(self):
count_unique = self._test_dc.count_unique()
self.assertIsInstance(count_unique, pd.Series)
self.assertEqual(count_unique.id, 1000)
self.assertEqual(count_unique.constant_col, 1)
self.assertEqual(count_unique.character_factor, 7)
@clock
def test_dfchar_check_col(self):
dfchar = self._test_dc._dfchar
self.assertIsInstance(dfchar, list)
self.assertNotIn('num_variable', dfchar)
self.assertIn('character_factor', dfchar)
self.assertIn('character_variable', dfchar)
self.assertNotIn('many_missing_70', dfchar)
@clock
def test_dfnum_check_col(self):
dfnum = self._test_dc._dfnum
self.assertIsInstance(dfnum, list)
self.assertIn('num_variable', dfnum)
self.assertNotIn('character_factor', dfnum)
self.assertNotIn('character_variable', dfnum)
self.assertIn('many_missing_70', dfnum)
@clock
def test_factors_check_col(self):
factors = self._test_dc.factors()
self.assertIsInstance(factors, list)
self.assertNotIn('num_factor', factors)
self.assertNotIn('character_variable', factors)
self.assertIn('character_factor', factors)
@clock
def test_detectkey_check_col(self):
detectkey = self._test_dc.detectkey()
self.assertIsInstance(detectkey, list)
self.assertIn('id', detectkey)
self.assertIn('member_id', detectkey)
@clock
def test_detectkey_check_col_dropna(self):
detectkeyna = self._test_dc.detectkey(dropna=True)
self.assertIn('id_na', detectkeyna)
self.assertIn('id', detectkeyna)
self.assertIn('member_id', detectkeyna)
@clock
def test_findupcol_check(self):
findupcol = self._test_dc.findupcol()
self.assertIn(['id', 'duplicated_column'], findupcol)
self.assertNotIn('member_id', flatten_list(findupcol))
@clock
def test_count_unique_extended(self):
count_unique = self._test_dc.count_unique()
self.assertIsInstance(count_unique, pd.Series)
self.assertEqual(count_unique.id, len(self._test_dc.data.id))
self.assertEqual(count_unique.constant_col, 1)
self.assertEqual(count_unique.num_factor, len(
pd.unique(self._test_dc.data.num_factor)))
@clock
def test_structure(self):
structure = self._test_dc.structure()
self.assertIsInstance(structure, pd.DataFrame)
self.assertEqual(len(self._test_dc.data),
structure.loc['na_col', 'nb_missing'])
self.assertEqual(len(self._test_dc.data), structure.loc[
'id', 'nb_unique_values'])
self.assertTrue(structure.loc['id', 'is_key'])
@clock
def test_nearzerovar(self):
nearzerovar = self._test_dc.nearzerovar(save_metrics=True)
self.assertIsInstance(nearzerovar, pd.DataFrame)
self.assertIn('nearzerovar_variable', cserie(nearzerovar.nzv))
self.assertIn('constant_col', cserie(nearzerovar.nzv))
self.assertIn('na_col', cserie(nearzerovar.nzv))
class TestNaImputer(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" creating test data set for the test module """
cls._test_na = NaImputer(data=create_test_df())
@clock
def test_fillna_serie(self):
test_serie = pd.Series([1, 3, np.nan, 5])
self.assertIsInstance(
self._test_na.fillna_serie(test_serie), pd.Series)
self.assertEqual(self._test_na.fillna_serie(test_serie)[2], 3.0)
@clock
def test_fillna_serie(self):
test_char_variable = self._test_na.fillna_serie('character_variable_fillna')
test_num_variable = self._test_na.fillna_serie('numeric_variable_fillna')
self.assertTrue(test_char_variable.notnull().any())
self.assertTrue(test_num_variable.notnull().any())
self.assertTrue((pd.Series(
['A'] * 300 + ['B'] * 200 + ['C'] * 200 + ['A'] * 300) == test_char_variable).all())
self.assertTrue(
(pd.Series([1] * 400 + [3] * 400 + [2] * 200) == test_num_variable).all())
@clock
def test_fill_low_na(self):
df_fill_low_na = self._test_na.basic_naimputation(columns_to_process=['character_variable_fillna',
'numeric_variable_fillna'])
df_fill_low_na_threshold = self._test_na.basic_naimputation(threshold=0.4)
self.assertIsInstance(df_fill_low_na, pd.DataFrame)
self.assertIsInstance(df_fill_low_na_threshold, pd.DataFrame)
self.assertTrue((pd.Series(['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [
'A'] * 300) == df_fill_low_na.character_variable_fillna).all())
self.assertTrue((pd.Series([1] * 400 + [3] * 400 + [2] * 200)
== df_fill_low_na.numeric_variable_fillna).all())
self.assertTrue((pd.Series(['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [
'A'] * 300) == df_fill_low_na_threshold.character_variable_fillna).all())
self.assertTrue((pd.Series([1] * 400 + [3] * 400 + [2] * 200)
== df_fill_low_na_threshold.numeric_variable_fillna).all())
self.assertTrue(
sum(pd.isnull(df_fill_low_na_threshold.many_missing_70)) == 700)
class TestOutliersDetection(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" creating test data set for the test module """
cls.data = create_test_df()
cls.outlier_d = OutliersDetection(cls.data)
@clock
def test_outlier_detection_serie_1d(self):
strong_cutoff = self.outlier_d.strong_cutoff
df_outliers = self.outlier_d.outlier_detection_serie_1d('outlier', strong_cutoff)
self.assertIn(1, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
self.assertNotIn(10, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
self.assertIn(100, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
self.assertNotIn(2, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
@clock
def test_outlier_detection_serie_1d_with_na(self):
strong_cutoff = self.outlier_d.strong_cutoff
df_outliers = self.outlier_d.outlier_detection_serie_1d('outlier_na', strong_cutoff)
self.assertIn(1, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
self.assertNotIn(10, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
self.assertIn(100, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
self.assertNotIn(2, cserie(df_outliers.loc[:, 'is_outlier'] == 1))
class TestHelper(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" creating test data set for the test module """
cls.data = create_test_df()
@clock
def test_random_pmf(self):
self.assertAlmostEqual(len(random_pmf(10)), 10)
self.assertAlmostEqual(random_pmf(10).sum(), 1)
@clock
def test_simu(self):
pmf = random_pmf(4)
samples_unique = simu((np.array(['A', 'B']), np.array([0, 1])), 10)
self.assertTrue((samples_unique == 'B').all())
# class TestGetData(unittest.TestCase):
#
# @clock
# def test_getdata_titanic(self):
# """ Test if downloading titanic data is working """
# titanic = get_dataset('titanic')
# self.assertIsInstance(titanic, pd.DataFrame)
# self.assertEqual(titanic.shape[0], 891)
# self.assertEqual(titanic.shape[1], 15)
# Adding new tests sets
# def suite():
# suite = unittest.TestSuite()
# suite.addTest(TestPandasPatch('test_default_size'))
# return suite
# Other solution than calling main
#suite = unittest.TestLoader().loadTestsFromTestCase(TestPandasPatch)
#unittest.TextTestRunner(verbosity = 1 ).run(suite)
if __name__ == "__main__":
unittest.main(exit=False)
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
243
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/autoc/naimputer.py
|
from autoc.explorer import DataExploration, pd
from autoc.utils.helpers import cserie
import seaborn as sns
import matplotlib.pyplot as plt
#from autoc.utils.helpers import cached_property
from autoc.utils.corrplot import plot_corrmatrix
import numpy as np
from scipy.stats import ttest_ind
from scipy.stats.mstats import ks_2samp
def missing_map(df, nmax=100, verbose=True, yticklabels=False, figsize=(15, 11), *args, **kwargs):
""" Returns missing map plot like in amelia 2 package in R """
f, ax = plt.subplots(figsize=figsize)
df_s = df.sample(n=nmax) if nmax < df.shape[0] else df  # sample rows if dataframe too big
return sns.heatmap(df_s.isnull(), yticklabels=yticklabels, vmax=1, *args, **kwargs)
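# Usage sketch (editor's addition, hedged): plotting the missing map on the synthetic
# frame from autoc.utils.helpers; seaborn/matplotlib must be available, as above.
if __name__ == "__main__":
    from autoc.utils.helpers import create_test_df
    missing_map(create_test_df(), nmax=200)
    plt.show()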
# class ColumnNaInfo
class NaImputer(DataExploration):
def __init__(self, *args, **kwargs):
super(NaImputer, self).__init__(*args, **kwargs)
self.get_data_isna()
@property
def nacols(self):
""" Returns a list of column with at least one missing values """
return cserie(self.nacolcount().Nanumber > 0)
@property
def nacols_i(self):
""" Returns the index of column with at least one missing values """
return cserie(self.nacolcount().Nanumber > 0)
def get_overlapping_matrix(self, normalize=True):
""" Look at missing values overlapping """
arr = self.data_isna.astype('float').values
arr = np.dot(arr.T, arr)
if normalize:
arr = arr / (arr.max(axis=1)[:, None])
index = self.nacols
res = pd.DataFrame(index=index, data=arr, columns=index)
return res
def infos_na(self, na_low=0.05, na_high=0.90):
""" Returns a dict with various infos about missing values """
infos = {}
infos['nacolcount'] = self.nacolcount()
infos['narowcount'] = self.narowcount()
infos['nb_total_na'] = self.total_missing
infos['many_na_col'] = self.manymissing(pct=na_high)
infos['low_na_col'] = cserie(self.nacolcount().Napercentage < na_low)
infos['total_pct_na'] = self.nacolcount().Napercentage.mean()
return infos
def get_isna(self, col):
""" Returns a dummy variable indicating in a observation of a specific col
is na or not 0 -> not na , 1 -> na """
return self.data.loc[:, col].isnull().astype(int)
@property
def data_isna_m(self):
""" Returns merged dataframe (data, data_is_na)"""
return pd.concat((self.data, self.data_isna), axis=1)
def get_data_isna(self, prefix="is_na_", filter_nna=True):
""" Returns dataset with is_na columns from the a dataframe with missing values
Parameters
----------
prefix : str
the name of the prefix that will be append to the column name.
filter_nna: bool
True if you want remove column without missing values.
"""
if not filter_nna:
cols_to_keep = self.data.columns
else:
cols_to_keep = self.nacols
data_isna = self.data.loc[:, cols_to_keep].isnull().astype(int)
data_isna.columns = ["{}{}".format(prefix, c) for c in cols_to_keep]
self.data_isna = data_isna
return self.data_isna
def get_corrna(self, *args, **kwargs):
""" Get matrix of correlation of na """
return self.data_isna.corr(*args, **kwargs)
def corrplot_na(self, *args, **kwargs):
""" Returns a corrplot of data_isna """
print("This function is deprecated")
plot_corrmatrix(self.data_isna, *args, **kwargs)
def plot_corrplot_na(self, *args, **kwargs):
""" Returns a corrplot of data_isna """
plot_corrmatrix(self.data_isna, *args, **kwargs)
def plot_density_m(self, colname, subset=None, prefix="is_na_", size=6, *args, **kwargs):
""" Plot conditionnal density plot from all columns or subset based on
is_na_colname 0 or 1"""
colname_na = prefix + colname
density_columns = self.data.columns if subset is None else subset
# filter only numeric values and different values from is_na_col
density_columns = [c for c in density_columns if (
c in self._dfnum and c != colname)]
print(density_columns)
for col in density_columns:
g = sns.FacetGrid(data=self.data_isna_m, col=colname_na, hue=colname_na,
size=size, *args, **kwargs)
g.map(sns.distplot, col)
def get_isna_mean(self, colname, prefix="is_na_"):
""" Returns empirical conditional expectatation, std, and sem of other numerical variable
for a certain colname with 0:not_a_na 1:na """
na_colname = "{}{}".format(prefix, colname)
cols_to_keep = list(self.data.columns) + [na_colname]
measure_var = self.data.columns.tolist()
measure_var = [c for c in measure_var if c != colname]
functions = ['mean', 'std', 'sem']
return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname)[measure_var].agg(functions).transpose()
def get_isna_ttest_s(self, colname_na, colname, type_test="ks"):
""" Returns tt test for colanme-na and a colname """
index_na = self.data.loc[:, colname_na].isnull()
measure_var = self.data.loc[:, colname].dropna() # drop na vars
if type_test == "ttest":
return ttest_ind(measure_var[index_na], measure_var[~index_na])
elif type_test == "ks":
return ks_2samp(measure_var[index_na], measure_var[~index_na])
def get_isna_ttest(self, colname_na, type_test="ks"):
res = pd.DataFrame()
col_to_compare = [c for c in self._dfnum if c !=
colname_na] # remove colname_na
for col in col_to_compare:
ttest = self.get_isna_ttest_s(colname_na, col, type_test=type_test)
res.loc[col, 'pvalue'] = ttest[1]
res.loc[col, 'statistic'] = ttest[0]
res.loc[col, 'type_test'] = type_test
return res
def isna_summary(self, colname, prefix="is_na_"):
""" Returns summary from one col with describe """
na_colname = "{}{}".format(prefix, colname)
cols_to_keep = list(self.data.columns) + [na_colname]
return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname).describe().transpose()
def delete_narows(self, pct, index=False, verbose=False):
""" Delete rows with more na percentage than > perc in data
Return the index
Arguments
---------
pct : float
percentage of missing values, rows with more na percentage
than > perc are deleted
index : bool, default False
True if you want an index and not a Dataframe
verbose : bool, default False
True if you want to see percentage of data discarded
Returns
--------
- a pandas Dataframe with rows deleted if index=False, otherwise the index of
rows to delete
"""
index_missing = self.manymissing(pct=pct, axis=1)
pct_missing = float(index_missing.sum()) / len(self.data.index)
if verbose:
print("There is {0:.2%} rows matching conditions".format(
pct_missing))
if not index:
return self.data.loc[~index_missing, :]
else:
return index_missing
def fillna_serie(self, colname, threshold_factor=0.1, special_value=None, date_method='ffill'):
""" fill values in a serie default with the mean for numeric or the most common
factor for categorical variable"""
if special_value is not None:
# "Missing for example"
return self.data.loc[:, colname].fillna(special_value)
elif self.data.loc[:, colname].dtype == float:
# fill with median
return self.data.loc[:, colname].fillna(self.data.loc[:, colname].median())
elif self.is_int_factor(colname, threshold_factor):
return self.data.loc[:, colname].fillna(self.data.loc[:, colname].mode()[0])
# fillna for datetime with the method provided by pandas
elif self.data.loc[:, colname].dtype == '<M8[ns]':
return self.data.loc[:, colname].fillna(method=date_method)
else:
# Fill with most common value
return self.data.loc[:, colname].fillna(self.data.loc[:, colname].value_counts().index[0])
def basic_naimputation(self, columns_to_process=[], threshold=None):
""" this function will return a dataframe with na value replaced int
the columns selected by the mean or the most common value
Arguments
---------
- columns_to_process : list of columns name with na values you wish to fill
with the fillna_serie function
Returns
--------
- a pandas DataFrame with the columns_to_process filled by fillna_serie
"""
# self.data = self.df.copy()
if threshold:
columns_to_process = columns_to_process + cserie(self.nacolcount().Napercentage < threshold)
self.data.loc[:, columns_to_process] = self.data.loc[
:, columns_to_process].apply(lambda x: self.fillna_serie(colname=x.name))
return self.data
def split_tt_na(self, colname, index=False):
""" Split the dataset returning the index of test , train """
index_na = self.data.loc[:, colname].isnull()
index_test = (index_na == True)
index_train = (index_na == False)
if index:
return index_test, index_train
else:
return self.data.loc[index_test, :], self.data.loc[index_train, :]
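# Usage sketch (editor's addition, hedged): a minimal NaImputer workflow on the
# synthetic test frame; column names come from autoc.utils.helpers.create_test_df.
if __name__ == "__main__":
    from autoc.utils.helpers import create_test_df
    imp = NaImputer(data=create_test_df())
    print(imp.nacols)                                # columns with at least one missing value
    print(imp.get_corrna().shape)                    # correlation matrix of the is_na indicators
    cleaned = imp.basic_naimputation(threshold=0.4)  # impute columns with < 40% missing
    print(cleaned.isnull().sum().head())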
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
244
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/autoc/exceptions.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : File with all custom exceptions
"""
class NotNumericColumn(Exception):
""" The column should be numeric """
pass
class NumericError(Exception):
""" The column should not be numeric """
pass
# class NotFactor
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
245
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/autoc/__init__.py
|
__all__ = ["explorer", "naimputer"]
from .explorer import DataExploration
from .naimputer import NaImputer
from .preprocess import PreProcessor
from .utils.getdata import get_dataset
# from .preprocess import PreProcessor
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
246
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/autoc/explorer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : This is a framework for modelling with pandas, numpy and scikit-learn.
The goal of this module is to rely on a dataframe structure for modelling
"""
#########################################################
# Import modules and global helpers
#########################################################
import pandas as pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from pprint import pprint
from .exceptions import NotNumericColumn
class DataExploration(object):
"""
This class is designed to provide infos about the dataset such as
number of missing values, number of unique values, constant columns,
long strings ...
For the most useful methods it will store the result in instance attributes.
When you use such a method again the output is served from the instance attribute,
so you don't have to compute the result twice.
"""
def __init__(self, data, copy=False):
"""
Parameters
----------
data : pandas.DataFrame
the data you want explore
copy: bool
True if you want make a copy of DataFrame, default False
Examples
--------
explorer = DataExploration(data = your_DataFrame)
explorer.structure() : global structure of your DataFrame
explorer.psummary() to get a global snapshot of the different issues detected
data_cleaned = explorer.basic_cleaning() to clean your data.
"""
assert isinstance(data, pd.DataFrame)
self.is_data_copy = copy
self.data = data if not self.is_data_copy else data.copy()
# if not self.label:
# print("""the label column is empty the data will be considered
# as a dataset of predictors""")
self._nrow = len(self.data.index)
self._ncol = len(self.data.columns)
self._dfnumi = (self.data.dtypes == float) | (
self.data.dtypes == int)
self._dfnum = cserie(self._dfnumi)
self._dfchari = (self.data.dtypes == object)
self._dfchar = cserie(self._dfchari)
self._nacolcount = pd.DataFrame()
self._narowcount = pd.DataFrame()
self._count_unique = pd.DataFrame()
self._constantcol = []
self._dupcol = []
self._nearzerovar = pd.DataFrame()
self._corrcolumns = []
self._dict_info = {}
self._structure = pd.DataFrame()
self._string_info = ""
self._list_other_na = {'unknown', 'na',
'missing', 'n/a', 'not available'}
# def get_label(self):
# """ return the Serie of label you want predict """
# if not self.label:
# print("""the label column is empty the data will be considered
# as a dataset of predictors""")
# return self.data[self.label]
def is_numeric(self, colname):
"""
Returns True if the type of the column is numeric, else False
Parameters
----------
colname : str
the name of the column of the self.data
Notes
------
df._get_numeric_data() is a primitive from pandas
to get only numeric data
"""
dtype_col = self.data.loc[:, colname].dtype
return (dtype_col == int) or (dtype_col == float)
def is_int_factor(self, colname, threshold=0.1):
"""
Returns True if the column is an integer column with few unique values (an 'int factor'), else False
Parameters
----------
colname : str
the name of the column of the self.data
threshold : float
colname is an 'int_factor' if the number of
unique values < threshold * nrows
"""
dtype_col = self.data.loc[:, colname].dtype
if dtype_col == int and self.data.loc[:, colname].nunique() <= (threshold * self.data.shape[0]):
return True
else:
return False
def to_lowercase(self):
""" Returns a copy of dataset with data to lower """
return self.data.applymap(lambda x: x.lower() if type(x) == str else x)
def where_numeric(self):
""" Returns a Boolean Dataframe with True for numeric values False for other """
return self.data.applymap(lambda x: isinstance(x, (int, float)))
def count_unique(self):
""" Return a serie with the number of unique value per columns """
if len(self._count_unique):
return self._count_unique
self._count_unique = self.data.apply(lambda x: x.nunique(), axis=0)
return self._count_unique
def sample_df(self, pct=0.05, nr=10, threshold=None):
""" sample a number of rows of a dataframe = min(max(0.05*nrow(self,nr),threshold)"""
a = max(int(pct * float(len(self.data.index))), nr)
if threshold:
a = min(a, threshold)
return self.data.loc[permutation(self.data.index)[:a],:]
def sign_summary(self, subset=None):
"""
Returns the number and percentage of positive and negative values in
a column, a subset of columns or all numeric columns of the dataframe.
Parameters
----------
subset : label or list
Column name or list of column names to check.
Returns
-------
summary : pandas.Series or pandas.DataFrame
Summary of the signs present in the subset
"""
if subset:
subs = subset if isinstance(subset, list) else [subset]
if sum(col not in self._dfnum for col in subs) > 0:
raise NotNumericColumn('At least one of the columns you passed ' \
'as argument is not numeric.')
else:
subs = self._dfnum
summary = pd.DataFrame(columns=['NumOfNegative', 'PctOfNegative',
'NumOfPositive', 'PctOfPositive'])
summary['NumOfPositive'] = self.data[subs].apply(lambda x: (x >= 0).sum(), axis=0)
summary['NumOfNegative'] = self.data[subs].apply(lambda x: (x <= 0).sum(), axis=0)
summary['PctOfPositive'] = summary['NumOfPositive'] / len(self.data)
summary['PctOfNegative'] = summary['NumOfNegative'] / len(self.data)
return summary
@property
def total_missing(self):
""" Count the total number of missing values """
# return np.count_nonzero(self.data.isnull().values) # optimized for
# speed
return self.nacolcount().Nanumber.sum()
def nacolcount(self):
""" count the number of missing values per columns """
if len(self._nacolcount):
return self._nacolcount
self._nacolcount = self.data.isnull().sum(axis=0)
self._nacolcount = pd.DataFrame(self._nacolcount, columns=['Nanumber'])
self._nacolcount['Napercentage'] = self._nacolcount[
'Nanumber'] / (self._nrow)
return self._nacolcount
def narowcount(self):
""" count the number of missing values per columns """
if len(self._narowcount):
return self._narowcount
self._narowcount = self.data.isnull().sum(axis=1)
self._narowcount = pd.DataFrame(
self._narowcount, columns=['Nanumber'])
self._narowcount['Napercentage'] = self._narowcount[
'Nanumber'] / (self._ncol)
return self._narowcount
def detect_other_na(self, verbose=True, auto_replace=False):
""" Detect missing values encoded by the creator of the dataset
like 'Missing', 'N/A' ...
Parameters
----------
verbose : bool
True if you want to print some infos
auto_replace: bool
True if you want replace this value by np.nan, default False
Returns
-------
an DataFrame of boolean if not auto_replace else cleaned DataFrame with
self._list_other_na replaced by np.nan
Notes
------
* You can use na_values parameter in pandas.read_csv to specify the missing
values to convert to nan a priori
* Speed can be improved
"""
res = self.to_lowercase().applymap(lambda x: x in self._list_other_na)
print("We detected {} other type of missing values".format(res.sum().sum()))
if auto_replace:
return self.data.where((res == False), np.nan)
else:
return res
@property
def nacols_full(self):
""" Returns a list of columns with only missing values """
return cserie(self.nacolcount().Nanumber == self._nrow)
@property
def narows_full(self):
""" Returns an index of rows with only missing values """
return self.narowcount().Nanumber == self._ncol
# def manymissing2(self, pct=0.9, axis=0, index=False):
# """ identify columns of a dataframe with many missing values ( >= a), if
# row = True row either.
# - the output is a index """
# if axis == 1:
# self.manymissing = self.narowcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# elif axis == 0:
# self.manymissing = self.nacolcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# else:
# raise ValueError
# if index:
# return manymissing
# else:
# return cserie(manymissing)
def manymissing(self, pct=0.9, axis=0):
""" identify columns of a dataframe with many missing values ( >= pct), if
row = True row either.
- the output is a list """
if axis == 1:
self._manymissingrow = self.narowcount()
self._manymissingrow = self._manymissingrow['Napercentage'] >= pct
return self._manymissingrow
elif axis == 0:
self._manymissingcol = self.nacolcount()
self._manymissingcol = cserie(
self._manymissingcol['Napercentage'] >= pct)
return self._manymissingcol
else:
raise ValueError("Axis should be 1 for rows and o for columns")
def df_len_string(self, drop_num=False):
""" Return a Series with the max of the length of the string of string-type columns """
if drop_num:
return self.data.drop(self._dfnum, axis=1).apply(lambda x: np.max(x.str.len()), axis=0)
else:
return self.data.apply(lambda x: np.max(x.str.len()) if x.dtype.kind =='O' else np.nan , axis=0)
def detectkey(self, index_format=False, pct=0.15, dropna=False, **kwargs):
""" identify id or key columns as an index if index_format = True or
as a list if index_format = False """
if not dropna:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: len(x.unique()) == len(x), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] = self.data.loc[:, is_key_index].apply(
lambda x: len(x.unique()) == len(x), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
else:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: x.nunique() == len(x.dropna()), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] = self.data.loc[:, is_key_index].apply(
lambda x: x.nunique() == len(x.dropna()), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
def constantcol(self, **kwargs):
""" identify constant columns """
# sample to reduce computation time
if len(self._constantcol):
return self._constantcol
col_to_keep = self.sample_df(
**kwargs).apply(lambda x: len(x.unique()) == 1, axis=0)
if len(cserie(col_to_keep)) == 0:
return []
self._constantcol = cserie(self.data.loc[:, col_to_keep].apply(
lambda x: len(x.unique()) == 1, axis=0))
return self._constantcol
def constantcol2(self, **kwargs):
""" identify constant columns """
return cserie((self.data == self.data.ix[0]).all())
def factors(self, nb_max_levels=10, threshold_value=None, index=False):
""" return a list of the detected factor variable, detection is based on
ther percentage of unicity perc_unique = 0.05 by default.
We follow here the definition of R factors variable considering that a
factor variable is a character variable that take value in a list a levels
this is a bad implementation
Arguments
----------
nb_max_levels: the mac nb of levels you fix for a categorical variable
threshold_value : the nb of of unique value in percentage of the dataframe length
index : if you want the result as an index or a list
"""
if threshold_value:
max_levels = max(nb_max_levels, threshold_value * self._nrow)
else:
max_levels = nb_max_levels
def helper_factor(x, num_var=self._dfnum):
unique_value = set()
if x.name in num_var:
return False
else:
for e in x.values:
if len(unique_value) >= max_levels:
return False
else:
unique_value.add(e)
return True
if index:
return self.data.apply(lambda x: helper_factor(x))
else:
return cserie(self.data.apply(lambda x: helper_factor(x)))
@staticmethod
def serie_quantiles(array, nb_quantiles=10):
binq = 1.0 / nb_quantiles
if type(array) == pd.Series:
return array.quantile([binq * i for i in range(nb_quantiles + 1)])
elif type(array) == np.ndarray:
return np.percentile(array, [binq * i for i in range(nb_quantiles + 1)])
else:
raise TypeError("the type of your array is not supported")
def dfquantiles(self, nb_quantiles=10, only_numeric=True):
""" this function gives you a all the quantiles
of the numeric variables of the dataframe
only_numeric will calculate it only for numeric variables,
for only_numeric = False you will get NaN value for non numeric
variables """
binq = 1.0 / nb_quantiles
if only_numeric:
return self.data.loc[:, self._dfnumi].quantile([binq * i for i in range(nb_quantiles + 1)])
else:
return self.data.quantile([binq * i for i in range(nb_quantiles + 1)])
def numeric_summary(self):
""" provide a more complete sumary than describe, it is using only numeric
value """
df = self.data.loc[:, self._dfnumi]
func_list = [df.count(), df.min(), df.quantile(0.25),
df.quantile(0.5), df.mean(),
df.std(), df.mad(), df.skew(),
df.kurt(), df.quantile(0.75), df.max()]
results = [f for f in func_list]
return pd.DataFrame(results, index=['Count', 'Min', 'FirstQuartile',
'Median', 'Mean', 'Std', 'Mad', 'Skewness',
'Kurtosis', 'Thirdquartile', 'Max']).T
def infer_types(self):
""" this function will try to infer the type of the columns of data"""
return self.data.apply(lambda x: pd.lib.infer_dtype(x.values))
def structure(self, threshold_factor=10):
""" this function return a summary of the structure of the pandas DataFrame
data looking at the type of variables, the number of missing values, the
number of unique values """
if len(self._structure):
return self._structure
dtypes = self.data.dtypes
nacolcount = self.nacolcount()
nb_missing = nacolcount.Nanumber
perc_missing = nacolcount.Napercentage
nb_unique_values = self.count_unique()
dtype_infer = self.infer_types()
dtypes_r = self.data.apply(lambda x: "character")
dtypes_r[self._dfnumi] = "numeric"
dtypes_r[(dtypes_r == 'character') & (
nb_unique_values <= threshold_factor)] = 'factor'
constant_columns = (nb_unique_values == 1)
na_columns = (perc_missing == 1)
is_key = nb_unique_values == self._nrow
string_length = self.df_len_string(drop_num=False)
# is_key_na = ((nb_unique_values + nb_missing) == self.nrow()) & (~na_columns)
dict_str = {'dtypes_r': dtypes_r, 'perc_missing': perc_missing,
'nb_missing': nb_missing, 'is_key': is_key,
'nb_unique_values': nb_unique_values, 'dtypes_p': dtypes,
'constant_columns': constant_columns, 'na_columns': na_columns,
'dtype_infer': dtype_infer, 'string_length': string_length}
self._structure = pd.concat(dict_str, axis=1)
self._structure = self._structure.loc[:, ['dtypes_p', 'dtypes_r', 'nb_missing', 'perc_missing',
'nb_unique_values', 'constant_columns',
'na_columns', 'is_key', 'dtype_infer', 'string_length']]
return self._structure
def findupcol(self, threshold=100, **kwargs):
""" find duplicated columns and return the result as a list of list """
df_s = self.sample_df(threshold=threshold, **kwargs).T
dup_index_s = (df_s.duplicated()) | (
df_s.duplicated(keep='last'))
if len(cserie(dup_index_s)) == 0:
return []
df_t = (self.data.loc[:, dup_index_s]).T
dup_index = df_t.duplicated()
dup_index_complet = cserie(
(dup_index) | (df_t.duplicated(keep='last')))
l = []
for col in cserie(dup_index):
index_temp = self.data[dup_index_complet].apply(
lambda x: (x == self.data[col])).sum() == self._nrow
temp = list(self.data[dup_index_complet].columns[index_temp])
l.append(temp)
self._dupcol = l
return self._dupcol
def finduprow(self, subset=[]):
""" find duplicated rows and return the result a sorted dataframe of all the
duplicates
subset is a list of columns to look for duplicates from this specific subset .
"""
if sum(self.data.duplicated()) == 0:
print("there is no duplicated rows")
else:
if subset:
dup_index = (self.data.duplicated(subset=subset)) | (
self.data.duplicated(subset=subset, keep='last'))
else:
dup_index = (self.data.duplicated()) | (
self.data.duplicated(keep='last'))
if subset:
return self.data[dup_index].sort(subset)
else:
return self.data[dup_index].sort(self.data.columns[0])
def nearzerovar(self, freq_cut=95 / 5, unique_cut=10, save_metrics=False):
""" identify predictors with near-zero variance.
freq_cut: cutoff ratio of frequency of most common value to second
most common value.
unique_cut: cutoff percentage of unique value over total number of
samples.
save_metrics: if False, print dataframe and return NON near-zero var
col indexes, if True, returns the whole dataframe.
"""
nb_unique_values = self.count_unique()
percent_unique = 100 * nb_unique_values / self._nrow
def helper_freq(x):
if nb_unique_values[x.name] == 0:
return 0.0
elif nb_unique_values[x.name] == 1:
return 1.0
else:
return float(x.value_counts().iloc[0]) / x.value_counts().iloc[1]
freq_ratio = self.data.apply(helper_freq)
zerovar = (nb_unique_values == 0) | (nb_unique_values == 1)
nzv = ((freq_ratio >= freq_cut) & (
percent_unique <= unique_cut)) | (zerovar)
if save_metrics:
return pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio, 'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns)
else:
print(pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio,
'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns))
return nzv[nzv == True].index
def findcorr(self, cutoff=.90, method='pearson', data_frame=False, print_mode=False):
"""
implementation of the Recursive Pairwise Elimination.
The function finds the highest correlated pair and removes the most
highly correlated feature of the pair, then repeats the process
until the threshold 'cutoff' is reached.
will return a dataframe if 'data_frame' is set to True, and the list
of predictors to remove otherwise.
Adaptation of 'findCorrelation' function in the caret package in R.
"""
res = []
df = self.data.copy(0)
cor = df.corr(method=method)
for col in cor.columns:
cor[col][col] = 0
max_cor = cor.max()
if print_mode:
print(max_cor.max())
while max_cor.max() > cutoff:
A = max_cor.idxmax()
B = cor[A].idxmax()
if cor[A].mean() > cor[B].mean():
cor.drop(A, 1, inplace=True)
cor.drop(A, 0, inplace=True)
res += [A]
else:
cor.drop(B, 1, inplace=True)
cor.drop(B, 0, inplace=True)
res += [B]
max_cor = cor.max()
if print_mode:
print(max_cor.max())
self._corrcolumns = res
if data_frame:
return df.drop(res, 1)
else:
return res
def get_infos_consistency(self):
""" Update self._dict_info and returns infos about duplicates rows and cols,
constant col,narows and cols """
infos = {'duplicated_rows': {'value': cserie(self.data.duplicated(), index=True), 'level': 'ERROR',
'action': 'delete','comment': 'You should delete this rows with df.drop_duplicates()'},
'dup_columns': {'value': self.findupcol(), 'level': 'ERROR',
'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.findupcol())},
'constant_columns': {'value': self.constantcol(), 'level': 'WARNING',
'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.constantcol())},
'narows_full': {'value': cserie(self.narows_full), 'level': 'ERROR',
'action': 'delete','comment': 'You should delete this rows with df.drop_duplicates()'},
'nacols_full': {'value': self.nacols_full, 'level': 'ERROR',
'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.nacols_full)}
}
# update
self._dict_info.update(infos)
return infos
def get_infos_na(self, manymissing_ph=0.9, manymissing_pl=0.05):
""" Update self._dict_info and returns infos about duplicates rows and cols,
constant col, narows and cols """
nacolcount_p = self.nacolcount().Napercentage
infos = {'nb_total_missing': {'value': self.total_missing, 'level': 'INFO', 'action': None},
'pct_total_missing': {'value': float(self.total_missing) / self._nrow, 'level': 'INFO', 'action': None},
'many_na_columns': {'value': cserie((nacolcount_p > manymissing_ph)), 'level': 'ERROR', 'action': 'delete or impute'},
'low_na_columns': {'value': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)), 'level': 'WARNING', 'action': 'impute'},
}
# update
self._dict_info.update(infos)
return infos
def print_infos(self, infos="consistency", print_empty=False):
""" pprint of get_infos
Parameters
----------
print_empty: bool:
False if you don't want print the empty infos (
no missing colum for example)"""
if infos == "consistency":
dict_infos = self.get_infos_consistency()
if not print_empty:
dict_infos = {k: v for k, v in dict_infos.items() if len(v['value']) > 0}
pprint(dict_infos)
def psummary(self, manymissing_ph=0.70, manymissing_pl=0.05, nzv_freq_cut=95 / 5, nzv_unique_cut=10,
threshold=100, string_threshold=40, dynamic=False):
"""
This function will print a summary of the dataset, based on functions
designed in this package
- Output : python print
It will store the string output and the dictionary of results in private variables
"""
nacolcount_p = self.nacolcount().Napercentage
if dynamic:
print('there are {0} duplicated rows\n'.format(
self.data.duplicated().sum()))
print('the columns with more than {0:.2%} manymissing values:\n{1} \n'.format(manymissing_ph,
cserie((nacolcount_p > manymissing_ph))))
print('the columns with less than {0:.2%} manymissing values are :\n{1} \n you should fill them with median or most common value \n'.format(
manymissing_pl, cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl))))
print('the detected keys of the dataset are:\n{0} \n'.format(
self.detectkey()))
print('the duplicated columns of the dataset are:\n{0}\n'.format(
self.findupcol(threshold=100)))
print('the constant columns of the dataset are:\n{0}\n'.format(
self.constantcol()))
print('the columns with nearzerovariance are:\n{0}\n'.format(
list(cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv))))
print('the columns highly correlated to others to remove are:\n{0}\n'.format(
self.findcorr(data_frame=False)))
print('these columns contains big strings :\n{0}\n'.format(
cserie(self.df_len_string() > string_threshold)))
else:
self._dict_info = {'nb_duplicated_rows': np.sum(self.data.duplicated()),
'many_missing_percentage': manymissing_ph,
'manymissing_columns': cserie((nacolcount_p > manymissing_ph)),
'low_missing_percentage': manymissing_pl,
'lowmissing_columns': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)),
'keys_detected': self.detectkey(),
'dup_columns': self.findupcol(threshold=100),
'constant_columns': self.constantcol(),
'nearzerovar_columns': cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv),
'high_correlated_col': self.findcorr(data_frame=False),
'big_strings_col': cserie(self.df_len_string() > string_threshold)
}
self._string_info = u"""
there are {nb_duplicated_rows} duplicated rows\n
the columns with more than {many_missing_percentage:.2%} manymissing values:\n{manymissing_columns} \n
the columns with less than {low_missing_percentage:.2%} manymissing values are :\n{lowmissing_columns} \n
you should fill them with median or most common value\n
the detected keys of the dataset are:\n{keys_detected} \n
the duplicated columns of the dataset are:\n{dup_columns}\n
the constant columns of the dataset are:\n{constant_columns}\n
the columns with nearzerovariance are:\n{nearzerovar_columns}\n
the columns highly correlated to others to remove are:\n{high_correlated_col}\n
these columns contains big strings :\n{big_strings_col}\n
""".format(**self._dict_info)
print(self._string_info)
def metadata(self):
""" Return a dict/json full of infos about the dataset """
meta = {}
meta['mem_size'] = self.data.memory_usage(index=True).sum() # in bytes
meta['columns_name'] = self.data.columns.tolist()
meta['columns_name_n'] = [e.lower() for e in self.data.columns]
meta['nb_rows'] = self.data.shape[0]
meta['nb_columns'] = self.data.shape[1]
# drop dtype_p for mongodb compatibility
structure_data = self.structure().drop(labels='dtypes_p', axis=1)
structure_data = structure_data.to_dict('index')
meta['structure'] = structure_data
meta['numeric_summary'] = self.numeric_summary().to_dict('index')
return meta
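# Usage sketch (editor's addition, hedged): a quick tour of DataExploration on the
# synthetic frame shipped with the package helpers; this mirrors the unittest suite.
if __name__ == "__main__":
    from autoc.utils.helpers import create_test_df
    explorer = DataExploration(data=create_test_df())
    print(explorer.nacolcount().head())   # number / percentage of missing values per column
    print(explorer.detectkey())           # candidate key columns such as 'id', 'member_id'
    print(explorer.constantcol())         # constant columns
    print(explorer.structure().head())    # per-column dtype, missing and unique counts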
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
247
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/autoc/outliersdetection.py
|
"""
@author: efourrier
Purpose : This is a simple experimental class to detect outliers. This class
can be used to detect missing values encoded as outlier (-999, -1, ...)
"""
from autoc.explorer import DataExploration, pd
import numpy as np
#from autoc.utils.helpers import cserie
from autoc.exceptions import NotNumericColumn
def iqr(ndarray, dropna=True):
if dropna:
ndarray = ndarray[~np.isnan(ndarray)]
return np.percentile(ndarray, 75) - np.percentile(ndarray, 25)
def z_score(ndarray, dropna=True):
if dropna:
ndarray = ndarray[~np.isnan(ndarray)]
return (ndarray - np.mean(ndarray)) / (np.std(ndarray))
def iqr_score(ndarray, dropna=True):
if dropna:
ndarray = ndarray[~np.isnan(ndarray)]
return (ndarray - np.median(ndarray)) / (iqr(ndarray))
def mad_score(ndarray, dropna=True):
if dropna:
ndarray = ndarray[~np.isnan(ndarray)]
return (ndarray - np.median(ndarray)) / (np.median(np.absolute(ndarray - np.median(ndarray))) / 0.6745)
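# Worked example (editor's addition, hedged): the three scores on a toy array with one
# obvious outlier; nan values are dropped before computing each score.
if __name__ == "__main__":
    toy = np.array([1.0, 2.0, 2.0, 2.5, 3.0, 100.0, np.nan])
    print(z_score(toy))    # (x - mean) / std
    print(iqr_score(toy))  # (x - median) / IQR
    print(mad_score(toy))  # (x - median) / (MAD / 0.6745)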
class OutliersDetection(DataExploration):
"""
this class focuses on identifying outliers
Parameters
----------
data : DataFrame
Examples
--------
* od = OutliersDetection(data = your_DataFrame)
* od.structure() : global structure of your DataFrame
"""
def __init__(self, *args, **kwargs):
super(OutliersDetection, self).__init__(*args, **kwargs)
self.strong_cutoff = {'cutoff_z': 6,
'cutoff_iqr': 6, 'cutoff_mad': 6}
self.basic_cutoff = {'cutoff_z': 3,
'cutoff_iqr': 2, 'cutoff_mad': 2}
def check_negative_value_serie(self, colname):
""" this function will detect if there is at least one
negative value and return the number of negative values
"""
if not self.is_numeric(colname):
raise NotNumericColumn("The serie should be numeric values")
return (self.data.loc[:, colname] < 0).sum()
def outlier_detection_serie_1d(self, colname, cutoff_params, scores=[z_score, iqr_score, mad_score]):
if not self.is_numeric(colname):
raise("auto-clean doesn't support outliers detection for Non numeric variable")
keys = [str(func.__name__) for func in scores]
df = pd.DataFrame(dict((key, func(self.data.loc[:, colname]))
for key, func in zip(keys, scores)))
df['is_outlier'] = 0
for s in keys:
cutoff_colname = "cutoff_{}".format(s.split('_')[0])
index_outliers = np.absolute(df[s]) >= cutoff_params[cutoff_colname]
df.loc[index_outliers, 'is_outlier'] = 1
return df
def check_negative_value(self):
""" this will return the number of negative values for each numeric
variable of the DataFrame
"""
return self.data[self._dfnum].apply(lambda x: self.check_negative_value_serie(x.name))
def outlier_detection_1d(self, cutoff_params, subset=None,
scores=[z_score, iqr_score, mad_score]):
""" Return a dictionnary with z_score,iqr_score,mad_score as keys and the
associate dataframe of distance as value of the dictionnnary"""
df = self.data.copy()
numeric_var = self._dfnum
if subset:
df = df.drop(subset, axis=1)
df = df.loc[:, numeric_var] # take only numeric variable
# if remove_constant_col:
# df = df.drop(self.constantcol(), axis = 1) # remove constant variable
df_outlier = pd.DataFrame()
for col in df:
df_temp = self.outlier_detection_serie_1d(col, cutoff_params, scores)
df_temp.columns = [col + '_' +
col_name for col_name in df_temp.columns]
df_outlier = pd.concat([df_outlier, df_temp], axis=1)
return df_outlier
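# Usage sketch (editor's addition, hedged): flagging 1d outliers of the 'outlier' column
# of the synthetic test frame with the predefined strong_cutoff thresholds.
if __name__ == "__main__":
    from autoc.utils.helpers import create_test_df
    od = OutliersDetection(create_test_df())
    scores = od.outlier_detection_serie_1d('outlier', od.strong_cutoff)
    print(scores.loc[scores['is_outlier'] == 1].head())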
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
248
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/autoc/preprocess.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : The purpose of this class is to automatically transform a DataFrame
into a numpy ndarray in order to use an algorithm
"""
#########################################################
# Import modules and global helpers
#########################################################
from autoc.explorer import DataExploration, pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from autoc.exceptions import NumericError
class PreProcessor(DataExploration):
subtypes = ['text_raw', 'text_categorical', 'ordinal', 'binary', 'other']
def __init__(self, *args, **kwargs):
super(PreProcessor, self).__init__(*args, **kwargs)
self.long_str_cutoff = 80
self.short_str_cutoff = 30
self.perc_unique_cutoff = 0.2
self.nb_max_levels = 20
def basic_cleaning(self,filter_nacols=True, drop_col=None,
filter_constantcol=True, filer_narows=True,
verbose=True, filter_rows_duplicates=True, inplace=False):
"""
Basic cleaning of the data by deleting manymissing columns,
constantcol, full missing rows, and drop_col specified by the user.
"""
col_to_remove = []
index_to_remove = []
if filter_nacols:
col_to_remove += self.nacols_full
if filter_constantcol:
col_to_remove += list(self.constantcol())
if filer_narows:
index_to_remove += cserie(self.narows_full)
if filter_rows_duplicates:
index_to_remove += cserie(self.data.duplicated())
if isinstance(drop_col, list):
col_to_remove += drop_col
elif isinstance(drop_col, str):
col_to_remove += [drop_col]
else:
pass
col_to_remove = list(set(col_to_remove))
index_to_remove = list(set(index_to_remove))
if verbose:
print("We are removing the folowing columns : {}".format(col_to_remove))
print("We are removing the folowing rows : {}".format(index_to_remove))
if inplace:
self.data = self.data.drop(index_to_remove).drop(col_to_remove, axis=1)
return self.data
else:
return self.data.drop(index_to_remove).drop(col_to_remove, axis=1)
def _infer_subtype_col(self, colname):
""" This fonction tries to infer subtypes in order to preprocess them
better for skicit learn. You can find the different subtypes in the class
variable subtypes
To be completed ....
"""
serie_col = self.data.loc[:, colname]
if serie_col.nunique() == 2:
return 'binary'
elif serie_col.dtype.kind == 'O':
if serie_col.str.len().mean() > self.long_str_cutoff and serie_col.nunique()/len(serie_col) > self.perc_unique_cutoff:
return "text_long"
elif serie_col.str.len().mean() <= self.short_str_cutoff and serie_col.nunique() <= self.nb_max_levels:
return 'text_categorical'
elif self.is_numeric(colname):
if serie_col.dtype == int and serie_col.nunique() <= self.nb_max_levels:
return "ordinal"
else :
return "other"
def infer_subtypes(self):
""" Apply _infer_subtype_col to the whole DataFrame as a dictionnary """
return {col: {'dtype': self.data.loc[:,col].dtype, 'subtype':self._infer_subtype_col(col)} for col in self.data.columns}
def infer_categorical_str(self, colname, nb_max_levels=10, threshold_value=0.01):
""" Returns True if we detect in the serie a factor variable
A string factor is based on the following caracteristics :
ther percentage of unicity perc_unique = 0.05 by default.
We follow here the definition of R factors variable considering that a
factor variable is a character variable that take value in a list a levels
Arguments
----------
nb_max_levels: int
the max nb of levels you fix for a categorical variable
threshold_value : float
the nb of of unique value in percentage of the dataframe length
"""
# False for numeric columns
if threshold_value:
max_levels = max(nb_max_levels, threshold_value * self._nrow)
else:
max_levels = nb_max_levels
if self.is_numeric(colname):
return False
# False for categorical columns
if self.data.loc[:, colname].dtype == "category":
return False
unique_value = set()
for i, v in self.data.loc[:, colname].iteritems():
if len(unique_value) >= max_levels:
return False
else:
unique_value.add(v)
return True
def get_factors(self, nb_max_levels=10, threshold_value=None, index=False):
""" Return a list of the detected factor variable, detection is based on
ther percentage of unicity perc_unique = 0.05 by default.
We follow here the definition of R factors variable considering that a
factor variable is a character variable that take value in a list a levels
this is a bad implementation
Arguments
----------
nb_max_levels: int
the max nb of levels you fix for a categorical variable.
threshold_value : float
the nb of of unique value in percentage of the dataframe length.
index: bool
False, returns a list, True if you want an index.
"""
res = self.data.apply(lambda x: self.infer_categorical_str(x.name))
if index:
return res
else:
return cserie(res)
def factors_to_categorical(self, inplace=True, verbose=True, *args, **kwargs):
factors_col = self.get_factors(*args, **kwargs)
if verbose:
print("We are converting following columns to categorical :{}".format(
factors_col))
if inplace:
self.data.loc[:, factors_col] = self.data.loc[:, factors_col].astype('category')
else:
return self.data.loc[:, factors_col].astype('category')
def remove_category(self, colname, nb_max_levels, replace_value='other', verbose=True):
""" Replace a variable with too many categories by grouping minor categories to one """
if self.data.loc[:, colname].nunique() < nb_max_levels:
if verbose:
print("{} has not been processed because levels < {}".format(
colname, nb_max_levels))
else:
if self.is_numeric(colname):
raise NumericError(
'{} is a numeric column, you cannot use this function'.format(colname))
top_levels = self.data.loc[
:, colname].value_counts()[0:nb_max_levels].index
self.data.loc[~self.data.loc[:, colname].isin(
top_levels), colname] = replace_value
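# Usage sketch (editor's addition, hedged): basic cleaning and subtype inference on the
# synthetic test frame; the exact columns dropped depend on create_test_df.
if __name__ == "__main__":
    from autoc.utils.helpers import create_test_df
    prep = PreProcessor(data=create_test_df())
    cleaned = prep.basic_cleaning(verbose=True)      # drops full-na / constant columns and duplicated rows
    print(cleaned.shape)
    print(prep.infer_subtypes()['binary_variable'])  # e.g. {'dtype': dtype('int64'), 'subtype': 'binary'}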
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
249
|
ericfourrier/auto-clean
|
refs/heads/develop
|
/autoc/utils/helpers.py
|
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Create toolbox functions to use in the different pieces of code of the package
"""
from numpy.random import normal
from numpy.random import choice
import time
import pandas as pd
import numpy as np
import functools
def print_section(section_name, width=120):
""" print centered section for reports in DataExplora"""
section_name = ' ' + section_name + ' '
print('{:=^{}}'.format(section_name, width))
# def get_dataset(name, *args, **kwargs):
# """Get a dataset from the online repo
# https://github.com/ericfourrier/autoc-datasets (requires internet).
#
# Parameters
# ----------
# name : str
# Name of the dataset 'name.csv'
# """
# path = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv".format(name)
# return pd.read_csv(path, *args, **kwargs)
def flatten_list(x):
return [y for l in x for y in flatten_list(l)] if isinstance(x, list) else [x]
def cserie(serie, index=False):
if index:
return serie[serie].index
else:
return serie[serie].index.tolist()
def removena_numpy(array):
return array[~(np.isnan(array))]
def common_cols(df1, df2):
""" Return the intersection of commun columns name """
return list(set(df1.columns) & set(df2.columns))
def bootstrap_ci(x, n=300, ci=0.95):
"""
this is a function depending on numpy to compute the bootstrap percentile
confidence interval for the mean of a numpy array
Arguments
---------
x : a numpy ndarray
n : the number of bootstrap samples
ci : the confidence level (float) in ]0,1[
Return
-------
a tuple (ci_inf,ci_up)
"""
low_per = 100 * (1 - ci) / 2
high_per = 100 * ci + low_per
x = removena_numpy(x)
if not len(x):
return (np.nan, np.nan)
bootstrap_samples = choice(a=x, size=(
len(x), n), replace = True).mean(axis = 0)
return np.percentile(bootstrap_samples, [low_per, high_per])
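# Example (illustrative): a 95% bootstrap confidence interval for the mean of a sample.
#   x = np.array([1.0, 2.0, 2.5, 3.0, 10.0])
#   ci_low, ci_up = bootstrap_ci(x, n=1000, ci=0.95)
#   # ci_low <= mean estimate <= ci_up; NaN values in x are dropped beforehand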
def clock(func):
""" decorator to measure the duration of each test of the unittest suite,
this is extensible for any kind of functions it will just add a print """
def clocked(*args):
t0 = time.time()
result = func(*args)
elapsed = (time.time() - t0) * 1000 # in ms
print('elapsed : [{0:0.3f}ms]'.format(elapsed))
return result
return clocked
def cached_property(fun):
"""A memoize decorator for class properties."""
@functools.wraps(fun)
def get(self):
try:
return self._cache[fun]
except AttributeError:
self._cache = {}
except KeyError:
pass
ret = self._cache[fun] = fun(self)
return ret
return property(get)
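# Example (illustrative): the decorated property is computed once per instance and then
# served from self._cache on later accesses.
#   class Report(object):
#       def __init__(self, df):
#           self.df = df
#       @cached_property
#       def nrow(self):
#           return len(self.df.index)   # evaluated only on the first access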
def create_test_df():
""" Creating a test pandas DataFrame for the unittest suite """
test_df = pd.DataFrame({'id': [i for i in range(1, 1001)], 'member_id': [
10 * i for i in range(1, 1001)]})
test_df['na_col'] = np.nan
test_df['id_na'] = test_df.id
test_df.loc[1:3, 'id_na'] = np.nan
test_df['constant_col'] = 'constant'
test_df['constant_col_num'] = 0
test_df['character_factor'] = [
choice(list('ABCDEFG')) for _ in range(1000)]
test_df['num_factor'] = [choice([1, 2, 3, 4]) for _ in range(1000)]
test_df['nearzerovar_variable'] = 'most_common_value'
test_df.loc[0, 'nearzerovar_variable'] = 'one_value'
test_df['binary_variable'] = [choice([0, 1]) for _ in range(1000)]
test_df['character_variable'] = [str(i) for i in range(1000)]
test_df['duplicated_column'] = test_df.id
test_df['many_missing_70'] = [1] * 300 + [np.nan] * 700
test_df['character_variable_fillna'] = ['A'] * \
300 + ['B'] * 200 + ['C'] * 200 + [np.nan] * 300
test_df['numeric_variable_fillna'] = [1] * 400 + [3] * 400 + [np.nan] * 200
test_df['num_variable'] = 100.0
test_df['int_factor_10'] = [choice(range(10)) for _ in range(1000)]
test_df['outlier'] = normal(size=1000)
test_df.loc[[1, 10, 100], 'outlier'] = [999, 3, 999]
test_df['outlier_na'] = test_df['outlier']
test_df.loc[[300, 500], 'outlier_na'] = np.nan
test_df['datetime'] = pd.date_range('1/1/2015', periods=1000, freq='H')
test_df['None_100'] = [1] * 900 + [None] * 100
test_df['None_na_200'] = [1] * 800 + [None] * 100 + [np.nan] * 100
test_df['character_variable_up1'] = ['A'] * 500 + ['B'] * 200 + ['C'] * 300
test_df['character_variable_up2'] = ['A'] * 500 + ['B'] * 200 + ['D'] * 300
test_df['other_na'] = ['Missing'] * 100 + ['missing'] * 100 + ['N/a'] * 100 + \
['NA'] * 100 + ['na'] * 100 + ['n/a'] * 100 + ['Not Available'] * 100 + \
['Unknown'] * 100 + ['do_not_touch'] * 200
return test_df
def simu(pmf, size):
""" Draw one sample from of a discrete distribution, pmf is supposed to
be in ascending order
Parameters
----------
pmf : tuple(ndarray, ndarray)
a tuple with (labels,probs) labels are supposed to be in ascending order
size: int
the number of sampel you want generate
Returns
------
int (depends of the type of labels)
draw a random sample from the pmf
"""
labels, probs = pmf[0], pmf[1]
u = np.random.rand(size)
cumulative_sum = probs.cumsum()
return labels[(u >= cumulative_sum[:, None]).argmin(axis=0)]
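# Example (illustrative): drawing 5 samples from a pmf over the labels [0, 1, 2].
#   labels = np.array([0, 1, 2])
#   probs = np.array([0.2, 0.3, 0.5])       # probabilities summing to 1
#   draws = simu((labels, probs), size=5)   # ndarray of 5 labels drawn with these weights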
def shuffle_df(df, reindex=False):
new_df = df.sample(frac=1) if not reindex else df.sample(
frac=1).reset_index()
return new_df
def random_pmf(nb_labels):
""" Return a random probability mass function of nb_labels"""
random_numbers = np.random.random(nb_labels)
return random_numbers / np.sum(random_numbers)
def random_histogram(nb_labels, nb_observations):
""" Return a random probability mass function of nb_labels"""
random_histo = np.random.choice(np.arange(0, nb_observations), nb_labels)
return random_histo / np.sum(random_histo)
def keep_category(df, colname, pct=0.05, n=5):
""" Keep a pct or number of every levels of a categorical variable
Parameters
----------
pct : float
Keep at least pct of the nb of observations having a specific category
n : int
Keep at least n of the variables having a specific category
Returns
--------
Returns an index of rows to keep
"""
tokeep = []
nmin = df.groupby(colname).apply(lambda x: x.sample(
max(1, min(x.shape[0], n, int(x.shape[0] * pct)))).index)
for index in nmin:
tokeep += index.tolist()
return pd.Index(tokeep)
# for k, i in df.groupby(colname).groups:
# to_keep += np.random.choice(i, max(1, min(g.shape[0], n, int(g.shape[0] * pct))), replace=False)
# return to_keep
#
def simulate_na_col(df, colname, n=None, pct=None, weights=None,
safety=True, *args, **kwargs):
""" Simulate missing values in a column of categorical variables
Notes
-----
Fix issue with category variable"""
# if df.loc[:,colname].dtype == 'float' or df.loc[:,colname].dtype == 'int':
# raise ValueError('This function only support categorical variables')
if (n is None) and (pct is not None):
# be careful here especially if cols has a lot of missing values
n = int(pct * df.shape[0])
if isinstance(colname, pd.Index) or isinstance(colname, list):
for c in colname:
simulate_na_col(df, colname=c, n=n, pct=pct, weights=weights)
else:
if safety:
tokeep = keep_category(df, colname, *args, **kwargs)
# we are not sampling from tokeep
col = df.loc[:, colname].drop(tokeep)
col = col.dropna()
print(colname)
col_distribution = col.value_counts(normalize=True, sort=False)
labels = col_distribution.index # characters
# generate random pmf
pmf_na = weights if weights else random_pmf(len(labels))
na_distribution = pd.Series(data=pmf_na, index=labels)
# draw samples from this pmf
weights_na = col.apply(lambda x: na_distribution[x])
weights_na /= weights_na.sum()
index_to_replace = col.sample(
n=n, weights=weights_na, replace=False).index
df.loc[index_to_replace, colname] = np.nan
def get_test_df_complete():
""" get the full test dataset from Lending Club open source database,
the purpose of this fuction is to be used in a demo ipython notebook """
import requests
from zipfile import ZipFile
import StringIO
zip_to_download = "https://resources.lendingclub.com/LoanStats3b.csv.zip"
r = requests.get(zip_to_download)
zipfile = ZipFile(StringIO.StringIO(r.content))
file_csv = zipfile.namelist()[0]
# we are using the c parser for speed
df = pd.read_csv(zipfile.open(file_csv), skiprows=[0], na_values=['n/a', 'N/A', ''],
parse_dates=['issue_d', 'last_pymnt_d', 'next_pymnt_d', 'last_credit_pull_d'])
zipfile.close()
df = df[:-2]
nb_row = float(len(df.index))
df['na_col'] = np.nan
df['constant_col'] = 'constant'
df['duplicated_column'] = df.id
df['many_missing_70'] = np.nan
df.loc[1:int(0.3 * nb_row), 'many_missing_70'] = 1
df['bad'] = 1
index_good = df['loan_status'].isin(
['Fully Paid', 'Current', 'In Grace Period'])
df.loc[index_good, 'bad'] = 0
return df
def kl(p, q):
"""
Kullback-Leibler divergence for discrete distributions
Parameters
----------
p: ndarray
probability mass function
q: ndarray
probability mass function
Returns
--------
float : D(P || Q) = sum(p(i) * log(p(i)/q(i)))
Discrete probability distributions.
"""
return np.sum(np.where(p != 0, p * np.log(p / q), 0))
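# Example (illustrative): KL divergence between two simple pmfs.
#   p = np.array([0.5, 0.5])
#   q = np.array([0.9, 0.1])
#   kl(p, q)   # = 0.5*log(0.5/0.9) + 0.5*log(0.5/0.1) ~= 0.51 nats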
def kl_series(serie1, serie2, dropna=True):
if dropna:
serie1 = serie1.dropna()
serie2 = serie2.dropna()
return kl(serie1.value_counts(normalize=True).values,
serie2.value_counts(normalize=True).values)
def plot_hist_na(df, colname):
df_h = df.copy()
na_name = "is_na_{}".format(colname)
df_h[na_name] = df_h[colname].isnull().astype(int)
measure_col = cserie((df.dtypes == int) | (df.dtypes == float))
df_h.groupby(na_name)[measure_col].hist()
def psi(bench, target, group, print_df=True):
""" This function return the Population Stability Index, quantifying if the
distribution is stable between two states.
This statistic make sense and works is only working for numeric variables
for bench and target.
Params:
- bench is a numpy array with the reference variable.
- target is a numpy array of the new variable.
- group is the number of group you want consider.
"""
labels_q = np.percentile(
bench, [(100.0 / group) * i for i in range(group + 1)], interpolation="nearest")
# This is the right approach when you do not have a lot of unique values
ben_pct = (pd.cut(bench, bins=np.unique(labels_q),
include_lowest=True).value_counts()) / len(bench)
target_pct = (pd.cut(target, bins=np.unique(labels_q),
include_lowest=True).value_counts()) / len(target)
target_pct = target_pct.sort_index() # sort the index
ben_pct = ben_pct.sort_index() # sort the index
psi = sum((target_pct - ben_pct) * np.log(target_pct / ben_pct))
# Print results for better understanding
if print_df:
results = pd.DataFrame({'ben_pct': ben_pct.values,
'target_pct': target_pct.values},
index=ben_pct.index)
return {'data': results, 'statistic': psi}
return psi
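# Example (illustrative): PSI between a benchmark sample and a drifted target sample.
#   bench = np.random.normal(0, 1, 10000)
#   target = np.random.normal(0.5, 1, 10000)
#   psi(bench, target, group=10)['statistic']   # clearly above 0; > 0.25 is commonly read as a large shift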
|
{"/test.py": ["/autoc/utils/helpers.py", "/autoc/utils/getdata.py", "/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/outliersdetection.py"], "/autoc/naimputer.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/utils/corrplot.py"], "/autoc/__init__.py": ["/autoc/explorer.py", "/autoc/naimputer.py", "/autoc/preprocess.py", "/autoc/utils/getdata.py"], "/autoc/explorer.py": ["/autoc/utils/helpers.py", "/autoc/exceptions.py"], "/autoc/outliersdetection.py": ["/autoc/explorer.py"], "/autoc/preprocess.py": ["/autoc/explorer.py", "/autoc/utils/helpers.py", "/autoc/exceptions.py"]}
|
281
|
lukemadera/ml-learning
|
refs/heads/master
|
/breakout_ai_a2c.py
|
import numpy as np
import os
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
# Implementing a function to make sure the models share the same gradient
# def ensure_shared_grads(model, shared_model):
# for param, shared_param in zip(model.parameters(), shared_model.parameters()):
# if shared_param.grad is not None:
# return
# shared_param._grad = param.grad
class ActorCritic(nn.Module):
def __init__(self, numActions, numInputs=84):
super(ActorCritic, self).__init__()
# self.conv1 = nn.Conv2d(numInputs, 32, kernel_size=8, stride=4)
# self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
# self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
self.conv1 = nn.Conv2d(numInputs, 32, 3, stride=2, padding=1)
self.conv2 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv3 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.conv4 = nn.Conv2d(32, 32, 3, stride=2, padding=1)
self.linear1 = nn.Linear(192, 512)
self.actor = nn.Linear(512, numActions)
self.critic = nn.Linear(512, 1)
# In a PyTorch model, you only have to define the forward pass.
# PyTorch computes the backwards pass for you!
def forward(self, x):
# Normalize image pixels (from rgb 0 to 255) to between 0 and 1.
x = x / 255.
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1)
x = F.relu(self.linear1(x))
return x
# Only the Actor head
def get_action_probs(self, x):
x = self(x)
actionProbs = F.softmax(self.actor(x), dim=1)
actionProbs = torch.clamp(actionProbs, 0.0001, 0.9999)
return actionProbs
# Only the Critic head
def getStateValue(self, x):
x = self(x)
stateValue = self.critic(x)
return stateValue
# Both heads
def evaluate_actions(self, x):
x = self(x)
actionProbs = F.softmax(self.actor(x), dim=1)
actionProbs = torch.clamp(actionProbs, 0.0001, 0.9999)
stateValues = self.critic(x)
return actionProbs, stateValues
class A2C():
def __init__(self, numActions, gamma=None, learningRate=None, maxGradNorm=0.5,
entropyCoefficient=0.01, valueLossFactor=0.5, sharedModel=None,
sharedOptimizer=None, device='cpu'):
self.gamma = gamma if gamma is not None else 0.99
self.learningRate = learningRate if learningRate is not None else 0.0007
self.maxGradNorm = maxGradNorm
self.entropyCoefficient = entropyCoefficient
self.valueLossFactor = valueLossFactor
self.model = ActorCritic(numActions).to(device=device)
self.sharedModel = sharedModel
self.optimizer = sharedOptimizer if sharedOptimizer is not None else \
optim.Adam(self.model.parameters(), lr=self.learningRate)
self.device = device
print ('A2C hyperparameters',
'learningRate', self.learningRate,
'gamma', self.gamma,
'entropyCoefficient', self.entropyCoefficient,
'valueLossFactor', self.valueLossFactor,
'maxGradNorm', self.maxGradNorm)
def save(self, filePath='training-runs/a2c.pth'):
torch.save({'state_dict': self.model.state_dict(),
'optimizer' : self.optimizer.state_dict(),
}, filePath)
print("=> saved checkpoint... ", filePath)
def load(self, filePath='training-runs/a2c.pth'):
if os.path.isfile(filePath):
print("=> loading checkpoint... ", filePath)
checkpoint = torch.load(filePath)
self.model.load_state_dict(checkpoint['state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
print("done!")
else:
print("no checkpoint found...", filePath)
# def syncSharedModel(self):
# if self.sharedModel is not None:
# # Synchronizing with the shared model
# self.model.load_state_dict(self.sharedModel.state_dict())
def getValues(self, state):
stateTensor = torch.tensor(state, dtype=torch.float32, device=self.device)
return self.model.get_action_probs(stateTensor)
def pickAction(self, bestAction, validActions=None, randomRatio=-1):
action = bestAction
if randomRatio >= 0 and validActions is not None:
randNum = random.uniform(0, 1)
if randNum < randomRatio:
action = np.random.choice(validActions)
# print ('random action')
# action = actionProbs.multinomial(num_samples=1)
# action = action[0,0].tolist()
if validActions is not None and action not in validActions:
action = np.random.choice(validActions)
return action
def selectActions(self, states, validActions=None, randomRatio=-1):
statesTensor = torch.tensor(states, dtype=torch.float32, device=self.device)
actionProbs, stateValues = self.model.evaluate_actions(statesTensor)
actions = []
for item in actionProbs:
bestAction = item.max(0)[1].tolist()
action = self.pickAction(bestAction, validActions, randomRatio)
actions.append(action)
return actions, stateValues.tolist()
def selectAction(self, state, validActions=None, randomRatio=-1):
# Need to add dimension to simulate stack of states, even though just have one.
stateTensor = torch.tensor(state, dtype=torch.float32, device=self.device).unsqueeze(0)
actionProbs, stateValues = self.model.evaluate_actions(stateTensor)
_, bestAction = actionProbs.max(1)
bestAction = bestAction[0].tolist()
action = self.pickAction(bestAction, validActions, randomRatio)
return action, stateValues
def calcActualStateValues(self, rewards, dones, statesTensor):
rewards = rewards.tolist()
dones = dones.tolist()
# R is the cumulative reward.
R = []
rewards.reverse()
if dones[-1]:
# if 0:
nextReturn = 0
else:
stateTensor = statesTensor[-1].unsqueeze(0)
nextReturn = self.model.getStateValue(stateTensor)[0][0].tolist()
# Backup from last state to calculate "true" returns for each state in the set
R.append(nextReturn)
dones.reverse()
for r in range(1, len(rewards)):
if dones[r]:
# if 0:
thisReturn = 0
else:
thisReturn = rewards[r] + nextReturn * self.gamma
# print ('thisReturn', thisReturn, rewards[r], nextReturn, self.gamma, rewards, r)
R.append(thisReturn)
nextReturn = thisReturn
R.reverse()
# print ('rewards', rewards)
stateValuesActual = torch.tensor(R, dtype=torch.float32, device=self.device).unsqueeze(1)
# print ('stateValuesActual', stateValuesActual)
# print ('R', R)
return stateValuesActual
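# Worked example (illustrative) of the discounted-return recursion this method targets,
# with gamma = 0.99 and rewards = [0, 0, 1] where the last state is terminal:
#   R_2 = 1
#   R_1 = 0 + 0.99 * R_2 = 0.99
#   R_0 = 0 + 0.99 * R_1 = 0.9801
# When the last state is not terminal, R_2 instead bootstraps from the critic's estimate:
#   R_2 = r_2 + gamma * V(s_3)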
def learn(self, states, actions, rewards, dones, values=None):
statesTensor = torch.tensor(states, dtype=torch.float32, device=self.device)
# s = torch.tensor(states, dtype=torch.float32, device=self.device)
# Need to convert from array of tensors to tensor of tensors.
# actionProbs, stateValuesEst = self.model.evaluate_actions(torch.cat(statesTensor, 0))
actionProbs, stateValuesEst = self.model.evaluate_actions(statesTensor)
# print ('actionProbs', actionProbs)
# print ('stateValuesEst', stateValuesEst)
actionLogProbs = actionProbs.log()
# print ('actionProbs', actionProbs)
# print ('actionLogProbs', actionLogProbs)
a = torch.tensor(actions, dtype=torch.int64, device=self.device).view(-1,1)
chosenActionLogProbs = actionLogProbs.gather(1, a)
# print ('chosenActionLogProbs', chosenActionLogProbs)
versionToUse = 'v1'
# v1 - original
if versionToUse == 'v1':
# Calculating the actual values.
stateValuesActual = self.calcActualStateValues(rewards, dones, statesTensor)
# print ('stateValuesActual', stateValuesActual)
# This is also the TD (Temporal Difference) error
advantages = stateValuesActual - stateValuesEst
# print ('advantages', advantages)
valueLoss = advantages.pow(2).mean()
# print ('value_loss', value_loss)
entropy = (actionProbs * actionLogProbs).sum(1).mean()
# print ('entropy', entropy, actionProbs, actionLogProbs)
actionGain = (chosenActionLogProbs * advantages).mean()
# print ('actiongain', actionGain)
totalLoss = self.valueLossFactor * valueLoss - \
actionGain - self.entropyCoefficient * entropy
# print ('totalLoss', totalLoss, valueLoss, actionGain)
# v2 - http://steven-anker.nl/blog/?p=184
if versionToUse == 'v2':
R = 0
if not dones[-1]:
stateTensor = statesTensor[-1]
R = self.model.getStateValue(stateTensor)[0][0].tolist()
n = len(statesTensor)
VF = stateValuesEst
RW = np.zeros(n)
ADV = np.zeros(n)
A = np.array(actions)
for i in range(n - 1, -1, -1):
R = rewards[i] + self.gamma * R
RW[i] = R
ADV[i] = R - VF[i]
advantages = torch.from_numpy(ADV).to(self.device)
# rewardsTensor = []
# for reward in rewards:
# print (reward, torch.tensor([reward], device=self.device))
# rewardsTensor.append(torch.tensor(reward, device=self.device))
rewardsTensor = list(map(lambda x: torch.tensor([x], device=self.device), rewards))
rewardsTensor = torch.cat(rewardsTensor, 0)
valueLoss = 0.5 * (stateValuesEst - rewardsTensor).pow(2).mean()
# valueLoss = 0.5 * (stateValuesEst - torch.from_numpy(RW, device=self.device)).pow(2).mean()
actionOneHot = chosenActionLogProbs #Is this correct??
negLogPolicy = -1 * actionLogProbs
# Only the output related to the action needs to be adjusted, since we only know the result of that action.
# By multiplying the negative log of the policy output with the one hot encoded vectors, we force all outputs
# other than the one of the action to zero.
policyLoss = ((negLogPolicy * actionOneHot).sum(1) * advantages.float()).mean()
entropy = (actionProbs * negLogPolicy).sum(1).mean()
# Training works best if the value loss has less influence than the policy loss, so reduce value loss by a factor.
# Optimizing with this loss function could result in converging too quickly to a sub optimal solution.
# I.e. the probability of a single action is significant higher than any other, causing it to always be chosen.
# To prevent this we add a penalty on having a high entropy.
totalLoss = self.valueLossFactor * valueLoss + policyLoss - self.entropyCoefficient * entropy
self.optimizer.zero_grad()
totalLoss.backward()
nn.utils.clip_grad_norm_(self.model.parameters(), self.maxGradNorm)
# if self.sharedModel is not None:
# ensure_shared_grads(self.model, self.sharedModel)
self.optimizer.step()
|
{"/breakout_run_train.py": ["/breakout_ai_a2c.py", "/date_time.py", "/number.py"]}
|
282
|
lukemadera/ml-learning
|
refs/heads/master
|
/number.py
|
# Decimal is causing rounding errors? E.g. 1/3 is 0.3333333333333333 and 1/3 of 30 is 9.999999999999998
# We want to keep precision at a max, but don't increase precision for numbers that start as less.
# For example, change 33.33333333333334 to 33.33333333 and keep 1 as 1 (not 1.0000000001)
from decimal import *
# decimals = 8
# def set_decimals(decimals1):
# global decimals
# decimals = decimals1
# def precision_string(decimals):
# if decimals == 0:
# return '1'
# precision = '.'
# # -1 because add a '1' at the end as last digit
# for count in range(0, (decimals-1)):
# precision += '0'
# precision += '1'
# return precision
# def number(num, decimals1 = False):
# global decimals
# num_decimals_max = decimals1 or decimals
# num_str = str(num)
# index_dot = num_str.find('.')
# if index_dot < 0:
# num_decimals = 0
# else:
# num_decimals_str = len(num_str) - (index_dot + 1)
# if num_decimals_str < num_decimals_max:
# num_decimals = num_decimals_str
# else:
# num_decimals = num_decimals_max
# precision = precision_string(num_decimals)
# return Decimal(num).quantize(Decimal(precision), rounding=ROUND_HALF_UP)
# decimal type does not store in MongoDB
def number(num):
if not isinstance(num, float):
return float(num)
return num
def toFixed(num, precision1='.01'):
numFixed = precision(num, precision1)
numNoZeroes = removeZeroes(str(numFixed))
if numNoZeroes[-1] == '.':
return str(num)
return numNoZeroes
# '0.010000' will return a precision of 6 decimals, instead of 2! So fix by
# removing any trailing zeroes.
def removeZeroes(str1):
newStr = str1
lastIndex = len(str1)
for index, char in reversed(list(enumerate(str1))):
if char != '0':
break
lastIndex = index
newStr = str1[slice(0, lastIndex)]
return newStr
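# Example (illustrative):
#   removeZeroes('0.010000')   # -> '0.01', so decimalCount sees 2 decimals instead of 6
#   removeZeroes('100.0')      # -> '100.' (toFixed detects the trailing dot and falls back to str(num))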
def decimalCount(numString):
index = numString.find('.')
if index > -1:
return len(numString) - index - 1
return -1
def precision(num, precision1 = '.01', round1='down'):
precision = removeZeroes(precision1)
# See if value is already correct precision.
if decimalCount(str(num)) == decimalCount(precision):
return num
rounding = ROUND_UP if round1 == 'up' else ROUND_DOWN
newVal = float(Decimal(num).quantize(Decimal(precision), rounding=rounding))
if newVal == 0.0:
newVal = float(Decimal(num).quantize(Decimal(precision), rounding=ROUND_UP))
return newVal
|
{"/breakout_run_train.py": ["/breakout_ai_a2c.py", "/date_time.py", "/number.py"]}
|
283
|
lukemadera/ml-learning
|
refs/heads/master
|
/breakout_run_train.py
|
import gym
import logging
import numpy as np
import torch
import time
import breakout_ai_a2c as ai_a2c
import date_time
import number
from subproc_vec_env import SubprocVecEnv
from atari_wrappers import make_atari, wrap_deepmind, Monitor
def updateState(obs, state, nc):
# Do frame-stacking here instead of the FrameStack wrapper to reduce IPC overhead
state = np.roll(state, shift=-nc, axis=3)
state[:, :, :, -nc:] = obs
return state
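# Example (illustrative) of the stacking shapes, assuming 84x84 single-channel observations
# (nc = 1) and a stack of 4 frames:
#   state: (numEnvs, 84, 84, 4), obs from env.step(): (numEnvs, 84, 84, 1)
#   np.roll shifts the stack left by nc channels and the newest frame overwrites the last
#   nc channels, so the stack always holds the 4 most recent observations.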
def runTrain(gymId='BreakoutNoFrameskip-v4', numEnvs=16, seed=0, filePathBrain='training/breakout-v1.pth',
numSteps=5, numBatches=20000, outputBatchInterval=1000, joinEnvs=1, epsilon=0.00001):
def make_env(rank):
def _thunk():
env = make_atari(gymId)
env.seed(seed + rank)
gym.logger.setLevel(logging.WARN)
env = wrap_deepmind(env)
# wrap the env one more time for getting total reward
env = Monitor(env, rank)
return env
return _thunk
print ('training starting', numBatches, outputBatchInterval,
'epsilon', epsilon)
env = SubprocVecEnv([make_env(i) for i in range(numEnvs)])
numActions = env.action_space.n
torchDevice = 'cpu'
if torch.cuda.is_available():
torchDevice = 'cuda'
agent = ai_a2c.A2C(numActions, device=torchDevice)
if filePathBrain:
agent.load(filePath=filePathBrain)
timingStart = date_time.now()
batchCount = 0
states, actions, rewards, dones, values = [], [], [], [], []
for ii in range(numEnvs):
states.append([])
actions.append([])
rewards.append([])
dones.append([])
values.append([])
# Set first state.
# Environment returns 1 frame, but we want multiple, so we stack the new
# state on top of the past ones.
nh, nw, nc = env.observation_space.shape
nstack = 4
batchStateShape = (numEnvs * numSteps, nh, nw, nc * nstack)
emptyState = np.zeros((numEnvs, nh, nw, nc * nstack), dtype=np.uint8)
obs = env.reset()
# states = updateState(obs, emptyState, nc)
lastStates = updateState(obs, emptyState, nc)
lastDones = [False for _ in range(numEnvs)]
totalRewards = []
realTotalRewards = []
# All actions are always valid.
validActions = [0,1,2,3]
while batchCount < numBatches:
states, actions, rewards, dones, values = [], [], [], [], []
stepCount = 0
while stepCount < numSteps:
actionsStep, valuesStep = agent.selectActions(lastStates, validActions=validActions, randomRatio=epsilon)
# print ('actionsStep', actionsStep)
states.append(np.copy(lastStates))
actions.append(actionsStep)
values.append(valuesStep)
if stepCount > 0:
dones.append(lastDones)
# Input the action (run a step) for all environments.
statesStep, rewardsStep, donesStep, infosStep = env.step(actionsStep)
# Update state for any dones.
for n, done in enumerate(donesStep):
if done:
lastStates[n] = lastStates[n] * 0
lastStates = updateState(statesStep, lastStates, nc)
# Update rewards for logging / tracking.
for done, info in zip(donesStep, infosStep):
if done:
totalRewards.append(info['reward'])
if info['total_reward'] != -1:
realTotalRewards.append(info['total_reward'])
lastDones = donesStep
rewards.append(rewardsStep)
stepCount += 1
# Dones is one off, so add the last one.
dones.append(lastDones)
# discount/bootstrap off value fn
# lastValues = self.agent.value(lastStates).tolist()
# Can skip this as it is done in the learn function with calcActualStateValues?
# Join all (combine batches and steps).
states = np.asarray(states, dtype='float32').swapaxes(1, 0).reshape(batchStateShape)
actions = np.asarray(actions).swapaxes(1, 0).flatten()
rewards = np.asarray(rewards).swapaxes(1, 0).flatten()
dones = np.asarray(dones).swapaxes(1, 0).flatten()
values = np.asarray(values).swapaxes(1, 0).flatten()
agent.learn(states, actions, rewards, dones, values)
batchCount += 1
if batchCount % outputBatchInterval == 0:
runTime = date_time.diff(date_time.now(), timingStart, 'minutes')
totalSteps = batchCount * numSteps
runTimePerStep = runTime / totalSteps
runTimePerStepUnit = 'minutes'
if runTimePerStep < 0.02:
runTimePerStep *= 60
runTimePerStepUnit = 'seconds'
print (batchCount, numBatches, '(batch done)',
number.toFixed(runTime), 'run time minutes,', totalSteps,
'steps,', number.toFixed(runTimePerStep), runTimePerStepUnit, 'per step')
r = totalRewards[-100:] # get last 100
tr = realTotalRewards[-100:]
if len(r) == 100:
print("avg reward (last 100):", np.mean(r))
if len(tr) == 100:
print("avg total reward (last 100):", np.mean(tr))
print("max (last 100):", np.max(tr))
# Only save periodically as well.
if filePathBrain:
agent.save(filePathBrain)
env.close()
if filePathBrain:
agent.save(filePathBrain)
runTime = date_time.diff(date_time.now(), timingStart, 'minutes')
totalSteps = numBatches * numSteps
runTimePerStep = runTime / totalSteps
runTimePerStepUnit = 'minutes'
if runTimePerStep < 0.02:
runTimePerStep *= 60
runTimePerStepUnit = 'seconds'
print ('training done:', number.toFixed(runTime), 'run time minutes,', totalSteps,
'steps,', number.toFixed(runTimePerStep), runTimePerStepUnit, 'per step')
return None
runTrain(filePathBrain='training/breakout-v1-2.pth', epsilon=0.0001)
|
{"/breakout_run_train.py": ["/breakout_ai_a2c.py", "/date_time.py", "/number.py"]}
|
284
|
lukemadera/ml-learning
|
refs/heads/master
|
/date_time.py
|
import datetime
import dateutil.parser
import dateparser
import math
import pytz
def now(tz = 'UTC', microseconds = False):
# return pytz.utc.localize(datetime.datetime.utcnow())
dt = datetime.datetime.now(pytz.timezone(tz))
if not microseconds:
dt = dt.replace(microsecond = 0)
return dt
def now_string(format = '%Y-%m-%d %H:%M:%S %z', tz = 'UTC'):
return string(now(tz), format)
def arrayString(datetimes, format = '%Y-%m-%d %H:%M:%S %z'):
return list(map(lambda datetime1: string(datetime1, format), datetimes))
def arrayStringFields(array1, fields=[], format = '%Y-%m-%d %H:%M:%S %z'):
def mapString1(obj1):
return dictStringFields(obj1, fields, format)
return list(map(mapString1, array1))
def dictStringFields(object1, fields=[], format = '%Y-%m-%d %H:%M:%S %z'):
newObject = {}
for key in object1:
if key in fields:
newObject[key] = string(object1[key], format)
else:
newObject[key] = object1[key]
return newObject
def string(datetime1, format = '%Y-%m-%d %H:%M:%S %z'):
# return datetime1.strftime('%Y-%m-%d %H:%M:%S %z')
# Much more performant.
return datetime1.isoformat()
def stringFormat(datetime1, format = '%Y-%m-%d %H:%M:%S %z'):
return datetime1.strftime(format)
# def from_string(datetime_string, format = '%Y-%m-%d %H:%M:%S %z'):
# return datetime.strptime(datetime_string, format)
def from_string(dt_string):
return dateutil.parser.parse(dt_string)
def remove_seconds(datetime1):
return datetime1.replace(second = 0, microsecond = 0)
def remove_microseconds(datetime1):
return datetime1.replace(microsecond = 0)
# Sets seconds (and microseconds) to 0.
def remove_seconds_string(datetime_string, format_in = '%Y-%m-%d %H:%M:%S %z', format_out = '%Y-%m-%d %H:%M:%S %z'):
datetime1 = from_string(datetime_string)
datetime1 = remove_seconds(datetime1)
return string(datetime1, format_out)
def diff(datetime1, datetime2, unit='minutes'):
if datetime2 > datetime1:
dt_diff = datetime2 - datetime1
else:
dt_diff = datetime1 - datetime2
# Note only total_seconds works - otherwise it just gives the remainder
# (e.g. if more than one hour, time will be 1 hour and 5 seconds, not 3605 seconds).
# https://stackoverflow.com/questions/2788871/date-difference-in-minutes-in-python
if unit == 'seconds':
return float(dt_diff.total_seconds())
if unit == 'minutes':
return float(dt_diff.total_seconds() / 60)
elif unit == 'hours':
return float(dt_diff.total_seconds() / (60*60))
# Unlike seconds, apparently days will not cut off weeks and months, so this
# still works if more than 7 days.
elif unit == 'days':
return float(dt_diff.days)
return None
def to_biggest_unit(value, unit = 'minutes'):
if unit == 'minutes':
if value < 60:
return {
'value': math.floor(value),
'unit': 'minutes'
}
if value < (60 * 24):
return {
'value': math.floor(value / 60),
'unit': 'hours'
}
if value < (60 * 24 * 28):
return {
'value': math.floor(value / 60 / 24),
'unit': 'days'
}
return None
# Note this will not handle intervals larger than the size of the
# next bigger unit (e.g. >60 minutes). So 90 minutes (1.5 hours) for example,
# could not be done with this; need whole numbers of each unit.
# E.g. turn 10:51 into 10:45 if interval is 15 minutes.
def floor_time_interval(datetime1, interval, unit = 'minutes'):
if unit == 'seconds':
seconds = math.floor(datetime1.second / interval) * interval
return datetime1.replace(second = seconds, microsecond = 0)
elif unit == 'minutes':
minutes = math.floor(datetime1.minute / interval) * interval
return datetime1.replace(minute = minutes, second = 0, microsecond = 0)
elif unit == 'hours':
hours = math.floor(datetime1.hour / interval) * interval
return datetime1.replace(hour = hours, minute = 0, second = 0, microsecond = 0)
elif unit == 'days':
    # day must be at least 1 for a valid date
    days = max(math.floor(datetime1.day / interval) * interval, 1)
    return datetime1.replace(day = days, hour = 0, minute = 0, second = 0, microsecond = 0)
elif unit == 'months':
    # month and day must be at least 1 for a valid date
    months = max(math.floor(datetime1.month / interval) * interval, 1)
    return datetime1.replace(month = months, day = 1, hour = 0, minute = 0, second = 0, microsecond = 0)
elif unit == 'years':
    years = math.floor(datetime1.year / interval) * interval
    return datetime1.replace(year = years, month = 1, day = 1, hour = 0, minute = 0, second = 0, microsecond = 0)
return None
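# Examples (illustrative):
#   dt = datetime.datetime(2020, 1, 1, 10, 51, 33, tzinfo=pytz.utc)
#   floor_time_interval(dt, 15, 'minutes')   # -> 2020-01-01 10:45:00+00:00
#   floor_time_interval(dt, 6, 'hours')      # -> 2020-01-01 06:00:00+00:00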
def nextMonth(datetime1, hour=0, minute=0):
currentMonth = datetime1.month
currentYear = datetime1.year
if currentMonth == 12:
nextMonth = 1
nextYear = currentYear + 1
else:
nextMonth = currentMonth + 1
nextYear = currentYear
nextDatetime = datetime.datetime(nextYear, nextMonth, 1, hour, minute, 0, \
tzinfo=pytz.timezone('UTC'))
return nextDatetime
def previousMonth(datetime1, hour=0, minute=0):
currentMonth = datetime1.month
currentYear = datetime1.year
if currentMonth == 1:
previousMonth = 12
previousYear = currentYear - 1
else:
previousMonth = currentMonth - 1
previousYear = currentYear
previousDatetime = datetime.datetime(previousYear, previousMonth, 1, hour, minute, 0, \
tzinfo=pytz.timezone('UTC'))
return previousDatetime
def dateToMilliseconds(date_str):
"""Convert UTC date to milliseconds
If using offset strings add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
See dateparse docs for formats http://dateparser.readthedocs.io/en/latest/
:param date_str: date in readable format, i.e. "January 01, 2018", "11 hours ago UTC", "now UTC"
:type date_str: str
"""
# get epoch value in UTC
epoch = datetime.datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
# parse our date string
d = dateparser.parse(date_str)
# if the date is not timezone aware apply UTC timezone
if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
d = d.replace(tzinfo=pytz.utc)
# return the difference in time
return int((d - epoch).total_seconds() * 1000.0)
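# Examples (illustrative):
#   dateToMilliseconds("January 01, 2018")   # -> 1514764800000 (midnight UTC in ms)
#   dateToMilliseconds("11 hours ago UTC")   # -> current epoch ms minus 11 hours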
|
{"/breakout_run_train.py": ["/breakout_ai_a2c.py", "/date_time.py", "/number.py"]}
|
285
|
lukemadera/ml-learning
|
refs/heads/master
|
/lodash.py
|
import copy
import random
def findIndex(array1, key, value):
return find_index(array1, key, value)
def find_index(array1, key, value):
for index, arr_item in enumerate(array1):
if key in arr_item and arr_item[key] == value:
return index
return -1
def extend_object(default, new):
final = {}
# Go through defaults first
for key in default:
if key not in new:
final[key] = default[key]
else:
final[key] = new[key]
# In case any keys in new but not in default, add them
for key in new:
if key not in final:
final[key] = new[key]
return final
def sort2D(array1, key, order = 'ascending'):
if len(array1) < 2:
return array1
# def compare(a, b):
# aVal = a[key]
# bVal = b[key]
# if aVal == bVal:
# return 0
# if (aVal > bVal and order == 'ascending') or (aVal < bVal and order == 'descending'):
# return 1
# return -1
def getValue(item):
return item[key]
reverse = True if order == 'descending' else False
return sorted(array1, key=getValue, reverse=reverse)
def omit(object1, keys = []):
new_object = {}
for key in object1:
if key not in keys:
new_object[key] = object1[key]
return new_object
def pick(object1, keys = []):
new_object = {}
for key in object1:
if key in keys:
new_object[key] = object1[key]
return new_object
def map_pick(array1, keys = []):
def pick1(obj1):
return pick(obj1, keys)
return list(map(pick1, array1))
def mapOmit(array1, omitKeys = []):
def omit1(obj1):
return omit(obj1, omitKeys)
return list(map(omit1, array1))
def get_key_array(items, key, skipEmpty=0, emptyValue=None):
if skipEmpty:
return list(map(lambda item: item[key] if key in item else emptyValue, items))
else:
return list(map(lambda item: item[key], items))
# def append_if_unique(array1, value):
# if value not in array1:
# array1.append(value)
def random_string(length = 10):
text = ''
chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
chars_length = len(chars)
counter = 0
while counter < length:
index = random.randint(0, (chars_length - 1))
text = text + chars[index]
counter = counter + 1
return text
def removeArrayIndices(array, indices):
array1 = copy.deepcopy(array)
for index, item in reversed(list(enumerate(array1))):
if index in indices:
del array1[index]
return array1
|
{"/breakout_run_train.py": ["/breakout_ai_a2c.py", "/date_time.py", "/number.py"]}
|
311
|
Sssssbo/SDCNet
|
refs/heads/master
|
/infer_SDCNet.py
|
import numpy as np
import os
import torch
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from misc import check_mkdir, AvgMeter, cal_precision_recall_mae, cal_fmeasure, cal_sizec, cal_sc
from datasets import TestFolder_joint
import joint_transforms
from model import R3Net, SDCNet
torch.manual_seed(2021)
# set which gpu to use
torch.cuda.set_device(6)
# the following two args specify the location of the file of trained model (pth extension)
# you should have the pth file in the folder './$ckpt_path$/$exp_name$'
ckpt_path = './ckpt'
exp_name = 'SDCNet'
msra10k_path = './SOD_label/label_msra10k.csv'
ecssd_path = './SOD_label/label_ECSSD.csv'
dutomrom_path = './SOD_label/label_DUT-OMROM.csv'
dutste_path = './SOD_label/label_DUTS-TE.csv'
hkuis_path = './SOD_label/label_HKU-IS.csv'
pascals_path = './SOD_label/label_PASCAL-S.csv'
sed2_path = './SOD_label/label_SED2.csv'
socval_path = './SOD_label/label_SOC-Val.csv'
sod_path = './SOD_label/label_SOD.csv'
thur15k_path = './SOD_label/label_THUR-15K.csv'
args = {
'snapshot': '30000', # your snapshot filename (exclude extension name)
'save_results': True, # whether to save the resulting masks
'test_mode': 1
}
joint_transform = joint_transforms.Compose([
#joint_transforms.RandomCrop(300),
#joint_transforms.RandomHorizontallyFlip(),
#joint_transforms.RandomRotate(10)
])
img_transform = transforms.Compose([
transforms.Resize((300, 300)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()
to_test ={'ECSSD': ecssd_path,'SOD': sod_path, 'DUTS-TE': dutste_path} #{'DUTS-TE': dutste_path,'ECSSD': ecssd_path,'SOD': sod_path, 'SED2': sed2_path, 'PASCAL-S': pascals_path, 'HKU-IS': hkuis_path, 'DUT-OMROM': dutomrom_path}
def main():
net = SDCNet(num_classes = 5).cuda()
print('load snapshot \'%s\' for testing, mode:\'%s\'' % (args['snapshot'], args['test_mode']))
print(exp_name)
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
net.eval()
results = {}
with torch.no_grad():
for name, root in to_test.items():
print('load snapshot \'%s\' for testing %s' %(args['snapshot'], name))
test_data = pd.read_csv(root)
test_set = TestFolder_joint(test_data, joint_transform, img_transform, target_transform)
test_loader = DataLoader(test_set, batch_size=1, num_workers=0, shuffle=False)
precision0_record, recall0_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision1_record, recall1_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision2_record, recall2_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision3_record, recall3_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision4_record, recall4_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision5_record, recall5_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision6_record, recall6_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
mae0_record = AvgMeter()
mae1_record = AvgMeter()
mae2_record = AvgMeter()
mae3_record = AvgMeter()
mae4_record = AvgMeter()
mae5_record = AvgMeter()
mae6_record = AvgMeter()
n0, n1, n2, n3, n4, n5 = 0, 0, 0, 0, 0, 0
if args['save_results']:
check_mkdir(os.path.join(ckpt_path, exp_name, '%s_%s' % (name, args['snapshot'])))
for i, (inputs, gt, labels, img_path) in enumerate(tqdm(test_loader)):
shape = gt.size()[2:]
img_var = Variable(inputs).cuda()
img = np.array(to_pil(img_var.data.squeeze(0).cpu()))
gt = np.array(to_pil(gt.data.squeeze(0).cpu()))
sizec = labels.numpy()
pred2021 = net(img_var, sizec)
pred2021 = F.interpolate(pred2021, size=shape, mode='bilinear', align_corners=True)
pred2021 = np.array(to_pil(pred2021.data.squeeze(0).cpu()))
if labels == 0:
precision1, recall1, mae1 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision1, recall1)):
p, r = pdata
precision1_record[pidx].update(p)
#print('Presicion:', p, 'Recall:', r)
recall1_record[pidx].update(r)
mae1_record.update(mae1)
n1 += 1
elif labels == 1:
precision2, recall2, mae2 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision2, recall2)):
p, r = pdata
precision2_record[pidx].update(p)
#print('Presicion:', p, 'Recall:', r)
recall2_record[pidx].update(r)
mae2_record.update(mae2)
n2 += 1
elif labels == 2:
precision3, recall3, mae3 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision3, recall3)):
p, r = pdata
precision3_record[pidx].update(p)
#print('Presicion:', p, 'Recall:', r)
recall3_record[pidx].update(r)
mae3_record.update(mae3)
n3 += 1
elif labels == 3:
precision4, recall4, mae4 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision4, recall4)):
p, r = pdata
precision4_record[pidx].update(p)
#print('Presicion:', p, 'Recall:', r)
recall4_record[pidx].update(r)
mae4_record.update(mae4)
n4 += 1
elif labels == 4:
precision5, recall5, mae5 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision5, recall5)):
p, r = pdata
precision5_record[pidx].update(p)
#print('Presicion:', p, 'Recall:', r)
recall5_record[pidx].update(r)
mae5_record.update(mae5)
n5 += 1
precision6, recall6, mae6 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision6, recall6)):
p, r = pdata
precision6_record[pidx].update(p)
recall6_record[pidx].update(r)
mae6_record.update(mae6)
img_name = os.path.split(str(img_path))[1]
img_name = os.path.splitext(img_name)[0]
n0 += 1
if args['save_results']:
Image.fromarray(pred2021).save(os.path.join(ckpt_path, exp_name, '%s_%s' % (
name, args['snapshot']), img_name + '_2021.png'))
fmeasure1 = cal_fmeasure([precord.avg for precord in precision1_record],
[rrecord.avg for rrecord in recall1_record])
fmeasure2 = cal_fmeasure([precord.avg for precord in precision2_record],
[rrecord.avg for rrecord in recall2_record])
fmeasure3 = cal_fmeasure([precord.avg for precord in precision3_record],
[rrecord.avg for rrecord in recall3_record])
fmeasure4 = cal_fmeasure([precord.avg for precord in precision4_record],
[rrecord.avg for rrecord in recall4_record])
fmeasure5 = cal_fmeasure([precord.avg for precord in precision5_record],
[rrecord.avg for rrecord in recall5_record])
fmeasure6 = cal_fmeasure([precord.avg for precord in precision6_record],
[rrecord.avg for rrecord in recall6_record])
results[name] = {'fmeasure1': fmeasure1, 'mae1': mae1_record.avg,'fmeasure2': fmeasure2,
'mae2': mae2_record.avg, 'fmeasure3': fmeasure3, 'mae3': mae3_record.avg,
'fmeasure4': fmeasure4, 'mae4': mae4_record.avg, 'fmeasure5': fmeasure5,
'mae5': mae5_record.avg, 'fmeasure6': fmeasure6, 'mae6': mae6_record.avg}
print('test results:')
print('[fmeasure1 %.3f], [mae1 %.4f], [class1 %.0f]\n'\
'[fmeasure2 %.3f], [mae2 %.4f], [class2 %.0f]\n'\
'[fmeasure3 %.3f], [mae3 %.4f], [class3 %.0f]\n'\
'[fmeasure4 %.3f], [mae4 %.4f], [class4 %.0f]\n'\
'[fmeasure5 %.3f], [mae5 %.4f], [class5 %.0f]\n'\
'[fmeasure6 %.3f], [mae6 %.4f], [all %.0f]\n'%\
(fmeasure1, mae1_record.avg, n1, fmeasure2, mae2_record.avg, n2, fmeasure3, mae3_record.avg, n3, fmeasure4, mae4_record.avg, n4, fmeasure5, mae5_record.avg, n5, fmeasure6, mae6_record.avg, n0))
def accuracy(y_pred, y_actual, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
final_acc = 0
maxk = max(topk)
# for prob_threshold in np.arange(0, 1, 0.01):
PRED_COUNT = y_actual.size(0)
PRED_CORRECT_COUNT = 0
prob, pred = y_pred.topk(maxk, 1, True, True)
# prob = np.where(prob > prob_threshold, prob, 0)
for j in range(pred.size(0)):
if int(y_actual[j]) == int(pred[j]):
PRED_CORRECT_COUNT += 1
if PRED_COUNT == 0:
final_acc = 0
else:
final_acc = float(PRED_CORRECT_COUNT / PRED_COUNT)
return final_acc * 100, PRED_COUNT
if __name__ == '__main__':
main()
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
312
|
Sssssbo/SDCNet
|
refs/heads/master
|
/resnext/__init__.py
|
from .resnext101 import ResNeXt101
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
313
|
Sssssbo/SDCNet
|
refs/heads/master
|
/misc.py
|
import numpy as np
import os
import pylab as pl
#import pydensecrf.densecrf as dcrf
class AvgMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def check_mkdir(dir_name):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
def cal_precision_recall_mae(prediction, gt):
# input should be np array with data type uint8
assert prediction.dtype == np.uint8
assert gt.dtype == np.uint8
assert prediction.shape == gt.shape
eps = 1e-4
prediction = prediction / 255.
gt = gt / 255.
mae = np.mean(np.abs(prediction - gt))
hard_gt = np.zeros(prediction.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt)  # t is the number of positive (foreground) pixels in the ground truth
precision, recall, TPR, FP = [], [], [], []
# calculating precision and recall at 255 different binarizing thresholds
for threshold in range(256):
threshold = threshold / 255.
hard_prediction = np.zeros(prediction.shape)
hard_prediction[prediction > threshold] = 1
#false_pred = np.zeros(prediction.shape)
#false_prediction[prediction < threshold] = 1
a = prediction.shape
tp = np.sum(hard_prediction * hard_gt)
p = np.sum(hard_prediction)
#for roc
#fp = np.sum(false_pred * hard_gt)
#tpr = (tp + eps)/(a + eps)
fp = p - tp
#TPR.append(tpr)
FP.append(fp)
precision.append((tp + eps) / (p + eps))
recall.append((tp + eps) / (t + eps))
return precision, recall, mae#, TPR, FP
def cal_fmeasure(precision, recall):
assert len(precision) == 256
assert len(recall) == 256
beta_square = 0.3
max_fmeasure = max([(1 + beta_square) * p * r / (beta_square * p + r) for p, r in zip(precision, recall)])
return max_fmeasure
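# Example (illustrative): with beta_square = 0.3 the F-measure weighs precision more than recall,
# F = (1 + 0.3) * p * r / (0.3 * p + r). For instance p = 0.8, r = 0.5 gives
# F = 0.52 / 0.74 ~= 0.70; cal_fmeasure returns the maximum of this score over the 256 thresholds.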
def cal_sizec(prediction, gt):
# input should be np array with data type uint8
assert prediction.dtype == np.uint8
assert gt.dtype == np.uint8
assert prediction.shape == gt.shape
eps = 1e-4
#print(gt.shape)
prediction = prediction / 255.
gt = gt / 255.
hard_gt = np.zeros(prediction.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt)  # t is the number of positive (foreground) pixels in the ground truth
precision, recall, TPR, FP = [], [], [], []
# calculating precision and recall at 255 different binarizing thresholds
best_threshold = 0
best_F = 0
for threshold in range(256):
threshold = threshold / 255.
gt_size = np.ones(prediction.shape)
a = np.sum(gt_size)
hard_prediction = np.zeros(prediction.shape)
hard_prediction[prediction > threshold] = 1
tp = np.sum(hard_prediction * hard_gt)
p = np.sum(hard_prediction)
#print(a, p)
precision = (tp + eps) / (p + eps)
recall = (tp + eps) / (t + eps)
beta_square = 0.3
fmeasure = (1 + beta_square) * precision * recall / (beta_square * precision + recall)
if fmeasure > best_F:
best_threshold = threshold*255
best_F = fmeasure
sm_size = p / a
if 0 <= sm_size < 0.1:
sizec = 0
elif 0.1 <= sm_size < 0.2:
sizec = 1
elif 0.2 <= sm_size < 0.3:
sizec = 2
elif 0.3 <= sm_size < 0.4:
sizec = 3
elif 0.4 <= sm_size <= 1.0:
sizec = 4
return sizec, best_threshold#, TPR, FP
def cal_sc(gt):
# input should be np array with data type uint8
assert gt.dtype == np.uint8
eps = 1e-4
gt = gt / 255.
#print(gt.shape)
img_size = np.ones(gt.shape)
a = np.sum(img_size)
hard_gt = np.zeros(gt.shape)
hard_gt[gt > 0.5] = 1
p = np.sum(hard_gt)
b = np.sum(gt)
sm_size = float(p) / float(a)
#print(p, a, sm_size, b)
#print(gt)
if 0 <= sm_size < 0.1:
sizec = 0
elif 0.1 <= sm_size < 0.2:
sizec = 1
elif 0.2 <= sm_size < 0.3:
sizec = 2
elif 0.3 <= sm_size < 0.4:
sizec = 3
elif 0.4 <= sm_size <= 1.0:
sizec = 4
return sizec
def pr_cruve(precision, recall):
assert len(precision) == 256
assert len(recall) == 256
r = [a[1] for a in zip(precision, recall)]
p = [a[0] for a in zip(precision, recall)]
pl.title('PR curve')
pl.xlabel('Recall')
pl.ylabel('Precision')
pl.plot(r, p)
pl.show()
# for define the size type of the salient object
def size_aware(gt):
assert gt.dtype == np.uint8
eps = 1e-4
gt = gt / 255.
hard_gt = np.zeros(gt.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt)
pic = np.size(hard_gt)
rate = t/pic
return rate
# # codes of this function are borrowed from https://github.com/Andrew-Qibin/dss_crf
# def crf_refine(img, annos):
# def _sigmoid(x):
# return 1 / (1 + np.exp(-x))
# assert img.dtype == np.uint8
# assert annos.dtype == np.uint8
# assert img.shape[:2] == annos.shape
# # img and annos should be np array with data type uint8
# EPSILON = 1e-8
# M = 2 # salient or not
# tau = 1.05
# # Setup the CRF model
# d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)
# anno_norm = annos / 255.
# n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * _sigmoid(1 - anno_norm))
# p_energy = -np.log(anno_norm + EPSILON) / (tau * _sigmoid(anno_norm))
# U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32')
# U[0, :] = n_energy.flatten()
# U[1, :] = p_energy.flatten()
# d.setUnaryEnergy(U)
# d.addPairwiseGaussian(sxy=3, compat=3)
# d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5)
# # Do the inference
# infer = np.array(d.inference(1)).astype('float32')
# res = infer[1, :]
# res = res * 255
# res = res.reshape(img.shape[:2])
# return res.astype('uint8')
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
314
|
Sssssbo/SDCNet
|
refs/heads/master
|
/resnet/__init__.py
|
from .make_model import ResNet50, ResNet50_BIN, ResNet50_LowIN
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
315
|
Sssssbo/SDCNet
|
refs/heads/master
|
/resnet/make_model.py
|
from .resnet import ResNet, ResNet_LowIN, BasicBlock, Bottleneck, IN_Bottleneck
import torch
from torch import nn
from .config import resnet50_path
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class ResNet50(nn.Module):
def __init__(self):
super(ResNet50, self).__init__()
net = ResNet(last_stride=2,
block=Bottleneck, frozen_stages=False,
layers=[3, 4, 6, 3])
net.load_param(resnet50_path)
self.layer0 = net.layer0
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
def forward(self, x):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
return layer4
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
class ResNet50_BIN(nn.Module):
def __init__(self):
super(ResNet50_BIN, self).__init__()
net = ResNet(last_stride=2,
block=IN_Bottleneck, frozen_stages=False,
layers=[3, 4, 6, 3])
net.load_param(resnet50_path)
self.layer0 = net.layer0
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
def forward(self, x):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
return layer4
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
class ResNet50_LowIN(nn.Module):
def __init__(self):
super(ResNet50_LowIN, self).__init__()
net = ResNet_LowIN(last_stride=2,
block=Bottleneck, frozen_stages=False,
layers=[3, 4, 6, 3])
net.load_param(resnet50_path)
self.layer0 = net.layer0
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
def forward(self, x):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
return layer4
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
316
|
Sssssbo/SDCNet
|
refs/heads/master
|
/datasets.py
|
import os
import os.path
import torch.utils.data as data
from PIL import Image
class ImageFolder_joint(data.Dataset):
# image and gt should be in the same folder and have same filename except extended name (jpg and png respectively)
def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
imgs = []
self.label_list = label_list
for index, row in label_list.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
self.imgs = imgs
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return len(self.label_list)
def __getitem__(self, index):
img_path, gt_path, label = self.imgs[index]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
if self.joint_transform is not None:
img, target = self.joint_transform(img, target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, label
class ImageFolder_joint_for_edge(data.Dataset):
# image and gt should be in the same folder and have same filename except extended name (jpg and png respectively)
def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
imgs = []
for index, row in label_list.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
self.imgs = imgs
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img_path, gt_path, label = self.imgs[index]
edge_path = "."+gt_path.split(".")[1]+"_edge."+gt_path.split(".")[2]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
target_edge = Image.open(edge_path).convert('L')
if self.joint_transform is not None:
if img.size != target.size or img.size != target_edge.size:
print("error path:", img_path, gt_path)
print("size:", img.size, target.size, target_edge.size)
img, target, target_edge = self.joint_transform(img, target, target_edge)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
target_edge = self.target_transform(target_edge)
return img, target, target_edge, label
def __len__(self):
return len(self.imgs)
class TestFolder_joint(data.Dataset):
# image and gt should be in the same folder and have same filename except extended name (jpg and png respectively)
def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
imgs = []
for index, row in label_list.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
self.imgs = imgs
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img_path, gt_path, label = self.imgs[index]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
if self.joint_transform is not None:
img, target = self.joint_transform(img, target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, label, img_path
def __len__(self):
return len(self.imgs)
def make_dataset(root):
img_list = [os.path.splitext(f)[0] for f in os.listdir(root) if f.endswith('.jpg')]
return [(os.path.join(root, img_name + '.jpg'), os.path.join(root, img_name + '.png')) for img_name in img_list]
class ImageFolder(data.Dataset):
# image and ground truth should live in the same folder and share the same filename, differing only in extension (.jpg for images, .png for ground truths)
def __init__(self, root, joint_transform=None, transform=None, target_transform=None):
self.root = root
self.imgs = make_dataset(root)
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img_path, gt_path = self.imgs[index]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
if self.joint_transform is not None:
img, target = self.joint_transform(img, target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.imgs)
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
317
|
Sssssbo/SDCNet
|
refs/heads/master
|
/model/make_model.py
|
import torch
import torch.nn as nn
from .backbones.resnet import ResNet, Comb_ResNet, Pure_ResNet, Jointin_ResNet, Jointout_ResNet, BasicBlock, Bottleneck, GDN_Bottleneck, IN_Bottleneck, IN2_Bottleneck, SNR_Bottleneck, SNR2_Bottleneck, SNR3_Bottleneck
from loss.arcface import ArcFace
from .backbones.resnet_ibn_a import resnet50_ibn_a, resnet101_ibn_a
from .backbones.se_resnet_ibn_a import se_resnet50_ibn_a, se_resnet101_ibn_a
import torch.nn.functional as F
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
class Backbone(nn.Module):
def __init__(self, num_classes, cfg):
super(Backbone, self).__init__()
last_stride = cfg.MODEL.LAST_STRIDE
model_path = cfg.MODEL.PRETRAIN_PATH
model_name = cfg.MODEL.NAME
self.model_name = cfg.MODEL.NAME
pretrain_choice = cfg.MODEL.PRETRAIN_CHOICE
#block = cfg.MODEL.BLOCK
self.cos_layer = cfg.MODEL.COS_LAYER
self.neck = cfg.MODEL.NECK
self.neck_feat = cfg.TEST.NECK_FEAT
if model_name == 'Pure_resnet50_GDN':
self.in_planes = 2048
self.base = ResNet(last_stride=last_stride,
block=GDN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Comb_resnet50_IN':
self.in_planes = 2048
self.base = Comb_ResNet(last_stride=last_stride,
block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Pure_resnet50_IN2':
self.in_planes = 2048
self.base = Pure_ResNet(last_stride=last_stride,
block=IN2_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
elif model_name == 'Pure_resnet50_IN':
self.in_planes = 2048
self.base = Pure_ResNet(last_stride=last_stride,
block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Pure_resnet50_SNR':
self.in_planes = 2048
self.base = Pure_ResNet(last_stride=last_stride,
block=SNR_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Pure_resnet50_SNR2':
self.in_planes = 2048
self.base = Pure_ResNet(last_stride=last_stride,
block=SNR2_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Jointin_resnet50_SNR3':
self.in_planes = 2048
self.base = Jointin_ResNet(last_stride=last_stride,
block=SNR3_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Jointout_resnet50_None':
self.in_planes = 2048
self.base = Jointout_ResNet(last_stride=last_stride,
block=Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Jointout_resnet50_IN':
self.in_planes = 2048
self.base = Jointout_ResNet(last_stride=last_stride,
block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'resnet18':
self.in_planes = 512
self.base = ResNet(last_stride=last_stride,
block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN,
layers=[2, 2, 2, 2])
print('using resnet18 as a backbone')
elif model_name == 'resnet34':
self.in_planes = 512
self.base = ResNet(last_stride=last_stride,
block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3])
print('using resnet34 as a backbone')
elif model_name == 'resnet50_ibn_a':
self.in_planes = 2048
self.base = resnet50_ibn_a(last_stride)
print('using resnet50_ibn_a as a backbone')
elif model_name == 'se_resnet50_ibn_a':
self.in_planes = 2048
self.base = se_resnet50_ibn_a(
last_stride, frozen_stages=cfg.MODEL.FROZEN)
print('using se_resnet50_ibn_a as a backbone')
elif model_name == 'resnet101_ibn_a':
self.in_planes = 2048
self.base = resnet101_ibn_a(
last_stride, frozen_stages=cfg.MODEL.FROZEN)
print('using resnet101_ibn_a as a backbone')
elif model_name == 'se_resnet101_ibn_a':
self.in_planes = 2048
self.base = se_resnet101_ibn_a(
last_stride, frozen_stages=cfg.MODEL.FROZEN)
print('using se_resnet101_ibn_a as a backbone')
else:
print('unsupported backbone: {}'.format(model_name))
if pretrain_choice == 'imagenet':
self.base.load_param(model_path)
print('Loading pretrained ImageNet model......from {}'.format(model_path))
self.gap = nn.AdaptiveAvgPool2d(1)
self.num_classes = num_classes
if self.cos_layer:
print('using cosine layer')
self.arcface = ArcFace(
self.in_planes, self.num_classes, s=30.0, m=0.50)
else:
self.classifier = nn.Linear(
self.in_planes, self.num_classes, bias=False)
self.classifier.apply(weights_init_classifier)
if model_name == 'Jointin_resnet50_SNR3':
self.classifier = nn.Linear(
self.in_planes, self.num_classes, bias=False)
self.classifier.apply(weights_init_classifier)
self.classifier1 = nn.Linear(512, self.num_classes, bias=False)
self.classifier1.apply(weights_init_classifier)
self.classifier2 = nn.Linear(512, self.num_classes, bias=False)
self.classifier2.apply(weights_init_classifier)
self.classifier3 = nn.Linear(512, self.num_classes, bias=False)
self.classifier3.apply(weights_init_classifier)
self.classifier4 = nn.Linear(512, self.num_classes, bias=False)
self.classifier4.apply(weights_init_classifier)
self.classifier5 = nn.Linear(1024, self.num_classes, bias=False)
self.classifier5.apply(weights_init_classifier)
self.classifier6 = nn.Linear(256, self.num_classes, bias=False)
self.classifier6.apply(weights_init_classifier)
self.classifier7 = nn.Linear(256, self.num_classes, bias=False)
self.classifier7.apply(weights_init_classifier)
self.classifier8 = nn.Linear(256, self.num_classes, bias=False)
self.classifier8.apply(weights_init_classifier)
self.classifier9 = nn.Linear(256, self.num_classes, bias=False)
self.classifier9.apply(weights_init_classifier)
self.classifier10 = nn.Linear(512, self.num_classes, bias=False)
self.classifier10.apply(weights_init_classifier)
self.classifier11 = nn.Linear(128, self.num_classes, bias=False)
self.classifier11.apply(weights_init_classifier)
self.classifier12 = nn.Linear(128, self.num_classes, bias=False)
self.classifier12.apply(weights_init_classifier)
self.classifier13 = nn.Linear(128, self.num_classes, bias=False)
self.classifier13.apply(weights_init_classifier)
self.classifier14 = nn.Linear(128, self.num_classes, bias=False)
self.classifier14.apply(weights_init_classifier)
self.classifier15 = nn.Linear(256, self.num_classes, bias=False)
self.classifier15.apply(weights_init_classifier)
self.classifier16 = nn.Linear(64, self.num_classes, bias=False)
self.classifier16.apply(weights_init_classifier)
self.classifier17 = nn.Linear(64, self.num_classes, bias=False)
self.classifier17.apply(weights_init_classifier)
self.classifier18 = nn.Linear(64, self.num_classes, bias=False)
self.classifier18.apply(weights_init_classifier)
self.classifier19 = nn.Linear(64, self.num_classes, bias=False)
self.classifier19.apply(weights_init_classifier)
elif 'Jointout' in model_name:
self.classifier0 = nn.Linear(64, self.num_classes, bias=False)
self.classifier0.apply(weights_init_classifier)
self.classifier0_1 = nn.Linear(64, self.num_classes, bias=False)
self.classifier0_1.apply(weights_init_classifier)
self.classifier1 = nn.Linear(256, self.num_classes, bias=False)
self.classifier1.apply(weights_init_classifier)
self.classifier1_1 = nn.Linear(256, self.num_classes, bias=False)
self.classifier1_1.apply(weights_init_classifier)
self.classifier2 = nn.Linear(512, self.num_classes, bias=False)
self.classifier2.apply(weights_init_classifier)
self.classifier2_1 = nn.Linear(512, self.num_classes, bias=False)
self.classifier2_1.apply(weights_init_classifier)
self.classifier3 = nn.Linear(1024, self.num_classes, bias=False)
self.classifier3.apply(weights_init_classifier)
self.classifier3_1 = nn.Linear(1024, self.num_classes, bias=False)
self.classifier3_1.apply(weights_init_classifier)
self.classifier4 = nn.Linear(2048, self.num_classes, bias=False)
self.classifier4.apply(weights_init_classifier)
self.classifier4_1 = nn.Linear(2048, self.num_classes, bias=False)
self.classifier4_1.apply(weights_init_classifier)
self.bottleneck = nn.BatchNorm1d(self.in_planes)
self.bottleneck.bias.requires_grad_(False)
self.bottleneck.apply(weights_init_kaiming)
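# BNNeck: a BatchNorm1d with a frozen bias inserted between the global feature (used for the metric/triplet loss) and the classifier input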
def forward(self, x, label=None, camid=None): # label is unused if self.cos_layer == 'no'
if self.training and self.model_name == 'Jointin_resnet50_SNR3':
x, x4_2, x4_1, res4_2, res4_1, x3_3, x3_2, x3_1, res3_2, res3_1, x2_3, x2_2, x2_1, res2_2, res2_1, x1_3, x1_2, x1_1, res1_2, res1_1 = self.base(x, camid)
global_feat = nn.functional.avg_pool2d(x, x.shape[2:4])
global_feat = global_feat.view(global_feat.shape[0], -1)
feat = self.bottleneck(global_feat)
cls_score = self.classifier(feat)
fx4_2 = nn.functional.avg_pool2d(x4_2, x4_2.shape[2:4])
fx4_2 = fx4_2.view(fx4_2.shape[0], -1)
ax4_2 = self.classifier1(fx4_2)
fx4_1 = nn.functional.avg_pool2d(x4_1, x4_1.shape[2:4])
fx4_1 = fx4_1.view(fx4_1.shape[0], -1)
ax4_1 = self.classifier2(fx4_1)
fres4_2 = nn.functional.avg_pool2d(res4_2, res4_2.shape[2:4])
fres4_2 = fres4_2.view(fres4_2.shape[0], -1)
ares4_2 = self.classifier3(fres4_2)
fres4_1 = nn.functional.avg_pool2d(res4_1, res4_1.shape[2:4])
fres4_1 = fres4_1.view(fres4_1.shape[0], -1)
ares4_1 = self.classifier4(fres4_1)
fx3_3 = nn.functional.avg_pool2d(x3_3, x3_3.shape[2:4])
fx3_3 = fx3_3.view(fx3_3.shape[0], -1)
ax3_3 = self.classifier5(fx3_3)
fx3_2 = nn.functional.avg_pool2d(x3_2, x3_2.shape[2:4])
fx3_2 = fx3_2.view(fx3_2.shape[0], -1)
ax3_2 = self.classifier6(fx3_2)
fx3_1 = nn.functional.avg_pool2d(x3_1, x3_1.shape[2:4])
fx3_1 = fx3_1.view(fx3_1.shape[0], -1)
ax3_1 = self.classifier7(fx3_1)
fres3_2 = nn.functional.avg_pool2d(res3_2, res3_2.shape[2:4])
fres3_2 = fres3_2.view(fres3_2.shape[0], -1)
ares3_2 = self.classifier8(fres3_2)
fres3_1 = nn.functional.avg_pool2d(res3_1, res3_1.shape[2:4])
fres3_1 = fres3_1.view(fres3_1.shape[0], -1)
ares3_1 = self.classifier9(fres3_1)
fx2_3 = nn.functional.avg_pool2d(x2_3, x2_3.shape[2:4])
fx2_3 = fx2_3.view(fx2_3.shape[0], -1)
ax2_3 = self.classifier10(fx2_3)
fx2_2 = nn.functional.avg_pool2d(x2_2, x2_2.shape[2:4])
fx2_2 = fx2_2.view(fx2_2.shape[0], -1)
ax2_2 = self.classifier11(fx2_2)
fx2_1 = nn.functional.avg_pool2d(x2_1, x2_1.shape[2:4])
fx2_1 = fx2_1.view(fx2_1.shape[0], -1)
ax2_1 = self.classifier12(fx2_1)
fres2_2 = nn.functional.avg_pool2d(res2_2, res2_2.shape[2:4])
fres2_2 = fres2_2.view(fres2_2.shape[0], -1)
ares2_2 = self.classifier13(fres2_2)
fres2_1 = nn.functional.avg_pool2d(res2_1, res2_1.shape[2:4])
fres2_1 = fres2_1.view(fres2_1.shape[0], -1)
ares2_1 = self.classifier14(fres2_1)
fx1_3 = nn.functional.avg_pool2d(x1_3, x1_3.shape[2:4])
fx1_3 = fx1_3.view(fx1_3.shape[0], -1)
ax1_3 = self.classifier15(fx1_3)
fx1_2 = nn.functional.avg_pool2d(x1_2, x1_2.shape[2:4])
fx1_2 = fx1_2.view(fx1_2.shape[0], -1)
ax1_2 = self.classifier16(fx1_2)
fx1_1 = nn.functional.avg_pool2d(x1_1, x1_1.shape[2:4])
fx1_1 = fx1_1.view(fx1_1.shape[0], -1)
ax1_1 = self.classifier17(fx1_1)
fres1_2 = nn.functional.avg_pool2d(res1_2, res1_2.shape[2:4])
fres1_2 = fres1_2.view(fres1_2.shape[0], -1)
ares1_2 = self.classifier18(fres1_2)
fres1_1 = nn.functional.avg_pool2d(res1_1, res1_1.shape[2:4])
fres1_1 = fres1_1.view(fres1_1.shape[0], -1)
ares1_1 = self.classifier19(fres1_1)
return cls_score, global_feat, ax4_2, ax4_1, ares4_2, ares4_1, ax3_3, ax3_2, ax3_1, ares3_2, ares3_1, ax2_3, ax2_2, ax2_1, ares2_2, ares2_1, ax1_3, ax1_2, ax1_1, ares1_2, ares1_1
elif 'Jointout' in self.model_name and self.training:
x0, x1, x2, x3, x4, res0, res1, res2, res3, res4 = self.base(x, camid)
global_feat = nn.functional.avg_pool2d(x4, x4.shape[2:4])
global_feat = global_feat.view(global_feat.shape[0], -1)
feat = self.bottleneck(global_feat)
cls_score = self.classifier4(feat)
res4 = nn.functional.avg_pool2d(res4, res4.shape[2:4])
res4 = res4.view(res4.shape[0], -1)
res4 = self.classifier4_1(res4)
x3 = nn.functional.avg_pool2d(x3, x3.shape[2:4])
x3 = x3.view(x3.shape[0], -1)
x3 = self.classifier3_1(x3)
res3 = nn.functional.avg_pool2d(res3, res3.shape[2:4])
res3 = res3.view(res3.shape[0], -1)
res3 = self.classifier3(res3)
x2 = nn.functional.avg_pool2d(x2, x2.shape[2:4])
x2 = x2.view(x2.shape[0], -1)
x2 = self.classifier2(x2)
res2 = nn.functional.avg_pool2d(res2, res2.shape[2:4])
res2 = res2.view(res2.shape[0], -1)
res2 = self.classifier2_1(res2)
x1 = nn.functional.avg_pool2d(x1, x1.shape[2:4])
x1 = x1.view(x1.shape[0], -1)
x1 = self.classifier1(x1)
res1 = nn.functional.avg_pool2d(res1, res1.shape[2:4])
res1 = res1.view(res1.shape[0], -1)
res1 = self.classifier1_1(res1)
x0 = nn.functional.avg_pool2d(x0, x0.shape[2:4])
x0 = x0.view(x0.shape[0], -1)
x0 = self.classifier0(x0)
res0 = nn.functional.avg_pool2d(res0, res0.shape[2:4])
res0 = res0.view(res0.shape[0], -1)
res0 = self.classifier0_1(res0)
return global_feat, x0, x1, x2, x3, cls_score, res0, res1, res2, res3, res4
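# Fallback path: used for all other backbones, and for the Joint* variants at inference time; a single global feature with an optional BNNeck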
x = self.base(x, camid)
# print(x.shape)
global_feat = nn.functional.avg_pool2d(x, x.shape[2:4])
# print(global_feat.shape)
# print(x.shape)
# for convert to onnx, kernel size must be from x.shape[2:4] to a constant [20,20]
#global_feat = nn.functional.avg_pool2d(x, [16, 16])
# flatten to (bs, 2048), global_feat.shape[0]
global_feat = global_feat.view(global_feat.shape[0], -1)
feat = self.bottleneck(global_feat)
if self.neck == 'no':
feat = global_feat
elif self.neck == 'bnneck':
feat = self.bottleneck(global_feat)
if self.training:
if self.cos_layer:
cls_score = self.arcface(feat, label)
else:
cls_score = self.classifier(feat)
return cls_score, global_feat # global feature for triplet loss
else:
if self.neck_feat == 'after':
# print("Test with feature after BN")
return feat
else:
# print("Test with feature before BN")
return global_feat
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
def load_param_finetune(self, model_path):
param_dict = torch.load(model_path)
# for i in param_dict:
# print(i)#change by sb
# self.state_dict()[i].copy_(param_dict[i])
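# NOTE: the parameter copy above is commented out, so this method currently only logs the checkpoint path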
print('Loading pretrained model for finetuning from {}'.format(model_path))
def make_model(cfg, num_class):
model = Backbone(num_class, cfg)
return model
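# Minimal usage sketch (hypothetical num_class; cfg is the yacs-style config consumed above):
#   model = make_model(cfg, num_class=751).cuda().train()
#   cls_score, global_feat = model(images, label=pids, camid=camids)  # standard (non-Joint*) backbones in training mode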
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
318
|
Sssssbo/SDCNet
|
refs/heads/master
|
/resnet/config.py
|
resnet50_path = './resnet/resnet50-19c8e357.pth'
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
319
|
Sssssbo/SDCNet
|
refs/heads/master
|
/model.py
|
import torch
import torch.nn.functional as F
from torch import nn
from resnext import ResNeXt101
class R3Net(nn.Module):
def __init__(self):
super(R3Net, self).__init__()
res50 = ResNeXt101()
self.layer0 = res50.layer0
self.layer1 = res50.layer1
self.layer2 = res50.layer2
self.layer3 = res50.layer3
self.layer4 = res50.layer4
self.reduce_low = nn.Sequential(
nn.Conv2d(64 + 256 + 512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce_high = nn.Sequential(
nn.Conv2d(1024 + 2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
_ASPP(256)
)
self.predict0 = nn.Conv2d(256, 1, kernel_size=1)
self.predict1 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict2 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict3 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict4 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict5 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict6 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
for m in self.modules():
if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout):
m.inplace = True
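# switch every ReLU / Dropout module to in-place mode to reduce activation memory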
def forward(self, x, label = None):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
l0_size = layer0.size()[2:]
reduce_low = self.reduce_low(torch.cat((
layer0,
F.interpolate(layer1, size=l0_size, mode='bilinear', align_corners=True),
F.interpolate(layer2, size=l0_size, mode='bilinear', align_corners=True)), 1))
reduce_high = self.reduce_high(torch.cat((
layer3,
F.interpolate(layer4, size=layer3.size()[2:], mode='bilinear', align_corners=True)), 1))
reduce_high = F.interpolate(reduce_high, size=l0_size, mode='bilinear', align_corners=True)
predict0 = self.predict0(reduce_high)
predict1 = self.predict1(torch.cat((predict0, reduce_low), 1)) + predict0
predict2 = self.predict2(torch.cat((predict1, reduce_high), 1)) + predict1
predict3 = self.predict3(torch.cat((predict2, reduce_low), 1)) + predict2
predict4 = self.predict4(torch.cat((predict3, reduce_high), 1)) + predict3
predict5 = self.predict5(torch.cat((predict4, reduce_low), 1)) + predict4
predict6 = self.predict6(torch.cat((predict5, reduce_high), 1)) + predict5
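# recurrent residual refinement: each stage concatenates the previous prediction with the low-/high-level features in alternation and adds the result back to the previous prediction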
predict0 = F.interpolate(predict0, size=x.size()[2:], mode='bilinear', align_corners=True)
predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True)
predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True)
predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True)
predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True)
predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True)
predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True)
if self.training:
return predict0, predict1, predict2, predict3, predict4, predict5, predict6
return torch.sigmoid(predict6)
#--------------------------------------------------------------------------------------------
class SDCNet(nn.Module):
def __init__(self, num_classes):
super(SDCNet, self).__init__()
res50 = ResNeXt101()
self.layer0 = res50.layer0
self.layer1 = res50.layer1
self.layer2 = res50.layer2
self.layer3 = res50.layer3
self.layer4 = res50.layer4
self.reducex = nn.Sequential(
nn.Conv2d(2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
_ASPP(256)
)
self.reduce5 = nn.Sequential(
nn.Conv2d(64 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce6 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce7 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce8 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce9 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce10 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
# --------------extra module---------------
self.reduce3_0 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce3_1 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce3_2 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce3_3 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce3_4 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce2_0 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce2_1 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce2_2 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce2_3 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce2_4 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce1_0 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce1_1 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce1_2 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce1_3 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce1_4 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce0_0 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce0_1 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce0_2 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce0_3 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce0_4 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
# self.predict0 = nn.Conv2d(256, 1, kernel_size=1)
self.predict1 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict2 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict3 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict4 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict5 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict6 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict7 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict8 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict9 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict10 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.pre4 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 2, kernel_size=1)
)
self.pre3 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 2, kernel_size=1)
)
self.pre2 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 2, kernel_size=1)
)
self.pre1 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 2, kernel_size=1)
)
self.reducex_1 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reducex_2 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reducex_3 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
for m in self.modules():
if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout):
m.inplace = True
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc0 = nn.Sequential(
nn.BatchNorm1d(256),
nn.Dropout(0.5),
nn.Linear(256, num_classes),
)
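# NOTE: avg_pool and fc0 are defined here but appear unused in forward() below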
def forward(self, x, c):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
l0_size = layer0.size()[2:]
l1_size = layer1.size()[2:]
l2_size = layer2.size()[2:]
l3_size = layer3.size()[2:]
F1 = self.reducex(layer4)
p4 = self.pre4(F1)
p4 = F.interpolate(p4, size=x.size()[2:], mode='bilinear', align_corners=True)
F0_4 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True)
F0_3 = self.reducex_3(torch.cat((F0_4, layer3), 1))
p3 = self.pre3(F0_3)
p3 = F.interpolate(p3, size=x.size()[2:], mode='bilinear', align_corners=True)
F0_3 = F.interpolate(F0_3, size=l2_size, mode='bilinear', align_corners=True)
F0_2 = self.reducex_2(torch.cat((F0_3, layer2), 1))
p2 = self.pre2(F0_2)
p2 = F.interpolate(p2, size=x.size()[2:], mode='bilinear', align_corners=True)
F0_2 = F.interpolate(F0_2, size=l1_size, mode='bilinear', align_corners=True)
F0_1 = self.reducex_1(torch.cat((F0_2, layer1), 1))
p1 = self.pre1(F0_1)
p1 = F.interpolate(p1, size=x.size()[2:], mode='bilinear', align_corners=True)
p5 = p4 + p3 + p2 + p1
#saliency detect
predict1 = self.predict1(F1)
predict1 = F.interpolate(predict1, size=l3_size, mode='bilinear', align_corners=True)
F1 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True)
F2 = F1[:, :, :, :].clone().detach()
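# Per-sample routing: each image is refined by the reduce3_* branch matching its size-class label c[i] (classes 0-4); the same pattern repeats for reduce2_*, reduce1_* and reduce0_* below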
for i in range(len(c)):
if c[i] == 0:
F2[i, :, :, :] = self.reduce3_0(
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 1:
F2[i, :, :, :] = self.reduce3_1(
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 2:
F2[i, :, :, :] = self.reduce3_2(
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 3:
F2[i, :, :, :] = self.reduce3_3(
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 4:
F2[i, :, :, :] = self.reduce3_4(
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
predict2 = self.predict2(F2) + predict1
predict2 = F.interpolate(predict2, size=l2_size, mode='bilinear', align_corners=True)
F2 = F.interpolate(F2, size=l2_size, mode='bilinear', align_corners=True)
F3 = F2[:, :, :, :].clone().detach()
for i in range(len(c)):
if c[i] == 0:
F3[i, :, :, :] = self.reduce2_0(
torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 1:
F3[i, :, :, :] = self.reduce2_1(
torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 2:
F3[i, :, :, :] = self.reduce2_2(
torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 3:
F3[i, :, :, :] = self.reduce2_3(
torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 4:
F3[i, :, :, :] = self.reduce2_4(
torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
predict3 = self.predict3(F3) + predict2
predict3 = F.interpolate(predict3, size=l1_size, mode='bilinear', align_corners=True)
F3 = F.interpolate(F3, size=l1_size, mode='bilinear', align_corners=True)
F4 = F3[:, :, :, :].clone().detach()
for i in range(len(c)):
if c[i] == 0:
F4[i, :, :, :] = self.reduce1_0(
torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 1:
F4[i, :, :, :] = self.reduce1_1(
torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 2:
F4[i, :, :, :] = self.reduce1_2(
torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 3:
F4[i, :, :, :] = self.reduce1_3(
torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 4:
F4[i, :, :, :] = self.reduce1_4(
torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
predict4 = self.predict4(F4) + predict3
F5 = self.reduce5(torch.cat((F4, layer0), 1))
predict5 = self.predict5(F5) + predict4
F0 = F4[:, :, :, :].clone().detach()
for i in range(len(c)):
if c[i] == 0:
F0[i, :, :, :] = self.reduce0_0(layer0[i, :, :, :].unsqueeze(0))
elif c[i] == 1:
F0[i, :, :, :] = self.reduce0_1(layer0[i, :, :, :].unsqueeze(0))
elif c[i] == 2:
F0[i, :, :, :] = self.reduce0_2(layer0[i, :, :, :].unsqueeze(0))
elif c[i] == 3:
F0[i, :, :, :] = self.reduce0_3(layer0[i, :, :, :].unsqueeze(0))
elif c[i] == 4:
F0[i, :, :, :] = self.reduce0_4(layer0[i, :, :, :].unsqueeze(0))
F1 = F.interpolate(F1, size=l1_size, mode='bilinear', align_corners=True)
F2 = F.interpolate(F2, size=l1_size, mode='bilinear', align_corners=True)
F6 = self.reduce6(torch.cat((F0, F5), 1))
F7 = self.reduce7(torch.cat((F0, F4), 1))
F8 = self.reduce8(torch.cat((F0, F3), 1))
F9 = self.reduce9(torch.cat((F0, F2), 1))
F10 = self.reduce10(torch.cat((F0, F1), 1))
predict6 = self.predict6(F6) + predict5
predict7 = self.predict7(F7) + predict6
predict8 = self.predict8(F8) + predict7
predict9 = self.predict9(F9) + predict8
predict10 = self.predict10(F10) + predict9
predict11 = predict6 + predict7 + predict8 + predict9 + predict10
predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True)
predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True)
predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True)
predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True)
predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True)
predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True)
predict7 = F.interpolate(predict7, size=x.size()[2:], mode='bilinear', align_corners=True)
predict8 = F.interpolate(predict8, size=x.size()[2:], mode='bilinear', align_corners=True)
predict9 = F.interpolate(predict9, size=x.size()[2:], mode='bilinear', align_corners=True)
predict10 = F.interpolate(predict10, size=x.size()[2:], mode='bilinear', align_corners=True)
predict11 = F.interpolate(predict11, size=x.size()[2:], mode='bilinear', align_corners=True)
if self.training:
return p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11
return torch.sigmoid(predict11)
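# During training the network returns the five class-map logits (p5..p1) and the eleven saliency logits (predict1..predict11); at inference only the sigmoid of the fused map predict11 is returned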
#----------------------------------------------------------------------------------------
class _ASPP(nn.Module):
def __init__(self, in_dim):
super(_ASPP, self).__init__()
down_dim = in_dim // 2
self.conv1 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=2, padding=2), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=4, padding=4), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=6, padding=6), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv5 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.fuse = nn.Sequential(
nn.Conv2d(5 * down_dim, in_dim, kernel_size=1), nn.BatchNorm2d(in_dim), nn.PReLU()
)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(x)
conv3 = self.conv3(x)
conv4 = self.conv4(x)
conv5 = F.interpolate(self.conv5(F.adaptive_avg_pool2d(x, 1)), size=x.size()[2:], mode='bilinear',
align_corners=True)
return self.fuse(torch.cat((conv1, conv2, conv3, conv4, conv5), 1))
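# _ASPP: a 1x1 branch, three dilated 3x3 branches (rates 2, 4 and 6) and an image-level pooling branch, concatenated and fused by a final 1x1 conv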
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
320
|
Sssssbo/SDCNet
|
refs/heads/master
|
/create_free.py
|
import numpy as np
import os
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
import cv2
from config import ecssd_path, hkuis_path, pascals_path, sod_path, dutomron_path, MTDD_test_path
from misc import check_mkdir, crf_refine, AvgMeter, cal_precision_recall_mae, cal_fmeasure
from datasets import TestFolder_joint
import joint_transforms
from model import HSNet_single1, HSNet_single1_ASPP, HSNet_single1_NR, HSNet_single2, SDMS_A, SDMS_C
torch.manual_seed(2018)
# set which gpu to use
torch.cuda.set_device(0)
ckpt_path = './ckpt'
test_path = './test_ECSSD.csv'
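# NOTE: most of the imports above are unused; main() below only previews './0595.PNG' and writes a blank 512x512 image to './free.png'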
def main():
    img = np.zeros((512, 512), dtype=np.uint8)
img2 = cv2.imread('./0595.PNG', 0)
cv2.imshow('img',img2)
#cv2.waitKey(0)
print(img, img2)
Image.fromarray(img).save('./free.png')
if __name__ == '__main__':
main()
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
321
|
Sssssbo/SDCNet
|
refs/heads/master
|
/count_dataset.py
|
import numpy as np
import os
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
path_list = ['msra10k', 'ECSSD', 'DUT-OMROM', 'DUTS-TR', 'DUTS-TE', 'HKU-IS', 'PASCAL-S', 'SED2', 'SOC', 'SOD', 'THUR-15K']
def main():
Dataset, Class0, Class1, Class2, Class3, Class4, Class5, Class6, Class7, Class8, Class9, Class10, Total = [], [], [], [], [], [], [], [], [], [], [], [], []
for data_path in path_list:
test_path = './SOD_label/label_' + data_path + '.csv'
print('Evaluate for ' + test_path)
test_data = pd.read_csv(test_path)
imgs = []
num, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
for index, row in test_data.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
img_path, gt_path, label = imgs[index]
if label == 0:
c0 += 1
elif label == 1:
c1 += 1
elif label == 2:
c2 += 1
elif label == 3:
c3 += 1
elif label == 4:
c4 += 1
elif label == 5:
c5 += 1
elif label == 6:
c6 += 1
elif label == 7:
c7 += 1
elif label == 8:
c8 += 1
elif label == 9:
c9 += 1
elif label == 10:
c10 += 1
num += 1
print('[Class0 %.f], [Class1 %.f], [Class2 %.f], [Class3 %.f]\n'\
'[Class4 %.f], [Class5 %.f], [Class6 %.f], [Class7 %.f]\n'\
'[Class8 %.f], [Class9 %.f], [Class10 %.f], [Total %.f]\n'%\
(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, num)
)
Dataset.append(data_path)
Class0.append(c0)
Class1.append(c1)
Class2.append(c2)
Class3.append(c3)
Class4.append(c4)
Class5.append(c5)
Class6.append(c6)
Class7.append(c7)
Class8.append(c8)
Class9.append(c9)
Class10.append(c10)
Total.append(num)
label_file = pd.DataFrame({'Datasets': Dataset, 'Class 0': Class0, 'Class 1': Class1, 'Class 2': Class2, 'Class 3': Class3, 'Class 4': Class4, 'Class 5': Class5, 'Class 6': Class6, 'Class 7': Class7, 'Class 8': Class8, 'Class 9': Class9, 'Class 10': Class10, 'Num of Pic': Total})
label_file = label_file[['Datasets', 'Class 0', 'Class 1', 'Class 2', 'Class 3', 'Class 4', 'Class 5', 'Class 6', 'Class 7', 'Class 8', 'Class 9', 'Class 10', 'Num of Pic']]
label_file.to_csv('./Dataset_statistics.csv', index=False)
if __name__ == '__main__':
main()
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
322
|
Sssssbo/SDCNet
|
refs/heads/master
|
/SDCNet.py
|
import datetime
import os
import time
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
import pandas as pd
import numpy as np
import joint_transforms
from config import msra10k_path, MTDD_train_path
from datasets import ImageFolder_joint
from misc import AvgMeter, check_mkdir, cal_sc
from model import R3Net, SDCNet
from torch.backends import cudnn
cudnn.benchmark = True
torch.manual_seed(2021)
torch.cuda.set_device(6)
csv_path = './label_DUTS-TR.csv'
ckpt_path = './ckpt'
exp_name = 'SDCNet'
args = {
'iter_num': 30000,
'train_batch_size': 16,
'last_iter': 0,
'lr': 1e-3,
'lr_decay': 0.9,
'weight_decay': 5e-4,
'momentum': 0.9,
'snapshot': ''
}
joint_transform = joint_transforms.Compose([
joint_transforms.RandomCrop(300),
joint_transforms.RandomHorizontallyFlip(),
joint_transforms.RandomRotate(10)
])
img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()
all_data = pd.read_csv(csv_path)
train_set = ImageFolder_joint(all_data, joint_transform, img_transform, target_transform)
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True, drop_last=True)#
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')
def main():
net = SDCNet(num_classes = 5).cuda().train() #
print('training in ' + exp_name)
optimizer = optim.SGD([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': args['lr'], 'weight_decay': args['weight_decay']}
], momentum=args['momentum'])
if len(args['snapshot']) > 0:
print('training resumes from ' + args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '_optim.pth')))
optimizer.param_groups[0]['lr'] = 2 * args['lr']
optimizer.param_groups[1]['lr'] = args['lr']
check_mkdir(ckpt_path)
check_mkdir(os.path.join(ckpt_path, exp_name))
open(log_path, 'w').write(str(args) + '\n\n')
train(net, optimizer)
def train(net, optimizer):
start_time = time.time()
curr_iter = args['last_iter']
num_class = [0, 0, 0, 0, 0]
while True:
total_loss_record, loss0_record, loss1_record, loss2_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
batch_time = AvgMeter()
end = time.time()
print('-----beginning the first stage, train_mode==0-----')
for i, data in enumerate(train_loader):
optimizer.param_groups[0]['lr'] = 2 * args['lr'] * (1 - float(curr_iter) / args['iter_num']
) ** args['lr_decay']
optimizer.param_groups[1]['lr'] = args['lr'] * (1 - float(curr_iter) / args['iter_num']
) ** args['lr_decay']
inputs, gt, labels = data
print(labels)
# depends on the num of classes
cweight = torch.tensor([0.5, 0.75, 1, 1.25, 1.5])
#weight = torch.ones(size=gt.shape)
weight = gt.clone().detach()
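# NOTE: cweight and weight are set up here but are not applied in the loss computation below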
sizec = labels.numpy()
#ta = np.zeros(shape=gt.shape)
'''
np.zeros(shape=labels.shape)
sc = gt.clone().detach()
for i in range(len(sizec)):
gta = np.array(to_pil(sc[i,:].data.squeeze(0).cpu()))#
#print(gta.shape)
labels[i] = cal_sc(gta)
sizec[i] = labels[i]
print(labels)
'''
batch_size = inputs.size(0)
inputs = Variable(inputs).cuda()
gt = Variable(gt).cuda()
labels = Variable(labels).cuda()
#print(sizec.shape)
optimizer.zero_grad()
p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11 = net(inputs, sizec) # mode=1
criterion = nn.BCEWithLogitsLoss().cuda()
criterion2 = nn.CrossEntropyLoss().cuda()
gt2 = gt.long()
gt2 = gt2.squeeze(1)
l5 = criterion2(p5, gt2)
l4 = criterion2(p4, gt2)
l3 = criterion2(p3, gt2)
l2 = criterion2(p2, gt2)
l1 = criterion2(p1, gt2)
loss0 = criterion(predict11, gt)
loss10 = criterion(predict10, gt)
loss9 = criterion(predict9, gt)
loss8 = criterion(predict8, gt)
loss7 = criterion(predict7, gt)
loss6 = criterion(predict6, gt)
loss5 = criterion(predict5, gt)
loss4 = criterion(predict4, gt)
loss3 = criterion(predict3, gt)
loss2 = criterion(predict2, gt)
loss1 = criterion(predict1, gt)
total_loss = l1 + l2 + l3 + l4 + l5 + loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6 + loss7 + loss8 + loss9 + loss10
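# total loss: five cross-entropy terms on the size-class maps (p1-p5) plus eleven BCE-with-logits terms on the saliency maps (predict1-predict11)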
total_loss.backward()
optimizer.step()
total_loss_record.update(total_loss.item(), batch_size)
loss1_record.update(l5.item(), batch_size)
loss0_record.update(loss0.item(), batch_size)
curr_iter += 1.0
batch_time.update(time.time() - end)
end = time.time()
log = '[iter %d], [R1/Mode0], [total loss %.5f]\n' \
'[l5 %.5f], [loss0 %.5f]\n' \
'[lr %.13f], [time %.4f]' % \
(curr_iter, total_loss_record.avg, loss1_record.avg, loss0_record.avg, optimizer.param_groups[1]['lr'],
batch_time.avg)
print(log)
print('Num of class:', num_class)
open(log_path, 'a').write(log + '\n')
if curr_iter == args['iter_num']:
torch.save(net.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % curr_iter))
torch.save(optimizer.state_dict(),
os.path.join(ckpt_path, exp_name, '%d_optim.pth' % curr_iter))
total_time = time.time() - start_time
print(total_time)
return
if __name__ == '__main__':
main()
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
323
|
Sssssbo/SDCNet
|
refs/heads/master
|
/model/backbones/resnet.py
|
import math
import torch
from torch import nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class GDN_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(GDN_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1_0 = nn.BatchNorm2d(
planes, affine=False, track_running_stats=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2_0 = nn.BatchNorm2d(
planes, affine=False, track_running_stats=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3_0 = nn.BatchNorm2d(
planes * 4, affine=False, track_running_stats=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.in1 = nn.InstanceNorm2d(planes)
self.in2 = nn.InstanceNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out1 = torch.zeros_like(out)
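# Group-wise whitening (training only): the batch is assumed to be laid out in three fixed groups (samples 0-7, 8-15, 16-end), each normalized separately by a parameter-free BatchNorm; plain InstanceNorm is used at test time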
if self.training:
#print("training with gdn block")
out1[:8] = self.bn1_0(out[:8])
out1[8:16] = self.bn1_0(out[8:16])
out1[16:] = self.bn1_0(out[16:])
else:
#print("test for gdn block")
out1 = self.in1(out)
out = self.bn1(out1)
out = self.relu(out)
out = self.conv2(out)
out1 = torch.zeros_like(out)
if self.training:
out1[:8] = self.bn2_0(out[:8])
out1[8:16] = self.bn2_0(out[8:16])
out1[16:] = self.bn2_0(out[16:])
else:
out1 = self.in1(out)
out = self.bn2(out1)
out = self.relu(out)
out = self.conv3(out)
out1 = torch.zeros_like(out)
if self.training:
out1[:8] = self.bn3_0(out[:8])
out1[8:16] = self.bn3_0(out[8:16])
out1[16:] = self.bn3_0(out[16:])
else:
out1 = self.in2(out)
out = self.bn3(out1)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class IN_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(IN_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.in1_0 = nn.InstanceNorm2d(planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.in2_0 = nn.InstanceNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3_0 = nn.InstanceNorm2d(planes * 4)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.in1_0(out)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.in2_0(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.in3_0(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class IN2_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(IN2_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.in1_0 = nn.InstanceNorm2d(planes)
self.conv1_1 = nn.Sequential(
nn.Conv2d(planes * 2, planes, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(planes), nn.ReLU(inplace=True)
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.in2_0 = nn.InstanceNorm2d(planes)
self.conv2_1 = nn.Sequential(
nn.Conv2d(planes * 2, planes, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(planes), nn.ReLU(inplace=True)
)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3_0 = nn.InstanceNorm2d(planes * 4)
self.conv3_1 = nn.Sequential(
nn.Conv2d(planes * 8, planes * 4, kernel_size=1, bias=False), nn.BatchNorm2d(planes * 4)
)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
x1 = self.conv1(x)
out1 = self.in1_0(x1)
out1 = self.bn1(out1)
out1 = self.relu(out1)
x1 = self.conv1_1(torch.cat((out1,x1),1))
x2 = self.conv2(x1)
out2 = self.in2_0(x2)
out2 = self.bn2(out2)
out2 = self.relu(out2)
x2 = self.conv2_1(torch.cat((out2,x2),1))
x3 = self.conv3(x2)
out3 = self.in3_0(x3)
out3 = self.bn3(out3)
out3 = self.relu(out3)
x3 = self.conv3_1(torch.cat((out3,x3),1))
if self.downsample is not None:
residual = self.downsample(residual)
x3 += residual
x3 = self.relu(x3)
return x3
class SNR_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(SNR_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.in1_0 = nn.InstanceNorm2d(planes)
self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn1_1 = nn.BatchNorm2d(planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.in2_0 = nn.InstanceNorm2d(planes)
self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn2_1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3_0 = nn.InstanceNorm2d(planes * 4)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
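        # style-normalization/restitution style block: instance norm strips
        # instance-specific statistics, and the removed part (x - IN(x)) is
        # re-encoded by a small conv branch and added back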
residual = x
x1 = self.conv1(x)
out1 = self.in1_0(x1)
res1 = x1 - out1
res1 = self.conv1_1(res1)
res1 = self.bn1_1(res1)
res1 = self.relu(res1)
x1 = self.bn1(x1)
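        # note: this bn1 output is overwritten by the next assignment (bn2 below
        # likewise); compare SNR2_Bottleneck, which applies BN after the sum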
x1 = out1 + res1
x1 = self.relu(x1)
x2 = self.conv2(x1)
out2 = self.in2_0(x2)
res2 = x2 - out2
res2 = self.conv2_1(res2)
res2 = self.bn2_1(res2)
res2 = self.relu(res2)
x2 = self.bn2(x2)
x2 = out2 + res2
x2 = self.relu(x2)
x3 = self.conv3(x2)
x3 = self.bn3(x3)
if self.downsample is not None:
residual = self.downsample(residual)
x3 += residual
x3 = self.relu(x3)
return x3
class SNR2_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(SNR2_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.in1_0 = nn.InstanceNorm2d(planes)
self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn1_1 = nn.BatchNorm2d(planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.in2_0 = nn.InstanceNorm2d(planes)
self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn2_1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3_0 = nn.InstanceNorm2d(planes * 4)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
def forward(self, x):
residual = x
x1 = self.conv1(x)
out1 = self.in1_0(x1)
res1 = x1 - out1
res1 = self.conv1_1(res1)
res1 = self.bn1_1(res1)
res1 = self.relu(res1)
x1 = out1 + res1
x1 = self.bn1(x1)
x1 = self.relu(x1)
x2 = self.conv2(x1)
out2 = self.in2_0(x2)
if self.stride == 2: res1 = self.maxpool(res1)
res2 = x2 - out2 + res1
res2 = self.conv2_1(res2)
res2 = self.bn2_1(res2)
res2 = self.relu(res2)
x2 = out2 + res2
x2 = self.bn2(x2)
x2 = self.relu(x2)
x3 = self.conv3(x2)
x3 = self.bn3(x3)
if self.downsample is not None:
residual = self.downsample(residual)
x3 += residual
x3 = self.relu(x3)
return x3
class SNR3_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(SNR3_Bottleneck, self).__init__()
self.in1 = nn.InstanceNorm2d(planes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn1_1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn2_1 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3 = nn.InstanceNorm2d(planes * 4)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
def forward(self, x, x_2=None, x_1=None, r2=None, r1=None):
if type(x) is tuple:
# print(len(x))
x_2 = x[1]
x_1 = x[2]
r2 = x[3]
r1 = x[4]
x = x[0]
residual = x
x1 = self.conv1(x)
out1 = self.in1(x1)
res1 = x1 - out1
res1 = self.conv1_1(res1)
res1 = self.bn1_1(res1)
res1 = self.relu(res1)
# print(out1.shape)
# print(res1.shape)
# print(x1.shape)
x1 = out1 + res1
x1 = self.bn1(x1)
x1 = self.relu(x1)
x2 = self.conv2(x1)
out2 = self.in1(x2)
res2 = x2 - out2
res2 = self.conv2_1(res2)
res2 = self.bn2_1(res2)
res2 = self.relu(res2)
x2 = out2 + res2
x2 = self.bn2(x2)
x2 = self.relu(x2)
x3 = self.conv3(x2)
x3 = self.bn3(x3)
if self.downsample is not None:
residual = self.downsample(residual)
x3 += residual
x3 = self.relu(x3)
if x_2 is not None: x2 = x2 + x_2
if x_1 is not None: x1 = x1 + x_1
if r2 is not None: res2 = res2 + r2
if r1 is not None: res1 = res1 + r1
'''
print(x3.shape)
print(x2.shape)
print(x1.shape)
print(res2.shape)
print(res1.shape)
'''
if self.stride == 2:
x1 = self.maxpool(x1)
res1 = self.maxpool(res1)
return x3, x2, x1, res2, res1
class SNR4_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(SNR4_Bottleneck, self).__init__()
self.in1 = nn.InstanceNorm2d(planes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn1_1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn2_1 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3 = nn.InstanceNorm2d(planes * 4)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
def forward(self, x, x_2=None, x_1=None, r2=None, r1=None):
if type(x) is tuple:
# print(len(x))
x_2 = x[1]
x_1 = x[2]
r2 = x[3]
r1 = x[4]
x = x[0]
residual = x
x1 = self.conv1(x)
out1 = self.in1(x1)
res1 = x1 - out1
res1 = self.conv1_1(res1)
res1 = self.bn1_1(res1)
res1 = self.relu(res1)
# print(out1.shape)
# print(res1.shape)
# print(x1.shape)
x1 = out1 + res1
x1 = self.bn1(x1)
x1 = self.relu(x1)
x2 = self.conv2(x1)
out2 = self.in1(x2)
res2 = x2 - out2
res2 = self.conv2_1(res2)
res2 = self.bn2_1(res2)
res2 = self.relu(res2)
x2 = out2 + res2
x2 = self.bn2(x2)
x2 = self.relu(x2)
x3 = self.conv3(x2)
x3 = self.bn3(x3)
if self.downsample is not None:
residual = self.downsample(residual)
x3 += residual
x3 = self.relu(x3)
if x_2 is not None: x2 = x2 + x_2
if x_1 is not None: x1 = x1 + x_1
if r2 is not None: res2 = res2 + r2
if r1 is not None: res1 = res1 + r1
'''
print(x3.shape)
print(x2.shape)
print(x1.shape)
print(res2.shape)
print(res1.shape)
'''
if self.stride == 2:
x1 = self.maxpool(x1)
res1 = self.maxpool(res1)
return x3, x2, x1, res2, res1
# --------------------------------- resnet-----------------------------------
class ResNet(nn.Module):
def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x, camid=None):
x = self.conv1(x)
x = self.bn1(x)
# x = self.relu(x) # add missed relu
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# ---------------------------------Comb resnet-----------------------------------
class Comb_ResNet(nn.Module):
def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.in1 = nn.InstanceNorm2d(64)
self.bn1_1 = nn.BatchNorm2d(64)
self.conv2 = nn.Sequential(
nn.Conv2d(128, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=1)
)
self.in2 = nn.InstanceNorm2d(256)
self.bn2_1 = nn.BatchNorm2d(256)
self.conv3 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=1)
)
self.in3 = nn.InstanceNorm2d(512)
self.bn3_1 = nn.BatchNorm2d(512)
self.conv4 = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=1)
)
self.in4 = nn.InstanceNorm2d(1024)
self.bn4_1 = nn.BatchNorm2d(1024)
self.conv5 = nn.Sequential(
nn.Conv2d(2048, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(),
nn.Conv2d(1024, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(),
nn.Conv2d(1024, 1024, kernel_size=1)
)
self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x, camid=None):
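        # after each stage an instance-normalized copy of the feature map is
        # concatenated with the original and fused back through a small conv block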
x = self.conv1(x)
x = self.bn1(x)
# x = self.relu(x) # add missed relu
x = self.maxpool(x)
xin = self.in1(x)
xin = self.bn1_1(xin)
xin = self.relu(xin)
x = self.conv2(torch.cat((xin,x),1))
x = self.layer1(x)
xin = self.in2(x)
xin = self.bn2_1(xin)
xin = self.relu(xin)
x = self.conv3(torch.cat((xin,x),1))
x = self.layer2(x)
xin = self.in3(x)
xin = self.bn3_1(xin)
xin = self.relu(xin)
x = self.conv4(torch.cat((xin,x),1))
x = self.layer3(x)
xin = self.in4(x)
xin = self.bn4_1(xin)
xin = self.relu(xin)
x = self.conv5(torch.cat((xin,x),1))
x = self.layer4(x)
return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# ---------------------------------Pure resnet-----------------------------------
class Pure_ResNet(nn.Module):
def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x, camid=None):
x = self.conv1(x)
x = self.bn1(x)
#print(camid)
# x = self.relu(x) # add missed relu
x = self.maxpool(x)
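        # the disabled branch below would unpack the 5-tuples returned by
        # SNR3/SNR4-style blocks; with plain Bottleneck blocks the else path runs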
if False:
x,_,_,_,_ = self.layer1(x)
x,_,_,_,_ = self.layer2(x)
x,_,_,_,_ = self.layer3(x)
x,_,_,_,_ = self.layer4(x)
else:
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# ---------------------------------jointin resnet-----------------------------------
class Jointin_ResNet(nn.Module):
def __init__(self, last_stride=2, block=SNR3_Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.conv1_1 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.bn1_1 = nn.BatchNorm2d(64)
self.in1 = nn.InstanceNorm2d(64)
# self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x, camid=None):
x = self.conv1(x)
x0 = self.in1(x)
'''
res0 = x - x0
res0 = self.conv1_1(res0)
res0 = self.bn1_1(res0)
x0 = x0 + res0
'''
x0 = self.bn1(x0)
# x = self.relu(x) # add missed relu
x0 = self.maxpool(x0)
x1_3, x1_2, x1_1, res1_2, res1_1 = self.layer1(x0)
x2_3, x2_2, x2_1, res2_2, res2_1 = self.layer2(x1_3)
x3_3, x3_2, x3_1, res3_2, res3_1 = self.layer3(x2_3)
x4_3, x4_2, x4_1, res4_2, res4_1 = self.layer4(x3_3)
if self.training:
return x4_3, x4_2, x4_1, res4_2, res4_1, x3_3, x3_2, x3_1, res3_2, res3_1, x2_3, x2_2, x2_1, res2_2, res2_1, x1_3, x1_2, x1_1, res1_2, res1_1
else:
return x4_3
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# ---------------------------------jointout resnet-----------------------------------
class Jointout_ResNet(nn.Module):
def __init__(self, last_stride=2, block=SNR3_Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.conv1_res = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace = True),
nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace = True),
nn.Conv2d(64, 64, kernel_size=1)
)
self.in1 = nn.InstanceNorm2d(64)
self.bn1 = nn.BatchNorm2d(64)
self.bn1_1 = nn.BatchNorm2d(64)
self.in2 = nn.InstanceNorm2d(256)
self.bn2_1 = nn.BatchNorm2d(256)
self.bn2_0 = nn.BatchNorm2d(256)
self.in3 = nn.InstanceNorm2d(512)
self.bn3_1 = nn.BatchNorm2d(512)
self.bn3_0 = nn.BatchNorm2d(512)
self.in4 = nn.InstanceNorm2d(1024)
self.bn4_1 = nn.BatchNorm2d(1024)
self.bn4_0 = nn.BatchNorm2d(1024)
self.in5 = nn.InstanceNorm2d(2048)
self.bn5_1 = nn.BatchNorm2d(2048)
self.bn5_0 = nn.BatchNorm2d(2048)
self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv2_res = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.ReLU(inplace = True),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.ReLU(inplace = True),
nn.Conv2d(128, 256, kernel_size=1)
)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.conv3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv3_res = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace = True),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace = True),
nn.Conv2d(256, 512, kernel_size=1)
)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.conv4 = nn.Conv2d(1024, 1024, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv4_res = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace = True),
nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace = True),
nn.Conv2d(512, 1024, kernel_size=1)
)
self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride)
self.conv5 = nn.Conv2d(2048, 2048, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv5_res = nn.Sequential(
nn.Conv2d(2048, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(inplace = True),
nn.Conv2d(1024, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(inplace = True),
nn.Conv2d(1024, 2048, kernel_size=1)
)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x, camid=None):
x = self.conv1(x)
x0 = self.in1(x)
res0 = x - x0
x0 = self.bn1(x0)
x0 = self.relu(x0)
res0 = self.conv1_res(res0)
x0 = x0 + res0
x0 = self.bn1_1(x0)
# x = self.relu(x) # add missed relu
x0 = self.maxpool(x0)
x1 = self.layer1(x0)
px1 = self.conv2(x1)
x1 = self.in2(px1)
res1 = px1 - x1
x1 = self.bn2_0(x1)
x1 = self.relu(x1)
res1 = self.conv2_res(res1)
x1 = x1 + res1
x1 = self.bn2_1(x1)
x1 = self.relu(x1)
x2 = self.layer2(x1)
px2 = self.conv3(x2)
x2 = self.in3(px2)
res2 = px2 - x2
x2 = self.bn3_0(x2)
x2 = self.relu(x2)
res2 = self.conv3_res(res2)
x2 = x2 + res2
x2 = self.bn3_1(x2)
x2 = self.relu(x2)
x3 = self.layer3(x2)
px3 = self.conv4(x3)
x3 = self.in4(px3)
res3 = px3 - x3
x3 = self.bn4_0(x3)
x3 = self.relu(x3)
res3 = self.conv4_res(res3)
x3 = x3 + res3
x3 = self.bn4_1(x3)
x3 = self.relu(x3)
x4 = self.layer4(x3)
px4 = self.conv5(x4)
x4 = self.in5(px4)
res4 = px4 - x4
x4 = self.bn5_0(x4)
x4 = self.relu(x4)
res4 = self.conv5_res(res4)
x4 = x4 + res4
x4 = self.bn5_1(x4)
x4 = self.relu(x4)
if self.training:
return x0, x1, x2, x3, x4, res0, res1, res2, res3, res4
else:
return x4
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
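# Illustrative usage sketch (not part of the original file): build a plain
# ResNet-50 backbone with the standard Bottleneck block and run a dummy forward
# pass. The 2x3x256x128 input is only an assumption (a common re-ID input size).
if __name__ == '__main__':
    model = ResNet(last_stride=1, block=Bottleneck, layers=[3, 4, 6, 3])
    model.random_init()
    model.eval()
    with torch.no_grad():
        feat = model(torch.randn(2, 3, 256, 128))
    print(feat.shape)  # torch.Size([2, 2048, 16, 8])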
|
{"/infer_SDCNet.py": ["/misc.py", "/datasets.py", "/model.py"], "/resnet/__init__.py": ["/resnet/make_model.py"], "/resnet/make_model.py": ["/resnet/config.py"], "/model/make_model.py": ["/model/backbones/resnet.py"], "/model.py": ["/resnext/__init__.py"], "/create_free.py": ["/misc.py", "/datasets.py", "/model.py"], "/SDCNet.py": ["/datasets.py", "/misc.py", "/model.py"]}
|
326
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/app/api/videoApi.py
|
import os
from flask import Flask, request, redirect, \
url_for, session, jsonify, send_from_directory, make_response, send_file
from . import api
from . import utils
from .. import VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION, VIDEO_EXTENSION, CACHE
from . VideoProcessing import Frame, VideoUploader, VideoDownloader, Filter
from . decorators import parameter_check, url_arg_check, metadata_check
from . errors import InvalidAPIUsage
@api.route('/upload/', methods=['POST'])
@parameter_check(does_return=False, req_c_type='multipart/form-data')
@metadata_check(does_return=False, req_type='video/mp4')
def upload_video():
"""
uploads the video
"""
byteStream = request.files['file']
vu = VideoUploader()
vu.upload_from_bytestream(byteStream)
session['s_id'] = vu.id
f_c = utils.framecount_from_vid_id(vu.id)
session['video_frame_count'] = f_c
session['is_uploaded'] = True
return jsonify({'status' : '201',
'message' : 'video uploaded!'}), 201
@api.route('/preview/', defaults={'frame_idx':1}, methods=['GET'])
@api.route('/preview/<frame_idx>/', methods=['GET', 'POST'])
@parameter_check(does_return=False, req_c_type='application/json')
@url_arg_check(does_return=True, req_type=int, arg='frame_idx', session=session)
def preview_thumbnail(frame_idx):
"""
Preview a frame by index, given filter parameters
"""
if session.get('is_uploaded'):
data = request.get_json()
filter_params = data['filter_params']
session['filter_params'] = filter_params
frame = Frame(session['s_id'])
frame_i = frame.get_by_idx(frame_idx)
filter_frame = Filter(frame_i).run_func(filter_params)
frame.f_save(filter_frame, session['s_id'])
return send_from_directory(directory=f'{FRAMES_UPLOAD_PATH}',
path=f'{session["s_id"]}{IMG_EXTENSION}',
as_attachment=True), 200
raise InvalidAPIUsage('Invalid usage: please upload a video first')
@api.route('/download/', methods=['POST'])
@parameter_check(does_return=True, req_c_type='application/json', session=session)
def download_video(vid_range):
"""
Download a video given filter parameters
"""
if session.get('is_uploaded'):
data = request.get_json()
fps = data['fps']
filter_params = data['filter_params']
frame_count = session['video_frame_count']
vd = VideoDownloader(fps, vid_range)
filter_vid = vd.download(session['s_id'], frame_count, filter_params)
session['is_downloaded'] = True
return send_from_directory(directory=f'{VIDEO_UPLOAD_PATH}',
path=f'{filter_vid}{VIDEO_EXTENSION}',
as_attachment=True), 200
raise InvalidAPIUsage('Invalid usage: please upload a video first')
@api.route('/status/', methods=['GET'])
@parameter_check(req_c_type='application/json')
def status():
"""
    Report the user's progress: upload status, downloaded frame count and download completion
"""
resp = {}
try:
if session['is_uploaded']:
resp["upload"] = "done"
if CACHE.get(f"{session['s_id']}_d"):
d_status = CACHE.get(f"{session['s_id']}_d")
resp["downloaded_frames"] = f'{d_status}/{session["video_frame_count"]}'
if session["is_downloaded"]:
resp["is_downloaded"] = True
except KeyError:
pass
return jsonify({"status" : resp}), 200
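# Illustrative example requests (not part of the original file); host and port
# are assumptions and depend on how the Flask app is served. The session cookie
# from the upload call must be reused for the preview call:
#   curl -c cookies.txt -X POST -F 'file=@clip.mp4;type=video/mp4' \
#        http://localhost:5000/videoApi/v1/upload/
#   curl -b cookies.txt -X POST -H 'Content-Type: application/json' \
#        -d '{"filter_params": {"type": "canny", "thresh1": 50, "thresh2": 150}}' \
#        http://localhost:5000/videoApi/v1/preview/10/ --output preview_frame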
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|
327
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/app/docs/__init__.py
|
from flask_swagger_ui import get_swaggerui_blueprint
swagger_ui = get_swaggerui_blueprint(
'/docs',
'/static/swagger.json',
config={
"app_name": "videoApi"
}
)
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|
328
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/app/api/__init__.py
|
from flask import Blueprint
api = Blueprint('videoApi', __name__)
from . import videoApi, errors, help
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|
329
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/app/main/errors.py
|
from flask import redirect, url_for, jsonify
from . import main
@main.app_errorhandler(404)
def page_not_found(e):
return jsonify(error=str(e)), 404
@main.app_errorhandler(405)
def method_not_allowed(e):
return jsonify(error=str(e)), 405
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|
330
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/app/api/decorators.py
|
from flask import request, jsonify
from functools import wraps
from .errors import InvalidAPIUsage, InvalidFilterParams, IncorrectVideoFormat
"""
Almost like an Architect - makes decorations
"""
def decorator_maker(func):
def param_decorator(fn=None, does_return=None, req_c_type=None, req_type=None, arg=None, session=None):
def deco(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
result = func(does_return, req_c_type, req_type, arg, session)
if does_return:
return fn(result)
return fn(*args, **kwargs)
return wrapper
if callable(fn): return deco(fn)
return deco
return param_decorator
"""
Checks if user input is not out of bounds, and also Content-Type
"""
def wrap_param_check(does_return, req_c_type, req_type, arg, session):
check_content_type(req_c_type)
return check_correct_filter_params(session)
def check_content_type(req_c_type):
if not request.content_type.startswith(req_c_type):
raise InvalidAPIUsage(f'Content-Type should be of type: {req_c_type}', 400)
def check_correct_filter_params(session):
    if request.data:
        data = request.get_json()
        # 'filter_params' must exist before it can be indexed into
        if 'filter_params' not in data:
            raise InvalidFilterParams(1)
        f_params = data['filter_params']
        if 'type' not in f_params:
            raise InvalidFilterParams(1)
if 'download' in request.url:
if 'fps' not in data:
raise InvalidFilterParams(1)
if 'max_f' in f_params and 'min_f' in f_params:
max_fr = session['video_frame_count']
min_f_raw = f_params['min_f']
max_f_raw = f_params['max_f']
if min_f_raw == "": min_f_raw = 0
if max_f_raw == "": max_f_raw = max_fr
min_f = _check_for_req_type(int, min_f_raw, 4)
max_f = _check_for_req_type(int, max_f_raw, 4)
a = check_bounds(min_f_raw, max_fr)
b = check_bounds(max_f_raw, max_fr)
return sorted([a, b])
def _check_for_req_type(req_type, val, ex):
try:
req_type(val)
except Exception:
raise InvalidFilterParams(ex)
return val
parameter_check = decorator_maker(wrap_param_check)
"""
Checks if user input is not out of bounds, and also Content-Type
"""
def wrap_url_arg_check(does_return, req_c_type, req_type, arg, session):
check_arg_urls(req_type, arg)
frame_idx = request.view_args[arg]
return check_bounds(frame_idx, session['video_frame_count'])
def check_arg_urls(req_type, arg):
try:
req_type(request.view_args[arg])
except ValueError:
raise InvalidAPIUsage(f'Content-Type should be of type: {req_type.__name__}', 400)
def check_bounds(frame_idx, max_frames):
f_max = int(max_frames)
f_idx = int(frame_idx)
if f_idx > f_max:
f_idx = f_max-50
elif f_idx < 1:
f_idx = 1
return f_idx
url_arg_check = decorator_maker(wrap_url_arg_check)
"""
Checks Video Metadata
"""
def wrap_metadata_check(does_return, req_c_type, req_type, arg, session):
check_metadata(req_type)
def check_metadata(req_type):
byteStream = request.files['file']
vid_type = byteStream.__dict__['headers'].get('Content-Type')
if vid_type != req_type:
raise IncorrectVideoFormat(1)
metadata_check = decorator_maker(wrap_metadata_check)
"""
Exception handler for non-endpoint functions
"""
def exception_handler(fn=None, ex=None, type=None, pas=False):
    def deco(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            # call fn once and propagate its return value; on failure raise the configured error
            try:
                return fn(*args, **kwargs)
            except Exception:
                if not pas:
                    raise ex(type)
        return wrapper
    if callable(fn): return deco(fn)
    return deco
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|
331
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/app/api/utils.py
|
import cv2
import math
import string
import random
import numpy as np
import skvideo.io
from PIL import Image
from .. import VIDEO_EXTENSION, VIDEO_UPLOAD_PATH, \
FRAMES_UPLOAD_PATH, IMG_EXTENSION, CACHE
FPS = 23.98
SK_CODEC = 'libx264'
def create_vid_path(name):
return f'{VIDEO_UPLOAD_PATH}/{name}{VIDEO_EXTENSION}'
def create_frame_path(name):
return f'{FRAMES_UPLOAD_PATH}/{name}{IMG_EXTENSION}'
def framecount_from_vid_id(video_id):
    video_path = create_vid_path(video_id)
    cap = cv2.VideoCapture(video_path)
    # CAP_PROP_FRAME_COUNT is OpenCV property id 7 (total number of frames)
    return math.floor(cap.get(cv2.CAP_PROP_FRAME_COUNT))
def id_generator(size, chars=string.ascii_lowercase + string.digits) -> str:
return ''.join(random.choice(chars) for _ in range(size))
def create_sk_video_writer(video_f_path, fps = None):
if not fps : fps = FPS
return skvideo.io.FFmpegWriter(video_f_path,
outputdict={'-c:v':SK_CODEC, '-profile:v':'main',
'-pix_fmt': 'yuv420p', '-r':str(fps)})
def set_cache_f_count(s_id: str, ud: str, fc: str) -> None:
CACHE.set(f'{s_id}_{ud}', fc)
def bgr_to_rgb(frame: np.ndarray) -> np.ndarray:
return frame[:, :, ::-1]
def is_greyscale(frame) -> bool:
return frame.ndim == 2
def is_rgb(frame) -> bool:
return frame.ndim == 3
def img_from_greyscale(frame: np.ndarray) -> Image:
return Image.fromarray(frame).convert("L")
def img_from_bgr(frame: np.ndarray) -> Image:
return Image.fromarray(bgr_to_rgb(frame))
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|
332
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/config.py
|
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
"""
"""
SECRET_KEY = os.environ.get('SECRET_KEY')
FLASK_CONFIG = os.environ.get('FLASK_CONFIG')
VIDEO_EXTENSION = os.environ.get('VIDEO_EXTENSION')
VIDEO_WIDTH = os.environ.get('VIDEO_WIDTH')
VIDEO_HEIGHT = os.environ.get('VIDEO_HEIGHT')
IMG_EXTENSION = os.environ.get('IMG_EXTENSION')
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
"""
"""
DEBUG = True
config = {
'development': DevelopmentConfig,
'default': DevelopmentConfig
}
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|
333
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/app/api/VideoProcessing.py
|
from werkzeug.utils import secure_filename
from functools import partial
import subprocess as sp
import time
import skvideo.io
import numpy as np
import threading
import ffmpeg
import shlex
import cv2
import re
from PIL import Image
from werkzeug.datastructures import FileStorage as FStorage
from .. import VIDEO_EXTENSION, VIDEO_WIDTH, VIDEO_HEIGHT, \
VIDEO_UPLOAD_PATH, FRAMES_UPLOAD_PATH, IMG_EXTENSION
from . import utils
from . errors import IncorrectVideoFormat, InvalidFilterParams, InvalidAPIUsage
from . decorators import exception_handler
FRAME_SIZE = VIDEO_WIDTH * VIDEO_HEIGHT * 3
FRAME_WH = (VIDEO_WIDTH, VIDEO_HEIGHT)
FFMPEG_COMMAND = 'ffmpeg -i pipe: -f rawvideo -pix_fmt bgr24 -an -sn pipe: -loglevel quiet'
ID_LEN = 32
class Frame:
def __init__(self, id=None):
self.id = id
@exception_handler(ex=IncorrectVideoFormat, type=2)
def from_bytes(self, in_bytes: bytes) -> np.ndarray:
"""
"""
frame_arr = np.frombuffer(in_bytes, np.uint8)
f_arr = frame_arr.reshape([VIDEO_HEIGHT, VIDEO_WIDTH, 3])
return utils.bgr_to_rgb(f_arr)
def f_save(self, frame: np.ndarray, frame_id: str) -> None:
upload_path = utils.create_frame_path(frame_id)
if utils.is_rgb(frame):
Image.fromarray(frame).save(upload_path)
return
utils.img_from_greyscale(frame).save(upload_path)
return
    def get_by_idx(self, frame_idx):
        vid = utils.create_vid_path(self.id)
        cap = cv2.VideoCapture(vid)
        # CAP_PROP_POS_FRAMES is OpenCV property id 1 (seek to the requested frame)
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)
        _, frame = cap.read()
        return frame
class VideoUploader(Frame):
def __init__(self):
id = utils.id_generator(ID_LEN)
super().__init__(id)
self.frame_count = 0
def upload_from_bytestream(self, byte_stream: FStorage):
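        # the incoming byte stream is piped into an ffmpeg subprocess from a
        # background thread, while this thread reads decoded raw frames from
        # stdout, re-encodes them with scikit-video and saves the first frame
        # as the preview thumbnail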
video_f_path = utils.create_vid_path(self.id)
sk_writer = utils.create_sk_video_writer(video_f_path)
sh_command = shlex.split(FFMPEG_COMMAND)
process = sp.Popen(sh_command, stdin=sp.PIPE, stdout=sp.PIPE, bufsize=10**8)
thread = threading.Thread(target=self._writer, args=(process, byte_stream, ))
thread.start()
while True:
in_bytes = process.stdout.read(FRAME_SIZE)
if not in_bytes: break
frame = self.from_bytes(in_bytes)
self.frame_count += 1
if self.frame_count == 1: self.f_save(frame, self.id)
sk_writer.writeFrame(frame)
thread.join()
sk_writer.close()
def _writer(self, process, byte_stream):
for chunk in iter(partial(byte_stream.read, 1024), b''):
process.stdin.write(chunk)
try:
process.stdin.close()
except (BrokenPipeError):
pass
class Filter:
def __init__(self, img=None):
self.img = img
def applyCanny(self, params):
if 'thresh1' in params and 'thresh2' in params:
gs_img = self.applyGreyScale(params)
return cv2.Canny(gs_img,
int(params['thresh1']),
int(params['thresh2']))
raise InvalidFilterParams(3, 'canny')
    def applyGauss(self, params):
        # both kernel dimensions must be present and odd for cv2.GaussianBlur
        if 'ksize_x' in params and 'ksize_y' in params and \
           params['ksize_x'] % 2 != 0 and \
           params['ksize_y'] % 2 != 0:
            g_img = self.img.copy()
            if np.ndim(g_img) == 3: g_img = utils.bgr_to_rgb(g_img)
            return cv2.GaussianBlur(g_img,
                (int(params["ksize_x"]), int(params["ksize_y"])), 0)
        raise InvalidFilterParams(3, 'gauss')
def applyGreyScale(self, _):
c_img = self.img.copy()
return cv2.cvtColor(c_img, cv2.COLOR_RGB2GRAY)
def applyLaplacian(self, params):
gs_img = self.applyGreyScale(params)
return cv2.Laplacian(gs_img, cv2.CV_8U)
def run_func(self, params):
if params["type"] in self.filter_map:
func = self.filter_map[params["type"]].__get__(self, type(self))
return func(params)
raise InvalidFilterParams(2)
def _default(self, _):
return utils.bgr_to_rgb(self.img)
filter_map = {'canny': applyCanny,
'gauss': applyGauss,
'greyscale': applyGreyScale,
'laplacian': applyLaplacian,
'': _default}
class VideoDownloader(Frame, Filter):
def __init__(self, fps, vid_range=None):
Frame.__init__(self)
Filter.__init__(self)
self.fps = fps
self.vid_range = vid_range
self.curr_f_frame = None
if vid_range:
self.range_min = vid_range[0]
self.range_max = vid_range[1]
def download(self, s_id, tot_video_frames, params):
f_vid_name = f'{s_id}_{params["type"]}'
video_f_path = utils.create_vid_path(f_vid_name)
local_vid = cv2.VideoCapture(utils.create_vid_path(s_id))
vid_writer = utils.create_sk_video_writer(video_f_path, self.fps)
for i in range(tot_video_frames-1):
utils.set_cache_f_count(s_id, 'd', i)
_, curr_frame = local_vid.read()
if curr_frame is None: break
self.img = curr_frame
f_frame = self._filter_apply(i, params)
vid_writer.writeFrame(f_frame)
vid_writer.close()
return f_vid_name
def _filter_apply(self, i, params):
"""
we simply check if a range is given,
then if we get a gs-img from the filter we add three dimensions
"""
if self.vid_range:
if(i >= self.vid_range[0] and
i <= self.vid_range[1]):
f_frame = self.run_func(params)
if not utils.is_rgb(f_frame):
return np.dstack(3*[f_frame])
return f_frame
else:
return self.run_func({"type":""})
else:
return self.run_func(params)
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|
334
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/app/api/help.py
|
from flask import jsonify, request, send_from_directory
from . decorators import parameter_check
from . import api
from .. import HELP_MSG_PATH
import json
AV_EP = ["upload", "preview", "download", "stats", "filters"]
AV_FILTERS = ["canny", "greyscale", "laplacian", "gauss"]
@api.route('/help/', methods=['GET'])
@api.route('/help/<endpts>/', methods=['GET'])
@api.route('/help/filters/<filter_type>/', methods=['GET'])
@parameter_check(req_c_type='application/json')
def help(endpts=None, filter_type=None):
if endpts and endpts in AV_EP:
return jsonify(load_json_from_val(endpts)), 200
elif filter_type and filter_type in AV_FILTERS:
return jsonify(load_json_from_val(filter_type)), 200
else:
return jsonify(load_json_from_val('help')), 200
def load_json_from_val(val):
    # use a context manager so the help-message file is closed after reading
    with open(HELP_MSG_PATH + f'/{val}.json') as f:
        return json.load(f)
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|
335
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/app/__init__.py
|
from flask import Flask
from config import config
from flask_caching import Cache
from flask_swagger_ui import get_swaggerui_blueprint
VIDEO_EXTENSION=None
VIDEO_WIDTH=None
VIDEO_HEIGHT=None
VIDEO_UPLOAD_PATH=None
FRAMES_UPLOAD_PATH=None
IMG_EXTENSION=None
HELP_MSG_PATH=None
CACHE=None
def create_app(config_name):
global VIDEO_EXTENSION
global VIDEO_WIDTH
global VIDEO_HEIGHT
global VIDEO_UPLOAD_PATH
global FRAMES_UPLOAD_PATH
global IMG_EXTENSION
global HELP_MSG_PATH
global CACHE
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
cache = Cache(config={"CACHE_TYPE": "filesystem",
"CACHE_DIR": app.root_path + '/static/cache'})
cache.init_app(app)
CACHE = cache
VIDEO_EXTENSION = app.config["VIDEO_EXTENSION"]
VIDEO_WIDTH = int(app.config["VIDEO_WIDTH"])
VIDEO_HEIGHT = int(app.config["VIDEO_HEIGHT"])
IMG_EXTENSION = app.config["IMG_EXTENSION"]
VIDEO_UPLOAD_PATH = app.root_path + '/static/uploads/videos'
FRAMES_UPLOAD_PATH = app.root_path + '/static/uploads/frames'
HELP_MSG_PATH = app.root_path + '/static/helpmessages'
#TODO: video max dimensions, video max length
from .main import main as main_blueprint
app.register_blueprint(main_blueprint)
from .api import api as api_blueprint
app.register_blueprint(api_blueprint, url_prefix='/videoApi/v1')
from .docs import swagger_ui
app.register_blueprint(swagger_ui, url_prefix="/docs")
return app
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|
336
|
lukasld/Flask-Video-Editor
|
refs/heads/main
|
/app/api/errors.py
|
import sys
import traceback
from flask import jsonify, request
from . import api
class InvalidAPIUsage(Exception):
status_code = 400
def __init__(self, message='', status_code=None):
super().__init__()
self.message = message
self.path = request.path
if status_code is None:
self.status_code = InvalidAPIUsage.status_code
def to_dict(self):
rv = {}
rv['path'] = self.path
rv['status'] = self.status_code
rv['message'] = self.message
return rv
class IncorrectVideoFormat(InvalidAPIUsage):
def __init__(self, message_id):
super().__init__()
self.message = self.msg[message_id]
msg = {1:'Incorrect video type: only RGB - Type=video/mp4 allowed',
2:'Incorrect video dimensions: only 720p supported (1280*720)'}
class InvalidFilterParams(InvalidAPIUsage):
def __init__(self, message_id, filter_name=''):
super().__init__()
self.message = self.msg(message_id, filter_name)
def msg(self, id, filter_name):
        # TODO:Lukas [07252021] messages could be stored in static files as JSON
avail_msg = {1:'Incorrect filter parameters: should be {"fps": "<fps: float>", "filter_params":{"type":"<filter: str>"}} \
or for default preview, {"filter_params":{"type":""}}',
2:f'Incorrect filter parameters: filter does not exist, for more go to /api/v1/help/filters/',
3:f'Incorrect filter parameters: required parameters are missing or invalid, for more go to /api/v1/help/filters/{filter_name}/',
4:f'Incorrect download parameters: for more go to /api/v1/help/download/',
}
return avail_msg[id]
@api.errorhandler(InvalidAPIUsage)
def invalid_api_usage(e):
return jsonify(e.to_dict()), 400
|
{"/app/api/videoApi.py": ["/app/api/__init__.py", "/app/__init__.py", "/app/api/VideoProcessing.py", "/app/api/decorators.py", "/app/api/errors.py"], "/app/api/decorators.py": ["/app/api/errors.py"], "/app/api/utils.py": ["/app/__init__.py"], "/app/api/VideoProcessing.py": ["/app/__init__.py", "/app/api/__init__.py", "/app/api/errors.py", "/app/api/decorators.py"], "/app/api/help.py": ["/app/api/decorators.py", "/app/api/__init__.py", "/app/__init__.py"], "/app/__init__.py": ["/config.py", "/app/api/__init__.py", "/app/docs/__init__.py"], "/app/api/errors.py": ["/app/api/__init__.py"]}
|