| index (int64) | repo_name (string) | branch_name (string) | path (string) | content (string) | import_graph (string) |
|---|---|---|---|---|---|
36
|
Yuliashka/Snake-Game
|
refs/heads/main
|
/food.py
|
from turtle import Turtle
import random
# we want this Food class to inherit from the Turtle class, so it has all the capabilities of
# the Turtle class, plus the specific behaviour we add here
class Food(Turtle):
# creating initializer for this class
def __init__(self):
# we inherit things from the super class:
super().__init__()
# below we are using methods from Turtle class:
self.shape("circle")
self.penup()
# the normal size is 20x20; we stretch the length and width by 0.5 so the food is 10x10
self.shapesize(stretch_len=0.5, stretch_wid=0.5)
self.color("blue")
self.speed("fastest")
# call the refresh method so the food starts at a random location
self.refresh()
def refresh(self):
# our screen is 600x600
# we want to place our food from -280 to 280 in coordinates:
random_x = random.randint(-280, 280)
random_y = random.randint(-280, 280)
# tell the food to go to (random_x, random_y):
self.goto(random_x, random_y)
# All of these methods run as soon as we create a new object
# The food object is initialised in main.py
|
{"/main.py": ["/snake.py", "/food.py", "/scoreboard.py"]}
|
37
|
Yuliashka/Snake-Game
|
refs/heads/main
|
/snake.py
|
from turtle import Turtle
STARTING_POSITIONS = [(0, 0), (-20, 0), (-40, 0)]
MOVE_DISTANCE = 20
UP = 90
DOWN = 270
RIGHT = 0
LEFT = 180
class Snake:
# The code here is going to determine what should happen when we initialize a new snake object
def __init__(self):
# below we create a new attribute for our class
self.segments = []
# We create a snake:
self.create_snake()
self.head = self.segments[0]
# CREATING SNAKE (2 functions)
def create_snake(self):
for position in STARTING_POSITIONS:
# we call add_segment, passing in the position we are looping over
self.add_segment(position)
def add_segment(self, position):
new_segment = Turtle("square")
new_segment.color("white")
new_segment.penup()
new_segment.goto(position)
self.segments.append(new_segment)
# Creating a snake extend function
def extend(self):
# index the segment list from the end to get the snake's last segment,
# read that segment's position with the Turtle position() method,
# then add the new segment at that same position
self.add_segment(self.segments[-1].position())
# Moving the snake: each segment steps to the position of the one in front of it, then the head moves forward
def move(self):
for seg_num in range(len(self.segments)-1, 0, -1):
new_x = self.segments[seg_num - 1].xcor()
new_y = self.segments[seg_num - 1].ycor()
self.segments[seg_num].goto(new_x, new_y)
self.head.forward(MOVE_DISTANCE)
def up(self):
# if the current heading points down, the snake can't turn up
# because it can't reverse onto itself
if self.head.heading() != DOWN:
self.head.setheading(UP)
def down(self):
if self.head.heading() != UP:
self.head.setheading(DOWN)
def left(self):
if self.head.heading() != RIGHT:
self.head.setheading(LEFT)
def right(self):
if self.head.heading() != LEFT:
self.head.setheading(RIGHT)
|
{"/main.py": ["/snake.py", "/food.py", "/scoreboard.py"]}
|
38
|
Yuliashka/Snake-Game
|
refs/heads/main
|
/main.py
|
from turtle import Screen
import time
from snake import Snake
from food import Food
from scoreboard import Score
# SETTING UP THE SCREEN:
screen = Screen()
screen.setup(width=600, height=600)
screen.bgcolor("black")
screen.title("My Snake Game")
# to turn off the screen tracer
screen.tracer(0)
# CREATING A SNAKE OBJECT:
snake = Snake()
# CREATING A FOOD OBJECT:
food = Food()
# CREATING A SCORE OBJECT:
score = Score()
# CREATING A KEY CONTROL:
screen.listen()
# these snake.up / snake.down / snake.left / snake.right methods are defined on the Snake class (UP = 90, DOWN = 270, LEFT = 180, RIGHT = 0)
screen.onkey(key="Up", fun=snake.up)
screen.onkey(key="Down", fun=snake.down)
screen.onkey(key="Left", fun=snake.left)
screen.onkey(key="Right", fun=snake.right)
game_is_on = True
while game_is_on:
# while the game is on, the screen is refreshed every 0.1 seconds:
# update the screen, then pause for 0.1 s before the next move
screen.update()
time.sleep(0.1)
# every time the screen refreshes we get the snake to move forwards by one step
snake.move()
# DETECT COLLISION WITH THE FOOD
# if the snake head is within 15 px of the food, they have collided
if snake.head.distance(food) < 15:
food.refresh()
snake.extend()
print("nom nom nom")
# when the snake collides with the food we increase the score:
score.increase_score()
# # DETECT COLLISION WITH THE TAIL METHOD 1:
# # we can loop through our list of segments in the snake
# for segment in snake.segments:
# # if head has distance from any segment in segments list less than 10 px - that a collision
# # if the head collides with any segment in the tail: trigger GAME OVER
# # the first segment is the head so we should exclude it from the list of segments
# if segment == snake.head:
# pass
# elif snake.head.distance(segment) < 10:
# game_is_on = False
# score.game_over()
# DETECT COLLISION WITH THE TAIL METHOD 2 SLICING:
# we can loop through the snake's segments using Python slicing,
# taking every segment in the list except the first one (the head)
for segment in snake.segments[1:]:
# if the head is less than 10 px from any segment in the list, that's a collision
# if the head collides with any tail segment: trigger GAME OVER
if snake.head.distance(segment) < 10:
game_is_on = False
score.game_over()
# DETECT COLLISION WITH THE WALL
if snake.head.xcor() >280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:
score.game_over()
game_is_on = False
screen.exitonclick()
|
{"/main.py": ["/snake.py", "/food.py", "/scoreboard.py"]}
|
39
|
Yuliashka/Snake-Game
|
refs/heads/main
|
/scoreboard.py
|
from turtle import Turtle
ALIGMENT = "center"
FONT = ("Arial", 18, "normal")
class Score(Turtle):
def __init__(self):
super().__init__()
self.score = 0
self.color("white")
self.penup()
self.goto(0, 270)
self.write(f"Current score: {self.score}", align="center", font=("Arial", 18, "normal"))
self.hideturtle()
self.update_score()
def update_score(self):
self.write(f"Current score: {self.score}", align="center", font=("Arial", 18, "normal"))
def game_over(self):
self.goto(0, 0)
self.write("GAME OVER", align=ALIGMENT, font=FONT)
def increase_score(self):
self.score += 1
# to clear the previous score before we update:
self.clear()
self.update_score()
|
{"/main.py": ["/snake.py", "/food.py", "/scoreboard.py"]}
|
46
|
marcin-mulawa/Water-Sort-Puzzle-Bot
|
refs/heads/main
|
/loading_phone.py
|
import numpy as np
import cv2
import imutils
picture = 'puzzle.jpg'
def load_transform_img(picture):
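# Rough pipeline (as implemented below): read the screenshot, resize to height 800,
# mask the region containing the tubes, run Canny on a blurred grayscale copy,
# fill and re-detect the contours to get one bounding box per tube, then sample four
# small rectangles inside each box (bottom to top) and record their grayscale
# intensity as a colour proxy.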
image = cv2.imread(picture)
image = imutils.resize(image, height=800)
org = image.copy()
#cv2.imshow('orginal', image)
mask = np.zeros(image.shape[:2], dtype = "uint8")
cv2.rectangle(mask, (15, 150), (440, 700), 255, -1)
#cv2.imshow("Mask", mask)
image = cv2.bitwise_and(image, image, mask = mask)
#cv2.imshow("Applying the Mask", image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#cv2.imshow('image', image)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
edged = cv2.Canny(blurred, 140, 230)
#cv2.imshow("Canny", edged)
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
print(len(cnts))
cv2.fillPoly(edged, pts =cnts, color=(255,255,255))
#cv2.imshow('filled', edged)
fedged = cv2.Canny(edged, 140, 230)
#cv2.imshow("fedged", fedged)
(cnts, _) = cv2.findContours(fedged.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
boxes = fedged.copy()
#cv2.drawContours(boxes, cnts, 10, (100 , 200, 100), 2)
#cv2.imshow("Boxes", boxes)
image = cv2.bitwise_and(org, org, mask = edged)
#cv2.imshow("Applying the Mask2", image)
puzzlelist = []
for (i, c) in enumerate(cnts):
(x, y, w, h) = cv2.boundingRect(c)
print("Box #{}".format(i + 1))
box = org[y:y + h, x:x + w]
cv2.imwrite(f'temp/box{i+1}.jpg',box)
#cv2.imshow("Box", box)
gray = cv2.cvtColor(box, cv2.COLOR_BGR2GRAY)
#cv2.imshow("gray", gray)
mask = np.zeros(gray.shape[:2], dtype = "uint8")
y1,y2 = 35, 50
for i in range(4):
cv2.rectangle(mask, (15, y1), (37, y2), 255, -1)
y1,y2 = y1+40, y2+40
#cv2.imshow("Mask2 ", mask)
masked = cv2.bitwise_and(gray, gray, mask = mask)
y1,y2 = 35, 50
temp = []
for i in range(4):
value = masked[y1:y2,15:37]
#cv2.imshow(f'val{i}',value)
max_val = max(value.flatten())
if max_val >= 45:
temp.append(max_val)
y1,y2 = y1+40, y2+40
puzzlelist.append(temp[::-1])
#cv2.waitKey(0)
return puzzlelist[::-1] , len(cnts)
|
{"/auto_puzzle.py": ["/solver.py"], "/solver.py": ["/loading_pc.py"]}
|
47
|
marcin-mulawa/Water-Sort-Puzzle-Bot
|
refs/heads/main
|
/loading_pc.py
|
import numpy as np
import cv2
import imutils
picture = 'puzzle.jpg'
def load_transform_img(picture):
image = cv2.imread(picture)
#image = imutils.resize(image, height=800)
org = image.copy()
#cv2.imshow('orginal', image)
mask = np.zeros(image.shape[:2], dtype = "uint8")
cv2.rectangle(mask, (680, 260), (1160, 910), 255, -1)
#cv2.imshow("Mask", mask)
image = cv2.bitwise_and(image, image, mask = mask)
#cv2.imshow("Applying the Mask", image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#cv2.imshow('image', image)
blurred = cv2.GaussianBlur(image, (5, 5), 0)
edged = cv2.Canny(blurred, 140, 230)
#cv2.imshow("Canny", edged)
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#print(len(cnts))
cv2.fillPoly(edged, pts =cnts, color=(255,255,255))
#cv2.imshow('filled', edged)
fedged = cv2.Canny(edged, 140, 230)
#cv2.imshow("fedged", fedged)
(cnts, _) = cv2.findContours(fedged.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# boxes = fedged.copy()
# cv2.drawContours(boxes, cnts, 10, (100 , 200, 100), 2)
# cv2.imshow("Boxes", boxes)
image = cv2.bitwise_and(org, org, mask = edged)
#cv2.imshow("Applying the Mask2", image)
puzzlelist = []
boxes_positon = []
for (i, c) in enumerate(cnts):
(x, y, w, h) = cv2.boundingRect(c)
#print("Box #{}".format(i + 1))
box = org[y:y + h, x:x + w]
boxes_positon.append( ( (x+x+w)/2, (y+y+h)/2 ) )
cv2.imwrite(f'temp/box{i+1}.jpg',box)
#cv2.imshow("Box", box)
gray = cv2.cvtColor(box, cv2.COLOR_BGR2GRAY)
#cv2.imshow("gray", gray)
mask = np.zeros(gray.shape[:2], dtype = "uint8")
y1,y2 = 45, 60
for i in range(4):
cv2.rectangle(mask, (15, y1), (37, y2), 255, -1)
y1,y2 = y1+45, y2+45
#cv2.imshow("Mask2 ", mask)
masked = cv2.bitwise_and(gray, gray, mask = mask)
#cv2.imshow('Masked', masked)
y1,y2 = 45, 60
temp = []
for i in range(4):
value = masked[y1:y2,15:37]
#cv2.imshow(f'val{i}',value)
max_val = max(value.flatten())
if max_val >= 45:
temp.append(max_val)
y1,y2 = y1+45, y2+45
puzzlelist.append(temp[::-1])
#cv2.waitKey(0)
print(f'Starting position: {puzzlelist[::-1]}\n')
print(f'Box positions: {boxes_positon[::-1]}\n')
return puzzlelist[::-1], boxes_positon[::-1], len(cnts)
if __name__ == '__main__':
answer, boxes_positon, boxes = load_transform_img('level/screen.jpg')
print(answer)
|
{"/auto_puzzle.py": ["/solver.py"], "/solver.py": ["/loading_pc.py"]}
|
48
|
marcin-mulawa/Water-Sort-Puzzle-Bot
|
refs/heads/main
|
/auto_puzzle.py
|
import pyautogui as pya
import solver
import time
import glob
import os
import numpy as np
import cv2
import shutil
path = os.getcwd()
path1 = path + r'/temp'
path2 = path +r'/level'
try:
shutil.rmtree(path1)
except:
pass
try:
os.mkdir('temp')
except:
pass
try:
os.mkdir('level')
except:
pass
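# Automation flow: focus the BlueStacks emulator, open the Water Sort game,
# then for each level take a screenshot, ask the solver for a move list, and
# click the source/target tubes in the order the solver returns.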
bluestacks = pya.locateCenterOnScreen('static/bluestacks.jpg', confidence=.9)
print(bluestacks)
pya.click(bluestacks)
time.sleep(3)
full = pya.locateCenterOnScreen('static/full.jpg', confidence=.8)
pya.click(full)
time.sleep(15)
mojeGry = pya.locateCenterOnScreen('static/mojegry.jpg', confidence=.8)
print(mojeGry)
if mojeGry:
pya.click(mojeGry)
time.sleep(2)
game = pya.locateCenterOnScreen('static/watersort.jpg', confidence=.5)
print(game)
if game:
pya.click(game)
time.sleep(6)
record = pya.locateCenterOnScreen('static/record.jpg', confidence=.8)
for m in range(4):
pya.click(record)
time.sleep(4.5)
for k in range(10):
screenshoot = pya.screenshot()
screenshoot = cv2.cvtColor(np.array(screenshoot), cv2.COLOR_RGB2BGR)
cv2.imwrite("level/screen.jpg", screenshoot)
moves, boxes_position = solver.game_loop(solver.random_agent, "level/screen.jpg")  # game_loop expects an agent and the screenshot path (see solver.py)
print(f'Steps to solve level: {len(moves)}')
print(moves)
for i,j in moves:
pya.click(boxes_position[i])
time.sleep(0.3)
pya.click(boxes_position[j])
pya.sleep(2.5)
next_level = pya.locateCenterOnScreen('static/next.jpg', confidence=.7)
pya.click(next_level)
time.sleep(3)
x_location = pya.locateCenterOnScreen('static/x.jpg', confidence=.7)
if x_location:
pya.click(x_location)
time.sleep(2)
x_location = pya.locateCenterOnScreen('static/x.jpg', confidence=.7)
if x_location:
pya.click(x_location)
time.sleep(2)
pya.click(record)
time.sleep(2)
|
{"/auto_puzzle.py": ["/solver.py"], "/solver.py": ["/loading_pc.py"]}
|
49
|
marcin-mulawa/Water-Sort-Puzzle-Bot
|
refs/heads/main
|
/solver.py
|
from collections import deque
import random
import copy
import sys
import loading_pc
import os
def move(new_list, from_, to):
temp = new_list[from_].pop()
for _i in range(0,4):
if len(new_list[from_])>0 and abs(int(temp) - int(new_list[from_][-1]))<3 and len(new_list[to])<3:
temp = new_list[from_].pop()
new_list[to].append(temp)
new_list[to].append(temp)
return new_list
def possible_moves(table, boxes):
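# A pour (from_, to) is legal when: the source tube is non-empty, the target has room
# (fewer than 4 units), the tubes differ, the target is empty or its top colour is
# within 3 of the source's top colour, the source is not already a full single-colour
# tube, and we never pour a single-colour tube into an empty one.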
pos=[]
for i in range(0, boxes):
for j in range(0, boxes):
pos.append((i,j))
possible = []
for from_, to in pos:
if (len(table[from_])>=1 and len(table[to])<4 and to != from_
and (len(table[to]) == 0 or (abs(int(table[from_][-1]) - int(table[to][-1]))<3))
and not (len(table[from_])==4 and len(set(table[from_]))==1)
and not (len(set(table[from_]))==1 and len(table[to]) ==0)):
possible.append((from_,to))
return possible
def check_win(table):
temp = []
not_full =[]
for i in table:
temp.append(len(set(i)))
if len(i)<4:
not_full.append(i)
if len(not_full)>2:
return False
for i in temp:
if i>1:
return False
print(table)
return True
def game_loop(agent, picture):
table, boxes_position, boxes = loading_pc.load_transform_img(picture)
print(len(boxes_position))
answer = agent(table, boxes)
return answer, boxes_position
def random_agent(table, boxes):
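# Random-restart search: repeatedly play random legal moves on a copy of the table,
# allowing up to boxes*k moves per attempt; k grows every 1000 attempts. When no legal
# move remains, check for a win and return the recorded move list if solved.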
k=5
l=0
while True:
print(l)
table_copy = copy.deepcopy(table)
if l%1000 == 0:
k+=1
correct_moves = []
for i in range(boxes*k):
pmove = possible_moves(table_copy, boxes)
if len(pmove) == 0:
win = check_win(table_copy)
if win:
return correct_moves
else:
break
x, y = random.choice(pmove)
table_copy = move(table_copy, x, y)
correct_moves.append((x,y))
l+=1
if __name__ == '__main__':
answer, boxes_position = game_loop(random_agent, 'level/screen.jpg')
print('answer', answer)
|
{"/auto_puzzle.py": ["/solver.py"], "/solver.py": ["/loading_pc.py"]}
|
54
|
TheDinner22/lightning-sim
|
refs/heads/main
|
/lib/board.py
|
# represent the "board" in code
# dependencies
import random
class Board:
def __init__(self, width=10):
self.width = width
self.height = width * 2
self.WALL_CHANCE = .25
self.FLOOR_CHANCE = .15
# create the grid
self.create_random_grid()
def create_random_grid(self):
# reset old grid
self.grid = []
# generate cells for new grid
for i in range(self.width * self.height):
# is the cell at the left, right, top, or bottom?
is_left = True if i % self.width == 0 else False
is_right = True if i % self.width == self.width-1 else False
is_top = True if i < self.width else False
is_bottom = True if i >= (self.width * self.height - self.width) else False
# create the cell
cell = {
"left" : is_left,
"right" : is_right,
"roof" : is_top,
"floor" : is_bottom,
"ID" : i
}
# append to grid
self.grid.append(cell)
# randomly generate walls
total = self.width * self.height
horizontal_amount = int(total * self.FLOOR_CHANCE)
verticle_amount = int(total * self.WALL_CHANCE)
# generate the walls
for _i in range(verticle_amount):
random_index = random.randrange(0, total)
adding_num = -1 if random_index == total - 1 else 1
first = "right" if adding_num == 1 else "left"
second = "right" if first == "left" else "left"
self.grid[random_index][first] = True
self.grid[random_index + adding_num][second] = True
# generate the floors
for _i in range(horizontal_amount):
random_index = random.randrange(0, total)
adding_num = self.width * -1 if random_index > (total - self.width) else self.width
first = "floor" if adding_num == self.width else "roof"
second = "floor" if first == "roof" else "roof"
self.grid[random_index][first] = True
self.grid[random_index + adding_num - 1][second] = True
def can_move_from(self, cell_index):
# TODO this works but its a lot of repeated code. Can it be made better?
# can you move left
can_move_left = False
is_left = True if cell_index % self.width == 0 else False
if not is_left and self.grid[cell_index]["left"] == False:
left_cell = self.grid[cell_index - 1]
is_wall_left = True if left_cell["right"] == True else False
can_move_left = True if not is_wall_left else False
# can you move right
can_move_right = False
is_right = True if cell_index % self.width == self.width-1 else False
if not is_right and self.grid[cell_index]["right"] == False:
right_cell = self.grid[cell_index + 1]
is_wall_right = True if right_cell["left"] == True else False
can_move_right = True if not is_wall_right else False
# can you move up
can_move_up = False
is_top = True if cell_index < self.width else False
if not is_top and self.grid[cell_index]["roof"] == False:
top_cell = self.grid[cell_index - self.width]
is_wall_top = True if top_cell["floor"] == True else False
can_move_up = True if not is_wall_top else False
# can you move down
can_move_down = False
is_bottom = True if cell_index >= (self.width * self.height - self.width) else False
if not is_bottom and self.grid[cell_index]["floor"] == False:
bottom_cell = self.grid[cell_index + self.width]
is_wall_bottom = True if bottom_cell["roof"] == True else False
can_move_down = True if not is_wall_bottom else False
# return the results
return can_move_left, can_move_right, can_move_up, can_move_down
def BFS(self):
"""breadth first search to find the quickest way to the bottom"""
start_i = random.randrange(0,self.width)
paths = [ [start_i] ]
solved = False
dead_ends = []
while not solved:
for path in paths:
# find all possibles moves from path
if len(dead_ends) >= len(paths) or len(paths) > 10000: # TODO this solution sucks
return False, False
# NOTE order is left right up down
if path[-1] >= (self.width * self.height - self.width):
solved = True
return paths, paths.index(path)
possible_moves = self.can_move_from(path[-1])
if True in possible_moves:
move_order = [-1, 1, (self.width) * -1, self.width]
first_append_flag = False
origonal_path = path.copy()
for i in range(4):
possible_move = possible_moves[i]
if possible_move:
move = move_order[i]
next_index = origonal_path[-1] + move
if not next_index in origonal_path:
if not first_append_flag:
path.append(next_index)
first_append_flag = True
else:
new_path = origonal_path.copy()
new_path.append(next_index)
paths.append(new_path)
if not first_append_flag:
dead_ends.append(paths.index(path))
else:
dead_ends.append(paths.index(path))
def pretty_print_BFS(self, path):
for i in range(self.width * self.height):
cell = self.grid[i]
in_path = True if cell["ID"] in path else False
number_str = str(i)
if len(number_str) == 1:
number_str += " "
elif len(number_str) == 2:
number_str += " "
end_str = "\n" if i % self.width == self.width-1 else " "
if in_path:
print('\033[92m' + number_str + '\033[0m', end=end_str)
else:
print(number_str, end=end_str)
print(path)
if __name__ == "__main__":
b = Board(10)
paths, index = b.BFS()
if paths is not False:
b.pretty_print_BFS(paths[index])
else:
print('no path found')
# can_move_left, can_move_right, can_move_up, can_move_down = b.can_move_from(0)
# print("can_move_left ", can_move_left)
# print("can_move_right ", can_move_right)
# print("can_move_up ", can_move_up)
# print("can_move_down ", can_move_down)
|
{"/main.py": ["/lib/board.py", "/lib/window.py"]}
|
55
|
TheDinner22/lightning-sim
|
refs/heads/main
|
/lib/window.py
|
# use pygame to show the board on a window
# dependencies
import pygame, random
class Window:
def __init__(self, board):
# init py game
pygame.init()
# width height
self.WIDTH = 600
self.HEIGHT = 600
# different display modes
self.display_one = False
self.display_all = False
# placeholders for the BFS solution and the display counter
self.solution = []
self.display_all_c = 0
# the board to display on the window
self.board = board
# define the dimensions of the cells of the board
self.cell_width = self.WIDTH // self.board.width
# define the left padding for the grid
total_width = self.cell_width * self.board.width
self.left_padding = (self.WIDTH - total_width) // 2
# colors
self.COLORS = {
"BLACK" : (255, 255, 255),
"GREY" : (230, 230, 230),
"BLUE" : (0, 0, 255),
"RED" : (255, 0, 0),
"YELLOW" : (212, 175, 55)
}
def create_random_color(self):
return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def create_window(self):
# define window
self.WIN = pygame.display.set_mode( (self.WIDTH, self.HEIGHT) )
# name window
pygame.display.set_caption("LIGHT NING")
# logo/icon for window
logo = pygame.image.load("images/logo.png")
pygame.display.set_icon(logo)
def get_BFS(self):
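# keep generating new random grids until BFS actually finds a path to the bottom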
solved = False
while not solved:
self.board.create_random_grid()
paths, index = self.board.BFS()
if paths is not False:
self.solution = paths[index]
solved = True
self.paths = paths
self.solution_i = index
def draw_grid_solution(self):
fflag = True
for i in range(self.board.width * self.board.height):
if not i in self.solution: continue
# might not work
col_num = i % self.board.width
row_num = i // self.board.width
x_pos = self.left_padding + (col_num * self.cell_width)
y_pos = row_num * self.cell_width
# define rect
r = pygame.Rect(x_pos, y_pos, self.cell_width, self.cell_width)
# draw the rectangle
pygame.draw.rect(self.WIN, self.COLORS["YELLOW"], r)
def draw_BFS(self):
if self.display_all_c >= len(self.paths):
self.display_all_c = 0
# generate a color for each path
path_colors = []
for path in self.paths:
path_colors.append(self.create_random_color())
path_colors[-1] = (0, 0 ,0)
temp = self.paths.pop(self.display_all_c)
self.paths.append(temp)
for path in self.paths:
for i in path:
# might not work
col_num = i % self.board.width
row_num = i // self.board.width
x_pos = self.left_padding + (col_num * self.cell_width)
y_pos = row_num * self.cell_width
# define rect
r = pygame.Rect(x_pos, y_pos, self.cell_width, self.cell_width)
# draw the rectangle
pygame.draw.rect(self.WIN, path_colors[self.paths.index(path)], r)
self.display_all_c += 1
def draw_window(self):
self.WIN.fill(self.COLORS["GREY"])
if self.display_one:
self.draw_grid_solution()
elif self.display_all:
self.draw_BFS()
pygame.display.update()
def main(self):
# create window
self.create_window()
self.running = True
while self.running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_0:
self.get_BFS()
elif event.key == pygame.K_1:
# toggle display one
self.display_one = not self.display_one
if self.display_one:
self.display_all = False
elif event.key == pygame.K_2:
# toggle display all
self.display_all = not self.display_all
if self.display_all:
self.display_all_c = 0
self.display_one = False
self.draw_window()
if __name__ == "__main__":
win = Window()
win.main()
|
{"/main.py": ["/lib/board.py", "/lib/window.py"]}
|
56
|
TheDinner22/lightning-sim
|
refs/heads/main
|
/main.py
|
# this is a quick proof-of-concept entry point; it could and will be made better later
import os, sys
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # adds project dir to places it looks for the modules
sys.path.append(BASE_PATH)
from lib.board import Board
from lib.window import Window
b = Board()
win = Window(b)
win.main()
|
{"/main.py": ["/lib/board.py", "/lib/window.py"]}
|
79
|
igoryuha/wct
|
refs/heads/master
|
/eval.py
|
import torch
from models import NormalisedVGG, Decoder
from utils import load_image, preprocess, deprocess, extract_image_names
from ops import style_decorator, wct
import argparse
import os
parser = argparse.ArgumentParser(description='WCT')
parser.add_argument('--content-path', type=str, help='path to the content image')
parser.add_argument('--style-path', type=str, help='path to the style image')
parser.add_argument('--content-dir', type=str, help='path to the content image folder')
parser.add_argument('--style-dir', type=str, help='path to the style image folder')
parser.add_argument('--style-decorator', type=int, default=1)
parser.add_argument('--kernel-size', type=int, default=12)
parser.add_argument('--stride', type=int, default=1)
parser.add_argument('--alpha', type=float, default=0.8)
parser.add_argument('--ss-alpha', type=float, default=0.6)
parser.add_argument('--synthesis', type=int, default=0, help='0-transfer, 1-synthesis')
parser.add_argument('--encoder-path', type=str, default='encoder/vgg_normalised_conv5_1.pth')
parser.add_argument('--decoders-dir', type=str, default='decoders')
parser.add_argument('--save-dir', type=str, default='./results')
parser.add_argument('--save-name', type=str, default='result', help='save name for single output image')
parser.add_argument('--save-ext', type=str, default='jpg', help='The extension name of the output image')
parser.add_argument('--content-size', type=int, default=768, help='New (minimum) size for the content image')
parser.add_argument('--style-size', type=int, default=768, help='New (minimum) size for the style image')
parser.add_argument('--gpu', type=int, default=0, help='ID of the GPU to use; for CPU mode set --gpu = -1')
args = parser.parse_args()
assert args.content_path is not None or args.content_dir is not None, \
'Either --content-path or --content-dir should be given.'
assert args.style_path is not None or args.style_dir is not None, \
'Either --style-path or --style-dir should be given.'
device = torch.device('cuda:%s' % args.gpu if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
encoder = NormalisedVGG(pretrained_path=args.encoder_path).to(device)
d5 = Decoder('relu5_1', pretrained_path=os.path.join(args.decoders_dir, 'd5.pth')).to(device)
d4 = Decoder('relu4_1', pretrained_path=os.path.join(args.decoders_dir, 'd4.pth')).to(device)
d3 = Decoder('relu3_1', pretrained_path=os.path.join(args.decoders_dir, 'd3.pth')).to(device)
d2 = Decoder('relu2_1', pretrained_path=os.path.join(args.decoders_dir, 'd2.pth')).to(device)
d1 = Decoder('relu1_1', pretrained_path=os.path.join(args.decoders_dir, 'd1.pth')).to(device)
def style_transfer(content, style):
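# Coarse-to-fine stylisation: transform content features at relu5_1 (with the style
# decorator or plain WCT), decode, then repeat WCT + decode at relu4_1, relu3_1,
# relu2_1 and relu1_1, feeding each reconstruction into the next level.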
if args.style_decorator:
relu5_1_cf = encoder(content, 'relu5_1')
relu5_1_sf = encoder(style, 'relu5_1')
relu5_1_scf = style_decorator(relu5_1_cf, relu5_1_sf, args.kernel_size, args.stride, args.ss_alpha)
relu5_1_recons = d5(relu5_1_scf)
else:
relu5_1_cf = encoder(content, 'relu5_1')
relu5_1_sf = encoder(style, 'relu5_1')
relu5_1_scf = wct(relu5_1_cf, relu5_1_sf, args.alpha)
relu5_1_recons = d5(relu5_1_scf)
relu4_1_cf = encoder(relu5_1_recons, 'relu4_1')
relu4_1_sf = encoder(style, 'relu4_1')
relu4_1_scf = wct(relu4_1_cf, relu4_1_sf, args.alpha)
relu4_1_recons = d4(relu4_1_scf)
relu3_1_cf = encoder(relu4_1_recons, 'relu3_1')
relu3_1_sf = encoder(style, 'relu3_1')
relu3_1_scf = wct(relu3_1_cf, relu3_1_sf, args.alpha)
relu3_1_recons = d3(relu3_1_scf)
relu2_1_cf = encoder(relu3_1_recons, 'relu2_1')
relu2_1_sf = encoder(style, 'relu2_1')
relu2_1_scf = wct(relu2_1_cf, relu2_1_sf, args.alpha)
relu2_1_recons = d2(relu2_1_scf)
relu1_1_cf = encoder(relu2_1_recons, 'relu1_1')
relu1_1_sf = encoder(style, 'relu1_1')
relu1_1_scf = wct(relu1_1_cf, relu1_1_sf, args.alpha)
relu1_1_recons = d1(relu1_1_scf)
return relu1_1_recons
if not os.path.exists(args.save_dir):
print('Creating save folder at', args.save_dir)
os.mkdir(args.save_dir)
content_paths = []
style_paths = []
if args.content_dir:
# use a batch of content images
content_paths = extract_image_names(args.content_dir)
else:
# use a single content image
content_paths.append(args.content_path)
if args.style_dir:
# use a batch of style images
style_paths = extract_image_names(args.style_dir)
else:
# use a single style image
style_paths.append(args.style_path)
print('Number content images:', len(content_paths))
print('Number style images:', len(style_paths))
with torch.no_grad():
for i in range(len(content_paths)):
content = load_image(content_paths[i])
content = preprocess(content, args.content_size)
content = content.to(device)
for j in range(len(style_paths)):
style = load_image(style_paths[j])
style = preprocess(style, args.style_size)
style = style.to(device)
if args.synthesis == 0:
output = style_transfer(content, style)
output = deprocess(output)
if len(content_paths) == 1 and len(style_paths) == 1:
# used a single content and style image
save_path = '%s/%s.%s' % (args.save_dir, args.save_name, args.save_ext)
else:
# used a batch of content and style images
save_path = '%s/%s_%s.%s' % (args.save_dir, i, j, args.save_ext)
print('Output image saved at:', save_path)
output.save(save_path)
else:
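# synthesis mode: start from uniform random noise and re-stylise it three times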
content = torch.rand(*content.shape).uniform_(0, 1).to(device)
for iteration in range(3):
output = style_transfer(content, style)
content = output
output = deprocess(output)
if len(content_paths) == 1 and len(style_paths) == 1:
# used a single content and style image
save_path = '%s/%s_%s.%s' % (args.save_dir, args.save_name, iteration, args.save_ext)
else:
# used a batch of content and style images
save_path = '%s/%s_%s_%s.%s' % (args.save_dir, i, j, iteration, args.save_ext)
print('Output image saved at:', save_path)
output.save(save_path)
|
{"/eval.py": ["/models.py", "/utils.py", "/ops.py"], "/utils.py": ["/ops.py"]}
|
80
|
igoryuha/wct
|
refs/heads/master
|
/models.py
|
import torch
import torch.nn as nn
import copy
normalised_vgg_relu5_1 = nn.Sequential(
nn.Conv2d(3, 3, 1),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(3, 64, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, 3),
nn.ReLU(),
nn.MaxPool2d(2, ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 128, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, 3),
nn.ReLU(),
nn.MaxPool2d(2, ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.MaxPool2d(2, ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.MaxPool2d(2, ceil_mode=True),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU()
)
class NormalisedVGG(nn.Module):
def __init__(self, pretrained_path=None):
super().__init__()
self.net = normalised_vgg_relu5_1
if pretrained_path is not None:
self.net.load_state_dict(torch.load(pretrained_path, map_location=lambda storage, loc: storage))
def forward(self, x, target):
if target == 'relu1_1':
return self.net[:4](x)
elif target == 'relu2_1':
return self.net[:11](x)
elif target == 'relu3_1':
return self.net[:18](x)
elif target == 'relu4_1':
return self.net[:31](x)
elif target == 'relu5_1':
return self.net(x)
vgg_decoder_relu5_1 = nn.Sequential(
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.Upsample(scale_factor=2),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 512, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(512, 256, 3),
nn.ReLU(),
nn.Upsample(scale_factor=2),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 256, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(256, 128, 3),
nn.ReLU(),
nn.Upsample(scale_factor=2),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 128, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(128, 64, 3),
nn.ReLU(),
nn.Upsample(scale_factor=2),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 64, 3),
nn.ReLU(),
nn.ReflectionPad2d((1, 1, 1, 1)),
nn.Conv2d(64, 3, 3)
)
class Decoder(nn.Module):
def __init__(self, target, pretrained_path=None):
super().__init__()
if target == 'relu1_1':
self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-5:])) # current -2
elif target == 'relu2_1':
self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-9:]))
elif target == 'relu3_1':
self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-16:]))
elif target == 'relu4_1':
self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-29:]))
elif target == 'relu5_1':
self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())))
if pretrained_path is not None:
self.net.load_state_dict(torch.load(pretrained_path, map_location=lambda storage, loc: storage))
def forward(self, x):
return self.net(x)
|
{"/eval.py": ["/models.py", "/utils.py", "/ops.py"], "/utils.py": ["/ops.py"]}
|
81
|
igoryuha/wct
|
refs/heads/master
|
/ops.py
|
import torch
import torch.nn.functional as F
def extract_image_patches_(image, kernel_size, strides):
kh, kw = kernel_size
sh, sw = strides
patches = image.unfold(2, kh, sh).unfold(3, kw, sw)
patches = patches.permute(0, 2, 3, 1, 4, 5)
patches = patches.reshape(-1, *patches.shape[-3:]) # (patch_numbers, C, kh, kw)
return patches
def style_swap(c_features, s_features, kernel_size, stride=1):
s_patches = extract_image_patches_(s_features, [kernel_size, kernel_size], [stride, stride])
s_patches_matrix = s_patches.reshape(s_patches.shape[0], -1)
s_patch_wise_norm = torch.norm(s_patches_matrix, dim=1)
s_patch_wise_norm = s_patch_wise_norm.reshape(-1, 1, 1, 1)
s_patches_normalized = s_patches / (s_patch_wise_norm + 1e-8)
# Computes the normalized cross-correlations.
# At each spatial location, "K" is a vector of cross-correlations
# between a content activation patch and all style activation patches.
K = F.conv2d(c_features, s_patches_normalized, stride=stride)
# Replace each vector "K" by a one-hot vector corresponding
# to the best matching style activation patch.
best_matching_idx = K.argmax(1, keepdim=True)
one_hot = torch.zeros_like(K)
one_hot.scatter_(1, best_matching_idx, 1)
# At each spatial location, only the best matching style
# activation patch is in the output, as the other patches
# are multiplied by zero.
F_ss = F.conv_transpose2d(one_hot, s_patches, stride=stride)
overlap = F.conv_transpose2d(one_hot, torch.ones_like(s_patches), stride=stride)
F_ss = F_ss / overlap
return F_ss
def relu_x_1_transform(c, s, encoder, decoder, relu_target, alpha=1):
c_latent = encoder(c, relu_target)
s_latent = encoder(s, relu_target)
t_features = wct(c_latent, s_latent, alpha)
return decoder(t_features)
def relu_x_1_style_decorator_transform(c, s, encoder, decoder, relu_target, kernel_size, stride=1, alpha=1):
c_latent = encoder(c, relu_target)
s_latent = encoder(s, relu_target)
t_features = style_decorator(c_latent, s_latent, kernel_size, stride, alpha)
return decoder(t_features)
def style_decorator(cf, sf, kernel_size, stride=1, alpha=1):
cf_shape = cf.shape
sf_shape = sf.shape
b, c, h, w = cf_shape
cf_vectorized = cf.reshape(c, h * w)
b, c, h, w = sf.shape
sf_vectorized = sf.reshape(c, h * w)
# map features to normalized domain
cf_whiten = whitening(cf_vectorized)
sf_whiten = whitening(sf_vectorized)
# in this normalized domain, we want to align
# any element in cf with the nearest element in sf
reassembling_f = style_swap(
cf_whiten.reshape(cf_shape),
sf_whiten.reshape(sf_shape),
kernel_size, stride
)
b, c, h, w = reassembling_f.shape
reassembling_vectorized = reassembling_f.reshape(c, h*w)
# reconstruct reassembling features into the
# domain of the style features
result = coloring(reassembling_vectorized, sf_vectorized)
result = result.reshape(cf_shape)
bland = alpha * result + (1 - alpha) * cf
return bland
def wct(cf, sf, alpha=1):
cf_shape = cf.shape
b, c, h, w = cf_shape
cf_vectorized = cf.reshape(c, h*w)
b, c, h, w = sf.shape
sf_vectorized = sf.reshape(c, h*w)
cf_transformed = whitening(cf_vectorized)
cf_transformed = coloring(cf_transformed, sf_vectorized)
cf_transformed = cf_transformed.reshape(cf_shape)
bland = alpha * cf_transformed + (1 - alpha) * cf
return bland
def feature_decomposition(x):
x_mean = x.mean(1, keepdim=True)
x_center = x - x_mean
x_cov = x_center.mm(x_center.t()) / (x_center.size(1) - 1)
e, d, _ = torch.svd(x_cov)
d = d[d > 0]
e = e[:, :d.size(0)]
return e, d, x_center, x_mean
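# Whitening/coloring via the covariance eigendecomposition: whitening maps the centered
# features to (approximately) identity covariance; coloring applies the inverse scaling
# with the style statistics and adds back the style mean.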
def whitening(x):
e, d, x_center, _ = feature_decomposition(x)
transform_matrix = e.mm(torch.diag(d ** -0.5)).mm(e.t())
return transform_matrix.mm(x_center)
def coloring(x, y):
e, d, _, y_mean = feature_decomposition(y)
transform_matrix = e.mm(torch.diag(d ** 0.5)).mm(e.t())
return transform_matrix.mm(x) + y_mean
|
{"/eval.py": ["/models.py", "/utils.py", "/ops.py"], "/utils.py": ["/ops.py"]}
|
82
|
igoryuha/wct
|
refs/heads/master
|
/utils.py
|
import torch
from torchvision import transforms
from ops import relu_x_1_style_decorator_transform, relu_x_1_transform
from PIL import Image
import os
def eval_transform(size):
return transforms.Compose([
transforms.Resize(size),
transforms.ToTensor()
])
def load_image(path):
return Image.open(path).convert('RGB')
def preprocess(img, size):
transform = eval_transform(size)
return transform(img).unsqueeze(0)
def deprocess(tensor):
tensor = tensor.cpu()
tensor = tensor.squeeze(0)
tensor = torch.clamp(tensor, 0, 1)
return transforms.ToPILImage()(tensor)
def extract_image_names(path):
r_ = []
valid_ext = ['.jpg', '.png']
items = os.listdir(path)
for item in items:
item_path = os.path.join(path, item)
_, ext = os.path.splitext(item_path)
if ext not in valid_ext:
continue
r_.append(item_path)
return r_
|
{"/eval.py": ["/models.py", "/utils.py", "/ops.py"], "/utils.py": ["/ops.py"]}
|
89
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
/tattle_helper.py
|
import os
import json
import boto3
import requests
from logger import log, logError
from dotenv import load_dotenv
load_dotenv()
s3 = boto3.client("s3",aws_access_key_id=os.environ.get('S3_ACCESS_KEY'),aws_secret_access_key=os.environ.get('S3_SECRET_ACCESS_KEY'))
API_BASE_URL = "https://archive-server.tattle.co.in"
# API_BASE_URL = "https://postman-echo.com/post"
ARCHIVE_TOKEN = os.environ.get('ARCHIVE_TOKEN')
def register_post(data):
"""
registers a post on archive server
"""
url_to_post_to = API_BASE_URL+"/api/posts"
payload = json.dumps(data)
headers = {
'token': ARCHIVE_TOKEN,
'Content-Type': "application/json",
'cache-control': "no-cache",
}
try:
r = requests.post(url_to_post_to, data=payload, headers=headers)
if r.status_code==200:
log('STATUS CODE 200 \n'+json.dumps(r.json(), indent=2))
else:
log('STATUS CODE '+str(r.status_code)+'\n '+r.text)
except:
log('error with API call')
def upload_file(file_name, s3=s3 ,acl="public-read"):
bucket_name = os.environ.get('TGM_BUCKET_NAME')
#opens file, reads it, and uploads it to the S3 bucket.
try:
with open(file_name, 'rb') as data:
s3.upload_fileobj(data,bucket_name,file_name,ExtraArgs={"ACL": acl,"ContentType": file_name.split(".")[-1]})
except:
logError('ERROR_S3_UPLOAD of '+file_name)
file_url = "https://s3.ap-south-1.amazonaws.com/"+bucket_name+"/"+file_name
return file_url
|
{"/tattle_helper.py": ["/logger.py"], "/test.py": ["/tattle_helper.py"], "/prototype.py": ["/logger.py", "/tattle_helper.py"]}
|
90
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
/post_request.py
|
token = "78a6fc20-fa83-11e9-a4ad-d1866a9a3c7b" # add your token here
url = "<base-api-url>/api/posts"
try:
payload = d # d is the post dict to send; see the example payload shape below
payload = json.dumps(payload)
headers = {
'token': token,
'Content-Type': "application/json",
'cache-control': "no-cache",
}
r = requests.post(url, data=payload, headers=headers)
if r.ok:
print ('success')
else:
print ('something went wrong')
except:
logging.exception('error in POST request')
raise
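# example payload shape: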
{
"type" : "image", # can be image, text, video
"data" : "",
"filename": "4bf4b1cc-516b-469d-aa38-be6762d417a5", #filename you put on s3
"userId" : 169 # for telegram_bot this should be 169
}
|
{"/tattle_helper.py": ["/logger.py"], "/test.py": ["/tattle_helper.py"], "/prototype.py": ["/logger.py", "/tattle_helper.py"]}
|
91
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
/test.py
|
from tattle_helper import register_post, upload_file
data = {
"type" : "image",
"data" : "",
"filename": "asdf",
"userId" : 169
}
response = upload_file(file_name='denny.txt')
print(response)
# register_post(data)
|
{"/tattle_helper.py": ["/logger.py"], "/test.py": ["/tattle_helper.py"], "/prototype.py": ["/logger.py", "/tattle_helper.py"]}
|
92
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
/prototype.py
|
import os
import sys
import json
import requests
import telegram
import logging
import re
from threading import Thread
from telegram.ext import CommandHandler, MessageHandler, Updater, Filters, InlineQueryHandler
from telegram import InlineQueryResultArticle, InputTextMessageContent
from telegram.ext.dispatcher import run_async
from dotenv import load_dotenv
from pymongo import MongoClient
from logger import log, logError
from tattle_helper import upload_file
# loads all environment variables
load_dotenv()
log('STARTING APP v1')
TOKEN = os.environ.get('ACCESS_TOKEN')
PORT = int(os.environ.get('PORT', '8443'))
print(TOKEN)
# logging.basicConfig(filename='telegram_bot_log.log',filemode='a',format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
# Calls for Database modification and reads start
def insert_document(document, required_collection):
return db[required_collection].insert_one(document)
def find_document(query, required_collection):
return db[required_collection].find_one(query)
def update_document(find_query, update_query, required_collection, upsert=False):
return db[required_collection].update_one(find_query, update_query, upsert)
def delete_document(find_query, required_collection):
return db[required_collection].delete_one(find_query)
# Calls for Database modification and reads end
@run_async
def start(update, context):
# start message
context.bot.send_message(chat_id=update.effective_chat.id, text="Hey! \n\nI'm the Tattle Bot. Here are some instructions to use me:\n\n1. You can send whatever content to me that you'd like. All mediums : Text, Video, and Photos are allowed.\n2. You can tag your content using hashtags. When uploading photos or videos you can mention the tags in the caption, with text you can just tag it at the end or in the beginning(anywhere else in the text will also work).\n3. You can edit your messages after you've sent them, we'll update them in our database accordingly.\n 4. In case you miss tagging a message, you can reply to that message and insert the tags required. Only tags will be extracted, so please don't write text while replying to messages.")
def determine_type(message_json):
# checks what type of content is being passed, and returns the type
type_of_content = ''
if(message_json.text):
type_of_content = 'text'
elif(message_json.photo):
type_of_content = 'photo'
elif(message_json.video):
type_of_content = 'video'
elif(message_json.document):
type_of_content = 'document'
return type_of_content
def entity_extraction(all_entities, message_content):
# entity extraction, which basically extracts all the hashtags out of the message
list_of_tags = []
if(bool(all_entities)):
# checks if there are any entities, and if so loops over them
for each_entity in all_entities:
if(each_entity['type'] == 'hashtag'):
# string slicing based on offset and length values
tag = message_content[each_entity['offset']:(
each_entity['offset']+each_entity['length'])]
list_of_tags.append(tag)
if(bool(list_of_tags)):
# converts to set to remove duplicates
return list(set(list_of_tags))
else:
return None
def new_tags(message_json, current_document, all_tags):
# adds or replaces tags in messages that had no tags or in case of edits
new_tags = all_tags
update_document({'message_id': message_json.reply_to_message.message_id}, {
"$set": {"reply_tags": new_tags}}, 'messages')
def error_message(message_json):
# standard error message, sent as a reply to the offending message
message_json.reply_text("Something went wrong with registering these tags, apologies for the same.")
def reply_to_messages(message_json, edit_flag):
all_tags = entity_extraction(message_json.entities, message_json.text)
if(all_tags is not None):
# first finds the document that the reply is being done to
current_document = find_document(
{'message_id': message_json.reply_to_message.message_id}, 'messages')
try:
# add reply tags with a new key called reply_tags
new_tags(message_json, current_document, all_tags)
except:
# or, throw an error message and log
error_message(message_json)
raise
def edit_message(message_json, final_dict, content_type, context):
tags = []
# check content type before processing the data
if(content_type == 'text'):
# In case of edits, we need to replace file on S3. Replacing happens automatically as long as file name is same.
file_name = str(message_json.message_id) + '.txt'
with open(file_name, 'w') as open_file:
open_file.write(message_json['text'])
upload_file(file_name)
os.remove(file_name)
final_dict = process_text(
message_json, final_dict, message_json['text'], False)
else:
final_dict = process_media(
message_json, final_dict, content_type, context, False)
# in case message is being edited, we first find the document being edited
current_document = find_document(
{'message_id': message_json.message_id}, 'messages')
# we check if the document had any existing tags, if so we store them before deleting the document
# FLAW IN CODE : If existing tags are being edited, it doesn't reflect this way. NEED TO FIX.
try:
tags = current_document['tags']
except KeyError:
tags = None
try:
reply_tags = current_document['reply_tags']
except KeyError:
reply_tags = None
if(reply_tags is not None):
final_dict['reply_tags'] = reply_tags
# add tags to final dict for new, edited document
if(tags is not None):
final_dict['tags'] = tags
# delete the document
delete_document({'message_id': message_json.message_id}, 'messages')
# insert edited document
insert_document(final_dict, 'messages')
def process_text(message_json, final_dict, message_content, caption_flag):
# check if we're processing a caption or a text message
if(caption_flag):
all_tags = entity_extraction(
message_json['caption_entities'], message_content)
else:
all_tags = entity_extraction(message_json['entities'], message_content)
# check if any tags are present
if(all_tags is not None):
final_dict['tags'] = all_tags
if(bool(message_content)):
# cleans out the hashtags
modified_message = re.sub(r'#\w+', '', message_content)
# removes all excessive spacing
cleaned_message = re.sub(' +', ' ', modified_message)
# changes key based on whether it is a caption or not
if(caption_flag):
# removing leading and trailing spaces
final_dict['caption'] = cleaned_message.strip()
else:
final_dict['text'] = cleaned_message.strip()
return final_dict
# just for testing
# BASE_URL = "http://archive-telegram-bot.tattle.co.in.s3.amazonaws.com/"
# print("{}{}".format(BASE_URL, file_name))
def make_post_request(dict_to_post):
log('***')
log(dict_to_post)
API_BASE_URL = "https://archive-server.tattle.co.in"
access_token = os.environ.get('ARCHIVE_TOKEN')
url_to_post_to = API_BASE_URL+"/api/posts"
payload = json.dumps(dict_to_post)
headers = {
'token': access_token,
'Content-Type': "application/json",
'cache-control': "no-cache",
}
r = requests.post(url_to_post_to, data=payload, headers=headers)
print('API response')
print(r)
# print(r.json())
def construct_dict(file_name, file_type):
return {"type": file_type, "data": "", "filename": file_name, "userId": 169}
def process_media(message_json, final_dict, content_type, context, creation_flag):
# check if content type is photo, and constructs dict and file_name appropriately
if(content_type == 'photo'):
final_dict['photo'] = [{'file_id': each_photo.file_id, 'width': each_photo.width,
'height': each_photo.height, 'file_size': each_photo.file_size} for each_photo in message_json.photo]
file_id = message_json.photo[-1].file_id
file_name = str(message_json.message_id)+'.jpeg'
post_request_type = 'image'
# same with video as above
elif(content_type == 'video'):
final_dict['video'] = {'file_id': message_json.video.file_id, 'width': message_json.video.width, 'height': message_json.video.height, 'duration': message_json.video.duration, 'thumb': {'file_id': message_json.video.thumb.file_id,
'width': message_json.video.thumb.width, 'height': message_json.video.thumb.height, 'file_size': message_json.video.thumb.file_size}, 'mime_type': message_json.video.mime_type, 'file_size': message_json.video.file_size}
file_id = message_json.video.file_id
file_type = str(message_json.video.mime_type).split("/")[-1]
file_name = str(message_json.message_id)+"."+file_type
post_request_type = 'video'
# process_media is only called from two places, one of which is when message is edited. Since we don't want duplicates, we set a flag to differentiate.
if(creation_flag):
try:
new_file = context.bot.get_file(file_id)
new_file.download(file_name) # downloads the file
final_dict['file_name'] = file_name
file_url = upload_file(file_name) # uploads to S3
final_dict['s3_url'] = file_url
os.remove(file_name) # removes it from local runtime
request_dict = construct_dict(file_name, post_request_type)
make_post_request(request_dict)
except:
logging.exception(
"The file_name when the error happened is: {}".format(file_name))
# process any caption or text found
final_dict = process_text(message_json, final_dict,
message_json.caption, True)
return final_dict
@run_async
def storing_data(update, context):
log(update)
final_dict = {}
# print(update)
# selects just the effective_message part
relevant_section = update.effective_message
# some general data appended to each dict
final_dict['message_id'] = relevant_section['message_id']
final_dict['date'] = relevant_section['date']
# final_dict['from'] = {'id':relevant_section.from_user.id,'type':relevant_section.chat.type,'first_name':relevant_section.from_user.first_name,'last_name':relevant_section.from_user.last_name,'username':relevant_section.from_user.username,'is_bot':relevant_section.from_user.is_bot}
content_type = determine_type(relevant_section)
final_dict['content_type'] = content_type
# checks if the request is that of an edition
if(relevant_section.edit_date):
# if yes, checks if the edited message was replying to another message
if(relevant_section.reply_to_message):
# if yes, then deals with it by setting edit flag to True
reply_to_messages(relevant_section, True)
return
else:
# else, just edits the message normally
edit_message(relevant_section, final_dict, content_type, context)
return
# if the message is a reply, then respond appropriately
if(relevant_section.reply_to_message):
# edit flag is set to false because we're just handling simple reply
reply_to_messages(relevant_section, False)
return
if(content_type == 'text'):
# creates file with message ID, then writes the text into the file and uploads it to S3
try:
file_name = str(relevant_section.message_id) + '.txt'
with open(file_name, 'w') as open_file:
open_file.write(relevant_section['text'])
file_url = upload_file(file_name)
final_dict['s3_url'] = file_url
os.remove(file_name)
request_dict = construct_dict(file_name, content_type)
r = make_post_request(request_dict)
except Exception as e:
logging.exception(
"The file_name when the error happened is: {}".format(file_name))
logging.exception(e)
# if new text message, process it and then insert it in the database
final_dict = process_text(
relevant_section, final_dict, relevant_section['text'], False)
insert_document(final_dict, 'messages')
else:
final_dict = process_media(
relevant_section, final_dict, content_type, context, True)
insert_document(final_dict, 'messages')
context.bot.send_message(
chat_id=update.effective_chat.id, text='message archived')
# context.bot.send_message(chat_id=update.effective_chat.id, text=update.message.text)
def stop_and_restart():
"""Gracefully stop the Updater and replace the current process with a new one"""
updater.stop()
os.execl(sys.executable, sys.executable, *sys.argv)
def restart(update, context):
update.message.reply_text('Bot is restarting...')
Thread(target=stop_and_restart).start()
try:
client = MongoClient("mongodb+srv://"+os.environ.get("TGM_DB_USERNAME")+":"+os.environ.get("TGM_DB_PASSWORD") +
"@tattle-data-fkpmg.mongodb.net/test?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE")
db = client[os.environ.get("TGM_DB_NAME")]
except Exception as db_error:
print('error connecting to db')
print(db_error)
updater = Updater(token=TOKEN, use_context=True, workers=32)
dispatcher = updater.dispatcher
start_handler = CommandHandler('start', start)
storing_data_handler = MessageHandler(Filters.all, storing_data)
restart_handler = CommandHandler(
'r', restart, filters=Filters.user(username='@thenerdyouknow'))
dispatcher.add_handler(restart_handler)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(storing_data_handler)
# updater.start_webhook(listen="0.0.0.0",
# port=PORT,
# url_path=TOKEN)
# updater.bot.set_webhook("https://services-server.tattle.co.in/" + TOKEN)
updater.start_polling()
updater.idle()
log('STARTING SERVER v1.0')
|
{"/tattle_helper.py": ["/logger.py"], "/test.py": ["/tattle_helper.py"], "/prototype.py": ["/logger.py", "/tattle_helper.py"]}
|
93
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
/logger.py
|
from datetime import datetime
def log(data):
print('----', datetime.now(), '----')
print(data)
def logError(error):
print('****', datetime.now(), '****')
print(error)
|
{"/tattle_helper.py": ["/logger.py"], "/test.py": ["/tattle_helper.py"], "/prototype.py": ["/logger.py", "/tattle_helper.py"]}
|
143
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/main.py
|
import data_helper
import time
import datetime
import os
import tensorflow as tf
import numpy as np
import evaluation
from models import QA_CNN_extend  # assumed import; QA_CNN_extend is defined in the models package
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
timeDay = time.strftime("%Y%m%d", timeArray)
print (timeStamp)
def main(args):
args._parse_flags()
print("\nParameters:")
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
log_dir = 'log/'+ timeDay
if not os.path.exists(log_dir):
os.makedirs(log_dir)
data_file = log_dir + '/test_' + args.data + timeStamp
precision = data_file + 'precise'
print('load data ...........')
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev])
print('the number of words',len(alphabet))
print('get embedding')
if args.data=="quora":
embedding = data_helper.get_embedding(alphabet,language="cn")
else:
embedding = data_helper.get_embedding(alphabet)
with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model = QA_CNN_extend(max_input_left = q_max_sent_length,
max_input_right = a_max_sent_length,
batch_size = args.batch_size,
vocab_size = len(alphabet),
embedding_size = args.embedding_dim,
filter_sizes = list(map(int, args.filter_sizes.split(","))),
num_filters = args.num_filters,
hidden_size = args.hidden_size,
dropout_keep_prob = args.dropout_keep_prob,
embeddings = embedding,
l2_reg_lambda = args.l2_reg_lambda,
trainable = args.trainable,
pooling = args.pooling,
conv = args.conv)
model.build_graph()
sess.run(tf.global_variables_initializer())
def train_step(model,sess,batch):
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.answer_negative:data[2],
model.q_mask:data[3],
model.a_mask:data[4],
model.a_neg_mask:data[5]
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[model.train_op, model.merged,model.global_step,model.loss, model.accuracy,model.score12,model.score13, model.see],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
def predict(model,sess,batch,test):
scores = []
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.q_mask:data[2],
model.a_mask:data[3]
}
score = sess.run(
model.score12,
feed_dict)
scores.extend(score)
return np.array(scores[:len(test)])
for i in range(args.num_epoches):
datas = data_helper.get_mini_batch(train,alphabet,args.batch_size)
train_step(model,sess,datas)
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
print(len(predicted_test))
print(len(test))
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
print('map_mrr test',map_mrr_test)
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
144
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/config.py
|
class Singleton(object):
__instance=None
def __init__(self):
pass
def getInstance(self):
if Singleton.__instance is None:
# Singleton.__instance=object.__new__(cls,*args,**kwd)
Singleton.__instance=self.get_test_flag()
print("build FLAGS over")
return Singleton.__instance
def get_test_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_rnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
# flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('data','trec','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_cnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_qcnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "qcnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','mean','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_8008_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
if __name__=="__main__":
args=Singleton().get_test_flag()
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
145
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/run.py
|
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime,os
import models
import numpy as np
import evaluation
import sys
import logging
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_qcnn_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
# ckpt = tf.train.get_checkpoint_state("checkpoint")
# if ckpt and ckpt.model_checkpoint_path:
# # Restores from checkpoint
# saver.restore(sess, ckpt.model_checkpoint_path)
# if os.path.exists("model") :
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
# variable_averages = tf.train.ExponentialMovingAverage( model)
# variables_to_restore = variable_averages.variables_to_restore()
# saver = tf.train.Saver(variables_to_restore)
# for name in variables_to_restore:
# print(name)
sess.run(tf.global_variables_initializer())
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
best_p1=0
for i in range(args.num_epoches):
for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess):
# for data in data_helper.getBatch48008(train,alphabet,args.batch_size):
_, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
#<<<<<<< HEAD
#
#
# if i>0 and i % 5 ==0:
# test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
#
# predicted_test = predict(model,sess,test_datas,test)
# map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
#
# logger.info('map_mrr test' +str(map_mrr_test))
# print('map_mrr test' +str(map_mrr_test))
#
# test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size)
# predicted_test = predict(model,sess,test_datas,dev)
# map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test)
#
# logger.info('map_mrr dev' +str(map_mrr_test))
# print('map_mrr dev' +str(map_mrr_test))
# map,mrr,p1 = map_mrr_test
# if p1>best_p1:
# best_p1=p1
# filename= "checkpoint/"+args.data+"_"+str(p1)+".model"
# save_path = saver.save(sess, filename)
# # load_path = saver.restore(sess, model_path)
#
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
#
#
#=======
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
logger.info('map_mrr test' +str(map_mrr_test))
print('epoch '+ str(i) + 'map_mrr test' +str(map_mrr_test))
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
146
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/models/QA_CNN_pairwise.py
|
#coding:utf-8
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import models.blocks as blocks
# model_type :apn or qacnn
class QA_CNN_extend(object):
# def __init__(self,max_input_left,max_input_right,batch_size,vocab_size,embedding_size,filter_sizes,num_filters,hidden_size,
# dropout_keep_prob = 1,learning_rate = 0.001,embeddings = None,l2_reg_lambda = 0.0,trainable = True,pooling = 'attentive',conv = 'narrow'):
#
# """
# QA_RNN model for question answering
#
# Args:
# self.dropout_keep_prob: dropout rate
# self.num_filters : number of filters
# self.para : parameter list
# self.extend_feature_dim : my extend feature dimension
# self.max_input_left : the length of question
# self.max_input_right : the length of answer
# self.pooling : pooling strategy :max pooling or attentive pooling
#
# """
# self.dropout_keep_prob = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
# self.num_filters = num_filters
# self.embeddings = embeddings
# self.embedding_size = embedding_size
# self.batch_size = batch_size
# self.filter_sizes = filter_sizes
# self.l2_reg_lambda = l2_reg_lambda
# self.para = []
#
# self.max_input_left = max_input_left
# self.max_input_right = max_input_right
# self.trainable = trainable
# self.vocab_size = vocab_size
# self.pooling = pooling
# self.total_num_filter = len(self.filter_sizes) * self.num_filters
#
# self.conv = conv
# self.pooling = 'traditional'
# self.learning_rate = learning_rate
#
# self.hidden_size = hidden_size
#
# self.attention_size = 100
def __init__(self,opt):
for key,value in opt.items():
self.__setattr__(key,value)
self.attention_size = 100
self.pooling = 'mean'
self.total_num_filter = len(self.filter_sizes) * self.num_filters
self.para = []
self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
def create_placeholder(self):
print(('Create placeholders'))
        # the sentence length varies from batch to batch, so both placeholder dimensions are None
self.question = tf.placeholder(tf.int32,[None,None],name = 'input_question')
self.max_input_left = tf.shape(self.question)[1]
self.batch_size = tf.shape(self.question)[0]
self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer')
self.max_input_right = tf.shape(self.answer)[1]
self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right')
# self.q_mask = tf.placeholder(tf.int32,[None,None],name = 'q_mask')
# self.a_mask = tf.placeholder(tf.int32,[None,None],name = 'a_mask')
# self.a_neg_mask = tf.placeholder(tf.int32,[None,None],name = 'a_neg_mask')
def add_embeddings(self):
print( 'add embeddings')
if self.embeddings is not None:
print( "load embedding")
W = tf.Variable(np.array(self.embeddings),name = "W" ,dtype="float32",trainable = self.trainable)
else:
print( "random embedding")
W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name="W",trainable = self.trainable)
self.embedding_W = W
# self.overlap_W = tf.Variable(a,name="W",trainable = True)
self.para.append(self.embedding_W)
self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question)
self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer)
self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative)
#real length
self.q_len,self.q_mask = blocks.length(self.question)
self.a_len,self.a_mask = blocks.length(self.answer)
self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative)
def convolution(self):
print( 'convolution:wide_convolution')
self.kernels = []
for i,filter_size in enumerate(self.filter_sizes):
with tf.name_scope('conv-max-pool-%s' % filter_size):
filter_shape = [filter_size,self.embedding_size,1,self.num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name="W")
b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name="b")
self.kernels.append((W,b))
self.para.append(W)
self.para.append(b)
embeddings = [self.q_embedding,self.a_embedding,self.a_neg_embedding]
self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings]
#convolution
def pooling_graph(self):
if self.pooling == 'mean':
self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask)
self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask)
elif self.pooling == 'attentive':
self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'position':
self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'traditional':
print( self.pooling)
print(self.q_cnn)
self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
def para_initial(self):
# print(("---------"))
# self.W_qp = tf.Variable(tf.truncated_normal(shape = [self.hidden_size * 2,1],stddev = 0.01,name = 'W_qp'))
self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U'))
self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm'))
self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm'))
self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms'))
self.M_qi = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi'))
def mean_pooling(self,conv,mask):
conv = tf.squeeze(conv,2)
print( tf.expand_dims(tf.cast(mask,tf.float32),-1))
# conv_mask = tf.multiply(conv,tf.expand_dims(tf.cast(mask,tf.float32),-1))
# self.see = conv_mask
# print( conv_mask)
return tf.reduce_mean(conv,axis = 1);
def attentive_pooling(self,input_left,input_right,q_mask,a_mask):
Q = tf.squeeze(input_left,axis = 2)
A = tf.squeeze(input_right,axis = 2)
print( Q)
print( A)
# Q = tf.reshape(input_left,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,len(self.filter_sizes) * self.num_filters],name = 'A')
# G = tf.tanh(tf.matmul(tf.matmul(Q,self.U),\
# A,transpose_b = True),name = 'G')
first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U)
second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters])
result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1]))
print( second_step)
print( tf.transpose(A,perm = [0,2,1]))
# print( 'result',result)
G = tf.tanh(result)
# G = result
# column-wise pooling ,row-wise pooling
row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling')
col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling')
self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q')
self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1))
self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a')
self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1))
self.see = G
R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q')
R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a')
return R_q,R_a
def traditional_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2))
Q = tf.reduce_mean(input_left_mask,1)
a_shape = tf.shape(input_right)
A = tf.reshape(input_right,[-1,self.total_num_filter])
m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1))
f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1]))
self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2))
self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True))
self.see = self.f_attention_norm
a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1)
return Q,a_attention
def position_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
# Q = tf.reshape(input_left,[-1,self.max_input_left,self.hidden_size*2],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,self.hidden_size*2],name = 'A')
Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
QU = tf.matmul(Q,self.U)
QUA = tf.multiply(tf.expand_dims(QU,1),input_right)
        self.attention_a = tf.cast(tf.argmax(QUA,2),tf.float32)
# q_shape = tf.shape(input_left)
# Q_1 = tf.reshape(input_left,[-1,self.total_num_filter])
# QU = tf.matmul(Q_1,self.U)
# QU_1 = tf.reshape(QU,[-1,q_shape[1],self.total_num_filter])
# A_1 = tf.transpose(input_right,[0,2,1])
# QUA = tf.matmul(QU_1,A_1)
# QUA = tf.nn.l2_normalize(QUA,1)
# G = tf.tanh(QUA)
# Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
# # self.Q_mask = tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2))
# row_pooling = tf.reduce_max(G,1,name="row_pooling")
# col_pooling = tf.reduce_max(G,2,name="col_pooling")
# self.attention_a = tf.nn.softmax(row_pooling,1,name = "attention_a")
self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32))
self.see = self.attention_a
self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True))
self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter])
return Q ,self.r_a
def create_loss(self):
with tf.name_scope('score'):
self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn)
self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn)
l2_loss = tf.constant(0.0)
for p in self.para:
l2_loss += tf.nn.l2_loss(p)
with tf.name_scope("loss"):
self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13)))
self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss
tf.summary.scalar('loss', self.loss)
# Accuracy
with tf.name_scope("accuracy"):
self.correct = tf.equal(0.0, self.losses)
self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy")
tf.summary.scalar('accuracy', self.accuracy)
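    # Worked example (illustrative): with the 0.05 margin above, score12 = 0.82 for the positive
    # pair and score13 = 0.70 for the negative pair give max(0, 0.05 - 0.12) = 0, so the pair also
    # counts as correct for the accuracy; with score13 = 0.80 the pair contributes a loss of 0.03.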
def create_op(self):
self.global_step = tf.Variable(0, name = "global_step", trainable = False)
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step)
def max_pooling(self,conv,input_length):
pooled = tf.nn.max_pool(
conv,
ksize = [1, input_length, 1, 1],
strides = [1, 1, 1, 1],
padding = 'VALID',
name="pool")
return pooled
def getCosine(self,q,a):
pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder)
pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder)
pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1))
pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1))
pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1)
score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name="scores")
return score
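    # getCosine above is the cosine similarity <q, a> / (||q|| * ||a||) of the two pooled vectors,
    # computed after element-wise dropout; at prediction time dropout_keep_prob_holder is fed 1.0,
    # so it reduces to the plain cosine score.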
def wide_convolution(self,embedding):
cnn_outputs = []
for i,filter_size in enumerate(self.filter_sizes):
conv = tf.nn.conv2d(
embedding,
self.kernels[i][0],
strides=[1, 1, self.embedding_size, 1],
padding='SAME',
name="conv-1"
)
h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name="relu-1")
cnn_outputs.append(h)
cnn_reshaped = tf.concat(cnn_outputs,3)
return cnn_reshaped
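    # Shape note: each conv2d above strides over the full embedding width (stride = embedding_size,
    # padding 'SAME'), so every branch yields [batch, seq_len, 1, num_filters]; concatenating on the
    # channel axis gives [batch, seq_len, 1, len(filter_sizes) * num_filters], which mean_pooling
    # later squeezes on axis 2.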
def variable_summaries(self,var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def build_graph(self):
self.create_placeholder()
self.add_embeddings()
self.para_initial()
self.convolution()
self.pooling_graph()
self.create_loss()
self.create_op()
self.merged = tf.summary.merge_all()
def train(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
self.answer_negative:data[2],
# self.q_mask:data[3],
# self.a_mask:data[4],
# self.a_neg_mask:data[5],
self.dropout_keep_prob_holder:self.dropout_keep_prob
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see],
feed_dict)
return _, summary, step, loss, accuracy,score12, score13, see
def predict(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
# self.q_mask:data[2],
# self.a_mask:data[3],
self.dropout_keep_prob_holder:1.0
}
score = sess.run( self.score12, feed_dict)
return score
if __name__ == '__main__':
    # __init__ takes a single opt dict (see the class above), so build one for this quick smoke test;
    # learning_rate is included because create_op reads it.
    opt = dict(
        max_input_left = 33,
        max_input_right = 40,
        batch_size = 3,
        vocab_size = 5000,
        embedding_size = 100,
        filter_sizes = [3,4,5],
        num_filters = 64,
        hidden_size = 100,
        dropout_keep_prob = 1.0,
        embeddings = None,
        l2_reg_lambda = 0.0,
        learning_rate = 0.001,
        trainable = True,
        pooling = 'max',
        conv = 'wide')
    cnn = QA_CNN_extend(opt)
cnn.build_graph()
input_x_1 = np.reshape(np.arange(3 * 33),[3,33])
input_x_2 = np.reshape(np.arange(3 * 40),[3,40])
input_x_3 = np.reshape(np.arange(3 * 40),[3,40])
q_mask = np.ones((3,33))
a_mask = np.ones((3,40))
a_neg_mask = np.ones((3,40))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {
cnn.question:input_x_1,
cnn.answer:input_x_2,
# cnn.answer_negative:input_x_3,
cnn.q_mask:q_mask,
cnn.a_mask:a_mask,
            cnn.dropout_keep_prob_holder:cnn.dropout_keep_prob
# cnn.a_neg_mask:a_neg_mask
# cnn.q_pos_overlap:q_pos_embedding,
# cnn.q_neg_overlap:q_neg_embedding,
# cnn.a_pos_overlap:a_pos_embedding,
# cnn.a_neg_overlap:a_neg_embedding,
# cnn.q_position:q_position,
# cnn.a_pos_position:a_pos_position,
# cnn.a_neg_position:a_neg_position
}
question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict)
print( question.shape,answer.shape)
print( score)
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
147
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/models/my/nn.py
|
from my.general import flatten, reconstruct, add_wd, exp_mask
import numpy as np
import tensorflow as tf
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
is_train=None):#, name_w='', name_b=''
# if args is None or (nest.is_sequence(args) and not args):
# raise ValueError("`args` must be specified")
# if not nest.is_sequence(args):
# args = [args]
flat_args = [flatten(arg, 1) for arg in args]#[210,20]
# if input_keep_prob < 1.0:
# assert is_train is not None
flat_args = [tf.nn.dropout(arg, input_keep_prob) for arg in flat_args]
total_arg_size = 0#[60]
shapes = [a.get_shape() for a in flat_args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
# print(total_arg_size)
# exit()
dtype = [a.dtype for a in flat_args][0]
# scope = tf.get_variable_scope()
with tf.variable_scope(scope) as outer_scope:
weights = tf.get_variable(_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype)
if len(flat_args) == 1:
res = tf.matmul(flat_args[0], weights)
else:
res = tf.matmul(tf.concat(flat_args, 1), weights)
if not bias:
flat_out = res
else:
with tf.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
biases = tf.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=tf.constant_initializer(bias_start, dtype=dtype))
flat_out = tf.nn.bias_add(res, biases)
out = reconstruct(flat_out, args[0], 1)
if squeeze:
out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
if wd:
add_wd(wd)
return out
def softmax(logits, mask=None, scope=None):
with tf.name_scope(scope or "Softmax"):
if mask is not None:
logits = exp_mask(logits, mask)
flat_logits = flatten(logits, 1)
flat_out = tf.nn.softmax(flat_logits)
out = reconstruct(flat_out, logits, 1)
return out
def softsel(target, logits, mask=None, scope=None):
"""
:param target: [ ..., J, d] dtype=float
:param logits: [ ..., J], dtype=float
:param mask: [ ..., J], dtype=bool
:param scope:
:return: [..., d], dtype=float
"""
with tf.name_scope(scope or "Softsel"):
a = softmax(logits, mask = mask)
target_rank = len(target.get_shape().as_list())
out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)
return out
def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
with tf.variable_scope(scope or "highway_layer"):
d = arg.get_shape()[-1]
trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob)
trans = tf.nn.relu(trans)
gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob)
gate = tf.nn.sigmoid(gate)
out = gate * trans + (1 - gate) * arg
return out
def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
with tf.variable_scope(scope or "highway_network"):
prev = arg
cur = None
for layer_idx in range(num_layers):
cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx), wd=wd,
input_keep_prob=input_keep_prob)
prev = cur
return cur
def conv1d(in_, filter_size, height, padding, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "conv1d"):
num_channels = in_.get_shape()[-1]
filter_ = tf.get_variable("filter", shape=[1, height, num_channels, filter_size], dtype='float')
bias = tf.get_variable("bias", shape=[filter_size], dtype='float')
strides = [1, 1, 1, 1]
in_ = tf.nn.dropout(in_, keep_prob)
xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias # [N*M, JX, W/filter_stride, d]
out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d]
return out
def multi_conv1d(in_, filter_sizes, heights, padding, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "multi_conv1d"):
assert len(filter_sizes) == len(heights)
outs = []
for filter_size, height in zip(filter_sizes, heights):
if filter_size == 0:
continue
out = conv1d(in_, filter_size, height, padding, keep_prob=keep_prob, scope="conv1d_{}".format(height))
outs.append(out)
concat_out = tf.concat(outs, axis=2)
return concat_out
if __name__ == '__main__':
a = tf.Variable(np.random.random(size=(2,2,4)))
b = tf.Variable(np.random.random(size=(2,3,4)))
c = tf.tile(tf.expand_dims(a, 2), [1, 1, 3, 1])
test = flatten(c,1)
out = reconstruct(test, c, 1)
d = tf.tile(tf.expand_dims(b, 1), [1, 2, 1, 1])
e = linear([c,d,c*d],1,bias = False,scope = "test",)
# f = softsel(d, e)
with tf.Session() as sess:
tf.global_variables_initializer().run()
print(sess.run(test))
print(sess.run(tf.shape(out)))
exit()
print(sess.run(tf.shape(a)))
print(sess.run(a))
print(sess.run(tf.shape(b)))
print(sess.run(b))
print(sess.run(tf.shape(c)))
print(sess.run(c))
print(sess.run(tf.shape(d)))
print(sess.run(d))
print(sess.run(tf.shape(e)))
print(sess.run(e))
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
148
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/models/__init__.py
|
from .QA_CNN_pairwise import QA_CNN_extend as CNN
from .QA_RNN_pairwise import QA_RNN_extend as RNN
from .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN
def setup(opt):
if opt["model_name"]=="cnn":
model=CNN(opt)
elif opt["model_name"]=="rnn":
model=RNN(opt)
elif opt['model_name']=='qcnn':
model=QCNN(opt)
else:
print("no model")
exit(0)
return model
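# Usage sketch (assumes an opts dict like the one assembled in run.py):
#   opts = {"model_name": "cnn", "vocab_size": 5000, "embeddings": None, ...}
#   model = setup(opts)
#   model.build_graph()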
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
149
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/test.py
|
# -*- coding: utf-8 -*-
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime
import os
import models
import numpy as np
import evaluation
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_rnn_flag()
#args = Singleton().get_8008_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())  # initialize variables first, then restore/print/save
ckpt = tf.train.get_checkpoint_state("checkpoint")
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
print(sess.run(model.position_embedding)[0])
if os.path.exists("model") :
import shutil
shutil.rmtree("model")
builder = tf.saved_model.builder.SavedModelBuilder("./model")
builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
builder.save(True)
variable_averages = tf.train.ExponentialMovingAverage( model)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
for name in variables_to_restore:
print(name)
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
    text = "怎么 提取 公积金 ?"  # pre-segmented Chinese query: "How do I withdraw my housing provident fund?"
splited_text=data_helper.encode_to_split(text,alphabet)
mb_q,mb_q_mask = data_helper.prepare_data([splited_text])
mb_a,mb_a_mask = data_helper.prepare_data([splited_text])
data = (mb_q,mb_a,mb_q_mask,mb_a_mask)
score = model.predict(sess,data)
print(score)
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.q_mask:data[2],
model.a_mask:data[3],
model.dropout_keep_prob_holder:1.0
}
sess.run(model.position_embedding,feed_dict=feed_dict)[0]
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
150
|
shuishen112/pairwise-rnn
|
refs/heads/master
|
/data_helper.py
|
#-*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
from functools import wraps
import time
import pickle
def log_time_delta(func):
@wraps(func)
def _deco(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
end = time.time()
delta = end - start
print( "%s runed %.2f seconds"% (func.__name__,delta))
return ret
return _deco
from nltk.corpus import stopwords
OVERLAP = 237
class Alphabet(dict):
def __init__(self, start_feature_id = 1):
self.fid = start_feature_id
def add(self, item):
idx = self.get(item, None)
if idx is None:
idx = self.fid
self[item] = idx
# self[idx] = item
self.fid += 1
return idx
def dump(self, fname):
with open(fname, "w") as out:
for k in sorted(self.keys()):
out.write("{}\t{}\n".format(k, self[k]))
def cut(sentence):
tokens = sentence.lower().split()
# tokens = [w for w in tokens if w not in stopwords.words('english')]
return tokens
@log_time_delta
def load(dataset, filter = False):
data_dir = "data/" + dataset
datas = []
for data_name in ['train.txt','test.txt','dev.txt']:
data_file = os.path.join(data_dir,data_name)
data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0')
# data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0')
if filter == True:
datas.append(removeUnanswerdQuestion(data))
else:
datas.append(data)
# sub_file = os.path.join(data_dir,'submit.txt')
# submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3)
# datas.append(submit)
return tuple(datas)
@log_time_delta
def removeUnanswerdQuestion(df):
counter= df.groupby("question").apply(lambda group: sum(group["flag"]))
questions_have_correct=counter[counter>0].index
counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0))
questions_have_uncorrect=counter[counter>0].index
counter=df.groupby("question").apply(lambda group: len(group["flag"]))
questions_multi=counter[counter>1].index
return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_uncorrect)].reset_index()
@log_time_delta
def get_alphabet(corpuses=None,dataset=""):
pkl_name="temp/"+dataset+".alphabet.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
alphabet = Alphabet(start_feature_id = 0)
alphabet.add('[UNK]')
alphabet.add('END')
count = 0
for corpus in corpuses:
for texts in [corpus["question"].unique(),corpus["answer"]]:
for sentence in texts:
tokens = cut(sentence)
for token in set(tokens):
alphabet.add(token)
print("alphabet size %d" % len(alphabet.keys()) )
if not os.path.exists("temp"):
os.mkdir("temp")
pickle.dump( alphabet,open(pkl_name,"wb"))
return alphabet
@log_time_delta
def getSubVectorsFromDict(vectors,vocab,dim = 300):
embedding = np.zeros((len(vocab),dim))
count = 1
for word in vocab:
if word in vectors:
count += 1
embedding[vocab[word]]= vectors[word]
else:
embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist()
print( 'word in embedding',count)
return embedding
def encode_to_split(sentence,alphabet):
indices = []
tokens = cut(sentence)
seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens]
return seq
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size = 100):
vectors = {}
with open(filename,encoding='utf-8') as f:
i = 0
for line in f:
i += 1
if i % 100000 == 0:
                print('epoch %d' % i)
items = line.strip().split(' ')
if len(items) == 2:
vocab_size, embedding_size= items[0],items[1]
print( ( vocab_size, embedding_size))
else:
word = items[0]
if word in alphabet:
vectors[word] = items[1:]
print( 'embedding_size',embedding_size)
print( 'done')
    print('words found in word2vec embedding', len(vectors.keys()))
return vectors
@log_time_delta
def get_embedding(alphabet,dim = 300,language ="en",dataset=""):
pkl_name="temp/"+dataset+".subembedding.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
if language=="en":
fname = 'embedding/glove.6B/glove.6B.300d.txt'
else:
fname= "embedding/embedding.200.header_txt"
embeddings = load_text_vec(alphabet,fname,embedding_size = dim)
sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim)
pickle.dump( sub_embeddings,open(pkl_name,"wb"))
return sub_embeddings
@log_time_delta
def get_mini_batch_test(df,alphabet,batch_size):
q = []
a = []
pos_overlap = []
for index,row in df.iterrows():
question = encode_to_split(row["question"],alphabet)
answer = encode_to_split(row["answer"],alphabet)
overlap_pos = overlap_index(row['question'],row['answer'])
q.append(question)
a.append(answer)
pos_overlap.append(overlap_pos)
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in mini_batches:
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_pos_overlap = [pos_overlap[t] for t in mini_batch]
mb_q,mb_q_mask = prepare_data(mb_q)
mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap)
yield(mb_q,mb_a)
# calculate the overlap_index
def overlap_index(question,answer,stopwords = []):
ans_token = cut(answer)
qset = set(cut(question))
aset = set(ans_token)
a_len = len(ans_token)
# q_index = np.arange(1,q_len)
a_index = np.arange(1,a_len + 1)
overlap = qset.intersection(aset)
# for i,q in enumerate(cut(question)[:q_len]):
# value = 1
# if q in overlap:
# value = 2
# q_index[i] = value
for i,a in enumerate(ans_token):
if a in overlap:
a_index[i] = OVERLAP
return a_index
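# Illustrative example: overlap_index("what is tf", "tf is a library") returns [237, 237, 3, 4]:
# answer tokens that also occur in the question are marked with OVERLAP (237), the others keep
# their 1-based position in the answer.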
def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False):
q,a,neg_a=[],[],[]
answers=df["answer"][:250]
ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict()
for question in tqdm(df['question'].unique()):
index= ground_truth[question]
canindates = [i for i in range(250)]
canindates.remove(index)
a_neg_index = random.choice(canindates)
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(answers[index],alphabet)
seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)
q.append(seq_q)
a.append( seq_a)
neg_a.append(seq_neg_a )
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
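# Note: getBatch48008 assumes the "8008" data layout, where the first 250 rows hold the shared
# candidate answers and each question's ground-truth answer index is taken modulo 250; one random
# non-ground-truth candidate is drawn as the negative answer for every question.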
def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):
    if sort_by_len:
        sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)
        q = [ q[i] for i in sorted_index]
        a = [ a[i] for i in sorted_index]
        neg_a = [ neg_a[i] for i in sorted_index]
    #get batch
    m = 0
    n = len(q)
    idx_list = np.arange(m,n,batch_size)
    if shuffle:
        np.random.shuffle(idx_list)
    mini_batches = []
    for idx in idx_list:
        mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
    for mini_batch in tqdm(mini_batches):
        mb_q = [ q[t] for t in mini_batch]
        mb_a = [ a[t] for t in mini_batch]
        mb_neg_a = [ neg_a[t] for t in mini_batch]
        # pad each side to the batch maximum length and build the matching masks
        mb_q,mb_q_mask = prepare_data(mb_q)
        mb_a,mb_a_mask = prepare_data(mb_a)
        mb_neg_a,mb_a_neg_mask = prepare_data(mb_neg_a)
        yield(mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask)
def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
q = []
a = []
neg_a = []
for question in df['question'].unique():
# group = df[df["question"]==question]
# pos_answers = group[df["flag"] == 1]["answer"]
# neg_answers = group[df["flag"] == 0]["answer"].reset_index()
group = df[df["question"]==question]
pos_answers = group[group["flag"] == 1]["answer"]
neg_answers = group[group["flag"] == 0]["answer"]#.reset_index()
for pos in pos_answers:
if model is not None and sess is not None:
pos_sent= encode_to_split(pos,alphabet)
q_sent,q_mask= prepare_data([pos_sent])
neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers]
a_sent,a_mask= prepare_data(neg_sents)
scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask))
neg_index = scores.argmax()
else:
if len(neg_answers.index) > 0:
neg_index = np.random.choice(neg_answers.index)
neg = neg_answers.reset_index().loc[neg_index,]["answer"]
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(pos,alphabet)
seq_neg_a = encode_to_split(neg,alphabet)
q.append(seq_q)
a.append(seq_a)
neg_a.append(seq_neg_a)
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def prepare_data(seqs,overlap = None):
lengths = [len(seq) for seq in seqs]
n_samples = len(seqs)
max_len = np.max(lengths)
x = np.zeros((n_samples,max_len)).astype('int32')
if overlap is not None:
overlap_position = np.zeros((n_samples,max_len)).astype('float')
for idx ,seq in enumerate(seqs):
x[idx,:lengths[idx]] = seq
overlap_position[idx,:lengths[idx]] = overlap[idx]
return x,overlap_position
else:
x_mask = np.zeros((n_samples, max_len)).astype('float')
for idx, seq in enumerate(seqs):
x[idx, :lengths[idx]] = seq
x_mask[idx, :lengths[idx]] = 1.0
# print( x, x_mask)
return x, x_mask
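# Illustrative example: prepare_data([[4, 7], [9]]) pads both sequences to the batch maximum and returns
#   x      = [[4, 7], [9, 0]]        (int32 token ids)
#   x_mask = [[1., 1.], [1., 0.]]    (1.0 marks real tokens, 0.0 marks padding)
# When `overlap` is given, the second return value holds the overlap positions instead of the mask.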
# def prepare_data(seqs):
# lengths = [len(seq) for seq in seqs]
# n_samples = len(seqs)
# max_len = np.max(lengths)
# x = np.zeros((n_samples, max_len)).astype('int32')
# x_mask = np.zeros((n_samples, max_len)).astype('float')
# for idx, seq in enumerate(seqs):
# x[idx, :lengths[idx]] = seq
# x_mask[idx, :lengths[idx]] = 1.0
# # print( x, x_mask)
# return x, x_mask
def getLogger():
import sys
import logging
import os
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
        os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
return logger
|
{"/main.py": ["/data_helper.py"], "/run.py": ["/config.py", "/data_helper.py", "/models/__init__.py"], "/models/__init__.py": ["/models/QA_CNN_pairwise.py"], "/test.py": ["/config.py", "/data_helper.py", "/models/__init__.py"]}
|
158
|
pedromeldola/Desafio
|
refs/heads/master
|
/core/models.py
|
from django.db import models
# model class with its attributes
class Jogo(models.Model):
idJogo = models.AutoField(primary_key=True)
placar = models.IntegerField()
placarMin = models.IntegerField()
placarMax = models.IntegerField()
quebraRecMin = models.IntegerField()
quebraRecMax = models.IntegerField()
def __str__(self):
return str(self.idJogo)
|
{"/core/views.py": ["/core/models.py"]}
|
159
|
pedromeldola/Desafio
|
refs/heads/master
|
/core/views.py
|
from django.shortcuts import render,redirect
from .models import Jogo
from django.views.decorators.csrf import csrf_protect
# view that loads every Jogo object when the home page is requested
def home_page(request):
jogo = Jogo.objects.all()
return render (request,'home.html',{'jogo':jogo})
# view that inserts the data into the table when the button is clicked
def inserir(request):
placar = request.POST.get('nPlacar')
    # fetch the values from the previously stored records
try:
placarMin = int(Jogo.objects.earliest('placarMin').placarMin)
placarMax = int(Jogo.objects.latest('placarMax').placarMax)
quebraRecMin = int(Jogo.objects.latest('quebraRecMin').quebraRecMin)
quebraRecMax = int(Jogo.objects.latest('quebraRecMax').quebraRecMax)
    except Jogo.DoesNotExist:
placarMin = False
placarMax = False
quebraRecMin = False
quebraRecMax = False
placar = int(placar)
    # decide how the new score updates the min/max values and the record-break counters
if placarMin is False:
placarMin = placar
placarMax = placar
elif placar < placarMin:
placarMin = placar
quebraRecMin += 1
elif placar > placarMax or placarMax is False:
placarMax = placar
quebraRecMax += 1
    else:
        # no record was broken; keep the counters unchanged
        pass
    # create the Jogo object with all attributes already populated
jogo = Jogo.objects.create(placarMin=placarMin,placar=placar,placarMax=placarMax,quebraRecMin=quebraRecMin,quebraRecMax=quebraRecMax)
    return redirect('/')  # go back to the home page after the new record is inserted
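# Worked example (illustrative): inserting the scores 10, 7 and 12 in that order creates rows with
# placarMin/placarMax = 10/10, 7/10 and 7/12, and quebraRecMin/quebraRecMax = 0/0, 1/0 and 1/1.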
|
{"/core/views.py": ["/core/models.py"]}
|
160
|
pedromeldola/Desafio
|
refs/heads/master
|
/core/migrations/0002_auto_20200930_2254.py
|
# Generated by Django 3.1 on 2020-10-01 01:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='jogo',
name='id',
),
migrations.AlterField(
model_name='jogo',
name='idJogo',
field=models.AutoField(primary_key=True, serialize=False),
),
migrations.AlterField(
model_name='jogo',
name='placar',
field=models.IntegerField(),
),
]
|
{"/core/views.py": ["/core/models.py"]}
|
161
|
pedromeldola/Desafio
|
refs/heads/master
|
/core/migrations/0001_initial.py
|
# Generated by Django 3.1.1 on 2020-09-28 18:50
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Jogo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idJogo', models.IntegerField()),
('placar', models.IntegerField(max_length=3)),
('placarMin', models.IntegerField()),
('placarMax', models.IntegerField()),
('quebraRecMin', models.IntegerField()),
('quebraRecMax', models.IntegerField()),
],
),
]
|
{"/core/views.py": ["/core/models.py"]}
|
185
|
andrewjschuang/Turing
|
refs/heads/master
|
/turing.py
|
import time
from datetime import datetime
from flask import (Flask, abort, flash, redirect, render_template, request,
session, url_for)
from sqlalchemy.exc import IntegrityError
from wtforms import (Form, RadioField, StringField, SubmitField, TextAreaField, TextField,
validators)
from models.model import User, Project, Task, Questionnaire, Question, Response
from models.shared import db
class SignUp(Form):
name = TextField('Name:', validators=[validators.required()])
email = TextField('Email:', validators=[
validators.required(), validators.Length(min=6, max=35)])
password = TextField('Password:', validators=[
validators.required(), validators.Length(min=3, max=35)])
class Login(Form):
email = TextField('Email:', validators=[
validators.required(), validators.Length(min=6, max=35)])
password = TextField('Password:', validators=[
validators.required(), validators.Length(min=3, max=35)])
def create_app(config=None):
app = Flask(__name__)
if config:
app.config.from_mapping(config)
else:
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///prod.db'
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db.init_app(app)
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.route('/signup', methods=['GET', 'POST'])
def signup():
form = SignUp(request.form)
if request.method == 'POST':
if form.validate():
name = request.form['name']
password = request.form['password']
email = request.form['email']
u = User(email=email, name=name, password=password)
db.session.add(u)
db.session.commit()
session['auth'] = {'name': name,
'email': email, 'timestamp': time.time()}
return redirect(url_for('index'))
else:
flash('All the form fields are required.', category='error')
return render_template('signup.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = Login(request.form)
if request.method == 'POST':
if form.validate():
password = request.form['password']
email = request.form['email']
user = User.query.filter_by(email=email).first()
print(user)
if user:
print(user)
if user.password == password:
session['auth'] = {'name': user.name,
'email': user.email,
'timestamp': time.time()
}
return redirect(url_for('index'))
else:
flash('Authentication failed', category='error')
else:
flash('Authentication failed', category='error')
else:
flash('All the form fields are required', category='error')
return render_template('login.html', form=form)
@app.route('/', methods=['GET'])
def index():
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
info = user.get_index_data()
print(info)
return render_template('index.html', **info)
return redirect('/login')
@app.route('/responses')
def responses():
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
quests = Questionnaire.query.all()
return render_template('responses.html', quests=quests)
@app.route('/respond/<int:ref>', methods=['GET', 'POST'])
def respond(ref):
quest = Questionnaire.query.get(ref)
if not quest:
print('no questionnaire found with id %s' % ref)
return abort(404)
if request.method == 'GET':
return render_template('feedback.html', name=quest.name, questions=quest.questions)
elif request.method == 'POST':
for question_id in request.form:
question = Question.query.get(question_id)
resp = Response(question=question.id, rating=request.form.get(question_id))
db.session.add(resp)
db.session.commit()
return render_template('feedback_received.html')
@app.route('/projects', methods=['GET', 'POST'])
def projects():
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
if request.method == 'POST':
name = request.form['projectName']
description = request.form['projectDescription']
pro = Project(name=name,description=description)
db.session.add(pro)
user.project.append(pro)
db.session.commit()
grid = user.get_project_grid(3)
return render_template('projects.html', projectgrid=grid)
return redirect('/login')
@app.route('/tasks/user')
@app.route('/tasks/user/<int:ref>', methods=['GET', 'POST'])
def user_tasks(ref=None):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
if ref:
user: User = User.query.filter_by(id=ref).first()
if not user:
return abort(404)
if request.method == 'POST':
name = request.form.get('taskName')
description = request.form.get('taskDescription')
t_time = request.form.get('taskTime')
if not all((name, description, t_time)):
abort(404)
t_time = datetime.strptime(t_time,'%Y-%m-%dT%H:%M:%S.%fZ')
n_task: Task = Task(name=name, description=description, end_time=t_time)
user.tasks.append(n_task)
db.session.commit()
return ('', 200)  # abort() only accepts error status codes; return an empty 200 like the other task routes
else:
return render_template('tasks.html', data=user)
@app.route('/tasks/project/<int:ref>', methods=['GET', 'POST'])
def proj_tasks(ref):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
project:Project = Project.query.filter_by(id=ref).first()
if not project:
return abort(404)
if request.method == 'POST':
name = request.form.get('taskName')
description = request.form.get('taskDescription')
t_time = request.form.get('taskDate')
if not all((name, description, t_time)):
abort(404)
t_time = datetime.strptime(t_time,'%Y-%m-%dT%H:%M:%S.%fZ')
n_task: Task = Task(name=name, description=description, end_time=t_time)
project.tasks.append(n_task)
user.tasks.append(n_task)
db.session.commit()
return ('' ,200)
else:
return render_template('tasks.html', data=project)
@app.route('/tasks/task/<int:ref>', methods=['GET', 'POST'])
def task_tasks(ref):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
task:Task = Task.query.filter_by(id=ref).first()
if not task:
return abort(404)
if request.method == 'POST':
name = request.form.get('taskName')
description = request.form.get('taskDescription')
t_time = request.form.get('taskDate')
if not all((name, description, t_time)):
abort(404)
t_time = datetime.strptime(t_time,'%Y-%m-%dT%H:%M:%S.%fZ')
n_task: Task = Task(name=name, description=description, end_time=t_time)
db.session.add(n_task)
task.tasks.append(n_task)
db.session.commit()
user.tasks.append(n_task)
db.session.commit()
print(task, task.tasks)
print(n_task, n_task.tasks)
return ('' ,200)
else:
print(task, task.tasks)
return render_template('tasks.html', data=task)
@app.route('/test', methods=['GET'])
def test():
return render_template('newQuestionnaire.html')
@app.route('/questionnaire/<int:ref>', methods=['GET', 'POST'])
def questionnaire(ref):
auth = session.get('auth')
if auth:
user: User = User.query.filter_by(email=auth.get('email')).first()
if not user:
session['auth'] = {}
return redirect('/login')
task:Task = Task.query.filter_by(id=ref).first()
if not task:
return abort(404)
if request.method == 'POST':
name = request.form.get('name')
if not name:
return abort(404)
quest = Questionnaire(name=name,task=task)
task.questionnaires.append(quest)
for key, value in request.form.items():
if not value or key == 'name':
continue
else:
quest.questions.append(Question(text=value,questionnaire=quest))
db.session.commit()
return render_template('newQuestionnaire.html')
@app.route('/logout', methods=['GET'])
def logout():
session.pop('auth', None)  # avoid a KeyError when no one is logged in
return redirect(url_for('index'))
return app
if __name__ == '__main__':
app = create_app()
db.create_all(app=app)
app.run(host='localhost', port=3000, debug=True)
|
{"/test.py": ["/turing.py"]}
|
186
|
andrewjschuang/Turing
|
refs/heads/master
|
/test.py
|
from flask_testing import TestCase
from models.shared import db
from models.model import User, Task, Project, Question, Response, Questionnaire
from turing import create_app
import unittest
class MyTest(TestCase):
def create_app(self):
config = {
'SQLALCHEMY_DATABASE_URI': 'sqlite:///test.db',
'TESTING': True,
'SECRET_KEY': 'secret',
'SQLALCHEMY_TRACK_MODIFICATIONS': True
}
return create_app(config)
def setUp(self):
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
def test_nothing(self):
assert True
def test_user(self):
user = User(email='em', name='us', password='pass')
db.session.add(user)
db.session.commit()
assert user in db.session
def test_project(self):
project = Project(name='n',description='desc')
db.session.add(project)
db.session.commit()
assert project in db.session
def test_task(self):
task = Task(name='n', description='desc')
db.session.add(task)
db.session.commit()
assert task in db.session
def test_usr_add_tsk2_prj(self):
user = User(email='em', name='us', password='pass')
db.session.add(user)
db.session.commit()
project = Project(name='n',description='desc')
db.session.add(project)
user.project.append(project)
db.session.commit()
project: Project= User.query.filter_by(email='em').first().project[0]
task = Task(name='n', description='desc')
db.session.add(task)
project.tasks.append(task)
db.session.commit()
assert user.project[0].tasks[0] == task
def test_sub_tasks(self):
task = Task(name='n', description='desc')
db.session.add(task)
assert task in db.session
s_task = Task(name='n', description='desc')
db.session.add(s_task)
assert task in db.session
db.session.commit()
task.tasks.append(s_task)
db.session.commit()
assert task.tasks[0] == s_task
def test_questionnaire(self):
questionnaire = Questionnaire(name='Questions')
db.session.add(questionnaire)
question0 = Question(text="ola ?", questionnaire=questionnaire)
question1 = Question(text="tudo bem ?", questionnaire=questionnaire)
questionnaire.questions.append(question0)
questionnaire.questions.append(question1)
for i in range(10):
question0.responses.append(Response(rating=5,question=question0))
for i in range(10):
question1.responses.append(Response(rating=5,question=question1))
rs = [x.rating for x in questionnaire.questions[0].responses]
assert sum(rs)/len(rs) == 5
rs = [x.rating for x in questionnaire.questions[1].responses]
assert sum(rs)/len(rs) == 5
if __name__ == '__main__':
unittest.main()
|
{"/test.py": ["/turing.py"]}
|
187
|
andrewjschuang/Turing
|
refs/heads/master
|
/functionalities.py
|
functionalities = {
'Login': 'Login page',
'Feedback': 'This feedback form',
'Todo': 'To do module',
'Projects': 'Anything related to projects',
'Code': 'Code editor',
'Forum': 'The forum',
'Profile': 'Your profile page',
}
|
{"/test.py": ["/turing.py"]}
|
188
|
yywang0514/dsnre
|
refs/heads/master
|
/train.py
|
import sys
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
import argparse
import logging
from lib import *
from model import *
def train(options):
if not os.path.exists(options.folder):
os.mkdir(options.folder)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s: %(name)s: %(levelname)s: %(message)s")
hdlr = logging.FileHandler(os.path.join(options.folder, options.file_log), mode = "w")
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.info("python %s" %(" ".join(sys.argv)))
#################################################################################
start_time = time.time()
msg = "Loading dicts from %s..." %(options.file_dic)
display(msg)
vocab = dicfold(options.file_dic)
word2idx, pre_train_emb, part_point = build_word2idx(vocab, options.file_emb)
msg = "Loading data from %s..." %(options.file_train)
display(msg)
train = datafold(options.file_train)
msg = "Loading data from %s..." %(options.file_test)
display(msg)
test = datafold(options.file_test)
end_time = time.time()
msg = "Loading data time: %f seconds" %(end_time - start_time)
display(msg)
options.size_vocab = len(word2idx)
if options.devFreq == -1:
options.devFreq = (len(train) + options.batch_size - 1) // options.batch_size
msg = "#inst in train: %d" %(len(train))
display(msg)
msg = "#inst in test %d" %(len(test))
display(msg)
msg = "#word vocab: %d" %(options.size_vocab)
display(msg)
msg = "=" * 30 + "Hyperparameter:" + "=" * 30
display(msg)
for attr, value in sorted(vars(options).items(), key = lambda x: x[0]):
msg = "{}={}".format(attr.upper(), value)
display(msg)
#################################################################################
msg = "=" * 30 + "model:" + "=" * 30
display(msg)
os.environ["CUDA_VISIBLE_DEVICES"] = options.gpus
if options.seed is not None:
torch.manual_seed(options.seed)
np.random.seed(options.seed)
model = Model(options.fine_tune,
pre_train_emb,
part_point,
options.size_vocab,
options.dim_emb,
options.dim_proj,
options.head_count,
options.dim_FNN,
options.act_str,
options.num_layer,
options.num_class,
options.dropout_rate).cuda()
if os.path.exists("{}.pt".format(options.reload_model)):
model.load_state_dict(torch.load("{}.pt".format(options.reload_model)))
parameters = list(filter(lambda param: param.requires_grad, model.parameters()))  # materialize so the same list is still usable for gradient clipping below
optimizer = optimizer_wrapper(options.optimizer, options.lr, parameters)
msg = "\n{}".format(model)
display(msg)
#################################################################################
checkpoint_dir = os.path.join(options.folder, "checkpoints")
if not os.path.exists(checkpoint_dir):
os.mkdir(checkpoint_dir)
best_path = os.path.join(checkpoint_dir, options.saveto)
#################################################################################
msg = "=" * 30 + "Optimizing:" + "=" * 30
display(msg)
[train_rels, train_nums, train_sents, train_poss, train_eposs] = bags_decompose(train)
[test_rels, test_nums, test_sents, test_poss, test_eposs] = bags_decompose(test)
# batch_index = [0, 1, 2]
# batch_rels = [train_rels[m][0] for m in batch_index]
# batch_nums = [train_nums[m] for m in batch_index]
# batch_sents = [train_sents[m] for m in batch_index]
# batch_poss = [train_poss[m] for m in batch_index]
# batch_eposs = [train_eposs[m] for m in batch_index]
# batch_data = select_instance(batch_rels,
# batch_nums,
# batch_sents,
# batch_poss,
# batch_eposs,
# model)
# for sent in batch_data[0]:
# print(sent)
# print(batch_data[1])
# print(batch_data[2])
# print(batch_data[3])
train_idx_list = np.arange(len(train))
steps_per_epoch = (len(train) + options.batch_size - 1) // options.batch_size
n_updates = 0
for e in range(options.nepochs):
np.random.shuffle(train_idx_list)
for step in range(steps_per_epoch):
batch_index = train_idx_list[step * options.batch_size: (step + 1) * options.batch_size]
batch_rels = [train_rels[m][0] for m in batch_index]
batch_nums = [train_nums[m] for m in batch_index]
batch_sents = [train_sents[m] for m in batch_index]
batch_poss = [train_poss[m] for m in batch_index]
batch_eposs = [train_eposs[m] for m in batch_index]
batch_data = select_instance(batch_rels,
batch_nums,
batch_sents,
batch_poss,
batch_eposs,
model)
disp_start = time.time()
model.train()
n_updates += 1
optimizer.zero_grad()
logit = model(batch_data[0], batch_data[1], batch_data[2])
loss = F.cross_entropy(logit, batch_data[3])
loss.backward()
if options.clip_c != 0:
total_norm = torch.nn.utils.clip_grad_norm_(parameters, options.clip_c)
optimizer.step()
disp_end = time.time()
if np.mod(n_updates, options.dispFreq) == 0:
msg = "Epoch: %d, Step: %d, Loss: %f, Time: %.2f sec" %(e, n_updates, loss.cpu().item(), disp_end - disp_start)
display(msg)
if np.mod(n_updates, options.devFreq) == 0:
msg = "=" * 30 + "Evaluating" + "=" * 30
display(msg)
model.eval()
test_predict = predict(test_rels, test_nums, test_sents, test_poss, test_eposs, model)
test_pr = positive_evaluation(test_predict)
msg = 'test set PR = [' + str(test_pr[0][-1]) + ' ' + str(test_pr[1][-1]) + ']'
display(msg)
msg = "Saving model..."
display(msg)
torch.save(model.state_dict(), "{}_step_{}.pt".format(best_path, n_updates))
msg = "Model checkpoint has been saved to {}_step_{}.pt".format(best_path, n_updates)
display(msg)
end_time = time.time()
msg = "Optimizing time: %f seconds" %(end_time - start_time)
display(msg)
def predict(rels, nums, sents, poss, eposs, model):
numBags = len(rels)
predict_y = np.zeros((numBags), dtype=np.int32)
predict_y_prob = np.zeros((numBags), dtype=np.float32)
y = np.asarray(rels, dtype='int32')
for bagIndex, insRel in enumerate(rels):
insNum = nums[bagIndex]
maxP = -1
pred_rel_type = 0
max_pos_p = -1
positive_flag = False
for m in range(insNum):
insX = sents[bagIndex][m]
epos = eposs[bagIndex][m]
sel_x, sel_len, sel_epos = prepare_data([insX], [epos])
results = model(sel_x, sel_len, sel_epos)
rel_type = results.argmax()
if positive_flag and rel_type == 0:
continue
else:
# at least one instance is positive
tmpMax = results.max()
if rel_type > 0:
positive_flag = True
if tmpMax > max_pos_p:
max_pos_p = tmpMax
pred_rel_type = rel_type
else:
if tmpMax > maxP:
maxP = tmpMax
if positive_flag:
predict_y_prob[bagIndex] = max_pos_p
else:
predict_y_prob[bagIndex] = maxP
predict_y[bagIndex] = pred_rel_type
return [predict_y, predict_y_prob, y]
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument("--folder", help = "the dir of model", default = "workshop")
parser.add_argument("--file_dic", help = "the file of vocabulary", default = "./data/50/dict.txt")
parser.add_argument("--file_train", help = "the file of training data", default = "./data/gap_40_len_80/train_filtered.data")
parser.add_argument("--file_test", help = "the file of testing data", default = "./data/gap_40_len_80/test_filtered.data")
# parser.add_argument("--file_emb", help = "the file of embedding", default = "./data/50/dict_emb.txt")
parser.add_argument("--file_emb", help = "the file of embedding", default = "")
parser.add_argument("--file_log", help = "the log file", default = "train.log")
parser.add_argument("--reload_model", help = "the pretrained model", default = "")
parser.add_argument("--saveto", help = "the file to save the parameter", default = "model")
parser.add_argument("--seed", help = "the random seed", default = 1234, type = int)
parser.add_argument("--size_vocab", help = "the size of vocabulary", default = 10000, type = int)
parser.add_argument("--dim_emb", help = "the dimension of the word embedding", default = 256, type = int)
parser.add_argument("--dim_proj", help = "the dimension of the hidden state", default = 256, type = int)
parser.add_argument("--head_count", help = "the num of head in multi head attention", default = 8, type = int)
parser.add_argument("--dim_FNN", help = "the dimension of the positionwise FNN", default = 256, type = int)
parser.add_argument("--act_str", help = "the activation function of the positionwise FNN", default = "relu")
parser.add_argument("--num_layer", help = "the num of layers", default = 6, type = int)
parser.add_argument("--num_class", help = "the number of labels", default = 27, type = int)
parser.add_argument("--position_emb", help = "if true, the position embedding will be used", default = False, action = "store_true")
parser.add_argument("--fine_tune", help = "if true, the pretrained embedding will be fine tuned", default = False, action = "store_true")
parser.add_argument("--optimizer", help = "optimization algorithm", default = "adam")
parser.add_argument("--lr", help = "learning rate", default = 0.0004, type = float)
parser.add_argument("--dropout_rate", help = "dropout rate", default = 0.5, type = float)
parser.add_argument("--clip_c", help = "grad clip", default = 10.0, type = float)
parser.add_argument("--nepochs", help = "the max epoch", default = 30, type = int)
parser.add_argument("--batch_size", help = "batch size", default = 32, type = int)
parser.add_argument("--dispFreq", help = "the frequence of display", default = 100, type = int)
parser.add_argument("--devFreq", help = "the frequence of evaluation", default = -1, type = int)
parser.add_argument("--wait_N", help = "use to early stop", default = 1, type = int)
parser.add_argument("--patience", help = "use to early stop", default = 7, type = int)
parser.add_argument("--maxlen", help = "max length of sentence", default = 100, type = int)
parser.add_argument("--gpus", help = "specify the GPU IDs", default = "0")
options = parser.parse_args(argv)
train(options)
if "__main__" == __name__:
main(sys.argv[1:])
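# --- Illustrative usage (not part of the original repo): the trainer can also be driven
# --- programmatically by handing main() an argv-style list. The flags and paths below
# --- mirror the argparse defaults above; a CUDA device is assumed because the model
# --- and batches are moved to the GPU with .cuda().
# main(["--folder", "workshop",
#       "--file_dic", "./data/50/dict.txt",
#       "--file_train", "./data/gap_40_len_80/train_filtered.data",
#       "--file_test", "./data/gap_40_len_80/test_filtered.data",
#       "--batch_size", "32",
#       "--gpus", "0"])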
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
189
|
yywang0514/dsnre
|
refs/heads/master
|
/lib/module.py
|
import torch
import torch.nn as nn
import numpy as np  # needed by Word_Emb._init_params when fine-tuning pre-trained embeddings
import math
class LayerNorm(nn.Module):
"""Layer Normalization class"""
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class MLP(nn.Module):
def __init__(self, dim_in, dim_out):
super(MLP, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self._init_params()
def _init_params(self):
self.mlp = nn.Linear(in_features = self.dim_in,
out_features = self.dim_out)
def forward(self, inp):
proj_inp = self.mlp(inp)
return proj_inp
class BiLstm(nn.Module):
def __init__(self, dim_in, dim_out):
super(BiLstm, self).__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self._init_params()
def _init_params(self):
self.bilstm = nn.LSTM(input_size = self.dim_in,
hidden_size = self.dim_out,
bidirectional = True)
def forward(self, inp, inp_len):
sorted_inp_len, sorted_idx = torch.sort(inp_len, dim = 0, descending=True)
sorted_inp = torch.index_select(inp, dim = 1, index = sorted_idx)
pack_inp = torch.nn.utils.rnn.pack_padded_sequence(sorted_inp, sorted_inp_len)
proj_inp, _ = self.bilstm(pack_inp)
proj_inp = torch.nn.utils.rnn.pad_packed_sequence(proj_inp)
unsorted_idx = torch.zeros(sorted_idx.size()).long().cuda().scatter_(0, sorted_idx, torch.arange(inp.size()[1]).long().cuda())
unsorted_proj_inp = torch.index_select(proj_inp[0], dim = 1, index = unsorted_idx)
return unsorted_proj_inp
class Word_Emb(nn.Module):
def __init__(self,
fine_tune,
pre_train_emb,
part_point,
size_vocab,
dim_emb):
super(Word_Emb, self).__init__()
self.fine_tune = fine_tune
self.pre_train_emb = pre_train_emb
self.part_point = part_point
self.size_vocab = size_vocab
self.dim_emb = dim_emb
self._init_params()
def _init_params(self):
self.embedding = torch.nn.ModuleList()
if (not self.fine_tune) and self.pre_train_emb:
self.embedding.append(nn.Embedding(self.part_point, self.dim_emb))
self.embedding.append(nn.Embedding.from_pretrained(torch.Tensor(self.pre_train_emb), freeze = True))
elif self.fine_tune and self.pre_train_emb:
init_embedding = 0.01 * np.random.randn(self.size_vocab, self.dim_emb).astype(np.float32)
init_embedding[self.part_point: ] = self.pre_train_emb
self.embedding.append(nn.Embedding.from_pretrained(torch.Tensor(init_embedding), freeze = False))
else:
self.embedding.append(nn.Embedding(self.size_vocab, self.dim_emb))
def forward(self, inp):
if (not self.fine_tune) and self.pre_train_emb:
def get_emb(inp):
mask = self.inp2mask(inp)
inp_1 = inp * mask
emb_1 = self.embedding[0](inp_1) * mask[:, :, None].float()
inp_2 = (inp - self.part_point) * (1 - mask)
emb_2 = self.embedding[1](inp_2) * (1 - mask)[:, :, None].float()
emb = emb_1 + emb_2
return emb
emb_inp = get_emb(inp)
else:
emb_inp = self.embedding[0](inp)
return emb_inp
def inp2mask(self, inp):
mask = (inp < self.part_point).long()
return mask
class Position_Emb(nn.Module):
def __init__(self, dim_emb):
super(Position_Emb, self).__init__()
self.dim_emb = dim_emb
self._init_params()
def _init_params(self):
pass
def forward(self, inp):
pass
class Wemb(nn.Module):
"""docstring for Wemb"""
def __init__(self,
fine_tune,
pre_train_emb,
part_point,
size_vocab,
dim_emb,
position_emb,
dropout_rate):
super(Wemb, self).__init__()
self.fine_tune = fine_tune
self.pre_train_emb = pre_train_emb
self.part_point = part_point
self.size_vocab = size_vocab
self.dim_emb = dim_emb
self.position_emb = position_emb
self.dropout_rate = dropout_rate
self._init_params()
def _init_params(self):
self.wembs = torch.nn.ModuleList()
self.wembs.append(Word_Emb(self.fine_tune, self.pre_train_emb, self.part_point, self.size_vocab, self.dim_emb))
if self.position_emb:
self.wembs.append(Position_Emb(self.dim_emb))
self.layer_norm = LayerNorm(self.dim_emb)
self.dropout = nn.Dropout(self.dropout_rate)
def forward(self, inp):
def add_n(inps):
rval = inps[0] * 0
for inp in inps:
rval += inp
return rval
emb_inps = []
for wemb in self.wembs:
emb_inps.append(wemb(inp))
emb_inp = add_n(emb_inps)
emb_inp = self.layer_norm(emb_inp)
emb_inp = self.dropout(emb_inp)
return emb_inp
class Multi_Head_Attention(nn.Module):
def __init__(self,
dim_proj,
head_count,
dropout_rate):
super(Multi_Head_Attention, self).__init__()
self.dim_proj = dim_proj
self.head_count = head_count
self.dim_per_head = self.dim_proj // self.head_count
self.dropout_rate = dropout_rate
self._init_params()
def _init_params(self):
self.linear_key = nn.Linear(self.dim_proj, self.head_count * self.dim_per_head)
self.linear_value = nn.Linear(self.dim_proj, self.head_count * self.dim_per_head)
self.linear_query = nn.Linear(self.dim_proj, self.head_count * self.dim_per_head)
self.dropout = nn.Dropout(self.dropout_rate)
self.softmax = nn.Softmax(dim=-1)
def forward(self, key, value, query, mask = None):
# key: batch X key_len X hidden
# value: batch X value_len X hidden
# query: batch X query_len X hidden
# mask: batch X query_len X key_len
batch_size = key.size()[0]
key_ = self.linear_key(key)
value_ = self.linear_value(value)
query_ = self.linear_query(query)
key_ = key_.reshape(batch_size, -1, self.head_count, self.dim_per_head).transpose(1, 2)
value_ = value_.reshape(batch_size, -1, self.head_count, self.dim_per_head).transpose(1, 2)
query_ = query_.reshape(batch_size, -1, self.head_count, self.dim_per_head).transpose(1, 2)
attention_scores = torch.matmul(query_, key_.transpose(2, 3))
attention_scores = attention_scores / math.sqrt(float(self.dim_per_head))
if mask is not None:
mask = mask.unsqueeze(1).expand_as(attention_scores)
attention_scores = attention_scores.masked_fill(1 - mask, -1e18)
attention_probs = self.softmax(attention_scores)
attention_probs = self.dropout(attention_probs)
context = torch.matmul(attention_probs, value_)
context = context.transpose(1, 2).reshape(batch_size, -1, self.head_count * self.dim_per_head)
return context
class TransformerEncoderBlock(nn.Module):
def __init__(self,
dim_proj,
head_count,
dim_FNN,
act_fn,
dropout_rate):
super(TransformerEncoderBlock, self).__init__()
self.dim_proj = dim_proj
self.head_count = head_count
self.dim_FNN = dim_FNN
self.act_fn = act_fn
self.dropout_rate = dropout_rate
self._init_params()
def _init_params(self):
self.multi_head_attention = Multi_Head_Attention(self.dim_proj, self.head_count, self.dropout_rate)
self.linear_proj_context = MLP(self.dim_proj, self.dim_proj)
self.layer_norm_context = LayerNorm(self.dim_proj)
self.position_wise_fnn = MLP(self.dim_proj, self.dim_FNN)
self.linear_proj_intermediate = MLP(self.dim_FNN, self.dim_proj)
self.layer_norm_intermediate = LayerNorm(self.dim_proj)
self.dropout = nn.Dropout(self.dropout_rate)
def forward(self, inp, mask):
context = self.multi_head_attention(inp, inp, inp, mask = mask)
context = self.linear_proj_context(context)
context = self.dropout(context)
res_inp = self.layer_norm_context(inp + context)
rval = self.act_fn(self.position_wise_fnn(res_inp))
rval = self.linear_proj_intermediate(rval)
rval = self.dropout(rval)
res_rval = self.layer_norm_intermediate(rval + res_inp)
return res_rval
def get_activation(act_str):
if act_str == "relu":
return torch.nn.ReLU()
elif act_str == "tanh":
return torch.nn.Tanh()
elif act_str == "sigmoid":
return torch.nn.Sigmoid()
class TransformerEncoder(nn.Module):
def __init__(self,
dim_proj,
head_count,
dim_FNN,
act_str,
num_layers,
dropout_rate):
super(TransformerEncoder, self).__init__()
self.dim_proj = dim_proj
self.head_count = head_count
self.dim_FNN = dim_FNN
self.act_fn = get_activation(act_str)
self.num_layers = num_layers
self.dropout_rate = dropout_rate
self._init_params()
def _init_params(self):
self.transformer = torch.nn.ModuleList([TransformerEncoderBlock(self.dim_proj, self.head_count, self.dim_FNN, self.act_fn, self.dropout_rate) for _ in range(self.num_layers)])
def forward(self, inp, mask = None):
rval = []
pre_output = inp
for i in range(self.num_layers):
cur_output = self.transformer[i](pre_output, mask)
rval.append(cur_output)
pre_output = cur_output
return pre_output, rval
def optimizer_wrapper(optimizer, lr, parameters):
if optimizer == "adam":
opt = torch.optim.Adam(params = parameters, lr = lr)
return opt
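# --- Illustrative sketch (not part of the original repo): a CPU shape check for
# --- Multi_Head_Attention using small, hypothetical dimensions. The projected
# --- context keeps the (batch, query_len, dim_proj) layout described in the
# --- comments inside forward().
if __name__ == "__main__":
    batch, seq_len, dim_proj, heads = 2, 7, 16, 4
    attn = Multi_Head_Attention(dim_proj, heads, dropout_rate=0.1)
    x = torch.randn(batch, seq_len, dim_proj)
    out = attn(x, x, x)  # self-attention, no mask
    assert out.shape == (batch, seq_len, dim_proj)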
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
190
|
yywang0514/dsnre
|
refs/heads/master
|
/model.py
|
import torch
import torch.nn as nn
from lib import *
class Model(nn.Module):
def __init__(self,
fine_tune,
pre_train_emb,
part_point,
size_vocab,
dim_emb,
dim_proj,
head_count,
dim_FNN,
act_str,
num_layer,
num_class,
dropout_rate):
super(Model, self).__init__()
self.fine_tune = fine_tune
self.pre_train_emb = pre_train_emb
self.part_point = part_point
self.size_vocab = size_vocab
self.dim_emb = dim_emb
self.dim_proj = dim_proj
self.head_count = head_count
self.dim_FNN = dim_FNN
self.act_str = act_str
self.num_layer = num_layer
self.num_class = num_class
self.dropout_rate = dropout_rate
self._init_params()
def _init_params(self):
self.wemb = Word_Emb(self.fine_tune,
self.pre_train_emb,
self.part_point,
self.size_vocab,
self.dim_emb)
self.encoder = TransformerEncoder(self.dim_proj,
self.head_count,
self.dim_FNN,
self.act_str,
self.num_layer,
self.dropout_rate)
self.dense = MLP(self.dim_proj * 3, self.dim_proj)
self.relu = torch.nn.ReLU()
self.classifier = MLP(self.dim_proj, self.num_class)
self.dropout = nn.Dropout(self.dropout_rate)
def forward(self, inp, lengths, epos):
mask, mask_l, mask_m, mask_r = self.pos2mask(epos, lengths)
emb_inp = self.wemb(inp)
emb_inp = self.dropout(emb_inp)
proj_inp, _ = self.encoder(emb_inp, self.create_attention_mask(mask, mask))
proj_inp = proj_inp * mask[:, :, None]
pool_inp_l = torch.sum(proj_inp * mask_l[:, :, None], dim = 1) / torch.sum(mask_l, dim = 1)[:, None]
pool_inp_m = torch.sum(proj_inp * mask_m[:, :, None], dim = 1) / torch.sum(mask_m, dim = 1)[:, None]
pool_inp_r = torch.sum(proj_inp * mask_r[:, :, None], dim = 1) / torch.sum(mask_r, dim = 1)[:, None]
pool_inp = torch.cat([pool_inp_l, pool_inp_m, pool_inp_r], dim = 1)
pool_inp = self.dropout(pool_inp)
logit = self.relu(self.dense(pool_inp))
logit = self.dropout(logit)
logit = self.classifier(logit)
return logit
def pos2mask(self, epos, lengths):
mask = self.len2mask(lengths)
nsample = lengths.size()[0]
max_len = torch.max(lengths)
idxes = torch.arange(0, max_len).cuda()
mask_l = (idxes < epos[:, 0].unsqueeze(1)).float()
mask_r = mask - (idxes < epos[:, 1].unsqueeze(1)).float()
mask_m = torch.ones([nsample, max_len]).float().cuda() - mask_l - mask_r
return mask, mask_l, mask_m, mask_r
def len2mask(self, lengths):
max_len = torch.max(lengths)
idxes = torch.arange(0, max_len).cuda()
mask = (idxes < lengths.unsqueeze(1)).float()
return mask
def create_attention_mask(self, query_mask, key_mask):
return torch.matmul(query_mask[:, :, None], key_mask[:, None, :]).byte()
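# --- Illustrative sketch (not part of the original repo): the three pooling masks that
# --- pos2mask builds, reproduced on CPU for one sentence of length 8 whose two entity
# --- positions are hypothetically 2 and 5 (the real method keeps everything on the GPU).
if __name__ == "__main__":
    lengths = torch.tensor([8])
    epos = torch.tensor([[2, 5]])
    idxes = torch.arange(0, int(lengths.max()))
    mask = (idxes < lengths.unsqueeze(1)).float()               # valid (non-padded) tokens
    mask_l = (idxes < epos[:, 0].unsqueeze(1)).float()          # tokens before the first entity
    mask_r = mask - (idxes < epos[:, 1].unsqueeze(1)).float()   # tokens from the second entity on
    mask_m = torch.ones_like(mask) - mask_l - mask_r            # the span in between
    # mask_l -> [[1,1,0,0,0,0,0,0]]
    # mask_m -> [[0,0,1,1,1,0,0,0]]
    # mask_r -> [[0,0,0,0,0,1,1,1]]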
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
191
|
yywang0514/dsnre
|
refs/heads/master
|
/lib/__init__.py
|
from module import *
from util import *
from data_iterator import *
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
192
|
yywang0514/dsnre
|
refs/heads/master
|
/format.py
|
import sys
import codecs
class InstanceBag(object):
def __init__(self, entities, rel, num, sentences, positions, entitiesPos):
self.entities = entities
self.rel = rel
self.num = num
self.sentences = sentences
self.positions = positions
self.entitiesPos = entitiesPos
def bags_decompose(data_bags):
bag_sent = [data_bag.sentences for data_bag in data_bags]
bag_pos = [data_bag.positions for data_bag in data_bags]
bag_num = [data_bag.num for data_bag in data_bags]
bag_rel = [data_bag.rel for data_bag in data_bags]
bag_epos = [data_bag.entitiesPos for data_bag in data_bags]
return [bag_rel, bag_num, bag_sent, bag_pos, bag_epos]
def datafold(filename):
f = open(filename, 'r')
data = []
while 1:
line = f.readline()
if not line:
break
entities = map(int, line.split(' '))
line = f.readline()
bagLabel = line.split(' ')
rel = map(int, bagLabel[0:-1])
num = int(bagLabel[-1])
positions = []
sentences = []
entitiesPos = []
for i in range(0, num):
sent = f.readline().split(' ')
positions.append(map(int, sent[0:2]))
epos = map(int, sent[0:2])
epos.sort()
entitiesPos.append(epos)
sentences.append(map(int, sent[2:-1]))
ins = InstanceBag(entities, rel, num, sentences, positions, entitiesPos)
data += [ins]
f.close()
return data
def change_word_idx(data):
new_data = []
for inst in data:
entities = inst.entities
rel = inst.rel
num = inst.num
sentences = inst.sentences
positions = inst.positions
entitiesPos = inst.entitiesPos
new_sentences = []
for sent in sentences:
new_sent = []
for word in sent:
if word == 160696:
new_sent.append(1)
elif word == 0:
new_sent.append(0)
else:
new_sent.append(word + 1)
new_sentences.append(new_sent)
new_inst = InstanceBag(entities, rel, num, new_sentences, positions, entitiesPos)
new_data.append(new_inst)
return new_data
def save_data(data, textfile):
with codecs.open(textfile, "w", encoding = "utf8") as f:
for inst in data:
f.write("%s\n" %(" ".join(map(str, inst.entities))))
f.write("%s %s\n" %(" ".join(map(str, inst.rel)), str(inst.num)))
for pos, sent in zip(inst.positions, inst.sentences):
f.write("%s %s\n" %(" ".join(map(str, pos)), " ".join(map(str, sent))))
def main(argv):
data = datafold(argv[0])
new_data = change_word_idx(data)
save_data(new_data, argv[1])
if "__main__" == __name__:
main(sys.argv[1:])
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
193
|
yywang0514/dsnre
|
refs/heads/master
|
/lib/util.py
|
import sys
import re
import numpy as np
import cPickle as pkl
import codecs
import logging
from data_iterator import *
logger = logging.getLogger()
extra_token = ["<PAD>", "<UNK>"]
def display(msg):
print(msg)
logger.info(msg)
def datafold(filename):
f = open(filename, 'r')
data = []
while 1:
line = f.readline()
if not line:
break
entities = map(int, line.split(' '))
line = f.readline()
bagLabel = line.split(' ')
rel = map(int, bagLabel[0:-1])
num = int(bagLabel[-1])
positions = []
sentences = []
entitiesPos = []
for i in range(0, num):
sent = f.readline().split(' ')
positions.append(map(int, sent[0:2]))
epos = map(int, sent[0:2])
epos.sort()
entitiesPos.append(epos)
sentences.append(map(int, sent[2:-1]))
ins = InstanceBag(entities, rel, num, sentences, positions, entitiesPos)
data += [ins]
f.close()
return data
def dicfold(textfile):
vocab = []
with codecs.open(textfile, "r", encoding = "utf8") as f:
for line in f:
line = line.strip()
if line:
vocab.append(line)
return vocab
def build_word2idx(vocab, textFile):
msg = "Building word2idx..."
display(msg)
pre_train_emb = []
part_point = len(vocab)
if textFile:
word2emb = load_emb(vocab, textFile)
pre_train_vocab = []
un_pre_train_vocab = []
for word in vocab:
if word in word2emb:
pre_train_vocab.append(word)
pre_train_emb.append(word2emb[word])
else:
un_pre_train_vocab.append(word)
part_point = len(un_pre_train_vocab)
un_pre_train_vocab.extend(pre_train_vocab)
vocab = un_pre_train_vocab
word2idx = {}
for v, k in enumerate(extra_token):
word2idx[k] = v
for v, k in enumerate(vocab):
word2idx[k] = v + 2
part_point += 2
return word2idx, pre_train_emb, part_point
def load_emb(vocab, textFile):
msg = 'load emb from ' + textFile
display(msg)
vocab_set = set(vocab)
word2emb = {}
emb_p = re.compile(r" |\t")
count = 0
with codecs.open(textFile, "r", "utf8") as filein:
for line in filein:
count += 1
array = emb_p.split(line.strip())
word = array[0]
if word in vocab_set:
vector = [float(array[i]) for i in range(1, len(array))]
word2emb[word] = vector
del vocab_set
msg = "find %d words in %s" %(count, textFile)
display(msg)
msg = "Summary: %d words in the vocabulary and %d of them appear in the %s" %(len(vocab), len(word2emb), textFile)
display(msg)
return word2emb
def positive_evaluation(predict_results):
predict_y = predict_results[0]
predict_y_prob = predict_results[1]
y_given = predict_results[2]
positive_num = 0
#find the number of positive examples
for yi in range(y_given.shape[0]):
if y_given[yi, 0] > 0:
positive_num += 1
# if positive_num == 0:
# positive_num = 1
# sort prob
index = np.argsort(predict_y_prob)[::-1]
all_pre = [0]
all_rec = [0]
p_n = 0
p_p = 0
n_p = 0
# print y_given.shape[0]
for i in range(y_given.shape[0]):
labels = y_given[index[i],:] # key given labels
py = predict_y[index[i]] # answer
if labels[0] == 0:
# NA bag
if py > 0:
n_p += 1
else:
# positive bag
if py == 0:
p_n += 1
else:
flag = False
for j in range(y_given.shape[1]):
if j == -1:
break
if py == labels[j]:
flag = True # true positive
break
if flag:
p_p += 1
if (p_p+n_p) == 0:
precision = 1
else:
precision = float(p_p)/(p_p+n_p)
recall = float(p_p)/positive_num
if precision != all_pre[-1] or recall != all_rec[-1]:
all_pre.append(precision)
all_rec.append(recall)
return [all_pre[1:], all_rec[1:]]
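# --- Illustrative sketch (not part of the original repo): the input layout that
# --- positive_evaluation expects, shown for three toy bags (one of them the NA class,
# --- label 0). Values are hypothetical; running it assumes this file's own Python 2
# --- environment, since the module imports cPickle at the top.
if __name__ == "__main__":
    predict_y = np.array([1, 0, 2], dtype="int32")               # predicted relation per bag
    predict_y_prob = np.array([0.9, 0.8, 0.7], dtype="float32")  # confidence of each prediction
    y_given = np.array([[1], [0], [2]], dtype="int32")           # gold label slots per bag
    all_pre, all_rec = positive_evaluation([predict_y, predict_y_prob, y_given])
    # all_pre == [1.0, 1.0] and all_rec == [0.5, 1.0]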
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
194
|
yywang0514/dsnre
|
refs/heads/master
|
/lib/data_iterator.py
|
import time
import cPickle
import numpy as np
import torch
class InstanceBag(object):
def __init__(self, entities, rel, num, sentences, positions, entitiesPos):
self.entities = entities
self.rel = rel
self.num = num
self.sentences = sentences
self.positions = positions
self.entitiesPos = entitiesPos
def bags_decompose(data_bags):
bag_sent = [data_bag.sentences for data_bag in data_bags]
bag_pos = [data_bag.positions for data_bag in data_bags]
bag_num = [data_bag.num for data_bag in data_bags]
bag_rel = [data_bag.rel for data_bag in data_bags]
bag_epos = [data_bag.entitiesPos for data_bag in data_bags]
return [bag_rel, bag_num, bag_sent, bag_pos, bag_epos]
def select_instance(rels, nums, sents, poss, eposs, model):
batch_x = []
batch_len = []
batch_epos = []
batch_y = []
for bagIndex, insNum in enumerate(nums):
maxIns = 0
maxP = -1
if insNum > 1:
for m in range(insNum):
insX = sents[bagIndex][m]
epos = eposs[bagIndex][m]
sel_x, sel_len, sel_epos = prepare_data([insX], [epos])
results = model(sel_x, sel_len, sel_epos)
tmpMax = results.max()
if tmpMax > maxP:
maxIns = m
maxP=tmpMax
batch_x.append(sents[bagIndex][maxIns])
batch_epos.append(eposs[bagIndex][maxIns])
batch_y.append(rels[bagIndex])
batch_x, batch_len, batch_epos = prepare_data(batch_x, batch_epos)
batch_y = torch.LongTensor(np.array(batch_y).astype("int32")).cuda()
return [batch_x, batch_len, batch_epos, batch_y]
def prepare_data(sents, epos):
lens = [len(sent) for sent in sents]
n_samples = len(lens)
max_len = max(lens)
batch_x = np.zeros((n_samples, max_len)).astype("int32")
for idx, s in enumerate(sents):
batch_x[idx, :lens[idx]] = s
batch_len = np.array(lens).astype("int32")
batch_epos = np.array(epos).astype("int32")
return torch.LongTensor(batch_x).cuda(), torch.LongTensor(batch_len).cuda(), torch.LongTensor(batch_epos).cuda()
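# --- Illustrative sketch (not part of the original repo): the right-padding rule that
# --- prepare_data applies, reproduced with plain numpy and toy values so it can run
# --- without the CUDA device the real helper requires (it moves every tensor with .cuda()).
if __name__ == "__main__":
    toy_sents = [[4, 8, 15], [16, 23]]            # two sentences of different length
    lens = [len(s) for s in toy_sents]            # [3, 2]
    batch_x = np.zeros((len(toy_sents), max(lens))).astype("int32")
    for idx, s in enumerate(toy_sents):
        batch_x[idx, :lens[idx]] = s              # right-pad with the <PAD> index 0
    # batch_x == [[ 4,  8, 15],
    #             [16, 23,  0]]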
|
{"/train.py": ["/lib/__init__.py", "/model.py"], "/model.py": ["/lib/__init__.py"]}
|
210
|
rodelrod/pomodoro-report
|
refs/heads/master
|
/test_notebook_parser.py
|
#!/usr/bin/env python
import unittest
from notebook_parser import *
import os
import errno
from datetime import datetime
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
class TestParser(unittest.TestCase):
"""Tests the RedNotebook monthly files parser."""
def setUp(self):
self.nb_path = '/tmp/test_pomodoro_report'
mkdir_p(self.nb_path)
f = open(os.path.join(self.nb_path, '2012-10.txt'), 'w')
f.write(
"21: {text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 org desk'}\n"
"25:\n"
" Cat3: {Some other shit: null}\n"
" text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 org desk'\n"
"27:\n"
" Cat1: {Some shit: null}\n"
" text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 report incongruencias sewan pdf/cdr\n"
" 1/1 fix b''illing db and run\n"
" 0/2 guide entretien prestataire\n"
" 0/1 org desk'\n")
f.close()
self.p = Parser(self.nb_path)
def test_get_nb_filename(self):
self.assertEqual(
self.p._get_nb_filename(datetime(2012, 10, 14)),
os.path.join(self.nb_path,'2012-10.txt'))
def test_parse_day_block(self):
block = ['', '5', 'some stuff', '26', 'some other stuff']
expected = {5: 'some stuff', 26: 'some other stuff'}
self.assertEqual(self.p._parse_day_block(block), expected)
def test_get_day_with_categories(self):
"""Get day 27."""
expected = (
"\n"
" Cat1: {Some shit: null}\n"
" text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 report incongruencias sewan pdf/cdr\n"
" 1/1 fix b''illing db and run\n"
" 0/2 guide entretien prestataire\n"
" 0/1 org desk'\n")
actual = self.p._get_day(datetime(2012, 10, 27))
self.assertEqual(actual, expected)
def test_get_day_without_categories(self):
"""Get day 21."""
expected = (
" {text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 org desk'}\n")
actual = self.p._get_day(datetime(2012, 10, 21))
self.assertEqual(actual, expected)
def test_get_inexistant_day(self):
"""Get 14/10."""
with self.assertRaises(EmptyDayException):
self.p._get_day(datetime(2012, 10, 14))
def test_get_inexistant_month(self):
"""Get 14/04."""
with self.assertRaises(EmptyDayException):
self.p._get_day(datetime(2012, 4, 14))
def test_get_text_with_categories(self):
block = (
"\n"
" Cat1: {Some shit: null}\n"
" text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 report incongruencias sewan pdf/cdr\n"
" 1/1 fix b''illing db and run\n"
" 0/2 guide entretien prestataire\n"
" 0/1 org desk'\n")
expected = (
"1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 report incongruencias sewan pdf/cdr\n"
" 1/1 fix b'illing db and run\n"
" 0/2 guide entretien prestataire\n"
" 0/1 org desk")
self.assertEqual(self.p._get_text(block), expected)
def test_get_text_without_categories(self):
block = (
" {text: '1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 org desk'}\n")
expected = (
"1/1 fix import sewan\n"
" 2/2 check fidelidade, delete 0836\n"
" 0/1 org desk")
self.assertEqual(self.p._get_text(block), expected)
def test_get_pomodoros(self):
# TODO
pass
def tearDown(self):
os.remove(os.path.join(self.nb_path, '2012-10.txt'))
if __name__ == '__main__':
unittest.main()
|
{"/test_notebook_parser.py": ["/notebook_parser.py"]}
|