| repo_name (stringclasses, 400 values) | branch_name (stringclasses, 4 values) | file_content (string, 16–72.5k chars) | language (stringclasses, 1 value) | num_lines (int64, 1–1.66k) | avg_line_length (float64, 6–85) | max_line_length (int64, 9–949) | path (string, 5–103 chars) | alphanum_fraction (float64, 0.29–0.89) | alpha_fraction (float64, 0.27–0.89) | context (string, 0–91.6k chars) | context_file_paths (list, 0–3 items) |
|---|---|---|---|---|---|---|---|---|---|---|---|
shuishen112/pairwise-rnn
|
refs/heads/master
|
import data_helper
import time
import datetime
import os
import tensorflow as tf
import numpy as np
import evaluation
from models.QA_CNN_pairwise import QA_CNN_extend  # model class used below (assumed path: models/QA_CNN_pairwise.py)
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
timeDay = time.strftime("%Y%m%d", timeArray)
print (timeStamp)
def main(args):
args._parse_flags()
print("\nParameters:")
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
log_dir = 'log/'+ timeDay
if not os.path.exists(log_dir):
os.makedirs(log_dir)
data_file = log_dir + '/test_' + args.data + timeStamp
precision = data_file + 'precise'
print('load data ...........')
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev])
print('the number of words',len(alphabet))
print('get embedding')
if args.data=="quora":
embedding = data_helper.get_embedding(alphabet,language="cn")
else:
embedding = data_helper.get_embedding(alphabet)
with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model = QA_CNN_extend(max_input_left = q_max_sent_length,
max_input_right = a_max_sent_length,
batch_size = args.batch_size,
vocab_size = len(alphabet),
embedding_size = args.embedding_dim,
filter_sizes = list(map(int, args.filter_sizes.split(","))),
num_filters = args.num_filters,
hidden_size = args.hidden_size,
dropout_keep_prob = args.dropout_keep_prob,
embeddings = embedding,
l2_reg_lambda = args.l2_reg_lambda,
trainable = args.trainable,
pooling = args.pooling,
conv = args.conv)
model.build_graph()
sess.run(tf.global_variables_initializer())
def train_step(model,sess,batch):
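            # each batch element is (question, pos_answer, neg_answer, q_mask, a_mask, a_neg_mask)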
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.answer_negative:data[2],
model.q_mask:data[3],
model.a_mask:data[4],
model.a_neg_mask:data[5]
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[model.train_op, model.merged,model.global_step,model.loss, model.accuracy,model.score12,model.score13, model.see],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
def predict(model,sess,batch,test):
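            # each batch element is (question, answer, q_mask, a_mask); the collected scores are
            # truncated defensively to the number of test examples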
scores = []
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.q_mask:data[2],
model.a_mask:data[3]
}
score = sess.run(
model.score12,
feed_dict)
scores.extend(score)
return np.array(scores[:len(test)])
for i in range(args.num_epoches):
datas = data_helper.get_mini_batch(train,alphabet,args.batch_size)
train_step(model,sess,datas)
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
print(len(predicted_test))
print(len(test))
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
print('map_mrr test',map_mrr_test)
|
Python
| 116
| 36.043102
| 159
|
/main.py
| 0.553088
| 0.546842
|
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime,os
import models
import numpy as np
import evaluation
import sys
import logging
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename('program')
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_qcnn_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
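# getBatch48008 draws one wrong answer per question from the dataset's fixed pool of 250 candidates,
# while get_mini_batch samples a wrong answer for every (question, positive answer) pair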
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
# ckpt = tf.train.get_checkpoint_state("checkpoint")
# if ckpt and ckpt.model_checkpoint_path:
# # Restores from checkpoint
# saver.restore(sess, ckpt.model_checkpoint_path)
# if os.path.exists("model") :
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
# variable_averages = tf.train.ExponentialMovingAverage( model)
# variables_to_restore = variable_averages.variables_to_restore()
# saver = tf.train.Saver(variables_to_restore)
# for name in variables_to_restore:
# print(name)
sess.run(tf.global_variables_initializer())
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
best_p1=0
for i in range(args.num_epoches):
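        # passing model/sess lets get_mini_batch score every wrong answer with the current model
        # and keep the highest-scoring (hardest) negative for each positive pair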
for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess):
# for data in data_helper.getBatch48008(train,alphabet,args.batch_size):
_, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
#<<<<<<< HEAD
#
#
# if i>0 and i % 5 ==0:
# test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
#
# predicted_test = predict(model,sess,test_datas,test)
# map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
#
# logger.info('map_mrr test' +str(map_mrr_test))
# print('map_mrr test' +str(map_mrr_test))
#
# test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size)
# predicted_test = predict(model,sess,test_datas,dev)
# map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test)
#
# logger.info('map_mrr dev' +str(map_mrr_test))
# print('map_mrr dev' +str(map_mrr_test))
# map,mrr,p1 = map_mrr_test
# if p1>best_p1:
# best_p1=p1
# filename= "checkpoint/"+args.data+"_"+str(p1)+".model"
# save_path = saver.save(sess, filename)
# # load_path = saver.restore(sess, model_path)
#
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
#
#
#=======
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
logger.info('map_mrr test' +str(map_mrr_test))
        print('epoch ' + str(i) + ' map_mrr test ' + str(map_mrr_test))
--- FILE SEPARATOR ---
#coding:utf-8
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import models.blocks as blocks
# model_type :apn or qacnn
class QA_CNN_extend(object):
# def __init__(self,max_input_left,max_input_right,batch_size,vocab_size,embedding_size,filter_sizes,num_filters,hidden_size,
# dropout_keep_prob = 1,learning_rate = 0.001,embeddings = None,l2_reg_lambda = 0.0,trainable = True,pooling = 'attentive',conv = 'narrow'):
#
# """
# QA_RNN model for question answering
#
# Args:
# self.dropout_keep_prob: dropout rate
# self.num_filters : number of filters
# self.para : parameter list
# self.extend_feature_dim : my extend feature dimension
# self.max_input_left : the length of question
# self.max_input_right : the length of answer
# self.pooling : pooling strategy :max pooling or attentive pooling
#
# """
# self.dropout_keep_prob = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
# self.num_filters = num_filters
# self.embeddings = embeddings
# self.embedding_size = embedding_size
# self.batch_size = batch_size
# self.filter_sizes = filter_sizes
# self.l2_reg_lambda = l2_reg_lambda
# self.para = []
#
# self.max_input_left = max_input_left
# self.max_input_right = max_input_right
# self.trainable = trainable
# self.vocab_size = vocab_size
# self.pooling = pooling
# self.total_num_filter = len(self.filter_sizes) * self.num_filters
#
# self.conv = conv
# self.pooling = 'traditional'
# self.learning_rate = learning_rate
#
# self.hidden_size = hidden_size
#
# self.attention_size = 100
def __init__(self,opt):
for key,value in opt.items():
self.__setattr__(key,value)
self.attention_size = 100
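        # note: the next line overrides whatever pooling strategy was passed in through the options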
self.pooling = 'mean'
self.total_num_filter = len(self.filter_sizes) * self.num_filters
self.para = []
self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
def create_placeholder(self):
print(('Create placeholders'))
        # the sentence length varies from batch to batch, so both dimensions are None
self.question = tf.placeholder(tf.int32,[None,None],name = 'input_question')
self.max_input_left = tf.shape(self.question)[1]
self.batch_size = tf.shape(self.question)[0]
self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer')
self.max_input_right = tf.shape(self.answer)[1]
self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right')
# self.q_mask = tf.placeholder(tf.int32,[None,None],name = 'q_mask')
# self.a_mask = tf.placeholder(tf.int32,[None,None],name = 'a_mask')
# self.a_neg_mask = tf.placeholder(tf.int32,[None,None],name = 'a_neg_mask')
def add_embeddings(self):
print( 'add embeddings')
if self.embeddings is not None:
print( "load embedding")
W = tf.Variable(np.array(self.embeddings),name = "W" ,dtype="float32",trainable = self.trainable)
else:
print( "random embedding")
W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name="W",trainable = self.trainable)
self.embedding_W = W
# self.overlap_W = tf.Variable(a,name="W",trainable = True)
self.para.append(self.embedding_W)
self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question)
self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer)
self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative)
#real length
self.q_len,self.q_mask = blocks.length(self.question)
self.a_len,self.a_mask = blocks.length(self.answer)
self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative)
def convolution(self):
print( 'convolution:wide_convolution')
self.kernels = []
for i,filter_size in enumerate(self.filter_sizes):
with tf.name_scope('conv-max-pool-%s' % filter_size):
filter_shape = [filter_size,self.embedding_size,1,self.num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name="W")
b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name="b")
self.kernels.append((W,b))
self.para.append(W)
self.para.append(b)
embeddings = [self.q_embedding,self.a_embedding,self.a_neg_embedding]
self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings]
#convolution
def pooling_graph(self):
if self.pooling == 'mean':
self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask)
self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask)
elif self.pooling == 'attentive':
self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'position':
self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'traditional':
print( self.pooling)
print(self.q_cnn)
self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
def para_initial(self):
# print(("---------"))
# self.W_qp = tf.Variable(tf.truncated_normal(shape = [self.hidden_size * 2,1],stddev = 0.01,name = 'W_qp'))
self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U'))
self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm'))
self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm'))
self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms'))
self.M_qi = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi'))
    def mean_pooling(self,conv,mask):
        conv = tf.squeeze(conv,2)
        # zero out padded positions; self.see is fetched during training for inspection
        conv_mask = tf.multiply(conv,tf.expand_dims(tf.cast(mask,tf.float32),-1))
        self.see = conv_mask
        return tf.reduce_mean(conv,axis = 1)
def attentive_pooling(self,input_left,input_right,q_mask,a_mask):
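        # attentive pooling: G = tanh(Q·U·Aᵀ); max-pooling G over question positions yields attention
        # weights over the answer, and max-pooling over answer positions yields weights over the question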
Q = tf.squeeze(input_left,axis = 2)
A = tf.squeeze(input_right,axis = 2)
print( Q)
print( A)
# Q = tf.reshape(input_left,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,len(self.filter_sizes) * self.num_filters],name = 'A')
# G = tf.tanh(tf.matmul(tf.matmul(Q,self.U),\
# A,transpose_b = True),name = 'G')
first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U)
second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters])
result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1]))
print( second_step)
print( tf.transpose(A,perm = [0,2,1]))
# print( 'result',result)
G = tf.tanh(result)
# G = result
# column-wise pooling ,row-wise pooling
row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling')
col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling')
self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q')
self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1))
self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a')
self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1))
self.see = G
R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q')
R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a')
return R_q,R_a
def traditional_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2))
Q = tf.reduce_mean(input_left_mask,1)
a_shape = tf.shape(input_right)
A = tf.reshape(input_right,[-1,self.total_num_filter])
m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1))
f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1]))
self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2))
self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True))
self.see = self.f_attention_norm
a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1)
return Q,a_attention
def position_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
# Q = tf.reshape(input_left,[-1,self.max_input_left,self.hidden_size*2],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,self.hidden_size*2],name = 'A')
Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
QU = tf.matmul(Q,self.U)
QUA = tf.multiply(tf.expand_dims(QU,1),input_right)
        self.attention_a = tf.cast(tf.argmax(QUA,2),tf.float32)
# q_shape = tf.shape(input_left)
# Q_1 = tf.reshape(input_left,[-1,self.total_num_filter])
# QU = tf.matmul(Q_1,self.U)
# QU_1 = tf.reshape(QU,[-1,q_shape[1],self.total_num_filter])
# A_1 = tf.transpose(input_right,[0,2,1])
# QUA = tf.matmul(QU_1,A_1)
# QUA = tf.nn.l2_normalize(QUA,1)
# G = tf.tanh(QUA)
# Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
# # self.Q_mask = tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2))
# row_pooling = tf.reduce_max(G,1,name="row_pooling")
# col_pooling = tf.reduce_max(G,2,name="col_pooling")
# self.attention_a = tf.nn.softmax(row_pooling,1,name = "attention_a")
self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32))
self.see = self.attention_a
self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True))
self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter])
return Q ,self.r_a
def create_loss(self):
with tf.name_scope('score'):
self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn)
self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn)
l2_loss = tf.constant(0.0)
for p in self.para:
l2_loss += tf.nn.l2_loss(p)
with tf.name_scope("loss"):
self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13)))
self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss
tf.summary.scalar('loss', self.loss)
# Accuracy
with tf.name_scope("accuracy"):
self.correct = tf.equal(0.0, self.losses)
self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy")
tf.summary.scalar('accuracy', self.accuracy)
def create_op(self):
self.global_step = tf.Variable(0, name = "global_step", trainable = False)
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step)
def max_pooling(self,conv,input_length):
pooled = tf.nn.max_pool(
conv,
ksize = [1, input_length, 1, 1],
strides = [1, 1, 1, 1],
padding = 'VALID',
name="pool")
return pooled
def getCosine(self,q,a):
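        # cosine similarity between the dropout-regularized question and answer vectors: dot(q, a) / (|q|·|a|)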
pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder)
pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder)
pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1))
pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1))
pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1)
score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name="scores")
return score
def wide_convolution(self,embedding):
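        # wide ("SAME") convolution over time: each kernel spans the full embedding dimension, so every
        # token keeps one output column; outputs of all filter sizes are concatenated on the channel axis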
cnn_outputs = []
for i,filter_size in enumerate(self.filter_sizes):
conv = tf.nn.conv2d(
embedding,
self.kernels[i][0],
strides=[1, 1, self.embedding_size, 1],
padding='SAME',
name="conv-1"
)
h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name="relu-1")
cnn_outputs.append(h)
cnn_reshaped = tf.concat(cnn_outputs,3)
return cnn_reshaped
def variable_summaries(self,var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def build_graph(self):
self.create_placeholder()
self.add_embeddings()
self.para_initial()
self.convolution()
self.pooling_graph()
self.create_loss()
self.create_op()
self.merged = tf.summary.merge_all()
def train(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
self.answer_negative:data[2],
# self.q_mask:data[3],
# self.a_mask:data[4],
# self.a_neg_mask:data[5],
self.dropout_keep_prob_holder:self.dropout_keep_prob
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see],
feed_dict)
return _, summary, step, loss, accuracy,score12, score13, see
def predict(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
# self.q_mask:data[2],
# self.a_mask:data[3],
self.dropout_keep_prob_holder:1.0
}
score = sess.run( self.score12, feed_dict)
return score
if __name__ == '__main__':
    # the constructor now takes a single options dict (see __init__ above); learning_rate is
    # included because create_op() needs it
    cnn = QA_CNN_extend({
        'max_input_left': 33,
        'max_input_right': 40,
        'batch_size': 3,
        'vocab_size': 5000,
        'embedding_size': 100,
        'filter_sizes': [3, 4, 5],
        'num_filters': 64,
        'hidden_size': 100,
        'dropout_keep_prob': 1.0,
        'embeddings': None,
        'l2_reg_lambda': 0.0,
        'learning_rate': 0.001,
        'trainable': True,
        'pooling': 'max',
        'conv': 'wide'})
cnn.build_graph()
input_x_1 = np.reshape(np.arange(3 * 33),[3,33])
input_x_2 = np.reshape(np.arange(3 * 40),[3,40])
input_x_3 = np.reshape(np.arange(3 * 40),[3,40])
q_mask = np.ones((3,33))
a_mask = np.ones((3,40))
a_neg_mask = np.ones((3,40))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {
cnn.question:input_x_1,
cnn.answer:input_x_2,
# cnn.answer_negative:input_x_3,
            # q_mask / a_mask are now computed inside the graph (see add_embeddings), so they are not fed here
            # cnn.q_mask:q_mask,
            # cnn.a_mask:a_mask,
            cnn.dropout_keep_prob_holder:cnn.dropout_keep_prob,
# cnn.a_neg_mask:a_neg_mask
# cnn.q_pos_overlap:q_pos_embedding,
# cnn.q_neg_overlap:q_neg_embedding,
# cnn.a_pos_overlap:a_pos_embedding,
# cnn.a_neg_overlap:a_neg_embedding,
# cnn.q_position:q_position,
# cnn.a_pos_position:a_pos_position,
# cnn.a_neg_position:a_neg_position
}
question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict)
print( question.shape,answer.shape)
print( score)
--- FILE SEPARATOR ---
#-*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
from functools import wraps
import time
import pickle
def log_time_delta(func):
@wraps(func)
def _deco(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
end = time.time()
delta = end - start
print( "%s runed %.2f seconds"% (func.__name__,delta))
return ret
return _deco
from nltk.corpus import stopwords
OVERLAP = 237
class Alphabet(dict):
def __init__(self, start_feature_id = 1):
self.fid = start_feature_id
def add(self, item):
idx = self.get(item, None)
if idx is None:
idx = self.fid
self[item] = idx
# self[idx] = item
self.fid += 1
return idx
def dump(self, fname):
with open(fname, "w") as out:
for k in sorted(self.keys()):
out.write("{}\t{}\n".format(k, self[k]))
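# Illustrative example: Alphabet hands out consecutive integer ids and reuses the id for known tokens.
#   a = Alphabet(start_feature_id=0); a.add('hello') -> 0; a.add('world') -> 1; a.add('hello') -> 0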
def cut(sentence):
tokens = sentence.lower().split()
# tokens = [w for w in tokens if w not in stopwords.words('english')]
return tokens
@log_time_delta
def load(dataset, filter = False):
data_dir = "data/" + dataset
datas = []
for data_name in ['train.txt','test.txt','dev.txt']:
data_file = os.path.join(data_dir,data_name)
data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0')
# data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0')
if filter == True:
datas.append(removeUnanswerdQuestion(data))
else:
datas.append(data)
# sub_file = os.path.join(data_dir,'submit.txt')
# submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3)
# datas.append(submit)
return tuple(datas)
@log_time_delta
def removeUnanswerdQuestion(df):
counter= df.groupby("question").apply(lambda group: sum(group["flag"]))
questions_have_correct=counter[counter>0].index
counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0))
questions_have_uncorrect=counter[counter>0].index
counter=df.groupby("question").apply(lambda group: len(group["flag"]))
questions_multi=counter[counter>1].index
    return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_multi) & df["question"].isin(questions_have_uncorrect)].reset_index()
@log_time_delta
def get_alphabet(corpuses=None,dataset=""):
pkl_name="temp/"+dataset+".alphabet.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
alphabet = Alphabet(start_feature_id = 0)
alphabet.add('[UNK]')
alphabet.add('END')
count = 0
for corpus in corpuses:
for texts in [corpus["question"].unique(),corpus["answer"]]:
for sentence in texts:
tokens = cut(sentence)
for token in set(tokens):
alphabet.add(token)
print("alphabet size %d" % len(alphabet.keys()) )
if not os.path.exists("temp"):
os.mkdir("temp")
pickle.dump( alphabet,open(pkl_name,"wb"))
return alphabet
@log_time_delta
def getSubVectorsFromDict(vectors,vocab,dim = 300):
embedding = np.zeros((len(vocab),dim))
    count = 0
for word in vocab:
if word in vectors:
count += 1
embedding[vocab[word]]= vectors[word]
else:
embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist()
print( 'word in embedding',count)
return embedding
def encode_to_split(sentence,alphabet):
indices = []
tokens = cut(sentence)
seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens]
return seq
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size = 100):
vectors = {}
with open(filename,encoding='utf-8') as f:
i = 0
for line in f:
i += 1
if i % 100000 == 0:
                print( 'epoch %d' % i)
items = line.strip().split(' ')
if len(items) == 2:
vocab_size, embedding_size= items[0],items[1]
print( ( vocab_size, embedding_size))
else:
word = items[0]
if word in alphabet:
vectors[word] = items[1:]
print( 'embedding_size',embedding_size)
print( 'done')
    print( 'words found in word2vec embedding ',len(vectors.keys()))
return vectors
@log_time_delta
def get_embedding(alphabet,dim = 300,language ="en",dataset=""):
pkl_name="temp/"+dataset+".subembedding.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
if language=="en":
fname = 'embedding/glove.6B/glove.6B.300d.txt'
else:
fname= "embedding/embedding.200.header_txt"
embeddings = load_text_vec(alphabet,fname,embedding_size = dim)
sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim)
pickle.dump( sub_embeddings,open(pkl_name,"wb"))
return sub_embeddings
@log_time_delta
def get_mini_batch_test(df,alphabet,batch_size):
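    # yields (padded_question_batch, padded_answer_batch) in dataframe order for evaluation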
q = []
a = []
pos_overlap = []
for index,row in df.iterrows():
question = encode_to_split(row["question"],alphabet)
answer = encode_to_split(row["answer"],alphabet)
overlap_pos = overlap_index(row['question'],row['answer'])
q.append(question)
a.append(answer)
pos_overlap.append(overlap_pos)
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in mini_batches:
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_pos_overlap = [pos_overlap[t] for t in mini_batch]
mb_q,mb_q_mask = prepare_data(mb_q)
mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap)
yield(mb_q,mb_a)
# calculate the overlap_index
def overlap_index(question,answer,stopwords = []):
ans_token = cut(answer)
qset = set(cut(question))
aset = set(ans_token)
a_len = len(ans_token)
# q_index = np.arange(1,q_len)
a_index = np.arange(1,a_len + 1)
overlap = qset.intersection(aset)
# for i,q in enumerate(cut(question)[:q_len]):
# value = 1
# if q in overlap:
# value = 2
# q_index[i] = value
for i,a in enumerate(ans_token):
if a in overlap:
a_index[i] = OVERLAP
return a_index
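# Illustrative example: overlap_index("what is tf", "tf is a library") marks answer tokens that also
# appear in the question with the OVERLAP id (237) and keeps 1-based positions elsewhere: [237, 237, 3, 4]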
def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False):
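    # assumes a fixed pool of 250 candidate answers (the first 250 rows); each question is paired with
    # its ground-truth answer and one randomly sampled wrong answer from that pool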
q,a,neg_a=[],[],[]
answers=df["answer"][:250]
ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict()
for question in tqdm(df['question'].unique()):
index= ground_truth[question]
        candidates = [i for i in range(250)]
        candidates.remove(index)
        a_neg_index = random.choice(candidates)
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(answers[index],alphabet)
seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)
q.append(seq_q)
a.append( seq_a)
neg_a.append(seq_neg_a )
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):
    if sort_by_len:
        sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)
        q = [q[i] for i in sorted_index]
        a = [a[i] for i in sorted_index]
        neg_a = [neg_a[i] for i in sorted_index]
    # build mini-batch index lists
    m = 0
    n = len(q)
    idx_list = np.arange(m,n,batch_size)
    if shuffle:
        np.random.shuffle(idx_list)
    mini_batches = []
    for idx in idx_list:
        mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
    for mini_batch in tqdm(mini_batches):
        mb_q = [q[t] for t in mini_batch]
        mb_a = [a[t] for t in mini_batch]
        mb_neg_a = [neg_a[t] for t in mini_batch]
        # pad each field to its batch max length and build the corresponding masks
        mb_q,mb_q_mask = prepare_data(mb_q)
        mb_a,mb_a_mask = prepare_data(mb_a)
        mb_neg_a,mb_a_neg_mask = prepare_data(mb_neg_a)
        yield (mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask)
def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
q = []
a = []
neg_a = []
for question in df['question'].unique():
# group = df[df["question"]==question]
# pos_answers = group[df["flag"] == 1]["answer"]
# neg_answers = group[df["flag"] == 0]["answer"].reset_index()
group = df[df["question"]==question]
pos_answers = group[group["flag"] == 1]["answer"]
neg_answers = group[group["flag"] == 0]["answer"]#.reset_index()
for pos in pos_answers:
if model is not None and sess is not None:
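                # rank all wrong answers with the current model (the positive answer is used as the
                # query side here) and keep the top-scoring one as the hard negative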
pos_sent= encode_to_split(pos,alphabet)
q_sent,q_mask= prepare_data([pos_sent])
neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers]
a_sent,a_mask= prepare_data(neg_sents)
scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask))
neg_index = scores.argmax()
else:
if len(neg_answers.index) > 0:
neg_index = np.random.choice(neg_answers.index)
neg = neg_answers.reset_index().loc[neg_index,]["answer"]
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(pos,alphabet)
seq_neg_a = encode_to_split(neg,alphabet)
q.append(seq_q)
a.append(seq_a)
neg_a.append(seq_neg_a)
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def prepare_data(seqs,overlap = None):
lengths = [len(seq) for seq in seqs]
n_samples = len(seqs)
max_len = np.max(lengths)
x = np.zeros((n_samples,max_len)).astype('int32')
if overlap is not None:
overlap_position = np.zeros((n_samples,max_len)).astype('float')
for idx ,seq in enumerate(seqs):
x[idx,:lengths[idx]] = seq
overlap_position[idx,:lengths[idx]] = overlap[idx]
return x,overlap_position
else:
x_mask = np.zeros((n_samples, max_len)).astype('float')
for idx, seq in enumerate(seqs):
x[idx, :lengths[idx]] = seq
x_mask[idx, :lengths[idx]] = 1.0
# print( x, x_mask)
return x, x_mask
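# Illustrative example: prepare_data([[5, 2], [7]]) returns
#   x      = [[5, 2], [7, 0]]       (sequences right-padded with 0 to the batch max length)
#   x_mask = [[1., 1.], [1., 0.]]   (1.0 on real tokens, 0.0 on padding)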
# def prepare_data(seqs):
# lengths = [len(seq) for seq in seqs]
# n_samples = len(seqs)
# max_len = np.max(lengths)
# x = np.zeros((n_samples, max_len)).astype('int32')
# x_mask = np.zeros((n_samples, max_len)).astype('float')
# for idx, seq in enumerate(seqs):
# x[idx, :lengths[idx]] = seq
# x_mask[idx, :lengths[idx]] = 1.0
# # print( x, x_mask)
# return x, x_mask
def getLogger():
import sys
import logging
import os
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
os.mkdir(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
return logger
|
[
"/run.py",
"/models/QA_CNN_pairwise.py",
"/data_helper.py"
] |
shuishen112/pairwise-rnn
|
refs/heads/master
|
class Singleton(object):
__instance=None
def __init__(self):
pass
def getInstance(self):
if Singleton.__instance is None:
# Singleton.__instance=object.__new__(cls,*args,**kwd)
Singleton.__instance=self.get_test_flag()
print("build FLAGS over")
return Singleton.__instance
def get_test_flag(self):
import tensorflow as tf
flags = tf.app.flags
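        # tf.app.flags is process-global; the length check below skips re-defining flags when more
        # than one get_*_flag method is called in the same process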
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_rnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
# flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('data','trec','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_cnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_qcnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "qcnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','mean','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_8008_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
if __name__=="__main__":
args=Singleton().get_test_flag()
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
|
Python
| 197
| 60.426395
| 121
|
/config.py
| 0.627396
| 0.597571
|
# -*- coding: utf-8 -*-
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime
import os
import models
import numpy as np
import evaluation
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_rnn_flag()
#args = Singleton().get_8008_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer()) # run the initializer before printing or saving
ckpt = tf.train.get_checkpoint_state("checkpoint")
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
print(sess.run(model.position_embedding)[0])
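    # re-export the restored graph and variables as a SavedModel for serving (save(True) writes the
    # graph definition as text)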
if os.path.exists("model") :
import shutil
shutil.rmtree("model")
builder = tf.saved_model.builder.SavedModelBuilder("./model")
builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
builder.save(True)
variable_averages = tf.train.ExponentialMovingAverage( model)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
for name in variables_to_restore:
print(name)
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
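    # quick smoke test: encode one pre-tokenized query and score it against itself with dropout disabled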
text = "怎么 提取 公积金 ?"
splited_text=data_helper.encode_to_split(text,alphabet)
mb_q,mb_q_mask = data_helper.prepare_data([splited_text])
mb_a,mb_a_mask = data_helper.prepare_data([splited_text])
data = (mb_q,mb_a,mb_q_mask,mb_a_mask)
score = model.predict(sess,data)
print(score)
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.q_mask:data[2],
model.a_mask:data[3],
model.dropout_keep_prob_holder:1.0
}
sess.run(model.position_embedding,feed_dict=feed_dict)[0]
--- FILE SEPARATOR ---
#-*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
from functools import wraps
import time
import pickle
def log_time_delta(func):
@wraps(func)
def _deco(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
end = time.time()
delta = end - start
print( "%s runed %.2f seconds"% (func.__name__,delta))
return ret
return _deco
from nltk.corpus import stopwords
OVERLAP = 237
class Alphabet(dict):
def __init__(self, start_feature_id = 1):
self.fid = start_feature_id
def add(self, item):
idx = self.get(item, None)
if idx is None:
idx = self.fid
self[item] = idx
# self[idx] = item
self.fid += 1
return idx
def dump(self, fname):
with open(fname, "w") as out:
for k in sorted(self.keys()):
out.write("{}\t{}\n".format(k, self[k]))
def cut(sentence):
tokens = sentence.lower().split()
# tokens = [w for w in tokens if w not in stopwords.words('english')]
return tokens
@log_time_delta
def load(dataset, filter = False):
data_dir = "data/" + dataset
datas = []
for data_name in ['train.txt','test.txt','dev.txt']:
data_file = os.path.join(data_dir,data_name)
data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0')
# data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0')
if filter == True:
datas.append(removeUnanswerdQuestion(data))
else:
datas.append(data)
# sub_file = os.path.join(data_dir,'submit.txt')
# submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3)
# datas.append(submit)
return tuple(datas)
@log_time_delta
def removeUnanswerdQuestion(df):
counter= df.groupby("question").apply(lambda group: sum(group["flag"]))
questions_have_correct=counter[counter>0].index
counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0))
questions_have_uncorrect=counter[counter>0].index
counter=df.groupby("question").apply(lambda group: len(group["flag"]))
questions_multi=counter[counter>1].index
    return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_multi) & df["question"].isin(questions_have_uncorrect)].reset_index()
@log_time_delta
def get_alphabet(corpuses=None,dataset=""):
pkl_name="temp/"+dataset+".alphabet.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
alphabet = Alphabet(start_feature_id = 0)
alphabet.add('[UNK]')
alphabet.add('END')
count = 0
for corpus in corpuses:
for texts in [corpus["question"].unique(),corpus["answer"]]:
for sentence in texts:
tokens = cut(sentence)
for token in set(tokens):
alphabet.add(token)
print("alphabet size %d" % len(alphabet.keys()) )
if not os.path.exists("temp"):
os.mkdir("temp")
pickle.dump( alphabet,open(pkl_name,"wb"))
return alphabet
@log_time_delta
def getSubVectorsFromDict(vectors,vocab,dim = 300):
embedding = np.zeros((len(vocab),dim))
    count = 0
for word in vocab:
if word in vectors:
count += 1
embedding[vocab[word]]= vectors[word]
else:
embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist()
print( 'word in embedding',count)
return embedding
def encode_to_split(sentence,alphabet):
indices = []
tokens = cut(sentence)
seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens]
return seq
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size = 100):
vectors = {}
with open(filename,encoding='utf-8') as f:
i = 0
for line in f:
i += 1
if i % 100000 == 0:
                print( 'epoch %d' % i)
items = line.strip().split(' ')
if len(items) == 2:
vocab_size, embedding_size= items[0],items[1]
print( ( vocab_size, embedding_size))
else:
word = items[0]
if word in alphabet:
vectors[word] = items[1:]
print( 'embedding_size',embedding_size)
print( 'done')
    print( 'words found in word2vec embedding ',len(vectors.keys()))
return vectors
@log_time_delta
def get_embedding(alphabet,dim = 300,language ="en",dataset=""):
pkl_name="temp/"+dataset+".subembedding.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
if language=="en":
fname = 'embedding/glove.6B/glove.6B.300d.txt'
else:
fname= "embedding/embedding.200.header_txt"
embeddings = load_text_vec(alphabet,fname,embedding_size = dim)
sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim)
pickle.dump( sub_embeddings,open(pkl_name,"wb"))
return sub_embeddings
@log_time_delta
def get_mini_batch_test(df,alphabet,batch_size):
q = []
a = []
pos_overlap = []
for index,row in df.iterrows():
question = encode_to_split(row["question"],alphabet)
answer = encode_to_split(row["answer"],alphabet)
overlap_pos = overlap_index(row['question'],row['answer'])
q.append(question)
a.append(answer)
pos_overlap.append(overlap_pos)
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in mini_batches:
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_pos_overlap = [pos_overlap[t] for t in mini_batch]
mb_q,mb_q_mask = prepare_data(mb_q)
mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap)
yield(mb_q,mb_a)
# calculate the overlap_index
def overlap_index(question,answer,stopwords = []):
ans_token = cut(answer)
qset = set(cut(question))
aset = set(ans_token)
a_len = len(ans_token)
# q_index = np.arange(1,q_len)
a_index = np.arange(1,a_len + 1)
overlap = qset.intersection(aset)
# for i,q in enumerate(cut(question)[:q_len]):
# value = 1
# if q in overlap:
# value = 2
# q_index[i] = value
for i,a in enumerate(ans_token):
if a in overlap:
a_index[i] = OVERLAP
return a_index
def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False):
q,a,neg_a=[],[],[]
answers=df["answer"][:250]
ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict()
for question in tqdm(df['question'].unique()):
index= ground_truth[question]
        candidates = [i for i in range(250)]
        candidates.remove(index)
        a_neg_index = random.choice(candidates)
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(answers[index],alphabet)
seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)
q.append(seq_q)
a.append( seq_a)
neg_a.append(seq_neg_a )
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):
    if sort_by_len:
        sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)
        q = [q[i] for i in sorted_index]
        a = [a[i] for i in sorted_index]
        neg_a = [neg_a[i] for i in sorted_index]
    # build mini-batch index lists
    m = 0
    n = len(q)
    idx_list = np.arange(m,n,batch_size)
    if shuffle:
        np.random.shuffle(idx_list)
    mini_batches = []
    for idx in idx_list:
        mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
    for mini_batch in tqdm(mini_batches):
        mb_q = [q[t] for t in mini_batch]
        mb_a = [a[t] for t in mini_batch]
        mb_neg_a = [neg_a[t] for t in mini_batch]
        # pad each field to its batch max length and build the corresponding masks
        mb_q,mb_q_mask = prepare_data(mb_q)
        mb_a,mb_a_mask = prepare_data(mb_a)
        mb_neg_a,mb_a_neg_mask = prepare_data(mb_neg_a)
        yield (mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask)
def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
q = []
a = []
neg_a = []
for question in df['question'].unique():
# group = df[df["question"]==question]
# pos_answers = group[df["flag"] == 1]["answer"]
# neg_answers = group[df["flag"] == 0]["answer"].reset_index()
group = df[df["question"]==question]
pos_answers = group[group["flag"] == 1]["answer"]
neg_answers = group[group["flag"] == 0]["answer"]#.reset_index()
for pos in pos_answers:
if model is not None and sess is not None:
pos_sent= encode_to_split(pos,alphabet)
q_sent,q_mask= prepare_data([pos_sent])
neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers]
a_sent,a_mask= prepare_data(neg_sents)
scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask))
neg_index = scores.argmax()
else:
if len(neg_answers.index) > 0:
neg_index = np.random.choice(neg_answers.index)
neg = neg_answers.reset_index().loc[neg_index,]["answer"]
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(pos,alphabet)
seq_neg_a = encode_to_split(neg,alphabet)
q.append(seq_q)
a.append(seq_a)
neg_a.append(seq_neg_a)
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def prepare_data(seqs,overlap = None):
lengths = [len(seq) for seq in seqs]
n_samples = len(seqs)
max_len = np.max(lengths)
x = np.zeros((n_samples,max_len)).astype('int32')
if overlap is not None:
overlap_position = np.zeros((n_samples,max_len)).astype('float')
for idx ,seq in enumerate(seqs):
x[idx,:lengths[idx]] = seq
overlap_position[idx,:lengths[idx]] = overlap[idx]
return x,overlap_position
else:
x_mask = np.zeros((n_samples, max_len)).astype('float')
for idx, seq in enumerate(seqs):
x[idx, :lengths[idx]] = seq
x_mask[idx, :lengths[idx]] = 1.0
# print( x, x_mask)
return x, x_mask
# def prepare_data(seqs):
# lengths = [len(seq) for seq in seqs]
# n_samples = len(seqs)
# max_len = np.max(lengths)
# x = np.zeros((n_samples, max_len)).astype('int32')
# x_mask = np.zeros((n_samples, max_len)).astype('float')
# for idx, seq in enumerate(seqs):
# x[idx, :lengths[idx]] = seq
# x_mask[idx, :lengths[idx]] = 1.0
# # print( x, x_mask)
# return x, x_mask
def getLogger():
import sys
import logging
import os
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
        os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
return logger
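# Usage sketch (illustration only): run.py consumes these helpers as
#   from data_helper import log_time_delta, getLogger
#   logger = getLogger()
# so every decorated loader reports its wall-clock time and all messages go to
# log/<date>/qa<timestamp>.log as configured above.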
--- FILE SEPARATOR ---
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime,os
import models
import numpy as np
import evaluation
import sys
import logging
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_qcnn_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
# ckpt = tf.train.get_checkpoint_state("checkpoint")
# if ckpt and ckpt.model_checkpoint_path:
# # Restores from checkpoint
# saver.restore(sess, ckpt.model_checkpoint_path)
# if os.path.exists("model") :
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
# variable_averages = tf.train.ExponentialMovingAverage( model)
# variables_to_restore = variable_averages.variables_to_restore()
# saver = tf.train.Saver(variables_to_restore)
# for name in variables_to_restore:
# print(name)
sess.run(tf.global_variables_initializer())
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
best_p1=0
for i in range(args.num_epoches):
for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess):
# for data in data_helper.getBatch48008(train,alphabet,args.batch_size):
_, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
#
#
# if i>0 and i % 5 ==0:
# test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
#
# predicted_test = predict(model,sess,test_datas,test)
# map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
#
# logger.info('map_mrr test' +str(map_mrr_test))
# print('map_mrr test' +str(map_mrr_test))
#
# test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size)
# predicted_test = predict(model,sess,test_datas,dev)
# map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test)
#
# logger.info('map_mrr dev' +str(map_mrr_test))
# print('map_mrr dev' +str(map_mrr_test))
# map,mrr,p1 = map_mrr_test
# if p1>best_p1:
# best_p1=p1
# filename= "checkpoint/"+args.data+"_"+str(p1)+".model"
# save_path = saver.save(sess, filename)
# # load_path = saver.restore(sess, model_path)
#
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
#
#
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
        logger.info('map_mrr test ' + str(map_mrr_test))
        print('epoch ' + str(i) + ' map_mrr test ' + str(map_mrr_test))
|
[
"/test.py",
"/data_helper.py",
"/run.py"
] |
shuishen112/pairwise-rnn
|
refs/heads/master
|
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime,os
import models
import numpy as np
import evaluation
import sys
import logging
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_qcnn_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
# ckpt = tf.train.get_checkpoint_state("checkpoint")
# if ckpt and ckpt.model_checkpoint_path:
# # Restores from checkpoint
# saver.restore(sess, ckpt.model_checkpoint_path)
# if os.path.exists("model") :
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
# variable_averages = tf.train.ExponentialMovingAverage( model)
# variables_to_restore = variable_averages.variables_to_restore()
# saver = tf.train.Saver(variables_to_restore)
# for name in variables_to_restore:
# print(name)
sess.run(tf.global_variables_initializer())
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
best_p1=0
for i in range(args.num_epoches):
for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess):
# for data in data_helper.getBatch48008(train,alphabet,args.batch_size):
_, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
#
#
# if i>0 and i % 5 ==0:
# test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
#
# predicted_test = predict(model,sess,test_datas,test)
# map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
#
# logger.info('map_mrr test' +str(map_mrr_test))
# print('map_mrr test' +str(map_mrr_test))
#
# test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size)
# predicted_test = predict(model,sess,test_datas,dev)
# map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test)
#
# logger.info('map_mrr dev' +str(map_mrr_test))
# print('map_mrr dev' +str(map_mrr_test))
# map,mrr,p1 = map_mrr_test
# if p1>best_p1:
# best_p1=p1
# filename= "checkpoint/"+args.data+"_"+str(p1)+".model"
# save_path = saver.save(sess, filename)
# # load_path = saver.restore(sess, model_path)
#
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
#
#
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
        logger.info('map_mrr test ' + str(map_mrr_test))
        print('epoch ' + str(i) + ' map_mrr test ' + str(map_mrr_test))
|
Python
| 164
| 35.829269
| 161
|
/run.py
| 0.628704
| 0.62142
|
class Singleton(object):
__instance=None
def __init__(self):
pass
def getInstance(self):
if Singleton.__instance is None:
# Singleton.__instance=object.__new__(cls,*args,**kwd)
Singleton.__instance=self.get_test_flag()
print("build FLAGS over")
return Singleton.__instance
def get_test_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_rnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
# flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('data','trec','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_cnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_qcnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "qcnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','mean','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_8008_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
if __name__=="__main__":
args=Singleton().get_test_flag()
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
--- FILE SEPARATOR ---
#-*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
from functools import wraps
import time
import pickle
def log_time_delta(func):
@wraps(func)
def _deco(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
end = time.time()
delta = end - start
print( "%s runed %.2f seconds"% (func.__name__,delta))
return ret
return _deco
# note: do not re-import the tqdm module here; it would shadow the tqdm
# function imported above and break the tqdm(...) progress calls below
from nltk.corpus import stopwords
OVERLAP = 237
class Alphabet(dict):
def __init__(self, start_feature_id = 1):
self.fid = start_feature_id
def add(self, item):
idx = self.get(item, None)
if idx is None:
idx = self.fid
self[item] = idx
# self[idx] = item
self.fid += 1
return idx
def dump(self, fname):
with open(fname, "w") as out:
for k in sorted(self.keys()):
out.write("{}\t{}\n".format(k, self[k]))
def cut(sentence):
tokens = sentence.lower().split()
# tokens = [w for w in tokens if w not in stopwords.words('english')]
return tokens
@log_time_delta
def load(dataset, filter = False):
data_dir = "data/" + dataset
datas = []
for data_name in ['train.txt','test.txt','dev.txt']:
data_file = os.path.join(data_dir,data_name)
data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0')
# data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0')
if filter == True:
datas.append(removeUnanswerdQuestion(data))
else:
datas.append(data)
# sub_file = os.path.join(data_dir,'submit.txt')
# submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3)
# datas.append(submit)
return tuple(datas)
@log_time_delta
def removeUnanswerdQuestion(df):
counter= df.groupby("question").apply(lambda group: sum(group["flag"]))
questions_have_correct=counter[counter>0].index
counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0))
questions_have_uncorrect=counter[counter>0].index
counter=df.groupby("question").apply(lambda group: len(group["flag"]))
questions_multi=counter[counter>1].index
return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_uncorrect)].reset_index()
@log_time_delta
def get_alphabet(corpuses=None,dataset=""):
pkl_name="temp/"+dataset+".alphabet.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
alphabet = Alphabet(start_feature_id = 0)
alphabet.add('[UNK]')
alphabet.add('END')
count = 0
for corpus in corpuses:
for texts in [corpus["question"].unique(),corpus["answer"]]:
for sentence in texts:
tokens = cut(sentence)
for token in set(tokens):
alphabet.add(token)
print("alphabet size %d" % len(alphabet.keys()) )
if not os.path.exists("temp"):
os.mkdir("temp")
pickle.dump( alphabet,open(pkl_name,"wb"))
return alphabet
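# Illustrative sketch only: a hypothetical helper showing the mapping that
# get_alphabet builds -- '[UNK]' gets id 0, 'END' gets id 1, and every new
# token of every question/answer gets the next free id.
def _alphabet_example():
    toy = pd.DataFrame({"question": ["what is nlp"],
                        "answer": ["nlp is natural language processing"],
                        "flag": [1]})
    alphabet = Alphabet(start_feature_id = 0)
    alphabet.add('[UNK]')
    alphabet.add('END')
    for sentence in list(toy["question"]) + list(toy["answer"]):
        for token in cut(sentence):
            alphabet.add(token)
    # e.g. alphabet['what'] == 2, alphabet['is'] == 3, alphabet['nlp'] == 4
    return alphabet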
@log_time_delta
def getSubVectorsFromDict(vectors,vocab,dim = 300):
embedding = np.zeros((len(vocab),dim))
count = 1
for word in vocab:
if word in vectors:
count += 1
embedding[vocab[word]]= vectors[word]
else:
embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist()
print( 'word in embedding',count)
return embedding
def encode_to_split(sentence,alphabet):
    tokens = cut(sentence)
    seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens]
    return seq
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size = 100):
vectors = {}
with open(filename,encoding='utf-8') as f:
i = 0
for line in f:
i += 1
if i % 100000 == 0:
                print( 'processed %d lines' % i)
items = line.strip().split(' ')
if len(items) == 2:
vocab_size, embedding_size= items[0],items[1]
print( ( vocab_size, embedding_size))
else:
word = items[0]
if word in alphabet:
vectors[word] = items[1:]
print( 'embedding_size',embedding_size)
print( 'done')
    print( 'words found in the pretrained embedding ',len(vectors.keys()))
return vectors
@log_time_delta
def get_embedding(alphabet,dim = 300,language ="en",dataset=""):
pkl_name="temp/"+dataset+".subembedding.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
if language=="en":
fname = 'embedding/glove.6B/glove.6B.300d.txt'
else:
fname= "embedding/embedding.200.header_txt"
embeddings = load_text_vec(alphabet,fname,embedding_size = dim)
sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim)
pickle.dump( sub_embeddings,open(pkl_name,"wb"))
return sub_embeddings
@log_time_delta
def get_mini_batch_test(df,alphabet,batch_size):
q = []
a = []
pos_overlap = []
for index,row in df.iterrows():
question = encode_to_split(row["question"],alphabet)
answer = encode_to_split(row["answer"],alphabet)
overlap_pos = overlap_index(row['question'],row['answer'])
q.append(question)
a.append(answer)
pos_overlap.append(overlap_pos)
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in mini_batches:
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_pos_overlap = [pos_overlap[t] for t in mini_batch]
mb_q,mb_q_mask = prepare_data(mb_q)
mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap)
yield(mb_q,mb_a)
# calculate the overlap_index
def overlap_index(question,answer,stopwords = []):
ans_token = cut(answer)
qset = set(cut(question))
aset = set(ans_token)
a_len = len(ans_token)
# q_index = np.arange(1,q_len)
a_index = np.arange(1,a_len + 1)
overlap = qset.intersection(aset)
# for i,q in enumerate(cut(question)[:q_len]):
# value = 1
# if q in overlap:
# value = 2
# q_index[i] = value
for i,a in enumerate(ans_token):
if a in overlap:
a_index[i] = OVERLAP
return a_index
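# Worked example (illustration only): with
#   question = "what is the capital of france"
#   answer   = "paris is the capital"
# the answer tokens are ['paris','is','the','capital']; 'is', 'the' and
# 'capital' also occur in the question, so the returned index vector is
# [1, 237, 237, 237] -- non-overlapping positions keep their 1-based position
# value and overlapping positions are replaced by the OVERLAP id (237).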
def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False):
q,a,neg_a=[],[],[]
answers=df["answer"][:250]
ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict()
for question in tqdm(df['question'].unique()):
index= ground_truth[question]
        candidates = [i for i in range(250)]
        candidates.remove(index)
        a_neg_index = random.choice(candidates)
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(answers[index],alphabet)
seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)
q.append(seq_q)
a.append( seq_a)
neg_a.append(seq_neg_a )
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
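# Note on the sampling scheme above (a reading of the code, not a spec): this
# loader assumes the "8008"-style layout in which every question shares the
# same pool of 250 candidate answers, so df["answer"][:250] is the whole pool
# and index % 250 recovers the position of the ground-truth answer; the
# negative is then drawn uniformly from the remaining 249 candidates.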
def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):
if sort_by_len:
sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)
q = [ q[i] for i in sorted_index]
a = [a[i] for i in sorted_index]
        neg_a = [ neg_a[i] for i in sorted_index]
#get batch
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
if shuffle:
np.random.shuffle(idx_list)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in tqdm(mini_batches):
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_neg_a = [ neg_a[t] for t in mini_batch]
        # pad each mini-batch to its own max length and build the corresponding masks
        mb_q,mb_q_mask = prepare_data(mb_q)
        mb_a,mb_a_mask = prepare_data(mb_a)
        mb_neg_a,mb_a_neg_mask = prepare_data(mb_neg_a)
        yield(mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask)
def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
q = []
a = []
neg_a = []
for question in df['question'].unique():
# group = df[df["question"]==question]
# pos_answers = group[df["flag"] == 1]["answer"]
# neg_answers = group[df["flag"] == 0]["answer"].reset_index()
group = df[df["question"]==question]
pos_answers = group[group["flag"] == 1]["answer"]
neg_answers = group[group["flag"] == 0]["answer"]#.reset_index()
for pos in pos_answers:
if model is not None and sess is not None:
pos_sent= encode_to_split(pos,alphabet)
q_sent,q_mask= prepare_data([pos_sent])
neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers]
a_sent,a_mask= prepare_data(neg_sents)
scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask))
neg_index = scores.argmax()
            else:
                if len(neg_answers.index) > 0:
                    neg_index = np.random.randint(len(neg_answers))
                else:
                    # no negative answer available for this question: skip the pair
                    continue
            neg = neg_answers.iloc[neg_index]
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(pos,alphabet)
seq_neg_a = encode_to_split(neg,alphabet)
q.append(seq_q)
a.append(seq_a)
neg_a.append(seq_neg_a)
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
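# When a model and session are supplied, the loop above does a simple form of
# hard-negative mining: every negative answer of the question is scored
# against the positive sentence with model.predict and the highest-scoring
# one (scores.argmax()) is kept; otherwise a negative is sampled at random.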
def prepare_data(seqs,overlap = None):
lengths = [len(seq) for seq in seqs]
n_samples = len(seqs)
max_len = np.max(lengths)
x = np.zeros((n_samples,max_len)).astype('int32')
if overlap is not None:
overlap_position = np.zeros((n_samples,max_len)).astype('float')
for idx ,seq in enumerate(seqs):
x[idx,:lengths[idx]] = seq
overlap_position[idx,:lengths[idx]] = overlap[idx]
return x,overlap_position
else:
x_mask = np.zeros((n_samples, max_len)).astype('float')
for idx, seq in enumerate(seqs):
x[idx, :lengths[idx]] = seq
x_mask[idx, :lengths[idx]] = 1.0
# print( x, x_mask)
return x, x_mask
# def prepare_data(seqs):
# lengths = [len(seq) for seq in seqs]
# n_samples = len(seqs)
# max_len = np.max(lengths)
# x = np.zeros((n_samples, max_len)).astype('int32')
# x_mask = np.zeros((n_samples, max_len)).astype('float')
# for idx, seq in enumerate(seqs):
# x[idx, :lengths[idx]] = seq
# x_mask[idx, :lengths[idx]] = 1.0
# # print( x, x_mask)
# return x, x_mask
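# Worked example (illustration only): prepare_data pads a ragged batch to the
# longest sequence and returns a 0/1 mask over the real tokens, e.g.
#   x, mask = prepare_data([[4, 7, 2], [9, 5]])
#   x    -> [[4, 7, 2],          mask -> [[1., 1., 1.],
#            [9, 5, 0]]                   [1., 1., 0.]]
# When an overlap list is passed instead, the second return value is the
# padded overlap-position matrix rather than the mask.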
def getLogger():
import sys
import logging
import os
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
        os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
return logger
--- FILE SEPARATOR ---
from my.general import flatten, reconstruct, add_wd, exp_mask
import numpy as np
import tensorflow as tf
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
is_train=None):#, name_w='', name_b=''
# if args is None or (nest.is_sequence(args) and not args):
# raise ValueError("`args` must be specified")
# if not nest.is_sequence(args):
# args = [args]
flat_args = [flatten(arg, 1) for arg in args]#[210,20]
# if input_keep_prob < 1.0:
# assert is_train is not None
flat_args = [tf.nn.dropout(arg, input_keep_prob) for arg in flat_args]
total_arg_size = 0#[60]
shapes = [a.get_shape() for a in flat_args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
# print(total_arg_size)
# exit()
dtype = [a.dtype for a in flat_args][0]
# scope = tf.get_variable_scope()
with tf.variable_scope(scope) as outer_scope:
weights = tf.get_variable(_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype)
if len(flat_args) == 1:
res = tf.matmul(flat_args[0], weights)
else:
res = tf.matmul(tf.concat(flat_args, 1), weights)
if not bias:
flat_out = res
else:
with tf.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
biases = tf.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=tf.constant_initializer(bias_start, dtype=dtype))
flat_out = tf.nn.bias_add(res, biases)
out = reconstruct(flat_out, args[0], 1)
if squeeze:
out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
if wd:
add_wd(wd)
return out
def softmax(logits, mask=None, scope=None):
with tf.name_scope(scope or "Softmax"):
if mask is not None:
logits = exp_mask(logits, mask)
flat_logits = flatten(logits, 1)
flat_out = tf.nn.softmax(flat_logits)
out = reconstruct(flat_out, logits, 1)
return out
def softsel(target, logits, mask=None, scope=None):
"""
:param target: [ ..., J, d] dtype=float
:param logits: [ ..., J], dtype=float
:param mask: [ ..., J], dtype=bool
:param scope:
:return: [..., d], dtype=float
"""
with tf.name_scope(scope or "Softsel"):
a = softmax(logits, mask = mask)
target_rank = len(target.get_shape().as_list())
out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)
return out
def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
with tf.variable_scope(scope or "highway_layer"):
d = arg.get_shape()[-1]
trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob)
trans = tf.nn.relu(trans)
gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob)
gate = tf.nn.sigmoid(gate)
out = gate * trans + (1 - gate) * arg
return out
def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
with tf.variable_scope(scope or "highway_network"):
prev = arg
cur = None
for layer_idx in range(num_layers):
cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx), wd=wd,
input_keep_prob=input_keep_prob)
prev = cur
return cur
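# The highway layer above computes out = g * relu(W_t x + b_t) + (1 - g) * x
# with gate g = sigmoid(W_g x + b_g), so each dimension interpolates between a
# transformed and an untouched copy of the input; highway_network just stacks
# num_layers of these layers. Where g is close to 0 the layer is nearly an
# identity mapping, which is what keeps deep stacks easy to optimize.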
def conv1d(in_, filter_size, height, padding, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "conv1d"):
num_channels = in_.get_shape()[-1]
filter_ = tf.get_variable("filter", shape=[1, height, num_channels, filter_size], dtype='float')
bias = tf.get_variable("bias", shape=[filter_size], dtype='float')
strides = [1, 1, 1, 1]
in_ = tf.nn.dropout(in_, keep_prob)
xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias # [N*M, JX, W/filter_stride, d]
out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d]
return out
def multi_conv1d(in_, filter_sizes, heights, padding, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "multi_conv1d"):
assert len(filter_sizes) == len(heights)
outs = []
for filter_size, height in zip(filter_sizes, heights):
if filter_size == 0:
continue
out = conv1d(in_, filter_size, height, padding, keep_prob=keep_prob, scope="conv1d_{}".format(height))
outs.append(out)
concat_out = tf.concat(outs, axis=2)
return concat_out
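# Shape sketch (a reading of the code above, not a guaranteed contract): for an
# input of shape [batch, JX, W, d], each conv1d branch yields
# [batch, JX, filter_size] after the max over the window axis, branches with
# filter_size 0 are skipped, and multi_conv1d concatenates the surviving
# branches on the last axis, giving [batch, JX, sum(filter_sizes)].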
if __name__ == '__main__':
a = tf.Variable(np.random.random(size=(2,2,4)))
b = tf.Variable(np.random.random(size=(2,3,4)))
c = tf.tile(tf.expand_dims(a, 2), [1, 1, 3, 1])
test = flatten(c,1)
out = reconstruct(test, c, 1)
d = tf.tile(tf.expand_dims(b, 1), [1, 2, 1, 1])
e = linear([c,d,c*d],1,bias = False,scope = "test",)
# f = softsel(d, e)
with tf.Session() as sess:
tf.global_variables_initializer().run()
print(sess.run(test))
print(sess.run(tf.shape(out)))
exit()
print(sess.run(tf.shape(a)))
print(sess.run(a))
print(sess.run(tf.shape(b)))
print(sess.run(b))
print(sess.run(tf.shape(c)))
print(sess.run(c))
print(sess.run(tf.shape(d)))
print(sess.run(d))
print(sess.run(tf.shape(e)))
print(sess.run(e))
|
[
"/config.py",
"/data_helper.py",
"/models/my/nn.py"
] |
shuishen112/pairwise-rnn
|
refs/heads/master
|
#coding:utf-8
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import models.blocks as blocks
# model_type :apn or qacnn
class QA_CNN_extend(object):
# def __init__(self,max_input_left,max_input_right,batch_size,vocab_size,embedding_size,filter_sizes,num_filters,hidden_size,
# dropout_keep_prob = 1,learning_rate = 0.001,embeddings = None,l2_reg_lambda = 0.0,trainable = True,pooling = 'attentive',conv = 'narrow'):
#
# """
# QA_RNN model for question answering
#
# Args:
# self.dropout_keep_prob: dropout rate
# self.num_filters : number of filters
# self.para : parameter list
# self.extend_feature_dim : my extend feature dimension
# self.max_input_left : the length of question
# self.max_input_right : the length of answer
# self.pooling : pooling strategy :max pooling or attentive pooling
#
# """
# self.dropout_keep_prob = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
# self.num_filters = num_filters
# self.embeddings = embeddings
# self.embedding_size = embedding_size
# self.batch_size = batch_size
# self.filter_sizes = filter_sizes
# self.l2_reg_lambda = l2_reg_lambda
# self.para = []
#
# self.max_input_left = max_input_left
# self.max_input_right = max_input_right
# self.trainable = trainable
# self.vocab_size = vocab_size
# self.pooling = pooling
# self.total_num_filter = len(self.filter_sizes) * self.num_filters
#
# self.conv = conv
# self.pooling = 'traditional'
# self.learning_rate = learning_rate
#
# self.hidden_size = hidden_size
#
# self.attention_size = 100
def __init__(self,opt):
for key,value in opt.items():
self.__setattr__(key,value)
self.attention_size = 100
self.pooling = 'mean'
self.total_num_filter = len(self.filter_sizes) * self.num_filters
self.para = []
self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
def create_placeholder(self):
print(('Create placeholders'))
        # the sentence length varies from batch to batch, so both dimensions are left as None
self.question = tf.placeholder(tf.int32,[None,None],name = 'input_question')
self.max_input_left = tf.shape(self.question)[1]
self.batch_size = tf.shape(self.question)[0]
self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer')
self.max_input_right = tf.shape(self.answer)[1]
self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right')
# self.q_mask = tf.placeholder(tf.int32,[None,None],name = 'q_mask')
# self.a_mask = tf.placeholder(tf.int32,[None,None],name = 'a_mask')
# self.a_neg_mask = tf.placeholder(tf.int32,[None,None],name = 'a_neg_mask')
def add_embeddings(self):
print( 'add embeddings')
if self.embeddings is not None:
print( "load embedding")
W = tf.Variable(np.array(self.embeddings),name = "W" ,dtype="float32",trainable = self.trainable)
else:
print( "random embedding")
W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name="W",trainable = self.trainable)
self.embedding_W = W
# self.overlap_W = tf.Variable(a,name="W",trainable = True)
self.para.append(self.embedding_W)
self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question)
self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer)
self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative)
#real length
self.q_len,self.q_mask = blocks.length(self.question)
self.a_len,self.a_mask = blocks.length(self.answer)
self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative)
def convolution(self):
print( 'convolution:wide_convolution')
self.kernels = []
for i,filter_size in enumerate(self.filter_sizes):
with tf.name_scope('conv-max-pool-%s' % filter_size):
filter_shape = [filter_size,self.embedding_size,1,self.num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name="W")
b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name="b")
self.kernels.append((W,b))
self.para.append(W)
self.para.append(b)
embeddings = [self.q_embedding,self.a_embedding,self.a_neg_embedding]
self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings]
#convolution
def pooling_graph(self):
if self.pooling == 'mean':
self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask)
self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask)
elif self.pooling == 'attentive':
self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'position':
self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'traditional':
print( self.pooling)
print(self.q_cnn)
self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
def para_initial(self):
# print(("---------"))
# self.W_qp = tf.Variable(tf.truncated_normal(shape = [self.hidden_size * 2,1],stddev = 0.01,name = 'W_qp'))
self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U'))
self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm'))
self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm'))
self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms'))
self.M_qi = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi'))
    def mean_pooling(self,conv,mask):
        conv = tf.squeeze(conv,2)
        # zero out the padded positions and average over the real sentence length
        mask = tf.expand_dims(tf.cast(mask,tf.float32),-1)
        conv_mask = tf.multiply(conv,mask)
        self.see = conv_mask
        return tf.reduce_sum(conv_mask,axis = 1) / tf.maximum(tf.reduce_sum(mask,axis = 1),1.0)
def attentive_pooling(self,input_left,input_right,q_mask,a_mask):
Q = tf.squeeze(input_left,axis = 2)
A = tf.squeeze(input_right,axis = 2)
print( Q)
print( A)
# Q = tf.reshape(input_left,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,len(self.filter_sizes) * self.num_filters],name = 'A')
# G = tf.tanh(tf.matmul(tf.matmul(Q,self.U),\
# A,transpose_b = True),name = 'G')
first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U)
second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters])
result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1]))
print( second_step)
print( tf.transpose(A,perm = [0,2,1]))
# print( 'result',result)
G = tf.tanh(result)
# G = result
# column-wise pooling ,row-wise pooling
row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling')
col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling')
self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q')
self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1))
self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a')
self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1))
self.see = G
R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q')
R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a')
return R_q,R_a
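    # Attentive pooling above follows G = tanh(Q U A^T): column-wise and
    # row-wise max pooling over G give one score per question position and per
    # answer position, a masked softmax turns those scores into attention
    # weights, and the weighted sums R_q and R_a are the pooled representations.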
def traditional_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2))
Q = tf.reduce_mean(input_left_mask,1)
a_shape = tf.shape(input_right)
A = tf.reshape(input_right,[-1,self.total_num_filter])
m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1))
f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1]))
self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2))
self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True))
self.see = self.f_attention_norm
a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1)
return Q,a_attention
def position_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
# Q = tf.reshape(input_left,[-1,self.max_input_left,self.hidden_size*2],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,self.hidden_size*2],name = 'A')
Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
QU = tf.matmul(Q,self.U)
QUA = tf.multiply(tf.expand_dims(QU,1),input_right)
        self.attention_a = tf.cast(tf.argmax(QUA,2),tf.float32)
# q_shape = tf.shape(input_left)
# Q_1 = tf.reshape(input_left,[-1,self.total_num_filter])
# QU = tf.matmul(Q_1,self.U)
# QU_1 = tf.reshape(QU,[-1,q_shape[1],self.total_num_filter])
# A_1 = tf.transpose(input_right,[0,2,1])
# QUA = tf.matmul(QU_1,A_1)
# QUA = tf.nn.l2_normalize(QUA,1)
# G = tf.tanh(QUA)
# Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
# # self.Q_mask = tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2))
# row_pooling = tf.reduce_max(G,1,name="row_pooling")
# col_pooling = tf.reduce_max(G,2,name="col_pooling")
# self.attention_a = tf.nn.softmax(row_pooling,1,name = "attention_a")
self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32))
self.see = self.attention_a
self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True))
self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter])
return Q ,self.r_a
def create_loss(self):
with tf.name_scope('score'):
self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn)
self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn)
l2_loss = tf.constant(0.0)
for p in self.para:
l2_loss += tf.nn.l2_loss(p)
with tf.name_scope("loss"):
self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13)))
self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss
tf.summary.scalar('loss', self.loss)
# Accuracy
with tf.name_scope("accuracy"):
self.correct = tf.equal(0.0, self.losses)
self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy")
tf.summary.scalar('accuracy', self.accuracy)
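    # The pairwise hinge loss above is max(0, 0.05 - (score12 - score13)) per
    # example: e.g. score12 = 0.80 vs score13 = 0.30 gives loss 0 (counted as
    # correct), while score12 = 0.40 vs score13 = 0.38 gives loss 0.03 because
    # the positive pair does not beat the negative one by the 0.05 margin.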
def create_op(self):
self.global_step = tf.Variable(0, name = "global_step", trainable = False)
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step)
def max_pooling(self,conv,input_length):
pooled = tf.nn.max_pool(
conv,
ksize = [1, input_length, 1, 1],
strides = [1, 1, 1, 1],
padding = 'VALID',
name="pool")
return pooled
def getCosine(self,q,a):
pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder)
pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder)
pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1))
pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1))
pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1)
score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name="scores")
return score
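    # getCosine is the usual cosine similarity dot(q,a) / (|q| * |a|) computed
    # on the dropped-out pooled vectors; e.g. q = [1, 0] and a = [1, 1] give
    # 1 / (1 * sqrt(2)) ~ 0.707.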
def wide_convolution(self,embedding):
cnn_outputs = []
for i,filter_size in enumerate(self.filter_sizes):
conv = tf.nn.conv2d(
embedding,
self.kernels[i][0],
strides=[1, 1, self.embedding_size, 1],
padding='SAME',
name="conv-1"
)
h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name="relu-1")
cnn_outputs.append(h)
cnn_reshaped = tf.concat(cnn_outputs,3)
return cnn_reshaped
def variable_summaries(self,var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def build_graph(self):
self.create_placeholder()
self.add_embeddings()
self.para_initial()
self.convolution()
self.pooling_graph()
self.create_loss()
self.create_op()
self.merged = tf.summary.merge_all()
def train(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
self.answer_negative:data[2],
# self.q_mask:data[3],
# self.a_mask:data[4],
# self.a_neg_mask:data[5],
self.dropout_keep_prob_holder:self.dropout_keep_prob
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see],
feed_dict)
return _, summary, step, loss, accuracy,score12, score13, see
def predict(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
# self.q_mask:data[2],
# self.a_mask:data[3],
self.dropout_keep_prob_holder:1.0
}
score = sess.run( self.score12, feed_dict)
return score
if __name__ == '__main__':
    # smoke test: build the option dict expected by __init__ (the keyword-style
    # constructor above is commented out) and run one forward pass
    opt = {
        'batch_size': 3,
        'vocab_size': 5000,
        'embedding_size': 100,
        'filter_sizes': [3,4,5],
        'num_filters': 64,
        'hidden_size': 100,
        'dropout_keep_prob': 1.0,
        'embeddings': None,
        'l2_reg_lambda': 0.0,
        'learning_rate': 0.001,
        'trainable': True,
        'pooling': 'max',
        'conv': 'wide'
    }
    cnn = QA_CNN_extend(opt)
    cnn.build_graph()
    input_x_1 = np.reshape(np.arange(3 * 33),[3,33])
    input_x_2 = np.reshape(np.arange(3 * 40),[3,40])
    input_x_3 = np.reshape(np.arange(3 * 40),[3,40])
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed_dict = {
            cnn.question:input_x_1,
            cnn.answer:input_x_2,
            cnn.answer_negative:input_x_3,
            cnn.dropout_keep_prob_holder:1.0
        }
        question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict)
        print( question.shape,answer.shape)
        print( score)
|
Python
| 381
| 46.682415
| 147
|
/models/QA_CNN_pairwise.py
| 0.592162
| 0.5751
|
#-*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
from functools import wraps
import time
import pickle
def log_time_delta(func):
@wraps(func)
def _deco(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
end = time.time()
delta = end - start
print( "%s runed %.2f seconds"% (func.__name__,delta))
return ret
return _deco
# note: do not re-import the tqdm module here; it would shadow the tqdm
# function imported above and break the tqdm(...) progress calls below
from nltk.corpus import stopwords
OVERLAP = 237
class Alphabet(dict):
def __init__(self, start_feature_id = 1):
self.fid = start_feature_id
def add(self, item):
idx = self.get(item, None)
if idx is None:
idx = self.fid
self[item] = idx
# self[idx] = item
self.fid += 1
return idx
def dump(self, fname):
with open(fname, "w") as out:
for k in sorted(self.keys()):
out.write("{}\t{}\n".format(k, self[k]))
def cut(sentence):
tokens = sentence.lower().split()
# tokens = [w for w in tokens if w not in stopwords.words('english')]
return tokens
@log_time_delta
def load(dataset, filter = False):
data_dir = "data/" + dataset
datas = []
for data_name in ['train.txt','test.txt','dev.txt']:
data_file = os.path.join(data_dir,data_name)
data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0')
# data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0')
if filter == True:
datas.append(removeUnanswerdQuestion(data))
else:
datas.append(data)
# sub_file = os.path.join(data_dir,'submit.txt')
# submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3)
# datas.append(submit)
return tuple(datas)
@log_time_delta
def removeUnanswerdQuestion(df):
counter= df.groupby("question").apply(lambda group: sum(group["flag"]))
questions_have_correct=counter[counter>0].index
counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0))
questions_have_uncorrect=counter[counter>0].index
counter=df.groupby("question").apply(lambda group: len(group["flag"]))
questions_multi=counter[counter>1].index
return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_uncorrect)].reset_index()
@log_time_delta
def get_alphabet(corpuses=None,dataset=""):
pkl_name="temp/"+dataset+".alphabet.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
alphabet = Alphabet(start_feature_id = 0)
alphabet.add('[UNK]')
alphabet.add('END')
count = 0
for corpus in corpuses:
for texts in [corpus["question"].unique(),corpus["answer"]]:
for sentence in texts:
tokens = cut(sentence)
for token in set(tokens):
alphabet.add(token)
print("alphabet size %d" % len(alphabet.keys()) )
if not os.path.exists("temp"):
os.mkdir("temp")
pickle.dump( alphabet,open(pkl_name,"wb"))
return alphabet
@log_time_delta
def getSubVectorsFromDict(vectors,vocab,dim = 300):
embedding = np.zeros((len(vocab),dim))
count = 1
for word in vocab:
if word in vectors:
count += 1
embedding[vocab[word]]= vectors[word]
else:
embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist()
print( 'word in embedding',count)
return embedding
def encode_to_split(sentence,alphabet):
    tokens = cut(sentence)
    seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens]
    return seq
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size = 100):
vectors = {}
with open(filename,encoding='utf-8') as f:
i = 0
for line in f:
i += 1
if i % 100000 == 0:
                print( 'processed %d lines' % i)
items = line.strip().split(' ')
if len(items) == 2:
vocab_size, embedding_size= items[0],items[1]
print( ( vocab_size, embedding_size))
else:
word = items[0]
if word in alphabet:
vectors[word] = items[1:]
print( 'embedding_size',embedding_size)
print( 'done')
    print('words found in the pretrained embedding', len(vectors.keys()))
return vectors
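# load (and cache under temp/) the embedding matrix restricted to the alphabet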
@log_time_delta
def get_embedding(alphabet,dim = 300,language ="en",dataset=""):
pkl_name="temp/"+dataset+".subembedding.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
if language=="en":
fname = 'embedding/glove.6B/glove.6B.300d.txt'
else:
fname= "embedding/embedding.200.header_txt"
embeddings = load_text_vec(alphabet,fname,embedding_size = dim)
sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim)
pickle.dump( sub_embeddings,open(pkl_name,"wb"))
return sub_embeddings
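# generate padded (question, answer, question_mask, answer_mask) batches for evaluation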
@log_time_delta
def get_mini_batch_test(df,alphabet,batch_size):
q = []
a = []
pos_overlap = []
for index,row in df.iterrows():
question = encode_to_split(row["question"],alphabet)
answer = encode_to_split(row["answer"],alphabet)
overlap_pos = overlap_index(row['question'],row['answer'])
q.append(question)
a.append(answer)
pos_overlap.append(overlap_pos)
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in mini_batches:
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_pos_overlap = [pos_overlap[t] for t in mini_batch]
        mb_q, mb_q_mask = prepare_data(mb_q)
        mb_a, mb_a_mask = prepare_data(mb_a)
        # yield padded id matrices plus masks, matching the feed order used at prediction time
        yield (mb_q, mb_a, mb_q_mask, mb_a_mask)
# calculate the overlap_index
def overlap_index(question,answer,stopwords = []):
ans_token = cut(answer)
qset = set(cut(question))
aset = set(ans_token)
a_len = len(ans_token)
# q_index = np.arange(1,q_len)
a_index = np.arange(1,a_len + 1)
overlap = qset.intersection(aset)
# for i,q in enumerate(cut(question)[:q_len]):
# value = 1
# if q in overlap:
# value = 2
# q_index[i] = value
for i,a in enumerate(ans_token):
if a in overlap:
a_index[i] = OVERLAP
return a_index
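# build (question, positive, negative) training triples assuming the first 250 rows of df
# share a single candidate answer pool; the negative is drawn at random from the other candidates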
def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False):
q,a,neg_a=[],[],[]
answers=df["answer"][:250]
ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict()
for question in tqdm(df['question'].unique()):
index= ground_truth[question]
        candidates = [i for i in range(250)]
        candidates.remove(index)
        a_neg_index = random.choice(candidates)
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(answers[index],alphabet)
seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)
q.append(seq_q)
a.append( seq_a)
neg_a.append(seq_neg_a )
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
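# pad the triples, build the corresponding masks and yield mini-batches
# (optionally sorted by question length and/or shuffled)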
def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):
    if sort_by_len:
        sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)
        q = [q[i] for i in sorted_index]
        a = [a[i] for i in sorted_index]
        neg_a = [neg_a[i] for i in sorted_index]
    # get batch
    m = 0
    n = len(q)
    idx_list = np.arange(m,n,batch_size)
    if shuffle:
        np.random.shuffle(idx_list)
    mini_batches = []
    for idx in idx_list:
        mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
    for mini_batch in tqdm(mini_batches):
        mb_q = [q[t] for t in mini_batch]
        mb_a = [a[t] for t in mini_batch]
        mb_neg_a = [neg_a[t] for t in mini_batch]
        # pad every field to the batch maximum length and build the matching masks
        mb_q, mb_q_mask = prepare_data(mb_q)
        mb_a, mb_a_mask = prepare_data(mb_a)
        mb_neg_a, mb_a_neg_mask = prepare_data(mb_neg_a)
        yield (mb_q, mb_a, mb_neg_a, mb_q_mask, mb_a_mask, mb_a_neg_mask)
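# build training triples: for every positive answer pick one negative, either at random or,
# when a model and session are provided, the negative the current model scores highest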
def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
q = []
a = []
neg_a = []
for question in df['question'].unique():
# group = df[df["question"]==question]
# pos_answers = group[df["flag"] == 1]["answer"]
# neg_answers = group[df["flag"] == 0]["answer"].reset_index()
group = df[df["question"]==question]
pos_answers = group[group["flag"] == 1]["answer"]
neg_answers = group[group["flag"] == 0]["answer"]#.reset_index()
        for pos in pos_answers:
            if len(neg_answers.index) == 0:
                # no negative candidates for this question, skip the pair
                continue
            if model is not None and sess is not None:
                # hard-negative mining: score all negative answers with the current model
                # and keep the highest-scoring one
                pos_sent = encode_to_split(pos,alphabet)
                q_sent, q_mask = prepare_data([pos_sent])
                neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers]
                a_sent, a_mask = prepare_data(neg_sents)
                scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask))
                neg_index = scores.argmax()
            else:
                # otherwise sample a random negative answer for this question
                neg_index = np.random.randint(0, len(neg_answers))
            neg = neg_answers.iloc[neg_index]
            seq_q = encode_to_split(question,alphabet)
            seq_a = encode_to_split(pos,alphabet)
            seq_neg_a = encode_to_split(neg,alphabet)
            q.append(seq_q)
            a.append(seq_a)
            neg_a.append(seq_neg_a)
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
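# pad a batch of variable-length id sequences to the batch maximum length; returns the padded
# matrix together with either the aligned overlap-position matrix or a 0/1 mask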
def prepare_data(seqs,overlap = None):
lengths = [len(seq) for seq in seqs]
n_samples = len(seqs)
max_len = np.max(lengths)
x = np.zeros((n_samples,max_len)).astype('int32')
if overlap is not None:
overlap_position = np.zeros((n_samples,max_len)).astype('float')
for idx ,seq in enumerate(seqs):
x[idx,:lengths[idx]] = seq
overlap_position[idx,:lengths[idx]] = overlap[idx]
return x,overlap_position
else:
x_mask = np.zeros((n_samples, max_len)).astype('float')
for idx, seq in enumerate(seqs):
x[idx, :lengths[idx]] = seq
x_mask[idx, :lengths[idx]] = 1.0
# print( x, x_mask)
return x, x_mask
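# configure a file logger that writes to log/<date>/qa<timestamp>.log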
def getLogger():
import sys
import logging
import os
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
    if not os.path.exists(log_filename):
        os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
return logger
--- FILE SEPARATOR ---
import data_helper
import time
import datetime
import os
import tensorflow as tf
import numpy as np
import evaluation
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
timeDay = time.strftime("%Y%m%d", timeArray)
print (timeStamp)
def main(args):
args._parse_flags()
print("\nParameters:")
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
log_dir = 'log/'+ timeDay
if not os.path.exists(log_dir):
os.makedirs(log_dir)
data_file = log_dir + '/test_' + args.data + timeStamp
precision = data_file + 'precise'
print('load data ...........')
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev])
print('the number of words',len(alphabet))
print('get embedding')
if args.data=="quora":
embedding = data_helper.get_embedding(alphabet,language="cn")
else:
embedding = data_helper.get_embedding(alphabet)
with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model = QA_CNN_extend(max_input_left = q_max_sent_length,
max_input_right = a_max_sent_length,
batch_size = args.batch_size,
vocab_size = len(alphabet),
embedding_size = args.embedding_dim,
filter_sizes = list(map(int, args.filter_sizes.split(","))),
num_filters = args.num_filters,
hidden_size = args.hidden_size,
dropout_keep_prob = args.dropout_keep_prob,
embeddings = embedding,
l2_reg_lambda = args.l2_reg_lambda,
trainable = args.trainable,
pooling = args.pooling,
conv = args.conv)
model.build_graph()
sess.run(tf.global_variables_initializer())
def train_step(model,sess,batch):
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.answer_negative:data[2],
model.q_mask:data[3],
model.a_mask:data[4],
model.a_neg_mask:data[5]
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[model.train_op, model.merged,model.global_step,model.loss, model.accuracy,model.score12,model.score13, model.see],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
def predict(model,sess,batch,test):
scores = []
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.q_mask:data[2],
model.a_mask:data[3]
}
score = sess.run(
model.score12,
feed_dict)
scores.extend(score)
return np.array(scores[:len(test)])
for i in range(args.num_epoches):
datas = data_helper.get_mini_batch(train,alphabet,args.batch_size)
train_step(model,sess,datas)
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
print(len(predicted_test))
print(len(test))
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
print('map_mrr test',map_mrr_test)
--- FILE SEPARATOR ---
from .QA_CNN_pairwise import QA_CNN_extend as CNN
from .QA_RNN_pairwise import QA_RNN_extend as RNN
from .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN
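# factory: choose the model class according to opt["model_name"]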
def setup(opt):
if opt["model_name"]=="cnn":
model=CNN(opt)
elif opt["model_name"]=="rnn":
model=RNN(opt)
elif opt['model_name']=='qcnn':
model=QCNN(opt)
else:
print("no model")
exit(0)
return model
context_file_paths for the three files shown above: ["/data_helper.py", "/main.py", "/models/__init__.py"]

The remaining preview rows (their `file_content` and `context` cells are truncated in the preview):

| repo_name | branch_name | path | language | num_lines | avg_line_length | max_line_length | alphanum_fraction | alpha_fraction | file_content (truncated) | context (truncated) | context_file_paths |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| shuishen112/pairwise-rnn | refs/heads/master | /models/my/nn.py | Python | 160 | 36.712502 | 116 | 0.572754 | 0.558005 | "from my.general import flatten, reconstruct, add_wd, exp_mask …" | "#-*- coding:utf-8 -*- … import tensorflow as tf …" | ["/data_helper.py", "/models/QA_CNN_pairwise.py", "/test.py"] |
| shuishen112/pairwise-rnn | refs/heads/master | /models/__init__.py | Python | 14 | 25.642857 | 58 | 0.691689 | 0.689008 | "from .QA_CNN_pairwise import QA_CNN_extend as CNN …" | "import data_helper … import tensorflow as tf …" | ["/main.py", "/run.py", "/models/my/nn.py"] |
| shuishen112/pairwise-rnn | refs/heads/master | /test.py | Python | 114 | 31.622807 | 92 | 0.651019 | 0.64324 | "# -*- coding: utf-8 -*- … from tensorflow import flags …" | "#coding:utf-8 … from tensorflow.contrib import rnn …" | ["/models/QA_CNN_pairwise.py", "/main.py", "/config.py"] |
| shuishen112/pairwise-rnn | refs/heads/master | /data_helper.py | Python | 363 | 33.487602 | 167 | 0.582389 | 0.574804 | "#-*- coding:utf-8 -*- … import numpy as np …" | "from tensorflow import flags … from config import Singleton …" | ["/run.py", "/models/QA_CNN_pairwise.py", "/test.py"] |
| pablor0mero/Placester_Test_Pablo_Romero | refs/heads/master | /main.py | Python | 71 | 44.929577 | 157 | 0.670813 | 0.666869 | "# For this solution I'm using TextBlob, using it's integration with WordNet. …" | | [] |
| GabinCleaver/Auto_Discord_Bump | refs/heads/main | /autobump.py | Python | 21 | 20.809525 | 109 | 0.561845 | 0.51153 | "import requests … import time … token = \"TOKEN\" …" | | [] |