
Detecting Whether a Face Is Wearing Glasses


First version:

Data samples: 2,000 images with glasses and 2,000 images without glasses.
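The training script below expects one subfolder per class under the dataset root; read_img labels each image with its subfolder's index (in os.listdir order). A sketch of the layout, with illustrative folder names:

C:/Users/01/Desktop/face/data/images/img_small/
    no_glass/      2000 .jpg images without glasses
    with_glass/    2000 .jpg images with glasses

Whichever folder is listed first becomes label 0; the test script later assumes 0 = No Glass and 1 = Has Glass.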

 

Start training the model:

# -*- coding: utf-8 -*-
"""
Created on Wed Jul 10 15:39:22 2019
@author: 01
"""
import cv2
import glob
import os
import tensorflow as tf
import numpy as np
import time

tf.reset_default_graph()

# Dataset directory
path = 'C:/Users/01/Desktop/face/data/images/img_small/'
# Where the model is saved
model_path = 'F:/1/model.ckpt'

# All images are resized to 100x100
w = 100
h = 100
c = 3

# Read the images: one subfolder per class, the folder index is the label
def read_img(path):
    cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    imgs = []
    labels = []
    for idx, folder in enumerate(cate):
        for im in glob.glob(folder + '/*.jpg'):
            print('reading the images:%s' % (im))
            img = cv2.imread(im)
            img = cv2.resize(img, (w, h))
            imgs.append(img)
            labels.append(idx)
    return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)

data, label = read_img(path)

# Shuffle
num_example = data.shape[0]
arr = np.arange(num_example)
np.random.shuffle(arr)
data = data[arr]
label = label[arr]

# Split into training and validation sets
ratio = 0.8
s = int(num_example * ratio)
x_train = data[:s]
y_train = label[:s]
x_val = data[s:]
y_val = label[s:]

# ----------------- Build the network ----------------------
# Placeholders
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')

def inference(input_tensor, train, regularizer):
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight", [5, 5, 3, 32], initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [32], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))
    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID")
    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable("weight", [5, 5, 32, 64], initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [64], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))
    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    with tf.variable_scope("layer5-conv3"):
        conv3_weights = tf.get_variable("weight", [3, 3, 64, 128], initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv3_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases))
    with tf.name_scope("layer6-pool3"):
        pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    with tf.variable_scope("layer7-conv4"):
        conv4_weights = tf.get_variable("weight", [3, 3, 128, 128], initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv4_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0))
        conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases))
    with tf.name_scope("layer8-pool4"):
        pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
        nodes = 6 * 6 * 128
        reshaped = tf.reshape(pool4, [-1, nodes])
    with tf.variable_scope('layer9-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, 1024], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer != None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)
    with tf.variable_scope('layer10-fc2'):
        fc2_weights = tf.get_variable("weight", [1024, 512], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer != None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1))
        fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases)
        if train:
            fc2 = tf.nn.dropout(fc2, 0.5)
    with tf.variable_scope('layer11-fc3'):
        fc3_weights = tf.get_variable("weight", [512, 2], initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer != None:
            tf.add_to_collection('losses', regularizer(fc3_weights))
        fc3_biases = tf.get_variable("bias", [2], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc2, fc3_weights) + fc3_biases
    return logit

# --------------------------- End of network ---------------------------
regularizer = tf.contrib.layers.l2_regularizer(0.0001)
# train=False because this same graph is also used for validation;
# note that as a side effect, dropout is never active during training
logits = inference(x, False, regularizer)

# (Small trick) multiply logits by 1 and give the result a name, so the
# output tensor can be fetched by name when the model is loaded later
b = tf.constant(value=1, dtype=tf.float32)
logits_eval = tf.multiply(logits, b, name='logits_eval')

# Fix: the original fed the unreduced per-example losses to the optimizer and
# never used the L2 terms collected in 'losses'; average the cross entropy and
# add the regularization terms
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_))
loss += tf.add_n(tf.get_collection('losses'))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Helper that yields the data batch by batch
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]

# Train and validate; n_epoch can be set higher
n_epoch = 10
batch_size = 64
saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
    start_time = time.time()

    # training
    train_loss, train_acc, n_batch = 0, 0, 0
    for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
        _, err, ac = sess.run([train_op, loss, acc], feed_dict={x: x_train_a, y_: y_train_a})
        train_loss += err; train_acc += ac; n_batch += 1
    print("   train loss: %f" % (np.sum(train_loss) / n_batch))
    print("   train acc: %f" % (np.sum(train_acc) / n_batch))

    # validation
    val_loss, val_acc, n_batch = 0, 0, 0
    for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
        err, ac = sess.run([loss, acc], feed_dict={x: x_val_a, y_: y_val_a})
        val_loss += err; val_acc += ac; n_batch += 1
    print("   validation loss: %f" % (np.sum(val_loss) / n_batch))
    print("   validation acc: %f" % (np.sum(val_acc) / n_batch))
saver.save(sess, model_path)
sess.close()
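The fully connected input size nodes = 6*6*128 follows from the four 2x2 VALID max-pools, each of which halves the spatial size and rounds down: 100 → 50 → 25 → 12 → 6. A quick check:

size = 100
for _ in range(4):
    size //= 2   # each 2x2 VALID max-pool with stride 2 floors the size
print(size)      # 6, so the flattened feature map has 6 * 6 * 128 = 4608 values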


After about 10 epochs, training accuracy reaches 99% while validation accuracy stays around 80% (the gap suggests some overfitting).

Testing the model:

# -*- coding: utf-8 -*-
import cv2
import tensorflow as tf
import numpy as np

path1 = "./1.jpg"
path2 = "./2.jpg"
path3 = "./3.jpg"
path4 = "./4.jpg"
path5 = "./face5.jpg"
path6 = "./face6.jpg"
path7 = "./face7.jpg"
path8 = "./face8.jpg"

face_dict = {1: 'Has Glass', 0: 'No Glass'}

w = 100
h = 100
c = 3

# Same preprocessing as training: read with OpenCV (BGR) and resize to 100x100
def read_one_image(path):
    img = cv2.imread(path)
    # print(img)  # debug
    img = cv2.resize(img, (w, h))
    return np.asarray(img)

with tf.Session() as sess:
    data = []
    for p in [path1, path2, path3, path4, path5, path6, path7, path8]:
        data.append(read_one_image(p))

    # Fix: the original path 'model.ckpta' was a typo; the meta graph is at model.ckpt.meta
    saver = tf.train.import_meta_graph('./models/model.ckpt.meta')
    saver.restore(sess, tf.train.latest_checkpoint('./models/'))

    graph = tf.get_default_graph()
    x = graph.get_tensor_by_name("x:0")
    feed_dict = {x: data}
    logits = graph.get_tensor_by_name("logits_eval:0")

    classification_result = sess.run(logits, feed_dict)
    # Print the prediction matrix
    print(classification_result)
    # The index of the max value in each row, mapped through the dict, gives the class
    output = tf.argmax(classification_result, 1).eval()
    for i in range(len(output)):
        print("No.", i + 1, "face is classified as: " + face_dict[output[i]])
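To classify an arbitrary list of files rather than eight hard-coded paths, a small wrapper along these lines should work (classify_files is a hypothetical helper; it reuses read_one_image and face_dict from the script above and must run inside the same session):

def classify_files(paths, sess, x, logits):
    # Stack the preprocessed images into one batch of shape [N, 100, 100, 3]
    batch = np.stack([read_one_image(p) for p in paths])
    scores = sess.run(logits, feed_dict={x: batch})
    # argmax over the two logits picks the predicted class for each image
    return [face_dict[int(i)] for i in np.argmax(scores, axis=1)]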

To call the model from C++, the ckpt checkpoint files have to be converted into a frozen pb graph. The code is as follows:

# -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 08:48:53 2019
@author: 01
"""
import cv2
import tensorflow as tf
import numpy as np

def freeze_graph(input_checkpoint, output_graph):
    '''
    :param input_checkpoint: path to the ckpt checkpoint
    :param output_graph: path where the PB model is saved
    :return:
    '''
    # checkpoint = tf.train.get_checkpoint_state(model_folder)  # check whether the ckpt files in the folder are usable
    # input_checkpoint = checkpoint.model_checkpoint_path  # get the ckpt file path
    # The output node name must be a node that exists in the original graph
    output_node_names = "logits_eval"
    saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True)
    graph = tf.get_default_graph()  # get the default graph
    input_graph_def = graph.as_graph_def()  # serialized representation of the current graph

    with tf.Session() as sess:
        saver.restore(sess, input_checkpoint)  # restore the graph and its weights
        output_graph_def = tf.graph_util.convert_variables_to_constants(  # freeze the model: turn variables into constants
            sess=sess,
            input_graph_def=input_graph_def,  # same as sess.graph_def
            output_node_names=output_node_names.split(","))  # multiple output nodes are separated by commas

        with tf.gfile.GFile(output_graph, "wb") as f:  # save the model
            f.write(output_graph_def.SerializeToString())  # serialize and write out
        print("%d ops in the final graph." % len(output_graph_def.node))  # number of ops in the frozen graph

        # for op in graph.get_operations():
        #     print(op.name, op.values())

# Input ckpt model path
input_checkpoint = 'C:/Users/01/Desktop/face/CNN_Face_Glass_Classfy-master/model/model.ckpt'
# Output pb model path
out_pb_path = "models/pb/frozen_model.pb"
# Call freeze_graph to convert ckpt to pb
freeze_graph(input_checkpoint, out_pb_path)
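A quick way to confirm the frozen pb is self-contained is to load it back in Python and push a dummy batch through the named input and output tensors. A minimal sketch, using the same TF 1.x API as the scripts above:

import numpy as np
import tensorflow as tf

with tf.gfile.GFile("models/pb/frozen_model.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as g:
    tf.import_graph_def(graph_def, name="")  # keep the original tensor names
    x = g.get_tensor_by_name("x:0")
    logits = g.get_tensor_by_name("logits_eval:0")
    with tf.Session(graph=g) as sess:
        dummy = np.zeros((1, 100, 100, 3), np.float32)
        print(sess.run(logits, feed_dict={x: dummy}))  # one row of two scores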

Then the pb file can be called from C++.


// Note: the source page stripped the angle-bracket header names (the HTML ate
// the <...> parts) and the listing breaks off inside Glasses_ide. The headers
// below are a plausible reconstruction from the namespaces and identifiers
// that remain; the duplicated facedetect-dll.h include has been dropped.
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/dnn.hpp>
#include "lib\facedetect-dll.h"

using namespace cv;
using namespace std;
using namespace dnn;

String labels_txt_file = "F:/123.txt";
#define DETECT_BUFFER_SIZE 0x20000

int Glasses_ide(const char* path)
{
    cout  // the listing is cut off here in the source page; the rest of the function is missing
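Since the C++ listing is truncated, here is a minimal sketch in Python of the pipeline the Glasses_ide function presumably implements with OpenCV's dnn module: load the frozen pb, feed a 100x100 blob, take the argmax. File names are illustrative, and it assumes OpenCV's TensorFlow importer accepts this graph:

import cv2
import numpy as np

net = cv2.dnn.readNetFromTensorflow("frozen_model.pb")  # pb from the previous step
img = cv2.imread("test.jpg")                            # illustrative test image
# Training fed raw BGR pixels resized to 100x100, so no scaling, mean, or channel swap
blob = cv2.dnn.blobFromImage(cv2.resize(img, (100, 100)))
net.setInput(blob)
scores = net.forward()          # shape (1, 2), the logits_eval values
label = int(np.argmax(scores))  # 0/1 mapping follows the training folder order
print("Has Glass" if label == 1 else "No Glass")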

