Preface
This is the second post in the series Qt+Caffe+OpenCV — [A VGG-based face recognition attendance system], and it implements all of the face detection and recognition. Compared with the original post, the pipeline here is simpler: the face-alignment module is dropped and dlib is no longer used, with little effect on the system.
Environment
The development environment is Visual Studio 2013 with the Qt 5.7 VS add-in.
Make sure you have set up caffe as described in http://blog.csdn.net/mr_curry/article/details/52443126.
libfacedetection library: https://github.com/ShiqiYu/libfacedetection
Code
For now, let's name the whole system YotoFace.
Face data is handled by the SingleFace class, whose job is to store the information of a single face.
SingleFace.h:
#pragma once
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

class SingleFace
{
public:
    string label;            // person's name, set on registration
    Mat sourceImage;         // full frame the face was found in
    Mat Roi_224;             // face crop resized to 224x224 for VGG
    Rect position;           // face location inside sourceImage
    vector<float> feature;   // fc7 feature vector
    bool empty()
    {
        return Roi_224.empty();
    }
    void draw()
    {
        rectangle(sourceImage, position, Scalar(0, 0, 255));
    }
};
Face detection and recognition are both handled by the YotoFace class.
YotoFace.h:
#pragma once
#include <caffe/caffe.hpp>
#include <opencv2/opencv.hpp>
#include "facedetect-dll.h"
#include "caffe/layers/input_layer.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/layers/dropout_layer.hpp"
#include "caffe/layers/conv_layer.hpp"
#include "caffe/layers/relu_layer.hpp"
#include "caffe/layers/memory_data_layer.hpp"
#include "caffe/layers/pooling_layer.hpp"
#include "caffe/layers/lrn_layer.hpp"
#include "caffe/layers/softmax_layer.hpp"
#include "SingleFace.h"
#include <mutex>

class YotoFace
{
public:
    YotoFace();
    vector<float> ExtractFeature(Mat input_224);
    bool Generate(Mat input, SingleFace &singleface);
    bool Generate(Mat input, SingleFace &singleface, string label);
    SingleFace Recognition(Mat input_224, SingleFace &singleface);
    vector<SingleFace> FaceArray;   // registered faces (the gallery)
    void drawFaceImage(Mat input);
private:
    caffe::MemoryDataLayer<float> *memory_layer;
    caffe::Net<float>* net;
    bool FaceDetect(Mat input, Rect &roi);
    mutex thread_mutex;   // serializes calls into libfacedetection
};
YotoFace.cpp:
#include "YotoFace.h"
#include <algorithm>

namespace caffe
{
    // caffe is linked statically here, so the layers this model uses must be
    // instantiated/registered explicitly or the linker strips them.
    extern INSTANTIATE_CLASS(InputLayer);
    extern INSTANTIATE_CLASS(InnerProductLayer);
    extern INSTANTIATE_CLASS(DropoutLayer);
    extern INSTANTIATE_CLASS(ConvolutionLayer);
    REGISTER_LAYER_CLASS(Convolution);
    extern INSTANTIATE_CLASS(ReLULayer);
    REGISTER_LAYER_CLASS(ReLU);
    extern INSTANTIATE_CLASS(PoolingLayer);
    REGISTER_LAYER_CLASS(Pooling);
    extern INSTANTIATE_CLASS(LRNLayer);
    REGISTER_LAYER_CLASS(LRN);
    extern INSTANTIATE_CLASS(SoftmaxLayer);
    REGISTER_LAYER_CLASS(Softmax);
    extern INSTANTIATE_CLASS(MemoryDataLayer);
}
YotoFace::YotoFace()
{
    // Load the VGG-Face net in TEST mode; its first layer is a MemoryData
    // layer, so images can be fed straight from cv::Mat.
    net = new caffe::Net<float>("vgg_extract_feature_memorydata.prototxt", caffe::TEST);
    net->CopyTrainedLayersFrom("VGG_FACE.caffemodel");
    memory_layer = (caffe::MemoryDataLayer<float> *)net->layers()[0].get();
}

vector<float> YotoFace::ExtractFeature(Mat img_224)
{
    std::vector<Mat> test{ img_224 };
    std::vector<int> testLabel{ 0 };
    memory_layer->AddMatVector(test, testLabel);   // feed the image
    vector<caffe::Blob<float>*> input_vec;
    net->Forward(input_vec);                       // run the network
    auto fc7 = net->blob_by_name("fc7");           // read the fc7 activations
    float* begin = fc7->mutable_cpu_data();
    vector<float> feature{ begin, begin + fc7->channels() };
    return move(feature);
}
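For orientation, here is a minimal usage sketch (the image path is made up; for VGG-Face, fc7 has 4096 channels, so the returned vector has 4096 elements):

    YotoFace yoto;                        // loads the prototxt and caffemodel
    Mat img = imread("face.jpg");         // hypothetical input image
    resize(img, img, Size(224, 224));     // the net expects 224x224 input
    vector<float> feat = yoto.ExtractFeature(img);
    // feat.size() == 4096 for VGG-Face's fc7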
bool YotoFace::FaceDetect(Mat input, Rect &roi)
{
    thread_mutex.lock();
    Mat gray;
    cvtColor(input, gray, CV_BGR2GRAY);
    int *pResults = facedetect_multiview_reinforce((unsigned char*)(gray.ptr(0)),
        gray.cols, gray.rows, gray.step, 1.2f, 5, 24);
    int p_num = (pResults ? *pResults : 0);
    if (p_num == 0)
    {
        thread_mutex.unlock();
        return false;
    }
    short *p = ((short*)(pResults + 1));
    Point left(p[0], p[1]);
    Point right(p[0] + p[2], p[1] + p[3]);
    roi = Rect(left, right);
    thread_mutex.unlock();
    return true;
}
bool YotoFace::Generate(Mat input, SingleFace &singleface)
{
    Rect roi;
    if (FaceDetect(input, roi))
    {
        Mat img_224 = input(roi);
        resize(img_224, img_224, Size(224, 224));
        auto feature_ = ExtractFeature(img_224);
        if (feature_.empty())
            return false;
        singleface.sourceImage = input;
        singleface.position = roi;
        singleface.feature = feature_;
        singleface.Roi_224 = img_224;
        return true;
    }
    return false;
}
bool YotoFace::Generate(Mat input, SingleFace &singleface, string label_)
{
    Rect roi;
    if (label_.empty())
        return false;
    if (FaceDetect(input, roi))
    {
        Mat img_224 = input(roi);
        resize(img_224, img_224, Size(224, 224));
        auto feature_ = ExtractFeature(img_224);
        if (feature_.empty())
            return false;
        singleface.sourceImage = input;
        singleface.position = roi;
        singleface.feature = feature_;
        singleface.Roi_224 = img_224;
        singleface.label = label_;
        return true;
    }
    return false;
}
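For example, registering a face could look like this (the instance name, image path and label are mine, for illustration only):

    SingleFace face;
    Mat frame = imread("person.jpg");          // made-up path
    if (yoto.Generate(frame, face, "Alice"))   // labeled overload = registration
        yoto.FaceArray.push_back(face);        // enroll into the gallery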
void YotoFace::drawFaceImage(Mat input)
{
    Rect rec;
    if (FaceDetect(input, rec))
    {
        Mat draw = input;
        rectangle(draw, rec, Scalar(0, 0, 255), 2);
    }
}
A word on why YotoFace::FaceDetect needs the lock. In the Qt application we want one window that keeps detecting faces continuously, which means a loop that never exits; if it ran on the main thread it would block everything else, so it runs on its own thread. But libfacedetection is not thread-safe: calling the detection function from two threads at the same time crashes, so we must guarantee that only one call runs at any moment.
thread_mutex.lock() acquires the mutex; any other thread that reaches the same lock blocks until the mutex is released, then resumes.
thread_mutex.unlock() releases it.
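Note that FaceDetect has to call unlock() on every return path. A std::lock_guard does the same thing automatically; here is a minimal sketch of that alternative (the body is elided, this is not the original code):

    bool YotoFace::FaceDetect(Mat input, Rect &roi)
    {
        // RAII: the guard locks here and unlocks on every return path,
        // so an early "return false" can no longer leak the mutex.
        lock_guard<mutex> guard(thread_mutex);
        // ... the same libfacedetection call as above ...
        return true;
    }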
Now the question: the face is detected and the feature is extracted, but how do we compare two feature vectors? LikeValue computes their cosine similarity, v1·v2 / (‖v1‖‖v2‖); the closer the result is to 1, the more alike the two faces are.
ComputeDistance.cpp:
#include <cmath>

inline double LikeValue(float *v1, float *v2, int channels)
{
    double mult = 0;   // dot product
    double v1_2 = 0;   // squared norm of v1
    double v2_2 = 0;   // squared norm of v2
    for (int i = 0; i < channels; i++)
    {
        mult += v1[i] * v2[i];
        v1_2 += pow(v1[i], 2);
        v2_2 += pow(v2[i], 2);
    }
    return mult / (sqrt(v1_2) * sqrt(v2_2));
}
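A quick toy check (my own example, not from the original post):

    float a[3] = { 1.0f, 0.0f, 0.0f };
    float b[3] = { 1.0f, 1.0f, 0.0f };
    double s = LikeValue(a, b, 3);   // cos(45°) ≈ 0.707; identical vectors give 1.0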
SingleFace YotoFace::Recognition(Mat input_, SingleFace &singleface)
{
    if (Generate(input_, singleface))
    {
        float *single_feature = &singleface.feature[0];
        int single_channel = singleface.feature.size();
        int size_ = FaceArray.size();
        vector<double> like_array;
        for (int i = 0; i < size_; i++)
        {
            float *faces_feature = &FaceArray[i].feature[0];
            like_array.push_back(LikeValue(single_feature, faces_feature, single_channel));
        }
        // pick the registered face with the highest cosine similarity
        vector<double>::iterator biggest = std::max_element(std::begin(like_array), std::end(like_array));
        int max_ = distance(std::begin(like_array), biggest);
        return FaceArray[max_];
    }
    // no face detected: singleface stays empty
    return singleface;
}
Working on raw float arrays instead of the vectors keeps the comparison loop fast.
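Note that Recognition() always returns the closest gallery face, however dissimilar it is. A real check-in would also threshold the similarity; a sketch (the 0.75 cut-off is my illustration, not a value from the post):

    SingleFace probe;
    SingleFace best = yoto.Recognition(frame, probe);
    if (!best.empty())   // empty means no face was detected
    {
        double sim = LikeValue(&probe.feature[0], &best.feature[0], (int)probe.feature.size());
        if (sim > 0.75)   // illustrative threshold only
            cout << "Welcome, " << best.label << endl;
    }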
In the actual application, before the Register button is pressed, window A calls drawFaceImage() from a second thread.
The instant the Confirm button is pressed, the main thread also enters FaceDetect() via drawFaceImage(); thanks to the lock, this is safe.
Recognition uses the Recognition() function, which goes through the same lock.
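To make that concrete, a minimal sketch of the two-thread pattern (the capture setup and names are my assumptions, not the original Qt code):

    #include <atomic>
    #include <thread>

    std::atomic<bool> running{ true };
    YotoFace yoto;

    void previewLoop()                   // runs on the second thread
    {
        VideoCapture cap(0);
        Mat frame;
        while (running && cap.read(frame))
            yoto.drawFaceImage(frame);   // FaceDetect() runs under the lock
    }

    // Main (GUI) thread, e.g. in the Confirm button's slot:
    //     SingleFace probe;
    //     SingleFace match = yoto.Recognition(frame, probe);
    // Both paths funnel into FaceDetect(), so the mutex serializes them.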
Conclusion
Even without a GPU, the speed is quite good.