Opening note: compiled by the 编程笔记 editors, this article introduces TensorFlow face recognition (OpenFace, Face-recognition, Insightface) and walks through running the FaceNet source code; hopefully it is of some reference value.
Comparing the face recognition libraries OpenFace, Face-recognition, and Insightface:
Running the FaceNet source code
https://github.com/davidsandberg/facenet
1. Install TensorFlow with Anaconda;
2. Update the scipy library;
3. Add os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" to work around the following error:
OMP: Error #15: Initializing libiomp5.dylib, but found libiomp5.dylib already initialized.
4. Download the pretrained model (the 20180402-114759 model directory) and run:
python compare.py 20180402-114759 02.jpg 11.jpg
"""Performs face alignment and calculates L2 distance between the embeddings of images."""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import tensorflow as tf
import numpy as np
import sys
import os
import copy
import argparse
import facenet
import align.detect_face
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
def main(args):

    images = load_and_align_data(args.image_files, args.image_size, args.margin, args.gpu_memory_fraction)
    with tf.Graph().as_default():

        with tf.Session() as sess:

            # Load the model
            facenet.load_model(args.model)

            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Run forward pass to calculate embeddings
            feed_dict = {images_placeholder: images, phase_train_placeholder: False}
            emb = sess.run(embeddings, feed_dict=feed_dict)

            nrof_images = len(args.image_files)

            print('Images:')
            for i in range(nrof_images):
                print('%1d: %s' % (i, args.image_files[i]))
            print('')

            # Print distance matrix
            print('Distance matrix')
            print('    ', end='')
            for i in range(nrof_images):
                print('    %1d     ' % i, end='')
            print('')
            for i in range(nrof_images):
                print('%1d  ' % i, end='')
                for j in range(nrof_images):
                    dist = np.sqrt(np.sum(np.square(np.subtract(emb[i,:], emb[j,:]))))
                    print('  %1.4f  ' % dist, end='')
                print('')


def load_and_align_data(image_paths, image_size, margin, gpu_memory_fraction):

    minsize = 20                   # minimum size of face
    threshold = [0.6, 0.7, 0.7]    # three steps' threshold
    factor = 0.709                 # scale factor

    print('Creating networks and loading parameters')
    with tf.Graph().as_default():
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_memory_fraction)
        sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
        with sess.as_default():
            pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None)

    tmp_image_paths = copy.copy(image_paths)
    img_list = []
    for image in tmp_image_paths:
        img = misc.imread(os.path.expanduser(image), mode='RGB')
        img_size = np.asarray(img.shape)[0:2]
        bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor)
        if len(bounding_boxes) < 1:
            image_paths.remove(image)
            print("can't detect face, remove ", image)
            continue
        det = np.squeeze(bounding_boxes[0, 0:4])
        bb = np.zeros(4, dtype=np.int32)
        bb[0] = np.maximum(det[0] - margin / 2, 0)
        bb[1] = np.maximum(det[1] - margin / 2, 0)
        bb[2] = np.minimum(det[2] + margin / 2, img_size[1])
        bb[3] = np.minimum(det[3] + margin / 2, img_size[0])
        cropped = img[bb[1]:bb[3], bb[0]:bb[2], :]
        aligned = misc.imresize(cropped, (image_size, image_size), interp='bilinear')
        prewhitened = facenet.prewhiten(aligned)
        img_list.append(prewhitened)
    images = np.stack(img_list)
    return images


def parse_arguments(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument('model', type=str,
        help='Could be either a directory containing the meta_file and ckpt_file or a model protobuf (.pb) file')
    parser.add_argument('image_files', type=str, nargs='+', help='Images to compare')
    parser.add_argument('--image_size', type=int,
        help='Image size (height, width) in pixels.', default=160)
    parser.add_argument('--margin', type=int,
        help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
    parser.add_argument('--gpu_memory_fraction', type=float,
        help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
    return parser.parse_args(argv)


if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))
Run output:
(py27tf) bash-3.2$ python compare.py 20180402-114759 02.jpg 11.jpg
Creating networks and loading parameters
2019-01-15 17:11:02.874055: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
2019-01-15 17:11:02.874720: I tensorflow/core/common_runtime/process_util.cc:69] Creating new thread pool with default inter op setting: 8. Tune using inter_op_parallelism_threads for best performance.
Model directory: 20180402-114759
Metagraph file: model-20180402-114759.meta
Checkpoint file: model-20180402-114759.ckpt-275
WARNING:tensorflow:From /anaconda2/envs/py27tf/lib/python2.7/site-packages/tensorflow/python/training/queue_runner_impl.py:391: __init__ (from tensorflow.python.training.queue_runner_impl) is deprecated and will be removed in a future version.
Instructions for updating:
To construct input pipelines, use the `tf.data` module.
Images:
0: 02.jpg
1: 11.jpg
Distance matrix
        0         1
0    0.0000    0.4207
1    0.4207    0.0000
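The entries in the matrix are L2 (Euclidean) distances between the image embeddings, so smaller values mean more similar faces; the off-diagonal 0.4207 is the distance between 02.jpg and 11.jpg. To turn such a distance into a same-person decision, compare it against a threshold. Below is a minimal sketch; the threshold of 1.1 is an assumption (a common starting point for this kind of model, not a value from this article) and should be calibrated on your own labelled pairs:

import numpy as np

def is_same_person(emb_a, emb_b, threshold=1.1):
    # threshold=1.1 is an assumed starting point, not a value taken from
    # this article; tune it on a labelled validation set (e.g. LFW pairs).
    dist = np.sqrt(np.sum(np.square(emb_a - emb_b)))
    return dist < threshold, dist

# Usage with the embeddings computed in compare.py, where emb[0] belongs
# to 02.jpg and emb[1] to 11.jpg:
#   same, dist = is_same_person(emb[0], emb[1])
#   print('same person: %s (distance %.4f)' % (same, dist))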
Real-time face detection with MTCNN:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types, iteritems
import sys
import os
import numpy as np
import tensorflow as tf
#from math import floor
import cv2
import detect_face
import random
from time import sleep

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

video = cv2.VideoCapture(0)

print('Creating networks and loading parameters')
with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
    sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
    with sess.as_default():
        pnet, rnet, onet = detect_face.create_mtcnn(sess, None)

minsize = 20                  # minimum face size in pixels
threshold = [0.6, 0.7, 0.7]   # P-Net, R-Net, O-Net thresholds
factor = 0.709                # image pyramid scale factor

while True:
    ret, frame = video.read()
    bounding_boxes, _ = detect_face.detect_face(frame, minsize, pnet, rnet, onet, threshold, factor)
    nrof_faces = bounding_boxes.shape[0]
    print('face number: {}'.format(nrof_faces))
    for face_position in bounding_boxes:
        face_position = face_position.astype(int)
        cv2.rectangle(frame, (face_position[0], face_position[1]),
                      (face_position[2], face_position[3]), (0, 255, 0), 2)
    cv2.imshow('show', frame)
    if cv2.waitKey(5) & 0xFF == ord('q'):
        break

video.release()
cv2.destroyAllWindows()
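The loop above only draws detection boxes. To also recognise who is in front of the camera, each detected face can be cropped, prewhitened, and fed through the same FaceNet model used by compare.py, then compared against an enrolled embedding. The following is a rough sketch rather than a demo shipped with the repository: it assumes the 20180402-114759 model directory is in the working directory, a hypothetical enrolment photo reference.jpg, a single graph/session holding both MTCNN and FaceNet, and the assumed 1.1 threshold from the earlier sketch.

import cv2
import numpy as np
import tensorflow as tf
import facenet
import detect_face

THRESHOLD = 1.1  # assumed verification threshold; tune on your own data

with tf.Graph().as_default(), tf.Session() as sess:
    # Build MTCNN and load the FaceNet model into the same graph/session.
    pnet, rnet, onet = detect_face.create_mtcnn(sess, None)
    facenet.load_model('20180402-114759')
    images_ph = tf.get_default_graph().get_tensor_by_name("input:0")
    embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
    phase_train_ph = tf.get_default_graph().get_tensor_by_name("phase_train:0")

    def embed(bgr_img):
        """Embed the first face MTCNN finds in a BGR frame, or return None."""
        rgb = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
        boxes, _ = detect_face.detect_face(rgb, 20, pnet, rnet, onet, [0.6, 0.7, 0.7], 0.709)
        if len(boxes) < 1:
            return None
        x1, y1, x2, y2 = boxes[0, 0:4].astype(int)
        face = cv2.resize(rgb[max(y1, 0):y2, max(x1, 0):x2], (160, 160))
        face = facenet.prewhiten(face)
        return sess.run(embeddings, feed_dict={images_ph: face[None], phase_train_ph: False})[0]

    ref_img = cv2.imread('reference.jpg')                  # hypothetical enrolment image
    ref_emb = embed(ref_img) if ref_img is not None else None

    video = cv2.VideoCapture(0)
    while True:
        ret, frame = video.read()
        if not ret:
            break
        emb = embed(frame)
        if emb is not None and ref_emb is not None:
            dist = np.sqrt(np.sum(np.square(emb - ref_emb)))
            label = 'match %.2f' % dist if dist < THRESHOLD else 'no match %.2f' % dist
            cv2.putText(frame, label, (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
        cv2.imshow('show', frame)
        if cv2.waitKey(5) & 0xFF == ord('q'):
            break
    video.release()
    cv2.destroyAllWindows()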