I have been trying to code a face recognition program and need some help from the community. The code posted below compiles without errors, but the recognizer does not seem to be working. Basically, target.jpg contains a person cropped out of pic1.jpg (which has three people in it), so the recognizer should be able to pick that person out easily.

The code below runs without errors, but all three people in pic1.jpg get boxed, and GetEigenDistances is 0 for all three faces. The correct behaviour would be for only the one person in pic1.jpg who is also in target.jpg to be boxed.

Any idea where I am going wrong? Thanks in advance.

I am using Emgu CV 2.4 with C# 2010 Express.

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Emgu.CV;
using Emgu.Util;
using Emgu.CV.Structure;
using Emgu.CV.UI;
using Emgu.CV.CvEnum;


namespace FaceReco
{
    public partial class Form1 : Form
    {
        private HaarCascade haar;
        List<Image<Gray, byte>> trainingImages = new List<Image<Gray, byte>>();
        Image<Gray, byte> TrainedFace, UnknownFace = null;
        MCvFont font = new MCvFont(FONT.CV_FONT_HERSHEY_TRIPLEX, 0.5d, 0.5d);


        public Form1()
        {
            InitializeComponent();
        }

        private void Form1_Load(object sender, EventArgs e)
        {
            // adjust path to find your XML file 
            haar = new HaarCascade("haarcascade_frontalface_alt_tree.xml");

            //Read the target image
            Image TargetImg = Image.FromFile(Environment.CurrentDirectory + "\\target\\target.jpg");
            Image<Bgr, byte> TargetFrame = new Image<Bgr, byte>(new Bitmap(TargetImg));

            //FACE DETECTION FOR TARGET FACE
            if (TargetImg != null)   // confirm that image is valid
            {
                //convert the image to gray scale
                Image<Gray, byte> grayframe = TargetFrame.Convert<Gray, byte>();
                var faces = grayframe.DetectHaarCascade(haar, 1.4, 4,
                                        HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                        new Size(25, 25))[0];
                foreach (var face in faces)
                {
                    //add into training array
                    TrainedFace = TargetFrame.Copy(face.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                    trainingImages.Add(TrainedFace);
                    break;
                }
                TargetImageBox.Image = TrainedFace;
            }


            //Read an unknown image
            Image UnknownImg = Image.FromFile(Environment.CurrentDirectory + "\\img\\pic1.jpg");
            Image<Bgr, byte> UnknownFrame = new Image<Bgr, byte>(new Bitmap(UnknownImg));

            //FACE DETECTION PROCESS
            if (UnknownFrame != null)   // confirm that image is valid
            {
                //convert the image to gray scale
                Image<Gray, byte> grayframe = UnknownFrame.Convert<Gray, byte>();

                //Detect faces from the gray-scale image and store into an array of type 'var',i.e 'MCvAvgComp[]'
                var faces = grayframe.DetectHaarCascade(haar, 1.4, 4,
                                        HAAR_DETECTION_TYPE.DO_CANNY_PRUNING,
                                        new Size(25, 25))[0];

                //draw a green rectangle on each detected face in image
                foreach (var face in faces)
                {
                    UnknownFace = UnknownFrame.Copy(face.rect).Convert<Gray, byte>().Resize(100, 100, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);
                    MCvTermCriteria termCrit = new MCvTermCriteria(16, 0.001);
                    //Eigen face recognizer
                    EigenObjectRecognizer recognizer = new EigenObjectRecognizer(trainingImages.ToArray(), ref termCrit);

                    // if recognise face, draw green box
                    if (recognizer.Recognize(UnknownFace) != null)
                    {
                        UnknownFrame.Draw(face.rect, new Bgr(Color.Green), 3);
                    }

                    float f = recognizer.GetEigenDistances(UnknownFace)[0];
                    // display threshold
                    UnknownFrame.Draw(f.ToString("R"), ref font, new Point(face.rect.X - 3, face.rect.Y - 3), new Bgr(Color.Red));

                }

                //Display the image
                CamImageBox.Image = UnknownFrame;
            }
        }
    }
}
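
For reference, this is roughly how I am checking the eigen distances (a rough sketch; assume it sits inside the foreach loop above, right after the recognizer is created, and that the Debug calls are only there so I can inspect the raw values):

// Dump every eigen distance plus the Recognize() result to the debug output.
float[] distances = recognizer.GetEigenDistances(UnknownFace);
for (int i = 0; i < distances.Length; i++)
{
    System.Diagnostics.Debug.WriteLine("Training image " + i + ": distance = " + distances[i]);
}
object match = recognizer.Recognize(UnknownFace);
System.Diagnostics.Debug.WriteLine("Recognize() returned " + (match == null ? "null" : match.ToString()));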

1 Answer


This field is not really my speciality yet, but I will try to help where I can. This is what I am using, and it works very well for me.

Try to do all the work on the GPU; it is much faster than the CPU.

List<Rectangle> faces = new List<Rectangle>();
List<Rectangle> eyes = new List<Rectangle>();
long detectionTime; // set (in ms) by the detection call below

RightCameraImage = RightCameraImageCapture.QueryFrame().Resize(480, 360, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC); //Read the frame as an 8-bit Bgr image

//Emgu.CV.GPU.GpuInvoke.HasCuda
if (GpuInvoke.HasCuda)
{
    Video.DetectFace.UsingGPU(RightCameraImage, Main.FaceGpuCascadeClassifier, Main.EyeGpuCascadeClassifier, faces, eyes, out detectionTime);
}
else
{
    Video.DetectFace.UsingCPU(RightCameraImage, Main.FaceCascadeClassifier, Main.EyeCascadeClassifier, faces, eyes, out detectionTime);
}

string PersonsName = string.Empty;

Image<Gray, byte> GreyScaleFaceImage;

foreach (Rectangle face in faces)
{
    RightCameraImage.Draw(face, new Bgr(Color.Red), 2);

    GreyScaleFaceImage = RightCameraImage.Copy(face).Convert<Gray, byte>().Resize(200, 200, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC);

    if (KnownFacesList.Count > 0)
    {
        // MCvTermCriteria for face recognition...
        MCvTermCriteria mCvTermCriteria = new MCvTermCriteria(KnownFacesList.Count, 0.001);

        // Recognize known faces with the Eigen Object Recognizer...
        EigenObjectRecognizer recognizer = new EigenObjectRecognizer(KnownFacesList.ToArray(), KnownNamesList.ToArray(), eigenDistanceThreashhold, ref mCvTermCriteria);

        EigenObjectRecognizer.RecognitionResult recognitionResult = recognizer.Recognize(GreyScaleFaceImage);

        if (recognitionResult != null)
        {
            // Set the person's name...
            PersonsName = recognitionResult.Label;

            // Draw the label for each face detected and recognized...
            RightCameraImage.Draw(PersonsName, ref mCvFont, new Point(face.X - 2, face.Y - 2), new Bgr(Color.LightGreen));
        }
        else
        {
            // Draw the label for each face detected but NOT recognized...
            RightCameraImage.Draw(FaceUnknown, ref mCvFont, new Point(face.X - 2, face.Y - 2), new Bgr(Color.LightGreen));
        }
    }
}
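
The main difference from the code in the question is the recognizer constructor: the overload above also takes the labels and an eigen distance threshold, so (as far as I understand it) Recognize() can come back null for a face that is not close enough to any training image, instead of always returning the nearest one.

KnownFacesList and KnownNamesList are not shown above; roughly, they are built something like this (only a sketch, the "TrainedFaces" folder, the file naming and the 200x200 size are assumptions, the important part is that the two lists stay in the same order):

// Hypothetical loading of the training data used by the recognizer above.
List<Image<Gray, byte>> KnownFacesList = new List<Image<Gray, byte>>();
List<string> KnownNamesList = new List<string>();

foreach (string file in System.IO.Directory.GetFiles("TrainedFaces", "*.jpg"))
{
    // One grayscale face per file; the file name (minus extension) becomes the label.
    KnownFacesList.Add(new Image<Gray, byte>(file).Resize(200, 200, Emgu.CV.CvEnum.INTER.CV_INTER_CUBIC));
    KnownNamesList.Add(System.IO.Path.GetFileNameWithoutExtension(file));
}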

My code inside the Video.DetectFace class:

using System;
using Emgu.CV;
using Emgu.CV.GPU;
using System.Drawing;
using Emgu.CV.Structure;
using System.Diagnostics;
using System.Collections.Generic;

namespace Video
{
    //-----------------------------------------------------------------------------------
    //  Copyright (C) 2004-2012 by EMGU. All rights reserved. Modified by Chris Sykes.
    //-----------------------------------------------------------------------------------

    public static class DetectFace
    {
        // Use me like this:
        /*
        //Emgu.CV.GPU.GpuInvoke.HasCuda
        if (GpuInvoke.HasCuda)
        {
            DetectUsingGPU(...);
        }
        else
        {
            DetectUsingCPU(...);
        }
        */

        private static Stopwatch watch;

        public static void UsingGPU(Image<Bgr, Byte> image, GpuCascadeClassifier face, GpuCascadeClassifier eye, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
        {
            watch = Stopwatch.StartNew();

            using (GpuImage<Bgr, Byte> gpuImage = new GpuImage<Bgr, byte>(image))
            using (GpuImage<Gray, Byte> gpuGray = gpuImage.Convert<Gray, Byte>())
            {
                Rectangle[] faceRegion = face.DetectMultiScale(gpuGray, 1.1, 10, Size.Empty);
                faces.AddRange(faceRegion);
                foreach (Rectangle f in faceRegion)
                {
                    using (GpuImage<Gray, Byte> faceImg = gpuGray.GetSubRect(f))
                    {
                        //For some reason a clone is required.
                        //Might be a bug of GpuCascadeClassifier in opencv
                        using (GpuImage<Gray, Byte> clone = faceImg.Clone())
                        {
                            Rectangle[] eyeRegion = eye.DetectMultiScale(clone, 1.1, 10, Size.Empty);

                            foreach (Rectangle e in eyeRegion)
                            {
                                Rectangle eyeRect = e;
                                eyeRect.Offset(f.X, f.Y);
                                eyes.Add(eyeRect);
                            }
                        }
                    }
                }
            }
            watch.Stop();
            detectionTime = watch.ElapsedMilliseconds;
        }

        public static void UsingCPU(Image<Bgr, Byte> image, CascadeClassifier face, CascadeClassifier eye, List<Rectangle> faces, List<Rectangle> eyes, out long detectionTime)
        {
            watch = Stopwatch.StartNew();
            using (Image<Gray, Byte> gray = image.Convert<Gray, Byte>()) //Convert it to grayscale
            {
                //normalizes brightness and increases contrast of the image
                gray._EqualizeHist();

                //Detect the faces from the gray scale image and store the locations as rectangles
                //The first dimension is the channel
                //The second dimension is the index of the rectangle in the specific channel
                Rectangle[] facesDetected = face.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
                faces.AddRange(facesDetected);

                foreach (Rectangle f in facesDetected)
                {
                    //Set the region of interest on the faces
                    gray.ROI = f;
                    Rectangle[] eyesDetected = eye.DetectMultiScale(gray, 1.1, 10, new Size(20, 20), Size.Empty);
                    gray.ROI = Rectangle.Empty;

                    foreach (Rectangle e in eyesDetected)
                    {
                        Rectangle eyeRect = e;
                        eyeRect.Offset(f.X, f.Y);
                        eyes.Add(eyeRect);
                    }
                }
            }
            watch.Stop();
            detectionTime = watch.ElapsedMilliseconds;
        }
    } // END of CLASS...
} // END of NAMESPACE...
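
The classifiers passed in (Main.FaceCascadeClassifier, Main.EyeCascadeClassifier and the two GPU versions) are just the loaded cascade files. Roughly, something like this (only a sketch; the XML file names here are the stock OpenCV cascades and the paths are assumptions, so point them at wherever the files live on your machine):

// Hypothetical classifier setup for the UsingCPU / UsingGPU calls above.
CascadeClassifier FaceCascadeClassifier = new CascadeClassifier("haarcascade_frontalface_default.xml");
CascadeClassifier EyeCascadeClassifier = new CascadeClassifier("haarcascade_eye.xml");

// The GPU classifiers are only worth creating when CUDA is actually available.
GpuCascadeClassifier FaceGpuCascadeClassifier = null;
GpuCascadeClassifier EyeGpuCascadeClassifier = null;
if (GpuInvoke.HasCuda)
{
    FaceGpuCascadeClassifier = new GpuCascadeClassifier("haarcascade_frontalface_default.xml");
    EyeGpuCascadeClassifier = new GpuCascadeClassifier("haarcascade_eye.xml");
}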