
I have a rather basic question. I am trying to write a face detection/recognition program in Visual Studio 2010 using OpenCV 2.4.6. I am having trouble with the face recognition algorithm I took from the OpenCV documentation. The algorithm itself runs without errors, but I am not sure whether I am interpreting its output correctly or whether the output is actually wrong. I am using the AT&T face database for training and recognition. My CSV file (at.txt) looks like this:

C:\face\s1/1.pgm;0
C:\face\s1/2.pgm;0
C:\face\s1/3.pgm;0
C:\face\s1/4.pgm;0
C:\face\s1/5.pgm;0
C:\face\s1/6.pgm;0
C:\face\s1/7.pgm;0
C:\face\s1/8.pgm;0
C:\face\s1/9.pgm;0
C:\face\s1/10.pgm;0
C:\face\s2/1.pgm;1
C:\face\s2/2.pgm;1
C:\face\s2/3.pgm;1
C:\face\s2/4.pgm;1
C:\face\s2/5.pgm;1
C:\face\s2/6.pgm;1
C:\face\s2/7.pgm;1
C:\face\s2/8.pgm;1
C:\face\s2/9.pgm;1
C:\face\s2/10.pgm;1
C:\face\s3/1.pgm;2
C:\face\s3/2.pgm;2
C:\face\s3/3.pgm;2
C:\face\s3/4.pgm;2
C:\face\s3/5.pgm;2
C:\face\s3/6.pgm;2
C:\face\s3/7.pgm;2
C:\face\s3/8.pgm;2
C:\face\s3/9.pgm;2
C:\face\s3/10.pgm;2
C:\face\s4/1.pgm;3
C:\face\s4/2.pgm;3
C:\face\s4/3.pgm;3
C:\face\s4/4.pgm;3
C:\face\s4/5.pgm;3
C:\face\s4/6.pgm;3
C:\face\s4/7.pgm;3
C:\face\s4/8.pgm;3
C:\face\s4/9.pgm;3
C:\face\s4/10.pgm;3
C:\face\s5/1.pgm;4
C:\face\s5/2.pgm;4
C:\face\s5/3.pgm;4
C:\face\s5/4.pgm;4
C:\face\s5/5.pgm;4
C:\face\s5/6.pgm;4
C:\face\s5/7.pgm;4
C:\face\s5/8.pgm;4
C:\face\s5/9.pgm;4
C:\face\s5/10.pgm;4
C:\face\s6/1.pgm;5
C:\face\s6/2.pgm;5
C:\face\s6/3.pgm;5
C:\face\s6/4.pgm;5
C:\face\s6/5.pgm;5
C:\face\s6/6.pgm;5
C:\face\s6/7.pgm;5
C:\face\s6/8.pgm;5
C:\face\s6/9.pgm;5
C:\face\s6/10.pgm;5
C:\face\s7/1.pgm;6
C:\face\s7/2.pgm;6
C:\face\s7/3.pgm;6
C:\face\s7/4.pgm;6
C:\face\s7/5.pgm;6
C:\face\s7/6.pgm;6
C:\face\s7/7.pgm;6
C:\face\s7/8.pgm;6
C:\face\s7/9.pgm;6
C:\face\s7/10.pgm;6
C:\face\s8/1.pgm;7
C:\face\s8/2.pgm;7
C:\face\s8/3.pgm;7
C:\face\s8/4.pgm;7
C:\face\s8/5.pgm;7
C:\face\s8/6.pgm;7
C:\face\s8/7.pgm;7
C:\face\s8/8.pgm;7
C:\face\s8/9.pgm;7
C:\face\s8/10.pgm;7
C:\face\s9/1.pgm;8
C:\face\s9/2.pgm;8
C:\face\s9/3.pgm;8
C:\face\s9/4.pgm;8
C:\face\s9/5.pgm;8
C:\face\s9/6.pgm;8
C:\face\s9/7.pgm;8
C:\face\s9/8.pgm;8
C:\face\s9/9.pgm;8
C:\face\s9/10.pgm;8
C:\face\s10/1.pgm;9
C:\face\s10/2.pgm;9
C:\face\s10/3.pgm;9
C:\face\s10/4.pgm;9
C:\face\s10/5.pgm;9
C:\face\s10/6.pgm;9
C:\face\s10/7.pgm;9
C:\face\s10/8.pgm;9
C:\face\s10/9.pgm;9
C:\face\s10/10.pgm;9
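
As a side note, here is a minimal sketch (not part of the original program; the name check_csv and all variable names are purely illustrative) that walks the same "path;label" CSV and checks that every listed image can be loaded and that all images share the same dimensions, which the Eigenfaces model requires:

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <fstream>
#include <sstream>
#include <iostream>

// Reads a "path;label" CSV (same format as at.txt) and reports images that
// cannot be loaded or whose size differs from the first image in the list.
int main(int argc, char *argv[]) {
    if (argc < 2) {
        std::cout << "usage: check_csv <csv.ext>" << std::endl;
        return 1;
    }
    std::ifstream file(argv[1]);
    std::string line, path, classlabel;
    cv::Size expected;
    int checked = 0;
    while (std::getline(file, line)) {
        std::stringstream liness(line);
        std::getline(liness, path, ';');
        std::getline(liness, classlabel);
        if (path.empty() || classlabel.empty())
            continue;
        cv::Mat img = cv::imread(path, 0); // load as grayscale, just like read_csv below
        if (img.empty()) {
            std::cerr << "cannot load: " << path << std::endl;
            continue;
        }
        if (checked == 0)
            expected = img.size();
        else if (img.size() != expected)
            std::cerr << "size mismatch: " << path << std::endl;
        ++checked;
    }
    std::cout << checked << " images checked." << std::endl;
    return 0;
}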

My face recognition code looks like this:

#include "stdafx.h"

#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <iostream>
#include <fstream>
#include <sstream>

using namespace cv;
using namespace std;

static Mat norm_0_255(InputArray _src) {
    Mat src = _src.getMat();
    // Create and return normalized image:
    Mat dst;
    switch(src.channels()) {
    case 1:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
        break;
    case 3:
        cv::normalize(_src, dst, 0, 255, NORM_MINMAX, CV_8UC3);
        break;
    default:
        src.copyTo(dst);
        break;
    }
    return dst;
}

static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
    std::ifstream file(filename.c_str(), ifstream::in);
    if (!file) {
        string error_message = "No valid input file was given, please check the given filename.";
        CV_Error(CV_StsBadArg, error_message);
    }
    string line, path, classlabel;
    while (getline(file, line)) {
        stringstream liness(line);
        getline(liness, path, separator);
        getline(liness, classlabel);
        if(!path.empty() && !classlabel.empty()) {
            images.push_back(imread(path, 0));
            labels.push_back(atoi(classlabel.c_str()));
        }
    }
}

int main(int argc, const char *argv[]) {
    // Check for valid command line arguments, print usage
    // if no arguments were given.
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <csv.ext> <output_folder> " << endl;
        exit(1);
    }
    string output_folder;
    if (argc == 3) {
        output_folder = string(argv[2]);
    }
    // Get the path to your CSV.
    string fn_csv = string(argv[1]);
    // These vectors hold the images and corresponding labels.
    vector<Mat> images;
    vector<int> labels;
    // Read in the data. This can fail if no valid
    // input filename is given.
    try {
        read_csv(fn_csv, images, labels);
    } catch (cv::Exception& e) {
        cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
        // nothing more we can do
        exit(1);
    }
    // Quit if there are not enough images for this demo.
    if(images.size() <= 1) {
        string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!";
        CV_Error(CV_StsError, error_message);
    }
    // Get the height from the first image. We'll need this
    // later in code to reshape the images to their original
    // size:
    int height = images[0].rows;
    // The following lines simply get the last images from
    // your dataset and remove it from the vector. This is
    // done, so that the training data (which we learn the
    // cv::FaceRecognizer on) and the test data we test
    // the model with, do not overlap.
    Mat testSample = images[images.size() - 1];
    int testLabel = labels[labels.size() - 1];
    images.pop_back();
    labels.pop_back();
    // The following lines create an Eigenfaces model for
    // face recognition and train it with the images and
    // labels read from the given CSV file.
    // This here is a full PCA, if you just want to keep
    // 10 principal components (read Eigenfaces), then call
    // the factory method like this:
    //
    //      cv::createEigenFaceRecognizer(10);
    //
    // If you want to create a FaceRecognizer with a
    // confidence threshold (e.g. 123.0), call it with:
    //
    //      cv::createEigenFaceRecognizer(10, 123.0);
    //
    // If you want to use _all_ Eigenfaces and have a threshold,
    // then call the method like this:
    //
    //      cv::createEigenFaceRecognizer(0, 123.0);
    //
    Ptr<FaceRecognizer> model = createEigenFaceRecognizer();
    model->train(images, labels);
    // The following line predicts the label of a given
    // test image:
    int predictedLabel = model->predict(testSample);
    //
    // To get the confidence of a prediction call the model with:
    //
    //      int predictedLabel = -1;
    //      double confidence = 0.0;
    //      model->predict(testSample, predictedLabel, confidence);
    //
    string result_message = format("Predicted class = %d / Actual class = %d.", predictedLabel, testLabel);
    cout << result_message << endl;
    // Here is how to get the eigenvalues of this Eigenfaces model:
    Mat eigenvalues = model->getMat("eigenvalues");
    // And we can do the same to display the Eigenvectors (read Eigenfaces):
    Mat W = model->getMat("eigenvectors");
    // Get the sample mean from the training data
    Mat mean = model->getMat("mean");
    // Display or save:
    if(argc == 2) {
        imshow("mean", norm_0_255(mean.reshape(1, images[0].rows)));
    } else {
        imwrite(format("%s/mean.png", output_folder.c_str()), norm_0_255(mean.reshape(1, images[0].rows)));
    }
    // Display or save the Eigenfaces:
    for (int i = 0; i < min(10, W.cols); i++) {
        string msg = format("Eigenvalue #%d = %.5f", i, eigenvalues.at<double>(i));
        cout << msg << endl;
        // get eigenvector #i
        Mat ev = W.col(i).clone();
        // Reshape to original size & normalize to [0...255] for imshow.
        Mat grayscale = norm_0_255(ev.reshape(1, height));
        // Show the image & apply a Jet colormap for better sensing.
        Mat cgrayscale;
        applyColorMap(grayscale, cgrayscale, COLORMAP_JET);
        // Display or save:
        if(argc == 2) {
            imshow(format("eigenface_%d", i), cgrayscale);
        } else {
            imwrite(format("%s/eigenface_%d.png", output_folder.c_str(), i), norm_0_255(cgrayscale));
        }
    }

    // Display or save the image reconstruction at some predefined steps:
    for(int num_components = min(W.cols, 10); num_components < min(W.cols, 300); num_components+=15) {
        // slice the eigenvectors from the model
        Mat evs = Mat(W, Range::all(), Range(0, num_components));
        Mat projection = subspaceProject(evs, mean, images[0].reshape(1,1));
        Mat reconstruction = subspaceReconstruct(evs, mean, projection);
        // Normalize the result:
        reconstruction = norm_0_255(reconstruction.reshape(1, images[0].rows));
        // Display or save:
        if(argc == 2) {
            imshow(format("eigenface_reconstruction_%d", num_components), reconstruction);
        } else {
            imwrite(format("%s/eigenface_reconstruction_%d.png", output_folder.c_str(), num_components), reconstruction);
        }
    }
    // Display if we are not writing to an output folder:
    if(argc == 2) {
        waitKey(0);
    }
    return 0;
}

And my output looks like this:

http://s15.postimg.org/xq76erurf/image.png

The algorithm also writes out images: the mean image, the eigenfaces, and the reconstructed images. As far as I can tell, the most important ones are the reconstructions. In my output I only get a handful of reconstruction images, and almost all of them look like ghosts, except the last one, which is the first face/image reconstructed correctly. Is the algorithm working correctly? Why don't I get reconstructions of the other faces as well? And what does "Predicted class = 7 / Actual class = 9" mean?
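
For reference, the prediction step in the listing above only returns a label. A minimal variation (a sketch following the commented-out form already shown in the listing, not the original code) also retrieves the confidence value, which helps judge how reliable a result such as "Predicted class = 7 / Actual class = 9" is:

    // Sketch of a drop-in replacement for the prediction line in the program
    // above: the three-argument overload of predict() also returns a
    // confidence value. For Eigenfaces this is the distance to the nearest
    // training sample in the subspace -- smaller means a more reliable match.
    int predictedLabel = -1;
    double confidence = 0.0;
    model->predict(testSample, predictedLabel, confidence);
    cout << format("Predicted class = %d / Actual class = %d (confidence = %.2f)",
                   predictedLabel, testLabel, confidence) << endl;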


1 Answer


It sounds like you first need a basic understanding of the algorithm.

I recommend reading the Wikipedia article on Eigenfaces and the Turk & Pentland paper, Face Recognition Using Eigenfaces.

It would also help if you could tell us what your goal is. You may be heading in the wrong direction with this algorithm.

answered 2013-10-30 15:16