
We are using a Kinect to track faces for a school project. We have set up Visual Studio 2012, and all of the test programs run correctly. However, when we try to run this code we get an error. After many attempts to fix the code, we are left with the following error:

"The application was unable to start correctly (0xc000007b). Click OK to close the application."

The good news is that it finally runs; the bad news is that the compiler doesn't report any errors other than this cryptic one.

We are completely lost and hope someone can help us or point us in the right direction. Thank you for your help.

Code:

#include "stdafx.h"
#include <iostream>
#include <Windows.h>
#include <NuiApi.h>
#include <FaceTrackLib.h>
#include <NuiSensor.h>

using namespace std;

HANDLE rgbStream;
HANDLE depthStream;

INuiSensor* sensor;

#define width 640
#define height 480

bool initKinect() {
    // Get a working kinect sensor
    int numSensors;
    if (NuiGetSensorCount(&numSensors) < 0 || numSensors < 1) return false;
    if (NuiCreateSensorByIndex(0, &sensor) < 0) return false;
    // Initialize sensor
    sensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH | NUI_INITIALIZE_FLAG_USES_COLOR);
    sensor->NuiImageStreamOpen(
        NUI_IMAGE_TYPE_COLOR,            // Depth camera or rgb camera?
        NUI_IMAGE_RESOLUTION_640x480,    // Image resolution
        0,      // Image stream flags, e.g. near mode
        2,      // Number of frames to buffer
        NULL,   // Event handle
        &rgbStream);
    // --------------- END CHANGED CODE -----------------
    return true;
}
BYTE* dataEnd;
USHORT* dataEndD;
void getKinectDataD(){
    NUI_IMAGE_FRAME imageFrame;
    NUI_LOCKED_RECT LockedRect;

    if (sensor->NuiImageStreamGetNextFrame(rgbStream, 0, &imageFrame) < 0) return;
    INuiFrameTexture* texture = imageFrame.pFrameTexture;
    texture->LockRect(0, &LockedRect, NULL, 0);

    const USHORT* curr = (const USHORT*)LockedRect.pBits;
    const USHORT* dataEnding = curr + (width*height);

    if (LockedRect.Pitch != 0)
    {
        const BYTE* curr = (const BYTE*)LockedRect.pBits;
        dataEnd = (BYTE*)(curr + (width*height) * 4);
    }

    while (curr < dataEnding) {
        // Get depth in millimeters
        USHORT depth = NuiDepthPixelToDepth(*curr++);
        dataEndD = (USHORT*)depth;
        // Draw a grayscale image of the depth:
        // B,G,R are all set to depth%256, alpha set to 1.
    }
    texture->UnlockRect(0);
    sensor->NuiImageStreamReleaseFrame(rgbStream, &imageFrame);
}

// This example assumes that the application provides
// void* cameraFrameBuffer, a buffer for an image, and that there is a method
// to fill the buffer with data from a camera, for example
// cameraObj.ProcessIO(cameraFrameBuffer)

int main(){
    initKinect();
    // Create an instance of a face tracker
    IFTFaceTracker* pFT = FTCreateFaceTracker();
    if (!pFT)
    {
        // Handle errors
    }

    // Initialize cameras configuration structures.
    // IMPORTANT NOTE: resolutions and focal lengths must be accurate, since it affects tracking precision!
    // It is better to use enums defined in NuiAPI.h

    // Video camera config with width, height, focal length in pixels
    // NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS focal length is computed for 640x480 resolution
    // If you use different resolutions, multiply this focal length by the scaling factor
    FT_CAMERA_CONFIG videoCameraConfig = { 640, 480, NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS };

    // Depth camera config with width, height, focal length in pixels
    // NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS focal length is computed for 320x240 resolution
    // If you use different resolutions, multiply this focal length by the scaling factor
    FT_CAMERA_CONFIG depthCameraConfig = { 320, 240, NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS };

    // Initialize the face tracker
    HRESULT hr = pFT->Initialize(&videoCameraConfig, &depthCameraConfig, NULL, NULL);
    if (FAILED(hr))
    {
        // Handle errors
    }

    // Create a face tracking result interface
    IFTResult* pFTResult = NULL;
    hr = pFT->CreateFTResult(&pFTResult);
    if (FAILED(hr))
    {
        // Handle errors
    }

    // Prepare image interfaces that hold RGB and depth data
    IFTImage* pColorFrame = FTCreateImage();
    IFTImage* pDepthFrame = FTCreateImage();
    if (!pColorFrame || !pDepthFrame)
    {
        // Handle errors
    }

    // Attach created interfaces to the RGB and depth buffers that are filled with
    // corresponding RGB and depth frame data from Kinect cameras
    pColorFrame->Attach(640, 480, dataEnd, FTIMAGEFORMAT_UINT8_R8G8B8, 640 * 3);
    pDepthFrame->Attach(320, 240, dataEndD, FTIMAGEFORMAT_UINT16_D13P3, 320 * 2);
    // You can also use Allocate() method in which case IFTImage interfaces own their memory.
    // In this case use CopyTo() method to copy buffers

    FT_SENSOR_DATA sensorData;
    sensorData.pVideoFrame = pColorFrame;   // RGB frame attached above
    sensorData.pDepthFrame = pDepthFrame;   // Depth frame attached above
    sensorData.ZoomFactor = 1.0f;           // Not used, must be 1.0
    sensorData.ViewOffset.x = 0;            // Not used, must be (0,0)
    sensorData.ViewOffset.y = 0;

    bool isFaceTracked = false;

    // Track a face
    while (true)
    {
        // Call Kinect API to fill videoCameraFrameBuffer and depthFrameBuffer with RGB and depth data
        getKinectDataD();

        // Check if we are already tracking a face
        if (!isFaceTracked)
        {
            // Initiate face tracking.
            // This call is more expensive and searches the input frame for a face.
            hr = pFT->StartTracking(&sensorData, NULL, NULL, pFTResult);
            if (SUCCEEDED(hr))
            {
                isFaceTracked = true;
            }
            else
            {
                // No faces found
                isFaceTracked = false;
            }
        }
        else
        {
            // Continue tracking. It uses a previously known face position.
            // This call is less expensive than StartTracking()
            hr = pFT->ContinueTracking(&sensorData, NULL, pFTResult);
            if (FAILED(hr))
            {
                // Lost the face
                isFaceTracked = false;
            }
        }

        // Do something with pFTResult like visualize the mask, drive your 3D avatar,
        // recognize facial expressions
    }

    // Clean up
    pFTResult->Release();
    pColorFrame->Release();
    pDepthFrame->Release();
    pFT->Release();
    return 0;
}

1 Answer


It turned out we were actually using the wrong DLL. It now runs without errors. However, we have run into another problem: we don't know how to use pFTResult and "getFaceRect" to get the angle of the face. Does anyone know how?
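For reference, GetFaceRect only returns the face rectangle in color-image coordinates; the head angles (pitch, yaw, roll) come from IFTResult::Get3DPose. Below is a minimal sketch of how one might query both after a successful StartTracking()/ContinueTracking() call, reusing the pFTResult from the code above. This is an illustration based on the Face Tracking SDK headers, not code tested against this exact project:

// Sketch: read the face rectangle and head pose from a successful IFTResult.
// Call this only after StartTracking()/ContinueTracking() succeeded.
if (SUCCEEDED(pFTResult->GetStatus()))
{
    // Face rectangle in color-image (video frame) coordinates
    RECT faceRect;
    if (SUCCEEDED(pFTResult->GetFaceRect(&faceRect)))
    {
        cout << "Face rect: (" << faceRect.left << "," << faceRect.top
             << ") - (" << faceRect.right << "," << faceRect.bottom << ")" << endl;
    }

    // Head pose: scale, rotation (pitch, yaw, roll) and translation
    FLOAT scale;
    FLOAT rotationXYZ[3];
    FLOAT translationXYZ[3];
    if (SUCCEEDED(pFTResult->Get3DPose(&scale, rotationXYZ, translationXYZ)))
    {
        cout << "Pitch: " << rotationXYZ[0]
             << "  Yaw: " << rotationXYZ[1]
             << "  Roll: " << rotationXYZ[2] << endl;
    }
}

Get3DPose reports Euler angles in degrees around the camera's X (pitch), Y (yaw), and Z (roll) axes, which is usually what "the angle of the face" refers to.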

answered 2013-10-11T12:00:13.987