
I am acquiring images from 3 Point Grey cameras and saving them to the hard disk. I am using the MultiCameraWriteToDiskEx example, which works fine, but I also want to display the images while they are being acquired, so I am trying to convert the FlyCapture images into a format OpenCV can read and display with cvShowImage() or imshow().

I have a function, IplImage* ConvertImageToOpenCV(Image* pImage), that converts a FlyCapture2 Image* into an OpenCV IplImage*, but inside the doGrabLoop() function that captures the frames from the cameras I don't know how to correctly turn the unsigned char* buffer into a FlyCapture2::Image*.

Can you help me? I'm not very confident with C/C++ :( In particular, I don't know how to convert g_arImageplus[ uiCamera ].image.pData so that it can be passed to ConvertImageToOpenCV(). (Is g_arImageplus[ uiCamera ].image.pData even the right thing to use?)

IplImage* ConvertImageToOpenCV(Image* pImage)
{
    IplImage* cvImage = NULL;
    bool bColor = true;
    CvSize mySize;
    mySize.height = pImage->GetRows();
    mySize.width = pImage->GetCols();
    printf("ciao %d\n",  pImage->GetPixelFormat() );
    switch ( pImage->GetPixelFormat() )
    {
        case PIXEL_FORMAT_MONO8:     cvImage = cvCreateImageHeader(mySize, 8, 1 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 1;
                                     bColor = false;
                                     break;
        case PIXEL_FORMAT_411YUV8:   cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_422YUV8:   cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_444YUV8:   cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_RGB8:      cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_MONO16:    cvImage = cvCreateImageHeader(mySize, 16, 1 );
                                     cvImage->depth = IPL_DEPTH_16U;
                                     cvImage->nChannels = 1;
                                     bColor = false;
                                     break;
        case PIXEL_FORMAT_RGB16:     cvImage = cvCreateImageHeader(mySize, 16, 3 );
                                     cvImage->depth = IPL_DEPTH_16U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_S_MONO16:  cvImage = cvCreateImageHeader(mySize, 16, 1 );
                                     cvImage->depth = IPL_DEPTH_16U;
                                     cvImage->nChannels = 1;
                                     bColor = false;
                                     break;
        case PIXEL_FORMAT_S_RGB16:   cvImage = cvCreateImageHeader(mySize, 16, 3 );
                                     cvImage->depth = IPL_DEPTH_16U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_RAW8:      cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_RAW16:     cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_MONO12:    printf("Not supported by OpenCV");
                                     bColor = false;
                                     break;
        case PIXEL_FORMAT_RAW12:     printf("Not supported by OpenCV");
                                     break;
        case PIXEL_FORMAT_BGR:       cvImage = cvCreateImageHeader(mySize, 8, 3 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 3;
                                     break;
        case PIXEL_FORMAT_BGRU:      cvImage = cvCreateImageHeader(mySize, 8, 4 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 4;
                                     break;
        case PIXEL_FORMAT_RGBU:      cvImage = cvCreateImageHeader(mySize, 8, 4 );
                                     cvImage->depth = IPL_DEPTH_8U;
                                     cvImage->nChannels = 4;
                                     break;
        default: printf("Some error occurred...\n");
                 return NULL;
    }

    if(bColor) {
        if(!bInitialized)
        {
            colorImage.SetData(new unsigned char[pImage->GetCols() * pImage->GetRows()*3], pImage->GetCols() * pImage->GetRows()*3);
            bInitialized = true;
        }

        pImage->Convert(PIXEL_FORMAT_BGR, &colorImage); //needs to be as BGR to be saved

        cvImage->width = colorImage.GetCols();
        cvImage->height = colorImage.GetRows();
        cvImage->widthStep = colorImage.GetStride();

        cvImage->origin = 0; //interleaved color channels

        cvImage->imageDataOrigin = (char*)colorImage.GetData(); //DataOrigin and Data same pointer, no ROI
        cvImage->imageData         = (char*)(colorImage.GetData());
        cvImage->widthStep      = colorImage.GetStride();
        cvImage->nSize = sizeof (IplImage);
        cvImage->imageSize = cvImage->height * cvImage->widthStep;
    }
    else
    {
        cvImage->imageDataOrigin = (char*)(pImage->GetData());
        cvImage->imageData         = (char*)(pImage->GetData());
        cvImage->widthStep         = pImage->GetStride();
        cvImage->nSize             = sizeof (IplImage);
        cvImage->imageSize         = cvImage->height * cvImage->widthStep;

        //at this point cvImage contains a valid IplImage
     }
    return cvImage;
}

//
// Grab and test loop
//
int doGrabLoop()
{
   FlyCaptureError   error = FLYCAPTURE_FAILED;
   unsigned int      aruiPrevSeqNum[ _MAX_CAMERAS ];
   unsigned int      aruiDelta[ _MAX_CAMERAS ];
   unsigned int      aruiCycles[ _MAX_CAMERAS ];
   HANDLE        arhFile[ _MAX_CAMERAS ];
   DWORD         ardwBytesWritten[ _MAX_CAMERAS ];
   DWORD         dwTotalKiloBytesWritten = 0;
   bool          bMissed = false;
   bool          bOutOfSync = false;
   unsigned int      uiMissedImages = 0;
   unsigned int      uiOutOfSyncImages = 0;
   __int64       nStartTime = 0;
   __int64       nEndTime = 0;
   __int64       nDifference = 0;
   __int64       nTotalTime = 0;
   __int64       nGlobalStartTime = 0;
   __int64       nGlobalEndTime = 0;
   __int64       nGlobalTotalTime = 0;
   __int64       nFrequency = 0;

   QueryPerformanceFrequency( (LARGE_INTEGER*)&nFrequency );
   QueryPerformanceCounter( (LARGE_INTEGER*)&nGlobalStartTime );

   printf( "Starting grab...\n" );

   // Create files to write to
   if ( createFiles( arhFile ) != 0 )
   {
      printf( "There was error creating the files\n" );
      return -1;
   }  

   BOOL bSuccess;

   //
   // Start grabbing the images
   //

   for( int iImage = 0; iImage < g_iNumImagesToGrab; iImage++ )
   {
#ifdef _VERBOSE
      printf( "Grabbing image %u\n", iImage );
#else
      printf( "." );
#endif

      unsigned int uiCamera = 0;

      // Grab an image from each camera
      for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
      {
     error = flycaptureLockNext( g_arContext[uiCamera], &g_arImageplus[uiCamera] );
         _HANDLE_ERROR( error, "flycaptureLockNext()" );

     // Save image dimensions & bayer info from first image for each camera
     if(iImage == 0)
     {
        g_arImageTemplate[uiCamera] = g_arImageplus[uiCamera].image;

        error = flycaptureGetColorTileFormat(g_arContext[uiCamera], &g_arBayerTile[uiCamera]);
        _HANDLE_ERROR( error, "flycaptureGetColorTileFormat()" );
     }
      }

      for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
      {      
     // Start timer
     QueryPerformanceCounter( (LARGE_INTEGER*)&nStartTime );

     // Calculate the size of the image to be written
     int iImageSize = 0;     
     int iRowInc = g_arImageplus[uiCamera].image.iRowInc;
     int iRows = g_arImageplus[uiCamera].image.iRows;
     iImageSize = iRowInc * iRows;
 // ERROR: HOW CAN I CONVERT g_arImageplus[ uiCamera ].image.pData IN ORDER TO USE IT WITH ConvertImageToOpenCV() function?
     IplImage* destImage = ConvertImageToOpenCV(g_arImageplus[ uiCamera ].image.pData);

        cvShowImage("prova", destImage);


     cvWaitKey(1);

     // Write to the file
     bSuccess = WriteFile(
        arhFile[uiCamera], 
        g_arImageplus[ uiCamera ].image.pData, 
        iImageSize,
        &ardwBytesWritten[uiCamera], 
        NULL ); 

     // End timer
     QueryPerformanceCounter( (LARGE_INTEGER*)&nEndTime );  

     // Ensure that the write was successful
     if ( !bSuccess || ( ardwBytesWritten[uiCamera] != (unsigned)iImageSize ) ) 
     {
        printf( "Error writing to file for camera %u!\n", uiCamera );
        return -1;
     }

     // Update various counters
     dwTotalKiloBytesWritten += (ardwBytesWritten[uiCamera] / 1024);
     nDifference = nEndTime - nStartTime;
     nTotalTime += nDifference;

     // Keep track of the difference in image sequence numbers (uiSeqNum)
     // in order to determine if any images have been missed.  A difference
     // greater than 1 indicates that an image has been missed.
         if( iImage == 0 )
         {
        // This is the first image, set up the variables
            aruiPrevSeqNum[uiCamera] = g_arImageplus[uiCamera].uiSeqNum;
            aruiDelta[uiCamera] = 1;
         }
         else
         {
        // Get the difference in sequence numbers between the current
        // image and the last image we received
            aruiDelta[uiCamera] = 
               g_arImageplus[uiCamera].uiSeqNum - aruiPrevSeqNum[uiCamera];
         }         

         if( aruiDelta[uiCamera] != 1 )
         {
            // We have missed an image.
        bMissed = true;
            uiMissedImages += aruiDelta[uiCamera] - 1;
         }
     else
     {
        bMissed = false;
     }

         aruiPrevSeqNum[uiCamera] = g_arImageplus[uiCamera].uiSeqNum;

     // Calculate the cycle count for the camera
         aruiCycles[uiCamera] = 
            g_arImageplus[uiCamera].image.timeStamp.ulCycleSeconds * 8000 +
            g_arImageplus[uiCamera].image.timeStamp.ulCycleCount;

     // Determine the difference of the timestamp for every image from the
     // first camera.  If the difference is greater than 1 cycle count, 
     // register the camera as being out of synchronization.
     int iDeltaFrom0 = abs( (int)(aruiCycles[uiCamera] - aruiCycles[0]) );

         if( ( iDeltaFrom0 % ( 128 * 8000 - 1 ) ) > 1 )
         {
        bOutOfSync = true;
        uiOutOfSyncImages++;
     }
     else
     {
        bOutOfSync = false;
     }

#ifdef _VERBOSE
     // Output is in the following order:
     // - The index of the image being captured
     // - The index of the camera that is currently being captured
     // - The time taken to write the image to disk
     // - Number of kilobytes written
     // - Write speed (in MB/s)
     // - Sequence number
     // - Cycle seconds in timestamp
     // - Cycle count in timestamp
     // - Delta from 0th value
     // - Missed an image?
     // - Out of sync?
     printf( 
        "%04d: \t%02u\t%0.5f\t%.0lf\t%.2lf\t%04u\t%03u.%04u\t%d\t%s %s\n",
        iImage,
        uiCamera,
        (double)nDifference / (double)nFrequency,
        (double)ardwBytesWritten[ uiCamera ] / 1024.0,
        (double)ardwBytesWritten[ uiCamera ] / ( 1024.0 * 1024.0 * ( (double)nDifference / (double)nFrequency ) ),
        g_arImageplus[ uiCamera ].uiSeqNum,
        g_arImageplus[ uiCamera ].image.timeStamp.ulCycleSeconds,
            g_arImageplus[ uiCamera ].image.timeStamp.ulCycleCount,
        iDeltaFrom0,
        bMissed ? "Y" : "N",
        bOutOfSync ? "Y" : "N");
#endif
      }

      // Unlock image, handing the buffer back to the buffer pool.
      for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
      {
         error = flycaptureUnlock( 
            g_arContext[uiCamera], g_arImageplus[uiCamera].uiBufferIndex );
         _HANDLE_ERROR( error, "flycaptureUnlock()" );
      }
   }

   //
   // Done grabbing images
   //

   QueryPerformanceCounter( (LARGE_INTEGER*)&nGlobalEndTime );
   nGlobalTotalTime = nGlobalEndTime - nGlobalStartTime;

   double dGlobalTotalTime = (double)nGlobalTotalTime / (double)nFrequency;
   double dTotalTime = (double)nTotalTime / (double)nFrequency;

   // Report on the results
   // Burst time is the time that was spent writing to disk only
   // Overall time is total time taken, including image grabs, calculations etc
   printf( 
      "\nBurst: Wrote %.1lfMB in %0.2fs ( %.2lfMB/sec )\n", 
      (double)( dwTotalKiloBytesWritten / 1024 ), 
      dTotalTime,
      (double)( dwTotalKiloBytesWritten / ( 1024 * dTotalTime ) ) );

   printf(
      "Overall: Wrote %.1lfMB in %0.2fs ( %.2lfMB/sec )\n",
      (double)( dwTotalKiloBytesWritten / 1024 ), 
      dGlobalTotalTime,
      (double)( dwTotalKiloBytesWritten / ( 1024 * dGlobalTotalTime ) ) );  

   printf( g_bSyncSuccess ? "Sync success\n" : "Sync failed\n" );
   printf( "Missed images = %u.\n", uiMissedImages );
   printf( "Out of sync images = %u.\n", uiOutOfSyncImages );

   for ( unsigned int uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
   {
      // Close file handles
      CloseHandle(arhFile[uiCamera]);
   }

   return 0;
}

1 Answer


Your conversion function:

IplImage* ConvertImageToOpenCV(Image* pImage)

takes an Image pointer, so build a new Image from the data you already have and pass its address in. From the Image class documentation, this constructor does not hold on to or copy the buffer:

Image   (   unsigned int    rows,
            unsigned int    cols,
            unsigned int    stride,
            unsigned char*  pData,
            unsigned int    dataSize,
            PixelFormat     format,
            BayerTileFormat bayerFormat = NONE 
)

So your code would look something like this:

// Pull these values from your other image structure
Image image(rows, cols, stride, pData, dataSize, format, bayerFormat);

// Pass the address of the temporary image to your conversion
IplImage* opencvImage = ConvertImageToOpenCV(&image);
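
Applied to your grab loop, those values come from the FlyCaptureImage that flycaptureLockNext() filled in. Here is a minimal sketch of how that wiring might look; the pixel format and Bayer tile order are assumptions (I picked PIXEL_FORMAT_RAW8 and BGGR for illustration), so substitute whatever your cameras and flycaptureGetColorTileFormat() actually report:

// Hypothetical wiring inside doGrabLoop(); the field names come from the
// FlyCaptureImage already used in the loop (iRows, iCols, iRowInc, pData).
FlyCaptureImage& fcImage = g_arImageplus[uiCamera].image;

FlyCapture2::Image image(
    fcImage.iRows,                      // rows
    fcImage.iCols,                      // cols
    fcImage.iRowInc,                    // stride in bytes per row
    fcImage.pData,                      // locked image buffer
    fcImage.iRowInc * fcImage.iRows,    // dataSize in bytes
    FlyCapture2::PIXEL_FORMAT_RAW8,     // ASSUMPTION: 8-bit raw Bayer data
    FlyCapture2::BGGR );                // ASSUMPTION: tile order reported by flycaptureGetColorTileFormat()

// The constructor keeps a reference to fcImage.pData rather than copying it,
// so convert and display before flycaptureUnlock() hands the buffer back.
IplImage* destImage = ConvertImageToOpenCV( &image );
cvShowImage( "prova", destImage );
cvWaitKey( 1 );

The Bayer tile format matters because your ConvertImageToOpenCV() calls pImage->Convert(PIXEL_FORMAT_BGR, ...) for the RAW8 case, and FlyCapture2 needs the correct tile order to demosaic the image properly.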