
I am using the code below to detect the position and orientation of two coloured objects. Everything works fine, except when the camera cannot detect one of the two objects (yellow and blue), or when I put my hand in front of the camera. Then the following error is thrown:

OpenCV Error: Null pointer (NULL array pointer is passed) in cvGetMat, file /tmp/buildd/libopencv-2.3.1+svn6514+branch23/modules/core/src/array.cpp, line 2382
terminate called after throwing an instance of 'cv::Exception'
  what():  /tmp/buildd/libopencv-2.3.1+svn6514+branch23/modules/core/src/array.cpp:2382: error: (-27) NULL array pointer is passed in function cvGetMat

I would appreciate any help resolving this error.

Thank you.

#include <ros/ros.h>
#include <opencv/cv.h>
#include <opencv/highgui.h>



#include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>

#include <math.h>

#include <geometry_msgs/Twist.h>

#define MAX_CONTOUR_LEVELS 5 //This will be used when displaying contours
#define PI 3.14159265
using namespace cv;
using namespace std;
// Class Definition
class color_control {
public:
    void mainLoop1();
    void mainLoop2();
    double R_X, R_Y; /* red center of gravity x,y in the frame normalized from -1.0 to 1.0 */
    int C_C;
    double setSpeed( double, double );
    void do_some_magic( IplImage* , int , int , int , int );

    color_control()
    {
      // node creation in root-namespace 
      ros::NodeHandle m_nodeHandle("/");

      m_commandPublisher = m_nodeHandle.advertise<geometry_msgs::Twist> ("cmd_vel", 20);
    }

    ~color_control()
    {
      cvDestroyWindow("result");
    }



protected:

    ros::NodeHandle m_nodeHandle;

    // Publisher and member variable for the drive commands
    ros::Publisher m_commandPublisher;
    geometry_msgs::Twist m_roombaCommand;

};

int abort_error_handler(int status, char const* func_name, char const* err_msg, char const* file_name, int line, void*) {
    cout << "ERROR: in " << func_name << "(" << file_name << ":" << line << ") Message: " << err_msg << endl;
    abort();
}

IplImage* GetThresholdedImage1(IplImage* img)
{
    IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
    cvCvtColor(img, imgHSV, CV_BGR2HSV);
    IplImage* imgThreshed1=cvCreateImage(cvGetSize(img),8,1);
    //cvInRangeS(imgHSV, cvScalar(20, 100, 100), cvScalar(30, 255, 255), imgYellow);
    cvInRangeS(imgHSV, cvScalar(22, 100, 100), cvScalar(32, 255, 255), imgThreshed1);
    cvReleaseImage(&imgHSV);
    //cvAdd(imgYellow,imgGreen,imgThreshed);
    //if (imgThreshed1 != NULL)
    return imgThreshed1;
    //cvShowImage( "result", imgThreshed1 );
}

IplImage* GetThresholdedImage2(IplImage* img)
{
    IplImage* imgHSV = cvCreateImage(cvGetSize(img), 8, 3);
    cvCvtColor(img, imgHSV, CV_BGR2HSV);
    IplImage* imgThreshed2=cvCreateImage(cvGetSize(img),8,1);
    //cvInRangeS(imgHSV, cvScalar(20, 100, 100), cvScalar(30, 255, 255), imgYellow);
    cvInRangeS(imgHSV, cvScalar(100,100,100^3),cvScalar(120,255,255^3), imgThreshed2);
    cvReleaseImage(&imgHSV);
    //cvAdd(imgYellow,imgGreen,imgThreshed);
    //if (imgThreshed2 == NULL)
    return imgThreshed2;


}

// find the largest contour (by area) from a sequence of contours and return a
// pointer to that item in the sequence

CvSeq* findLargestContour(CvSeq* contours){

  CvSeq* current_contour = contours;
  double largestArea = 0;
  CvSeq* largest_contour = NULL;

  // check we at least have some contours

  if (contours == NULL){return NULL;}

  // for each contour compare it to current largest area on
  // record and remember the contour with the largest area
  // (note the use of fabs() for the cvContourArea() function)

  while (current_contour != NULL){

      double area = fabs(cvContourArea(current_contour));

      if(area > largestArea){
          largestArea = area;
          largest_contour = current_contour;
      }

      current_contour = current_contour->h_next;
  }

  // return pointer to largest

  return largest_contour;

}

// normalize center of color to propulsion command
double color_control::setSpeed( double center, double width ){


  double normedCenter = (12.0*center/width) - 6.0;
    //ROS_INFO("normed center: %f", normedCenter);
  double retval = 0.0;

  if ( fabs( normedCenter ) > 1.0 ){
      retval = (trunc( normedCenter )/10.0 );
  }
  return retval;

}

//
// do_some_magic: scan the image for pixels close to the target (red, green, blue)
// colour, compute the centroid of the matching pixels, convert it to a speed
// command via setSpeed() and draw a marker circle for monitoring.
//
void color_control::do_some_magic( IplImage* img, int red, int green, int blue, int tolerance) {

    int radius = 20 ;

    int width = img->width;
    int height = img->height;
    int channels = img->nChannels;
    int step = img->widthStep;

    unsigned char redP = 0, greenP = 0, blueP = 0;
    double S_x = 0.0 ;
    double S_y = 0.0 ;
    int red_difference, green_difference, blue_difference = 255;

    C_C = 0 ;

    for(int y=0;y<height;y++) {
        for(int x=0;x<width;x++) {
          blueP = img->imageData[y*step+x*channels+0] ;
          greenP = img->imageData[y*step+x*channels+1] ;
          redP = img->imageData[y*step+x*channels+2] ;

      red_difference = fabs(red - (int)redP);
      green_difference = fabs(green - (int)greenP);
      blue_difference = fabs(blue - (int)blueP);

      if ( (red_difference + green_difference + blue_difference) < tolerance ) {
         C_C++ ;
         S_x += x;
         S_y += y;

         //ROS_INFO( "Difference to target color: %i", (red_difference + green_difference + blue_difference) );
        }

        }
     }

    S_x = S_x / (C_C) ;
    S_y = S_y / (C_C) ;

    R_X = setSpeed( S_x, img->width );
    R_Y = setSpeed( S_y, img->height );

    // for monitoring, draw a circle and display the respective picture
    CvPoint center;
    center.x = S_x;
    center.y = S_y;
    cvCircle( img, center, radius, CV_RGB( 255 - red, 255 - green, 255 - blue ) , 3, 8, 0 );
    cvShowImage( "result", img );
}

// Main Loop
void color_control::mainLoop1() {
    // determines the number of frames per second
    ros::Rate loop_rate(20);
     int radius ;
    CvCapture* capture = 0;
    //IplImage *frame, *frame_copy = 0;
    IplImage* imgScribble=NULL;
    // open the camera
    capture = cvCaptureFromCAM( 0 );
    cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_HEIGHT, 640);
    cvSetCaptureProperty(capture,CV_CAP_PROP_FRAME_WIDTH, 480);
    //capture = cvCaptureFromCAM( 1 );
    //cvRedirectError(abort_error_handler);



    // loop stops, e.g. if the node receives a kill signal
    while (m_nodeHandle.ok())
    {
        IplImage* frame = 0;
        frame = cvQueryFrame(capture);

        if (!frame)
            break;

//      if(imgScribble == NULL)
//      {
//          imgScribble=cvCreateImage(cvGetSize(frame),8,1);
//      }

        IplImage* imgYellowThresh = GetThresholdedImage1(frame);
        IplImage* imgGreenThresh = GetThresholdedImage2(frame);



            //CvMemStorage and CvSeq are structures used for dynamic data collection. CvMemStorage contains pointers to the actual
            //allocated memory, but CvSeq is used to access this data. Here, it will hold the list of image contours.
            CvMemStorage *storage = cvCreateMemStorage(0);
            CvSeq *contours = 0;

            //Once we have a binary image, we can look for contours in it. cvFindContours will scan through the image and store connected contours in "storage".
            //"contours" will point to the first contour detected. CV_RETR_LIST retrieves all contours without building a hierarchy, and CV_CHAIN_APPROX_SIMPLE
            //compresses the contour points. Finally, an offset value can be specified, but we set it to (0,0).
            cvFindContours(imgYellowThresh, storage, &contours, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

            //cvDrawContours will display the contour on top of an image. We can specify different colours depending on whether the contour is a hole or not.
            CvSeq* largest_contour = NULL;
            largest_contour = findLargestContour(contours);

            //cvDrawContours(imgYellowThresh, largest_contour, CV_RGB(255,255,255), CV_RGB(0,255,0), MAX_CONTOUR_LEVELS, 3, CV_AA, cvPoint(0,0));
            if (largest_contour != NULL) {
                cvDrawContours(imgYellowThresh, largest_contour, CV_RGB(255,255,255), CV_RGB(0,255,0), 0, 2, 8, cvPoint(0,0));
            }

            static int posX_Yellow=0;
            static int posY_Yellow=0;


            //int lastX=posX;
            //int lastY=posY;

           CvMoments moments1,cenmoments1;
           double M00, M01, M10;

           cvMoments(largest_contour,&moments1);
           M00 = cvGetSpatialMoment(&moments1,0,0);
           M10 = cvGetSpatialMoment(&moments1,1,0);
           M01 = cvGetSpatialMoment(&moments1,0,1);
           posX_Yellow = (int)(M10/M00);
           posY_Yellow = (int)(M01/M00);

          double theta = 0.5 * atan(
                    (2 * cvGetCentralMoment(&moments1, 1, 1)) /
                    (cvGetCentralMoment(&moments1, 2, 0) -  cvGetCentralMoment(&moments1, 0, 2)));
                theta = (theta / PI) * 180;

                // fit an ellipse (and draw it)

                if (largest_contour->total >= 6) // can only do an ellipse fit
                                                 // with at least 6 points
                {
                    CvBox2D box = cvFitEllipse2(largest_contour);
                    if ((box.size.width < imgYellowThresh->width) &&  (box.size.height < imgYellowThresh->height))
                    {
                        // get the angle of the ellipse right (MS Windows
                        // seems to draw ellipses differently)

                        box.angle =  270 - box.angle;
                        #ifndef WIN32
                            if( imgYellowThresh->origin ) {
                                box.angle = - box.angle;
                            }
                        #endif
                        cvEllipseBox(imgYellowThresh, box, CV_RGB(255, 255 ,255), 3, 8, 0 );
                    }
                }
              CvFont font;
              cvInitFont(&font, CV_FONT_HERSHEY_PLAIN, 1, 1 , 0, 2, 8 );

            #ifdef WIN32
                int height_offset = 20;
            #else
                int height_offset = 5;
            #endif
                char outputString[255];

                sprintf(outputString, "angle: %.10f", theta);
                cvPutText(imgYellowThresh, outputString,
                    cvPoint(10,imgYellowThresh->height - height_offset), &font, CV_RGB(255, 255,255));






        CvMemStorage *storageG = cvCreateMemStorage(0);
        CvSeq *contoursG = 0;

        //Look for contours in the second threshold image as well. cvFindContours will scan the image and store connected contours in "storageG".
        //"contoursG" will point to the first contour detected. CV_RETR_LIST retrieves all contours without building a hierarchy, and
        //CV_CHAIN_APPROX_SIMPLE compresses the contour points. The offset is set to (0,0).
        cvFindContours(imgGreenThresh, storageG, &contoursG, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

        //Again display only the largest contour, with the colour depending on whether the contour is a hole or not.
        CvSeq* largest_contourG = NULL;
        largest_contourG = findLargestContour(contoursG);

        if (largest_contourG != NULL) {
            //cvDrawContours(imgYellowThresh, largest_contour, CV_RGB(255,255,255), CV_RGB(0,255,0), MAX_CONTOUR_LEVELS, 3, CV_AA, cvPoint(0,0));
            cvDrawContours(imgGreenThresh, largest_contourG, CV_RGB(255,255,255), CV_RGB(0,255,0), 0, 2, 8, cvPoint(0,0));
        }
        //cvShowImage( "result2", imgGreenThresh );
        CvMoments moments2;
        double M00g, M01g, M10g;

        cvMoments(largest_contourG,&moments2);
        M00g = cvGetSpatialMoment(&moments2,0,0);
        M10g = cvGetSpatialMoment(&moments2,1,0);
        M01g = cvGetSpatialMoment(&moments2,0,1);
        static int posX_Green=0;
        static int posY_Green=0;
        posX_Green = (int)(M10g/M00g);
        posY_Green = (int)(M01g/M00g);

                               //printf("%d\n",M00g);
   //ROS_INFO("position (%d,%d)\n",posX_Yellow,posY_Yellow);
   //ROS_INFO("position (%d,%d)\n",posX_Green,posY_Green);

            //do_some_magic( frame , 0, 0, 0, 100);




//          if(lastX>0 && lastY>0 && posX>0 && posY>0)
//                  {
//                      cvLine(imgScribble,cvPoint(posX,posY),cvPoint(lastX,lastY),cvScalar(0,255,255), 5);
//                  }
//          cvAdd(frame,imgScribble,frame);
            cv::Point centerY(posX_Yellow,posY_Yellow);
            //centerY.x = posX_Yellow;
            //centerY.y = posY_Yellow;
            //radius = sqrt(areaY/3.14);
            cvCircle( frame, centerY, 10, CV_RGB( 255, 255, 255) , 3, 8, 0 );

            cv::Point centerG(posX_Green,posY_Green);
            //centerG.x = posX_Green;
            //centerG.y = posY_Green;
            cv::Point diff = centerY - centerG;
            //radius = sqrt(areaG/3.14);
            cvCircle( frame, centerG, 10, CV_RGB( 255, 255, 255) , 3, 8, 0 );
            //float angle = atan2(centerY,centerX);
            int slope = ((diff.y)/(diff.x));
            int len = cv::sqrt(diff.x*diff.x + diff.y*diff.y);
            double newdist = (len / 2000000);
            cvLine(frame, centerY, centerG, CV_RGB( 255, 255, 255), 5,8,0);
            //cvCircle( frame, diff, 10, CV_RGB( 255, 255, 255) , 3, 8, 0 );
            //ROS_INFO("angle :  %d\n",theta);
            //ROS_INFO("len: %d    slope: %d\n",len,slope);
            //printf("len: %f\n", slope);
            cvShowImage( "result", frame );
            cvShowImage( "result1", imgYellowThresh );
            cvShowImage( "result2", imgGreenThresh );

            if( cvWaitKey( 10 ) >= 0 )
                                    break;

         //ROS_INFO( "C_C: %d", C_C );



         // set driving direction and velocity
//       if (len > 50 || slope > 0)
//        {
//          m_roombaCommand.linear.x = 0.1;
//          m_roombaCommand.angular.z = 0.0;
//        } else {
//          m_roombaCommand.linear.x = 0.0;
//          m_roombaCommand.angular.z = 0.1;
//        }


         //
//      ROS_INFO("Forward: %f - Turning: %f", m_roombaCommand.linear.x, m_roombaCommand.angular.z);
//
//      // send the driving command to the roomba
        //m_commandPublisher.publish(m_roombaCommand);

    // spinOnce processes any pending ROS callbacks exactly once per iteration
    ros::spinOnce();
            //cvReleaseCapture( &capture );

    // sleep pauses the loop for roughly 50 ms to keep the 20 Hz loop rate
    loop_rate.sleep();


    }

    //cvReleaseImage( &frame_copy );


}


int main(int argc, char** argv) {
    //sleep(10);
    // Initialize
    ros::init(argc, argv, "color_control");

    ros::NodeHandle n;
    color_control f_g;
    f_g.mainLoop1();
    //color_control g_g;
    //g_g.mainLoop2();
    ros::spin();
    //cvRedirectError(abort_error_handler);
    return 0;

    //cvNamedWindow("Video");
    //cvNamedWindow("thresh");
}


1 Answer


You check the return value of findLargestContour for NULL when you call cvDrawContours, but after that you keep using the return value without checking it for NULL.

The simple solution is to add a NULL check before every place where the return value is used.
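As a rough sketch (reusing the variable names from your capture loop; skipping the frame with continue when no contour was found is just one possible way to handle it, and you may want to release the temporary images and the storage before continuing):

CvSeq* largest_contour = findLargestContour(contours);

// Passing a NULL CvSeq* into cvMoments() is what makes cvGetMat() throw the
// "NULL array pointer is passed" exception, so bail out early in that case.
if (largest_contour == NULL)
{
    // no yellow object visible in this frame: skip the frame instead of crashing
    continue;
}

cvDrawContours(imgYellowThresh, largest_contour, CV_RGB(255,255,255), CV_RGB(0,255,0), 0, 2, 8, cvPoint(0,0));

CvMoments moments1;
cvMoments(largest_contour, &moments1);
double M00 = cvGetSpatialMoment(&moments1, 0, 0);
double M10 = cvGetSpatialMoment(&moments1, 1, 0);
double M01 = cvGetSpatialMoment(&moments1, 0, 1);

if (M00 != 0)   // also guard against a zero-area contour before dividing
{
    posX_Yellow = (int)(M10 / M00);
    posY_Yellow = (int)(M01 / M00);
}

The same check is needed around cvMoments(largest_contourG, ...) for the second colour, since either object disappearing from view makes the corresponding findLargestContour call return NULL.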

answered Aug 17, 2012 at 9:51