
[screenshot: my output]

The image above is my output.

I am using the OpenCV edge-detection C++ code shown below:

// Includes and declarations needed to compile this snippet on its own.
// The log tag APPNAME is assumed here; use whatever tag your project defines.
#include <jni.h>
#include <android/bitmap.h>
#include <android/log.h>
#include <opencv2/opencv.hpp>
#include <cmath>
#include <vector>

using namespace cv;
using namespace std;

#define APPNAME "EdgeDetection"

vector<Point> getPoints(Mat image);   // defined below, used by the JNI entry point

extern "C"
JNIEXPORT jfloatArray JNICALL Java_com_test_getPoints
(JNIEnv *env, jobject thiz, jobject bitmap)
{
    __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Scaning getPoints");
    int ret;
    AndroidBitmapInfo info;
    void* pixels = 0;

    if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
        __android_log_print(ANDROID_LOG_VERBOSE, APPNAME,"AndroidBitmap_getInfo() failed ! error=%d", ret);
        return 0;
    }

    if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888)
    {
        __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Bitmap format is not RGBA_8888!");
        return 0;
    }

    if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
        __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "AndroidBitmap_lockPixels() failed ! error=%d", ret);
        return 0;
    }

    // Wrap the locked bitmap pixels in a Mat header (no copy)
    Mat mbgra(info.height, info.width, CV_8UC4, pixels);

    vector<Point> img_pts = getPoints(mbgra);   // corner detection happens here
    jfloatArray jArray = env->NewFloatArray(8);

    if (jArray != NULL)
    {
        jfloat *ptr = env->GetFloatArrayElements(jArray, NULL);

        // Pack the four corners as [x0, x1, x2, x3, y0, y1, y2, y3]
        for (int i = 0, j = i + 4; j < 8; i++, j++)
        {
            ptr[i] = img_pts[i].x;
            ptr[j] = img_pts[i].y;
        }
        env->ReleaseFloatArrayElements(jArray, ptr, 0);
    }
    AndroidBitmap_unlockPixels(env, bitmap);
    return jArray;
}
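
// Cosine-of-angle helper used by the square test in getPoints() below.
// This is the definition from OpenCV's squares.cpp sample; it is assumed
// here because the original post does not show it.
static double angle(Point pt1, Point pt2, Point pt0)
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1 * dx2 + dy1 * dy2) / sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}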


vector<Point> getPoints(Mat image)
{
    int width = image.size().width;
    int height = image.size().height;
    Mat image_proc = image.clone();
    vector<vector<Point> > squares;
   Mat blurred(image_proc);
    medianBlur(image_proc, blurred, 9);
     Mat gray0(blurred.size(), CV_8U), gray;
    vector<vector<Point> > contours;
     for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);
               const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
          if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3); //
              dilate(gray, gray, Mat(), Point(-1,-1));
            }
            else
            {
                gray = gray0 >= (l+1) * 255 / threshold_level;
            }
           findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
           vector<Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
               approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
               if (approx.size() == 4 &&
                    fabs(contourArea(Mat(approx))) > 1000 &&
                    isContourConvex(Mat(approx)))
                {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }

        double largest_area = -1;
        int largest_contour_index = 0;
        for(int i=0;i<squares.size();i++)
        {
            double a =contourArea(squares[i],false);
            if(a>largest_area)
            {
                largest_area = a;
                largest_contour_index = i;
            }
        }

        __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Scaning size() %d",squares.size());
        vector<Point> points;
        if(squares.size() > 0)
        {
            points = squares[largest_contour_index];
        }
        else
        {
            points.push_back(Point(0, 0));
            points.push_back(Point(width, 0));
            points.push_back(Point(0, height));
            points.push_back(Point(width, height));
        }

        return points;
    }
}
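
For reference, a minimal debugging sketch (the helper name markPoints is just a placeholder, not part of the code above): it marks the four points returned by getPoints() on the RGBA frame, so the detected corners can be checked in the bitmap that goes back to Java. It would be called right after getPoints(mbgra) and before AndroidBitmap_unlockPixels().

// Debug-only sketch: mark the detected corners on the RGBA frame.
static void markPoints(Mat& mbgra, const vector<Point>& pts)
{
    for (size_t i = 0; i < pts.size(); i++)
        circle(mbgra, pts[i], 10, Scalar(0, 255, 0, 255), -1); // filled green dot per corner
}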

[screenshot]

How can I find the exact edges, as shown in the screenshot above?

I am new to OpenCV. Could someone please guide me? Thanks for any response.

[images: output image | required output]
