57

camera2 の顔検出メカニズムに関する十分な情報がありません。Google の Camera2 サンプルを使用しました: https://github.com/android/camera-samples

顔検出モードをFULLにしてみました。

mPreviewRequestBuilder.set(CaptureRequest.STATISTICS_FACE_DETECT_MODE,
                                    CameraMetadata.STATISTICS_FACE_DETECT_MODE_FULL);

また、以下の 2 つもチェックしました。

STATISTICS_INFO_MAX_FACE_COUNT と STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES:

int max_count = characteristics.get(
CameraCharacteristics.STATISTICS_INFO_MAX_FACE_COUNT);
int modes [] = characteristics.get(
CameraCharacteristics.STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES);

出力: maxCount : 5 、モード : [0, 2]

私のCaptureCallback:

 private CameraCaptureSession.CaptureCallback mCaptureCallback
 = new CameraCaptureSession.CaptureCallback() {

    // Logs the face-detection statistics carried in a capture result.
    // Both STATISTICS_FACE_DETECT_MODE and STATISTICS_FACES may be null
    // (especially on partial results), hence the guard before logging.
    private void process(CaptureResult result) {
                Integer mode = result.get(CaptureResult.STATISTICS_FACE_DETECT_MODE);
                Face [] faces = result.get(CaptureResult.STATISTICS_FACES);
                if(faces != null && mode != null)
                    Log.e("tag", "faces : " + faces.length + " , mode : " + mode ); 
    }

    // Partial results may already carry face statistics before the
    // capture completes, so they are processed the same way.
    @Override
    public void onCaptureProgressed(CameraCaptureSession session, CaptureRequest request,
                                    CaptureResult partialResult) {
        process(partialResult);
    }

    @Override
    public void onCaptureCompleted(CameraCaptureSession session, CaptureRequest request,
                                   TotalCaptureResult result) {
        process(result);
    }

出力: faces : 0 、モード : 2

 public static final int STATISTICS_FACE_DETECT_MODE_FULL = 2;

faces.length は常に 0 です。顔が正しく検出されていないか、何かを見逃しているようです。

FaceDetectorでのアプローチを知っています。新しい camera2 Faceでどのように機能するかを確認したかっただけです。

4

5 に答える 5

0

お使いの携帯電話は、Google の顔検出がうまく機能していないようです。HAL3 を使用し、API2 を使用できることは確かですか?

たとえば、私のコードでは、次のような問題なく顔検出を使用しています。

 private CameraCaptureSession.CaptureCallback mPhotoCaptureCallback
            = new CameraCaptureSession.CaptureCallback() {
//more code...
  // Dispatches each capture result on the current state machine state;
  // while previewing, forwards the detected faces (may be null) to checkFaces.
  private void process(CaptureResult result) {
            switch (mState) {
                case STATE_PREVIEW: {
                    checkFaces(result.get(CaptureResult.STATISTICS_FACES));
                   //more code....
                    break;
                }
//more code...
}

これが checkFaces メソッドです。

 /**
  * Shows or hides the face overlay based on the faces reported by the camera.
  * Non-empty face arrays trigger SHOW_FACES_MSG; a null array hides the
  * overlay only after a 100 ms grace period to avoid flicker between frames.
  *
  * @param faces faces from CaptureResult.STATISTICS_FACES, or null
  */
 private void checkFaces(Face[] faces) {
    if (faces != null) {
        // Fixed: declared type must be CameraFaceUtil.CustomFace[] to match
        // the return type of computeFacesFromCameraCoordinates.
        CameraFaceUtil.CustomFace[] mMappedCustomFaces =
                computeFacesFromCameraCoordinates(faces);
        // faces is already known to be non-null here; only the length matters.
        if (faces.length > 0) {
            mHandler.sendEmptyMessage(SHOW_FACES_MSG);
            mLastTimeRenderingFaces = System.currentTimeMillis();
        }
    } else {
        // Debounce: keep the last rendered faces visible for 100 ms
        // before hiding, so momentary detection gaps don't flicker.
        if (System.currentTimeMillis() > (mLastTimeRenderingFaces + 100)) {
            mHandler.sendEmptyMessage(HIDE_FACES_MSG);
        }
    }
}

私のカスタム Face クラス:

     /**
      * Immutable value holder for a detected face: a confidence score and the
      * face bounding box (coordinate space is decided by the caller).
      */
public static class CustomFace {
    private final int score;  // detection confidence reported by the camera
    private final Rect rect;  // face bounds

    public CustomFace(Rect rect, int score) {
        this.score = score;
        this.rect = rect;
    }

    public int getScore() {
        return score;
    }

    public Rect getBounds() {
        return rect;
    }
}

最後に、次の方法で顔を正しく描画できます(デフォルトの Android の方法も使えますが、プレビューが 4:3 または 16:9 のサイズの場合や端末を回転させた場合に、長方形が正しく表示されません)。

  /** Widens an integer-coordinate Rect into a float-based RectF. */
  public static RectF rectToRectF(Rect r) {
    RectF converted = new RectF();
    converted.set(r.left, r.top, r.right, r.bottom);
    return converted;
}

     /**
      * Maps raw sensor-coordinate face rectangles into preview/view coordinates,
      * compensating for the camera sensor mounting orientation and rescaling to
      * the app's standard aspect ratio.
      *
      * @param faces faces from CaptureResult.STATISTICS_FACES (sensor coordinates)
      * @return one CustomFace per input face, with bounds in view coordinates
      */
     private CameraFaceUtil.CustomFace[] computeFacesFromCameraCoordinates(Face[] faces) {
        CameraFaceUtil.CustomFace[] mappedFacesList = new CameraFaceUtil.CustomFace[faces.length];

        // Ratio between the preview's own aspect ratio and the standard ratio;
        // used below to rescale the mapped rectangles.
        float toStandardAspectRatio = ((float) mPreviewRect.bottom / (float) mPreviewRect.right) / AutoFitTextureView.RATIO_STANDARD;

        // Loop-invariant camera metadata: read (and unbox) once, not once per face.
        int cameraSensorOrientation = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
        Log.i(TAG, "[computeFacesFromCameraCoordinates] cameraSensorOrientation: " + cameraSensorOrientation);

        for (int i = 0; i < faces.length; i++) {

            RectF mappedRect = new RectF();
            Log.i(TAG, "[computeFacesFromCameraCoordinates] toStandardAspectRatio: " + toStandardAspectRatio);
            Log.i(TAG, "[computeFacesFromCameraCoordinates] preview rect: " + mPreviewRect);
            Log.i(TAG, "[computeFacesFromCameraCoordinates] raw rect: " + faces[i].getBounds());

            // Map the sensor-coordinate rect into the preview coordinate system.
            mCameraToPreviewMatrix.mapRect(mappedRect, CameraUtil.rectToRectF(faces[i].getBounds()));

            Log.i(TAG, "[computeFacesFromCameraCoordinates] mapped rect: " + mappedRect);

            Rect auxRect = new Rect(CameraUtil.rectFToRect(mappedRect));

            Log.i(TAG, "[computeFacesFromCameraCoordinates] aux rect: " + auxRect);

            // Rotate the rect to undo the sensor mounting orientation.
            // NOTE(review): the 90° case applies no aspect-ratio scaling while
            // the 180°/270° cases do — confirm this asymmetry is intentional.
            switch (cameraSensorOrientation) {
                case 90:
                    mappedRect.top = auxRect.left;
                    mappedRect.bottom = auxRect.right;
                    mappedRect.left = (mPreviewRect.right - auxRect.bottom);
                    mappedRect.right = (mPreviewRect.right - auxRect.top);
                    break;

                case 180:
                    mappedRect.top = (mPreviewRect.bottom - auxRect.bottom) * toStandardAspectRatio;
                    mappedRect.bottom = (mPreviewRect.bottom - auxRect.top) * toStandardAspectRatio;
                    mappedRect.left = (mPreviewRect.right - auxRect.right) * toStandardAspectRatio;
                    mappedRect.right = (mPreviewRect.right - auxRect.left) * toStandardAspectRatio;
                    break;

                case 270:
                    mappedRect.top = (mPreviewRect.bottom - auxRect.right) * toStandardAspectRatio;
                    mappedRect.bottom = (mPreviewRect.bottom - auxRect.left) * toStandardAspectRatio;
                    mappedRect.left = auxRect.top;
                    mappedRect.right = auxRect.bottom;
                    break;
            }

            Log.i(TAG, "[computeFacesFromCameraCoordinates] rotated by camera driver orientation rect without scale: "
                    + mappedRect + ",  with score: " + faces[i].getScore());

            // Scale to the standard aspect ratio, remembering the pre-scale
            // top-left corner so the rect can be shifted back afterwards.
            float topOffset = mappedRect.top;
            float leftOffset = mappedRect.left;

            mappedRect.top = mappedRect.top * toStandardAspectRatio;
            mappedRect.bottom = mappedRect.bottom * toStandardAspectRatio;
            mappedRect.left = mappedRect.left * toStandardAspectRatio;
            mappedRect.right = mappedRect.right * toStandardAspectRatio;

            Log.i(TAG, "[computeFacesFromCameraCoordinates] rotated by camera driver orientation rect with scale: "
                    + mappedRect + ",  with score: " + faces[i].getScore());

            // Translate so the scaled rect keeps its original top-left position.
            topOffset = mappedRect.top - topOffset;
            leftOffset = mappedRect.left - leftOffset;

            mappedRect.top -= topOffset;
            mappedRect.bottom -= topOffset;
            mappedRect.left -= leftOffset;
            mappedRect.right -= leftOffset;

            Log.i(TAG, "[computeFacesFromCameraCoordinates] rotated by camera driver orientation rect with offset: "
                    + mappedRect + " topOffset " + topOffset + " leftOffset " + leftOffset);

            // set the new values to the mapping array to get rendered
            mappedFacesList[i] = new CameraFaceUtil.CustomFace(CameraUtil.rectFToRect(mappedRect), faces[i].getScore());
        }

        return mappedFacesList;
    }

私がやっているのは、画面の比率とサイズに基づいて顔を描いていることです。camera2API について他に何か必要な場合は、お気軽にお問い合わせください。

于 2016-06-30T07:47:21.170 に答える