お使いの携帯電話では、Google の顔検出がうまく機能していないようです。端末が HAL3 を使用しており、API2 を利用できることは確かでしょうか?
たとえば、私のコードでは、次のように問題なく顔検出を使用できています。
// Capture callback for the preview session. Its process() helper inspects each
// CaptureResult; while in the STATE_PREVIEW state it forwards the hardware
// face-detection statistics (CaptureResult.STATISTICS_FACES) to checkFaces().
// Parts of the original class were elided by the author ("//more code...").
private CameraCaptureSession.CaptureCallback mPhotoCaptureCallback
= new CameraCaptureSession.CaptureCallback() {
//more code...
private void process(CaptureResult result) {
switch (mState) {
case STATE_PREVIEW: {
// May be null if the device reports no face statistics for this frame.
checkFaces(result.get(CaptureResult.STATISTICS_FACES));
//more code....
break;
}
//more code...
}
これが checkFaces メソッドです。
/**
 * Decides whether the face overlay should be shown or hidden for the
 * current capture result.
 *
 * Shows the overlay as soon as at least one face is reported and records
 * the time of that event. Hides the overlay only after faces have been
 * absent for more than 100 ms, which debounces flicker between
 * consecutive frames.
 *
 * Fixes over the original: the inner {@code faces != null} test was
 * redundant (already inside a null-guarded branch), and a non-null but
 * EMPTY array never triggered the hide path, leaving stale rectangles on
 * screen — an empty array is now treated the same as {@code null}.
 *
 * @param faces faces from {@code CaptureResult.STATISTICS_FACES}; may be
 *              {@code null} when the frame carries no face statistics
 */
private void checkFaces(Face[] faces) {
    if (faces != null && faces.length > 0) {
        // Map sensor-coordinate rectangles into preview coordinates.
        // NOTE(review): the mapped result is never consumed here in the
        // original code — presumably the renderer should receive it; verify.
        CameraUtil.CustomFace[] mMappedCustomFaces = computeFacesFromCameraCoordinates(faces);
        mHandler.sendEmptyMessage(SHOW_FACES_MSG);
        mLastTimeRenderingFaces = System.currentTimeMillis();
    } else if (System.currentTimeMillis() > (mLastTimeRenderingFaces + 100)) {
        // No faces (null or empty) for over 100 ms: hide the overlay.
        mHandler.sendEmptyMessage(HIDE_FACES_MSG);
    }
}
私のカスタム Face クラス:
/**
 * Immutable value object carrying a detected face's bounding box and its
 * detection confidence, independent of the camera framework's Face type.
 */
public static class CustomFace {

    private final int score;
    private final Rect rect;

    /**
     * @param rect  face bounds (in preview coordinates)
     * @param score detection confidence reported by the camera
     */
    public CustomFace(Rect rect, int score) {
        this.rect = rect;
        this.score = score;
    }

    /** Returns the detection confidence. */
    public int getScore() {
        return score;
    }

    /** Returns the face bounding rectangle. */
    public Rect getBounds() {
        return rect;
    }
}
最後に、次の方法で顔を正しく描画できます(デフォルトの Android の Face クラスをそのまま使うこともできますが、4:3 や 16:9 のプレビューサイズのとき、あるいは端末を回転させたときに、矩形が正しく表示されません):
/**
 * Converts an integer {@code Rect} into an equivalent floating-point
 * {@code RectF} with the same left/top/right/bottom edges.
 *
 * @param r source rectangle; must not be {@code null}
 * @return a new {@code RectF} with identical coordinates
 */
public static RectF rectToRectF(Rect r) {
    final RectF converted = new RectF();
    converted.set(r.left, r.top, r.right, r.bottom);
    return converted;
}
/**
 * Maps raw face rectangles reported in camera-sensor coordinates into
 * preview/screen coordinates so they can be rendered over the viewfinder.
 *
 * Pipeline per face: sensor rect -> matrix-mapped rect -> rotation
 * correction for the sensor's mount orientation -> aspect-ratio scaling ->
 * offset compensation so the scaled rect keeps its original top-left.
 *
 * @param faces non-null array of faces from CaptureResult.STATISTICS_FACES
 * @return one CustomFace per input face, in preview coordinates
 */
private CameraFaceUtil.CustomFace[] computeFacesFromCameraCoordinates(Face[] faces) {
CameraFaceUtil.CustomFace[] mappedFacesList = new CameraFaceUtil.CustomFace[faces.length];
// NOTE(review): the result of this get() is discarded — the active array
// size is fetched but never used. Either dead code or a missing step in
// the coordinate mapping; confirm intent.
mCameraCharacteristics.get(CameraCharacteristics.SENSOR_INFO_ACTIVE_ARRAY_SIZE);
// Scale factor relating the preview's height/width ratio to the app's
// standard ratio (AutoFitTextureView.RATIO_STANDARD — presumably 4:3 or
// 16:9; TODO confirm against that class).
float toStandardAspectRatio = ((float) mPreviewRect.bottom / (float) mPreviewRect.right) / AutoFitTextureView.RATIO_STANDARD;
//
for (int i = 0; i < faces.length; i++) {
RectF mappedRect = new RectF();
Log.i(TAG, "[computeFacesFromCameraCoordinates] toStandardAspectRatio: " + toStandardAspectRatio);
Log.i(TAG, "[computeFacesFromCameraCoordinates] preview rect: " + mPreviewRect);
Log.i(TAG, "[computeFacesFromCameraCoordinates] raw rect: " + faces[i].getBounds());
// Transform the sensor-coordinate bounds through the camera-to-preview
// matrix (set up elsewhere; not visible in this snippet).
mCameraToPreviewMatrix.mapRect(mappedRect, CameraUtil.rectToRectF(faces[i].getBounds()));
Log.i(TAG, "[computeFacesFromCameraCoordinates] mapped rect: " + mappedRect);
// Snapshot of the mapped rect, used as the source for the rotation
// below while mappedRect is overwritten in place.
Rect auxRect = new Rect(CameraUtil.rectFToRect(mappedRect));
Log.i(TAG, "[computeFacesFromCameraCoordinates] aux rect: " + auxRect);
// NOTE(review): get() may return null on some devices; unboxing to int
// would then throw a NullPointerException. Consider a null check.
int cameraSensorOrientation = mCameraCharacteristics.get(CameraCharacteristics.SENSOR_ORIENTATION);
Log.i(TAG, "[computeFacesFromCameraCoordinates] cameraSensorOrientation: " + cameraSensorOrientation);
// Rotate the rect according to how the sensor is mounted relative to
// the display. Note the asymmetry: the 180/270 cases apply the
// aspect-ratio scale here AND again after the switch, while the 90
// case scales only once — presumably intentional per-device tuning,
// but worth confirming. The 0-degree case falls through unchanged.
switch (cameraSensorOrientation) {
case 90:
mappedRect.top = auxRect.left;
mappedRect.bottom = auxRect.right;
mappedRect.left = (mPreviewRect.right - auxRect.bottom);
mappedRect.right = (mPreviewRect.right - auxRect.top);
break;
case 180:
mappedRect.top = (mPreviewRect.bottom - auxRect.bottom) * toStandardAspectRatio;
mappedRect.bottom = (mPreviewRect.bottom - auxRect.top) * toStandardAspectRatio;
mappedRect.left = (mPreviewRect.right - auxRect.right) * toStandardAspectRatio;
mappedRect.right = (mPreviewRect.right - auxRect.left) * toStandardAspectRatio;
break;
case 270:
mappedRect.top = (mPreviewRect.bottom - auxRect.right) * toStandardAspectRatio;
mappedRect.bottom = (mPreviewRect.bottom - auxRect.left) * toStandardAspectRatio;
mappedRect.left = auxRect.top;
mappedRect.right = auxRect.bottom;
break;
}
Log.i(TAG, "[computeFacesFromCameraCoordinates] rotated by camera driver orientation rect without scale: "
+ mappedRect + ", with score: " + faces[i].getScore());
// Remember the pre-scale origin so the scale-induced translation can
// be subtracted afterwards (scaling about (0,0) moves the rect).
float topOffset = mappedRect.top;
float leftOffset = mappedRect.left;
mappedRect.top = mappedRect.top * toStandardAspectRatio;
mappedRect.bottom = mappedRect.bottom * toStandardAspectRatio;
mappedRect.left = mappedRect.left * toStandardAspectRatio;
mappedRect.right = mappedRect.right * toStandardAspectRatio;
Log.i(TAG, "[computeFacesFromCameraCoordinates] rotated by camera driver orientation rect with scale: "
+ mappedRect + ", with score: " + faces[i].getScore());
// Offsets now hold the displacement introduced by scaling; subtract
// them so the rect keeps its pre-scale top-left corner.
topOffset = mappedRect.top - topOffset;
leftOffset = mappedRect.left - leftOffset;
mappedRect.top -= topOffset /*- (mMirror ? mPreviewRect.height() : 0)*/;
mappedRect.bottom -= topOffset /* - (mMirror ? mPreviewRect.height() : 0)*/;
mappedRect.left -= leftOffset;
mappedRect.right -= leftOffset;
Log.i(TAG, "[computeFacesFromCameraCoordinates] rotated by camera driver orientation rect with offset: "
+ mappedRect + " topOffset " + topOffset + " leftOffset " + leftOffset);
// set the new values to the mapping array to get rendered
mappedFacesList[i] = new CameraFaceUtil.CustomFace(CameraUtil.rectFToRect(mappedRect), faces[i].getScore());
}
return mappedFacesList;
}
ここで行っているのは、画面の比率とサイズに基づいて顔を描画することです。camera2 API について他に必要な情報があれば、お気軽にお尋ねください。