I've built an augmented reality app on the Vuforia platform. I'm now modifying it so that, when the target is lost, the system uses the target's last known pose together with device orientation data from CoreMotion to keep the object in the correct position.
I need help with the last piece: integrating the CoreMotion data. I think the best way to do this is to rotate the virtual camera based on the gyro input, but I'm no OpenGL ES expert. Can anyone shed some light on the best way to do this? I already know how to get the device orientation data; what I need is guidance on the OpenGL side and the matrix algebra.
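To show where my thinking is: I assume I first need to get the CoreMotion attitude into a form I can multiply with the Vuforia pose. Here is a sketch of the helper I have in mind (the method name is mine, and I may well have the row/column ordering transposed, since CMRotationMatrix is row-major and OpenGL expects column-major):

#import <CoreMotion/CoreMotion.h>

// Sketch: pack a CMRotationMatrix into a QCAR::Matrix44F with no
// translation, so it can be fed to ShaderUtils::multiplyMatrix.
- (QCAR::Matrix44F)matrixFromRotation:(CMRotationMatrix)r {
    QCAR::Matrix44F m;
    m.data[0]  = r.m11; m.data[1]  = r.m12; m.data[2]  = r.m13; m.data[3]  = 0.0f;
    m.data[4]  = r.m21; m.data[5]  = r.m22; m.data[6]  = r.m23; m.data[7]  = 0.0f;
    m.data[8]  = r.m31; m.data[9]  = r.m32; m.data[10] = r.m33; m.data[11] = 0.0f;
    m.data[12] = 0.0f;  m.data[13] = 0.0f;  m.data[14] = 0.0f;  m.data[15] = 1.0f;
    return m;
}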
My renderFrame method is below:
- (void)renderFrameQCAR {
    [self setFramebuffer];

    // Clear colour and depth buffers
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    // Render video background and retrieve tracking state
    QCAR::State state = QCAR::Renderer::getInstance().begin();
    QCAR::Renderer::getInstance().drawVideoBackground();

    glEnable(GL_DEPTH_TEST);
    glEnable(GL_CULL_FACE);

    // Check if any trackables are visible.
    int numberOfTrackables = state.getNumActiveTrackables();
    QCAR::Matrix44F modelViewMatrix;

    // Skip rendering if there is nothing to render.
    if (numberOfTrackables > 0 || hasPickedUpTrackablePreviously == YES) {
        // If there are none, but one was picked up in the past, use the last pose matrix.
        if (numberOfTrackables == 0 && hasPickedUpTrackablePreviously == YES) {
            modelViewMatrix = trackablePoseMatrix;
        }
        else {
            // Get the trackable
            const QCAR::Trackable* trackable = state.getActiveTrackable(0);
            modelViewMatrix = QCAR::Tool::convertPose2GLMatrix(trackable->getPose());

            // Store these variables for use later.
            trackablePoseMatrix = modelViewMatrix;
            hasPickedUpTrackablePreviously = YES;
        }

        // Fetch the 3D object to render.
        Object3D *obj3D;
        if (currentlyChangingTextures == YES || useDummyModel == YES) {
            obj3D = dummyObject;
        }
        else {
            obj3D = [objects3D objectAtIndex:0];
        }

        // Render using OpenGL ES 2
        QCAR::Matrix44F modelViewProjection;

        // Apply usual transformations here
        ShaderUtils::translatePoseMatrix(sideToSideFloat, forwardBackFloat, 0.0f, &modelViewMatrix.data[0]);
        ShaderUtils::scalePoseMatrix(kObjectScale * sizeFloat, kObjectScale * sizeFloat, kObjectScale * sizeFloat, &modelViewMatrix.data[0]);
        ShaderUtils::rotatePoseMatrix(rotationAngleFloat, 0.0f, 0.0f, 1.0f, &modelViewMatrix.data[0]);

        // Apply our translation vector here based on gesture info from the
        // buttonOverlayViewController. Note: inverseModelViewMatrix is still
        // last frame's inverse at this point; it is refreshed below.
        QCAR::Vec3F translationFromWorldPerspective = SampleMath::Vec3FTransformNormal(translationVectorFromCamerasPerspective, inverseModelViewMatrix);
        translationFromWorldPerspective = SampleMath::Vec3FNormalize(translationFromWorldPerspective);
        theTranslation.data[0] = theTranslation.data[0] + speed * translationFromWorldPerspective.data[0];
        theTranslation.data[1] = theTranslation.data[1] + speed * translationFromWorldPerspective.data[1];
        theTranslation.data[2] = 0.0f;
        ShaderUtils::translatePoseMatrix(theTranslation.data[0], theTranslation.data[1], theTranslation.data[2], &modelViewMatrix.data[0]);

        // Update inverseModelViewMatrix
        inverseModelViewMatrix = SampleMath::Matrix44FInverse(modelViewMatrix);

        // Multiply modelview and projection matrix as usual
        ShaderUtils::multiplyMatrix(&qUtils.projectionMatrix.data[0], &modelViewMatrix.data[0], &modelViewProjection.data[0]);

        // Set up vertex attributes, bind the texture, upload the MVP matrix and draw.
        glUseProgram(shaderProgramID);
        glVertexAttribPointer(vertexHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.vertices);
        glVertexAttribPointer(normalHandle, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.normals);
        glVertexAttribPointer(textureCoordHandle, 2, GL_FLOAT, GL_FALSE, 0, (const GLvoid*)obj3D.texCoords);
        glEnableVertexAttribArray(vertexHandle);
        glEnableVertexAttribArray(normalHandle);
        glEnableVertexAttribArray(textureCoordHandle);
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, [obj3D.texture textureID]);
        glUniformMatrix4fv(mvpMatrixHandle, 1, GL_FALSE, (const GLfloat*)&modelViewProjection.data[0]);
        glDrawArrays(GL_TRIANGLES, 0, obj3D.numVertices);
        ShaderUtils::checkGlError("EAGLView renderFrameQCAR");
    }

    // Disable these things.
    glDisable(GL_DEPTH_TEST);
    glDisable(GL_CULL_FACE);
    glDisableVertexAttribArray(vertexHandle);
    glDisableVertexAttribArray(normalHandle);
    glDisableVertexAttribArray(textureCoordHandle);

    QCAR::Renderer::getInstance().end();
    [self presentFramebuffer];
}
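And this is roughly where I imagine the CoreMotion data plugging in: in the (numberOfTrackables == 0 && hasPickedUpTrackablePreviously == YES) branch, instead of reusing trackablePoseMatrix verbatim, premultiply it by the rotation the device has undergone since the target was lost. A sketch only, assuming motionManager is a CMMotionManager property with device motion updates started, and referenceAttitude is a copy of deviceMotion.attitude taken at the moment the trackable disappears (both names are mine). I'm unsure about the pre- vs. post-multiplication order and about remapping CoreMotion's reference frame to the camera frame, which is exactly the matrix algebra I'd like guidance on:

// Sketch for the lost-target branch above:
CMAttitude *attitude = [self.motionManager.deviceMotion.attitude copy];
// Turn the absolute attitude into the rotation since the target was lost.
[attitude multiplyByInverseOfAttitude:self.referenceAttitude];
QCAR::Matrix44F deltaRotation = [self matrixFromRotation:attitude.rotationMatrix];
QCAR::Matrix44F correctedPose;
// correctedPose = deltaRotation * trackablePoseMatrix (order may need swapping)
ShaderUtils::multiplyMatrix(&deltaRotation.data[0],
                            &trackablePoseMatrix.data[0],
                            &correctedPose.data[0]);
modelViewMatrix = correctedPose;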
Thanks!!