In the real world we have a cube and a camera. The coordinates of each point are shown in the figure below. The camera is clearly located at [0, 0, 1].
We can compute where each point appears on the screen:
import numpy as np
import cv2
import math

world = np.array([
    (5.00,  0.00, 0.00),
    (5.00,  0.00, 0.50),
    (6.00, -1.00, 0.00),
    (6.00, -1.00, 0.50),
    (6.00,  1.00, 0.00),
    (6.00,  1.00, 0.50),
    (7.00,  0.00, 0.50),
])

# Camera extrinsic parameters
xRadEuler_C2W = -120 / 180 * math.pi
yRadEuler_C2W = 0 / 180 * math.pi
zRadEuler_C2W = -90 / 180 * math.pi
Rx = np.array([[1, 0, 0],
               [0, math.cos(xRadEuler_C2W), -math.sin(xRadEuler_C2W)],
               [0, math.sin(xRadEuler_C2W),  math.cos(xRadEuler_C2W)]])
Ry = np.array([[ math.cos(yRadEuler_C2W), 0, math.sin(yRadEuler_C2W)],
               [0, 1, 0],
               [-math.sin(yRadEuler_C2W), 0, math.cos(yRadEuler_C2W)]])
Rz = np.array([[math.cos(zRadEuler_C2W), -math.sin(zRadEuler_C2W), 0],
               [math.sin(zRadEuler_C2W),  math.cos(zRadEuler_C2W), 0],
               [0, 0, 1]])
# Notice: rotation matrix built from the Euler angles (world -> camera)
R = Rx @ Ry @ Rz
# projectPoints expects the rotation as a Rodrigues (axis-angle) vector
rvec, _ = cv2.Rodrigues(R)
# tvec is expressed wrt camera coords: t = R @ (-C), with the camera at C = (0, 0, 1)
tvec = R @ np.array([[0.0], [0.0], [-1.0]])

# Camera intrinsic parameters
dist_coeffs = np.zeros((5, 1))
width = 640
height = 480
focal_length = 160
center = (width / 2, height / 2)
camera_matrix = np.array([[focal_length, 0, center[0]],
                          [0, focal_length, center[1]],
                          [0, 0, 1]], dtype="double")

if __name__ == "__main__":
    print("\nProject Point on Screen")
    result = cv2.projectPoints(world, rvec, tvec, camera_matrix, dist_coeffs)
    for n in range(len(world)):
        print(world[n], '==>', result[0][n])
The result:
Project Point on Screen
[ 5. 0. 0. ] ==> [[ 320. 294.12609945]]
[ 5. 0. 0.5 ] ==> [[ 320. 312.2071607]]
[ 6. -1. 0. ] ==> [[ 291.91086401 299.94150262]]
[ 6. -1. 0.5 ] ==> [[ 290.62146125 315.41433581]]
[ 6. 1. 0. ] ==> [[ 348.08913599 299.94150262]]
[ 6. 1. 0.5 ] ==> [[ 349.37853875 315.41433581]]
[ 7. 0. 0.5 ] ==> [[ 320. 317.74146755]]
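As a quick sanity check, the first projected point can be reproduced by hand with the pinhole model u = f·x/z + cx, v = f·y/z + cy. This is a minimal sketch reusing R, tvec, focal_length, and center from the script above:

# Transform the first world point (5, 0, 0) into camera coordinates,
# then apply the pinhole projection with the intrinsics defined above.
X_cam = R @ np.array([5.0, 0.0, 0.0]) + tvec.ravel()
u = focal_length * X_cam[0] / X_cam[2] + center[0]
v = focal_length * X_cam[1] / X_cam[2] + center[1]
print(u, v)  # -> 320.0 294.126..., matching cv2.projectPoints above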
Now let's recover the camera position, which we defined as [0, 0, 1], from the projected points.
import numpy as np
import cv2

world = np.array([
    (5.00,  0.00, 0.00),
    (5.00,  0.00, 0.50),
    (6.00, -1.00, 0.00),
    (6.00, -1.00, 0.50),
    (6.00,  1.00, 0.00),
    (6.00,  1.00, 0.50),
    (7.00,  0.00, 0.50),
])

img_pnts = np.array([
    (320.0,        294.12609945),
    (320.0,        312.2071607),
    (291.91086401, 299.94150262),
    (290.62146125, 315.41433581),
    (348.08913599, 299.94150262),
    (349.37853875, 315.41433581),
    (320.0,        317.74146755),
])

# Camera intrinsic parameters
dist_coeffs = np.zeros((5, 1))
width = 640
height = 480
focal_length = 160
center = (width / 2, height / 2)
camera_matrix = np.array([[focal_length, 0, center[0]],
                          [0, focal_length, center[1]],
                          [0, 0, 1]], dtype="double")

if __name__ == "__main__":
    success, rot_vec, trans_vec = cv2.solvePnP(world, img_pnts, camera_matrix, dist_coeffs, flags=cv2.SOLVEPNP_ITERATIVE)
    print("\nTranslation Vector")
    print(trans_vec)
    print("\nRotation Vector")
    print(rot_vec)
    print("\nRotation Matrix")
    R, jacob = cv2.Rodrigues(rot_vec)
    print(R)
The result looks like this:
Translation Vector
[[ -8.87481507e-11]
[ -8.66025403e-01]
[ 4.99999999e-01]]
Rotation Vector
[[-1.58351453]
[-1.58351453]
[-0.91424254]]
Rotation Matrix
[[ 1.53020929e-11 1.00000000e+00 -5.93469718e-13]
[ 5.00000000e-01 -7.13717974e-12 8.66025404e-01]
[ 8.66025404e-01 -1.35487732e-11 -5.00000000e-01]]
So where did [0, 0, 1] go?
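A short sketch of the answer, reusing R and trans_vec from the script above: solvePnP returns the world-to-camera transform X_cam = R · X_world + t, so trans_vec is expressed in camera coordinates rather than being the camera's position. The camera center (where X_cam = 0) sits at C = -Rᵀ · t in world coordinates:

# Recover the camera position in world coordinates.
# From X_cam = R @ X_world + t, the camera center is at C = -R^T @ t.
cam_pos = -R.T @ trans_vec
print(cam_pos)  # -> approximately [[0.], [0.], [1.]], i.e. the original [0, 0, 1]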
Disclaimer: the figure and code were borrowed from this article.