Boost python は、cv2.cv.Mat (python 内) を cv::Mat (C++) に自動的に変換しません。
boost::python::object を受け取るように C++ メソッドを宣言し、そのオブジェクトを cv::Mat に変換するための追加のコードを C++ 側に含める必要があります。
これは、STASM Active Shape Model ライブラリをラップするために行ったサンプルです。
#ifndef ASMSearcher_HPP
#define ASMSearcher_HPP
#include <string>
#include <boost/python.hpp>
#include <opencv2/core/core.hpp>
class ASMSearcher;
/*
* Wrapper around STASM ASMSearcher class so that we don't mix boost python code into the STASM library.
*/
/*
 * Mirror of the CPython object layout used by OpenCV's cv2.cv memory-tracking
 * wrapper (taken from the OpenCV Python bindings source). The field order and
 * types must match the binding's layout exactly.
 * NOTE(review): not referenced anywhere in the code visible in this file —
 * confirm it is actually needed before keeping it.
 */
struct memtrack_t {
PyObject_HEAD
void *ptr;        // raw pointer tracked by the wrapper
Py_ssize_t size;  // size in bytes of the tracked allocation
};
/*
 * Mirror of the CPython object layout of a cv2.cv.cvmat object (taken from the
 * OpenCV Python bindings source). Field order/types must match the binding's
 * layout exactly; convert_from_cvmat() casts a PyObject* to this.
 */
struct cvmat_t
{
PyObject_HEAD
CvMat *a;        // underlying CvMat header
PyObject *data;  // Python object (string or buffer) owning the pixel data
size_t offset;   // byte offset of the pixel data inside `data`
};
/*
 * Mirror of the CPython object layout of a cv2.cv.iplimage object (taken from
 * the OpenCV Python bindings source). Field order/types must match the
 * binding's layout exactly; convert_from_cviplimage() casts a PyObject* to this.
 */
struct iplimage_t {
PyObject_HEAD
IplImage *a;     // underlying IplImage header
PyObject *data;  // Python object (string or buffer) owning the pixel data
size_t offset;   // byte offset of the pixel data inside `data`
};
namespace bp = boost::python;
/*
 * Boost.Python-facing wrapper around the STASM ASMSearcher class, so that no
 * Boost.Python code is mixed into the STASM library itself.
 *
 * NOTE(review): owns asmLandmarksSearcher through a raw pointer but declares
 * no copy constructor / copy assignment — copying a Stasm would double-delete
 * the searcher. Consider disallowing copies.
 */
class Stasm
{
public:
// Leaves the searcher NULL; detect() would dereference it, so presumably the
// two-argument constructor is the one meant for real use — TODO confirm.
Stasm();
// Builds the underlying ASMSearcher from the two STASM config/model files.
Stasm(const std::string &conf_file0, const std::string &conf_file1);
~Stasm();
// Runs landmark detection on a cv2.cv image object; returns a flat python
// list [x0, y0, x1, y1, ...]. Empty config strings mean "use defaults".
bp::list detect(bp::object image, const std::string &conf_file0="",
const std::string &conf_file1="");
private:
ASMSearcher *asmLandmarksSearcher;  // owned; deleted in the destructor
// Dispatches on the Python type name and builds a cv::Mat over the object's data.
cv::Mat convertObj2Mat(bp::object image);
cv::Mat convert_from_cviplimage(PyObject *o,const char *name);
cv::Mat convert_from_cvmat(PyObject *o, const char* name);
};
#endif
#include "stasm.hpp"
#include "stasm_ocv.hpp"
#include <opencv2/highgui/highgui.hpp>
// Default constructor: leaves the searcher pointer null (no model loaded).
Stasm::Stasm()
    : asmLandmarksSearcher(NULL)
{
}
// Releases the owned searcher.
Stasm::~Stasm()
{
    // `delete` is defined as a no-op on a null pointer, so the explicit
    // NULL check the original carried was redundant.
    delete asmLandmarksSearcher;
}
// Constructs the underlying STASM searcher from the two config/model files.
Stasm::Stasm(const std::string &conf_file0, const std::string &conf_file1)
    : asmLandmarksSearcher(new ASMSearcher(conf_file0, conf_file1))
{
}
/*
 * Detect ASM facial landmarks in an image.
 *
 * image      : a Python cv2.cv.iplimage or cv2.cv.cvmat object (see convertObj2Mat).
 * conf_file0,
 * conf_file1 : optional STASM config files; an empty string means "no override"
 *              and is passed to STASM as NULL.
 *
 * Returns a flat python list [x0, y0, x1, y1, ...], empty if the image
 * carried no pixel data.
 */
bp::list Stasm::detect(bp::object image,
                       const std::string &conf_file0,
                       const std::string &conf_file1)
{
    const char *file0 = conf_file0.empty() ? NULL : conf_file0.c_str();
    const char *file1 = conf_file1.empty() ? NULL : conf_file1.c_str();

    // Convert the Python-side cv object into a cv::Mat header over its data.
    cv::Mat img = convertObj2Mat(image);

    bp::list pyLandmarks;
    // Guard: the converters return an empty Mat when the Python object has no
    // data; previously img.data (NULL) was passed straight to the searcher.
    if (img.empty())
        return pyLandmarks;

    const bool isColor = (img.channels() == 3);

    int nlandmarks = 0; // initialized in case the searcher reports nothing
    int landmarks[500]; // space for x,y coords of up to 250 landmarks
    asmLandmarksSearcher->search(&nlandmarks, landmarks,
            "image_name", (const char*)img.data, img.cols, img.rows,
            isColor /* is_color */, file0 /* conf_file0 */, file1 /* conf_file1 */);

    // Flatten the (x, y) pairs into a Python list.
    for (int i = 0; i < 2*nlandmarks; i++)
        pyLandmarks.append(landmarks[i]);
    return pyLandmarks;
}
/*
 * Build a cv::Mat header over the pixel data of a Python cv2.cv.cvmat object.
 * Adapted from the OpenCV Python bindings source (Python 2 API).
 *
 * o    : the Python cvmat object; its layout is assumed to match cvmat_t.
 * name : Python type name, used only in the error message.
 *
 * Returns an empty cv::Mat when the object carries no data. The returned Mat
 * borrows the Python object's buffer — it is only valid while `o` is alive.
 */
cv::Mat Stasm::convert_from_cvmat(PyObject *o, const char* name)
{
cv::Mat dest;
cvmat_t *m = (cvmat_t*)o;
void *buffer;
Py_ssize_t buffer_len;
// Detach the refcount so OpenCV will not try to free the Python-owned buffer.
m->a->refcount = NULL;
if (m->data && PyString_Check(m->data))
{
assert(cvGetErrStatus() == 0);
// Pixel data lives inside a Python string; point the CvMat header at it.
char *ptr = PyString_AsString(m->data) + m->offset;
cvSetData(m->a, ptr, m->a->step);
assert(cvGetErrStatus() == 0);
dest = m->a;
}
else if (m->data && PyObject_AsWriteBuffer(m->data, &buffer, &buffer_len) == 0)
{
// Data exposed through the (old) Python writable-buffer protocol.
cvSetData(m->a, (void*)((char*)buffer + m->offset), m->a->step);
assert(cvGetErrStatus() == 0);
dest = m->a;
}
else
{
printf("CvMat argument '%s' has no data", name);
//failmsg("CvMat argument '%s' has no data", name);
}
return dest;
}
/*
 * Build a cv::Mat header over the pixel data of a Python cv2.cv.iplimage
 * object. Adapted from the OpenCV Python bindings source (Python 2 API).
 *
 * o    : the Python iplimage object; its layout is assumed to match iplimage_t.
 * name : Python type name, used only in the error message.
 *
 * Returns an empty cv::Mat when the object carries no data. The returned Mat
 * borrows the Python object's buffer — it is only valid while `o` is alive.
 *
 * Fix: check ipl->data for NULL before calling PyString_Check(), matching the
 * guard used in convert_from_cvmat(); previously a missing data pointer was
 * dereferenced unconditionally.
 */
cv::Mat Stasm::convert_from_cviplimage(PyObject *o,const char *name)
{
    cv::Mat dest;
    iplimage_t *ipl = (iplimage_t*)o;
    void *buffer;
    Py_ssize_t buffer_len;

    if (ipl->data && PyString_Check(ipl->data)) {
        // Pixel data lives inside a Python string; point the IplImage at it.
        cvSetData(ipl->a, PyString_AsString(ipl->data) + ipl->offset, ipl->a->widthStep);
        assert(cvGetErrStatus() == 0);
        dest = ipl->a;
    } else if (ipl->data && PyObject_AsWriteBuffer(ipl->data, &buffer, &buffer_len) == 0) {
        // Data exposed through the (old) Python writable-buffer protocol.
        cvSetData(ipl->a, (void*)((char*)buffer + ipl->offset), ipl->a->widthStep);
        assert(cvGetErrStatus() == 0);
        dest = ipl->a;
    } else {
        printf("IplImage argument '%s' has no data", name);
    }
    return dest;
}
/*
 * Dispatch a Python OpenCV object to the matching converter based on its
 * Python type name: "cv2.cv.iplimage" goes to the IplImage path, anything
 * else is treated as a cv2.cv.cvmat.
 */
cv::Mat Stasm::convertObj2Mat(bp::object image)
{
    PyObject *obj = image.ptr();
    const char *typeName = obj->ob_type->tp_name;

    if (strcmp(typeName, "cv2.cv.iplimage") == 0)
        return convert_from_cviplimage(obj, typeName);

    return convert_from_cvmat(obj, typeName);
}
そして、それをテストするためのサンプル コードは次のようになります。
#!/usr/bin/env python
import cv2
import pystasm
import numpy as np
import sys
DEFAULT_TEST_IMAGE = "428.jpg"
def getFacePointsMapping():
    """Read the first line of mapping2.txt and return a dict mapping each
    landmark number found there to its position index in the file.

    NOTE(review): this definition is shadowed by a later redefinition of the
    same name (which returns a list of strings instead) — only one of the two
    should be kept.
    """
    mapping = {}
    # `with` guarantees the file handle is closed (the original leaked it).
    with open('mapping2.txt') as fhd:
        line = fhd.readline()
    for i, n in enumerate(line.split()):
        mapping[int(n)] = i
    return mapping
def drawFaceKeypoints(img, landmarks):
    """Draw each ASM landmark on img as a filled circle labelled with its
    mapped number, and return the annotated image.

    landmarks is a flat [x0, y0, x1, y1, ...] sequence of pixel coordinates.
    """
    mapping = getFacePointsMapping()
    # Walk the flat coordinate list pairwise.
    for i in range(0, len(landmarks) - 1, 2):
        pt = (landmarks[i], landmarks[i + 1])
        # `//` keeps the index an int under Python 3 as well (was `i/2`).
        number = mapping[i // 2]
        cv2.circle(img, pt, 3, (255, 0, 0), cv2.cv.CV_FILLED)
        cv2.putText(img, str(number), pt, cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255))
    return img
def getFacePointsMapping():
    """Read the first line of mapping2.txt and return its whitespace-separated
    tokens as a list of strings.

    NOTE(review): this redefinition shadows the earlier dict-returning version
    above; unlike it, the values here stay strings (no int() conversion).
    """
    # `with` guarantees the file handle is closed (the original leaked it).
    with open('mapping2.txt') as fhd:
        line = fhd.readline()
    return line.split()
def main():
    """Run STASM landmark detection on an image and display the result."""
    # Searcher configured with the 68- and 76-point STASM model files.
    asmsearcher = pystasm.Stasm('mu-68-1d.conf', 'mu-76-2d.conf')

    # Image path comes from argv[1] when given, else the bundled default.
    imagename = sys.argv[1] if len(sys.argv) == 2 else DEFAULT_TEST_IMAGE

    # Load and convert to grayscale before detection.
    img = cv2.imread(imagename)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # detect() requires an old-style cv object, not a bare numpy array,
    # hence the cv2.cv.fromarray() conversion.
    landmarks = asmsearcher.detect(cv2.cv.fromarray(img))
    img = drawFaceKeypoints(img, landmarks)

    cv2.imshow("test", img)
    cv2.waitKey()
# Run the demo only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
申し訳ありませんが、コードをクリーンアップする時間がありません。動作させるには cv2.cv.fromarray(numpy_array) を呼び出す必要があることに注意してください。私はまだ NumPy 配列を Boost.Python に直接渡す方法を見つけようとしています。あなたがすでにそれを理解しているなら、私に知らせてください :)。
ところで、boost オブジェクトと opencv の IplImage と Mat を変換するためのコードは、OpenCV のソースから取得したものであることを付け加えておきます。