kinect skd 1.5とWPFアプリケーションを使用して、マウスイベントをシミュレートしています。そのために、kinectセンサーを使用しています。私がdepthFramesを使用して、手までの距離を取得し、手を追跡しているのを聞いてください。手を正しく追跡しました。しかし、ジェスチャ認識を行うには、グレースケール画像を使用する必要があります。私はdepthFrameを使用して手を得ることができます。しかし、それはbyte[]配列を返します。このバイト配列をgrayScaleイメージに変換する方法はありますか。ジェスチャ認識を行うために、openCVラッパーにEmguCVを使用しています。これは私のソースコードです。しかし、変換すると、ビットマップのオブジェクト->静的メンバーに「式を逆参照できません。ポインターが無効です」というエラーが表示されます。このプロセスを正しく行うにはどうすればよいですか。plzは私を助けます。
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Windows;
using System.Windows.Controls;
using System.Windows.Data;
using System.Windows.Documents;
using System.Windows.Input;
using System.Windows.Media;
using System.Windows.Media.Imaging;
using System.Windows.Navigation;
using System.Windows.Shapes;
using Microsoft.Kinect;
using System.Windows.Forms;
using Emgu.CV.Structure;
using Emgu.CV;
using System.IO;
using System.Drawing;
using System.ComponentModel;
using System.Drawing.Imaging;
using System.Runtime.InteropServices;
namespace SkelitonApp
{
/// <summary>
/// Interaction logic for MainWindow.xaml
/// </summary>
public partial class MainWindow : Window
{
byte[] pixeData;
private WriteableBitmap colorBitmap;
KinectSensor kinectsensor = KinectSensor.KinectSensors.FirstOrDefault(s => s.Status == KinectStatus.Connected);
public MainWindow()
{
InitializeComponent();
}
const int skeletonCount = 6;
Skeleton[] allSkeletons = new Skeleton[skeletonCount];
private void Window_Loaded(object sender, RoutedEventArgs e)
{
kinectsensor.Start();
kinectsensor.AllFramesReady+=new EventHandler<AllFramesReadyEventArgs>(kinectsensor_AllFramesReady);
kinectsensor.ColorStream.Enable();
kinectsensor.DepthStream.Enable();
kinectsensor.SkeletonStream.Enable();
}
void kinectsensor_AllFramesReady(object sender, AllFramesReadyEventArgs e)
{
Skeleton first = GetFirstSkeleton(e);
if (first == null)
{
return;
}
GetCameraPoint(first, e);
//set scaled position
/*ScalePosition(headImage, first.Joints[JointType.Head]);
ScalePosition(leftEllipse1, first.Joints[JointType.HandLeft]);
ScalePosition(rightEllipse2, first.Joints[JointType.HandRight]);*/
using (DepthImageFrame handDepthFrame = e.OpenDepthImageFrame())
{
byte[] handBytes = null;
SkeletonFrame newskeletonFrame;
if (handDepthFrame == null)
{
return;
}
using (newskeletonFrame = e.OpenSkeletonFrame())
{
if (newskeletonFrame == null)
{
return;
}
}
handBytes = GenerateColoredBytes(handDepthFrame, newskeletonFrame, first);
int stride = handDepthFrame.Width * 4;
image2.Source =
BitmapSource.Create(handDepthFrame.Width, handDepthFrame.Height, 96, 96, PixelFormats.Bgr32, null, handBytes, stride);
//Hear is the place that i have the error(nnn bitmap variable)
Bitmap nnn = BitmapSourceToBitmap2(BitmapSource.Create(handDepthFrame.Width, handDepthFrame.Height, 96, 96, PixelFormats.Bgr32, null, handBytes, stride));
Console.WriteLine("aa");
}
}
public static System.Drawing.Bitmap BitmapSourceToBitmap2(BitmapSource srs)
{
System.Drawing.Bitmap btm = null;
int width = srs.PixelWidth;
int height = srs.PixelHeight;
int stride = width * ((srs.Format.BitsPerPixel + 7) / 8);
IntPtr ptr = Marshal.AllocHGlobal(height * stride);
srs.CopyPixels(new Int32Rect(0, 0, width, height), ptr, height * stride, stride);
btm = new System.Drawing.Bitmap(width, height, stride, System.Drawing.Imaging.PixelFormat.Format1bppIndexed, ptr);
return btm;
}
private byte[] GenerateColoredBytes(DepthImageFrame handDepthFrame, SkeletonFrame newskeletonFrame, Skeleton first)
{
short[] rawDepthdata=new short[handDepthFrame.PixelDataLength];
handDepthFrame.CopyPixelDataTo(rawDepthdata);
Byte[] pixels=new byte[handDepthFrame.Height*handDepthFrame.Width*4];
DepthImagePoint rightHandPoint = handDepthFrame.MapFromSkeletonPoint(first.Joints[JointType.HandRight].Position);
int DistanceToHand = rightHandPoint.Depth;
const int BlueIndex = 0;
const int GreenIndex =1;
const int RedIndex = 2;
int handDistanceMax = DistanceToHand + 10;
int handDistancemin = DistanceToHand - 60;
//int handAreaDiff = handDistanceMax - handDistancemin;
for (int depthIndex = 0, colorIndex = 0; depthIndex < rawDepthdata.Length && colorIndex < pixeData.Length; depthIndex++, colorIndex += 4)
{
int player = rawDepthdata[depthIndex] & DepthImageFrame.PlayerIndexBitmask;
int depth = rawDepthdata[depthIndex] >> DepthImageFrame.PlayerIndexBitmaskWidth;
/*if (depth < 900)
{
pixels[colorIndex + BlueIndex] = 255;
pixels[colorIndex + GreenIndex] = 0;
pixels[colorIndex + RedIndex] = 0;
}*/
if (depth <handDistanceMax && depth>handDistancemin)
{
pixels[colorIndex + BlueIndex] = 255;
pixels[colorIndex + GreenIndex] = 0;
pixels[colorIndex + RedIndex] = 0;
}
}
return pixels;
}
private void ScalePosition(FrameworkElement element, Joint joint)
{
/*Joint scaledJoint = joint.ScaleTo(1280, 720);
Canvas.SetLeft(element, scaledJoint.Position.X);
Canvas.SetTop(element, scaledJoint.Position.Y); */
}
private void GetCameraPoint(Skeleton first, AllFramesReadyEventArgs e)
{
using (DepthImageFrame depth = e.OpenDepthImageFrame())
{
if (depth == null)
{
return;
}
//map a joint location to a point on the depth map
DepthImagePoint headDepthPoint =
depth.MapFromSkeletonPoint(first.Joints[JointType.Head].Position);
DepthImagePoint leftDepthPoint =
depth.MapFromSkeletonPoint(first.Joints[JointType.HandLeft].Position);
DepthImagePoint rightDepthPoint =
depth.MapFromSkeletonPoint(first.Joints[JointType.HandRight].Position);
//map a depth point to a point in the color image
ColorImagePoint headColorPoint =
depth.MapToColorImagePoint(headDepthPoint.X,headDepthPoint.Y,
ColorImageFormat.RgbResolution640x480Fps30);
ColorImagePoint leftColorPoint =
depth.MapToColorImagePoint(leftDepthPoint.X, leftDepthPoint.Y,
ColorImageFormat.RgbResolution640x480Fps30);
ColorImagePoint rightColorPoint =
depth.MapToColorImagePoint(rightDepthPoint.X, rightDepthPoint.Y,
ColorImageFormat.RgbResolution640x480Fps30);
//set location
//System.Windows.Forms.Cursor.Position = new System.Drawing.Point(rightColorPoint.X,rightColorPoint.Y);
double screenWidth = Screen.PrimaryScreen.WorkingArea.Width;
double screenHeight = Screen.PrimaryScreen.WorkingArea.Height;
double windowWidth = Convert.ToInt32(image1.Width);
double windowHeight = Convert.ToInt32(image1.Height);
double x1 = rightColorPoint.X;
double y1 = rightColorPoint.Y;
double posX = (x1*100/ windowWidth);
posX = posX / 100 * screenWidth;
double posY = (y1 * 100 / windowHeight);
posY = posY / 100 * screenHeight;
// System.Windows.Forms.Cursor.Position = new System.Drawing.Point((int)posX, (int)posY);
/*
CameraPosition(headImage,headColorPoint);
CameraPosition(leftEllipse1, leftColorPoint);
CameraPosition(rightEllipse2, rightColorPoint);
*/
}
}
private void CameraPosition(FrameworkElement element, ColorImagePoint point)
{
Canvas.SetLeft(element,point.X-element.Width/2);
Canvas.SetTop(element, point.Y - element.Height / 2);
}
private Skeleton GetFirstSkeleton(AllFramesReadyEventArgs e)
{
////////////////////////
bool receiveData = false;
using (ColorImageFrame colorImageFrame = e.OpenColorImageFrame())
{
if (colorImageFrame != null)
{
if (pixeData == null)
{
pixeData = new byte[colorImageFrame.PixelDataLength];
}
colorImageFrame.CopyPixelDataTo(pixeData);
receiveData = true;
this.colorBitmap = new WriteableBitmap(this.kinectsensor.ColorStream.FrameWidth, this.kinectsensor.ColorStream.FrameHeight, 96.0, 96.0, PixelFormats.Bgr32, null);
}
else
{
// apps processing of image data is taking too long, it got more than 2 frames behind.
// the data is no longer avabilable.
}
}
if (receiveData)
{
this.colorBitmap.WritePixels(
new Int32Rect(0, 0, this.colorBitmap.PixelWidth, this.colorBitmap.PixelHeight),
this.pixeData,
this.colorBitmap.PixelWidth * sizeof(int),
0);
image1.Source = this.colorBitmap;
}
///////////////////////////
using(SkeletonFrame skeletonFrameData=e.OpenSkeletonFrame())
{
if (skeletonFrameData == null)
{
return null;
}
skeletonFrameData.CopySkeletonDataTo(allSkeletons);
//get the first tracked skeleton
Skeleton first=(from s in allSkeletons
where s.TrackingState==SkeletonTrackingState.Tracked
select s).FirstOrDefault();
return first;
}
}
}
}