
I'm building an iOS app that does basic detection. I get the raw frames from AVCaptureVideoDataOutput, convert the CMSampleBufferRef to a UIImage, resize the UIImage, and then convert it to a CVPixelBufferRef. As far as I can tell with Instruments, the leak is in the last part, where I convert the CGImage to a CVPixelBufferRef.

Here is the code I use:

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection 
{
    videof = [[ASMotionDetect alloc] initWithSampleImage:[self resizeSampleBuffer:sampleBuffer]];
    // ASMotionDetect is my class for detection and I use videof to calculate the movement
}

-(UIImage*)resizeSampleBuffer:(CMSampleBufferRef) sampleBuffer {
    UIImage *img;
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); 
    CVPixelBufferLockBaseAddress(imageBuffer,0);        // Lock the image buffer 

    uint8_t *baseAddress = (uint8_t *)CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);   // Get information of the image 
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer); 
    size_t width = CVPixelBufferGetWidth(imageBuffer); 
    size_t height = CVPixelBufferGetHeight(imageBuffer); 
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); 

    CGContextRef newContext = CGBitmapContextCreate(baseAddress, width, height, 8, bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst); 
    CGImageRef newImage = CGBitmapContextCreateImage(newContext); 
    CGContextRelease(newContext); 

    CGColorSpaceRelease(colorSpace); 
    CVPixelBufferUnlockBaseAddress(imageBuffer,0); 
    /* CVBufferRelease(imageBuffer); */  // do not call this!

    img = [UIImage imageWithCGImage:newImage];
    CGImageRelease(newImage);
    newContext = nil;
    img = [self resizeImageToSquare:img];
    return img;
}

-(UIImage*)resizeImageToSquare:(UIImage*)_temp {
    UIImage *img;
    int w = _temp.size.width;
    int h = _temp.size.height;
    CGRect rect;
    if (w>h) {
        rect = CGRectMake((w-h)/2,0,h,h);
    } else {
        rect = CGRectMake(0, (h-w)/2, w, w);
    }
    //
    img = [self crop:_temp inRect:rect];
    return img;
}

-(UIImage*) crop:(UIImage*)image inRect:(CGRect)rect{
    UIImage *sourceImage = image;
    CGRect selectionRect = rect;
    CGRect transformedRect = TransformCGRectForUIImageOrientation(selectionRect, sourceImage.imageOrientation, sourceImage.size);
    CGImageRef resultImageRef = CGImageCreateWithImageInRect(sourceImage.CGImage, transformedRect);
    UIImage *resultImage = [[UIImage alloc] initWithCGImage:resultImageRef scale:1.0 orientation:image.imageOrientation];
    CGImageRelease(resultImageRef);
    return resultImage;
}

And in my detection class I have:

- (id)initWithSampleImage:(UIImage*)sampleImage {
  if ((self = [super init])) {
    _frame = new CVMatOpaque();
    _histograms = new CVMatNDOpaque[kGridSize *
                                    kGridSize];
    [self extractFrameFromImage:sampleImage];
  }
  return self;
}

- (void)extractFrameFromImage:(UIImage*)sampleImage {
    CGImageRef imageRef = [sampleImage CGImage];
    CVImageBufferRef imageBuffer = [self pixelBufferFromCGImage:imageRef];
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
  // Collect some information required to extract the frame.
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    size_t height = CVPixelBufferGetHeight(imageBuffer);
    size_t width = CVPixelBufferGetWidth(imageBuffer);

  // Extract the frame, convert it to grayscale, and shove it in _frame.
    cv::Mat frame(height, width, CV_8UC4, baseAddress, bytesPerRow);
    cv::cvtColor(frame, frame, CV_BGR2GRAY);
    _frame->matrix = frame;
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
    CGImageRelease(imageRef);
}

- (CVPixelBufferRef) pixelBufferFromCGImage: (CGImageRef) image
{
    CVPixelBufferRef pxbuffer = NULL;
    int width = CGImageGetWidth(image)*2;
    int height = CGImageGetHeight(image)*2;

    NSMutableDictionary *attributes = [NSMutableDictionary dictionaryWithObjectsAndKeys:[NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, [NSNumber numberWithInt:width], kCVPixelBufferWidthKey, [NSNumber numberWithInt:height], kCVPixelBufferHeightKey, nil];
    CVPixelBufferPoolRef pixelBufferPool; 
    CVReturn theError = CVPixelBufferPoolCreate(kCFAllocatorDefault, NULL, (__bridge CFDictionaryRef) attributes, &pixelBufferPool);
    NSParameterAssert(theError == kCVReturnSuccess);
    CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, pixelBufferPool, &pxbuffer);
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);

    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
    NSParameterAssert(pxdata != NULL);
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pxdata, width,
                                                 height, 8, width*4, rgbColorSpace, 
                                                 kCGImageAlphaNoneSkipFirst);
    NSParameterAssert(context);
/* here is the problem: */
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);

    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);

    return pxbuffer;
}

With Instruments I found that the problem is in the CVPixelBufferRef allocations, but I don't understand why. Can anybody see the problem?

Thank you.


1 Answer


In -pixelBufferFromCGImage:, neither pxbuffer nor pixelBufferPool is released. That makes sense for pxbuffer, since it is a return value, but not for pixelBufferPool: you create and leak one on every call of the method.

A quick fix should be to:

  1. Release pixelBufferPool in -pixelBufferFromCGImage:
  2. Release pxbuffer (the return value of -pixelBufferFromCGImage:) in -extractFrameFromImage:

You should also rename -pixelBufferFromCGImage: to -createPixelBufferFromCGImage: to make it clear that it returns a retained object.
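
For illustration, a minimal sketch of both fixes, assuming ARC is enabled (so only the CF/CoreVideo objects need manual releases) and keeping the *2 scaling and pixel format from the question as-is:

- (CVPixelBufferRef)createPixelBufferFromCGImage:(CGImageRef)image
{
    CVPixelBufferRef pxbuffer = NULL;
    size_t width = CGImageGetWidth(image) * 2;
    size_t height = CGImageGetHeight(image) * 2;

    NSDictionary *attributes = @{(id)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32ARGB),
                                 (id)kCVPixelBufferWidthKey : @(width),
                                 (id)kCVPixelBufferHeightKey : @(height)};

    CVPixelBufferPoolRef pixelBufferPool = NULL;
    CVReturn theError = CVPixelBufferPoolCreate(kCFAllocatorDefault, NULL,
                                                (__bridge CFDictionaryRef)attributes,
                                                &pixelBufferPool);
    NSParameterAssert(theError == kCVReturnSuccess);
    CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, pixelBufferPool, &pxbuffer);
    NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL);

    CVPixelBufferLockBaseAddress(pxbuffer, 0);
    void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
    CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(pxdata, width, height, 8,
                                                 CVPixelBufferGetBytesPerRow(pxbuffer),
                                                 rgbColorSpace, kCGImageAlphaNoneSkipFirst);
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
    CGColorSpaceRelease(rgbColorSpace);
    CGContextRelease(context);
    CVPixelBufferUnlockBaseAddress(pxbuffer, 0);

    // Fix 1: release the pool created above; otherwise one pool leaks per call.
    CVPixelBufferPoolRelease(pixelBufferPool);

    // The "Create" prefix signals that the caller owns the buffer and must
    // release it with CVPixelBufferRelease().
    return pxbuffer;
}

- (void)extractFrameFromImage:(UIImage *)sampleImage
{
    // Note: sampleImage.CGImage is not owned by this method, so it must not
    // be passed to CGImageRelease.
    CVPixelBufferRef imageBuffer = [self createPixelBufferFromCGImage:sampleImage.CGImage];
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    // ... wrap the base address in a cv::Mat and convert to grayscale as before ...
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

    // Fix 2: release the buffer returned by -createPixelBufferFromCGImage:.
    CVPixelBufferRelease(imageBuffer);
}

Two incidental changes in the sketch: passing CVPixelBufferGetBytesPerRow(pxbuffer) to CGBitmapContextCreate instead of width*4, since a pool may align rows to more than 4*width bytes; and if this runs once per captured frame, it would be cheaper still to create the pool once and reuse it rather than creating and releasing it on every call.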
