I have pieced together fragments of how to accomplish both of these tasks (capturing a still image with AVFoundation and converting an image buffer to a UIImage), but I don't know how to combine them. The first block of code captures an image, but what I get back is only an image buffer, not something I know how to convert to a UIImage.

- (void)captureStillImage
{
    AVCaptureConnection *stillImageConnection = [[self stillImageOutput] connectionWithMediaType:AVMediaTypeVideo];

    [[self stillImageOutput] captureStillImageAsynchronouslyFromConnection:stillImageConnection
                                                         completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError *error) {
        if (imageDataSampleBuffer != NULL) {
            // Convert the JPEG sample buffer into NSData and then into a UIImage
            NSData *imageData = [AVCaptureStillImageOutput jpegStillImageNSDataRepresentation:imageDataSampleBuffer];
            UIImage *captureImage = [[UIImage alloc] initWithData:imageData];
        }

        if ([[self delegate] respondsToSelector:@selector(captureManagerStillImageCaptured:)]) {
            [[self delegate] captureManagerStillImageCaptured:self];
        }
    }];
}
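
If I understand correctly (and this may be a wrong assumption on my part), the conversion routine below needs the sample buffer to contain an uncompressed pixel buffer, which I take to mean the still image output has to be configured for BGRA rather than JPEG output, something like this:

// Sketch (my assumption, not verified): ask the still image output for
// uncompressed 32BGRA pixel buffers so that CMSampleBufferGetImageBuffer()
// returns a usable CVImageBufferRef. With JPEG output settings the buffer
// holds compressed data and only jpegStillImageNSDataRepresentation: applies.
AVCaptureStillImageOutput *stillImageOutput = [[AVCaptureStillImageOutput alloc] init];
NSDictionary *outputSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA]
                                                            forKey:(id)kCVPixelBufferPixelFormatTypeKey];
[stillImageOutput setOutputSettings:outputSettings];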

This is Apple's example, which takes an image buffer and converts it to a UIImage. How can I combine these two methods so that they work together? (Below the example I have sketched what I imagine the combined version should look like.)

-(UIImage*) getUIImageFromBuffer:(CMSampleBufferRef) imageSampleBuffer{

    // Get a CMSampleBuffer's Core Video image buffer for the media data
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(imageSampleBuffer);

    if (imageBuffer == NULL) {
        NSLog(@"No buffer");
        return nil;
    }

    // Lock the base address of the pixel buffer
    if((CVPixelBufferLockBaseAddress(imageBuffer, 0))==kCVReturnSuccess){
        NSLog(@"Buffer locked successfully");
    }

    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);

    // Get the number of bytes per row for the pixel buffer
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(imageBuffer);
    NSLog(@"bytes per row %zu",bytesPerRow );
    // Get the pixel buffer width and height
    size_t width = CVPixelBufferGetWidth(imageBuffer);
    NSLog(@"width %zu",width);

    size_t height = CVPixelBufferGetHeight(imageBuffer);
    NSLog(@"height %zu",height);

    // Create a device-dependent RGB color space
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // Create a bitmap graphics context with the sample buffer data
    CGContextRef context = CGBitmapContextCreate(baseAddress, width, height, 8,
                                                 bytesPerRow, colorSpace, kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);

    // Create a Quartz image from the pixel data in the bitmap graphics context
    CGImageRef quartzImage = CGBitmapContextCreateImage(context);

    // Free up the context and color space
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);

    // Create an image object from the Quartz image
    UIImage *image= [UIImage imageWithCGImage:quartzImage];

    // Release the Quartz image
    CGImageRelease(quartzImage);

    // Unlock the pixel buffer
    CVPixelBufferUnlockBaseAddress(imageBuffer,0);


    return image;

}
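
To make the question concrete, this is roughly what I imagine the combined version would look like, assuming the BGRA output settings sketched earlier and assuming both methods live on the same class. This is only a sketch of my intent; I haven't been able to get it working:

- (void)captureStillImage
{
    AVCaptureConnection *stillImageConnection = [[self stillImageOutput] connectionWithMediaType:AVMediaTypeVideo];

    [[self stillImageOutput] captureStillImageAsynchronouslyFromConnection:stillImageConnection
                                                         completionHandler:^(CMSampleBufferRef imageDataSampleBuffer, NSError *error) {
        if (imageDataSampleBuffer != NULL) {
            // Hand the sample buffer straight to Apple's conversion routine
            UIImage *captureImage = [self getUIImageFromBuffer:imageDataSampleBuffer];
            // ... presumably pass captureImage on from here, e.g. through the delegate
        }

        if ([[self delegate] respondsToSelector:@selector(captureManagerStillImageCaptured:)]) {
            [[self delegate] captureManagerStillImageCaptured:self];
        }
    }];
}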