
I'm using the code below to create a video from a static 16:9 image with AVAssetWriter. The problem is that, for some reason, the resulting video comes out in 4:3 format.

Can anyone suggest how to fix the code so that it produces a 16:9 video, or how to convert the 4:3 video to 16:9?

Thank you.

- (void) createVideoFromStillImage 
{
//Set the size according to the device type (iPhone or iPad).
CGSize size = CGSizeMake(screenWidth, screenHeight);

NSString *betaCompressionDirectory = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/IntroVideo.mov"];

NSError *error = nil;

unlink([betaCompressionDirectory UTF8String]);

//----initialize compression engine
AVAssetWriter *videoWriter = [[AVAssetWriter alloc] initWithURL:[NSURL fileURLWithPath:betaCompressionDirectory]
                                                       fileType:AVFileTypeQuickTimeMovie
                                                          error:&error];
NSParameterAssert(videoWriter);
if(error)
    NSLog(@"error = %@", [error localizedDescription]);

NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys: AVVideoCodecH264,AVVideoCodecKey,
                               [NSNumber numberWithInt:size.height], AVVideoWidthKey,
                               [NSNumber numberWithInt:size.width], AVVideoHeightKey, nil];
AVAssetWriterInput *writerInput = [AVAssetWriterInput assetWriterInputWithMediaType:AVMediaTypeVideo outputSettings:videoSettings];

NSDictionary *sourcePixelBufferAttributesDictionary = [NSDictionary dictionaryWithObjectsAndKeys:
                                                       [NSNumber numberWithInt:kCVPixelFormatType_32ARGB], kCVPixelBufferPixelFormatTypeKey, nil];

AVAssetWriterInputPixelBufferAdaptor *adaptor = [AVAssetWriterInputPixelBufferAdaptor assetWriterInputPixelBufferAdaptorWithAssetWriterInput:writerInput
                                                                                                                 sourcePixelBufferAttributes:sourcePixelBufferAttributesDictionary];
NSParameterAssert(writerInput);
NSParameterAssert([videoWriter canAddInput:writerInput]);

if ([videoWriter canAddInput:writerInput])
    NSLog(@"I can add this input");
else
    NSLog(@"i can't add this input");

[videoWriter addInput:writerInput];

[videoWriter startWriting];
[videoWriter startSessionAtSourceTime:kCMTimeZero];

//CGImageRef theImage = [finishedMergedImage CGImage];
CGImageRef theImage = [introImage CGImage];

//dispatch_queue_t    dispatchQueue = dispatch_queue_create("mediaInputQueue", NULL);
int __block         frame = 0;

//Calculate how much progress % one frame completion represents. Maximum of 75%.
float currentProgress = 0.0;
float progress = (80.0 / kDurationOfIntroOutro);    
//NSLog(@"Progress is %f", progress);

for (int i=0; i<=kDurationOfIntroOutro; i++) {

    //Update our progress view for every frame that is generated.
    [self updateProgressView:currentProgress];
    currentProgress +=progress;

    //NSLog(@"CurrentProgress is %f", currentProgress);

    frame++;
    [NSThread sleepForTimeInterval:0.05]; //Delay to allow buffer to be ready.
    CVPixelBufferRef buffer = (CVPixelBufferRef)[self pixelBufferFromCGImage:theImage size:size];
    if (buffer) {
        if (adaptor.assetWriterInput.readyForMoreMediaData) {
            if (![adaptor appendPixelBuffer:buffer withPresentationTime:CMTimeMake(frame, 20)])
                NSLog(@"FAIL");
            else
                NSLog(@"Success:%d", frame);
        }
        CFRelease(buffer); //Release the buffer even if the input was not ready, so it is not leaked.
    }
}

[writerInput markAsFinished];
[videoWriter finishWriting];
[videoWriter release];

//NSLog(@"outside for loop");
//Grab the URL for the video so we can use it later.
NSURL *url = [self applicationDocumentsDirectory:kIntroVideoFileName];
[assetURLArray setObject:url forKey:kIntroVideo];

}

- (CVPixelBufferRef )pixelBufferFromCGImage:(CGImageRef)image size:(CGSize)size
{
NSDictionary *options = [NSDictionary dictionaryWithObjectsAndKeys:
                         [NSNumber numberWithBool:YES], kCVPixelBufferCGImageCompatibilityKey, 
                         [NSNumber numberWithBool:YES], kCVPixelBufferCGBitmapContextCompatibilityKey, nil];
CVPixelBufferRef pxbuffer = NULL;
CVReturn status = CVPixelBufferCreate(kCFAllocatorDefault, size.width, size.height, kCVPixelFormatType_32ARGB, (CFDictionaryRef) options, &pxbuffer);
// CVReturn status = CVPixelBufferPoolCreatePixelBuffer(NULL, adaptor.pixelBufferPool, &pxbuffer);

NSParameterAssert(status == kCVReturnSuccess && pxbuffer != NULL); 

CVPixelBufferLockBaseAddress(pxbuffer, 0);
void *pxdata = CVPixelBufferGetBaseAddress(pxbuffer);
NSParameterAssert(pxdata != NULL);

CGColorSpaceRef rgbColorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef context = CGBitmapContextCreate(pxdata, size.width, size.height, 8, 4*size.width, rgbColorSpace, kCGImageAlphaPremultipliedFirst);
NSParameterAssert(context);

CGContextDrawImage(context, CGRectMake(0, 0, CGImageGetWidth(image), CGImageGetHeight(image)), image);

CGColorSpaceRelease(rgbColorSpace);
CGContextRelease(context);

CVPixelBufferUnlockBaseAddress(pxbuffer, 0);

return pxbuffer;
}

1 Answer


To close this out, I'll restate what was covered above: the videoSettings dictionary should use the target output size of the video, but you are passing in the size of your view. Unless that is what you actually want to record, you need to change the values passed in for AVVideoWidthKey and AVVideoHeightKey to the correct output size.

Given that iOS device screens have aspect ratios close to 4:3, that is most likely what led to this ratio in your recorded video.
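As a minimal sketch of that fix (the 1280x720 target below is only an illustrative assumption, not a value taken from the question), the settings dictionary would pass the intended output dimensions rather than the screen size, and the appended pixel buffers should be created at the same size:

//Hypothetical 16:9 output size; substitute whatever output dimensions you actually want.
CGSize outputSize = CGSizeMake(1280.0, 720.0);

NSDictionary *videoSettings = [NSDictionary dictionaryWithObjectsAndKeys:
                               AVVideoCodecH264, AVVideoCodecKey,
                               [NSNumber numberWithInt:outputSize.width], AVVideoWidthKey,
                               [NSNumber numberWithInt:outputSize.height], AVVideoHeightKey, nil];

//The pixel buffers handed to the adaptor should match these dimensions as well,
//so pass outputSize (not the view size) when creating them:
CVPixelBufferRef buffer = (CVPixelBufferRef)[self pixelBufferFromCGImage:theImage size:outputSize];

Note also that pixelBufferFromCGImage:size: in the question draws the image at its own native size, so if the image dimensions differ from the output size the frame will not be filled; drawing into CGRectMake(0, 0, size.width, size.height) instead would scale the image to fill the buffer.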

answered 2012-04-27T21:57:50