
I'm trying to record sound from the microphone and play it back in real time on OS X. Eventually it will be streamed over the network, but for now I'm just trying to get local recording/playback working.

I can record sound and write it to a file, which I can do with both AVCaptureSession and AVAudioRecorder (both are shown in the Appendix below). But I can't figure out how to play back the recorded audio. Capturing with AVCaptureAudioDataOutput works:

self.captureSession = [[AVCaptureSession alloc] init];
AVCaptureDevice *audioCaptureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];

NSError *error = nil;
AVCaptureDeviceInput *audioInput = [AVCaptureDeviceInput deviceInputWithDevice:audioCaptureDevice error:&error];

AVCaptureAudioDataOutput *audioDataOutput = [[AVCaptureAudioDataOutput alloc] init];

self.serialQueue = dispatch_queue_create("audioQueue", NULL);
[audioDataOutput setSampleBufferDelegate:self queue:self.serialQueue];

if (audioInput && [self.captureSession canAddInput:audioInput] && [self.captureSession canAddOutput:audioDataOutput]) {
    [self.captureSession addInput:audioInput];
    [self.captureSession addOutput:audioDataOutput];

    [self.captureSession startRunning];

    // Stop after arbitrary time
    double delayInSeconds = 4.0;
    dispatch_time_t popTime = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(delayInSeconds * NSEC_PER_SEC));
    dispatch_after(popTime, dispatch_get_main_queue(), ^(void){
        [self.captureSession stopRunning];
    });

} else {
    NSLog(@"Couldn't add them; error = %@",error);
}

...but I'm not sure how to implement the callback:

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    ?
}
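
For reference, here's a minimal sketch of a body for that method, using CoreMedia's format-description accessors; it only inspects what each buffer contains, it doesn't play anything:

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    // Sketch only: log the format and size of each incoming buffer.
    CMFormatDescriptionRef formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer);
    const AudioStreamBasicDescription *asbd = CMAudioFormatDescriptionGetStreamBasicDescription(formatDescription);
    NSLog(@"Sample rate = %.0f, channels = %u, frames = %ld",
          asbd->mSampleRate,
          (unsigned int)asbd->mChannelsPerFrame,
          (long)CMSampleBufferGetNumSamples(sampleBuffer));
}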

I've tried getting the data out of the sampleBuffer and playing it with an AVAudioPlayer, copying code from this SO answer, but that code crashes on the appendBytes:length: method:

AudioBufferList audioBufferList;
NSMutableData *data= [NSMutableData data];
CMBlockBufferRef blockBuffer;
CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(sampleBuffer, NULL, &audioBufferList, sizeof(audioBufferList), NULL, NULL, 0, &blockBuffer);

for( int y=0; y< audioBufferList.mNumberBuffers; y++ ){

    AudioBuffer audioBuffer = audioBufferList.mBuffers[y];
    Float32 *frame = (Float32*)audioBuffer.mData;

    NSLog(@"Length = %i",audioBuffer.mDataByteSize);
    [data appendBytes:frame length:audioBuffer.mDataByteSize]; // Crashes here

}

CFRelease(blockBuffer);

NSError *playerError;
AVAudioPlayer *player = [[AVAudioPlayer alloc] initWithData:data error:&playerError];
if(player && !playerError) {
    NSLog(@"Player was valid");
    [player play];
} else {
    NSLog(@"Error = %@",playerError);
}

Edit: The CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer call returns an OSStatus of -12737, which according to the documentation is kCMSampleBufferError_ArrayTooSmall.

Edit 2: Based on this mailing list response, I passed a size_t out parameter as the second argument to ...GetAudioBufferList..., and it returned 40. For now I'm just passing 40 as a hard-coded value, which seems to work (the OSStatus return value is 0, at least).
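
Rather than hard-coding 40, here's a sketch of how the size query could be done per buffer; this assumes, as the mailing-list response suggests, that the failing call still fills in the size_t out parameter:

AudioBufferList stackList;
size_t sizeNeeded = 0;
CMBlockBufferRef blockBuffer = NULL;

// First try with a single-buffer AudioBufferList on the stack.
OSStatus err = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
    sampleBuffer, &sizeNeeded, &stackList, sizeof(stackList),
    NULL, NULL, 0, &blockBuffer);

AudioBufferList *bufferList = &stackList;
bool heapAllocated = false;
if (err == kCMSampleBufferError_ArrayTooSmall) {
    // Non-interleaved stereo needs room for more than one AudioBuffer;
    // allocate the size the first call reported and try again.
    bufferList = (AudioBufferList *)malloc(sizeNeeded);
    heapAllocated = true;
    err = CMSampleBufferGetAudioBufferListWithRetainedBlockBuffer(
        sampleBuffer, NULL, bufferList, sizeNeeded,
        NULL, NULL, 0, &blockBuffer);
}

// ... use bufferList->mBuffers[0 .. mNumberBuffers-1] here ...

if (heapAllocated) free(bufferList);
if (blockBuffer) CFRelease(blockBuffer);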

Now the player's initWithData:error: method gives this error:

Error Domain=NSOSStatusErrorDomain Code=1954115647 "The operation couldn’t be completed. (OSStatus error 1954115647.)", which I'm looking into. (Read as a four-char code, 1954115647 is 'typ?', i.e. kAudioFileUnsupportedFileTypeError, which suggests AVAudioPlayer wants data in a recognized audio file format rather than raw LPCM bytes.)

I've been doing iOS programming for a long time, but I had never used AVFoundation, Core Audio, and the like until now. There seem to be many ways to accomplish the same thing, depending on how low- or high-level you want to go.

Appendix

Recording to a file

Recording to a file using AVCaptureSession:

- (void)applicationDidFinishLaunching:(NSNotification *)aNotification
{

    [[NSNotificationCenter defaultCenter] addObserver:self selector:@selector(captureSessionStartedNotification:) name:AVCaptureSessionDidStartRunningNotification object:nil];
    self.captureSession = [[AVCaptureSession alloc] init];
    AVCaptureDevice *audioCaptureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];

    NSError *error = nil;
    AVCaptureDeviceInput *audioInput = [AVCaptureDeviceInput deviceInputWithDevice:audioCaptureDevice error:&error];

    AVCaptureAudioFileOutput *audioOutput = [[AVCaptureAudioFileOutput alloc] init];


    if (audioInput && [self.captureSession canAddInput:audioInput] && [self.captureSession canAddOutput:audioOutput]) {
            NSLog(@"Can add the inputs and outputs");

            [self.captureSession addInput:audioInput];
            [self.captureSession addOutput:audioOutput];

            [self.captureSession startRunning];

            double delayInSeconds = 5.0;
            dispatch_time_t popTime = dispatch_time(DISPATCH_TIME_NOW, (int64_t)(delayInSeconds * NSEC_PER_SEC));
            dispatch_after(popTime, dispatch_get_main_queue(), ^(void){
                [self.captureSession stopRunning];
            });
        }
        else {
            NSLog(@"Error was = %@",error);
        }
}

- (void)captureSessionStartedNotification:(NSNotification *)notification
{

    AVCaptureSession *session = notification.object;
    id audioOutput  = session.outputs[0];
    NSLog(@"Capture session started; notification = %@",notification);
    NSLog(@"Notification audio output = %@",audioOutput);

    [audioOutput startRecordingToOutputFileURL:[[self class] outputURL] outputFileType:AVFileTypeAppleM4A recordingDelegate:self];
}

+ (NSURL *)outputURL
{
    NSArray *searchPaths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory, NSUserDomainMask, YES);
    NSString *documentPath = [searchPaths objectAtIndex:0];
    NSString *filePath = [documentPath stringByAppendingPathComponent:@"z1.alac"];
    return [NSURL fileURLWithPath:filePath];
}

Recording to a file using AVAudioRecorder:

NSDictionary *recordSettings = [NSDictionary
                                    dictionaryWithObjectsAndKeys:
                                    [NSNumber numberWithInt:AVAudioQualityMin],
                                    AVEncoderAudioQualityKey,
                                    [NSNumber numberWithInt:16],
                                    AVEncoderBitRateKey,
                                    [NSNumber numberWithInt: 2],
                                    AVNumberOfChannelsKey,
                                    [NSNumber numberWithFloat:44100.0], 
                                    AVSampleRateKey,
                                    @(kAudioFormatAppleLossless),
                                    AVFormatIDKey,
                                    nil];


NSError *recorderError;
self.recorder = [[AVAudioRecorder alloc] initWithURL:[[self class] outputURL] settings:recordSettings error:&recorderError];
self.recorder.delegate = self;
if (self.recorder && !recorderError) {
    NSLog(@"Success!");
    [self.recorder recordForDuration:10];
} else {
    NSLog(@"Failure, recorder = %@",self.recorder);
    NSLog(@"Error = %@",recorderError);
}
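
Since the delegate is set above, the matching AVAudioRecorderDelegate callback; a minimal sketch:

- (void)audioRecorderDidFinishRecording:(AVAudioRecorder *)recorder successfully:(BOOL)flag
{
    NSLog(@"Finished recording (successfully = %d) to %@", flag, recorder.url);
}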

1 Answer


OK, I ended up working at a lower level than AVFoundation; I'm not sure whether that was necessary. I read up through Chapter 5 of Learning Core Audio and went with an implementation using Audio Queues. This code was adapted from the code used for recording to a file and playing back a file, so there are surely unneeded bits I've accidentally left in. Also, I don't actually re-enqueue buffers onto the output queue (I should), but this works as a proof of concept. The only file is listed below, and it's also on Github.

//
//  main.m
//  Recorder
//
//  Created by Maximilian Tagher on 8/7/13.
//  Copyright (c) 2013 Tagher. All rights reserved.
//

#import <Foundation/Foundation.h>
#import <AudioToolbox/AudioToolbox.h>

#define kNumberRecordBuffers 3

//#define kNumberPlaybackBuffers 3

#define kPlaybackFileLocation CFSTR("/Users/Max/Music/iTunes/iTunes Media/Music/Taylor Swift/Red/02 Red.m4a")
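// (Leftover from the play-from-file version of this code; not used below.)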

#pragma mark - User Data Struct
// listing 4.3

struct MyRecorder;

typedef struct MyPlayer {
    AudioQueueRef playerQueue;
    SInt64 packetPosition;
    UInt32 numPacketsToRead;
    AudioStreamPacketDescription *packetDescs;
    Boolean isDone;
    struct MyRecorder *recorder;
} MyPlayer;

typedef struct MyRecorder {
    AudioQueueRef recordQueue;
    SInt64      recordPacket;
    Boolean     running;
    MyPlayer    *player;
} MyRecorder;

#pragma mark - Utility functions

// Listing 4.2
static void CheckError(OSStatus error, const char *operation) {
    if (error == noErr) return;

    char errorString[20];
    // See if it appears to be a 4-char-code
    *(UInt32 *)(errorString + 1) = CFSwapInt32HostToBig(error);
    if (isprint(errorString[1]) && isprint(errorString[2])
        && isprint(errorString[3]) && isprint(errorString[4])) {
        errorString[0] = errorString[5] = '\'';
        errorString[6] = '\0';
    } else {
        // No, format it as an integer
        NSLog(@"Was integer");
        sprintf(errorString, "%d",(int)error);
    }

    fprintf(stderr, "Error: %s (%s)\n",operation,errorString);

    exit(1);
}

OSStatus MyGetDefaultInputDeviceSampleRate(Float64 *outSampleRate)
{
    OSStatus error;
    AudioDeviceID deviceID = 0;

    AudioObjectPropertyAddress propertyAddress;
    UInt32 propertySize;
    propertyAddress.mSelector = kAudioHardwarePropertyDefaultInputDevice;
    propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
    propertyAddress.mElement = 0;
    propertySize = sizeof(AudioDeviceID);
    error = AudioHardwareServiceGetPropertyData(kAudioObjectSystemObject,
                                                &propertyAddress, 0, NULL,
                                                &propertySize,
                                                &deviceID);

    if (error) return error;

    propertyAddress.mSelector = kAudioDevicePropertyNominalSampleRate;
    propertyAddress.mScope = kAudioObjectPropertyScopeGlobal;
    propertyAddress.mElement = 0;

    propertySize = sizeof(Float64);
    error = AudioHardwareServiceGetPropertyData(deviceID,
                                                &propertyAddress, 0, NULL,
                                                &propertySize,
                                                outSampleRate);
    return error;
}

// Recorder
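// NB: this helper and MyCopyEncoderCookieToQueue below are leftovers from
// the record-to-file / play-from-file versions of this code; neither is
// actually called in this playthrough demo.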
static void MyCopyEncoderCookieToFile(AudioQueueRef queue, AudioFileID theFile)
{
    OSStatus error;
    UInt32 propertySize;
    error = AudioQueueGetPropertySize(queue, kAudioConverterCompressionMagicCookie, &propertySize);

    if (error == noErr && propertySize > 0) {
        Byte *magicCookie = (Byte *)malloc(propertySize);
        CheckError(AudioQueueGetProperty(queue, kAudioQueueProperty_MagicCookie, magicCookie, &propertySize), "Couldn't get audio queue's magic cookie");

        CheckError(AudioFileSetProperty(theFile, kAudioFilePropertyMagicCookieData, propertySize, magicCookie), "Couldn't set audio file's magic cookie");

        free(magicCookie);
    }
}

// Player
static void MyCopyEncoderCookieToQueue(AudioFileID theFile, AudioQueueRef queue)
{
    UInt32 propertySize;
    // Just check for presence of cookie
    OSStatus result = AudioFileGetProperty(theFile, kAudioFilePropertyMagicCookieData, &propertySize, NULL);

    if (result == noErr && propertySize != 0) {
        Byte *magicCookie = (UInt8*)malloc(sizeof(UInt8) * propertySize);
        CheckError(AudioFileGetProperty(theFile, kAudioFilePropertyMagicCookieData, &propertySize, magicCookie), "Get cookie from file failed");

        CheckError(AudioQueueSetProperty(queue, kAudioQueueProperty_MagicCookie, magicCookie, propertySize), "Set cookie on file failed");

        free(magicCookie);
    }
}

static int MyComputeRecordBufferSize(const AudioStreamBasicDescription *format, AudioQueueRef queue, float seconds)
{
    int packets, frames, bytes;

    frames = (int)ceil(seconds * format->mSampleRate);

    if (format->mBytesPerFrame > 0) { // Not variable
        bytes = frames * format->mBytesPerFrame;
    } else { // variable bytes per frame
        UInt32 maxPacketSize;
        if (format->mBytesPerPacket > 0) {
            // Constant packet size
            maxPacketSize = format->mBytesPerPacket;
        } else {
            // Get the largest single packet size possible
            UInt32 propertySize = sizeof(maxPacketSize);
            CheckError(AudioQueueGetProperty(queue, kAudioConverterPropertyMaximumOutputPacketSize, &maxPacketSize, &propertySize), "Couldn't get queue's maximum output packet size");
        }

        if (format->mFramesPerPacket > 0) {
            packets = frames / format->mFramesPerPacket;
        } else {
            // Worst case scenario: 1 frame in a packet
            packets = frames;
        }

        // Sanity check

        if (packets == 0) {
            packets = 1;
        }
        bytes = packets * maxPacketSize;

    }

    return bytes;
}

void CalculateBytesForPlaythrough(AudioQueueRef queue,
                                  AudioStreamBasicDescription inDesc,
                                  Float64 inSeconds,
                                  UInt32 *outBufferSize,
                                  UInt32 *outNumPackets)
{
    UInt32 maxPacketSize;
    UInt32 propSize = sizeof(maxPacketSize);
    CheckError(AudioQueueGetProperty(queue,
                                    kAudioQueueProperty_MaximumOutputPacketSize,
                                    &maxPacketSize, &propSize), "Couldn't get file's max packet size");

    static const int maxBufferSize = 0x10000;
    static const int minBufferSize = 0x4000;

    if (inDesc.mFramesPerPacket) {
        Float64 numPacketsForTime = inDesc.mSampleRate / inDesc.mFramesPerPacket * inSeconds;
        *outBufferSize = numPacketsForTime * maxPacketSize;
    } else {
        *outBufferSize = maxBufferSize > maxPacketSize ? maxBufferSize : maxPacketSize;
    }

    if (*outBufferSize > maxBufferSize &&
        *outBufferSize > maxPacketSize) {
        *outBufferSize = maxBufferSize;
    } else {
        if (*outBufferSize < minBufferSize) {
            *outBufferSize = minBufferSize;
        }
    }
    *outNumPackets = *outBufferSize / maxPacketSize;
}


#pragma mark - Record callback function

static void MyAQInputCallback(void *inUserData,
                              AudioQueueRef inQueue,
                              AudioQueueBufferRef inBuffer,
                              const AudioTimeStamp *inStartTime,
                              UInt32 inNumPackets,
                              const AudioStreamPacketDescription *inPacketDesc)
{
//    NSLog(@"Input callback");
//    NSLog(@"Input thread = %@",[NSThread currentThread]);
    MyRecorder *recorder = (MyRecorder *)inUserData;
    MyPlayer *player = recorder->player;

    if (inNumPackets > 0) {

        // Enqueue on the output Queue!
        AudioQueueBufferRef outputBuffer;
        CheckError(AudioQueueAllocateBuffer(player->playerQueue, inBuffer->mAudioDataBytesCapacity, &outputBuffer), "Input callback failed to allocate new output buffer");


        memcpy(outputBuffer->mAudioData, inBuffer->mAudioData, inBuffer->mAudioDataByteSize);
        outputBuffer->mAudioDataByteSize = inBuffer->mAudioDataByteSize;

//        [NSData dataWithBytes:inBuffer->mAudioData length:inBuffer->mAudioDataByteSize];

        // Assuming LPCM so no packet descriptions
        CheckError(AudioQueueEnqueueBuffer(player->playerQueue, outputBuffer, 0, NULL), "Enqueing the buffer in input callback failed");
        recorder->recordPacket += inNumPackets;
    }


    if (recorder->running) {
        CheckError(AudioQueueEnqueueBuffer(inQueue, inBuffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
    }
}

static void MyAQOutputCallback(void *inUserData, AudioQueueRef inAQ, AudioQueueBufferRef inCompleteAQBuffer)
{
//    NSLog(@"Output thread = %@",[NSThread currentThread]);
//    NSLog(@"Output callback");
    MyPlayer *aqp = (MyPlayer *)inUserData;
    MyRecorder *recorder = aqp->recorder;
    if (aqp->isDone) return;
}

int main(int argc, const char * argv[])
{

    @autoreleasepool {
        MyRecorder recorder = {0};
        MyPlayer player = {0};

        recorder.player = &player;
        player.recorder = &recorder;

        AudioStreamBasicDescription recordFormat;
        memset(&recordFormat, 0, sizeof(recordFormat));

        recordFormat.mFormatID = kAudioFormatLinearPCM;
        recordFormat.mChannelsPerFrame = 2; //stereo

        // Begin my changes to make LPCM work
            recordFormat.mBitsPerChannel = 16;
            // Haven't checked if each of these flags is necessary, this is just what Chapter 2 used for LPCM.
            recordFormat.mFormatFlags = kAudioFormatFlagIsBigEndian | kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;

        // end my changes

        MyGetDefaultInputDeviceSampleRate(&recordFormat.mSampleRate);


        // Have Core Audio fill in the remaining ASBD fields (bytes per packet,
        // frames per packet, etc.) based on the format ID and flags set above.
        UInt32 propSize = sizeof(recordFormat);
        CheckError(AudioFormatGetProperty(kAudioFormatProperty_FormatInfo,
                                          0,
                                          NULL,
                                          &propSize,
                                          &recordFormat), "AudioFormatGetProperty failed");


        AudioQueueRef queue = NULL;

        CheckError(AudioQueueNewInput(&recordFormat, MyAQInputCallback, &recorder, NULL, NULL, 0, &queue), "AudioQueueNewInput failed");

        recorder.recordQueue = queue;

        // Fills in ABSD a little more
        UInt32 size = sizeof(recordFormat);
        CheckError(AudioQueueGetProperty(queue,
                                         kAudioConverterCurrentOutputStreamDescription,
                                         &recordFormat,
                                         &size), "Couldn't get queue's format");

//        MyCopyEncoderCookieToFile(queue, recorder.recordFile);

        int bufferByteSize = MyComputeRecordBufferSize(&recordFormat,queue,0.5);
        NSLog(@"%d",__LINE__);
        // Create and Enqueue buffers
        int bufferIndex;
        for (bufferIndex = 0;
             bufferIndex < kNumberRecordBuffers;
             ++bufferIndex) {
            AudioQueueBufferRef buffer;
            CheckError(AudioQueueAllocateBuffer(queue,
                                                bufferByteSize,
                                                &buffer), "AudioQueueBufferRef failed");
            CheckError(AudioQueueEnqueueBuffer(queue, buffer, 0, NULL), "AudioQueueEnqueueBuffer failed");
        }

        // PLAYBACK SETUP

        AudioQueueRef playbackQueue;
        CheckError(AudioQueueNewOutput(&recordFormat,
                                       MyAQOutputCallback,
                                       &player, NULL, NULL, 0,
                                       &playbackQueue), "AudioOutputNewQueue failed");
        player.playerQueue = playbackQueue;


        UInt32 playBufferByteSize;
        CalculateBytesForPlaythrough(queue, recordFormat, 0.1, &playBufferByteSize, &player.numPacketsToRead);

        bool isFormatVBR = (recordFormat.mBytesPerPacket == 0
                            || recordFormat.mFramesPerPacket == 0);
        if (isFormatVBR) {
            NSLog(@"Not supporting VBR");
            player.packetDescs = (AudioStreamPacketDescription*) malloc(sizeof(AudioStreamPacketDescription) * player.numPacketsToRead);
        } else {
            player.packetDescs = NULL;
        }

        // END PLAYBACK

        recorder.running = TRUE;
        player.isDone = false;


        CheckError(AudioQueueStart(playbackQueue, NULL), "AudioQueueStart failed");
        CheckError(AudioQueueStart(queue, NULL), "AudioQueueStart failed");


        CFRunLoopRunInMode(kCFRunLoopDefaultMode, 10, TRUE);

        printf("Playing through, press <return> to stop:\n");
        getchar();

        printf("* done *\n");
        recorder.running = FALSE;
        player.isDone = true;
        CheckError(AudioQueueStop(playbackQueue, false), "Failed to stop playback queue");

        CheckError(AudioQueueStop(queue, TRUE), "AudioQueueStop failed");

        AudioQueueDispose(playbackQueue, FALSE);
        AudioQueueDispose(queue, TRUE);

    }
    return 0;
}
answered 2013-08-10 at 00:19