1

AudioQueueStart のプロパティを設定するにはどうすればよいですか? デバイスのサウンドを録音したいのですが、「kAudioSessionCategory_PlayAndRecord」を使用するように教えてもらいました。

UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord;
AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);

しかし、私の単純なコードを適用すると、機能しません。プロパティを設定するにはどうすればよいですか?

#include <AudioToolbox/AudioToolbox.h>
#include <AudioToolbox/AudioConverter.h>
#include <AudioToolbox/AudioFile.h>
#include <AudioToolbox/AudioQueue.h>

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/select.h>
#include <sys/stat.h>

#define AUDIO_BUFFERS 3

/*
 * Shared state between main() and the AudioQueue input callback.
 * One instance lives on main()'s stack and is passed as the user-data
 * pointer to AudioQueueNewInput.
 */
typedef struct AQCallbackStruct
{
    AudioStreamBasicDescription mDataFormat;    /* capture format (linear PCM, see main) */
    AudioQueueRef queue;                        /* the recording queue that owns mBuffers */
    AudioQueueBufferRef mBuffers[AUDIO_BUFFERS];/* buffers cycled between queue and callback */
    AudioFileID outputFile;                     /* AIFF file the callback writes into */
    unsigned long frameSize;                    /* frames per buffer requested in main() */
    long long recPtr;                           /* next packet index in outputFile (SInt64) */
    int run;                                    /* nonzero while recording; callback stops
                                                   re-enqueueing buffers once cleared */
} AQCallbackStruct;

static void AQInputCallback(
    void                                 *aqr,
    AudioQueueRef                        inQ,
    AudioQueueBufferRef                  inQB,
    const AudioTimeStamp                 *timestamp,
    unsigned long                        frameSize,
    const AudioStreamPacketDescription   *mDataFormat)
{
    AQCallbackStruct *aqc = (AQCallbackStruct *) aqr;

    /* Write data to file */
    if (AudioFileWritePackets (aqc->outputFile, false, inQB->mAudioDataByteSize,
    mDataFormat, aqc->recPtr, &frameSize, inQB->mAudioData) == noErr)
    {
    aqc->recPtr += frameSize;
    }

    /* Don't re-queue the sound buffers if we're supposed to stop recording */
    if (!aqc->run)
      return;

    AudioQueueEnqueueBuffer (aqc->queue, inQB, 0, NULL);
}

int main(int argc, char *argv[])
{
    AQCallbackStruct aqc;
    AudioFileTypeID fileFormat;
    CFURLRef filename;
    struct timeval tv;
    int i;

    if (argc < 3)
    {
    fprintf(stderr, "Syntax: %s [filename.aif] [seconds]", argv[0]);
    exit(EXIT_FAILURE);
    }

    //how ?
    //UInt32 sessionCategory = kAudioSessionCategory_PlayAndRecord;
    //AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(sessionCategory), &sessionCategory);

    aqc.mDataFormat.mFormatID = kAudioFormatLinearPCM;//kAudioFormatLinearPCM;
    aqc.mDataFormat.mSampleRate = 44100.0;
    aqc.mDataFormat.mChannelsPerFrame = 2;
    aqc.mDataFormat.mBitsPerChannel = 16;
    aqc.mDataFormat.mBytesPerPacket =
    aqc.mDataFormat.mBytesPerFrame =
    aqc.mDataFormat.mChannelsPerFrame * sizeof (short int);
    aqc.mDataFormat.mFramesPerPacket = 1;
    aqc.mDataFormat.mFormatFlags =
        kLinearPCMFormatFlagIsBigEndian
      | kLinearPCMFormatFlagIsSignedInteger
      | kLinearPCMFormatFlagIsPacked;
    aqc.frameSize = 735;

    AudioQueueNewInput (&aqc.mDataFormat, AQInputCallback, &aqc, NULL,
    kCFRunLoopCommonModes, 0, &aqc.queue);

    /* Create output file */

    fileFormat = kAudioFileAIFFType;
    filename = CFURLCreateFromFileSystemRepresentation (NULL, (const UInt8*)argv[1], strlen (argv[1]), false);

    AudioFileCreateWithURL (
    filename,
    fileFormat,
    &aqc.mDataFormat,
    kAudioFileFlags_EraseFile,
    &aqc.outputFile
    );

    /* Initialize the recording buffers */

    for(i=0; i<AUDIO_BUFFERS; i++)
    {
    AudioQueueAllocateBuffer (aqc.queue, aqc.frameSize, &aqc.mBuffers[i]);
    AudioQueueEnqueueBuffer (aqc.queue, aqc.mBuffers[i], 0, NULL);
    //AudioQueueEnqueueBuffer (aqc.queue, aqc.mBuffers[i], 0x11, (const AudioStreamPacketDescription)"\x22");
    }

    aqc.recPtr = 0;
    aqc.run = 1;

    AudioQueueStart (aqc.queue, NULL);

    /* Hang around for a while while the recording takes place */

    tv.tv_sec = atof(argv[2]);
    tv.tv_usec = 0;
    select(0, NULL, NULL, NULL, &tv);

    /* Shut down recording */

    AudioQueueStop (aqc.queue, true);
    aqc.run = 0;

    AudioQueueDispose (aqc.queue, true);
    AudioFileClose (aqc.outputFile);

    exit(EXIT_SUCCESS);
}
4

回答 0 件