4

OpenAL を使用して iOS でサウンド キャプチャを実行しようとしています (クロスプラットフォーム ライブラリを作成しているため、iOS 固有の方法でサウンドを録音することは避けています)。既定の OpenAL キャプチャは機能しませんが、既知の回避策があります。キャプチャを開始する前に出力コンテキストを開きます。このソリューションは、iOS 5.0 でうまくいきました。

ただし、iOS 5.1.1 では、この回避策は最初に記録しようとしたサンプルに対してのみ有効です。(キャプチャを開始する前に AudioSession を PlayAndRecord に切り替え、デフォルトの出力デバイスを開きます。サンプルを録音した後、デバイスを閉じ、セッションを元の状態に戻します。) 2 番目のサンプルでは、出力コンテキストを再度開いても効果がなく、音はキャプチャされません。

この問題に対処する既知の方法はありますか?

// Here's what I do before starting the recording
// Here's what I do before starting the recording.
// Save the current session category so it can be restored afterwards.
oldAudioSessionCategory = [audioSession category];
[audioSession setCategory:AVAudioSessionCategoryPlayAndRecord error:nil];
[audioSession setActive:YES error:nil];
// We need to have an active context. If there is none, create one.
// (Workaround: iOS OpenAL capture only works while an output context is open.)
if (!alcGetCurrentContext()) {
    outputDevice = alcOpenDevice(NULL);
    outputContext = alcCreateContext(outputDevice, NULL);
    alcMakeContextCurrent(outputContext);
}

// Capture itself
inputDevice = alcCaptureOpenDevice(NULL, frequency, FORMAT, bufferSize);
....
alcCaptureCloseDevice(inputDevice);

// Restoring the audio state to whatever it had been before capture
if (outputContext) {
    // Detach the context BEFORE destroying it. Without this, on iOS 5.1.1
    // only the first recording works: subsequent attempts to reopen the
    // output context capture no sound at all.
    alcMakeContextCurrent(NULL);
    alcDestroyContext(outputContext);
    alcCloseDevice(outputDevice);
}
[[AVAudioSession sharedInstance] setCategory:oldAudioSessionCategory 
                                 error:nil];
4

2 に答える 2

3

キャプチャ拡張機能をエミュレートするために使用するコードを次に示します。いくつかのコメント:

  1. プロジェクト全体では、スレッド化プリミティブなどに OpenKD が使用されています。おそらく、これらの呼び出しを置き換える必要があります。
  2. キャプチャを開始する際の遅延と戦わなければなりませんでした。その結果、常に音声入力を読み取り、不要なときは捨て続けています。(このような解決策は、たとえば他所でも提案されています。) これには、マイクの制御を解放するために、onResignActive 通知をキャッチする必要があります。このようなクラッジを使うかどうかはお好み次第です。
  3. `alcGetIntegerv(device, ALC_CAPTURE_SAMPLES, 1, &res)` の代わりに、別の関数 `alcGetAvailableSamples` を定義する必要がありました。

つまり、このコードをそのままプロジェクトで使用できる可能性は低いですが、必要に応じて調整できることを願っています。

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <KD/kd.h>
#include <AL/al.h>
#include <AL/alc.h>

#include <AudioToolbox/AudioToolbox.h>
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>

#include "KD/kdext.h"

/* All state for one emulated OpenAL capture device. A single instance is
 * cached process-wide (see cachedInData below); access to the ring-ish
 * buffer fields is serialized by `mutex` because the AudioUnit input
 * callback runs on a CoreAudio thread. */
struct InputDeviceData {
    int id;                 // handle value returned to OpenAL clients (starts at 1)
    KDThreadMutex *mutex;   // guards buf/bufFilledBytes against the render callback
    AudioUnit audioUnit;    // the I/O unit used to pull microphone data
    int nChannels;          // channels per frame derived from the ALC format
    int frequency;          // sample rate requested at open time (Hz)
    ALCenum format;         // AL_FORMAT_MONO8/MONO16/STEREO8/STEREO16
    int sampleSize;         // bytes per frame derived from the ALC format
    uint8_t *buf;           // accumulation buffer for captured PCM
    size_t bufSize;    // in bytes
    size_t bufFilledBytes;  // in bytes
    bool started;           // capture enabled (alcCaptureStart .. alcCaptureStop)
};

// Single cached capture device; rebuilt when parameters change or the app
// resigns active (the mic must be released in the background).
static struct InputDeviceData *cachedInData = NULL;

static OSStatus renderCallback (void                        *inRefCon,
                                AudioUnitRenderActionFlags  *ioActionFlags,
                                const AudioTimeStamp        *inTimeStamp,
                                UInt32                      inBusNumber,
                                UInt32                      inNumberFrames,
                                AudioBufferList             *ioData);
static AudioUnit getAudioUnit();
static void setupNotifications();
static void destroyCachedInData();
static struct InputDeviceData *setupCachedInData(AudioUnit audioUnit, ALCuint frequency, ALCenum format, ALCsizei bufferSizeInSamples);
static struct InputDeviceData *getInputDeviceData(AudioUnit audioUnit, ALCuint frequency, ALCenum format, ALCsizei bufferSizeInSamples);

/** Catches UIApplicationWillResignActiveNotification so the cached capture
 *  AudioUnit can be torn down and the microphone released when the app goes
 *  inactive. NSNotificationCenter is used instead of CFNotificationCenter
 *  only because there is no published name for the
 *  WillResignActive/WillBecomeActive notifications in CoreFoundation.
 */
@interface ALCNotificationObserver : NSObject
- (void)onResignActive;
@end
@implementation ALCNotificationObserver
// Drops the cached capture state; the next alcCaptureOpenDevice() rebuilds it.
- (void)onResignActive {
    destroyCachedInData();
}
@end

/** Registers (exactly once) for the will-resign-active notification so the
 *  capture unit is destroyed when the app goes to the background. The
 *  observer deliberately lives for the whole process lifetime and is never
 *  removed. dispatch_once replaces the original unguarded lazy init, which
 *  was not thread-safe and used NULL instead of nil for an ObjC pointer. */
static void setupNotifications() {
    static ALCNotificationObserver *observer = nil;
    static dispatch_once_t once;
    dispatch_once(&once, ^{
        observer = [[ALCNotificationObserver alloc] init];
        [[NSNotificationCenter defaultCenter] addObserver:observer
                                                 selector:@selector(onResignActive)
                                                     name:UIApplicationWillResignActiveNotification
                                                   object:nil];
    });
}

/** AudioUnit input callback: pulls inNumberFrames of microphone PCM from the
 *  unit and appends it to the capture buffer of the InputDeviceData passed
 *  in inRefCon. Runs on a CoreAudio thread, hence the mutex around all
 *  buffer bookkeeping. While `started` is false the data is rendered but
 *  discarded, which keeps the mic "warm" and avoids start-up latency. */
static OSStatus renderCallback (void                        *inRefCon,
                                AudioUnitRenderActionFlags  *ioActionFlags,
                                const AudioTimeStamp        *inTimeStamp,
                                UInt32                      inBusNumber,
                                UInt32                      inNumberFrames,
                                AudioBufferList             *ioData) {
    struct InputDeviceData *inData = (struct InputDeviceData*)inRefCon;

    kdThreadMutexLock(inData->mutex);
    size_t bytesToRender = inNumberFrames * inData->sampleSize;
    if (bytesToRender + inData->bufFilledBytes <= inData->bufSize) {
        OSStatus status;
        struct AudioBufferList audioBufferList; // 1 buffer is declared inside the structure itself.
        audioBufferList.mNumberBuffers = 1;
        audioBufferList.mBuffers[0].mNumberChannels = inData->nChannels;
        audioBufferList.mBuffers[0].mDataByteSize = (UInt32)bytesToRender;
        audioBufferList.mBuffers[0].mData = inData->buf + inData->bufFilledBytes;
        status = AudioUnitRender(inData->audioUnit, 
                                 ioActionFlags, 
                                 inTimeStamp, 
                                 inBusNumber, 
                                 inNumberFrames, 
                                 &audioBufferList);
        // Only account for the new bytes when the render actually succeeded;
        // the original ignored `status`, so a failed render could hand
        // uninitialized buffer contents to the client as captured audio.
        if (status != noErr) {
            kdLogFormatMessage("%s: AudioUnitRender failed (%d)", __FUNCTION__, (int)status);
        } else if (inData->started) {
            inData->bufFilledBytes += bytesToRender;
        }
    } else {
        kdLogFormatMessage("%s: buffer overflow", __FUNCTION__);
    }
    kdThreadMutexUnlock(inData->mutex);

    return noErr;
}

/** Returns the process-wide voice-processing I/O AudioUnit, creating it on
 *  first use. Returns NULL (after logging) if the component cannot be found
 *  or instantiated; callers receive a NULL unit in that case.
 *  Fix: the original passed a possibly-NULL AudioComponent straight into
 *  AudioComponentInstanceNew when AudioComponentFindNext found no match. */
static AudioUnit getAudioUnit() {
    static AudioUnit audioUnit = NULL;

    if (!audioUnit) {
        AudioComponentDescription ioUnitDescription;

        ioUnitDescription.componentType          = kAudioUnitType_Output;
        ioUnitDescription.componentSubType       = kAudioUnitSubType_VoiceProcessingIO;
        ioUnitDescription.componentManufacturer  = kAudioUnitManufacturer_Apple;
        ioUnitDescription.componentFlags         = 0;
        ioUnitDescription.componentFlagsMask     = 0;

        AudioComponent foundIoUnitReference = AudioComponentFindNext(NULL,
                                                                     &ioUnitDescription);
        if (foundIoUnitReference != NULL) {
            AudioComponentInstanceNew(foundIoUnitReference,
                                      &audioUnit);
        }

        if (audioUnit == NULL) {
            kdLogMessage("Could not obtain AudioUnit");
        }
    }

    return audioUnit;
}

/** Stops and uninitializes the cached capture AudioUnit and frees all
 *  associated state. Safe to call when nothing is cached (no-op).
 *  The AudioUnit instance itself is intentionally kept alive for reuse by
 *  getAudioUnit(). Fix: removed the dead `status` local — both OSStatus
 *  results were stored but never read (set-but-unused warning). */
static void destroyCachedInData() {
    if (cachedInData) {
        AudioOutputUnitStop(cachedInData->audioUnit);
        AudioUnitUninitialize(cachedInData->audioUnit);
        free(cachedInData->buf);
        kdThreadMutexFree(cachedInData->mutex);
        free(cachedInData);
        cachedInData = NULL;
    }
}

/** Allocates the cached capture state for the requested OpenAL parameters,
 *  configures the I/O unit's input element to deliver that PCM format,
 *  installs renderCallback, and starts the unit.
 *  Supported formats: AL_FORMAT_MONO8/MONO16/STEREO8/STEREO16.
 *  Returns the new state (also stored in the global cachedInData). */
static struct InputDeviceData *setupCachedInData(AudioUnit audioUnit, ALCuint frequency, ALCenum format, ALCsizei bufferSizeInSamples) {
    static int idCount = 0;
    OSStatus status;
    // Derive the PCM frame layout from the OpenAL capture format.
    int bytesPerFrame = (format == AL_FORMAT_MONO8) ? 1 :
                        (format == AL_FORMAT_MONO16) ? 2 :
                        (format == AL_FORMAT_STEREO8) ? 2 :
                        (format == AL_FORMAT_STEREO16) ? 4 : -1;
    int channelsPerFrame = (format == AL_FORMAT_MONO8) ? 1 :
                           (format == AL_FORMAT_MONO16) ? 1 :
                           (format == AL_FORMAT_STEREO8) ? 2 :
                           (format == AL_FORMAT_STEREO16) ? 2 : -1;
    int bitsPerChannel = (format == AL_FORMAT_MONO8) ? 8 :
                         (format == AL_FORMAT_MONO16) ? 16 :
                         (format == AL_FORMAT_STEREO8) ? 8 :
                         (format == AL_FORMAT_STEREO16) ? 16 : -1;

    cachedInData = malloc(sizeof(struct InputDeviceData));
    cachedInData->id = ++idCount;   // ids start at 1, so 0 is never a valid handle
    cachedInData->format = format;
    cachedInData->frequency = frequency;
    cachedInData->mutex = kdThreadMutexCreate(NULL);
    cachedInData->audioUnit = audioUnit;
    cachedInData->nChannels = channelsPerFrame;
    cachedInData->sampleSize = bytesPerFrame;
    cachedInData->bufSize = bufferSizeInSamples * bytesPerFrame;
    cachedInData->buf = malloc(cachedInData->bufSize);
    cachedInData->bufFilledBytes = 0;
    cachedInData->started = false;

    // Enable capture on the INPUT scope of bus 1 — the microphone element of
    // the I/O unit. (The original called this `enableOutput`, which was
    // misleading: this switch turns the mic on, not the speaker.)
    UInt32 enableInput = 1;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_EnableIO,
                                  kAudioUnitScope_Input,
                                  1,                          // bus 1 == input element
                                  &enableInput, sizeof(enableInput));

    // Client-side PCM format we want to pull from the microphone.
    // NOTE(review): OpenAL 8-bit formats are unsigned PCM, but these flags
    // request signed integers — verify if MONO8/STEREO8 capture is ever used.
    struct AudioStreamBasicDescription basicDescription;
    basicDescription.mSampleRate = (Float64)frequency;
    basicDescription.mFormatID = kAudioFormatLinearPCM;
    basicDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
    basicDescription.mBytesPerPacket = bytesPerFrame;
    basicDescription.mFramesPerPacket = 1;
    basicDescription.mBytesPerFrame = bytesPerFrame;
    basicDescription.mChannelsPerFrame = channelsPerFrame;
    basicDescription.mBitsPerChannel = bitsPerChannel;
    basicDescription.mReserved = 0;

    status = AudioUnitSetProperty(audioUnit,
                                  kAudioUnitProperty_StreamFormat,
                                  kAudioUnitScope_Output,     // output side of the input element
                                  1,                          // bus 1 == input element
                                  &basicDescription, sizeof(basicDescription));

    // renderCallback pulls mic data and appends it to cachedInData->buf.
    AURenderCallbackStruct renderCallbackStruct;
    renderCallbackStruct.inputProc = renderCallback;
    renderCallbackStruct.inputProcRefCon = cachedInData;
    status = AudioUnitSetProperty(audioUnit,
                                  kAudioOutputUnitProperty_SetInputCallback,
                                  kAudioUnitScope_Output,
                                  1,                          // bus 1 == input element
                                  &renderCallbackStruct, sizeof(renderCallbackStruct));

    // The unit must be initialized before it can be started; this also pairs
    // with the AudioUnitUninitialize() call in destroyCachedInData(), which
    // previously had no matching initialize.
    status = AudioUnitInitialize(audioUnit);
    status = AudioOutputUnitStart(cachedInData->audioUnit);

    return cachedInData;
}

/** Returns capture state matching the requested parameters. The cached
 *  instance is reused when compatible; an incompatible one (different rate,
 *  format, or buffer size) is destroyed first — which is only legal while it
 *  is not actively capturing — and a fresh one is built. Also installs the
 *  resign-active notification hook on first setup. */
static struct InputDeviceData *getInputDeviceData(AudioUnit audioUnit, ALCuint frequency, ALCenum format, ALCsizei bufferSizeInSamples) {
    if (cachedInData) {
        bool sameFrequency = (cachedInData->frequency == frequency);
        bool sameFormat    = (cachedInData->format == format);
        bool sameCapacity  =
            (cachedInData->bufSize / cachedInData->sampleSize == (size_t)bufferSizeInSamples);
        if (!(sameFrequency && sameFormat && sameCapacity)) {
            kdAssert(!cachedInData->started);
            destroyCachedInData();
        }
    }

    if (cachedInData == NULL) {
        setupCachedInData(audioUnit, frequency, format, bufferSizeInSamples);
        setupNotifications();
    }

    return cachedInData;
}


/** OpenAL capture-extension shim: opens the (single, default) capture
 *  "device". Only devicename == NULL is supported. The returned ALCdevice*
 *  is not a real pointer — it is a small integer id smuggled through the
 *  pointer type so stale handles can be detected after the cache is rebuilt.
 *  Fix: go through intptr_t; a direct int-to-pointer cast is a conversion of
 *  mismatched width on 64-bit targets. */
ALC_API ALCdevice* ALC_APIENTRY alcCaptureOpenDevice(const ALCchar *devicename, ALCuint frequency, ALCenum format, ALCsizei buffersizeInSamples) {
    kdAssert(devicename == NULL);    

    AudioUnit audioUnit = getAudioUnit();
    struct InputDeviceData *res = getInputDeviceData(audioUnit, frequency, format, buffersizeInSamples);
    return (ALCdevice*)(intptr_t)res->id;
}

/** OpenAL capture-extension shim: "closes" the capture device. The cached
 *  AudioUnit state is deliberately NOT torn down here (it is reused to avoid
 *  start-up latency; teardown happens on resign-active or on a reopen with
 *  different parameters), so closing just stops capturing.
 *  Fix: return the API's own ALC_TRUE constant instead of C99 `true`
 *  (same value, correct ALCboolean idiom). */
ALC_API ALCboolean ALC_APIENTRY alcCaptureCloseDevice(ALCdevice *device) {
    alcCaptureStop(device);
    return ALC_TRUE;
}

/** OpenAL capture-extension shim: start accumulating microphone samples for
 *  the given capture handle. A stale handle (id no longer matching the
 *  cached state) is logged and ignored.
 *  Fix: compare via intptr_t — a direct (int) cast of a pointer truncates on
 *  64-bit; also use `true` for the C `bool` field instead of ObjC TRUE. */
ALC_API void ALC_APIENTRY alcCaptureStart(ALCdevice *device) {
    if (!cachedInData || (int)(intptr_t)device != cachedInData->id) {
        // may happen after the app loses and regains active status.
        kdLogFormatMessage("Attempt to start a stale AL capture device");
        return;
    }
    cachedInData->started = true;
}

/** OpenAL capture-extension shim: stop accumulating microphone samples.
 *  Already-captured samples remain readable via alcCaptureSamples. A stale
 *  handle is logged and ignored.
 *  Fix: intptr_t-safe handle comparison; `false` for the C bool field. */
ALC_API void ALC_APIENTRY alcCaptureStop(ALCdevice *device) {
    if (!cachedInData || (int)(intptr_t)device != cachedInData->id) {
        // may happen after the app loses and regains active status.
        kdLogFormatMessage("Attempt to stop a stale AL capture device");
        return;
    }
    cachedInData->started = false;
}

/** Replacement for alcGetIntegerv(device, ALC_CAPTURE_SAMPLES, ...): returns
 *  the number of whole sample frames currently buffered for the device, or 0
 *  for a stale handle. The count is read under the mutex because the render
 *  callback updates it from a CoreAudio thread.
 *  Fix: intptr_t-safe handle comparison (direct pointer-to-int truncates on
 *  64-bit targets). */
ALC_API ALCint ALC_APIENTRY alcGetAvailableSamples(ALCdevice *device) {
    if (!cachedInData || (int)(intptr_t)device != cachedInData->id) {
        // may happen after the app loses and regains active status.
        kdLogFormatMessage("Attempt to get sample count from a stale AL capture device");
        return 0;
    }
    ALCint res;
    kdThreadMutexLock(cachedInData->mutex);
    res = cachedInData->bufFilledBytes / cachedInData->sampleSize;
    kdThreadMutexUnlock(cachedInData->mutex);
    return res;
}

/** OpenAL capture-extension shim: copies `samples` frames into `buffer` and
 *  compacts the remainder to the front of the internal buffer. The caller
 *  must not request more frames than alcGetAvailableSamples reported.
 *  Fixes: intptr_t-safe handle comparison, and the bufFilledBytes assertion
 *  moved inside the mutex — it previously read a field the render callback
 *  mutates concurrently. */
ALC_API void ALC_APIENTRY alcCaptureSamples(ALCdevice *device, ALCvoid *buffer, ALCsizei samples) {    
    if (!cachedInData || (int)(intptr_t)device != cachedInData->id) {
        // may happen after the app loses and regains active status.
        kdLogFormatMessage("Attempt to get samples from a stale AL capture device");
        return;
    }
    size_t bytesToCapture = samples * cachedInData->sampleSize;
    kdAssert(cachedInData->started);

    kdThreadMutexLock(cachedInData->mutex);
    // Read bufFilledBytes only while holding the mutex.
    kdAssert(bytesToCapture <= cachedInData->bufFilledBytes);
    memcpy(buffer, cachedInData->buf, bytesToCapture);
    // Shift the unread tail down; regions overlap, hence memmove.
    memmove(cachedInData->buf, cachedInData->buf + bytesToCapture, cachedInData->bufFilledBytes - bytesToCapture);
    cachedInData->bufFilledBytes -= bytesToCapture;
    kdThreadMutexUnlock(cachedInData->mutex);
}
于 2012-08-03T09:38:55.290 に答える
1

Apple の OpenAL を機能させる方法を見つけました。私の元のコード スニペットでは、`alcDestroyContext(outputContext)` を呼び出す前に `alcMakeContextCurrent(NULL)` を呼び出す必要があります。

于 2012-08-16T16:08:38.780 に答える