3

私は iOS 開発の初心者です。iOS で LinearPCM を MP3 にエンコードしています。AudioToolbox フレームワークと Lame を使用して、マイクからの生の PCM データを MP3 にエンコードしようとしています。 . エンコードされたストリームに存在するノイズと歪みだけが得られます。AudioQueue を正しくセットアップしたかどうか、またエンコードされたバッファを正しいワットで処理したかどうかはわかりません...オーディオ録音をセットアップするための私のコード:

サンプル プロジェクトhttps://github.com/vecter/Audio-Queue-Services-Example

- (void)setupAudioFormat:(AudioStreamBasicDescription*)format 
{
format->mSampleRate = 16000;
format->mFormatID = kAudioFormatLinearPCM;
format->mFramesPerPacket = 1;
format->mChannelsPerFrame = 1;
format->mBytesPerFrame = 2;
format->mBytesPerPacket = 2;
format->mBitsPerChannel = 16;
format->mReserved = 0;
format->mFormatFlags = kLinearPCMFormatFlagIsBigEndian     |
                       kLinearPCMFormatFlagIsSignedInteger |
                       kLinearPCMFormatFlagIsPacked;
}
- (void)recordPressed:(id)sender
{
if (!playState.playing)
{
    if (!recordState.recording)
    {
        printf("Starting recording\n");
          self.mergedData =[[NSMutableData alloc] init];
        [self startRecording];
    }
    else
    {
        printf("Stopping recording\n");
        [self stopRecording];
    }
}
else
{
    printf("Can't start recording, currently playing\n");
}
}

- (void)startRecording
{
[self setupAudioFormat:&recordState.dataFormat];

recordState.currentPacket = 0;
recordState.pThis=self;

OSStatus status;
status = AudioQueueNewInput(&recordState.dataFormat,
                            AudioInputCallback,
                            &recordState,
                            CFRunLoopGetCurrent(),
                            kCFRunLoopCommonModes,
                            0,
                            &recordState.queue);

if (status == 0)
{
    // Prime recording buffers with empty data
    for (int i = 0; i < NUM_BUFFERS; i++)
    {
        AudioQueueAllocateBuffer(recordState.queue, 16000, &recordState.buffers[i]);
        AudioQueueEnqueueBuffer (recordState.queue, recordState.buffers[i], 0, NULL);
    }

    status = AudioFileCreateWithURL(fileURL,
                                    kAudioFileAIFFType,
                                    &recordState.dataFormat,
                                    kAudioFileFlags_EraseFile,
                                    &recordState.audioFile);

    gfp = lame_init();
    lame_set_num_channels(gfp, 1);
    lame_set_in_samplerate(gfp, recordState.dataFormat.mSampleRate);
    lame_set_VBR(gfp, vbr_default);
    lame_init_params(gfp);
    if (status == 0)
    {
        recordState.recording = true;        
        status = AudioQueueStart(recordState.queue, NULL);
        if (status == 0)
        {
            mergeData =[[NSMutableData alloc]init];
            labelStatus.text = @"Recording";
        }
    }
}

if (status != 0)
{
    [self stopRecording];
    labelStatus.text = @"Record Failed";
}
}


- (void)stopRecording
{
recordState.recording = false;

AudioQueueStop(recordState.queue, true);
for(int i = 0; i < NUM_BUFFERS; i++)
{
    AudioQueueFreeBuffer(recordState.queue, recordState.buffers[i]);
}

AudioQueueDispose(recordState.queue, true);
AudioFileClose(recordState.audioFile);
labelStatus.text = @"Idle";
}

次に、AudioQueue コールバック関数が lame_encode_buffer を呼び出し、エンコードされたバッファをファイルに書き込みます。

void AudioInputCallback(void * inUserData, 
                    AudioQueueRef inAQ, 
                    AudioQueueBufferRef inBuffer, 
                    const AudioTimeStamp * inStartTime, 
                    UInt32 inNumberPacketDescriptions, 
                    const AudioStreamPacketDescription * inPacketDescs)
 {
RecordState * recordState = (RecordState*)inUserData;
if (!recordState->recording)
{
    printf("Not recording, returning\n");
}

printf("Writing buffer %lld\n", recordState->currentPacket);
OSStatus status = AudioFileWritePackets(recordState->audioFile,
                                        false,
                                        inBuffer->mAudioDataByteSize,
                                        inPacketDescs,
                                        recordState->currentPacket,
                                        &inNumberPacketDescriptions,
                                        inBuffer->mAudioData);
if (status == 0)
{
    recordState->currentPacket += inNumberPacketDescriptions;
}

AudioRecorderAppDelegate *this = recordState->pThis;

const int MP3_BUFFER_SIZE=inBuffer->mAudioDataByteSize*4;
unsigned char mEncodedBuffer[MP3_BUFFER_SIZE];

 int encodedBytes=lame_encode_buffer_interleaved(this->gfp, (short int *)inBuffer->mAudioData , inNumberPacketDescriptions, mEncodedBuffer, MP3_BUFFER_SIZE);
NSData* data = [NSData dataWithBytes:mEncodedBuffer length:encodedBytes];
[this writeData:data];
lame_encode_flush(this->gfp, mEncodedBuffer, MP3_BUFFER_SIZE);


memset(&mEncodedBuffer, 0, sizeof(mEncodedBuffer));
AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);

 }

データの追加

- (void) writeData:(NSData *)data
{ 
[mergeData appendData:data];
NSArray *paths = NSSearchPathForDirectoriesInDomains(NSDocumentDirectory,
                                                     NSUserDomainMask, YES);
NSString* docDir = [paths objectAtIndex:0];

NSString* file = [docDir stringByAppendingString:@"/lame.mp3"];
[mergeData writeToFile:file atomically:YES];
NSLog(@"%@",file);
}

ここで何が悪いのか誰にもアドバイスできますか?

他に、既に完了したサンプル プロジェクトを投稿しますか?

4

2 に答える 2

1

私の場合、このロジックは機能しました:

int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);

NSMutableData *data1=[[NSMutableData alloc]initWithBytes:mp3_buffer length:encodedBytes];

[this writeData:data];
于 2013-08-16T09:53:19.480 に答える
1

これを試して

void AQRecorder::MyInputBufferHandler(  void *                              inUserData,
                                    AudioQueueRef                       inAQ,
                                    AudioQueueBufferRef                 inBuffer,
                                    const AudioTimeStamp *              inStartTime,
                                    UInt32                              inNumPackets,
                                    const AudioStreamPacketDescription* inPacketDesc)
{
  AQRecorder *aqr = (AQRecorder *)inUserData;
//    NSLog(@"%f",inStartTime->mSampleTime);
try
{
        if (inNumPackets > 0)
        {
            AudioFileWritePackets(aqr->mRecordFile, FALSE, inBuffer->mAudioDataByteSize, inPacketDesc, aqr->mRecordPacket, &inNumPackets, inBuffer->mAudioData);

            aqr->mRecordPacket += inNumPackets;

            int MP3_SIZE =inBuffer->mAudioDataByteSize * 4;
            unsigned char mp3_buffer[MP3_SIZE];
            AppDelegate *delegate =[[UIApplication sharedApplication]delegate];
            lame_t lame = lame_init();
            lame_set_in_samplerate(lame, 44100);
            lame_set_VBR(lame, vbr_default);
            lame_init_params(lame);

      //                int encodedBytes=lame_encode_buffer_interleaved(lame, (short int *)inBuffer->mAudioData , inNumPackets, mp3_buffer, MP3_SIZE);


            int encodedBytes = lame_encode_buffer(lame, (short*)inBuffer->mAudioData,  (short*)inBuffer->mAudioData, inNumPackets, mp3_buffer, MP3_SIZE);

            [delegate.mp3AudioData appendBytes:mp3_buffer length:encodedBytes];

            if (inBuffer->mAudioDataByteSize != 0) {
            }
            else
            {
                int encode=lame_encode_flush(lame, mp3_buffer, MP3_SIZE);
                [delegate.mp3AudioData appendBytes:mp3_buffer length:encode];
            }
            lame_close(lame);
        }

        if (aqr->IsRunning())
        {
            AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
        }
} catch (CAXException e)
{
    char buf[256];
    fprintf(stderr, "Error: %s (%s)\n", e.mOperation, e.FormatError(buf));
}
}
于 2013-08-20T12:12:01.703 に答える