
Hi, I am trying to implement an application that streams audio recorded on an iOS device to a web service. I want the audio to be streamed live, so I implemented this with Audio Queues. I can record audio and the AudioInputCallback is invoked, but the request that should send the data never fires.

Please help me with this. I am posting my sample code here:

    // Describe mono, 16 kHz, native-endian packed Float32 linear PCM.
    - (void)setupAudioFormat:(AudioStreamBasicDescription *)format {
        format->mSampleRate       = 16000.0;
        format->mFormatID         = kAudioFormatLinearPCM;
        format->mFormatFlags      = kAudioFormatFlagsNativeFloatPacked;
        format->mFramesPerPacket  = 1;
        format->mChannelsPerFrame = 1;
        // For packed mono Float32 these fields must agree with each other:
        format->mBytesPerFrame    = sizeof(Float32);       // 4 bytes per frame
        format->mBytesPerPacket   = sizeof(Float32);       // 1 frame per packet
        format->mBitsPerChannel   = 8 * sizeof(Float32);   // 32 bits per sample
    }
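Since the request below declares the body as audio/l16;rate=16000 (16-bit PCM), the queue format may need to use 16-bit signed integer samples rather than Float32. A minimal sketch of such a format, assuming mono 16 kHz SInt16 samples (this is an assumption, not part of my current code):

    // Hypothetical alternative: 16-bit signed integer PCM to match audio/l16.
    - (void)setupLinear16Format:(AudioStreamBasicDescription *)format {
        format->mSampleRate       = 16000.0;
        format->mFormatID         = kAudioFormatLinearPCM;
        format->mFormatFlags      = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
        format->mFramesPerPacket  = 1;
        format->mChannelsPerFrame = 1;
        format->mBytesPerFrame    = sizeof(SInt16);      // 2 bytes per mono frame
        format->mBytesPerPacket   = sizeof(SInt16);
        format->mBitsPerChannel   = 8 * sizeof(SInt16);  // 16 bits per sample
    }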

    - (void)startRecording {
        [self setupAudioFormat:&recordState.dataFormat];

        recordState.currentPacket = 0;
        NSLog(@"Started recording");

        // Create the input queue; the callback is scheduled on the current run loop.
        OSStatus status = AudioQueueNewInput(&recordState.dataFormat,
                                             AudioInputCallback,
                                             &recordState,
                                             CFRunLoopGetCurrent(),
                                             kCFRunLoopCommonModes,
                                             0,
                                             &recordState.queue);

        if (status == noErr) {
            for (int i = 0; i < NUM_BUFFERS; i++) {
                NSLog(@"Allocated buffer %d", i);
                // 256-byte buffers (about 4 ms of Float32 audio at 16 kHz).
                AudioQueueAllocateBuffer(recordState.queue, 256, &recordState.buffers[i]);
                AudioQueueEnqueueBuffer(recordState.queue, recordState.buffers[i], 0, NULL);
            }

            recordState.recording = true;
            status = AudioQueueStart(recordState.queue, NULL);
        }
    }
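For context, RecordState is a plain C struct holding the queue state. This is a sketch based only on the fields referenced in the code above; the NUM_BUFFERS value of 3 is an assumption:

    #define NUM_BUFFERS 3   // assumed value

    typedef struct {
        AudioStreamBasicDescription dataFormat;
        AudioQueueRef               queue;
        AudioQueueBufferRef         buffers[NUM_BUFFERS];
        SInt64                      currentPacket;
        bool                        recording;
    } RecordState;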
    // This is the callback function.
    void AudioInputCallback(void *inUserData,   // Custom audio metadata
                            AudioQueueRef inAQ,
                            AudioQueueBufferRef inBuffer,
                            const AudioTimeStamp *inStartTime,
                            UInt32 inNumberPacketDescriptions,
                            const AudioStreamPacketDescription *inPacketDescs) {
        NSLog(@"Audio input called");
        RecordState *recordState = (RecordState *)inUserData;
        recordState->currentPacket += inNumberPacketDescriptions;

        // Copy the recorded samples out of the buffer before re-enqueuing it.
        AUDIO_DATA_TYPE_FORMAT *samples = (AUDIO_DATA_TYPE_FORMAT *)inBuffer->mAudioData;
        // Do something with the samples.
        NSData *data = [NSData dataWithBytes:samples length:inBuffer->mAudioDataByteSize];
        AudioQueueEnqueueBuffer(recordState->queue, inBuffer, 0, NULL);

        // Create the request.
        NSMutableURLRequest *request = [NSMutableURLRequest requestWithURL:[NSURL URLWithString:@"https://stream.watsonplatform.net/speech-to-text/api/v1/recognize"]];
        NSString *loginString = @"e718d0a1-11e0-401c-a430-b552d692f04b:uXqASpwQDuHc";
        NSData *loginData = [loginString dataUsingEncoding:NSUTF8StringEncoding];
        NSString *base64LoginString = [NSString stringWithFormat:@"Basic %@", [loginData base64EncodedStringWithOptions:0]];

        request.HTTPMethod = @"POST";
        [request setValue:base64LoginString forHTTPHeaderField:@"Authorization"];
        [request setValue:@"audio/l16;rate=16000" forHTTPHeaderField:@"Content-Type"];
        request.HTTPBody = data;

        // Create the URL connection and fire the request.
        ViewController *recorder = (__bridge ViewController *)refToSelf;
        NSURLConnection *conn = [[NSURLConnection alloc] initWithRequest:request delegate:recorder];
    }
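For comparison, here is a minimal sketch of the same per-buffer POST done with NSURLSession instead of NSURLConnection. The hypothetical helper postBufferData below assumes the request is built exactly as above; it is only meant to illustrate the upload plumbing, and its completion handler at least makes it visible whether the request ever leaves the device:

    // Sketch only: post one buffer's worth of audio with NSURLSession.
    // Assumes `data` holds the samples and the headers are set as in the callback above.
    static void postBufferData(NSData *data) {
        NSMutableURLRequest *request = [NSMutableURLRequest requestWithURL:
            [NSURL URLWithString:@"https://stream.watsonplatform.net/speech-to-text/api/v1/recognize"]];
        request.HTTPMethod = @"POST";
        [request setValue:@"audio/l16;rate=16000" forHTTPHeaderField:@"Content-Type"];
        // The Authorization header would be set the same way as in the callback.

        NSURLSessionUploadTask *task =
            [[NSURLSession sharedSession] uploadTaskWithRequest:request
                                                       fromData:data
                                              completionHandler:^(NSData *body, NSURLResponse *response, NSError *error) {
                if (error != nil) {
                    NSLog(@"Upload failed: %@", error);
                } else {
                    NSLog(@"Response: %@", [[NSString alloc] initWithData:body encoding:NSUTF8StringEncoding]);
                }
            }];
        [task resume];
    }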