This is an extension of an earlier question of mine that never got an answer: AVCaptureSession is not recording audio from the mic in Swift
I can't figure out how to write out both the video, which is being modified in real time, and the audio being recorded from the mic. I have been searching for months and have found nothing. What seems to set my problem apart from everyone else's is that I filter the video frames as they come through the output, rather than writing everything from the output straight out as a normal video. From there I don't know what I should be doing to get the audio from the sampleBuffer, or even whether that is the right approach, though I have seen others pull an AudioBufferList out of captureOutput.
At a bare minimum, this is what I have in my main class:
class CaptureVC: UIViewController, AVCapturePhotoCaptureDelegate, AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate, UIImagePickerControllerDelegate, UINavigationControllerDelegate, UIPickerViewDataSource, UIPickerViewDelegate {

    var captureSession: AVCaptureSession?
    var stillImageOutput: AVCapturePhotoOutput?
    var videoPreviewLayer: AVCaptureVideoPreviewLayer?

    let videoOutput = AVCaptureVideoDataOutput()
    let audioOutput = AVCaptureAudioDataOutput()

    var assetWriter: AVAssetWriter?
    var assetWriterPixelBufferInput: AVAssetWriterInputPixelBufferAdaptor?
    var assetWriterAudioInput: AVAssetWriterInput?

    var currentSampleTime: CMTime?
    var currentVideoDimensions: CMVideoDimensions?

    var videoIsRecording = false

    override func viewDidLoad() {
        super.viewDidLoad()

        // the session has to exist before the force-unwraps below
        captureSession = AVCaptureSession()

        let backCamera = AVCaptureDevice.default(for: AVMediaType.video)
        let microphone = AVCaptureDevice.default(.builtInMicrophone, for: AVMediaType.audio, position: .unspecified)

        var error: NSError?
        var videoInput: AVCaptureDeviceInput!
        var micInput: AVCaptureDeviceInput!
        do {
            videoInput = try AVCaptureDeviceInput(device: backCamera!)
            micInput = try AVCaptureDeviceInput(device: microphone!)
        } catch let error1 as NSError {
            error = error1
            videoInput = nil
            micInput = nil
            print(error!.localizedDescription)
        }

        if error == nil &&
            captureSession!.canAddInput(videoInput) &&
            captureSession!.canAddInput(micInput) {

            captureSession!.addInput(videoInput)
            captureSession!.addInput(micInput)

            stillImageOutput = AVCapturePhotoOutput()

            if captureSession!.canAddOutput(stillImageOutput!) {
                captureSession!.addOutput(stillImageOutput!)

                let q = DispatchQueue(label: "sample buffer delegate", qos: .default)

                videoOutput.setSampleBufferDelegate(self, queue: q)
                if captureSession!.canAddOutput(videoOutput) {
                    captureSession!.addOutput(videoOutput)
                }

                audioOutput.setSampleBufferDelegate(self, queue: q)
                if captureSession!.canAddOutput(audioOutput) {
                    captureSession!.addOutput(audioOutput)
                }

                captureSession!.startRunning()
            }
        }
    }
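One thing I have started to wonder (just a guess on my part) is whether permissions play a part, since the mic needs its own authorization on top of the camera's. Checking it would look something like this, along with an NSMicrophoneUsageDescription entry in Info.plist:

// Guess: confirm the app actually has microphone access
// (Info.plist also needs an NSMicrophoneUsageDescription entry).
AVCaptureDevice.requestAccess(for: .audio) { granted in
    if !granted {
        print("Microphone access denied")
    }
}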
The function that creates the asset writer:
func createWriter() {
    self.checkForAndDeleteFile()

    do {
        assetWriter = try AVAssetWriter(outputURL: movieURL() as URL, fileType: AVFileType.mov)
    } catch let error as NSError {
        print(error.localizedDescription)
        return
    }

    let videoSettings = [
        AVVideoCodecKey : AVVideoCodecType.h264,
        AVVideoWidthKey : Int(currentVideoDimensions!.height), // note: these are swapped because of REASONS
        AVVideoHeightKey : Int(currentVideoDimensions!.width)
    ] as [String : Any]

    let assetWriterVideoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoSettings)
    assetWriterVideoInput.expectsMediaDataInRealTime = true

    let sourcePixelBufferAttributesDictionary = [
        String(kCVPixelBufferPixelFormatTypeKey) : Int(kCVPixelFormatType_32BGRA),
        String(kCVPixelBufferWidthKey) : Int(currentVideoDimensions!.width),
        String(kCVPixelBufferHeightKey) : Int(currentVideoDimensions!.height),
        String(kCVPixelBufferOpenGLESCompatibilityKey) : kCFBooleanTrue
    ] as [String : Any]

    assetWriterPixelBufferInput = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: assetWriterVideoInput,
                                                                       sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)

    var channelLayout = AudioChannelLayout()
    memset(&channelLayout, 0, MemoryLayout<AudioChannelLayout>.size)
    channelLayout.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo

    let audioSettings: [String: Any] = [
        AVFormatIDKey: kAudioFormatMPEG4AAC,
        AVSampleRateKey: 44100,
        AVNumberOfChannelsKey: 2,
        AVChannelLayoutKey: Data(bytes: &channelLayout, count: MemoryLayout<AudioChannelLayout>.size)
    ]

    assetWriterAudioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioSettings)
    assetWriterAudioInput?.expectsMediaDataInRealTime = true

    if assetWriter!.canAdd(assetWriterVideoInput) {
        assetWriter!.add(assetWriterVideoInput)
    } else {
        print("cannot add \(assetWriterVideoInput)")
    }

    if assetWriter!.canAdd(assetWriterAudioInput!) {
        assetWriter!.add(assetWriterAudioInput!)
    } else {
        print("cannot add \(String(describing: assetWriterAudioInput))")
    }
}
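For reference, movieURL() and checkForAndDeleteFile() are just small helpers along these lines (simplified here; the exact bodies don't matter):

// Simplified sketch of the helpers used above: one fixed file in the
// temporary directory, removed before each new recording.
func movieURL() -> NSURL {
    let url = FileManager.default.temporaryDirectory.appendingPathComponent("capture.mov")
    return url as NSURL
}

func checkForAndDeleteFile() {
    let url = movieURL() as URL
    if FileManager.default.fileExists(atPath: url.path) {
        try? FileManager.default.removeItem(at: url)
    }
}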
When I record, I create the writer, start writing, and start the session:
videoIsRecording = true
createWriter()
assetWriter?.startWriting()
assetWriter?.startSession(atSourceTime: currentSampleTime!)
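One thing I'm not sure about (an assumption on my part, not something I've verified): currentSampleTime here is the timestamp of the last video frame that came through, so mic buffers stamped slightly earlier than that could be dropped by the writer. An alternative I've considered is starting the writer session lazily from the first buffer that arrives once recording is on:

// Hypothetical alternative, inside captureOutput: start the writer
// session from the first sample that arrives after recording begins
// (sessionStarted is an invented Bool property, reset to false for
// each new recording).
if videoIsRecording && !sessionStarted {
    assetWriter?.startSession(atSourceTime: CMSampleBufferGetPresentationTimeStamp(sampleBuffer))
    sessionStarted = true
}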
When I stop recording, I move to another view and display the video:
assetWriter?.finishWriting(completionHandler: { [unowned self] () -> Void in

    let firstAsset = AVURLAsset(url: self.movieURL() as URL)

    guard let exporter = AVAssetExportSession(asset: firstAsset, presetName: AVAssetExportPresetHighestQuality) else { return }

    guard let vidComp = self.getVideoComposition(asset: firstAsset,
                                                 videoSize: CGSize(width: 1280,
                                                                   height: 720)) else {
        print("Unable to create video composition")
        return
    }

    print(vidComp.instructions)

    exporter.videoComposition = vidComp
    exporter.outputURL = self.movieURL() as URL
    exporter.outputFileType = AVFileType.mov

    exporter.exportAsynchronously() {
        DispatchQueue.main.async {
            self.activityTextStatus.text = ""

            fileURLSenderVal = self.movieURL() as URL

            let manageCaptureVC = self.storyboard?.instantiateViewController(withIdentifier: "ManageCaptureVC") as! ManageCaptureVC
            manageCaptureVC.fileURL = fileURLSenderVal
            manageCaptureVC.imageCaptureMode = ManageCaptureVC.imageCaptureModes(rawValue: self.imageCaptureMode.rawValue)!
            manageCaptureVC.delegate = self
            self.present(manageCaptureVC, animated: true, completion: nil)
        }
    }
})
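I also realize (though it may be a separate problem) that exporter.outputURL above points at the same file the exporter is reading from, and AVAssetExportSession fails if its output file already exists. If that matters, exporting to a second file would look something like this:

// Hypothetical fix: write the export to a separate file instead of on
// top of the asset being read.
let exportURL = FileManager.default.temporaryDirectory.appendingPathComponent("export.mov")
try? FileManager.default.removeItem(at: exportURL)
exporter.outputURL = exportURL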
But this is where I am stuck: where and how do I record the mic input??
// live output from camera
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

    // only camera frames are handled here; what to do with the mic
    // samples is exactly what I can't figure out
    guard output === videoOutput,
          let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    let cameraImage = CIImage(cvPixelBuffer: pixelBuffer)

    var orientation = UIImageOrientation.right
    if isFrontCamera {
        orientation = UIImageOrientation.leftMirrored
    }

    image = UIImage(ciImage: cameraImage)

    if let ciImage = image?.ciImage {
        image = applyFilterAndReturnImage(ciImage: ciImage, orientation: orientation, currentCameraRes: currentCameraRes!)

        let formatDescription = CMSampleBufferGetFormatDescription(sampleBuffer)!
        self.currentVideoDimensions = CMVideoFormatDescriptionGetDimensions(formatDescription)
        self.currentSampleTime = CMSampleBufferGetOutputPresentationTimeStamp(sampleBuffer)

        if videoIsRecording && self.assetWriterPixelBufferInput?.assetWriterInput.isReadyForMoreMediaData == true {
            let filteredBuffer = buffer(from: image!)
            let success = self.assetWriterPixelBufferInput?.append(filteredBuffer!, withPresentationTime: self.currentSampleTime!)
            if success == false {
                print("Pixel Buffer failed")
            }
        }

        DispatchQueue.main.async {
            imageView!.image = image
        }
    }
}
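Now that the class conforms to AVCaptureAudioDataOutputSampleBufferDelegate, the same callback should fire for the mic samples as well, so my best guess at the missing piece (an assumption pieced together from other examples, not something I've verified) is a branch like this ahead of the guard in captureOutput:

// Hypothetical sketch: route the mic samples to the asset writer
// before the video-only handling above.
if output === audioOutput {
    if videoIsRecording,
       let audioInput = assetWriterAudioInput,
       audioInput.isReadyForMoreMediaData {
        // a mic CMSampleBuffer can be appended as-is; the writer input
        // compresses it to AAC according to audioSettings
        if !audioInput.append(sampleBuffer) {
            print("Audio buffer failed")
        }
    }
    return
}

If that's roughly right, the remaining question is whether the timestamps of the appended audio line up with the video frames I'm appending through the pixel buffer adaptor.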
Again, I have been going in circles on this for months, and there is no documentation I can find that helps. Thank you.