Skip to content

Instantly share code, notes, and snippets.

@chauhan130
Created June 15, 2019 18:53
Show Gist options
  • Save chauhan130/8d2f5bbe680353961fecacb80e254b92 to your computer and use it in GitHub Desktop.
Attempt to set the video frame rate through AVAssetReaderVideoCompositionOutput.
/// Re-encodes `videoAsset` into an H.264/AAC MP4 at `filePath`, attempting to force
/// the output frame rate by reading through an `AVAssetReaderVideoCompositionOutput`
/// whose composition `frameDuration` is 1 / `videoConfig.videoFrameRate`.
///
/// - Parameters:
///   - videoAsset: Source asset. Must contain at least one video and one audio track;
///     otherwise the function returns without calling `completionHandler`.
///   - filePath: Destination URL for the exported MP4.
///   - size: Output dimensions (content is scaled with aspect-fill).
///   - videoConfig: Supplies frame rate, video/audio bitrates, key-frame interval,
///     profile level and audio sample rate for the output settings.
///   - completionHandler: Invoked with `filePath` after the writer finishes.
///     NOTE(review): every failure path (missing track, reader/writer error,
///     cancellation) returns silently and never invokes this handler, so callers
///     cannot observe failure — consider a `Result`-based callback.
func exportVideo(videoAsset: AVURLAsset, filePath: URL, size: CGSize, videoConfig: VideoConfig, completionHandler: @escaping (URL)->()) {
    do {
        // MARK: - Reader and Writer
        let writer = try AVAssetWriter(outputURL: filePath, fileType: AVFileType.mp4)
        let reader = try AVAssetReader(asset: videoAsset)
        writer.movieTimeScale = CMTimeScale(videoConfig.videoFrameRate)

        // MARK: - Tracks
        let videoTracksInAsset = videoAsset.tracks(withMediaType: AVMediaType.video)
        guard let videoTrack = videoTracksInAsset.first else {
            return // FIX: was `return()` — same effect, conventional spelling.
        }
        guard let audioTrack = videoAsset.tracks(withMediaType: AVMediaType.audio).first else {
            return
        }
        print("Video Frame Rate: \(videoTrack.nominalFrameRate)")
        print("Video Duration: \(videoAsset.duration.seconds)")

        // MARK: - Video output configuration
        let videoCompressionProps: Dictionary<String, Any> = [
            AVVideoAverageBitRateKey : videoConfig.videoBitrate,
            AVVideoMaxKeyFrameIntervalKey : videoConfig.videoMaxKeyFrameInterval,
            AVVideoProfileLevelKey : videoConfig.avVideoProfileLevel,
            AVVideoExpectedSourceFrameRateKey: videoConfig.videoFrameRate
        ]
        let videoOutputSettings: Dictionary<String, Any> = [
            AVVideoWidthKey : size.width,
            AVVideoHeightKey : size.height,
            AVVideoCodecKey : AVVideoCodecType.h264,
            AVVideoCompressionPropertiesKey : videoCompressionProps,
            AVVideoScalingModeKey: AVVideoScalingModeResizeAspectFill
        ]
        let videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoOutputSettings)
        videoInput.expectsMediaDataInRealTime = false
        videoInput.mediaTimeScale = CMTimeScale(videoConfig.videoFrameRate)
        // FIX: kCVPixelFormatType_32RGBA is not among Core Video's supported
        // output pixel formats on iOS; kCVPixelFormatType_32BGRA is the documented
        // RGB format, and using 32RGBA here is the most likely reason this export
        // fails. The reader output below is changed to match.
        let sourcePixelBufferAttributesDictionary: Dictionary<String, Any> = [
            String(kCVPixelBufferPixelFormatTypeKey) : Int(kCVPixelFormatType_32BGRA)
        ]
        let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
        videoInput.performsMultiPassEncodingIfSupported = true
        guard writer.canAdd(videoInput) else {
            return
        }
        writer.add(videoInput)

        // MARK: - Audio output configuration
        var acl = AudioChannelLayout()
        acl.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo
        acl.mChannelBitmap = AudioChannelBitmap(rawValue: UInt32(0))
        acl.mNumberChannelDescriptions = UInt32(0)
        let acll = MemoryLayout<AudioChannelLayout>.size
        let audioOutputSettings: Dictionary<String, Any> = [
            AVFormatIDKey : UInt(kAudioFormatMPEG4AAC),
            AVNumberOfChannelsKey : UInt(2),
            AVSampleRateKey : videoConfig.audioSampleRate,
            AVEncoderBitRateKey : videoConfig.audioBitrate,
            AVChannelLayoutKey : NSData(bytes:&acl, length: acll)
        ]
        let audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioOutputSettings)
        audioInput.expectsMediaDataInRealTime = false
        guard writer.canAdd(audioInput) else {
            return
        }
        writer.add(audioInput)

        // MARK: - Video reader output (the composition is what forces the frame rate)
        let videoSettingsDictionary: Dictionary<String, Any> = [
            String(kCVPixelBufferPixelFormatTypeKey) : Int(kCVPixelFormatType_32BGRA)
        ]
        let videoOutput = AVAssetReaderVideoCompositionOutput(videoTracks: videoTracksInAsset, videoSettings: videoSettingsDictionary)
        videoOutput.alwaysCopiesSampleData = false
        let videoComposition = AVMutableVideoComposition()
        // 8 / (8 * fps) == 1 / fps; the factor of 8 keeps the timescale an exact
        // integer multiple of the frame rate.
        videoComposition.frameDuration = CMTimeMake(value: 8, timescale: Int32(8 * videoConfig.videoFrameRate))
        print("VideoComposition FrameDuration: \(videoComposition.frameDuration.seconds)")
        let passThroughInstruction = AVMutableVideoCompositionInstruction()
        passThroughInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: videoAsset.duration)
        let passThroughLayer = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
        passThroughInstruction.layerInstructions = [passThroughLayer]
        videoComposition.instructions = [passThroughInstruction]
        videoComposition.renderSize = videoTrack.naturalSize
        videoOutput.videoComposition = videoComposition
        // FIX: was a bare `if reader.canAdd { reader.add }` — on failure the copy
        // loop below would read from an output that was never attached. Bail out
        // like every other guarded setup step.
        guard reader.canAdd(videoOutput) else {
            return
        }
        reader.add(videoOutput)

        // MARK: - Audio reader output
        let decompressionAudioSettings: Dictionary<String, Any> = [
            AVFormatIDKey: UInt(kAudioFormatLinearPCM)
        ]
        let readerAudioTrackOutput = AVAssetReaderTrackOutput(track: audioTrack, outputSettings: decompressionAudioSettings)
        readerAudioTrackOutput.alwaysCopiesSampleData = true
        guard reader.canAdd(readerAudioTrackOutput) else {
            return
        }
        reader.add(readerAudioTrackOutput)

        // MARK: - Begin compression
        reader.timeRange = CMTimeRange(start: .zero, duration: videoAsset.duration)
        writer.shouldOptimizeForNetworkUse = true
        // FIX: the Bool results of startReading()/startWriting() were ignored,
        // so a failed start fell straight into the copy loop.
        guard reader.startReading() else {
            if let e = reader.error { print(e) }
            return
        }
        guard writer.startWriting() else {
            if let e = writer.error { print(e) }
            reader.cancelReading()
            return
        }
        writer.startSession(atSourceTime: .zero)

        // MARK: - Compress in background
        let operationHandle = VideoExportOperationHandle()
        operationHandle.asset = videoAsset
        compressQueue.async {
            // NOTE(review): this loop busy-waits while neither input is ready;
            // AVAssetWriterInput.requestMediaDataWhenReady(on:using:) is the
            // back-pressure-aware alternative.
            var frameCount = 0
            var videoDone = false
            var audioDone = false
            while !videoDone || !audioDone {
                // Writer errors (out of storage etc.)
                if writer.status == AVAssetWriter.Status.failed {
                    reader.cancelReading()
                    writer.cancelWriting()
                    if let e = writer.error {
                        print(e)
                        return
                    }
                }
                // Reader errors (source file corruption etc.)
                if reader.status == AVAssetReader.Status.failed {
                    reader.cancelReading()
                    writer.cancelWriting()
                    if let e = reader.error {
                        print(e)
                        return
                    }
                }
                // Caller-requested cancellation.
                if operationHandle.cancel {
                    reader.cancelReading()
                    writer.cancelWriting()
                    return
                }
                // Copy one video frame when the encoder can take it.
                if videoInput.isReadyForMoreMediaData {
                    if let vBuffer = videoOutput.copyNextSampleBuffer(), CMSampleBufferDataIsReady(vBuffer) {
                        frameCount += 1
                        #if DEBUG
                        print("Encoding frame: ", frameCount)
                        #endif
                        autoreleasepool {
                            let presentationTime = CMSampleBufferGetPresentationTimeStamp(vBuffer)
                            // FIX: was force-unwrapped — a sample without an image
                            // buffer now skips the frame instead of crashing.
                            if let pixelBuffer = CMSampleBufferGetImageBuffer(vBuffer) {
                                // FIX: append failures were silently discarded with
                                // `_ =`; log them — the writer.status check above
                                // then terminates the loop on a hard failure.
                                if !pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime) {
                                    print("Failed to append pixel buffer at \(presentationTime.seconds)")
                                }
                            }
                        }
                    } else {
                        // Video source is depleted, mark as finished.
                        if !videoDone {
                            videoInput.markAsFinished()
                        }
                        videoDone = true
                    }
                }
                // Copy one audio sample when the encoder can take it.
                if audioInput.isReadyForMoreMediaData {
                    if let aBuffer = readerAudioTrackOutput.copyNextSampleBuffer(), CMSampleBufferDataIsReady(aBuffer) {
                        if !audioInput.append(aBuffer) {
                            print("Failed to append audio sample buffer")
                        }
                    } else {
                        // Audio source is depleted, mark as finished.
                        if !audioDone {
                            audioInput.markAsFinished()
                        }
                        audioDone = true
                    }
                }
            }
            // Finalize the output file, then notify the caller.
            writer.finishWriting(completionHandler: {
                completionHandler(filePath)
            })
        }
    } catch {
        // Reader or writer creation failed.
        print(error)
        return
    }
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment