Skip to content

Instantly share code, notes, and snippets.

@chauhan130
Last active June 15, 2019 08:36
Show Gist options
  • Save chauhan130/31b8bf648f0071a949d1627c78315056 to your computer and use it in GitHub Desktop.
/// Encoding parameters consumed by `exportVideo(videoAsset:filePath:size:videoConfig:completionHandler:)`.
struct VideoConfig {
/// Target average video bitrate in bits per second; fed to `AVVideoAverageBitRateKey`.
let videoBitrate: Int
/// Maximum interval (in frames) between key frames; fed to `AVVideoMaxKeyFrameIntervalKey`.
let videoMaxKeyFrameInterval: Int
/// H.264 profile/level constant (e.g. an `AVVideoProfileLevelH264*` value); fed to `AVVideoProfileLevelKey`.
let avVideoProfileLevel: String
/// Output audio sample rate in Hz; fed to `AVSampleRateKey`.
let audioSampleRate: Int
/// Target audio bitrate in bits per second; fed to `AVEncoderBitRateKey`.
let audioBitrate: Int
/// Desired output frame rate. NOTE(review): currently unused — every attempt to apply it
/// (`movieTimeScale`, `AVVideoExpectedSourceFrameRateKey`, `mediaTimeScale`) is commented
/// out in `exportVideo` with "Does not work!".
let videoFrameRate: Float
}
/// Re-encodes `videoAsset` into an H.264/AAC MP4 at `filePath`, scaled to `size`
/// (aspect-fill), using the bitrate/profile settings in `videoConfig`.
///
/// The sample pump runs asynchronously on `compressQueue`; on success the writer is
/// finalized and `completionHandler` is invoked with `filePath`.
///
/// - Parameters:
///   - videoAsset: Source asset; must contain at least one video and one audio track.
///   - filePath: Destination URL for the MP4 file.
///   - size: Output pixel dimensions.
///   - videoConfig: Video/audio encoding parameters (see `VideoConfig`).
///   - completionHandler: Called with `filePath` after a successful export.
///     NOTE(review): this handler is never invoked on any failure path (missing tracks,
///     reader/writer errors, cancellation), so callers cannot distinguish "failed" from
///     "still running". Consider a `Result`-based callback in a follow-up.
func exportVideo(videoAsset: AVURLAsset, filePath: URL, size: CGSize, videoConfig: VideoConfig, completionHandler: @escaping (URL)->()) {
    do {
        // Reader and Writer
        let writer = try AVAssetWriter(outputURL: filePath, fileType: AVFileType.mp4)
        let reader = try AVAssetReader(asset: videoAsset)
        // writer.movieTimeScale = CMTimeScale(videoConfig.videoFrameRate) // Does not work!

        // Tracks
        let videoTracksInAsset = videoAsset.tracks(withMediaType: AVMediaType.video)
        guard let videoTrack = videoTracksInAsset.first else {
            print("Video track not found.")
            return
        }
        guard let audioTrack = videoAsset.tracks(withMediaType: AVMediaType.audio).first else {
            print("Audio track not found.")
            return
        }
        print("Video Frame Rate: \(videoTrack.nominalFrameRate)")
        print("Video Duration: \(videoAsset.duration.seconds)")

        // Video Output Configuration (writer side: H.264 at the requested bitrate/profile)
        let videoCompressionProps: Dictionary<String, Any> = [
            AVVideoAverageBitRateKey : videoConfig.videoBitrate,
            AVVideoMaxKeyFrameIntervalKey : videoConfig.videoMaxKeyFrameInterval,
            AVVideoProfileLevelKey : videoConfig.avVideoProfileLevel,
            // AVVideoExpectedSourceFrameRateKey: videoConfig.videoFrameRate // Does not work!
        ]
        let videoOutputSettings: Dictionary<String, Any> = [
            AVVideoWidthKey : size.width,
            AVVideoHeightKey : size.height,
            AVVideoCodecKey : AVVideoCodecType.h264,
            AVVideoCompressionPropertiesKey : videoCompressionProps,
            AVVideoScalingModeKey: AVVideoScalingModeResizeAspectFill
        ]
        let videoInput = AVAssetWriterInput(mediaType: AVMediaType.video, outputSettings: videoOutputSettings)
        videoInput.expectsMediaDataInRealTime = false
        // videoInput.mediaTimeScale = CMTimeScale(videoConfig.videoFrameRate) // Does not work!

        // NOTE(review): kCVPixelFormatType_32RGBA is generally not a hardware-supported
        // CVPixelBuffer format on iOS (kCVPixelFormatType_32BGRA is). It is harmless here
        // only because the adaptor's pixel-buffer pool is never used — we append the
        // reader's own buffers below — but worth confirming before relying on the pool.
        let sourcePixelBufferAttributesDictionary: Dictionary<String, Any> = [
            String(kCVPixelBufferPixelFormatTypeKey) : Int(kCVPixelFormatType_32RGBA)
        ]
        let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(assetWriterInput: videoInput, sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary)
        videoInput.performsMultiPassEncodingIfSupported = true
        guard writer.canAdd(videoInput) else {
            print("Cannot add video input")
            return
        }
        writer.add(videoInput)

        // Audio Output Configuration (writer side: stereo AAC)
        var acl = AudioChannelLayout()
        acl.mChannelLayoutTag = kAudioChannelLayoutTag_Stereo
        acl.mChannelBitmap = AudioChannelBitmap(rawValue: UInt32(0))
        acl.mNumberChannelDescriptions = UInt32(0)
        let acll = MemoryLayout<AudioChannelLayout>.size
        let audioOutputSettings: Dictionary<String, Any> = [
            AVFormatIDKey : UInt(kAudioFormatMPEG4AAC),
            AVNumberOfChannelsKey : UInt(2),
            AVSampleRateKey : videoConfig.audioSampleRate,
            AVEncoderBitRateKey : videoConfig.audioBitrate,
            AVChannelLayoutKey : NSData(bytes:&acl, length: acll)
        ]
        let audioInput = AVAssetWriterInput(mediaType: AVMediaType.audio, outputSettings: audioOutputSettings)
        audioInput.expectsMediaDataInRealTime = false
        guard writer.canAdd(audioInput) else {
            print("Cannot add audio input")
            return
        }
        writer.add(audioInput)

        // Video Input Configuration (reader side: decode frames to 4:2:2 YpCbCr)
        let videoOptions: Dictionary<String, Any> = [
            kCVPixelBufferPixelFormatTypeKey as String : UInt(kCVPixelFormatType_422YpCbCr8_yuvs),
            kCVPixelBufferIOSurfacePropertiesKey as String : [:]
        ]
        let readerVideoTrackOutput = AVAssetReaderTrackOutput(track: videoTrack, outputSettings: videoOptions)
        readerVideoTrackOutput.alwaysCopiesSampleData = true
        guard reader.canAdd(readerVideoTrackOutput) else {
            print("Cannot add video output")
            return
        }
        reader.add(readerVideoTrackOutput)

        // Audio Input Configuration (reader side: decode to linear PCM)
        let decompressionAudioSettings: Dictionary<String, Any> = [
            AVFormatIDKey: UInt(kAudioFormatLinearPCM)
        ]
        let readerAudioTrackOutput = AVAssetReaderTrackOutput(track: audioTrack, outputSettings: decompressionAudioSettings)
        readerAudioTrackOutput.alwaysCopiesSampleData = true
        guard reader.canAdd(readerAudioTrackOutput) else {
            // Bug fix: this message previously said "video output" (copy-paste error).
            print("Cannot add audio output")
            return
        }
        reader.add(readerAudioTrackOutput)

        // Begin Compression
        reader.timeRange = CMTimeRange(start: .zero, duration: videoAsset.duration)
        writer.shouldOptimizeForNetworkUse = true
        reader.startReading()
        writer.startWriting()
        writer.startSession(atSourceTime: .zero)

        // Compress in Background: pump decoded samples from the reader outputs into
        // the writer inputs until both tracks are drained.
        let operationHandle = VideoExportOperationHandle()
        operationHandle.asset = videoAsset
        compressQueue.async {
            var frameCount = 0
            var videoDone = false
            var audioDone = false
            while !videoDone || !audioDone {
                // Check for Writer Errors (out of storage etc.)
                if writer.status == AVAssetWriter.Status.failed {
                    reader.cancelReading()
                    writer.cancelWriting()
                    if let e = writer.error {
                        print(e)
                        return
                    }
                }
                // Check for Reader Errors (source file corruption etc.)
                if reader.status == AVAssetReader.Status.failed {
                    reader.cancelReading()
                    writer.cancelWriting()
                    if let e = reader.error {
                        print(e)
                        return
                    }
                }
                // Check for Cancel
                if operationHandle.cancel {
                    reader.cancelReading()
                    writer.cancelWriting()
                    return
                }
                // Video: copy a single frame once the encoder can accept more data.
                if videoInput.isReadyForMoreMediaData {
                    if let vBuffer = readerVideoTrackOutput.copyNextSampleBuffer() {
                        // Bug fix: a non-nil buffer whose data was not yet ready used to
                        // fall into the "depleted" branch and prematurely finish the
                        // track. Only a nil return means the source is exhausted; a
                        // not-ready buffer is simply skipped this iteration.
                        if CMSampleBufferDataIsReady(vBuffer) {
                            frameCount += 1
                            #if DEBUG
                            print("Encoding frame: ", frameCount)
                            #endif
                            autoreleasepool {
                                let presentationTime = CMSampleBufferGetPresentationTimeStamp(vBuffer)
                                let pixelBuffer = CMSampleBufferGetImageBuffer(vBuffer)!
                                _ = pixelBufferAdaptor.append(pixelBuffer, withPresentationTime: presentationTime)
                            }
                        }
                    } else {
                        // Video source is depleted, mark as finished
                        if !videoDone {
                            videoInput.markAsFinished()
                        }
                        videoDone = true
                    }
                }
                // Audio: same pump for the audio track.
                if audioInput.isReadyForMoreMediaData {
                    if let aBuffer = readerAudioTrackOutput.copyNextSampleBuffer() {
                        // Bug fix: same premature-finish issue as the video branch above.
                        if CMSampleBufferDataIsReady(aBuffer) {
                            _ = audioInput.append(aBuffer)
                        }
                    } else {
                        // Audio source is depleted, mark as finished
                        if !audioDone {
                            audioInput.markAsFinished()
                        }
                        audioDone = true
                    }
                }
            }
            // Write everything to output file
            writer.finishWriting(completionHandler: {
                completionHandler(filePath)
            })
        }
        return
    } catch {
        // Error During Reader or Writer Creation
        // NOTE(review): the thrown error is silently dropped and completionHandler
        // never fires — the caller has no way to observe this failure.
        return
    }
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment