
ios – Overlay Static Text via AVMutableVideoComposition



In Swift for iOS, I have an array of AVURLAsset. I pass it through a function to stitch/merge the video assets together into one final video. For each video, my goal is to overlay text centered in the frame.

When I play the output video, the video assets merge correctly, but I can't figure out why none of the text overlays appear. I tried following an existing answer, but to no avail. Any guidance would be greatly appreciated.
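For reference, I call the function along these lines (simplified; the URL array and the playback handling are placeholders):

let urls: [URL] = [] // local video file URLs, populated elsewhere
let assets = urls.map { AVURLAsset(url: $0) }

merge(videos: assets) { url, exporter in
    print("Merged video exported to \(url)")
}

Here is the full function: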

func merge(videos: [AVURLAsset], completion: @escaping (_ url: URL, _ asset: AVAssetExportSession) -> ()) {
    let videoComposition = AVMutableComposition()
    var lastTime: CMTime = .zero

    var count = 0
    var maxVideoSize = CGSize.zero // For determining the maximum video size

    guard let videoCompositionTrack = videoComposition.addMutableTrack(withMediaType: .video, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }
    guard let audioCompositionTrack = videoComposition.addMutableTrack(withMediaType: .audio, preferredTrackID: Int32(kCMPersistentTrackID_Invalid)) else { return }

    let mainComposition = AVMutableVideoComposition()
    var parentLayers = [CALayer]() // To hold all individual parent layers

    for video in videos {

        if let videoTrack = video.tracks(withMediaType: .video)[safe: 0] {

            videoCompositionTrack.preferredTransform = videoTrack.preferredTransform

            do {
                try videoCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: videoTrack, at: lastTime)

                if let audioTrack = video.tracks(withMediaType: .audio)[safe: 0] {
                    try audioCompositionTrack.insertTimeRange(CMTimeRangeMake(start: .zero, duration: video.duration), of: audioTrack, at: lastTime)
                }
                lastTime = CMTimeAdd(lastTime, video.duration)

                // Obtain video dimensions and update the max size if necessary
                let videoSize = videoTrack.naturalSize.applying(videoTrack.preferredTransform)
                let videoRect = CGRect(x: 0, y: 0, width: abs(videoSize.width), height: abs(videoSize.height))
                if videoRect.width > maxVideoSize.width {
                    maxVideoSize.width = videoRect.width
                }
                if videoRect.height > maxVideoSize.height {
                    maxVideoSize.height = videoRect.height
                }

                // Create and configure the text layer for this segment
                let textLayer = CATextLayer()
                textLayer.string = "TESTING"
                textLayer.foregroundColor = UIColor.white.cgColor
                textLayer.backgroundColor = UIColor.clear.cgColor
                textLayer.fontSize = 100
                textLayer.shadowOpacity = 0.5
                textLayer.alignmentMode = .center
                textLayer.contentsScale = UIScreen.main.scale // Ensures text is sharp
                textLayer.isWrapped = true // Allows text wrapping if needed

                // Calculate frame for centrally aligned text
                let textHeight: CGFloat = 120 // Adjust as needed
                let textWidth: CGFloat = videoRect.width // Padding from edges
                let xPos = (videoRect.width - textWidth) / 2
                let yPos = (videoRect.height - textHeight) / 2
                textLayer.frame = CGRect(x: xPos, y: yPos, width: textWidth, height: textHeight)
                print(textLayer.frame)

                // Create a parent layer for video and text
                let parentLayer = CALayer()
                let videoLayer = CALayer()
                parentLayer.frame = videoRect
                videoLayer.frame = videoRect
                textLayer.zPosition = 1 // Ensuring the text layer is on top
                parentLayer.addSublayer(videoLayer)
                parentLayer.addSublayer(textLayer)
                parentLayers.append(parentLayer) // Add to array

                // Add parent layer to video composition
                let videoCompositionInstruction = AVMutableVideoCompositionInstruction()
                videoCompositionInstruction.timeRange = CMTimeRangeMake(start: .zero, duration: video.duration)
                let layerInstruction = AVMutableVideoCompositionLayerInstruction(assetTrack: videoTrack)
                videoCompositionInstruction.layerInstructions = [layerInstruction]
                mainComposition.instructions.append(videoCompositionInstruction)

                count += 1
            } catch {
                print("Failed to insert track")
                return
            }
        }
    }

    let mainParentLayer = CALayer()
    mainParentLayer.frame = CGRect(x: 0, y: 0, width: maxVideoSize.width, height: maxVideoSize.height)
    for layer in parentLayers {
        mainParentLayer.addSublayer(layer)
    }

    // Set the renderSize and frameDuration of the mainComposition
    mainComposition.renderSize = maxVideoSize
    mainComposition.frameDuration = CMTime(value: 1, timescale: 30) // Assuming 30 fps
    mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: mainParentLayer, in: mainParentLayer)

    let outputUrl = NSURL.fileURL(withPath: NSTemporaryDirectory() + "mergedVid" + ".mp4")

    guard let exporter = AVAssetExportSession(asset: videoComposition, presetName: AVAssetExportPresetHighestQuality) else { return }

    exporter.videoComposition = mainComposition
    exporter.outputURL = outputUrl
    exporter.outputFileType = .mp4
    exporter.shouldOptimizeForNetworkUse = true

    exporter.exportAsynchronously {
        DispatchQueue.main.async {
            if let outputUrl = exporter.outputURL, exporter.status == .completed {
                completion(outputUrl, exporter)
            } else if let error = exporter.error {
                print("Export failed: \(error.localizedDescription)")
            }
        }
    }

    play(video: exporter.asset)
}
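For what it's worth, if I'm reading the AVVideoCompositionCoreAnimationTool documentation correctly, the layer passed as postProcessingAsVideoLayer should be an empty placeholder that the framework renders the video frames into, distinct from the animation parent layer that also holds the text. Below is a minimal sketch of that layer setup, reusing maxVideoSize and mainComposition from the function above; the layer names are illustrative, not from my actual code:

// Placeholder layer; the framework composites the video frames into it
let videoLayer = CALayer()
videoLayer.frame = CGRect(origin: .zero, size: maxVideoSize)

// Static text overlay, centered vertically across the full render width
let overlayTextLayer = CATextLayer()
overlayTextLayer.string = "TESTING"
overlayTextLayer.foregroundColor = UIColor.white.cgColor
overlayTextLayer.fontSize = 100
overlayTextLayer.alignmentMode = .center
overlayTextLayer.contentsScale = UIScreen.main.scale
overlayTextLayer.frame = CGRect(x: 0, y: (maxVideoSize.height - 120) / 2, width: maxVideoSize.width, height: 120)

// Parent layer hosts the video placeholder with the text on top
let overlayParentLayer = CALayer()
overlayParentLayer.frame = CGRect(origin: .zero, size: maxVideoSize)
overlayParentLayer.addSublayer(videoLayer)
overlayParentLayer.addSublayer(overlayTextLayer)

mainComposition.animationTool = AVVideoCompositionCoreAnimationTool(
    postProcessingAsVideoLayer: videoLayer, // the placeholder, not the parent
    in: overlayParentLayer
)

A related point I'm unsure about: in my loop every AVMutableVideoCompositionInstruction starts at .zero, but I believe the instructions are supposed to tile the merged timeline without overlapping, i.e. each segment's instruction would cover CMTimeRangeMake(start: segmentStart, duration: video.duration), where segmentStart is the value of lastTime before it is advanced.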
