I saw this tutorial and developed an app that takes a video and adds a square background; the video frame is resized to fit inside the square. But when I render, the output contains a portion of black screen. I've attached a screenshot and my code. Can someone help me with this?
See this image:
My code:

```swift
func videoProcess() {
    asset = AVURLAsset(url: videoAssetUrl!)
    // Make the video composition track.
    guard let compositionTrack = composition.addMutableTrack(withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
          let assetTrack = asset?.tracks(withMediaType: .video).first else {
        return
    }
    do {
        // Insert the full time range into the composition.
        let timeRange = CMTimeRange(start: .zero, duration: asset!.duration)
        try compositionTrack.insertTimeRange(timeRange, of: assetTrack, at: .zero)
        // Extract the audio track, if any.
        if let audioAssetTrack = asset?.tracks(withMediaType: .audio).first,
           let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
            try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
        }
    } catch {
        print(error)
    }

    // Composition transformation.
    compositionTrack.preferredTransform = assetTrack.preferredTransform
    //let videoInfo = orientation(from: assetTrack.preferredTransform)
    let videoSize = assetTrack.naturalSize
    // let videoSize: CGSize
    // if videoInfo.isPortrait {
    //     videoSize = CGSize(
    //         width: assetTrack.naturalSize.height,
    //         height: assetTrack.naturalSize.width)
    // } else {
    //     videoSize = assetTrack.naturalSize
    // }

    // Add layers.
    //let videoSize = getVideoSize(assetUrl: videoAssetUrl!)
    var backSize: CGSize = .zero
    if videoSize.width >= videoSize.height {
        backSize = CGSize(width: videoSize.width, height: videoSize.width)
    } else {
        backSize = CGSize(width: videoSize.height, height: videoSize.height)
    }

    let backgroundLayer = CALayer()
    backgroundLayer.frame = CGRect(origin: .zero, size: backSize)

    let videoLayer = CALayer()
    videoLayer.frame = CGRect(origin: CGPoint(x: (backSize.width - videoSize.width) / 2, y: (backSize.height - videoSize.height) / 2), size: videoSize)
    //videoLayer.backgroundColor = UIColor.green.cgColor
    backgroundLayer.backgroundColor = UIColor.red.cgColor
    //backgroundLayer.contents = UIImage(named: "img")?.cgImage
    //backgroundLayer.contentsGravity = CALayerContentsGravity.resizeAspectFill

    let outputLayer = CALayer()
    outputLayer.frame = CGRect(origin: .zero, size: backSize)
    outputLayer.addSublayer(backgroundLayer)
    outputLayer.addSublayer(videoLayer)

    // Video composition.
    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = backSize
    //videoComposition.renderScale = Float(UIScreen.main.scale)
    videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: outputLayer)

    // Video composition instruction.
    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
    videoComposition.instructions = [instruction]

    // Layer instruction.
    let layerInstruction = compositionLayerInstruction(for: compositionTrack, assetTrack: assetTrack)
    instruction.layerInstructions = [layerInstruction]

    guard let export = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality) else {
        print("Cannot create export session.")
        return
    }

    //let paths = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask)
    //let fileUrl = paths[0].appendingPathComponent("output").appendingPathExtension("mov")
    let videoName = UUID().uuidString
    let exportURL = URL(fileURLWithPath: NSTemporaryDirectory())
        .appendingPathComponent(videoName)
        .appendingPathExtension("mov")

    export.videoComposition = videoComposition
    export.outputFileType = .mov
    export.outputURL = exportURL
    export.exportAsynchronously(completionHandler: {
        DispatchQueue.main.async {
            switch export.status {
            case .completed:
                PHPhotoLibrary.shared().performChanges({
                    PHAssetChangeRequest.creationRequestForAssetFromVideo(atFileURL: exportURL)
                }) { saved, error in
                    if saved {
                        print("Export success")
                    } else {
                        print("Export Failed")
                    }
                }
            default:
                print("Export Failed")
            }
        }
    })
}
```
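The `compositionLayerInstruction(for:assetTrack:)` helper called above isn't shown in the post. For reference, here is a minimal sketch of what such a helper could look like if the goal is to center the transformed video inside the square canvas; the extra `renderSize` parameter and the centering math are assumptions, not the tutorial's actual code:

```swift
// Sketch only, not the tutorial's implementation. Centers the video track
// (after its orientation transform) inside a given render size.
// `renderSize` is an assumed extra parameter; the original helper takes
// only the two tracks.
private func compositionLayerInstruction(for track: AVCompositionTrack,
                                         assetTrack: AVAssetTrack,
                                         renderSize: CGSize) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let transform = assetTrack.preferredTransform
    // Bounding box of the video once its preferred transform is applied;
    // for portrait clips this swaps width and height.
    let transformedRect = CGRect(origin: .zero, size: assetTrack.naturalSize).applying(transform)
    // Translate so that box sits in the middle of the square canvas.
    let centering = CGAffineTransform(
        translationX: (renderSize.width - transformedRect.width) / 2 - transformedRect.minX,
        y: (renderSize.height - transformedRect.height) / 2 - transformedRect.minY)
    instruction.setTransform(transform.concatenating(centering), at: .zero)
    return instruction
}
```

Whether this matches the original helper is only a guess, but a layer instruction along these lines places the rotated video in the middle of the square render area rather than leaving part of the canvas uncovered.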
@joysolutioncat Do you still have issues with this?
Hi, I'm actually having a very similar issue to the one above. I'm using one of Ray Wenderlich's old tutorials on how to merge two videos using AVFoundation, and whenever I do, the output video is never centered. The aspect ratio of the video is fine; it's just that the video isn't centered on the screen like it's supposed to be. Any help, please?
Here is my layer instruction code:
```swift
static func orientationFromTransform(_ transform: CGAffineTransform) -> (orientation: UIImage.Orientation, isPortrait: Bool) {
    var assetOrientation = UIImage.Orientation.up
    var isPortrait = false
    if transform.a == 0 && transform.b == 1.0 && transform.c == -1.0 && transform.d == 0 {
        assetOrientation = .right
        isPortrait = true
    } else if transform.a == 0 && transform.b == -1.0 && transform.c == 1.0 && transform.d == 0 {
        assetOrientation = .left
        isPortrait = true
    } else if transform.a == 1.0 && transform.b == 0 && transform.c == 0 && transform.d == 1.0 {
        assetOrientation = .up
    } else if transform.a == -1.0 && transform.b == 0 && transform.c == 0 && transform.d == -1.0 {
        assetOrientation = .down
    }
    return (assetOrientation, isPortrait)
}

static func videoCompositionInstruction(_ track: AVCompositionTrack, asset: AVAsset) -> AVMutableVideoCompositionLayerInstruction {
    let instruction = AVMutableVideoCompositionLayerInstruction(assetTrack: track)
    let assetTrack = asset.tracks(withMediaType: AVMediaType.video)[0]
    let transform = assetTrack.preferredTransform
    let assetInfo = orientationFromTransform(transform)
    var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
    if assetInfo.isPortrait {
        scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
        let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
        instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor), at: CMTime.zero)
    } else {
        let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
        var concat = assetTrack.preferredTransform.concatenating(scaleFactor)
            .concatenating(CGAffineTransform(translationX: 0, y: UIScreen.main.bounds.width / 2))
        if assetInfo.orientation == .down {
            let fixUpsideDown = CGAffineTransform(rotationAngle: CGFloat(Double.pi))
            let windowBounds = UIScreen.main.bounds
            let yFix = assetTrack.naturalSize.height + windowBounds.height
            let centerFix = CGAffineTransform(translationX: assetTrack.naturalSize.width, y: yFix)
            concat = fixUpsideDown.concatenating(centerFix).concatenating(scaleFactor)
        }
        instruction.setTransform(concat, at: CMTime.zero)
    }
    return instruction
}
```
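Just a sketch of one possible tweak, not the tutorial's code: in the portrait branch above, the video is scaled to fit the screen width but never translated, so it can end up pinned to the top of the render area. Assuming the composition's `renderSize` is known inside this method (it isn't a parameter in the original), a vertical centering translation could be added like this:

```swift
// Sketch only: possible centering for the portrait branch, assuming
// `renderSize` is the videoComposition.renderSize used for export.
if assetInfo.isPortrait {
    let scaleToFitRatio = renderSize.width / assetTrack.naturalSize.height
    let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
    // After rotation and scaling, the portrait footprint is
    // renderSize.width wide and naturalSize.width * ratio tall.
    let scaledHeight = assetTrack.naturalSize.width * scaleToFitRatio
    let centerY = (renderSize.height - scaledHeight) / 2
    let transform = assetTrack.preferredTransform
        .concatenating(scaleFactor)
        .concatenating(CGAffineTransform(translationX: 0, y: centerY))
    instruction.setTransform(transform, at: .zero)
}
```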
@samisays11 Do you still have issues with this?
I have the same issue. Can you help?
dekple (July 22, 2020, 2:52am):
samisays11:

```swift
var scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.width
if assetInfo.isPortrait {
    scaleToFitRatio = UIScreen.main.bounds.width / assetTrack.naturalSize.height
    let scaleFactor = CGAffineTransform(scaleX: scaleToFitRatio, y: scaleToFitRatio)
    instruction.setTransform(assetTrack.preferredTransform.concatenating(scaleFactor), at: CMTime.zero)
```
I am also having this issue
@attari16 @dekple Do you still have issues with this?
@shogunkaramazov / @marinbenc Yes, I'm having the same issue. I followed the same tutorial.
Here is my output:
Here is my code:
```swift
func exportVideo(videoURL: URL? = nil, asset: AVAsset? = nil) {
    var asset = asset
    if asset == nil, videoURL != nil {
        asset = AVAsset(url: videoURL!)
    }
    guard let avAsset = asset else { return }

    let composition: AVMutableComposition = AVMutableComposition()
    guard let compositionVideoTrack = composition.addMutableTrack(
            withMediaType: .video, preferredTrackID: kCMPersistentTrackID_Invalid),
          let videoTrack = avAsset.tracks(withMediaType: AVMediaType.video).first
    else {
        print("Something is wrong with the asset.")
        return
    }

    do {
        let timeRange = CMTimeRange(start: .zero, duration: avAsset.duration)
        try compositionVideoTrack.insertTimeRange(timeRange, of: videoTrack, at: .zero)
        if !btnMute.isSelected {
            if let audioAssetTrack = avAsset.tracks(withMediaType: .audio).first,
               let compositionAudioTrack = composition.addMutableTrack(withMediaType: .audio, preferredTrackID: kCMPersistentTrackID_Invalid) {
                try compositionAudioTrack.insertTimeRange(timeRange, of: audioAssetTrack, at: .zero)
            }
        }
    } catch {
        print(error)
        return
    }

    compositionVideoTrack.preferredTransform = videoTrack.preferredTransform
    let videoInfo = orientation(from: videoTrack.preferredTransform)
    let videoSize: CGSize
    if videoInfo.isPortrait {
        videoSize = CGSize(width: videoTrack.naturalSize.height, height: videoTrack.naturalSize.width)
    } else {
        videoSize = videoTrack.naturalSize
    }

    // videoSize will be 1920*1080 for a landscape video.
    // viewForDraw's size will be (390, 693.33) on the iPhone 12 simulator.
    let newLayerSize = fitIn(originalSize: videoSize, availableSize: viewForDraw.frame.size)

    let backgroundLayer = CALayer()
    backgroundLayer.frame = CGRect(origin: .zero, size: newLayerSize)
    backgroundLayer.backgroundColor = UIColor.clear.cgColor
    backgroundLayer.contents = canvasView.toImage().cgImage
    backgroundLayer.contentsGravity = .resizeAspectFill

    let videoLayer = CALayer()
    let videoY = (newLayerSize.height - videoSize.height) / 2.0
    videoLayer.frame = CGRect(origin: CGPoint(x: 0, y: videoY), size: videoSize)
    videoLayer.backgroundColor = UIColor.clear.cgColor

    let overlayLayer = CALayer()
    overlayLayer.frame = CGRect(origin: .zero, size: newLayerSize)
    overlayLayer.backgroundColor = UIColor.yellow.cgColor

    let outputLayer = CALayer()
    outputLayer.frame = CGRect(origin: .zero, size: newLayerSize)
    outputLayer.addSublayer(backgroundLayer)
    outputLayer.addSublayer(videoLayer)
    // outputLayer.addSublayer(overlayLayer)

    let videoComposition = AVMutableVideoComposition()
    videoComposition.renderSize = newLayerSize
    videoComposition.frameDuration = CMTime(value: 1, timescale: 30)
    videoComposition.animationTool = AVVideoCompositionCoreAnimationTool(postProcessingAsVideoLayer: videoLayer, in: outputLayer)

    let instruction = AVMutableVideoCompositionInstruction()
    instruction.timeRange = CMTimeRange(start: .zero, duration: composition.duration)
    videoComposition.instructions = [instruction]
    let layerInstruction = compositionLayerInstruction(for: compositionVideoTrack, assetTrack: videoTrack)
    instruction.layerInstructions = [layerInstruction]

    guard let export = AVAssetExportSession(asset: composition, presetName: AVAssetExportPresetHighestQuality)
    else {
        print("Cannot create export session.")
        return
    }

    export.videoComposition = videoComposition
    let mutableVideoURL = FileManager.default.temporaryDirectory.appendingPathComponent("finalVideo").appendingPathExtension("mp4")
    export.outputFileType = AVFileType.mp4
    export.outputURL = mutableVideoURL
    export.timeRange = CMTimeRange(start: .zero, duration: CMTime(value: CMTimeValue(min(Double(15.0), avAsset.duration.seconds)), timescale: 1)) // trim video to 15 seconds
    removeFileAtURLIfExists(url: mutableVideoURL)

    export.exportAsynchronously(completionHandler: {
        timer.invalidate()
        DispatchQueue.main.async {
            switch export.status {
            case AVAssetExportSessionStatus.failed:
                let errorMessage = export.error?.localizedDescription ?? "Something went wrong"
                print("failed \(errorMessage)")
            case AVAssetExportSessionStatus.cancelled:
                print("cancelled export session")
            case AVAssetExportSessionStatus.unknown:
                print("unknown status of export session")
            case AVAssetExportSessionStatus.waiting:
                print("export session status: waiting")
            case AVAssetExportSessionStatus.exporting:
                print("export session status: exporting \(export.progress)")
            case .completed:
                print("complete")
            default:
                print("-----Mutable video exportation complete.")
            }
        }
    })
}
```
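The `fitIn(originalSize:availableSize:)` helper used above isn't included in the post. As a hypothetical sketch only, an aspect-fit implementation along these lines would match the sizes mentioned in the comments (a 1920x1080 video in a 390x693.33 view scales to roughly 390x219):

```swift
// Hypothetical sketch of `fitIn`, which is referenced above but not shown:
// scale `originalSize` to the largest size that fits inside `availableSize`
// while preserving its aspect ratio.
func fitIn(originalSize: CGSize, availableSize: CGSize) -> CGSize {
    let ratio = min(availableSize.width / originalSize.width,
                    availableSize.height / originalSize.height)
    return CGSize(width: originalSize.width * ratio,
                  height: originalSize.height * ratio)
}
```

One side note: because `renderSize` is taken from that fitted, point-based size (about 390 points wide), the exported video will be much lower resolution than the 1920x1080 source, which may or may not be what you want.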