It's frustrating that the system's built-in recorder has this problem at all.
So I started digging around for a fix, and this is what I ended up with:
// These snippets need AVFoundation (authorization check, export) and
// MobileCoreServices (kUTTypeMovie).
#import <AVFoundation/AVFoundation.h>
#import <MobileCoreServices/MobileCoreServices.h>

- (void)useTheSystemRecordViewController {
    UIImagePickerController *imagePicker = [[UIImagePickerController alloc] init];
    imagePicker.delegate = self;
    // Check whether the device has a camera at all
    if ([UIImagePickerController isSourceTypeAvailable:UIImagePickerControllerSourceTypeCamera]) {
        // Check the camera permission
        AVAuthorizationStatus authStatus = [AVCaptureDevice authorizationStatusForMediaType:AVMediaTypeVideo];
        if (authStatus == AVAuthorizationStatusRestricted || authStatus == AVAuthorizationStatusDenied) {
            return;
        }
        // Use the camera as the source
        imagePicker.sourceType = UIImagePickerControllerSourceTypeCamera;
        // Record movies rather than stills
        imagePicker.mediaTypes = @[(NSString *)kUTTypeMovie];
        // Maximum recording length, in seconds
        imagePicker.videoMaximumDuration = 10.0;
        // Presentation style of the modal picker
        imagePicker.modalPresentationStyle = UIModalPresentationOverFullScreen;
        [self presentViewController:imagePicker animated:YES completion:nil];
    }
}
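The post doesn't show the picker's delegate callback, but the recorded clip presumably reaches -encodeVideoOrientation: along these lines (a minimal sketch; only the standard UIImagePickerControllerMediaURL info key and UIImagePickerControllerDelegate methods are assumed):
- (void)imagePickerController:(UIImagePickerController *)picker
didFinishPickingMediaWithInfo:(NSDictionary<NSString *, id> *)info {
    // File URL of the movie the user just recorded
    NSURL *movieURL = info[UIImagePickerControllerMediaURL];
    [picker dismissViewControllerAnimated:YES completion:nil];
    if (movieURL != nil) {
        [self encodeVideoOrientation:movieURL];
    }
}

- (void)imagePickerControllerDidCancel:(UIImagePickerController *)picker {
    [picker dismissViewControllerAnimated:YES completion:nil];
}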
- (void)encodeVideoOrientation:(NSURL *)anOutputFileURL {
    AVURLAsset *videoAsset = [[AVURLAsset alloc] initWithURL:anOutputFileURL options:nil];
    AVAssetExportSession *assetExport = [[AVAssetExportSession alloc] initWithAsset:videoAsset
                                                                         presetName:AVAssetExportPresetMediumQuality];
    NSString *mp4Path = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/Movie.mp4"];
    assetExport.outputURL = [NSURL fileURLWithPath:mp4Path];
    assetExport.shouldOptimizeForNetworkUse = YES;
    assetExport.outputFileType = AVFileTypeMPEG4;
    assetExport.videoComposition = [self getVideoComposition:videoAsset];
    [assetExport exportAsynchronouslyWithCompletionHandler:^{
        switch ([assetExport status]) {
            case AVAssetExportSessionStatusFailed:
                NSLog(@"AVAssetExportSessionStatusFailed!");
                break;
            case AVAssetExportSessionStatusCancelled:
                NSLog(@"Export canceled");
                break;
            case AVAssetExportSessionStatusCompleted:
                NSLog(@"Successful!");
                break;
            default:
                break;
        }
    }];
}
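One practical caveat worth adding (my note, not from the original post): AVAssetExportSession fails if a file already exists at outputURL, so on repeated recordings it helps to delete the previous Movie.mp4 before creating the session, for example at the top of -encodeVideoOrientation::
// Remove any leftover file from a previous export so the session doesn't fail
NSString *mp4Path = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/Movie.mp4"];
if ([[NSFileManager defaultManager] fileExistsAtPath:mp4Path]) {
    [[NSFileManager defaultManager] removeItemAtPath:mp4Path error:nil];
}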
#pragma mark - Fixing the orientation of the saved recording
- (AVMutableVideoComposition *)getVideoComposition:(AVAsset *)asset
{
    AVAssetTrack *videoTrack = [[asset tracksWithMediaType:AVMediaTypeVideo] objectAtIndex:0];
    AVMutableComposition *composition = [AVMutableComposition composition];
    AVMutableVideoComposition *videoComposition = [AVMutableVideoComposition videoComposition];
    CGSize videoSize = videoTrack.naturalSize;
    BOOL isPortrait_ = [self isVideoPortrait:asset];
    if (isPortrait_) {
        NSLog(@"video is portrait");
        // Swap width and height so the render size matches the rotated frame
        videoSize = CGSizeMake(videoSize.height, videoSize.width);
    }
    composition.naturalSize = videoSize;
    videoComposition.renderSize = videoSize;
    videoComposition.frameDuration = CMTimeMakeWithSeconds(1 / videoTrack.nominalFrameRate, 600);
    AVMutableCompositionTrack *compositionVideoTrack;
    compositionVideoTrack = [composition addMutableTrackWithMediaType:AVMediaTypeVideo preferredTrackID:kCMPersistentTrackID_Invalid];
    [compositionVideoTrack insertTimeRange:CMTimeRangeMake(kCMTimeZero, asset.duration) ofTrack:videoTrack atTime:kCMTimeZero error:nil];
    AVMutableVideoCompositionLayerInstruction *layerInst;
    layerInst = [AVMutableVideoCompositionLayerInstruction videoCompositionLayerInstructionWithAssetTrack:videoTrack];
    // Apply the track's preferredTransform so the exported frames are physically rotated
    [layerInst setTransform:videoTrack.preferredTransform atTime:kCMTimeZero];
    AVMutableVideoCompositionInstruction *inst = [AVMutableVideoCompositionInstruction videoCompositionInstruction];
    inst.timeRange = CMTimeRangeMake(kCMTimeZero, asset.duration);
    inst.layerInstructions = [NSArray arrayWithObject:layerInst];
    videoComposition.instructions = [NSArray arrayWithObject:inst];
    return videoComposition;
}
- (BOOL)isVideoPortrait:(AVAsset *)asset
{
    BOOL isPortrait = NO;
    NSArray *tracks = [asset tracksWithMediaType:AVMediaTypeVideo];
    if ([tracks count] > 0) {
        AVAssetTrack *videoTrack = [tracks objectAtIndex:0];
        CGAffineTransform t = videoTrack.preferredTransform;
        // Portrait
        if (t.a == 0 && t.b == 1.0 && t.c == -1.0 && t.d == 0) {
            isPortrait = YES;
        }
        // PortraitUpsideDown
        if (t.a == 0 && t.b == -1.0 && t.c == 1.0 && t.d == 0) {
            isPortrait = YES;
        }
        // LandscapeRight
        if (t.a == 1.0 && t.b == 0 && t.c == 0 && t.d == 1.0) {
            isPortrait = NO;
        }
        // LandscapeLeft
        if (t.a == -1.0 && t.b == 0 && t.c == 0 && t.d == -1.0) {
            isPortrait = NO;
        }
    }
    return isPortrait;
}
In short, the overall logic is: detect the video's orientation, run it through a re-encode (compression) pass, and rotate it into the correct orientation. Tested it myself and it works!
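If you also want the corrected file back in the camera roll (a step the original flow doesn't cover, so treat this as an assumption about your workflow), you can write the exported Movie.mp4 to the photo album from the AVAssetExportSessionStatusCompleted branch:
// Optional follow-up (assumed workflow): save the re-encoded file to the photo album
NSString *mp4Path = [NSHomeDirectory() stringByAppendingPathComponent:@"Documents/Movie.mp4"];
if (UIVideoAtPathIsCompatibleWithSavedPhotosAlbum(mp4Path)) {
    UISaveVideoAtPathToSavedPhotosAlbum(mp4Path, nil, NULL, NULL);
}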