// Face detection by adding an AVCaptureMetadataOutput to the AVCaptureSession.
// Create the metadata output and attach it to the capture session.
// NOTE(review): assumes self.session is already configured with a video
// input and not yet running — confirm against the caller.
self.metaDataOutput = [[AVCaptureMetadataOutput alloc] init];
if ([self.session canAddOutput:self.metaDataOutput]) {
[self.session addOutput:self.metaDataOutput];
// metadataObjectTypes may only be set AFTER the output has been added to
// the session; restrict detection to faces.
NSArray *metadataObjectTypes = @[AVMetadataObjectTypeFace]; // 3
self.metaDataOutput.metadataObjectTypes = metadataObjectTypes;
// Deliver delegate callbacks on the main queue (callback touches UI/GL
// state below).
dispatch_queue_t mainQueue = dispatch_get_main_queue();
[self.metaDataOutput setMetadataObjectsDelegate:self // 4
queue:mainQueue];
}else{
// Session refused the output (e.g. unsupported configuration); bail out.
return;
}
// Reference: https://stackoverflow.com/questions/41354698/face-detection-with-camera
// The delegate method below receives the face data. If you print it you will
// see values in the 0–1 range that may look confusing: they are normalized
// coordinates expressed with respect to the capture device's coordinate space.
// Apple's AVCaptureVideoPreviewLayer provides dedicated methods for this
// coordinate conversion, but in many scenarios (e.g. feeding the coordinates
// to OpenGL) you must convert them yourself; the code below converts them to
// familiar screen coordinates.
#pragma mark - AVCaptureMetadataOutputObjectsDelegate

/// Called with the faces detected in the current frame.
///
/// Each AVMetadataFaceObject's bounds are normalized (0–1) and expressed in
/// the capture device's coordinate space, which is rotated relative to a
/// portrait view — hence the x/y (and width/height) axis swap in the
/// conversion below. For purely on-screen use, prefer
/// -[AVCaptureVideoPreviewLayer transformedMetadataObjectForMetadataObject:];
/// the manual math here is for consumers (such as OpenGL) that need raw
/// screen-space values.
///
/// @param captureOutput   The metadata output that produced the objects.
/// @param metadataObjects Detected AVMetadataObject instances (faces only,
///                        given the metadataObjectTypes configured above).
/// @param connection      The connection the metadata arrived on.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputMetadataObjects:(NSArray *)metadataObjects fromConnection:(AVCaptureConnection *)connection{
    // FIX: the original iterated an undefined variable `faces`; the delegate
    // parameter carrying the detected objects is `metadataObjects`.
    for (AVMetadataFaceObject *faceObject in metadataObjects) {
        // Normalized (0–1) bounds in the metadata (capture-device) space.
        CGRect face = faceObject.bounds;
        // Convert to view coordinates. The metadata space is rotated with
        // respect to the portrait view, so x<->y and width<->height swap
        // before scaling by the view's size.
        CGRect facePreviewBounds = CGRectMake(face.origin.y * self.bounds.size.width,
                                              face.origin.x * self.bounds.size.height,
                                              face.size.width * self.bounds.size.height,
                                              face.size.height * self.bounds.size.width);
        NSLog(@"%@", NSStringFromCGRect(facePreviewBounds)); // debug only — remove for release
        // Optional: size the OpenGL viewport to the face rect. OpenGL's
        // origin is the bottom-left corner, so the y coordinate is flipped.
        // Comment this out if OpenGL rendering is not needed.
        CGFloat viewY = self.bounds.size.height - (face.origin.x * self.bounds.size.height) - face.size.width * self.bounds.size.height;
        glViewport(face.origin.y * self.bounds.size.width, viewY, face.size.width * self.bounds.size.height, face.size.height * self.bounds.size.width);
    }
}