本文介绍了iOS基于CoreImage的CIDetector进行人脸检测, 并附上相关demo。
-
demo 展示效果:
demo地址
-->swift完整项目2020持续更新完善
- 注意本章的难点在于绘制人脸框, CIDetector对Image检测出来的frame坐标系是y轴向上的, 而绘制的人脸框需要的坐标系是y轴向下的.😝 网上很多博客在转换坐标系这块没有处理好, 详情请参考demo.
CoreImage
是一种图像处理和分析技术, 为静态和视频图像提供近实时处理。它使用GPU或CPU渲染路径, 对Core Graphics、Core Video和Image I/O框架中的图像数据类型进行操作。
CoreImage
封装了低级图形处理的细节(OpenGL,OpenGL ES或Metal, GCD), 直接提供API, 为开发者提高了开发效率.
详细流程可以查看demo, 而且代码注释已经十分清晰:
1. 展示Controller
import UIKit
import SnapKit
/// Demo screen: lets the user pick a photo from the library and run
/// CIDetector-based face detection on it, showing the face count.
class FaceDetectionViewController: BaseViewController {

    /// Shows the picked photo; red face boxes are overlaid on top of it.
    lazy var imageView: UIImageView = {
        let view = UIImageView.init()
        view.backgroundColor = .lightGray
        view.contentMode = .scaleAspectFit
        return view
    }()

    /// Displays how many faces the last detection run found.
    lazy var faceCountLabel: UILabel = {
        let label = UILabel.init()
        label.backgroundColor = .green
        label.text = "识别到0个人脸"
        return label
    }()

    override func viewDidLoad() {
        super.viewDidLoad()
        self.view.backgroundColor = .white
        setUI()
    }

    /// Builds the view hierarchy and lays everything out with SnapKit.
    private func setUI() {
        let pickButton = UIButton(type: .custom)
        pickButton.setTitle("选择照片", for: .normal)
        pickButton.backgroundColor = .orange
        pickButton.addTarget(self, action: #selector(choosePic), for: .touchUpInside)

        let detectButton = UIButton(type: .custom)
        detectButton.setTitle("开始检测", for: .normal)
        detectButton.backgroundColor = .orange
        detectButton.addTarget(self, action: #selector(startDetect), for: .touchUpInside)

        view.addSubviews([imageView, pickButton, detectButton, faceCountLabel])

        imageView.snp.makeConstraints { make in
            make.left.equalToSuperview().offset(autoWidth(30))
            make.top.equalToSuperview().offset(autoWidth(30) + LayoutTool.topSafeInset + 44)
            make.right.equalToSuperview().offset(autoWidth(-30))
            make.height.equalTo(autoHeihgt(300))
        }
        faceCountLabel.snp.makeConstraints { make in
            make.top.equalTo(imageView.snp.bottom).offset(5)
            make.left.right.equalTo(imageView)
            make.height.equalTo(autoHeihgt(20))
        }
        pickButton.snp.makeConstraints { make in
            make.top.equalTo(faceCountLabel.snp.bottom).offset(30)
            make.left.right.equalTo(imageView)
            make.height.equalTo(autoHeihgt(45))
        }
        detectButton.snp.makeConstraints { make in
            make.top.equalTo(pickButton.snp.bottom).offset(20)
            make.left.right.equalTo(imageView)
            make.height.equalTo(autoHeihgt(45))
        }
    }

    // MARK: - Run face detection on the current image
    @objc private func startDetect() {
        // FaceRecognition invokes the callback synchronously with the face count.
        FaceRecognition.startRecognize(imageView: imageView) { count in
            print("**********************************************************检测到\(count)个人脸")
            self.faceCountLabel.text = "检测到\(count)个人脸"
        }
    }

    // MARK: - Pick a photo from the library
    @objc private func choosePic() {
        // Bail out when the photo library source is unavailable.
        guard UIImagePickerController.isSourceTypeAvailable(.photoLibrary) else {
            print("用户没开访问相册权限, 请到设置里面开启")
            return
        }
        // Configure and present the system photo picker.
        let picker = UIImagePickerController()
        picker.sourceType = .photoLibrary
        picker.delegate = self
        present(picker, animated: true, completion: nil)
    }
}
// MARK: - Photo picker delegate
extension FaceDetectionViewController: UIImagePickerControllerDelegate, UINavigationControllerDelegate {
    /// Called when the user finishes picking a photo: shows it in `imageView`
    /// and dismisses the picker.
    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [UIImagePickerController.InfoKey : Any]) {
        // Grab the original (non-edited) image from the info dictionary.
        guard let selectorImage = info[UIImagePickerController.InfoKey.originalImage] as? UIImage else { return }
        imageView.image = selectorImage
        picker.dismiss(animated: true, completion: nil)
    }
    /// Called when the user cancels the picker. (The original comment here said
    /// "called after selection completes", which belonged to the method above.)
    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        picker.dismiss(animated: true, completion: nil)
    }
}
2. 检测类
import UIKit
/// Runs Core Image face detection on a `UIImageView`'s image and overlays a
/// red-bordered box for each detected face.
///
/// The coordinate math assumes the image view uses `.scaleAspectFit`:
/// CIDetector reports face bounds in image pixels with a bottom-left origin,
/// which must be scaled, y-flipped, and offset by the letterbox margins to
/// land correctly in the (top-left-origin) view coordinate space.
class FaceRecognition: NSObject {
    /// Detects faces in `imageView.image`, reports the count, and draws boxes.
    ///
    /// - Parameters:
    ///   - imageView: View whose image is analyzed; previous boxes are removed.
    ///   - result: Invoked synchronously with the number of detected faces.
    static func startRecognize(imageView: UIImageView, result: @escaping (_ faceCount: Int) -> ()) {
        // Remove boxes left over from a previous detection run.
        imageView.removeSubviews()
        guard let image = imageView.image else { return }
        // NOTE(review): CIImage(image:) does not carry UIImage.imageOrientation,
        // so rotated photos (e.g. straight from the camera) may be detected
        // incorrectly. Consider passing CIDetectorImageOrientation to
        // features(in:options:) — TODO confirm with a camera photo.
        guard let ciImage = CIImage(image: image) else { return }
        // High accuracy: slower, but appropriate for still images.
        let options = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
        // Detector configured for faces.
        let detector = CIDetector(ofType: CIDetectorTypeFace, context: CIContext(), options: options)
        // Run detection.
        guard let faceResultArr = detector?.features(in: ciImage) else { return }
        // Report the count first, then draw the boxes.
        result(faceResultArr.count)
        addBorderView(imageView: imageView, image: image, faceResultArr: faceResultArr)
    }

    /// Converts each face's CIDetector bounds into `imageView`'s coordinate
    /// space and adds a red box view for it.
    static func addBorderView(imageView: UIImageView, image: UIImage, faceResultArr: [CIFeature]) {
        // Container that holds all the red boxes, covering the whole image view.
        let borderView = UIView(frame: CGRect(x: 0, y: 0, width: imageView.frame.width, height: imageView.frame.height))
        imageView.addSubview(borderView)

        // These values are identical for every face, so compute them once
        // (they were previously recomputed inside the loop).
        // scale: image pixels per view point under .scaleAspectFit.
        let scale = getScale(imageView: imageView, image: image)
        // Letterbox offsets: frame of the aspect-fitted image inside the view.
        let imageY = (imageView.frame.height - (image.size.height / scale)) * 0.5
        let imageX = (imageView.frame.width - (image.size.width / scale)) * 0.5
        let imageH = image.size.height / scale

        for faceFeature in faceResultArr {
            print("************************************************************image 的size: \(image.size)")
            print("************************************************************imageview 的frame: \(imageView.frame)")
            print("************************************************************faceFeature 的bounds: \(faceFeature.bounds)")
            // Face rect scaled down to view points (still bottom-left origin).
            let faceOriginX: CGFloat = faceFeature.bounds.minX / scale
            let faceOriginY: CGFloat = faceFeature.bounds.minY / scale
            let faceOriginW: CGFloat = faceFeature.bounds.width / scale
            let faceOriginH: CGFloat = faceFeature.bounds.height / scale
            // Flip the y axis: Core Image's origin is bottom-left, UIKit's is top-left.
            let flippedY = imageH - faceOriginH - faceOriginY
            // Shift by the letterbox offsets to get the rect within the image view.
            let newrect = CGRect(x: faceOriginX + imageX, y: flippedY + imageY, width: faceOriginW, height: faceOriginH)
            borderView.addSubview(addRedrectangleView(rect: newrect))
            // To inspect facial features, downcast to CIFaceFeature:
            // hasLeftEyePosition / hasRightEyePosition / hasMouthPosition.
        }
    }

    // MARK: - Red box drawing
    /// Returns a transparent view with a 1-pt red border covering `rect`.
    static func addRedrectangleView(rect: CGRect) -> UIView {
        let redView = UIView(frame: rect)
        redView.layer.borderColor = UIColor.red.cgColor
        redView.layer.borderWidth = 1
        return redView
    }
}
// MARK: - Aspect-fit scale helper
extension FaceRecognition {
    /// Ratio of image pixels to view points when the image is displayed with
    /// `imageView.contentMode = .scaleAspectFit` — the larger axis ratio wins,
    /// since aspect-fit shrinks the image until both axes fit.
    static func getScale(imageView: UIImageView, image: UIImage) -> CGFloat {
        let ratioW = image.size.width / imageView.frame.size.width
        let ratioH = image.size.height / imageView.frame.size.height
        return max(ratioW, ratioH)
    }
}