iOS AVFoundation动态人脸识别功能


一、AVCaptureSession:设备输入数据管理对象

  • 可以根据AVCaptureSession创建对应的 AVCaptureDeviceInput、AVCaptureVideoDataOutput 对象
  • 创建出来的Input、Output对象会被添加到AVCaptureSession中管理,代表输入、输出数据对象,它配置抽象硬件设备的ports。
1
2
3
4
5
6
7
// 1. Create the capture session that coordinates camera input and frame output.
AVCaptureSession *captureSession = [[AVCaptureSession alloc] init];
// Prefer a fixed 640x480 resolution when the hardware supports it; otherwise
// the session keeps its default preset.
if ([captureSession canSetSessionPreset:AVCaptureSessionPreset640x480]) {
    captureSession.sessionPreset = AVCaptureSessionPreset640x480;
}
self.session = captureSession;

二、AVCaptureDevice:代表硬件设备

  • 可以从这个类中获取手机硬件的照相机、声音传感器等
  • 当我们在应用程序中需要改变一些硬件设备的属性(切换摄像头、闪光模式改变、相机聚焦改变)的时候必须要先为设备加锁,修改完成后解锁。
1
2
3
4
5
6
7
8
9
10
11
12
// 4. Swap the capture input: remove the old camera input, add the new one.
// 4.1 Begin an atomic batch of *session* configuration changes.
//     NOTE(review): beginConfiguration() is not a device lock — locking a
//     device (for focus/flash/etc.) is lockForConfiguration() on AVCaptureDevice.
session.beginConfiguration()

// 4.2 Remove the previous input from the session.
session.removeInput(deviceIn)

// 4.3 Add the new input.
//     NOTE(review): addInput raises if the input cannot be added; consider
//     checking canAddInput(_:) first — confirm with the caller's requirements.
session.addInput(newVideoInput)

// 4.4 Commit the batch; the session applies all changes atomically.
session.commitConfiguration()
1
2
3
4
5
6
7
8
9
10
11
12
// 2. Find the front-facing camera.
// NOTE(review): -devicesWithMediaType: is deprecated since iOS 10;
// AVCaptureDeviceDiscoverySession is the modern replacement.
AVCaptureDevice *captureDevice = nil;
NSArray *cameras = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *camera in cameras) {
    if (camera.position == AVCaptureDevicePositionFront) {
        captureDevice = camera;
        break;  // first front camera wins; no need to keep scanning
    }
}
if (!captureDevice) {
    // No front camera on this device — surface a toast and abort setup.
    [DLLoading DLToolTipInWindow:@"无前置摄像头!"];
    return;
}

三、AVCaptureDeviceInput设备输入数据管理对象

  • 可以根据AVCaptureDevice创建对应的AVCaptureDeviceInput对象
  • 该对象将会被添加到AVCaptureSession中管理,代表输入设备,它配置抽象硬件设备的ports,常用的有麦克风、相机等
1
2
3
4
5
6
7
// 3. Create the device input that feeds camera frames into the session.
NSError *error = nil;
AVCaptureDeviceInput *captureInput = [AVCaptureDeviceInput deviceInputWithDevice:captureDevice error:&error];
if (!captureInput) {
    // Cocoa convention: test the returned object, not the NSError pointer —
    // the error out-parameter is only meaningful when the call actually fails.
    [DLLoading DLToolTipInWindow:@"创建输入数据对象错误"];
    return;
}

四、AVCaptureOutput输出数据

  • 输出的可以是图片(AVCaptureStillImageOutput,iOS 10 后由 AVCapturePhotoOutput 取代)、视频文件(AVCaptureMovieFileOutput),也可以是逐帧视频数据(AVCaptureVideoDataOutput,本文所用)
1
2
3
4
5
6
7
// 4. Create the per-frame video data output.
AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];
// Drop frames that arrive while the delegate is still busy so the pipeline
// never backs up behind slow face-detection work.
captureOutput.alwaysDiscardsLateVideoFrames = YES;
// Deliver sample buffers on a private serial queue (must not be the main queue).
[captureOutput setSampleBufferDelegate:self queue:dispatch_queue_create("cameraQueue", NULL)];

// Request BGRA pixel buffers — the layout CoreImage/CoreGraphics consume directly.
// Modern literal syntax replaces dictionaryWithObject:forKey:/numberWithUnsignedInt:.
NSDictionary *videoSettings = @{(NSString *)kCVPixelBufferPixelFormatTypeKey : @(kCVPixelFormatType_32BGRA)};
[captureOutput setVideoSettings:videoSettings];

五、添加输入、输出数据对象到session中

1
2
3
4
5
6
7
// 5. Attach the camera input and the frame output to the session.
// The canAdd checks guard against incompatible preset/format combinations;
// on failure the attach is silently skipped.
if ([captureSession canAddInput:captureInput]) {
[captureSession addInput:captureInput];
}
if ([captureSession canAddOutput:captureOutput]) {
[captureSession addOutput:captureOutput];
}

六、AVCaptureVideoPreviewLayer创建实时预览图层

  • 我们手机的照片以及视频是怎样显示在手机屏幕上的呢?就是通过把这个对象添加到 UIView 的 layer 上来实现的。
1
2
3
4
5
6
7
// 6. Create the live-preview layer that renders camera frames on screen.
AVCaptureVideoPreviewLayer *previewlayer = [AVCaptureVideoPreviewLayer layerWithSession:captureSession];
// Match the preview rotation to the current UI orientation.
// NOTE(review): statusBarOrientation is UIKit state — must be read on the main thread; confirm this runs there.
[previewlayer connection].videoOrientation = (AVCaptureVideoOrientation)[[UIApplication sharedApplication] statusBarOrientation];
self.view.layer.masksToBounds = YES;
// Center a 200x200 preview window horizontally, 90pt from the top.
// kMainScreenWidth is a project macro — presumably the main screen width; verify.
previewlayer.frame = CGRectMake((kMainScreenWidth-200)/2, 90, 200, 200);
// Fill the square by cropping overflow instead of letterboxing.
previewlayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.scanView insertPreviewLayer:previewlayer];

人脸检测器

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
/*
CIDetector是Core Image框架中提供的一个识别类,包括对人脸、形状、条码、文本的识别,本文主要介绍人脸特征识别。
人脸识别功能不单单可以对人脸进行获取,还可以获取眼睛和嘴等面部特征信息。但是CIDetector不包括面纹编码提取,也就是说CIDetector只能判断是不是人脸,而不能判断这张人脸是谁的,比如说面部打卡这种功能是实现不了的。

CIDetectorTypeFace:
// 人脸识别探测器类型
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeFace NS_AVAILABLE(10_7, 5_0);
// 矩形检测探测器类型
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeRectangle NS_AVAILABLE(10_10, 8_0);
// 条码检测探测器类型
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeQRCode NS_AVAILABLE(10_10, 8_0);
// 文本检测探测器类型
#if __OBJC2__
CORE_IMAGE_EXPORT NSString* const CIDetectorTypeText NS_AVAILABLE(10_11, 9_0);
#endif
*/

#pragma mark - 人脸检测器
/// Lazily builds a CIDetector configured for high-accuracy face detection.
/// Detector creation is expensive, so the instance is cached in the
/// _detector ivar and reused for every frame.
- (CIDetector *)detector{
    if (_detector == nil){
        CIContext *context = [CIContext contextWithOptions:nil];
        // Modern dictionary literal replaces dictionaryWithObject:forKey:.
        // CIDetectorAccuracyHigh trades speed for detection quality.
        NSDictionary *options = @{CIDetectorAccuracy : CIDetectorAccuracyHigh};
        _detector = [CIDetector detectorOfType:CIDetectorTypeFace context:context options:options];
    }
    return _detector;
}

检测人脸照片

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
#pragma mark - 检测人脸照片
/// Crops the captured frame to a 480x480 square and returns it as a UIImage
/// only when a face with both eye positions and a mouth position is detected;
/// returns nil otherwise.
- (UIImage *)getFaceImageFromSampleBuffer:(CMSampleBufferRef) sampleBuffer{
// Wrap the raw pixel buffer in a CIImage (no pixel copy).
CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
// NOTE(review): creating a fresh CIContext for every frame is expensive —
// consider caching one alongside the detector.
CIContext *temporaryContext = [CIContext contextWithOptions:nil];
CGImageRef videoImage;
// Crop a 480x480 square out of the frame; the offsets assume the 640x480
// session preset configured earlier — TODO confirm.
// NOTE(review): statusBarOrientation is a UIKit call and this method runs on
// the camera queue — reading it off the main thread should be verified.
if ([[UIApplication sharedApplication] statusBarOrientation] == UIInterfaceOrientationPortrait) {
videoImage = [temporaryContext createCGImage:ciImage fromRect:CGRectMake(0, 80, 480, 480)];
}else{
videoImage = [temporaryContext createCGImage:ciImage fromRect:CGRectMake(80, 0, 480, 480)];
}
UIImage *resultImg = [[UIImage alloc] initWithCGImage:videoImage];
// createCGImage follows the Create rule: we own videoImage and must release it.
CGImageRelease(videoImage);

// Face detection over the cropped image.
CIImage *resultCmg = [[CIImage alloc] initWithCGImage:resultImg.CGImage];
// linq_firstOrNil — presumably a LINQ-style NSArray category (first element or nil); verify dependency.
CIFaceFeature * faceFeature = [self.detector featuresInImage:resultCmg].linq_firstOrNil;
// Accept only a "complete" face: both eyes and a mouth were located.
if (faceFeature && faceFeature.hasLeftEyePosition && faceFeature.hasRightEyePosition && faceFeature.hasMouthPosition) {
return resultImg;
}
return nil;
}

代理方法

  • 获取到outputSampleBuffer 后进行人脸识别操作。
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
#pragma mark - AVCaptureVideoDataOutputSampleBufferDelegate
/// Called on the camera queue for every captured video frame.
/// Processes at most one frame at a time (guarded by isDetecting); on a
/// detected face, stops the session and hands the image to the view model.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection{
if (!self.isDetecting) {
// Re-entrancy guard: skip incoming frames while one is being processed.
// NOTE(review): isDetecting is written on the camera queue here and on the
// main queue below without synchronization — confirm this race is acceptable.
self.isDetecting = YES;
// NOTE(review): statusBarOrientation is UIKit state read off the main thread — verify.
[connection setVideoOrientation:(AVCaptureVideoOrientation)[[UIApplication sharedApplication] statusBarOrientation]];
UIImage *img = [self getFaceImageFromSampleBuffer:sampleBuffer];
// timeoutTime > 2 gates acceptance but is not incremented in this method —
// presumably advanced by a timer elsewhere. TODO confirm.
if (img && self.timeoutTime > 2) {
dispatch_async(dispatch_get_main_queue(), ^{
// Session teardown and UI updates happen on the main queue.
[self.session stopRunning];
self.isDetecting = NO;
self.timeoutTime = 0;
[self.scanView startAnimating];
// Hand the detected face image to the view model for recognition.
[self.viewModel faceScanWithImg:img];
});
}else{
// No complete face (or still inside the timeout window): allow the next frame.
self.isDetecting = NO;
}
}
}