//
// DetectFace.m
// Selvee
//
// Created by Leonard Loo on 13/9/14.
// Copyright (c) 2014 Selvee. All rights reserved.
//
#import "DetectFace.h"

@interface DetectFace () <AVCaptureVideoDataOutputSampleBufferDelegate>

@property (nonatomic, strong) AVCaptureSession *session;
@property (nonatomic, strong) AVCaptureVideoPreviewLayer *previewLayer;
@property (nonatomic, strong) AVCaptureVideoDataOutput *videoDataOutput;
@property (nonatomic, strong) dispatch_queue_t videoDataOutputQueue;
@property (nonatomic, strong) CIDetector *faceDetector;

@end
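
// DetectFace drives the camera through an AVCaptureSession, runs each frame through a
// CIDetector face detector on a background queue, and reports detected features to its
// delegate on the main queue, together with the video and preview boxes needed to map
// face coordinates into the preview layer.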
@implementation DetectFace

- (void)setupAVCapture {
    self.session = [AVCaptureSession new];
    [self.session setSessionPreset:AVCaptureSessionPreset640x480];

    // Select a video device and make an input. Default to the back camera, then
    // prefer the front camera if one is available.
    // In a real app you would use whichever camera the user chose.
    AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
    if ([UIImagePickerController isCameraDeviceAvailable:UIImagePickerControllerCameraDeviceFront]) {
        for (AVCaptureDevice *d in [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo]) {
            if ([d position] == AVCaptureDevicePositionFront)
                device = d;
        }
    } else {
        NSLog(@"DetectFace: no front camera available");
        return;
    }

    NSError *error = nil;
    AVCaptureDeviceInput *deviceInput = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
    if (error != nil) {
        NSLog(@"DetectFace: could not create device input: %@", error);
        return;
    }
    if ([self.session canAddInput:deviceInput])
        [self.session addInput:deviceInput];

    // Make a video data output.
    self.videoDataOutput = [AVCaptureVideoDataOutput new];

    // We want BGRA; both Core Graphics and OpenGL work well with 'BGRA'.
    NSDictionary *rgbOutputSettings = @{(id)kCVPixelBufferPixelFormatTypeKey : @(kCMPixelFormat_32BGRA)};
    [self.videoDataOutput setVideoSettings:rgbOutputSettings];
    // Discard frames if the data output queue is blocked while we process a frame.
    [self.videoDataOutput setAlwaysDiscardsLateVideoFrames:YES];

    // Deliver sample buffers on a dedicated serial queue so the main thread stays responsive.
    self.videoDataOutputQueue = dispatch_queue_create("VideoDataOutputQueue", DISPATCH_QUEUE_SERIAL);
    [self.videoDataOutput setSampleBufferDelegate:self queue:self.videoDataOutputQueue];
    if ([self.session canAddOutput:self.videoDataOutput])
        [self.session addOutput:self.videoDataOutput];
    // Keep the connection disabled until startDetection is called.
    [[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:NO];

    self.previewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.session];
    [self.previewLayer setBackgroundColor:[[UIColor blackColor] CGColor]];
    [self.previewLayer setVideoGravity:AVLayerVideoGravityResizeAspect];
    CALayer *rootLayer = [self.previewView layer];
    [rootLayer setMasksToBounds:YES];
    [self.previewLayer setFrame:[rootLayer bounds]];
    [rootLayer addSublayer:self.previewLayer];

    [self.session startRunning];
}

- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    // Got a frame: wrap the pixel buffer in a CIImage, carrying over any attachments
    // (such as colorspace information) from the sample buffer.
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CFDictionaryRef attachments = CMCopyDictionaryOfAttachments(kCFAllocatorDefault, sampleBuffer, kCMAttachmentMode_ShouldPropagate);
    CIImage *ciImage = [[CIImage alloc] initWithCVPixelBuffer:pixelBuffer options:(__bridge NSDictionary *)attachments];
    self.outputImage = ciImage;
    if (attachments)
        CFRelease(attachments);

    /* kCGImagePropertyOrientation values
       The intended display orientation of the image. If present, this key is a CFNumber with the
       same values defined by the TIFF and EXIF specifications: an integer from 1..8 describing
       where the origin (0,0) of the image is located. If not present, a value of 1 is assumed.
       When passed to featuresInImage:options: via CIDetectorImageOrientation, detection is done
       for that orientation, but the coordinates of the returned features are still those of the
       image. */
    int exifOrientation = 6; // 6 = 0th row is on the right, 0th column is at the top: portrait mode.
    NSDictionary *imageOptions = @{CIDetectorImageOrientation : @(exifOrientation)};
    NSArray *features = [self.faceDetector featuresInImage:ciImage options:imageOptions];

    // Get the clean aperture: the rectangle within the encoded pixel dimensions
    // that represents image data valid for display.
    CMFormatDescriptionRef fdesc = CMSampleBufferGetFormatDescription(sampleBuffer);
    CGRect clap = CMVideoFormatDescriptionGetCleanAperture(fdesc, false /*originIsTopLeft == false*/);

    // This callback arrives on the video data output queue; hop to the main queue
    // before touching UIKit and notifying the delegate.
    dispatch_async(dispatch_get_main_queue(), ^(void) {
        CGSize parentFrameSize = [self.previewView frame].size;
        NSString *gravity = [self.previewLayer videoGravity];
        CGRect previewBox = [DetectFace videoPreviewBoxForGravity:gravity frameSize:parentFrameSize apertureSize:clap.size];
        if ([self.delegate respondsToSelector:@selector(detectedFaceController:features:forVideoBox:withPreviewBox:)])
            [self.delegate detectedFaceController:self features:features forVideoBox:clap withPreviewBox:previewBox];
    });
}

- (void)startDetection
{
    [self setupAVCapture];

    // Create the detector before enabling the connection so the first frames have one to use.
    // Low accuracy is sufficient for real-time tracking and much cheaper than CIDetectorAccuracyHigh.
    NSDictionary *detectorOptions = @{CIDetectorAccuracy : CIDetectorAccuracyLow};
    self.faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace context:nil options:detectorOptions];

    [[self.videoDataOutput connectionWithMediaType:AVMediaTypeVideo] setEnabled:YES];
}

- (void)stopDetection
{
    [self teardownAVCapture];
}

// Clean up the capture setup: stop the session and release what setupAVCapture created.
- (void)teardownAVCapture
{
    [self.session stopRunning];
    self.session = nil;
    self.videoDataOutput = nil;
    if (self.videoDataOutputQueue)
        self.videoDataOutputQueue = nil;
    [self.previewLayer removeFromSuperlayer];
    self.previewLayer = nil;
}

// Find where the video box is positioned within the preview layer based on the video size and
// gravity. Note that the aperture is in landscape buffer coordinates while the frame is in
// portrait view coordinates, which is why width and height are swapped when the two are compared.
+ (CGRect)videoPreviewBoxForGravity:(NSString *)gravity frameSize:(CGSize)frameSize apertureSize:(CGSize)apertureSize
{
    CGFloat apertureRatio = apertureSize.height / apertureSize.width;
    CGFloat viewRatio = frameSize.width / frameSize.height;

    CGSize size = CGSizeZero;
    if ([gravity isEqualToString:AVLayerVideoGravityResizeAspectFill]) {
        if (viewRatio > apertureRatio) {
            size.width = frameSize.width;
            size.height = apertureSize.width * (frameSize.width / apertureSize.height);
        } else {
            size.width = apertureSize.height * (frameSize.height / apertureSize.width);
            size.height = frameSize.height;
        }
    } else if ([gravity isEqualToString:AVLayerVideoGravityResizeAspect]) {
        if (viewRatio > apertureRatio) {
            size.width = apertureSize.height * (frameSize.height / apertureSize.width);
            size.height = frameSize.height;
        } else {
            size.width = frameSize.width;
            size.height = apertureSize.width * (frameSize.width / apertureSize.height);
        }
    } else if ([gravity isEqualToString:AVLayerVideoGravityResize]) {
        size.width = frameSize.width;
        size.height = frameSize.height;
    }

    // Center the video box within the frame on each axis.
    CGRect videoBox;
    videoBox.size = size;
    if (size.width < frameSize.width)
        videoBox.origin.x = (frameSize.width - size.width) / 2;
    else
        videoBox.origin.x = (size.width - frameSize.width) / 2;
    if (size.height < frameSize.height)
        videoBox.origin.y = (frameSize.height - size.height) / 2;
    else
        videoBox.origin.y = (size.height - frameSize.height) / 2;
    return videoBox;
}
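
/* Worked example (illustrative, assuming the 640x480 session preset above and a 320x568 point
   preview with AVLayerVideoGravityResizeAspect):
       apertureRatio = 480 / 640 = 0.75
       viewRatio     = 320 / 568 ≈ 0.56
   Since viewRatio < apertureRatio, the aspect-fit branch gives
       size.width  = 320
       size.height = 640 * (320 / 480) ≈ 426.7
   so the video box is 320 x 426.7, centered vertically at origin.y = (568 - 426.7) / 2 ≈ 70.7. */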

// Convert a face rectangle from (landscape, rotated) video-buffer coordinates into
// preview-layer coordinates, accounting for scaling and mirroring.
+ (CGRect)convertFrame:(CGRect)originalFrame previewBox:(CGRect)previewBox forVideoBox:(CGRect)videoBox isMirrored:(BOOL)isMirrored
{
    // Flip width/height and x/y, since the buffer is rotated 90 degrees relative to the preview.
    CGFloat temp = originalFrame.size.width;
    originalFrame.size.width = originalFrame.size.height;
    originalFrame.size.height = temp;
    temp = originalFrame.origin.x;
    originalFrame.origin.x = originalFrame.origin.y;
    originalFrame.origin.y = temp;

    // Scale coordinates so they fit in the preview box, which may be scaled.
    CGFloat widthScaleBy = previewBox.size.width / videoBox.size.height;
    CGFloat heightScaleBy = previewBox.size.height / videoBox.size.width;
    originalFrame.size.width *= widthScaleBy;
    originalFrame.size.height *= heightScaleBy;
    originalFrame.origin.x *= widthScaleBy;
    originalFrame.origin.y *= heightScaleBy;

    // Offset into the preview box, mirroring horizontally for the front camera.
    if (isMirrored) {
        originalFrame = CGRectOffset(originalFrame, previewBox.origin.x + previewBox.size.width - originalFrame.size.width - (originalFrame.origin.x * 2), previewBox.origin.y);
    } else {
        originalFrame = CGRectOffset(originalFrame, previewBox.origin.x, previewBox.origin.y);
    }
    return originalFrame;
}
@end
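
/* Usage sketch (hypothetical; the exact DetectFace.h interface and delegate protocol are
   assumptions inferred from how previewView, delegate, and the callback are used above):

       DetectFace *detector = [DetectFace new];
       detector.previewView = self.cameraView;   // the UIView the preview layer is added to
       detector.delegate = self;
       [detector startDetection];

       // Delegate callback: map each detected face into preview coordinates.
       - (void)detectedFaceController:(DetectFace *)controller
                             features:(NSArray *)features
                          forVideoBox:(CGRect)videoBox
                       withPreviewBox:(CGRect)previewBox {
           for (CIFaceFeature *face in features) {
               CGRect faceRect = [DetectFace convertFrame:face.bounds
                                               previewBox:previewBox
                                              forVideoBox:videoBox
                                               isMirrored:YES]; // front-camera previews are mirrored
               // ... position a highlight view at faceRect ...
           }
       }

       [detector stopDetection]; // stops the session via teardownAVCapture
*/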