I’m using Google’s MLKit and MLKitFaceDetection for face detection. This code does detect a face in real time from the camera session, but I would like it to detect a centered face and give the user instructions on how to position their face so it is centered. I’m sharing the view controller code where this needs to be implemented. In addition, I would like a red square to be drawn whenever a face is detected:
import UIKit
import AVFoundation
import MLKit
import MLKitFaceDetection
class FaceProgressViewController: UIViewController {
@IBOutlet weak var progressBarView: CircularProgressBarView!
@IBOutlet weak var previewView: PreviewView!
private let captureSession = AVCaptureSession()
private let captureOutput = AVCapturePhotoOutput()
private let sessionQueue = DispatchQueue(label: "capture_queue")
private var options = FaceDetectorOptions()
private let distanceToCamera: CGFloat = 0.0
private var faceDetector: FaceDetector?
override func viewDidLoad() {
super.viewDidLoad()
options.performanceMode = .accurate
options.landmarkMode = .all
options.classificationMode = .all
previewView.layer.cornerRadius = previewView.frame.size.width / 2
previewView.clipsToBounds = true
setup()
}
private func setup() {
previewView.session = captureSession
sessionQueue.async {
self.setupSession()
self.captureSession.startRunning()
self.setupFaceDetector()
}
}
private func setupSession() {
guard let captureDevice = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .front) else {
return
}
captureSession.beginConfiguration()
do {
let input = try AVCaptureDeviceInput(device: captureDevice)
if let session = previewView.session, session.canAddInput(input) {
session.addInput(input)
let output = AVCaptureVideoDataOutput()
output.setSampleBufferDelegate(self, queue: DispatchQueue(label: "video_queue"))
if session.canAddOutput(output) {
session.addOutput(output)
}
}
} catch {
print("Error configuring the capture session: \(error.localizedDescription)")
}
if !captureSession.outputs.isEmpty {
captureSession.outputs.forEach { output in
captureSession.removeOutput(output)
}
}
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "video_queue"))
if captureSession.canAddOutput(videoOutput) {
captureSession.addOutput(videoOutput)
}
captureSession.commitConfiguration()
}
private func setupFaceDetector() {
self.faceDetector = FaceDetector.faceDetector(options: options)
}
private func calculateDistance(_ faceRect: CGRect, pixelBuffer: CVPixelBuffer) -> CGFloat {
let cameraFieldOfView: CGFloat = 60.0
let faceRealSize = tan(degreesToRadians(cameraFieldOfView) / 2.0) * 2.0 * distanceToCamera
let distance = faceRealSize / faceRect.size.width
return distance
}
private func degreesToRadians(_ degrees: CGFloat) -> CGFloat {
return degrees * .pi / 180.0
}
}
extension FaceProgressViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return
}
let visionImage = VisionImage(buffer: sampleBuffer)
visionImage.orientation = .up
self.faceDetector?.process(visionImage) { (faces, error) in
if let error = error {
print("Face detection error: \(error.localizedDescription)")
return
}
if let detectedFaces = faces, !detectedFaces.isEmpty {
if let face = detectedFaces.first {
let faceRect = face.frame
DispatchQueue.main.async {
self.drawFaceRect(faceRect)
}
print("A face was detected in the frame: \(faceRect)")
}
} else {
DispatchQueue.main.async {
self.clearFaceRect()
}
print("No faces were detected in the frame.")
}
}
}
private func drawFaceRect(_ rect: CGRect) {
if let sublayers = previewView.layer.sublayers {
for layer in sublayers {
if layer.name == "faceRectLayer" {
layer.removeFromSuperlayer()
}
}
}
let faceRectLayer = CAShapeLayer()
faceRectLayer.name = "faceRectLayer"
faceRectLayer.strokeColor = UIColor.red.cgColor
faceRectLayer.fillColor = UIColor.clear.cgColor
faceRectLayer.lineWidth = 2.0
let path = UIBezierPath(rect: rect)
faceRectLayer.path = path.cgPath
previewView.layer.addSublayer(faceRectLayer)
}
private func clearFaceRect() {
previewView.clearFaceRect()
}
}
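One thing I suspect (but haven’t verified) is that the frame ML Kit returns is in the pixel buffer’s coordinate space rather than the preview layer’s, which would explain why the square comes out tiny in a 260x260 preview. This is the kind of conversion I had in mind, going through AVFoundation’s metadata-output space; convertToLayerRect is just a name I made up, and this sketch ignores buffer rotation and front-camera mirroring:

// Sketch: convert an ML Kit face frame (buffer pixel coordinates) into the
// preview layer's coordinate space. Hypothetical helper, not tested.
private func convertToLayerRect(_ faceRect: CGRect, pixelBuffer: CVPixelBuffer) -> CGRect {
    let bufferWidth = CGFloat(CVPixelBufferGetWidth(pixelBuffer))
    let bufferHeight = CGFloat(CVPixelBufferGetHeight(pixelBuffer))
    // Normalize the face frame to [0, 1], the metadata-output coordinate space.
    let normalizedRect = CGRect(x: faceRect.origin.x / bufferWidth,
                                y: faceRect.origin.y / bufferHeight,
                                width: faceRect.size.width / bufferWidth,
                                height: faceRect.size.height / bufferHeight)
    // Let the preview layer account for its videoGravity (.resizeAspectFill).
    return previewView.videoPreviewLayer.layerRectConverted(fromMetadataOutputRect: normalizedRect)
}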
This is the code for the PreviewView class:
import UIKit
import AVFoundation
class PreviewView: UIView {
// MARK: - Properties
var videoPreviewLayer: AVCaptureVideoPreviewLayer {
guard let layer = layer as? AVCaptureVideoPreviewLayer else {
fatalError("Anticipated `AVCaptureVideoPreviewLayer`")
}
return layer
}
var session: AVCaptureSession? {
get { videoPreviewLayer.session }
set { videoPreviewLayer.session = newValue }
}
private var faceRectLayer: CAShapeLayer?
// MARK: - Override
override class var layerClass: AnyClass {
AVCaptureVideoPreviewLayer.self
}
// MARK: - Initialization
override init(frame: CGRect) {
super.init(frame: frame)
// Scale the preview so it fills the view
videoPreviewLayer.videoGravity = .resizeAspectFill
faceRectLayer = CAShapeLayer()
faceRectLayer?.strokeColor = UIColor.red.cgColor // You can adjust the color
faceRectLayer?.fillColor = UIColor.clear.cgColor
faceRectLayer?.lineWidth = 2.0 // You can adjust the line width
layer.addSublayer(faceRectLayer!)
}
required init?(coder aDecoder: NSCoder) {
super.init(coder: aDecoder)
// Scale the preview so it fills the view
videoPreviewLayer.videoGravity = .resizeAspectFill
// Configure the layer for the face rectangle
faceRectLayer = CAShapeLayer()
faceRectLayer?.strokeColor = UIColor.red.cgColor // You can adjust the color
faceRectLayer?.fillColor = UIColor.clear.cgColor
faceRectLayer?.lineWidth = 2.0 // You can adjust the line width
layer.addSublayer(faceRectLayer!)
}
func drawFaceRect(_ rect: CGRect, distance: CGFloat) {
CATransaction.begin()
CATransaction.setDisableActions(true)
// Calculate the size of the face rectangle based on the distance
let scaleFactor = max(1.0, distance)
let scaledRect = CGRect(
x: rect.origin.x * scaleFactor,
y: rect.origin.y * scaleFactor,
width: rect.size.width * scaleFactor,
height: rect.size.height * scaleFactor
)
let path = UIBezierPath(rect: scaledRect)
faceRectLayer?.path = path.cgPath
faceRectLayer?.isHidden = false
CATransaction.commit()
}
func clearFaceRect() {
CATransaction.begin()
CATransaction.setDisableActions(true)
faceRectLayer?.path = nil
faceRectLayer?.isHidden = true
CATransaction.commit()
}
}
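For what it’s worth, this is how I imagined calling PreviewView’s drawFaceRect(_:distance:) from the detection callback instead of adding layers in the view controller (a sketch; it assumes the rect was already converted to the preview’s coordinate space with the hypothetical convertToLayerRect above, and passes 1.0 as the distance since I don’t have a reliable value yet):

// Inside the faceDetector completion, replacing self.drawFaceRect(faceRect):
DispatchQueue.main.async {
    let layerRect = self.convertToLayerRect(faceRect, pixelBuffer: pixelBuffer)
    self.previewView.drawFaceRect(layerRect, distance: 1.0) // 1.0 = no extra scaling
}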
The problem is that the square is not drawn in the preview; I have to get very close to the camera for the square to appear, and even then it is barely visible. The preview is set to a size of 260x260 px. I can’t figure out how to make it detect a centered face and give the user instructions to center their face in front of the camera.
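To make “centered” concrete, I was imagining a check along these lines (just a sketch: the 20-point tolerance and the instruction strings are placeholders I made up, and since the front camera is mirrored the left/right directions may need flipping):

// Sketch: decide whether the face is centered in the preview and, if not,
// which way the user should move. Assumes faceRect is already in the
// preview view's coordinate space.
private func centeringInstruction(for faceRect: CGRect) -> String? {
    let previewCenter = CGPoint(x: previewView.bounds.midX, y: previewView.bounds.midY)
    let faceCenter = CGPoint(x: faceRect.midX, y: faceRect.midY)
    let tolerance: CGFloat = 20.0 // placeholder: how far off-center still counts as centered
    let dx = faceCenter.x - previewCenter.x
    let dy = faceCenter.y - previewCenter.y
    if abs(dx) <= tolerance && abs(dy) <= tolerance {
        return nil // centered, no instruction needed
    }
    if abs(dx) > abs(dy) {
        return dx > 0 ? "Move your face to the left" : "Move your face to the right"
    } else {
        return dy > 0 ? "Move your face up" : "Move your face down"
    }
}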
I hope you can help me, thanks!