using DlibDotNet;
using DlibDotNet.Dnn;
using System.Drawing;
using System.Drawing.Imaging;
using System.Runtime.InteropServices;
using System.Text;
using View_by_Distance.FaceRecognitionDotNet.Dlib.Python;
using View_by_Distance.FaceRecognitionDotNet.Extensions;
using View_by_Distance.Shared.Models;
using View_by_Distance.Shared.Models.Stateless;
namespace View_by_Distance.FaceRecognitionDotNet;
/// <summary>
/// Provides methods to find and recognize faces. This class cannot be inherited.
/// </summary>
public sealed class FaceRecognition : DisposableObject
{
#region Fields
// dlib shape predictor for the 68-point landmark model (PredictorModel.Large path).
private readonly ShapePredictor _PosePredictor68Point;
// dlib shape predictor for the 5-point landmark model (PredictorModel.Small path).
private readonly ShapePredictor _PosePredictor5Point;
// CNN (MMOD) face detector, used by Model.Cnn and the batched detection API.
private readonly LossMmod _CnnFaceDetector;
// Metric-loss network that computes face descriptors for FaceEncodings.
private readonly LossMetric _FaceEncoder;
// Default frontal face detector used when no CNN/custom model is requested.
private readonly FrontalFaceDetector _FaceDetector;
#endregion
#region Constructors
/// <summary>
/// Initializes a new instance of the <see cref="FaceRecognition"/> class with the directory path that stores model files.
/// </summary>
/// <param name="directory">The directory path that stores model files.</param>
/// <exception cref="FileNotFoundException">The model file is not found.</exception>
/// <exception cref="DirectoryNotFoundException">The specified directory path is not found.</exception>
private FaceRecognition(string directory)
{
if (!Directory.Exists(directory))
throw new DirectoryNotFoundException(directory);
// Resolve and validate every model file up front so construction fails before any native resource is created.
string? predictor68PointModel = Path.Combine(directory, FaceRecognitionModels.GetPosePredictorModelLocation());
if (!File.Exists(predictor68PointModel))
throw new FileNotFoundException(predictor68PointModel);
string? predictor5PointModel = Path.Combine(directory, FaceRecognitionModels.GetPosePredictorFivePointModelLocation());
if (!File.Exists(predictor5PointModel))
throw new FileNotFoundException(predictor5PointModel);
string? cnnFaceDetectionModel = Path.Combine(directory, FaceRecognitionModels.GetCnnFaceDetectorModelLocation());
if (!File.Exists(cnnFaceDetectionModel))
throw new FileNotFoundException(cnnFaceDetectionModel);
string? faceRecognitionModel = Path.Combine(directory, FaceRecognitionModels.GetFaceRecognitionModelLocation());
if (!File.Exists(faceRecognitionModel))
throw new FileNotFoundException(faceRecognitionModel);
// NOTE(review): the original called "_Field?.Dispose()" before each assignment; inside a
// constructor these readonly fields are still null, so those calls were no-ops and were removed.
_FaceDetector = DlibDotNet.Dlib.GetFrontalFaceDetector();
_PosePredictor68Point = ShapePredictor.Deserialize(predictor68PointModel);
_PosePredictor5Point = ShapePredictor.Deserialize(predictor5PointModel);
_CnnFaceDetector = LossMmod.Deserialize(cnnFaceDetectionModel);
_FaceEncoder = LossMetric.Deserialize(faceRecognitionModel);
}
/// <summary>
/// Initializes a new instance of the <see cref="FaceRecognition"/> class with the <see cref="ModelParameter"/> instance that contains model binary datum.
/// </summary>
/// <param name="parameter">The <see cref="ModelParameter"/> instance that contains model binary datum.</param>
/// <exception cref="NullReferenceException"><paramref name="parameter"/> is null, or one of its model data properties is null.</exception>
private FaceRecognition(ModelParameter parameter)
{
// Validate every model buffer before deserializing anything so construction fails atomically.
if (parameter == null)
throw new NullReferenceException(nameof(parameter));
if (parameter.PosePredictor5FaceLandmarksModel == null)
throw new NullReferenceException(nameof(parameter.PosePredictor5FaceLandmarksModel));
if (parameter.PosePredictor68FaceLandmarksModel == null)
throw new NullReferenceException(nameof(parameter.PosePredictor68FaceLandmarksModel));
if (parameter.CnnFaceDetectorModel == null)
throw new NullReferenceException(nameof(parameter.CnnFaceDetectorModel));
if (parameter.FaceRecognitionModel == null)
throw new NullReferenceException(nameof(parameter.FaceRecognitionModel));
// NOTE(review): the original called "_Field?.Dispose()" before each assignment; inside a
// constructor these readonly fields are still null, so those calls were no-ops and were removed.
_FaceDetector = DlibDotNet.Dlib.GetFrontalFaceDetector();
_PosePredictor68Point = ShapePredictor.Deserialize(parameter.PosePredictor68FaceLandmarksModel);
_PosePredictor5Point = ShapePredictor.Deserialize(parameter.PosePredictor5FaceLandmarksModel);
_CnnFaceDetector = LossMmod.Deserialize(parameter.CnnFaceDetectorModel);
_FaceEncoder = LossMetric.Deserialize(parameter.FaceRecognitionModel);
}
#endregion
#region Properties
/// <summary>
/// Gets or sets the custom face detector that user defined.
/// </summary>
public FaceDetector? CustomFaceDetector { get; set; }
/// <summary>
/// Gets or sets the custom face landmark detector that user defined.
/// </summary>
public FaceLandmarkDetector? CustomFaceLandmarkDetector { get; set; }
/// <summary>
/// Gets or sets the character encoding used by the underlying native library.
/// Assigning null resets the encoding to UTF-8.
/// </summary>
public static Encoding InternalEncoding
{
get => DlibDotNet.Dlib.Encoding;
set => DlibDotNet.Dlib.Encoding = value ?? Encoding.UTF8;
}
#endregion
#region Methods
/// <summary>
/// Returns an enumerable collection of arrays of bounding boxes of human faces in images using the cnn face detector.
/// </summary>
/// <param name="images">An enumerable collection of images.</param>
/// <param name="numberOfTimesToUpsample">The number of times to up-sample the images when looking for faces. Higher numbers find smaller faces.</param>
/// <param name="batchSize">The number of images to include in each GPU processing batch.</param>
/// <returns>An enumerable collection of arrays of found face locations, one array per input image.</returns>
/// <exception cref="NullReferenceException"><paramref name="images"/> is null.</exception>
public IEnumerable BatchFaceLocations(IEnumerable images, int numberOfTimesToUpsample, int batchSize = 128)
{
if (images == null)
throw new NullReferenceException(nameof(images));
List? results = new();
Image[]? imagesArray = images.ToArray();
if (!imagesArray.Any())
return results;
IEnumerable[]? rawDetectionsBatched = RawFaceLocationsBatched(imagesArray, numberOfTimesToUpsample, batchSize).ToArray();
// NOTE(review): only the first image's dimensions are used to trim every batch entry,
// which assumes all images in the batch share the same size — TODO confirm with callers.
Image? image = imagesArray[0];
for (int index = 0; index < rawDetectionsBatched.Length; index++)
{
MModRect[]? faces = rawDetectionsBatched[index].ToArray();
Location[]? locations = faces.Select(rect => new Location(rect.DetectionConfidence, TrimBound(rect.Rect, image.Width, image.Height), image.Width, image.Height)).ToArray();
// Dispose the native detection rectangles once their data has been copied into Location objects.
foreach (MModRect? face in faces)
face.Dispose();
results.Add(locations);
}
return results;
}
/// <summary>
/// Compare a known face encoding against a candidate encoding to see if they match.
/// </summary>
/// <param name="knownFaceEncoding">A known face encoding.</param>
/// <param name="faceEncodingToCheck">A single face encoding to compare against the known face encoding.</param>
/// <param name="tolerance">The distance between faces to consider it a match. Lower is more strict. The default value is 0.6.</param>
/// <returns>A True/False value indicating whether the known face encoding matches the face encoding to check.</returns>
/// <exception cref="NullReferenceException"><paramref name="knownFaceEncoding"/> or <paramref name="faceEncodingToCheck"/> is null.</exception>
public static bool CompareFace(FaceEncoding knownFaceEncoding, FaceEncoding faceEncodingToCheck, double tolerance = 0.6d)
{
if (knownFaceEncoding == null)
throw new NullReferenceException(nameof(knownFaceEncoding));
if (faceEncodingToCheck == null)
throw new NullReferenceException(nameof(faceEncodingToCheck));
knownFaceEncoding.ThrowIfDisposed();
faceEncodingToCheck.ThrowIfDisposed();
// A match is a euclidean distance at or below the tolerance.
return FaceDistance(knownFaceEncoding, faceEncodingToCheck) <= tolerance;
}
/// <summary>
/// Compare an enumerable collection of face encodings against a candidate encoding to see if they match.
/// </summary>
/// <param name="knownFaceEncodings">An enumerable collection of known face encodings.</param>
/// <param name="faceEncodingToCheck">A single face encoding to compare against the enumerable collection.</param>
/// <param name="tolerance">The distance between faces to consider it a match. Lower is more strict. The default value is 0.6.</param>
/// <returns>An enumerable collection of True/False values indicating which known face encodings match the face encoding to check.</returns>
/// <exception cref="NullReferenceException"><paramref name="knownFaceEncodings"/> or <paramref name="faceEncodingToCheck"/> is null.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="faceEncodingToCheck"/> is disposed, or <paramref name="knownFaceEncodings"/> contains a disposed object.</exception>
public static IEnumerable CompareFaces(IEnumerable knownFaceEncodings, FaceEncoding faceEncodingToCheck, double tolerance = 0.6d)
{
if (knownFaceEncodings == null)
throw new NullReferenceException(nameof(knownFaceEncodings));
if (faceEncodingToCheck == null)
throw new NullReferenceException(nameof(faceEncodingToCheck));
faceEncodingToCheck.ThrowIfDisposed();
// Materialize once so the disposed-object scan and the comparison loop see the same elements.
FaceEncoding[]? array = knownFaceEncodings.ToArray();
if (array.Any(encoding => encoding.IsDisposed))
throw new ObjectDisposedException($"{nameof(knownFaceEncodings)} contains disposed object.");
List? results = new();
if (array.Length == 0)
return results;
foreach (FaceEncoding? faceEncoding in array)
results.Add(FaceDistance(faceEncoding, faceEncodingToCheck) <= tolerance);
return results;
}
/// <summary>
/// Create a new instance of the <see cref="FaceRecognition"/> class.
/// </summary>
/// <param name="directory">The directory path that stores model files.</param>
/// <exception cref="FileNotFoundException">The model file is not found.</exception>
/// <exception cref="DirectoryNotFoundException">The specified directory path is not found.</exception>
public static FaceRecognition Create(string directory) => new(directory);
/// <summary>
/// Create a new instance of the <see cref="FaceRecognition"/> class.
/// </summary>
/// <param name="parameter">The <see cref="ModelParameter"/> instance that contains model binary datum.</param>
/// <exception cref="NullReferenceException"><paramref name="parameter"/> is null, or its model data is null.</exception>
public static FaceRecognition Create(ModelParameter parameter) => new(parameter);
/// <summary>
/// Crop a specified image with an enumerable collection of face locations.
/// </summary>
/// <param name="image">The image that contains faces.</param>
/// <param name="locations">The enumerable collection of location rectangles for faces.</param>
/// <returns>A collection of cropped face images, one per location.</returns>
/// <exception cref="NullReferenceException"><paramref name="image"/> or <paramref name="locations"/> is null.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="image"/> is disposed.</exception>
public static IEnumerable CropFaces(Image image, IEnumerable locations)
{
if (image == null)
throw new NullReferenceException(nameof(image));
if (locations == null)
throw new NullReferenceException(nameof(locations));
image.ThrowIfDisposed();
List? results = new();
foreach (Location? location in locations)
{
DlibDotNet.Rectangle rect = new(location.Left, location.Top, location.Right, location.Bottom);
// The four corners of the crop rectangle: top-left, top-right, bottom-left, bottom-right.
DPoint[]? dPoint = new[]
{
new DPoint(rect.Left, rect.Top),
new DPoint(rect.Right, rect.Top),
new DPoint(rect.Left, rect.Bottom),
new DPoint(rect.Right, rect.Bottom),
};
int width = (int)rect.Width;
int height = (int)rect.Height;
// Extract the sub-image in the matching pixel mode; unsupported modes are silently skipped.
switch (image.Mode)
{
case Mode.Rgb:
Matrix? rgb = image.Matrix as Matrix;
results.Add(new Image(DlibDotNet.Dlib.ExtractImage4Points(rgb, dPoint, width, height),
Mode.Rgb));
break;
case Mode.Greyscale:
Matrix? gray = image.Matrix as Matrix;
results.Add(new Image(DlibDotNet.Dlib.ExtractImage4Points(gray, dPoint, width, height),
Mode.Greyscale));
break;
}
}
return results;
}
/// <summary>
/// Compare a face encoding to a known face encoding and get a euclidean distance for the comparison face.
/// </summary>
/// <param name="faceEncoding">The face encoding to compare.</param>
/// <param name="faceToCompare">The face encoding to compare against.</param>
/// <returns>The euclidean distance for the comparison face. If 0, faces are completely equal.</returns>
/// <exception cref="NullReferenceException"><paramref name="faceEncoding"/> or <paramref name="faceToCompare"/> is null.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="faceEncoding"/> or <paramref name="faceToCompare"/> is disposed.</exception>
public static double FaceDistance(FaceEncoding faceEncoding, FaceEncoding faceToCompare)
{
if (faceEncoding == null)
throw new NullReferenceException(nameof(faceEncoding));
if (faceToCompare == null)
throw new NullReferenceException(nameof(faceToCompare));
faceEncoding.ThrowIfDisposed();
faceToCompare.ThrowIfDisposed();
// An empty encoding cannot be compared; treat it as a zero distance.
if (faceEncoding.Encoding.Size == 0)
return 0;
// Euclidean distance is the length of the element-wise difference vector.
using Matrix? diff = faceEncoding.Encoding - faceToCompare.Encoding;
return DlibDotNet.Dlib.Length(diff);
}
/// <summary>
/// Compare an enumerable collection of face encodings to a known face encoding and get an enumerable collection of euclidean distances for comparison.
/// </summary>
/// <param name="faceEncodings">The enumerable collection of face encodings to compare.</param>
/// <param name="faceToCompare">The face encoding to compare against.</param>
/// <returns>The enumerable collection of euclidean distances. If 0, faces are completely equal.</returns>
/// <exception cref="NullReferenceException"><paramref name="faceEncodings"/> or <paramref name="faceToCompare"/> is null.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="faceToCompare"/> is disposed, or <paramref name="faceEncodings"/> contains a disposed object.</exception>
public static List FaceDistances(IEnumerable faceEncodings, FaceEncoding faceToCompare)
{
if (faceEncodings == null)
throw new NullReferenceException(nameof(faceEncodings));
if (faceToCompare == null)
throw new NullReferenceException(nameof(faceToCompare));
faceToCompare.ThrowIfDisposed();
// Materialize once so the disposed-object scan and the distance loop see the same elements.
FaceEncoding[]? array = faceEncodings.ToArray();
if (array.Any(encoding => encoding.IsDisposed))
throw new ObjectDisposedException($"{nameof(faceEncodings)} contains disposed object.");
List? results = new();
if (array.Length == 0)
return results;
// Each difference matrix is native memory; dispose it as soon as its length is taken.
foreach (FaceEncoding? faceEncoding in array)
using (Matrix? diff = faceEncoding.Encoding - faceToCompare.Encoding)
results.Add(DlibDotNet.Dlib.Length(diff));
return results;
}
/// <summary>
/// Returns an enumerable collection of face feature data corresponding to all faces in the specified image.
/// </summary>
/// <param name="image">The image that contains faces. The image can contain multiple faces.</param>
/// <param name="numberOfTimesToUpsample">The number of times to up-sample the image when finding faces.</param>
/// <param name="knownFaceLocation">The enumerable collection of location rectangles for faces. If null, this method will find face locations itself.</param>
/// <param name="numberOfJitters">The number of times to re-sample the face when calculating the encoding.</param>
/// <param name="predictorModel">The dimension of the vector returned from the detector. <see cref="PredictorModel.Custom"/> is not supported here.</param>
/// <param name="model">The model of face detector to use when <paramref name="knownFaceLocation"/> is null.</param>
/// <returns>An enumerable collection of face feature data corresponding to all faces in the specified image.</returns>
/// <exception cref="NullReferenceException"><paramref name="image"/> is null.</exception>
/// <exception cref="InvalidOperationException"><paramref name="knownFaceLocation"/> contains no elements.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="image"/> or this object is disposed.</exception>
/// <exception cref="NotSupportedException"><paramref name="predictorModel"/> is <see cref="PredictorModel.Custom"/>.</exception>
public List FaceEncodings(Image image, int numberOfTimesToUpsample, IEnumerable? knownFaceLocation, int numberOfJitters, PredictorModel predictorModel, Model model)
{
if (image == null)
throw new NullReferenceException(nameof(image));
if (predictorModel == PredictorModel.Custom)
throw new NotSupportedException("FaceRecognition.PredictorModel.Custom is not supported.");
if (knownFaceLocation != null && !knownFaceLocation.Any())
throw new InvalidOperationException($"{nameof(knownFaceLocation)} contains no elements.");
image.ThrowIfDisposed();
ThrowIfDisposed();
// Detect landmarks first, then feed each landmark set to the encoder network.
List rawLandmarks = RawFaceLandmarks(image, numberOfTimesToUpsample, knownFaceLocation, predictorModel, model);
List results = new();
foreach (FullObjectDetection landmark in rawLandmarks)
{
FaceEncoding? ret = new(FaceRecognitionModelV1.ComputeFaceDescriptor(_FaceEncoder, image, landmark, numberOfJitters));
// The native landmark object is no longer needed once the descriptor is computed.
landmark.Dispose();
results.Add(ret);
}
return results;
}
// Concatenates two point sequences into a single array; used by GetFaceLandmarkCollection
// to stitch the outer and inner lip point runs into the TopLip/BottomLip parts.
private static FacePoint[] Join(IEnumerable facePoints1, IEnumerable facePoints2)
{
List results = new();
results.AddRange(facePoints1);
results.AddRange(facePoints2);
return results.ToArray();
}
/// <summary>
/// Returns a collection of face-part locations (eyes, nose, etc.) for each face in the image.
/// </summary>
/// <param name="faceImage">The image that contains faces. The image can contain multiple faces.</param>
/// <param name="numberOfTimesToUpsample">The number of times to up-sample the image when finding faces.</param>
/// <param name="faceLocations">The enumerable collection of location rectangles for faces. If null, this method will find face locations itself.</param>
/// <param name="predictorModel">The landmark model: Large (68 points) or Small (5 points). Custom is not implemented.</param>
/// <param name="model">The model of face detector to use when <paramref name="faceLocations"/> is null.</param>
/// <returns>Per face, an array of (face part, points) pairs.</returns>
/// <exception cref="NullReferenceException"><paramref name="faceImage"/> is null.</exception>
/// <exception cref="InvalidOperationException"><paramref name="faceLocations"/> contains no elements.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="faceImage"/> or this object is disposed.</exception>
/// <exception cref="NotImplementedException"><paramref name="predictorModel"/> is <see cref="PredictorModel.Custom"/>.</exception>
public List<(FacePart, FacePoint[])[]> GetFaceLandmarkCollection(Image faceImage, int numberOfTimesToUpsample, IEnumerable? faceLocations, PredictorModel predictorModel, Model model)
{
List<(FacePart, FacePoint[])[]> results = new();
if (faceImage == null)
throw new NullReferenceException(nameof(faceImage));
if (faceLocations != null && !faceLocations.Any())
throw new InvalidOperationException($"{nameof(faceLocations)} contains no elements.");
faceImage.ThrowIfDisposed();
ThrowIfDisposed();
if (predictorModel == PredictorModel.Custom)
throw new NotImplementedException();
List fullObjectDetections = RawFaceLandmarks(faceImage, numberOfTimesToUpsample, faceLocations, predictorModel, model);
// Copy every landmark point out of the native detections, then dispose them.
List landmarksCollection = fullObjectDetections.Select(landmark => Enumerable.Range(0, (int)landmark.Parts)
.Select(index => new FacePoint(index, landmark.GetPart((uint)index).X, landmark.GetPart((uint)index).Y)).ToArray()).ToList();
foreach (FullObjectDetection? landmark in fullObjectDetections)
landmark.Dispose();
List<(FacePart, FacePoint[])> collection;
foreach (FacePoint[] facePoints in landmarksCollection)
{
collection = new();
switch (predictorModel)
{
case PredictorModel.Custom:
throw new NotImplementedException();
case PredictorModel.Large:
// 68-point model: faces with an unexpected point count are skipped entirely.
if (facePoints.Length != 68)
continue;
// Fixed index ranges of the 68-point layout, sliced into named parts.
collection.Add(new(FacePart.Chin, facePoints.Skip(0).Take(17).ToArray()));
collection.Add(new(FacePart.LeftEyebrow, facePoints.Skip(17).Take(5).ToArray()));
collection.Add(new(FacePart.RightEyebrow, facePoints.Skip(22).Take(5).ToArray()));
collection.Add(new(FacePart.NoseBridge, facePoints.Skip(27).Take(5).ToArray()));
collection.Add(new(FacePart.NoseTip, facePoints.Skip(31).Take(5).ToArray()));
collection.Add(new(FacePart.LeftEye, facePoints.Skip(36).Take(6).ToArray()));
collection.Add(new(FacePart.RightEye, facePoints.Skip(42).Take(6).ToArray()));
// Lips combine an outer and an inner run of points.
collection.Add(new(FacePart.TopLip, Join(facePoints.Skip(48).Take(7), facePoints.Skip(60).Take(5))));
collection.Add(new(FacePart.BottomLip, Join(facePoints.Skip(55).Take(5), facePoints.Skip(65).Take(3))));
break;
case PredictorModel.Small:
// 5-point model: two points per eye plus the nose tip.
if (facePoints.Length != 5)
continue;
collection.Add(new(FacePart.RightEye, facePoints.Skip(0).Take(2).ToArray()));
collection.Add(new(FacePart.LeftEye, facePoints.Skip(2).Take(2).ToArray()));
collection.Add(new(FacePart.NoseTip, facePoints.Skip(4).Take(1).ToArray()));
break;
}
results.Add(collection.ToArray());
}
return results;
}
/// <summary>
/// Returns an enumerable collection of face locations corresponding to all faces in the specified image.
/// </summary>
/// <param name="model">The model of face detector to detect in image.</param>
/// <param name="image">The image that contains faces. The image can contain multiple faces.</param>
/// <param name="numberOfTimesToUpsample">The number of times to up-sample the image when finding faces.</param>
/// <param name="sortByPixelPercentage">If true, results are sorted by each location's PixelPercentage (ascending).</param>
/// <returns>An enumerable collection of face locations corresponding to all faces in the specified image.</returns>
/// <exception cref="NullReferenceException"><paramref name="image"/> is null.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="image"/> or this object is disposed.</exception>
public List FaceLocations(Model model, Image image, int numberOfTimesToUpsample, bool sortByPixelPercentage)
{
if (image == null)
throw new NullReferenceException(nameof(image));
image.ThrowIfDisposed();
ThrowIfDisposed();
List results = new();
foreach (MModRect? face in RawFaceLocations(image, numberOfTimesToUpsample, model))
{
// Clamp each detection to the image bounds, copy what we need, then free the native rect.
Location? ret = TrimBound(face.Rect, image.Width, image.Height);
double confidence = face.DetectionConfidence;
face.Dispose();
results.Add(new Location(confidence, ret, image.Width, image.Height));
}
if (sortByPixelPercentage)
results = (from l in results orderby l.PixelPercentage select l).ToList();
return results;
}
/// <summary>
/// Creates a <see cref="FaceEncoding"/> from the <see cref="double"/> array.
/// </summary>
/// <param name="encoding">The <see cref="double"/> array that contains face encoding data.</param>
/// <returns>The <see cref="FaceEncoding"/> this method creates.</returns>
/// <exception cref="NullReferenceException"><paramref name="encoding"/> is null.</exception>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="encoding"/>.Length must be 128.</exception>
public static FaceEncoding LoadFaceEncoding(double[] encoding)
{
if (encoding == null)
throw new NullReferenceException(nameof(encoding));
if (encoding.Length != 128)
{
string message = $"{nameof(encoding)}.{nameof(encoding.Length)} must be 128.";
// Fix: the original passed the message as the single constructor argument, which is the
// paramName slot; use the (paramName, message) overload so both fields are populated correctly.
throw new ArgumentOutOfRangeException(nameof(encoding), message);
}
#pragma warning disable
Matrix? matrix = Matrix.CreateTemplateParameterizeMatrix(0, 1);
#pragma warning restore
// Size the matrix to the 128-element descriptor and copy the values in.
matrix.SetSize(128);
matrix.Assign(encoding);
return new FaceEncoding(matrix);
}
#pragma warning disable CA1416
/// <summary>
/// Creates an <see cref="Image"/> from the specified existing <see cref="Bitmap"/>.
/// </summary>
/// <param name="bitmap">The <see cref="Bitmap"/> from which to create the new <see cref="Image"/>.</param>
/// <returns>The <see cref="Image"/> this method creates, or null if <paramref name="bitmap"/>'s mode is unhandled.</returns>
/// <exception cref="ArgumentOutOfRangeException">The specified <see cref="PixelFormat"/> is not supported.</exception>
public static Image? LoadImage(Bitmap bitmap)
{
int width = bitmap.Width;
int height = bitmap.Height;
System.Drawing.Rectangle rect = new(0, 0, width, height);
PixelFormat format = bitmap.PixelFormat;
Mode mode;
// srcChannel: bytes per pixel in the locked bitmap; dstChannel: bytes per pixel in the output buffer.
int srcChannel;
int dstChannel;
switch (format)
{
case PixelFormat.Format8bppIndexed:
mode = Mode.Greyscale;
srcChannel = 1;
dstChannel = 1;
break;
case PixelFormat.Format24bppRgb:
mode = Mode.Rgb;
srcChannel = 3;
dstChannel = 3;
break;
case PixelFormat.Format32bppRgb:
case PixelFormat.Format32bppArgb:
// 32-bit sources are converted to 3-channel RGB; the fourth (alpha/padding) byte is dropped.
mode = Mode.Rgb;
srcChannel = 4;
dstChannel = 3;
break;
default:
throw new ArgumentOutOfRangeException($"{nameof(bitmap)}", $"The specified {nameof(PixelFormat)} is not supported.");
}
BitmapData? data = null;
try
{
data = bitmap.LockBits(rect, ImageLockMode.ReadOnly, format);
unsafe
{
// Tightly-packed destination buffer (no row padding), pinned while the native matrix copies from it.
byte[]? array = new byte[width * height * dstChannel];
fixed (byte* pArray = &array[0])
{
byte* dst = pArray;
switch (srcChannel)
{
case 1:
{
// Greyscale: copy each row, skipping any stride padding in the source.
IntPtr src = data.Scan0;
int stride = data.Stride;
for (int h = 0; h < height; h++)
Marshal.Copy(IntPtr.Add(src, h * stride), array, h * width, width * dstChannel);
}
break;
case 3:
case 4:
{
byte* src = (byte*)data.Scan0;
int stride = data.Stride;
for (int h = 0; h < height; h++)
{
int srcOffset = h * stride;
int dstOffset = h * width * dstChannel;
for (int w = 0; w < width; w++)
{
// BGR order to RGB order
dst[dstOffset + w * dstChannel + 0] = src[srcOffset + w * srcChannel + 2];
dst[dstOffset + w * dstChannel + 1] = src[srcOffset + w * srcChannel + 1];
dst[dstOffset + w * dstChannel + 2] = src[srcOffset + w * srcChannel + 0];
}
}
}
break;
}
// NOTE(review): the Matrix constructor appears to copy from the pinned pointer before
// this fixed block ends — TODO confirm against the DlibDotNet Matrix(IntPtr, ...) contract.
IntPtr ptr = (IntPtr)pArray;
switch (mode)
{
case Mode.Rgb:
return new Image(new Matrix(ptr, height, width, width * 3), Mode.Rgb);
case Mode.Greyscale:
return new Image(new Matrix(ptr, height, width, width), Mode.Greyscale);
}
}
}
}
finally
{
if (data != null)
bitmap.UnlockBits(data);
}
return null;
}
#pragma warning restore CA1416
/// <summary>
/// Creates an <see cref="Image"/> from the <see cref="byte"/> array.
/// </summary>
/// <param name="array">The <see cref="byte"/> array that contains image data.</param>
/// <param name="row">The number of rows in the image data.</param>
/// <param name="column">The number of columns in the image data.</param>
/// <param name="stride">The stride width in bytes.</param>
/// <param name="mode">An image color mode.</param>
/// <returns>The <see cref="Image"/> this method creates, or null if <paramref name="mode"/> is unhandled.</returns>
/// <exception cref="NullReferenceException"><paramref name="array"/> is null.</exception>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="row"/>, <paramref name="column"/> or <paramref name="stride"/> is less than 0; <paramref name="stride"/> is less than <paramref name="column"/>; or <paramref name="array"/>.Length is less than <paramref name="row"/> x <paramref name="stride"/>.</exception>
public static Image? LoadImage(byte[] array, int row, int column, int stride, Mode mode)
{
if (array == null)
throw new NullReferenceException(nameof(array));
if (row < 0)
throw new ArgumentOutOfRangeException($"{nameof(row)}", $"{nameof(row)} is less than 0.");
if (column < 0)
throw new ArgumentOutOfRangeException($"{nameof(column)}", $"{nameof(column)} is less than 0.");
if (stride < 0)
throw new ArgumentOutOfRangeException($"{nameof(stride)}", $"{nameof(stride)} is less than 0.");
if (stride < column)
throw new ArgumentOutOfRangeException($"{nameof(stride)}", $"{nameof(stride)} is less than {nameof(column)}.");
// The buffer must hold at least row full strides of data.
int min = row * stride;
if (!(array.Length >= min))
// Fix: the original message stated the inverted relation ("row x stride is less than
// Array.Length") and passed "" as the parameter name.
throw new ArgumentOutOfRangeException(nameof(array), $"{nameof(Array)}.{nameof(Array.Length)} is less than {nameof(row)} x {nameof(stride)}.");
unsafe
{
fixed (byte* p = &array[0])
{
IntPtr ptr = (IntPtr)p;
switch (mode)
{
case Mode.Rgb:
return new Image(new Matrix(ptr, row, column, stride), Mode.Rgb);
case Mode.Greyscale:
return new Image(new Matrix(ptr, row, column, stride), Mode.Greyscale);
}
}
}
return null;
}
/// <summary>
/// Creates an <see cref="Image"/> from the unmanaged memory pointer that indicates array image data.
/// </summary>
/// <param name="array">The unmanaged memory pointer that indicates array image data.</param>
/// <param name="row">The number of rows in the image data.</param>
/// <param name="column">The number of columns in the image data.</param>
/// <param name="stride">The stride width in bytes.</param>
/// <param name="mode">An image color mode.</param>
/// <returns>The <see cref="Image"/> this method creates, or null if <paramref name="mode"/> is unhandled.</returns>
/// <exception cref="ArgumentException"><paramref name="array"/> is <see cref="IntPtr.Zero"/>.</exception>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="row"/>, <paramref name="column"/> or <paramref name="stride"/> is less than 0, or <paramref name="stride"/> is less than <paramref name="column"/>.</exception>
public static Image? LoadImage(IntPtr array, int row, int column, int stride, Mode mode)
{
if (array == IntPtr.Zero)
throw new ArgumentException($"{nameof(array)} is {nameof(IntPtr)}.{nameof(IntPtr.Zero)}", nameof(array));
if (row < 0)
throw new ArgumentOutOfRangeException($"{nameof(row)}", $"{nameof(row)} is less than 0.");
if (column < 0)
throw new ArgumentOutOfRangeException($"{nameof(column)}", $"{nameof(column)} is less than 0.");
if (stride < 0)
throw new ArgumentOutOfRangeException($"{nameof(stride)}", $"{nameof(stride)} is less than 0.");
if (stride < column)
throw new ArgumentOutOfRangeException($"{nameof(stride)}", $"{nameof(stride)} is less than {nameof(column)}.");
// NOTE(review): the caller retains ownership of the unmanaged buffer — presumably it must
// outlive the returned Image; verify against the DlibDotNet Matrix(IntPtr, ...) contract.
return mode switch
{
Mode.Rgb => new Image(new Matrix(array, row, column, stride), mode),
Mode.Greyscale => new Image(new Matrix(array, row, column, stride), mode),
_ => null,
};
}
/// <summary>
/// Creates an <see cref="Image"/> from the specified path.
/// </summary>
/// <param name="file">A string that contains the path of the file from which to create the <see cref="Image"/>.</param>
/// <param name="mode">An image color mode.</param>
/// <returns>The <see cref="Image"/> this method creates, or null if <paramref name="mode"/> is unhandled.</returns>
/// <exception cref="FileNotFoundException">The specified path does not exist.</exception>
public static Image? LoadImageFile(string file, Mode mode = Mode.Rgb)
{
if (!File.Exists(file))
throw new FileNotFoundException(file);
return mode switch
{
Mode.Rgb => new Image(DlibDotNet.Dlib.LoadImageAsMatrix(file), mode),
Mode.Greyscale => new Image(DlibDotNet.Dlib.LoadImageAsMatrix(file), mode),
_ => null,
};
}
#region Helpers
// Detects facial landmarks for every face location. When faceLocations is null the faces are
// detected first with the requested detector model; otherwise the caller's locations are used as-is.
// Returned FullObjectDetection instances are native objects the caller must dispose.
private List RawFaceLandmarks(Image faceImage, int numberOfTimesToUpsample, IEnumerable? faceLocations, PredictorModel predictorModel, Model model)
{
IEnumerable locations;
if (faceLocations == null)
{
// No locations supplied: run face detection and convert each native rect to a Location.
List? list = new();
IEnumerable? tmp = RawFaceLocations(faceImage, numberOfTimesToUpsample, model);
foreach (MModRect? mModRect in tmp)
{
list.Add(new Location(mModRect.DetectionConfidence, mModRect.Rect.Bottom, mModRect.Rect.Left, mModRect.Rect.Right, mModRect.Rect.Top, faceImage.Width, faceImage.Height));
mModRect.Dispose();
}
locations = list;
}
else
{
locations = faceLocations;
}
List results = new();
if (predictorModel == PredictorModel.Custom)
{
// Custom predictor: delegate landmark detection to the user-supplied detector.
if (CustomFaceLandmarkDetector is null)
throw new NullReferenceException(nameof(CustomFaceLandmarkDetector));
foreach (Location? rect in locations)
{
FullObjectDetection? ret = CustomFaceLandmarkDetector.Detect(faceImage, rect);
results.Add(ret);
}
}
else
{
// Built-in predictors: 68-point by default, 5-point for PredictorModel.Small.
ShapePredictor? posePredictor = _PosePredictor68Point;
switch (predictorModel)
{
case PredictorModel.Small:
posePredictor = _PosePredictor5Point;
break;
}
foreach (Location? rect in locations)
{
FullObjectDetection? ret = posePredictor.Detect(faceImage.Matrix, new DlibDotNet.Rectangle(rect.Left, rect.Top, rect.Right, rect.Bottom));
results.Add(ret);
}
}
return results;
}
// Runs the selected face detector and yields native MModRect detections the caller must dispose.
// Model.Custom uses the user-supplied detector, Model.Cnn the CNN model, anything else the
// default frontal face detector.
private IEnumerable RawFaceLocations(Image faceImage, int numberOfTimesToUpsample, Model model)
{
switch (model)
{
case Model.Custom:
if (CustomFaceDetector == null)
throw new NotSupportedException("The custom face detector is not ready.");
// Wrap the custom detector's plain rectangles in MModRect so all paths return the same shape.
return CustomFaceDetector.Detect(faceImage, numberOfTimesToUpsample).Select(rect => new MModRect
{
Rect = new DlibDotNet.Rectangle(rect.Left, rect.Top, rect.Right, rect.Bottom),
DetectionConfidence = rect.Confidence
});
case Model.Cnn:
return CnnFaceDetectionModelV1.Detect(_CnnFaceDetector, faceImage, numberOfTimesToUpsample);
default:
// Frontal detector returns (rect, confidence) tuples; normalize them to MModRect as well.
IEnumerable>? locations = SimpleObjectDetector.RunDetectorWithUpscale2(_FaceDetector, faceImage, (uint)numberOfTimesToUpsample);
return locations.Select(tuple => new MModRect { Rect = tuple.Item1, DetectionConfidence = tuple.Item2 });
}
}
// Thin wrapper: delegates batched CNN face detection to CnnFaceDetectionModelV1.DetectMulti.
private IEnumerable> RawFaceLocationsBatched(IEnumerable faceImages, int numberOfTimesToUpsample, int batchSize = 128) => CnnFaceDetectionModelV1.DetectMulti(_CnnFaceDetector, faceImages, numberOfTimesToUpsample, batchSize);
// Clamps a detection rectangle to the image bounds (left/top at 0, right/bottom at width/height).
private static Location TrimBound(DlibDotNet.Rectangle location, int width, int height) => new(Math.Max(location.Left, 0), Math.Max(location.Top, 0), Math.Min(location.Right, width), Math.Min(location.Bottom, height), width, height);
#endregion
#endregion
#region Methods
#region Overrides
/// <summary>
/// Releases all unmanaged resources: both shape predictors, the CNN detector, the face encoder
/// and the frontal face detector.
/// </summary>
protected override void DisposeUnmanaged()
{
base.DisposeUnmanaged();
_PosePredictor68Point?.Dispose();
_PosePredictor5Point?.Dispose();
_CnnFaceDetector?.Dispose();
_FaceEncoder?.Dispose();
_FaceDetector?.Dispose();
}
#endregion
#endregion
}