AA.Compare Project to Match not runToDoCollectionFirst
Removed Layered AppSettings with Nested Objects at First Level
This commit is contained in:
51
FaceRecognitionDotNet/AA.FaceRecognitionDotNet.csproj
Normal file
51
FaceRecognitionDotNet/AA.FaceRecognitionDotNet.csproj
Normal file
@ -0,0 +1,51 @@
|
||||
<Project Sdk="Microsoft.NET.Sdk">

  <!-- Core build settings: class library targeting net9.0, win-x64, with nullable
       reference types and implicit usings enabled. -->
  <PropertyGroup>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
    <OutputType>library</OutputType>
    <RuntimeIdentifier>win-x64</RuntimeIdentifier>
    <TargetFramework>net9.0</TargetFramework>
  </PropertyGroup>

  <!-- NuGet packaging metadata. GeneratePackageOnBuild is off; packing is
       presumably done explicitly (dotnet pack) — confirm with the build pipeline. -->
  <PropertyGroup>
    <PackageId>Phares.AA.FaceRecognitionDotNet</PackageId>
    <GeneratePackageOnBuild>false</GeneratePackageOnBuild>
    <Version>9.0.100.1</Version>
    <Authors>Mike Phares</Authors>
    <Company>Phares</Company>
    <IncludeSymbols>true</IncludeSymbols>
    <SymbolPackageFormat>snupkg</SymbolPackageFormat>
  </PropertyGroup>

  <!-- Unsafe blocks are required by the native-interop (DlibDotNet) call sites. -->
  <PropertyGroup>
    <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
  </PropertyGroup>

  <!-- Detect the build OS so per-platform compilation symbols can be defined below. -->
  <PropertyGroup>
    <IsWindows Condition="'$([System.Runtime.InteropServices.RuntimeInformation]::IsOSPlatform($([System.Runtime.InteropServices.OSPlatform]::Windows)))' == 'true'">true</IsWindows>
    <IsOSX Condition="'$([System.Runtime.InteropServices.RuntimeInformation]::IsOSPlatform($([System.Runtime.InteropServices.OSPlatform]::OSX)))' == 'true'">true</IsOSX>
    <IsLinux Condition="'$([System.Runtime.InteropServices.RuntimeInformation]::IsOSPlatform($([System.Runtime.InteropServices.OSPlatform]::Linux)))' == 'true'">true</IsLinux>
  </PropertyGroup>

  <PropertyGroup Condition="'$(IsWindows)'=='true'">
    <DefineConstants>Windows</DefineConstants>
  </PropertyGroup>

  <PropertyGroup Condition="'$(IsOSX)'=='true'">
    <DefineConstants>OSX</DefineConstants>
  </PropertyGroup>

  <PropertyGroup Condition="'$(IsLinux)'=='true'">
    <DefineConstants>Linux</DefineConstants>
  </PropertyGroup>

  <ItemGroup Condition="'$(RuntimeIdentifier)' == 'browser-wasm'">
    <SupportedPlatform Include="browser" />
  </ItemGroup>

  <!-- CPU build of DlibDotNet; the commented alternatives are the MKL/CUDA
       variants that can be swapped in for accelerated builds. -->
  <ItemGroup>
    <PackageReference Include="DlibDotNet" Version="19.21.0.20220724" />
    <!--PackageReference Include="configuration.MKL" Version="19.21.0.20210302" /-->
    <!--PackageReference Include="DlibDotNet-WithCUDA" Version="19.17.0.20190429" /-->
    <!--PackageReference Include="configuration.CUDA92" Version="19.21.0.20210302" /-->
    <!--PackageReference Include="configuration.CUDA102" Version="19.21.0.20210302" /-->
    <!--PackageReference Include="configuration.CUDA110" Version="19.21.0.20210302" /-->
    <!--PackageReference Include="configuration.CUDA111" Version="19.21.0.20210302" /-->
    <!--PackageReference Include="configuration.CUDA112" Version="19.21.0.20210302" /-->
  </ItemGroup>

  <ItemGroup>
    <ProjectReference Include="..\Shared\AA.Shared.csproj" />
  </ItemGroup>

</Project>
|
97
FaceRecognitionDotNet/DisposableObject.cs
Normal file
97
FaceRecognitionDotNet/DisposableObject.cs
Normal file
@ -0,0 +1,97 @@
|
||||
namespace View_by_Distance.FaceRecognitionDotNet;
|
||||
|
||||
/// <summary>
/// Represents a class which has managed or unmanaged resources.
/// </summary>
public abstract class DisposableObject : IDisposable
{

    #region Properties

    /// <summary>
    /// Gets a value indicating whether this instance has been disposed.
    /// </summary>
    /// <returns>true if this instance has been disposed; otherwise, false.</returns>
    public bool IsDisposed
    {
        get;
        private set;
    }

    #endregion

    #region Methods

    /// <summary>
    /// If this object is disposed, then <see cref="ObjectDisposedException"/> is thrown.
    /// </summary>
    public void ThrowIfDisposed() =>
        ObjectDisposedException.ThrowIf(IsDisposed, this);

    /// <summary>
    /// If this object is disposed, throws <see cref="ObjectDisposedException"/>
    /// reporting <paramref name="objectName"/> instead of this instance's type name.
    /// </summary>
    /// <param name="objectName">The name to report in the exception.</param>
    internal void ThrowIfDisposed(string objectName)
    {
        // CA1513 suggests ObjectDisposedException.ThrowIf, but that overload cannot
        // carry a custom object name, so the explicit throw is kept deliberately.
#pragma warning disable CA1513
        if (IsDisposed)
            throw new ObjectDisposedException(objectName);
#pragma warning restore CA1513
    }

    #region Overrides

    /// <summary>
    /// Releases all managed resources. Derived classes override this to dispose
    /// managed state; the base implementation does nothing.
    /// </summary>
    protected virtual void DisposeManaged()
    {

    }

    /// <summary>
    /// Releases all unmanaged resources. Derived classes override this to free
    /// unmanaged state; the base implementation does nothing.
    /// </summary>
    protected virtual void DisposeUnmanaged()
    {

    }

    #endregion

    #endregion

    #region IDisposable Members

    /// <summary>
    /// Releases all resources used by this <see cref="DisposableObject"/>.
    /// </summary>
    public void Dispose()
    {
        // Standard dispose pattern: perform the cleanup first, then suppress
        // finalization. (The original suppressed finalization BEFORE disposing;
        // if a derived type had a finalizer and DisposeManaged/DisposeUnmanaged
        // threw, finalization would already have been suppressed and the
        // finalizer's cleanup would be lost.)
        Dispose(true);
        GC.SuppressFinalize(this);
    }

    /// <summary>
    /// Releases all resources used by this <see cref="DisposableObject"/>.
    /// </summary>
    /// <param name="disposing">Indicate value whether <see cref="IDisposable.Dispose"/> method was called.</param>
    private void Dispose(bool disposing)
    {
        // Idempotent: subsequent calls are no-ops.
        if (IsDisposed)
        {
            return;
        }

        // Mark disposed before running cleanup so re-entrant calls bail out early.
        IsDisposed = true;

        // Managed state is only released on an explicit Dispose(); unmanaged
        // state is released unconditionally.
        if (disposing)
            DisposeManaged();

        DisposeUnmanaged();
    }

    #endregion

}
|
111
FaceRecognitionDotNet/Dlib/Python/CnnFaceDetectionModelV1.cs
Normal file
111
FaceRecognitionDotNet/Dlib/Python/CnnFaceDetectionModelV1.cs
Normal file
@ -0,0 +1,111 @@
|
||||
using DlibDotNet;
|
||||
using DlibDotNet.Dnn;
|
||||
using View_by_Distance.Shared.Models.Stateless;
|
||||
|
||||
namespace View_by_Distance.FaceRecognitionDotNet.Dlib.Python;
|
||||
|
||||
internal sealed class CnnFaceDetectionModelV1
{

    #region Methods

    /// <summary>
    /// Runs the CNN (MMOD) face detector over a single image and returns the
    /// detected face rectangles with coordinates mapped back to the original
    /// (non-upsampled) image.
    /// </summary>
    /// <param name="net">The deserialized MMOD detection network.</param>
    /// <param name="image">The image to scan; must be 8bit greyscale or RGB.</param>
    /// <param name="upsampleNumTimes">Number of pyramid-up passes before detection; each pass doubles the working image size.</param>
    /// <returns>Detected face rectangles in original-image coordinates.</returns>
    /// <exception cref="NotSupportedException">The image mode is neither greyscale nor RGB.</exception>
    public static IEnumerable<MModRect> Detect(LossMmod net, Image image, int upsampleNumTimes)
    {
        using PyramidDown? pyr = new(2);
        List<MModRect>? rects = [];

        // Copy the data into dlib based objects
        using Matrix<RgbPixel>? matrix = new();
        Mode type = image.Mode;
        switch (type)
        {
            case Mode.Greyscale:
            case Mode.Rgb:
                DlibDotNet.Dlib.AssignImage(image.Matrix, matrix);
                break;
            default:
                throw new NotSupportedException("Unsupported image type, must be 8bit gray or RGB image.");
        }

        // Upsampling the image will allow us to detect smaller faces but will cause the
        // program to use more RAM and run longer.
        int levels = upsampleNumTimes;
        while (levels > 0)
        {
            levels--;
            DlibDotNet.Dlib.PyramidUp<PyramidDown>(matrix, 2);
        }

        // NOTE(review): dets (OutputLabels) is never disposed here; the returned
        // MModRect instances come out of it, so ownership is ambiguous — confirm
        // against DlibDotNet's disposal semantics before changing.
        OutputLabels<IEnumerable<MModRect>>? dets = net.Operator(matrix);

        // Scale the detection locations back to the original image size
        // if the image was upscaled.
        foreach (MModRect? d in dets.First())
        {
            DRectangle drect = pyr.RectDown(new DRectangle(d.Rect), (uint)upsampleNumTimes);
            d.Rect = new Rectangle((int)drect.Left, (int)drect.Top, (int)drect.Right, (int)drect.Bottom);
            rects.Add(d);
        }

        return rects;
    }

    /// <summary>
    /// Runs the CNN (MMOD) face detector over a batch of images and returns,
    /// for each input image, its detected face rectangles mapped back to the
    /// original image coordinates.
    /// </summary>
    /// <param name="net">The deserialized MMOD detection network.</param>
    /// <param name="images">The images to scan; every image must be 8bit greyscale or RGB and, after upsampling, all must share the same dimensions.</param>
    /// <param name="upsampleNumTimes">Number of pyramid-up passes applied to every image before detection.</param>
    /// <param name="batchSize">How many images the network processes per forward pass.</param>
    /// <returns>One collection of rectangles per input image, in input order.</returns>
    /// <exception cref="NotSupportedException">An image mode is neither greyscale nor RGB.</exception>
    /// <exception cref="ArgumentException">The upsampled images do not all have identical dimensions.</exception>
    public static IEnumerable<IEnumerable<MModRect>> DetectMulti(LossMmod net, IEnumerable<Image> images, int upsampleNumTimes, int batchSize = 128)
    {
        List<Matrix<RgbPixel>>? destImages = [];
        List<IEnumerable<MModRect>>? allRects = [];

        // try/finally guarantees the intermediate matrices are disposed even if
        // validation or the network call throws.
        try
        {
            using PyramidDown? pyr = new(2);
            // Copy the data into dlib based objects
            foreach (Image? image in images)
            {
                Matrix<RgbPixel>? matrix = new();
                Mode type = image.Mode;
                switch (type)
                {
                    case Mode.Greyscale:
                    case Mode.Rgb:
                        DlibDotNet.Dlib.AssignImage(image.Matrix, matrix);
                        break;
                    default:
                        throw new NotSupportedException("Unsupported image type, must be 8bit gray or RGB image.");
                }

                for (int i = 0; i < upsampleNumTimes; i++)
                    DlibDotNet.Dlib.PyramidUp(matrix);

                destImages.Add(matrix);
            }

            // The network batches inputs into one tensor, so mixed sizes are rejected.
            for (int i = 1; i < destImages.Count; i++)
                if (destImages[i - 1].Columns != destImages[i].Columns || destImages[i - 1].Rows != destImages[i].Rows)
                    throw new ArgumentException("Images in list must all have the same dimensions.");

            OutputLabels<IEnumerable<MModRect>>? dets = net.Operator(destImages, (ulong)batchSize);
            // One detection set per input image; scale each rectangle back down
            // to original-image coordinates.
            foreach (IEnumerable<MModRect>? det in dets)
            {
                List<MModRect>? rects = [];
                foreach (MModRect? d in det)
                {
                    DRectangle drect = pyr.RectDown(new DRectangle(d.Rect), (uint)upsampleNumTimes);
                    d.Rect = new Rectangle((int)drect.Left, (int)drect.Top, (int)drect.Right, (int)drect.Bottom);
                    rects.Add(d);
                }

                allRects.Add(rects);
            }
        }
        finally
        {
            foreach (Matrix<RgbPixel>? matrix in destImages)
                matrix.Dispose();
        }

        return allRects;
    }

    #endregion

}
|
129
FaceRecognitionDotNet/Dlib/Python/FaceRecognitionModelV1.cs
Normal file
129
FaceRecognitionDotNet/Dlib/Python/FaceRecognitionModelV1.cs
Normal file
@ -0,0 +1,129 @@
|
||||
using DlibDotNet;
|
||||
using DlibDotNet.Dnn;
|
||||
|
||||
namespace View_by_Distance.FaceRecognitionDotNet.Dlib.Python;
|
||||
|
||||
internal sealed class FaceRecognitionModelV1
{

    #region Methods

    /// <summary>
    /// Computes the 128-dimension descriptor for a single detected face by
    /// delegating to the batch path with a one-image, one-face batch.
    /// </summary>
    /// <param name="net">The deserialized recognition (metric) network.</param>
    /// <param name="img">The image containing the face.</param>
    /// <param name="face">The face's landmark detection (68- or 5-point).</param>
    /// <param name="numberOfJitters">How many jittered crops to average; &lt;= 1 means no jittering.</param>
    /// <returns>The face descriptor as a double matrix.</returns>
    public static Matrix<double> ComputeFaceDescriptor(LossMetric net, Image img, FullObjectDetection face, int numberOfJitters)
    {
        FullObjectDetection[]? faces = [face];
        return ComputeFaceDescriptors(net, img, faces, numberOfJitters).First();
    }

    /// <summary>
    /// Computes descriptors for several faces in one image by delegating to
    /// the batch path with a single-image batch.
    /// </summary>
    public static IEnumerable<Matrix<double>> ComputeFaceDescriptors(LossMetric net, Image img, IEnumerable<FullObjectDetection> faces, int numberOfJitters)
    {
        Image[]? batchImage = [img];
        IEnumerable<FullObjectDetection>[]? batchFaces = [faces];
        return BatchComputeFaceDescriptors(net, batchImage, batchFaces, numberOfJitters).First();
    }

    /// <summary>
    /// Computes face descriptors for a batch of images, each with its own set
    /// of landmark detections. Face chips (150px, 0.25 padding) are extracted
    /// for every face, run through the metric network (optionally averaged over
    /// jittered copies), and returned grouped per input image.
    /// </summary>
    /// <param name="net">The deserialized recognition (metric) network.</param>
    /// <param name="batchImages">Images, parallel to <paramref name="batchFaces"/>.</param>
    /// <param name="batchFaces">Per-image landmark detections (68- or 5-point only).</param>
    /// <param name="numberOfJitters">Jittered copies to average per face; &lt;= 1 disables jittering.</param>
    /// <returns>For each image, the descriptors of its faces in input order.</returns>
    /// <exception cref="ArgumentException">Batch sizes differ, or a detection is not 68- or 5-point.</exception>
    /// <exception cref="ApplicationException">Internal consistency check failed (descriptor count mismatch).</exception>
    public static IEnumerable<IEnumerable<Matrix<double>>> BatchComputeFaceDescriptors(LossMetric net,
        IList<Image> batchImages,
        IList<IEnumerable<FullObjectDetection>> batchFaces,
        int numberOfJitters)
    {
        if (batchImages.Count != batchFaces.Count)
            throw new ArgumentException("The array of images and the array of array of locations must be of the same size");

        // Validate every detection up front before allocating native resources.
        foreach (IEnumerable<FullObjectDetection>? faces in batchFaces)
            foreach (FullObjectDetection? f in faces)
            {
                if (f.Parts is not 68 and not 5)
                    throw new ArgumentException("The full_object_detection must use the iBUG 300W 68 point face landmark style or dlib's 5 point style.");
            }

        // faceChips is the flat list fed to the network; faceChipsArray keeps the
        // owning native arrays alive until the chips are no longer needed.
        List<Array<Matrix<RgbPixel>>>? faceChipsArray = new(batchImages.Count);
        List<Matrix<RgbPixel>>? faceChips = [];
        for (int i = 0; i < batchImages.Count; ++i)
        {
            IEnumerable<FullObjectDetection>? faces = batchFaces[i];
            Image? img = batchImages[i];

            // 150px chip with 0.25 padding — dlib's standard recognition chip geometry.
            List<ChipDetails>? dets = new(faces.Count());
            foreach (FullObjectDetection? f in faces)
                dets.Add(DlibDotNet.Dlib.GetFaceChipDetails(f, 150, 0.25));

            Array<Matrix<RgbPixel>>? thisImageFaceChips = DlibDotNet.Dlib.ExtractImageChips<RgbPixel>(img.Matrix, dets);
            foreach (Matrix<RgbPixel>? chip in thisImageFaceChips)
                faceChips.Add(chip);
            faceChipsArray.Add(thisImageFaceChips);

            foreach (ChipDetails? det in dets)
                det.Dispose();
        }

        // One (initially empty) descriptor list per input image.
        List<List<Matrix<double>>>? faceDescriptors = [];
        for (int i = 0, count = batchImages.Count; i < count; i++)
            faceDescriptors.Add([]);

        if (numberOfJitters <= 1)
        {
            // extract descriptors and convert from float vectors to double vectors
            OutputLabels<Matrix<float>>? descriptors = net.Operator(faceChips, 16);
            int index = 0;
            Matrix<float>[]? list = descriptors.Select(matrix => matrix).ToArray();
            // `index` walks the flat descriptor list in the same order the chips
            // were appended, so the i/j nesting re-groups them per image.
            for (int i = 0; i < batchFaces.Count; ++i)
                for (int j = 0; j < batchFaces[i].Count(); ++j)
                    faceDescriptors[i].Add(DlibDotNet.Dlib.MatrixCast<double>(list[index++]));

            if (index != list.Length)
                throw new ApplicationException();
        }
        else
        {
            // extract descriptors and convert from float vectors to double vectors
            int index = 0;
            for (int i = 0; i < batchFaces.Count; ++i)
                for (int j = 0; j < batchFaces[i].Count(); ++j)
                {
                    // Average the network output over numberOfJitters randomly
                    // jittered copies of this chip to stabilize the descriptor.
                    Matrix<RgbPixel>[]? tmp = JitterImage(faceChips[index++], numberOfJitters).ToArray();
                    using (OutputLabels<Matrix<float>>? tmp2 = net.Operator(tmp, 16))
                    using (MatrixOp? mat = DlibDotNet.Dlib.Mat(tmp2))
                    {
                        Matrix<double>? r = DlibDotNet.Dlib.Mean<double>(mat);
                        faceDescriptors[i].Add(r);
                    }

                    foreach (Matrix<RgbPixel>? matrix in tmp)
                        matrix.Dispose();
                }

            if (index != faceChips.Count)
                throw new ApplicationException();
        }

        // Release every native chip and its owning array now that descriptors
        // have been extracted.
        if (faceChipsArray.Count > 0)
        {
            foreach (Array<Matrix<RgbPixel>>? array in faceChipsArray)
            {
                foreach (Matrix<RgbPixel>? faceChip in array)
                    faceChip.Dispose();
                array.Dispose();
            }
        }

        return faceDescriptors;
    }

    #region Helpers

    // Shared dlib RNG for jittering. NOTE(review): shared mutable state — the
    // jitter path is presumably not thread-safe; confirm before parallelizing.
    private static readonly Rand _Rand = new();

    /// <summary>
    /// Produces <paramref name="numberOfJitters"/> randomly jittered copies of
    /// <paramref name="img"/>. Caller owns (and must dispose) the returned matrices.
    /// </summary>
    private static IEnumerable<Matrix<RgbPixel>> JitterImage(Matrix<RgbPixel> img, int numberOfJitters)
    {
        List<Matrix<RgbPixel>>? crops = [];
        for (int i = 0; i < numberOfJitters; ++i)
            crops.Add(DlibDotNet.Dlib.JitterImage(img, _Rand));

        return crops;
    }

    #endregion

    #endregion

}
|
169
FaceRecognitionDotNet/Dlib/Python/SimpleObjectDetector.cs
Normal file
169
FaceRecognitionDotNet/Dlib/Python/SimpleObjectDetector.cs
Normal file
@ -0,0 +1,169 @@
|
||||
using DlibDotNet;
|
||||
using View_by_Distance.Shared.Models.Stateless;
|
||||
|
||||
namespace View_by_Distance.FaceRecognitionDotNet.Dlib.Python;
|
||||
|
||||
internal sealed class SimpleObjectDetector
{

    #region Methods

    /// <summary>
    /// Runs the HOG frontal-face detector on an image (greyscale or RGB),
    /// optionally upsampling first, and returns the face rectangles in
    /// original-image coordinates. Confidences and weight indices for each
    /// rectangle are written into the supplied lists (which are cleared first).
    /// </summary>
    /// <param name="detector">The HOG frontal face detector.</param>
    /// <param name="img">The image to scan.</param>
    /// <param name="upsamplingAmount">Number of pyramid-up passes before detection; 0 scans at native resolution.</param>
    /// <param name="adjustThreshold">Detection threshold adjustment passed straight to the detector.</param>
    /// <param name="detectionConfidences">Output: per-rectangle confidences, parallel to the returned rectangles.</param>
    /// <param name="weightIndices">Output: per-rectangle weight indices, parallel to the returned rectangles.</param>
    /// <returns>Detected rectangles in original-image coordinates.</returns>
    public static IEnumerable<Rectangle> RunDetectorWithUpscale1(FrontalFaceDetector detector,
        Image img,
        uint upsamplingAmount,
        double adjustThreshold,
        List<double> detectionConfidences,
        List<ulong> weightIndices)
    {
        List<Rectangle>? rectangles = [];

        // NOTE(review): the greyscale and RGB branches below are structurally
        // identical except for the pixel type; a generic helper would remove the
        // duplication, but the detector.Operator overload set must be verified
        // against DlibDotNet before unifying.
        if (img.Mode == Mode.Greyscale)
        {
            Matrix<byte>? greyscaleMatrix = img.Matrix as Matrix<byte>;
            if (upsamplingAmount == 0)
            {
                // Native resolution: detect directly.
                detector.Operator(greyscaleMatrix, out IEnumerable<RectDetection>? rectDetections, adjustThreshold);

                RectDetection[]? dets = rectDetections.ToArray();
                SplitRectDetections(dets, rectangles, detectionConfidences, weightIndices);

                foreach (RectDetection? rectDetection in dets)
                    rectDetection.Dispose();
            }
            else
            {
                using PyramidDown? pyr = new(2);
                Matrix<byte>? temp = null;

                try
                {
                    // First pyramid-up copies into temp; remaining passes upsample in place.
                    DlibDotNet.Dlib.PyramidUp(greyscaleMatrix, pyr, out temp);

                    uint levels = upsamplingAmount - 1;
                    while (levels > 0)
                    {
                        levels--;
                        DlibDotNet.Dlib.PyramidUp(temp);
                    }

                    detector.Operator(temp, out IEnumerable<RectDetection>? rectDetections, adjustThreshold);

                    // Map detections back down to the original image coordinates.
                    RectDetection[]? dets = rectDetections.ToArray();
                    foreach (RectDetection? t in dets)
                        t.Rect = pyr.RectDown(t.Rect, upsamplingAmount);

                    SplitRectDetections(dets, rectangles, detectionConfidences, weightIndices);

                    foreach (RectDetection? rectDetection in dets)
                        rectDetection.Dispose();
                }
                finally
                {
                    temp?.Dispose();
                }
            }

            return rectangles;
        }
        else
        {
            Matrix<RgbPixel>? rgbMatrix = img.Matrix as Matrix<RgbPixel>;
            if (upsamplingAmount == 0)
            {
                detector.Operator(rgbMatrix, out IEnumerable<RectDetection>? rectDetections, adjustThreshold);

                RectDetection[]? dets = rectDetections.ToArray();
                SplitRectDetections(dets, rectangles, detectionConfidences, weightIndices);

                foreach (RectDetection? rectDetection in dets)
                    rectDetection.Dispose();
            }
            else
            {
                using PyramidDown? pyr = new(2);
                Matrix<RgbPixel>? temp = null;

                try
                {
                    DlibDotNet.Dlib.PyramidUp(rgbMatrix, pyr, out temp);

                    uint levels = upsamplingAmount - 1;
                    while (levels > 0)
                    {
                        levels--;
                        DlibDotNet.Dlib.PyramidUp(temp);
                    }

                    detector.Operator(temp, out IEnumerable<RectDetection>? rectDetections, adjustThreshold);

                    RectDetection[]? dets = rectDetections.ToArray();
                    foreach (RectDetection? t in dets)
                        t.Rect = pyr.RectDown(t.Rect, upsamplingAmount);

                    SplitRectDetections(dets, rectangles, detectionConfidences, weightIndices);

                    foreach (RectDetection? rectDetection in dets)
                        rectDetection.Dispose();
                }
                finally
                {
                    temp?.Dispose();
                }
            }

            return rectangles;
        }
    }

    /// <summary>
    /// Convenience wrapper over <see cref="RunDetectorWithUpscale1"/> with a
    /// zero adjust threshold; lazily yields each rectangle paired with its
    /// detection confidence.
    /// </summary>
    /// <param name="detector">The HOG frontal face detector.</param>
    /// <param name="image">The image to scan.</param>
    /// <param name="upsamplingAmount">Number of pyramid-up passes before detection.</param>
    /// <returns>Rectangle/confidence tuples in detection order.</returns>
    public static IEnumerable<Tuple<Rectangle, double>> RunDetectorWithUpscale2(FrontalFaceDetector detector,
        Image image,
        uint upsamplingAmount)
    {
        // NOTE(review): throwing NullReferenceException by hand is an idiom
        // violation — ArgumentNullException is the conventional type. Left as-is
        // because changing the thrown type is a caller-visible contract change.
        if (detector == null)
            throw new NullReferenceException(nameof(detector));
        if (image == null)
            throw new NullReferenceException(nameof(image));

        detector.ThrowIfDisposed();
        image.ThrowIfDisposed();

        List<double>? detectionConfidences = [];
        List<ulong>? weightIndices = [];
        const double adjustThreshold = 0.0;

        Rectangle[]? rects = RunDetectorWithUpscale1(detector,
            image,
            upsamplingAmount,
            adjustThreshold,
            detectionConfidences,
            weightIndices).ToArray();

        // detectionConfidences is parallel to rects, so a running index pairs them.
        int index = 0;
        foreach (Rectangle rect in rects)
            yield return new Tuple<Rectangle, double>(rect, detectionConfidences[index++]);
    }

    #region Helpers

    /// <summary>
    /// Splits an array of detections into three parallel lists (rectangle,
    /// confidence, weight index), clearing the lists first.
    /// </summary>
    private static void SplitRectDetections(RectDetection[] rectDetections,
        List<Rectangle> rectangles,
        List<double> detectionConfidences,
        List<ulong> weightIndices)
    {
        rectangles.Clear();
        detectionConfidences.Clear();
        weightIndices.Clear();

        foreach (RectDetection? rectDetection in rectDetections)
        {
            rectangles.Add(rectDetection.Rect);
            detectionConfidences.Add(rectDetection.DetectionConfidence);
            weightIndices.Add(rectDetection.WeightIndex);
        }
    }

    #endregion

    #endregion

}
|
26
FaceRecognitionDotNet/Extensions/FaceDetector.cs
Normal file
26
FaceRecognitionDotNet/Extensions/FaceDetector.cs
Normal file
@ -0,0 +1,26 @@
|
||||
using DlibDotNet;
|
||||
using View_by_Distance.Shared.Models;
|
||||
|
||||
namespace View_by_Distance.FaceRecognitionDotNet.Extensions;
|
||||
|
||||
/// <summary>
/// An abstract base class that provides functionality to detect face locations from image.
/// </summary>
public abstract class FaceDetector : DisposableObject
{

    #region Methods

    // Internal entry point used by the library: unwraps the Image to its matrix
    // and delegates to the derived implementation.
    internal IEnumerable<Location> Detect(Image image, int numberOfTimesToUpsample) => RawDetect(image.Matrix, numberOfTimesToUpsample);

    /// <summary>
    /// Returns an enumerable collection of face location correspond to all faces in specified image.
    /// </summary>
    /// <param name="matrix">The matrix contains a face.</param>
    /// <param name="numberOfTimesToUpsample">The number of times to up-sample the image when finding faces.</param>
    /// <returns>An enumerable collection of face location correspond to all faces.</returns>
    protected abstract IEnumerable<Location> RawDetect(MatrixBase matrix, int numberOfTimesToUpsample);

    #endregion

}
|
36
FaceRecognitionDotNet/Extensions/FaceLandmarkDetector.cs
Normal file
36
FaceRecognitionDotNet/Extensions/FaceLandmarkDetector.cs
Normal file
@ -0,0 +1,36 @@
|
||||
using DlibDotNet;
|
||||
using View_by_Distance.Shared.Models;
|
||||
using View_by_Distance.Shared.Models.Stateless;
|
||||
|
||||
namespace View_by_Distance.FaceRecognitionDotNet.Extensions;
|
||||
|
||||
/// <summary>
/// An abstract base class that provides functionality to detect face parts locations from face image.
/// </summary>
public abstract class FaceLandmarkDetector : DisposableObject
{

    #region Methods

    // Internal entry point: unwraps the Image to its matrix and delegates to
    // the derived landmark implementation.
    internal FullObjectDetection Detect(Image image, Location location) => RawDetect(image.Matrix, location);

    // Internal entry point: delegates landmark grouping to the derived implementation.
    internal IEnumerable<Dictionary<FacePart, IEnumerable<FacePoint>>> GetLandmarks(IEnumerable<FacePoint[]> landmarkTuples) => RawGetLandmarks(landmarkTuples);

    /// <summary>
    /// Returns an object contains information of face parts corresponds to specified location in specified image.
    /// </summary>
    /// <param name="matrix">The matrix contains a face.</param>
    /// <param name="location">The location rectangle for a face.</param>
    /// <returns>An object contains information of face parts.</returns>
    protected abstract FullObjectDetection RawDetect(MatrixBase matrix, Location location);

    /// <summary>
    /// Returns an enumerable collection of dictionary of face parts locations (eyes, nose, etc).
    /// </summary>
    /// <param name="landmarkTuples">The enumerable collection of face parts location.</param>
    /// <returns>An enumerable collection of dictionary of face parts locations (eyes, nose, etc).</returns>
    protected abstract IEnumerable<Dictionary<FacePart, IEnumerable<FacePoint>>> RawGetLandmarks(IEnumerable<FacePoint[]> landmarkTuples);

    #endregion

}
|
105
FaceRecognitionDotNet/FaceEncoding.cs
Normal file
105
FaceRecognitionDotNet/FaceEncoding.cs
Normal file
@ -0,0 +1,105 @@
|
||||
using DlibDotNet;
|
||||
using System.Runtime.Serialization;
|
||||
|
||||
namespace View_by_Distance.FaceRecognitionDotNet;
|
||||
|
||||
/// <summary>
/// Represents a feature data of face. This class cannot be inherited.
/// </summary>
/// <remarks>
/// NOTE(review): [Serializable]/ISerializable is the BinaryFormatter-era
/// serialization contract, and BinaryFormatter is removed in .NET 9 — confirm
/// what serializer actually consumes this before relying on it.
/// </remarks>
[Serializable]
public sealed class FaceEncoding : DisposableObject, ISerializable
{

    #region Fields

    // The underlying dlib descriptor matrix; owned by this instance and
    // released in DisposeUnmanaged.
    [NonSerialized]
    private readonly Matrix<double> _Encoding;

    #endregion

    #region Constructors

    // Takes ownership of the supplied matrix.
    internal FaceEncoding(Matrix<double> encoding) => _Encoding = encoding;

    /// <summary>
    /// Deserialization constructor: rebuilds the matrix from the flat array,
    /// row count, and column count stored by <see cref="GetObjectData"/>.
    /// </summary>
    private FaceEncoding(SerializationInfo info, StreamingContext context)
    {
        // NOTE(review): throwing NullReferenceException by hand — the
        // conventional type is ArgumentNullException; left unchanged because the
        // thrown type is caller-visible. `array` is also not null-checked before
        // being handed to the Matrix constructor — confirm intended behavior.
        if (info == null)
            throw new NullReferenceException(nameof(info));

        double[]? array = info.GetValue(nameof(_Encoding), typeof(double[])) as double[];
        int? row = (int?)info.GetValue(nameof(_Encoding.Rows), typeof(int));
        int? column = (int?)info.GetValue(nameof(_Encoding.Columns), typeof(int));
        if (row is null)
            throw new NullReferenceException(nameof(row));
        if (column is null)
            throw new NullReferenceException(nameof(column));
        _Encoding = new Matrix<double>(array, row.Value, column.Value);
    }

    #endregion

    #region Properties

    // Raw matrix access for intra-library use (e.g. FaceRecognition.FaceDistance).
    internal Matrix<double> Encoding => _Encoding;

    /// <summary>
    /// Gets the size of feature data.
    /// </summary>
    /// <exception cref="ObjectDisposedException">This object is disposed.</exception>
    public int Size
    {
        get
        {
            ThrowIfDisposed();
            return _Encoding.Size;
        }
    }

    #endregion

    #region Methods

    /// <summary>
    /// Gets a feature data of face as raw format.
    /// </summary>
    /// <returns>A <see cref="double"/> array that represents a feature data.</returns>
    /// <remarks><see cref="FaceEncoding"/> class supports serialization. This method is for interoperability between FaceRecognitionotNet and dlib.</remarks>
    /// <exception cref="ObjectDisposedException">This object is disposed.</exception>
    public double[] GetRawEncoding()
    {
        ThrowIfDisposed();
        return _Encoding.ToArray();
    }

    #region Overrides

    /// <summary>
    /// Releases all unmanaged resources.
    /// </summary>
    protected override void DisposeUnmanaged()
    {
        base.DisposeUnmanaged();
        _Encoding?.Dispose();
    }

    #endregion

    #endregion

    #region ISerializable Members

    /// <summary>
    /// Populates a <see cref="SerializationInfo"/> with the data needed to serialize the target object.
    /// </summary>
    /// <param name="info">The <see cref="SerializationInfo"/> to populate with data.</param>
    /// <param name="context">The destination (see <see cref="StreamingContext"/>) for this serialization.</param>
    public void GetObjectData(SerializationInfo info, StreamingContext context)
    {
        // Stored as flat array + dimensions; mirrored by the deserialization constructor.
        info.AddValue(nameof(_Encoding), _Encoding.ToArray());
        info.AddValue(nameof(_Encoding.Rows), _Encoding.Rows);
        info.AddValue(nameof(_Encoding.Columns), _Encoding.Columns);
    }

    #endregion

}
|
497
FaceRecognitionDotNet/FaceRecognition.cs
Normal file
497
FaceRecognitionDotNet/FaceRecognition.cs
Normal file
@ -0,0 +1,497 @@
|
||||
using DlibDotNet;
|
||||
using DlibDotNet.Dnn;
|
||||
using System.Collections.ObjectModel;
|
||||
using System.Drawing;
|
||||
using System.Drawing.Imaging;
|
||||
using System.Runtime.InteropServices;
|
||||
using View_by_Distance.FaceRecognitionDotNet.Dlib.Python;
|
||||
using View_by_Distance.FaceRecognitionDotNet.Extensions;
|
||||
using View_by_Distance.Shared.Models;
|
||||
using View_by_Distance.Shared.Models.Stateless;
|
||||
using View_by_Distance.Shared.Models.Stateless.Methods;
|
||||
|
||||
namespace View_by_Distance.FaceRecognitionDotNet;
|
||||
|
||||
public class FaceRecognition : DisposableObject
|
||||
{
|
||||
|
||||
public FaceDetector? CustomFaceDetector { get; set; }
|
||||
public FaceLandmarkDetector? CustomFaceLandmarkDetector { get; set; }
|
||||
|
||||
private readonly Model _Model;
|
||||
private readonly int _NumberOfJitters;
|
||||
private readonly LossMetric _FaceEncoder;
|
||||
private readonly LossMmod _CnnFaceDetector;
|
||||
private readonly int _NumberOfTimesToUpsample;
|
||||
private readonly PredictorModel _PredictorModel;
|
||||
private readonly FrontalFaceDetector _FaceDetector;
|
||||
private readonly ShapePredictor _PosePredictor5Point;
|
||||
private readonly ShapePredictor _PosePredictor68Point;
|
||||
|
||||
private record Record(Location Location, List<FaceEncoding?> FaceEncodings, List<List<FacePartAndFacePointArray>> FaceParts);
|
||||
|
||||
public FaceRecognition(int numberOfJitters, int numberOfTimesToUpsample, Model model, ModelParameter modelParameter, PredictorModel predictorModel)
|
||||
{
|
||||
if (modelParameter is null)
|
||||
throw new NullReferenceException(nameof(modelParameter));
|
||||
if (modelParameter.PosePredictor5FaceLandmarksModel is null)
|
||||
throw new NullReferenceException(nameof(modelParameter.PosePredictor5FaceLandmarksModel));
|
||||
if (modelParameter.PosePredictor68FaceLandmarksModel is null)
|
||||
throw new NullReferenceException(nameof(modelParameter.PosePredictor68FaceLandmarksModel));
|
||||
if (modelParameter.CnnFaceDetectorModel is null)
|
||||
throw new NullReferenceException(nameof(modelParameter.CnnFaceDetectorModel));
|
||||
if (modelParameter.FaceRecognitionModel is null)
|
||||
throw new NullReferenceException(nameof(modelParameter.FaceRecognitionModel));
|
||||
_Model = model;
|
||||
_PredictorModel = predictorModel;
|
||||
_NumberOfJitters = numberOfJitters;
|
||||
_NumberOfTimesToUpsample = numberOfTimesToUpsample;
|
||||
_FaceDetector?.Dispose();
|
||||
_FaceDetector = DlibDotNet.Dlib.GetFrontalFaceDetector();
|
||||
_PosePredictor68Point?.Dispose();
|
||||
_PosePredictor68Point = ShapePredictor.Deserialize(modelParameter.PosePredictor68FaceLandmarksModel);
|
||||
_PosePredictor5Point?.Dispose();
|
||||
_PosePredictor5Point = ShapePredictor.Deserialize(modelParameter.PosePredictor5FaceLandmarksModel);
|
||||
_CnnFaceDetector?.Dispose();
|
||||
_CnnFaceDetector = LossMmod.Deserialize(modelParameter.CnnFaceDetectorModel);
|
||||
_FaceEncoder?.Dispose();
|
||||
_FaceEncoder = LossMetric.Deserialize(modelParameter.FaceRecognitionModel);
|
||||
}
|
||||
|
||||
public static double FaceDistance(FaceEncoding faceEncoding, FaceEncoding faceToCompare)
|
||||
{
|
||||
if (faceEncoding is null)
|
||||
throw new NullReferenceException(nameof(faceEncoding));
|
||||
if (faceToCompare is null)
|
||||
throw new NullReferenceException(nameof(faceToCompare));
|
||||
faceEncoding.ThrowIfDisposed();
|
||||
faceToCompare.ThrowIfDisposed();
|
||||
if (faceEncoding.Encoding.Size == 0)
|
||||
return 0;
|
||||
using Matrix<double>? diff = faceEncoding.Encoding - faceToCompare.Encoding;
|
||||
return DlibDotNet.Dlib.Length(diff);
|
||||
}
|
||||
|
||||
private static FacePoint[] Join(IEnumerable<FacePoint> facePoints1, IEnumerable<FacePoint> facePoints2)
|
||||
{
|
||||
List<FacePoint> results = [.. facePoints1, .. facePoints2];
|
||||
return results.ToArray();
|
||||
}
|
||||
|
||||
private List<FacePartAndFacePointArray> GetFaceParts(FullObjectDetection fullObjectDetection)
|
||||
{
|
||||
List<FacePartAndFacePointArray> results = [];
|
||||
FacePoint[] facePoints = Enumerable.Range(0, (int)fullObjectDetection.Parts)
|
||||
.Select(index => new FacePoint(index, fullObjectDetection.GetPart((uint)index).X, fullObjectDetection.GetPart((uint)index).Y))
|
||||
.ToArray();
|
||||
switch (_PredictorModel)
|
||||
{
|
||||
case PredictorModel.Custom:
|
||||
throw new NotImplementedException();
|
||||
case PredictorModel.Large:
|
||||
if (facePoints.Length == 68)
|
||||
{
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.Chin, facePoints.Skip(0).Take(17).ToArray()));
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.LeftEyebrow, facePoints.Skip(17).Take(5).ToArray()));
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.RightEyebrow, facePoints.Skip(22).Take(5).ToArray()));
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.NoseBridge, facePoints.Skip(27).Take(5).ToArray()));
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.NoseTip, facePoints.Skip(31).Take(5).ToArray()));
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.LeftEye, facePoints.Skip(36).Take(6).ToArray()));
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.RightEye, facePoints.Skip(42).Take(6).ToArray()));
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.TopLip, Join(facePoints.Skip(48).Take(7), facePoints.Skip(60).Take(5))));
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.BottomLip, Join(facePoints.Skip(55).Take(5), facePoints.Skip(65).Take(3))));
|
||||
}
|
||||
break;
|
||||
case PredictorModel.Small:
|
||||
if (facePoints.Length == 5)
|
||||
{
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.RightEye, facePoints.Skip(0).Take(2).ToArray()));
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.LeftEye, facePoints.Skip(2).Take(2).ToArray()));
|
||||
results.Add(new FacePartAndFacePointArray(FacePart.NoseTip, facePoints.Skip(4).Take(1).ToArray()));
|
||||
}
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return results;
|
||||
}
|
||||
|
||||
/// <summary>
/// Runs the face detector selected by <see cref="_Model"/> against <paramref name="image"/>
/// and returns the raw dlib detections with their confidences.
/// </summary>
/// <exception cref="NotSupportedException">The custom detector is selected but not configured.</exception>
private MModRect[] GetMModRects(Image image) => _Model switch
{
    Model.Cnn => CnnFaceDetectionModelV1.Detect(_CnnFaceDetector, image, _NumberOfTimesToUpsample).ToArray(),
    Model.Hog => SimpleObjectDetector.RunDetectorWithUpscale2(_FaceDetector, image, (uint)_NumberOfTimesToUpsample)
        .Select(l => new MModRect { Rect = l.Item1, DetectionConfidence = l.Item2 })
        .ToArray(),
    Model.Custom when CustomFaceDetector is null => throw new NotSupportedException("The custom face detector is not ready."),
    Model.Custom => CustomFaceDetector.Detect(image, _NumberOfTimesToUpsample)
        .Select(rect => new MModRect
        {
            Rect = new DlibDotNet.Rectangle(rect.Left, rect.Top, rect.Right, rect.Bottom),
            DetectionConfidence = rect.Confidence
        })
        .ToArray(),
    _ => throw new Exception(),
};
|
||||
|
||||
/// <summary>
/// Detects every face in <paramref name="image"/> and returns one trimmed
/// bounding <see cref="Location"/> per detection.
/// </summary>
/// <param name="image">The image to search for faces.</param>
/// <returns>A list of face locations; empty when no face is found.</returns>
/// <exception cref="NullReferenceException"><paramref name="image"/> is null.</exception>
/// <exception cref="ObjectDisposedException">This object or <paramref name="image"/> is disposed.</exception>
public List<Location> FaceLocations(Image image)
{
    if (image is null)
        throw new NullReferenceException(nameof(image));
    image.ThrowIfDisposed();
    ThrowIfDisposed();
    List<Location> results = [];
    // Keep the concrete array so the detection count comes from Length instead of
    // re-running Enumerable.Count() on each iteration (consistent with GetLocations).
    MModRect[] mModRects = GetMModRects(image);
    System.Drawing.Rectangle rectangle;
    foreach (MModRect? mModRect in mModRects)
    {
        rectangle = new(mModRect.Rect.Left, mModRect.Rect.Top, (int)mModRect.Rect.Width, (int)mModRect.Rect.Height);
        Location location = ILocation.TrimBound(mModRect.DetectionConfidence, rectangle, image.Width, image.Height, mModRects.Length);
        // Dispose each native rect as soon as its managed Location has been built.
        mModRect.Dispose();
        results.Add(location);
    }
    return results;
}
|
||||
|
||||
/// <summary>
/// Computes facial landmark detections for each location, using either the
/// custom landmark detector or the dlib shape predictor chosen by <see cref="_PredictorModel"/>.
/// </summary>
/// <exception cref="NullReferenceException">Custom model selected but no custom landmark detector is set.</exception>
private List<FullObjectDetection> GetFullObjectDetections(Image image, List<Location> locations)
{
    List<FullObjectDetection> results = [];
    if (_PredictorModel == PredictorModel.Custom)
    {
        if (CustomFaceLandmarkDetector is null)
            throw new NullReferenceException(nameof(CustomFaceLandmarkDetector));
        foreach (Location location in locations)
            results.Add(CustomFaceLandmarkDetector.Detect(image, location));
        return results;
    }
    ShapePredictor posePredictor = _PredictorModel switch
    {
        PredictorModel.Large => _PosePredictor68Point,
        PredictorModel.Small => _PosePredictor5Point,
        PredictorModel.Custom => throw new NotImplementedException(),
        _ => throw new Exception()
    };
    foreach (Location location in locations)
    {
        DlibDotNet.Rectangle rectangle = new(location.Left, location.Top, location.Right, location.Bottom);
        results.Add(posePredictor.Detect(image.Matrix, rectangle));
    }
    return results;
}
|
||||
|
||||
/// <summary>
/// Runs face detection and converts each raw detection into a trimmed <see cref="Location"/>.
/// </summary>
private List<Location> GetLocations(Image image)
{
    List<Location> results = [];
    MModRect[] mModRects = GetMModRects(image);
    int count = mModRects.Length;
    foreach (MModRect? mModRect in mModRects)
    {
        System.Drawing.Rectangle bounds = new(mModRect.Rect.Left, mModRect.Rect.Top, (int)mModRect.Rect.Width, (int)mModRect.Rect.Height);
        Location location = ILocation.TrimBound(mModRect.DetectionConfidence, bounds, image.Width, image.Height, count);
        // Release the native rect once its data has been captured.
        mModRect.Dispose();
        results.Add(location);
    }
    return results;
}
|
||||
|
||||
/// <summary>
/// Detects (or reuses) face locations in <paramref name="image"/> and assembles one
/// <see cref="FaceRecognitionGroup"/> per face, optionally including its encoding and
/// its named landmark points.
/// </summary>
/// <param name="image">The image to analyze.</param>
/// <param name="locations">Known face locations; when empty, detection runs and the results are appended to this list.</param>
/// <param name="includeFaceEncoding">When false, each group's encoding is null.</param>
/// <param name="includeFaceParts">When false, each group's landmark dictionary is null.</param>
/// <returns>One group per face.</returns>
/// <exception cref="NullReferenceException"><paramref name="image"/> is null.</exception>
/// <exception cref="NotSupportedException">The predictor model is <see cref="PredictorModel.Custom"/>.</exception>
public List<FaceRecognitionGroup> GetCollection(Image image, List<Location> locations, bool includeFaceEncoding, bool includeFaceParts)
{
    List<FaceRecognitionGroup> results = [];
    if (image is null)
        throw new NullReferenceException(nameof(image));
    image.ThrowIfDisposed();
    ThrowIfDisposed();
    if (_PredictorModel == PredictorModel.Custom)
        throw new NotSupportedException("FaceRecognition.PredictorModel.Custom is not supported.");
    if (locations.Count == 0)
        locations.AddRange(GetLocations(image));
    List<FullObjectDetection> fullObjectDetections = GetFullObjectDetections(image, locations);
    if (fullObjectDetections.Count != locations.Count)
        throw new Exception();
    List<Record> records = GetRecords(locations);
    if (locations.Count != records.Count)
        throw new Exception();
    SetFaceEncodings(records, image, fullObjectDetections, includeFaceEncoding);
    SetFaceParts(records, fullObjectDetections, includeFaceParts);
    foreach (FullObjectDetection fullObjectDetection in fullObjectDetections)
        fullObjectDetection.Dispose();
    AddFaceRecognitionGroups(results, records);
    return results;
}

// Creates one empty Record per location; encodings and parts are filled in afterwards.
private static List<Record> GetRecords(List<Location> locations)
{
    List<Record> results = [];
    foreach (Location location in locations)
        results.Add(new(location, [], []));
    return results;
}

// Adds exactly one encoding entry per record: null when encodings were not requested,
// otherwise the descriptor computed from the matching full object detection.
private void SetFaceEncodings(List<Record> records, Image image, List<FullObjectDetection> fullObjectDetections, bool includeFaceEncoding)
{
    if (!includeFaceEncoding)
    {
        for (int i = 0; i < records.Count; i++)
            records[i].FaceEncodings.Add(null);
    }
    else
    {
        Matrix<double> doubles;
        FaceEncoding faceEncoding;
        for (int i = 0; i < records.Count; i++)
        {
            doubles = FaceRecognitionModelV1.ComputeFaceDescriptor(_FaceEncoder, image, fullObjectDetections[i], _NumberOfJitters);
            faceEncoding = new(doubles);
            records[i].FaceEncodings.Add(faceEncoding);
        }
    }
}

// Adds exactly one parts entry per record: an empty list when parts were not requested,
// otherwise the named landmark groups derived from the matching full object detection.
private void SetFaceParts(List<Record> records, List<FullObjectDetection> fullObjectDetections, bool includeFaceParts)
{
    if (!includeFaceParts)
    {
        for (int i = 0; i < records.Count; i++)
            records[i].FaceParts.Add([]);
    }
    else
    {
        List<FacePartAndFacePointArray> faceParts;
        for (int i = 0; i < records.Count; i++)
        {
            faceParts = GetFaceParts(fullObjectDetections[i]);
            records[i].FaceParts.Add(faceParts);
        }
    }
}

// Converts each complete record into a FaceRecognitionGroup; an empty parts list maps
// to a null dictionary in the resulting group.
private static void AddFaceRecognitionGroups(List<FaceRecognitionGroup> results, List<Record> records)
{
    const int indexZero = 0;
    FaceRecognitionGroup faceRecognitionGroup;
    Dictionary<FacePart, FacePoint[]> keyValuePairs;
    foreach (Record record in records)
    {
        if (record.FaceEncodings.Count != 1 || record.FaceParts.Count != 1)
            continue;
        if (record.FaceParts[indexZero].Count == 0)
            faceRecognitionGroup = new(record.Location, record.FaceEncodings[indexZero], null);
        else
        {
            keyValuePairs = [];
            foreach (FacePartAndFacePointArray facePartAndFacePointArray in record.FaceParts[indexZero])
                keyValuePairs.Add(facePartAndFacePointArray.FacePart, facePartAndFacePointArray.FacePoints);
            faceRecognitionGroup = new(record.Location, record.FaceEncodings[indexZero], keyValuePairs);
        }
        results.Add(faceRecognitionGroup);
    }
}
|
||||
|
||||
/// <summary>
/// Creates a <see cref="FaceEncoding"/> from a raw 128-element descriptor.
/// </summary>
/// <param name="encoding">The 128 doubles of a face descriptor.</param>
/// <exception cref="NullReferenceException"><paramref name="encoding"/> is null.</exception>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="encoding"/> does not contain exactly 128 values.</exception>
public static FaceEncoding LoadFaceEncoding(double[] encoding)
{
    if (encoding is null)
        throw new NullReferenceException(nameof(encoding));
    if (encoding.Length != 128)
    {
        string message = $"{nameof(encoding)}.{nameof(encoding.Length)} must be 128.";
        // Use the (paramName, message) overload: the single-string constructor treats
        // its argument as the parameter name, burying the message in ParamName.
        throw new ArgumentOutOfRangeException(nameof(encoding), message);
    }
#pragma warning disable
    Matrix<double>? matrix = Matrix<double>.CreateTemplateParameterizeMatrix(0, 1);
#pragma warning restore
    matrix.SetSize(128);
    matrix.Assign(encoding);
    return new FaceEncoding(matrix);
}
|
||||
|
||||
/// <summary>
/// Creates a <see cref="FaceEncoding"/> from a raw 512-element descriptor.
/// </summary>
/// <param name="encoding">The 512 doubles of a face descriptor.</param>
/// <exception cref="NullReferenceException"><paramref name="encoding"/> is null.</exception>
/// <exception cref="ArgumentOutOfRangeException"><paramref name="encoding"/> does not contain exactly 512 values.</exception>
public static FaceEncoding LoadBFaceEncoding(double[] encoding)
{
    if (encoding is null)
        throw new NullReferenceException(nameof(encoding));
    if (encoding.Length != 512)
    {
        string message = $"{nameof(encoding)}.{nameof(encoding.Length)} must be 512.";
        // Use the (paramName, message) overload: the single-string constructor treats
        // its argument as the parameter name, burying the message in ParamName.
        throw new ArgumentOutOfRangeException(nameof(encoding), message);
    }
#pragma warning disable
    Matrix<double>? matrix = Matrix<double>.CreateTemplateParameterizeMatrix(0, 1);
#pragma warning restore
    matrix.SetSize(512);
    matrix.Assign(encoding);
    return new FaceEncoding(matrix);
}
|
||||
|
||||
/// <summary>
/// Loads an image file from disk as RGB or greyscale pixel data.
/// </summary>
/// <param name="file">The path of the image file to load.</param>
/// <param name="mode">The pixel mode to load the image in; defaults to RGB.</param>
/// <exception cref="FileNotFoundException"><paramref name="file"/> does not exist.</exception>
public static Image LoadImageFile(string file, Mode mode = Mode.Rgb)
{
    if (!File.Exists(file))
        throw new FileNotFoundException(file);
    switch (mode)
    {
        case Mode.Rgb:
            return new Image(DlibDotNet.Dlib.LoadImageAsMatrix<RgbPixel>(file), mode);
        case Mode.Greyscale:
            return new Image(DlibDotNet.Dlib.LoadImageAsMatrix<byte>(file), mode);
        default:
            throw new NotImplementedException();
    }
}
|
||||
|
||||
#pragma warning disable CA1416
|
||||
|
||||
/// <summary>
/// Converts a GDI+ <see cref="Bitmap"/> into an <see cref="Image"/> backed by a dlib matrix.
/// Returns null when the pixel mode falls through without producing a matrix.
/// </summary>
/// <param name="bitmap">The bitmap to convert; must be 8bpp indexed, 24bpp RGB, or 32bpp (A)RGB.</param>
/// <exception cref="ArgumentOutOfRangeException">The bitmap's <see cref="PixelFormat"/> is not supported.</exception>
public static Image? LoadImage(Bitmap bitmap)
{
    Mode mode;
    int dstChannel;   // channels per pixel in the buffer handed to dlib
    int srcChannel;   // channels per pixel in the locked bitmap data
    int width = bitmap.Width;
    int height = bitmap.Height;
    PixelFormat format = bitmap.PixelFormat;
    System.Drawing.Rectangle rect = new(0, 0, width, height);
#pragma warning disable IDE0010
    switch (format)
    {
        case PixelFormat.Format8bppIndexed:
            mode = Mode.Greyscale;
            srcChannel = 1;
            dstChannel = 1;
            break;
        case PixelFormat.Format24bppRgb:
            mode = Mode.Rgb;
            srcChannel = 3;
            dstChannel = 3;
            break;
        case PixelFormat.Format32bppRgb:
        case PixelFormat.Format32bppArgb:
            // The fourth source byte (alpha/padding) is dropped during the copy below.
            mode = Mode.Rgb;
            srcChannel = 4;
            dstChannel = 3;
            break;
        default:
            throw new ArgumentOutOfRangeException($"{nameof(bitmap)}", $"The specified {nameof(PixelFormat)} is not supported.");
    }
#pragma warning restore IDE0010
    BitmapData? data = null;
    try
    {
        data = bitmap.LockBits(rect, ImageLockMode.ReadOnly, format);
        unsafe
        {
            // Tightly packed destination buffer (no stride padding).
            byte[]? array = new byte[width * height * dstChannel];
            fixed (byte* pArray = &array[0])
            {
                byte* dst = pArray;

                switch (srcChannel)
                {
                    case 1:
                        {
                            // Greyscale: copy each scanline, skipping any stride padding.
                            IntPtr src = data.Scan0;
                            int stride = data.Stride;

                            for (int h = 0; h < height; h++)
                                Marshal.Copy(IntPtr.Add(src, h * stride), array, h * width, width * dstChannel);
                        }
                        break;
                    case 3:
                    case 4:
                        {
                            // Color: per-pixel copy that also swaps channel order.
                            byte* src = (byte*)data.Scan0;
                            int stride = data.Stride;
                            for (int h = 0; h < height; h++)
                            {
                                int srcOffset = h * stride;
                                int dstOffset = h * width * dstChannel;
                                for (int w = 0; w < width; w++)
                                {
                                    // BGR order to RGB order
                                    dst[dstOffset + (w * dstChannel) + 0] = src[srcOffset + (w * srcChannel) + 2];
                                    dst[dstOffset + (w * dstChannel) + 1] = src[srcOffset + (w * srcChannel) + 1];
                                    dst[dstOffset + (w * dstChannel) + 2] = src[srcOffset + (w * srcChannel) + 0];
                                }
                            }
                        }
                        break;
                    default:
                        break;
                }
                IntPtr ptr = (IntPtr)pArray;
                switch (mode)
                {
                    case Mode.Rgb:
                        // NOTE(review): pArray is only pinned inside this fixed block —
                        // assumes the Matrix constructor copies from ptr rather than
                        // aliasing it; confirm against DlibDotNet.
                        return new Image(new Matrix<RgbPixel>(ptr, height, width, width * 3), Mode.Rgb);
                    case Mode.Greyscale:
                        return new Image(new Matrix<byte>(ptr, height, width, width), Mode.Greyscale);
                    default:
                        break;
                }
            }
        }
    }
    finally
    {
        // Always unlock the bitmap, even when a conversion branch returns or throws.
        if (data != null)
            bitmap.UnlockBits(data);
    }
    return null;
}
|
||||
|
||||
/// <summary>
/// Compares <paramref name="locationContainer"/> against every entry of
/// <paramref name="locationContainers"/> and returns the pairings ordered by
/// ascending distance (expressed in permyriad units).
/// </summary>
/// <exception cref="NullReferenceException"><paramref name="locationContainer"/> has no encoding.</exception>
/// <exception cref="ObjectDisposedException">An entry's encoding is missing or disposed.</exception>
public static ReadOnlyCollection<LocationContainer> GetLocationContainers(int permyriad, ReadOnlyCollection<LocationContainer> locationContainers, LocationContainer locationContainer)
{
    List<LocationContainer> results = [];
    if (locationContainers.Count != 0)
    {
        if (locationContainer.Encoding is not FaceEncoding faceEncodingToCompare)
            throw new NullReferenceException(nameof(locationContainer));
        faceEncodingToCompare.ThrowIfDisposed();
        foreach (LocationContainer l in locationContainers)
        {
#pragma warning disable CA1513
            if (l.Encoding is not FaceEncoding faceEncoding || faceEncoding.IsDisposed)
                throw new ObjectDisposedException($"{nameof(l)} contains disposed object.");
#pragma warning restore CA1513
            double length;
            using (Matrix<double> diff = faceEncoding.Encoding - faceEncodingToCompare.Encoding)
                length = DlibDotNet.Dlib.Length(diff);
            int lengthPermyriad = (int)(length * permyriad);
            results.Add(LocationContainer.Get(locationContainer, l, lengthPermyriad, keepExifDirectory: false, keepEncoding: false));
        }
    }
    return results.OrderBy(c => c.LengthPermyriad).ToArray().AsReadOnly();
}
|
||||
|
||||
/// <summary>
/// Computes the euclidean distance between <paramref name="faceDistanceToCompare"/>
/// and every entry of <paramref name="faceDistances"/>, in input order.
/// </summary>
/// <exception cref="NullReferenceException"><paramref name="faceDistanceToCompare"/> has no encoding.</exception>
/// <exception cref="ObjectDisposedException">An entry's encoding is missing or disposed.</exception>
public static List<FaceDistance> FaceDistances(ReadOnlyCollection<FaceDistance> faceDistances, FaceDistance faceDistanceToCompare)
{
    List<FaceDistance> results = [];
    if (faceDistances.Count == 0)
        return results;
    if (faceDistanceToCompare.Encoding is not FaceEncoding faceEncodingToCompare)
        throw new NullReferenceException(nameof(faceDistanceToCompare));
    faceEncodingToCompare.ThrowIfDisposed();
    foreach (FaceDistance faceDistance in faceDistances)
    {
#pragma warning disable CA1513
        if (faceDistance.Encoding is not FaceEncoding faceEncoding || faceEncoding.IsDisposed)
            throw new ObjectDisposedException($"{nameof(faceDistances)} contains disposed object.");
#pragma warning restore CA1513
        double length;
        using (Matrix<double> diff = faceEncoding.Encoding - faceEncodingToCompare.Encoding)
            length = DlibDotNet.Dlib.Length(diff);
        results.Add(new(faceDistance, length));
    }
    return results;
}
|
||||
|
||||
#pragma warning restore CA1416
|
||||
|
||||
/// <summary>
/// Releases the native dlib model resources owned by this instance.
/// </summary>
protected override void DisposeUnmanaged()
{
    base.DisposeUnmanaged();
    // Null-conditional guards: fields may be unset depending on construction.
    _PosePredictor68Point?.Dispose();
    _PosePredictor5Point?.Dispose();
    _CnnFaceDetector?.Dispose();
    _FaceEncoder?.Dispose();
    _FaceDetector?.Dispose();
}
|
||||
|
||||
}
|
6
FaceRecognitionDotNet/FaceRecognitionGroup.cs
Normal file
6
FaceRecognitionDotNet/FaceRecognitionGroup.cs
Normal file
@ -0,0 +1,6 @@
|
||||
using View_by_Distance.Shared.Models;
|
||||
using View_by_Distance.Shared.Models.Stateless;
|
||||
|
||||
namespace View_by_Distance.FaceRecognitionDotNet;
|
||||
|
||||
/// <summary>
/// Result for a single detected face: its location, optionally its encoding, and
/// optionally its landmark points keyed by <see cref="FacePart"/>.
/// </summary>
public record FaceRecognitionGroup(Location Location, FaceEncoding? FaceEncoding, Dictionary<FacePart, FacePoint[]>? KeyValuePairs);
|
22
FaceRecognitionDotNet/FaceRecognitionModels.cs
Normal file
22
FaceRecognitionDotNet/FaceRecognitionModels.cs
Normal file
@ -0,0 +1,22 @@
|
||||
namespace View_by_Distance.FaceRecognitionDotNet;
|
||||
|
||||
/// <summary>
/// Central lookup for the file names of the pretrained model data files.
/// </summary>
internal sealed class FaceRecognitionModels
{

    private const string PosePredictor68PointModel = "shape_predictor_68_face_landmarks.dat";
    private const string PosePredictor5PointModel = "shape_predictor_5_face_landmarks.dat";
    private const string FaceRecognitionResNetModel = "dlib_face_recognition_resnet_model_v1.dat";
    private const string CnnFaceDetectorModel = "mmod_human_face_detector.dat";
    private const string PosePredictor194PointModel = "helen-dataset.dat";
    private const string AgeNetworkModel = "adience-age-network.dat";
    private const string GenderNetworkModel = "utkface-gender-network.dat";
    private const string EmotionNetworkModel = "corrective-reannotation-of-fer-ck-kdef-emotion-network_test_best.dat";

    public static string GetPosePredictorModelLocation() => PosePredictor68PointModel;

    public static string GetPosePredictorFivePointModelLocation() => PosePredictor5PointModel;

    public static string GetFaceRecognitionModelLocation() => FaceRecognitionResNetModel;

    public static string GetCnnFaceDetectorModelLocation() => CnnFaceDetectorModel;

    public static string GetPosePredictor194PointModelLocation() => PosePredictor194PointModel;

    public static string GetAgeNetworkModelLocation() => AgeNetworkModel;

    public static string GetGenderNetworkModelLocation() => GenderNetworkModel;

    public static string GetEmotionNetworkModelLocation() => EmotionNetworkModel;

}
|
129
FaceRecognitionDotNet/Image.cs
Normal file
129
FaceRecognitionDotNet/Image.cs
Normal file
@ -0,0 +1,129 @@
|
||||
using DlibDotNet;
|
||||
using DlibDotNet.Extensions;
|
||||
using System.Drawing;
|
||||
using View_by_Distance.Shared.Models.Stateless;
|
||||
|
||||
namespace View_by_Distance.FaceRecognitionDotNet;
|
||||
|
||||
/// <summary>
/// Represents a image data. This class cannot be inherited.
/// </summary>
public sealed class Image : DisposableObject
{

    #region Constructors

    // Wraps an already-constructed dlib matrix; this instance disposes it on cleanup.
    internal Image(MatrixBase matrix, Mode mode)
    {
        Matrix = matrix;
        Mode = mode;
    }

    #endregion

    #region Properties

    /// <summary>
    /// Gets the height of the image.
    /// </summary>
    /// <exception cref="ObjectDisposedException">This object is disposed.</exception>
    public int Height { get { ThrowIfDisposed(); return Matrix.Rows; } }

    // The underlying dlib pixel matrix.
    internal MatrixBase Matrix { get; private set; }

    // The pixel mode (RGB or greyscale) this image was created with.
    internal Mode Mode { get; }

    /// <summary>
    /// Gets the width of the image.
    /// </summary>
    /// <exception cref="ObjectDisposedException">This object is disposed.</exception>
    public int Width { get { ThrowIfDisposed(); return Matrix.Columns; } }

    #endregion

    #region Methods

    /// <summary>
    /// Saves this <see cref="Image"/> to the specified file.
    /// </summary>
    /// <param name="fileName">A string that contains the name of the file to which to save this <see cref="Image"/>.</param>
    /// <param name="format">The <see cref="ImageFormat"/> for this <see cref="Image"/>.</param>
    /// <exception cref="NullReferenceException"><paramref name="fileName"/> is null.</exception>
    /// <exception cref="ObjectDisposedException">This object is disposed.</exception>
    public void Save(string fileName, ImageFormat format)
    {
        if (fileName is null)
            throw new NullReferenceException(nameof(fileName));
        ThrowIfDisposed();
        // Create the target directory on demand so saving never fails on a fresh path.
        string? directory = Path.GetDirectoryName(fileName);
        if (!string.IsNullOrWhiteSpace(directory) && !Directory.Exists(directory))
            _ = Directory.CreateDirectory(directory);
        switch (format)
        {
            case ImageFormat.Bmp:
                DlibDotNet.Dlib.SaveBmp(Matrix, fileName);
                break;
            case ImageFormat.Jpeg:
                DlibDotNet.Dlib.SaveJpeg(Matrix, fileName);
                break;
            case ImageFormat.Png:
                DlibDotNet.Dlib.SavePng(Matrix, fileName);
                break;
            default:
                // Other formats are silently ignored.
                break;
        }
    }

    /// <summary>
    /// Converts this <see cref="Image"/> to a GDI+ <see cref="Bitmap"/>.
    /// </summary>
    /// <returns>A <see cref="Bitmap"/> that represents the converted <see cref="Image"/>.</returns>
    /// <exception cref="ObjectDisposedException">This object is disposed.</exception>
    /// <exception cref="NotSupportedException">A Greyscale image is not supported.</exception>
    public Bitmap ToBitmap()
    {
        ThrowIfDisposed();
        if (Mode == Mode.Greyscale)
            throw new NotSupportedException();
        return ((Matrix<RgbPixel>)Matrix).ToBitmap();
    }

    /// <summary>
    /// Releases all unmanaged resources.
    /// </summary>
    protected override void DisposeUnmanaged()
    {
        base.DisposeUnmanaged();
        Matrix?.Dispose();
    }

    #endregion

}
|
49
FaceRecognitionDotNet/ModelParameter.cs
Normal file
49
FaceRecognitionDotNet/ModelParameter.cs
Normal file
@ -0,0 +1,49 @@
|
||||
namespace View_by_Distance.FaceRecognitionDotNet;
|
||||
|
||||
/// <summary>
/// Describes the model binary datum. This class cannot be inherited.
/// </summary>
public sealed class ModelParameter
{

    #region Properties

    /// <summary>
    /// Gets or sets the binary data of model for 68 points face landmarks.
    /// </summary>
    public byte[]? PosePredictor68FaceLandmarksModel { get; set; }

    /// <summary>
    /// Gets or sets the binary data of model for 5 points face landmarks.
    /// </summary>
    public byte[]? PosePredictor5FaceLandmarksModel { get; set; }

    /// <summary>
    /// Gets or sets the binary data of model for face encoding.
    /// </summary>
    public byte[]? FaceRecognitionModel { get; set; }

    /// <summary>
    /// Gets or sets the binary data of model for face detector by using CNN.
    /// </summary>
    public byte[]? CnnFaceDetectorModel { get; set; }

    #endregion

}
|
107
FaceRecognitionDotNet/Point.cs
Normal file
107
FaceRecognitionDotNet/Point.cs
Normal file
@ -0,0 +1,107 @@
|
||||
namespace View_by_Distance.FaceRecognitionDotNet;
|
||||
|
||||
/// <summary>
/// Represents an ordered pair of integer x- and y-coordinates that defines a point in a two-dimensional plane.
/// </summary>
public readonly struct Point : IEquatable<Point>
{

    #region Constructors

    /// <summary>
    /// Initializes a new instance of the <see cref="Point"/> structure with the specified coordinates.
    /// </summary>
    /// <param name="x">The horizontal position of the point.</param>
    /// <param name="y">The vertical position of the point.</param>
    public Point(int x, int y)
    {
        X = x;
        Y = y;
    }

    // Copies the coordinates out of a native dlib point.
    internal Point(DlibDotNet.Point point)
    {
        X = point.X;
        Y = point.Y;
    }

    #endregion

    #region Properties

    /// <summary>
    /// Gets the x-coordinate of this <see cref="Point"/>.
    /// </summary>
    public int X { get; }

    /// <summary>
    /// Gets the y-coordinate of this <see cref="Point"/>.
    /// </summary>
    public int Y { get; }

    #endregion

    #region Methods

    /// <summary>
    /// Compares two <see cref="Point"/> structures for equality.
    /// </summary>
    /// <param name="other">The point to compare to this instance.</param>
    /// <returns><code>true</code> if both <see cref="Point"/> structures contain the same <see cref="X"/> and <see cref="Y"/> values; otherwise, <code>false</code>.</returns>
    public bool Equals(Point other) => X == other.X && Y == other.Y;

    /// <summary>
    /// Determines whether the specified <see cref="object"/> is a <see cref="Point"/> and whether it contains the same coordinates as this <see cref="Point"/>.
    /// </summary>
    /// <param name="obj">The <see cref="object"/> to compare.</param>
    /// <returns><code>true</code> if <paramref name="obj"/> is a <see cref="Point"/> and contains the same <see cref="X"/> and <see cref="Y"/> values as this <see cref="Point"/>; otherwise, <code>false</code>.</returns>
    public override bool Equals(object? obj) => obj is Point point && Equals(point);

    /// <summary>
    /// Returns the hash code for this <see cref="Point"/>.
    /// </summary>
    /// <returns>The hash code for this <see cref="Point"/> structure.</returns>
#pragma warning disable IDE0070
    public override int GetHashCode()
#pragma warning restore IDE0070
    {
        // Same seed and multiplier as before so produced hash values are unchanged.
        int hashCode = 1861411795;
        hashCode = hashCode * -1521134295 + X.GetHashCode();
        hashCode = hashCode * -1521134295 + Y.GetHashCode();
        return hashCode;
    }

    /// <summary>
    /// Compares two <see cref="Point"/> structures for equality.
    /// </summary>
    /// <param name="point1">The first <see cref="Point"/> structure to compare.</param>
    /// <param name="point2">The second <see cref="Point"/> structure to compare.</param>
    /// <returns><code>true</code> if both the <see cref="X"/> and <see cref="Y"/> coordinates of <paramref name="point1"/> and <paramref name="point2"/> are equal; otherwise, <code>false</code>.</returns>
    public static bool operator ==(Point point1, Point point2) => point1.Equals(point2);

    /// <summary>
    /// Compares two <see cref="Point"/> structures for inequality.
    /// </summary>
    /// <param name="point1">The first <see cref="Point"/> structure to compare.</param>
    /// <param name="point2">The second <see cref="Point"/> structure to compare.</param>
    /// <returns><code>true</code> if <paramref name="point1"/> and <paramref name="point2"/> have different <see cref="X"/> or <see cref="Y"/> coordinates; <code>false</code> if <paramref name="point1"/> and <paramref name="point2"/> have the same <see cref="X"/> and <see cref="Y"/> coordinates.</returns>
    public static bool operator !=(Point point1, Point point2) => !(point1 == point2);

    #endregion

}
|
Reference in New Issue
Block a user