After testing E_Distance.SaveGroupedFaceEncodings
@@ -8,23 +8,23 @@ internal sealed class FaceRecognitionModelV1

#region Methods

-public static Matrix<double> ComputeFaceDescriptor(LossMetric net, Image img, FullObjectDetection face, int numJitters)
+public static Matrix<double> ComputeFaceDescriptor(LossMetric net, Image img, FullObjectDetection face, int numberOfJitters)
{
FullObjectDetection[]? faces = new[] { face };
-return ComputeFaceDescriptors(net, img, faces, numJitters).First();
+return ComputeFaceDescriptors(net, img, faces, numberOfJitters).First();
}

-public static IEnumerable<Matrix<double>> ComputeFaceDescriptors(LossMetric net, Image img, IEnumerable<FullObjectDetection> faces, int numJitters)
+public static IEnumerable<Matrix<double>> ComputeFaceDescriptors(LossMetric net, Image img, IEnumerable<FullObjectDetection> faces, int numberOfJitters)
{
Image[]? batchImage = new[] { img };
IEnumerable<FullObjectDetection>[]? batchFaces = new[] { faces };
-return BatchComputeFaceDescriptors(net, batchImage, batchFaces, numJitters).First();
+return BatchComputeFaceDescriptors(net, batchImage, batchFaces, numberOfJitters).First();
}

public static IEnumerable<IEnumerable<Matrix<double>>> BatchComputeFaceDescriptors(LossMetric net,
IList<Image> batchImages,
IList<IEnumerable<FullObjectDetection>> batchFaces,
-int numJitters)
+int numberOfJitters)
{
if (batchImages.Count != batchFaces.Count)
throw new ArgumentException("The array of images and the array of array of locations must be of the same size");
@@ -60,7 +60,7 @@ internal sealed class FaceRecognitionModelV1
for (int i = 0, count = batchImages.Count; i < count; i++)
faceDescriptors.Add(new List<Matrix<double>>());

-if (numJitters <= 1)
+if (numberOfJitters <= 1)
{
// extract descriptors and convert from float vectors to double vectors
OutputLabels<Matrix<float>>? descriptors = net.Operator(faceChips, 16);
@@ -80,7 +80,7 @@ internal sealed class FaceRecognitionModelV1
for (int i = 0; i < batchFaces.Count; ++i)
for (int j = 0; j < batchFaces[i].Count(); ++j)
{
-Matrix<RgbPixel>[]? tmp = JitterImage(faceChips[index++], numJitters).ToArray();
+Matrix<RgbPixel>[]? tmp = JitterImage(faceChips[index++], numberOfJitters).ToArray();
using (OutputLabels<Matrix<float>>? tmp2 = net.Operator(tmp, 16))
using (MatrixOp? mat = DlibDotNet.Dlib.Mat(tmp2))
{
@@ -113,10 +113,10 @@ internal sealed class FaceRecognitionModelV1

private static readonly Rand _Rand = new();

-private static IEnumerable<Matrix<RgbPixel>> JitterImage(Matrix<RgbPixel> img, int numJitters)
+private static IEnumerable<Matrix<RgbPixel>> JitterImage(Matrix<RgbPixel> img, int numberOfJitters)
{
List<Matrix<RgbPixel>>? crops = new();
-for (int i = 0; i < numJitters; ++i)
+for (int i = 0; i < numberOfJitters; ++i)
crops.Add(DlibDotNet.Dlib.JitterImage(img, _Rand));

return crops;
@@ -30,9 +30,9 @@ public sealed class FaceEncoding : DisposableObject, ISerializable
int? row = (int?)info.GetValue(nameof(_Encoding.Rows), typeof(int));
int? column = (int?)info.GetValue(nameof(_Encoding.Columns), typeof(int));
if (row is null)
-throw new Exception($"{nameof(row)} is null");
+throw new ArgumentNullException(nameof(row));
if (column is null)
-throw new Exception($"{nameof(column)} is null");
+throw new ArgumentNullException(nameof(column));
_Encoding = new Matrix<double>(array, row.Value, column.Value);
}
@@ -150,7 +150,7 @@ public sealed class FaceRecognition : DisposableObject
/// <param name="batchSize">The number of images to include in each GPU processing batch.</param>
/// <returns>An enumerable collection of array of found face locations.</returns>
/// <exception cref="ArgumentNullException"><paramref name="images"/> is null.</exception>
-public IEnumerable<Location[]> BatchFaceLocations(IEnumerable<Image> images, int numberOfTimesToUpsample = 1, int batchSize = 128)
+public IEnumerable<Location[]> BatchFaceLocations(IEnumerable<Image> images, int numberOfTimesToUpsample, int batchSize = 128)
{
if (images == null)
throw new ArgumentNullException(nameof(images));
@@ -167,7 +167,7 @@ public sealed class FaceRecognition : DisposableObject
for (int index = 0; index < rawDetectionsBatched.Length; index++)
{
MModRect[]? faces = rawDetectionsBatched[index].ToArray();
-Location[]? locations = faces.Select(rect => new Location(TrimBound(rect.Rect, image.Width, image.Height), rect.DetectionConfidence)).ToArray();
+Location[]? locations = faces.Select(rect => new Location(rect.DetectionConfidence, TrimBound(rect.Rect, image.Width, image.Height), image.Width, image.Height)).ToArray();
foreach (MModRect? face in faces)
face.Dispose();
results.Add(locations);
@@ -358,7 +358,7 @@ public sealed class FaceRecognition : DisposableObject
/// </summary>
/// <param name="image">The image contains faces. The image can contain multiple faces.</param>
/// <param name="knownFaceLocation">The enumerable collection of location rectangle for faces. If specified null, method will find face locations.</param>
-/// <param name="numJitters">The number of times to re-sample the face when calculating encoding.</param>
+/// <param name="numberOfJitters">The number of times to re-sample the face when calculating encoding.</param>
/// <param name="predictorModel">The dimension of vector which be returned from detector.</param>
/// <param name="model">The model of face detector to detect in image. If <paramref name="knownFaceLocation"/> is not null, this value is ignored.</param>
/// <returns>An enumerable collection of face feature data corresponds to all faces in specified image.</returns>
@@ -366,11 +366,7 @@ public sealed class FaceRecognition : DisposableObject
/// <exception cref="InvalidOperationException"><paramref name="knownFaceLocation"/> contains no elements.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="image"/> or this object or custom face landmark detector is disposed.</exception>
/// <exception cref="NotSupportedException"><see cref="PredictorModel.Custom"/> is not supported.</exception>
-public List<FaceEncoding> FaceEncodings(Image image,
-IEnumerable<Location>? knownFaceLocation = null,
-int numJitters = 1,
-PredictorModel predictorModel = PredictorModel.Small,
-Model model = Model.Hog)
+public List<FaceEncoding> FaceEncodings(Image image, int numberOfTimesToUpsample, IEnumerable<Location>? knownFaceLocation, int numberOfJitters, PredictorModel predictorModel, Model model)
{
if (image == null)
throw new ArgumentNullException(nameof(image));
@@ -383,12 +379,12 @@ public sealed class FaceRecognition : DisposableObject
image.ThrowIfDisposed();
ThrowIfDisposed();

-IEnumerable<FullObjectDetection>? rawLandmarks = RawFaceLandmarks(image, knownFaceLocation, predictorModel, model);
+List<FullObjectDetection> rawLandmarks = RawFaceLandmarks(image, numberOfTimesToUpsample, knownFaceLocation, predictorModel, model);

List<FaceEncoding> results = new();
-foreach (FullObjectDetection? landmark in rawLandmarks)
+foreach (FullObjectDetection landmark in rawLandmarks)
{
-FaceEncoding? ret = new(FaceRecognitionModelV1.ComputeFaceDescriptor(_FaceEncoder, image, landmark, numJitters));
+FaceEncoding? ret = new(FaceRecognitionModelV1.ComputeFaceDescriptor(_FaceEncoder, image, landmark, numberOfJitters));
landmark.Dispose();
results.Add(ret);
}
@@ -396,6 +392,14 @@ public sealed class FaceRecognition : DisposableObject
return results;
}

+private static FacePoint[] Join(IEnumerable<FacePoint> facePoints1, IEnumerable<FacePoint> facePoints2)
+{
+List<FacePoint> results = new();
+results.AddRange(facePoints1);
+results.AddRange(facePoints2);
+return results.ToArray();
+}
+
/// <summary>
/// Returns an enumerable collection of dictionary of face parts locations (eyes, nose, etc) for each face in the image.
/// </summary>
@@ -408,89 +412,53 @@ public sealed class FaceRecognition : DisposableObject
/// <exception cref="InvalidOperationException"><paramref name="faceLocations"/> contains no elements.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="faceImage"/> or this object or custom face landmark detector is disposed.</exception>
/// <exception cref="NotSupportedException">The custom face landmark detector is not ready.</exception>
public List<Dictionary<FacePart, IEnumerable<FacePoint>>> FaceLandmark(Image faceImage,
IEnumerable<Location>? faceLocations = null,
PredictorModel predictorModel = PredictorModel.Large,
Model model = Model.Hog)
public List<(FacePart, FacePoint[])[]> GetFaceLandmarkCollection(Image faceImage, int numberOfTimesToUpsample, IEnumerable<Location>? faceLocations, PredictorModel predictorModel, Model model)
{
List<(FacePart, FacePoint[])[]> results = new();
if (faceImage == null)
throw new ArgumentNullException(nameof(faceImage));

if (faceLocations != null && !faceLocations.Any())
throw new InvalidOperationException($"{nameof(faceLocations)} contains no elements.");

faceImage.ThrowIfDisposed();
ThrowIfDisposed();

if (predictorModel == PredictorModel.Custom)
throw new NotImplementedException();
List<FullObjectDetection> fullObjectDetections = RawFaceLandmarks(faceImage, numberOfTimesToUpsample, faceLocations, predictorModel, model);
List<FacePoint[]> landmarksCollection = fullObjectDetections.Select(landmark => Enumerable.Range(0, (int)landmark.Parts)
.Select(index => new FacePoint(index, landmark.GetPart((uint)index).X, landmark.GetPart((uint)index).Y)).ToArray()).ToList();
foreach (FullObjectDetection? landmark in fullObjectDetections)
landmark.Dispose();
List<(FacePart, FacePoint[])> collection;
foreach (FacePoint[] facePoints in landmarksCollection)
{
if (CustomFaceLandmarkDetector == null)
throw new NotSupportedException("The custom face landmark detector is not ready.");

if (CustomFaceLandmarkDetector.IsDisposed)
throw new ObjectDisposedException($"{nameof(CustomFaceLandmarkDetector)}", "The custom face landmark detector is disposed.");
}

FullObjectDetection[]? landmarks = RawFaceLandmarks(faceImage, faceLocations, predictorModel, model).ToArray();
IEnumerable<FacePoint[]>? landmarkTuples = landmarks.Select(landmark => Enumerable.Range(0, (int)landmark.Parts)
.Select(index => new FacePoint(index, landmark.GetPart((uint)index).X, landmark.GetPart((uint)index).Y)).ToArray());

List<Dictionary<FacePart, IEnumerable<FacePoint>>> results = new();

try
{

// For a definition of each point index, see https://cdn-images-1.medium.com/max/1600/1*AbEg31EgkbXSQehuNJBlWg.png
collection = new();
switch (predictorModel)
{
case PredictorModel.Custom:
throw new NotImplementedException();
case PredictorModel.Large:
results.AddRange(landmarkTuples.Select(landmarkTuple => new Dictionary<FacePart, IEnumerable<FacePoint>>
{
{ FacePart.Chin, Enumerable.Range(0,17).Select(i => landmarkTuple[i]).ToArray() },
{ FacePart.LeftEyebrow, Enumerable.Range(17,5).Select(i => landmarkTuple[i]).ToArray() },
{ FacePart.RightEyebrow, Enumerable.Range(22,5).Select(i => landmarkTuple[i]).ToArray() },
{ FacePart.NoseBridge, Enumerable.Range(27,5).Select(i => landmarkTuple[i]).ToArray() },
{ FacePart.NoseTip, Enumerable.Range(31,5).Select(i => landmarkTuple[i]).ToArray() },
{ FacePart.LeftEye, Enumerable.Range(36,6).Select(i => landmarkTuple[i]).ToArray() },
{ FacePart.RightEye, Enumerable.Range(42,6).Select(i => landmarkTuple[i]).ToArray() },
{ FacePart.TopLip, Enumerable.Range(48,7).Select(i => landmarkTuple[i])
.Concat( new [] { landmarkTuple[64] })
.Concat( new [] { landmarkTuple[63] })
.Concat( new [] { landmarkTuple[62] })
.Concat( new [] { landmarkTuple[61] })
.Concat( new [] { landmarkTuple[60] }) },
{ FacePart.BottomLip, Enumerable.Range(54,6).Select(i => landmarkTuple[i])
.Concat( new [] { landmarkTuple[48] })
.Concat( new [] { landmarkTuple[60] })
.Concat( new [] { landmarkTuple[67] })
.Concat( new [] { landmarkTuple[66] })
.Concat( new [] { landmarkTuple[65] })
.Concat( new [] { landmarkTuple[64] }) }
}));
if (facePoints.Length != 68)
continue;
collection.Add(new(FacePart.Chin, facePoints.Skip(0).Take(17).ToArray()));
collection.Add(new(FacePart.LeftEyebrow, facePoints.Skip(17).Take(5).ToArray()));
collection.Add(new(FacePart.RightEyebrow, facePoints.Skip(22).Take(5).ToArray()));
collection.Add(new(FacePart.NoseBridge, facePoints.Skip(27).Take(5).ToArray()));
collection.Add(new(FacePart.NoseTip, facePoints.Skip(31).Take(5).ToArray()));
collection.Add(new(FacePart.LeftEye, facePoints.Skip(36).Take(6).ToArray()));
collection.Add(new(FacePart.RightEye, facePoints.Skip(42).Take(6).ToArray()));
collection.Add(new(FacePart.TopLip, Join(facePoints.Skip(48).Take(7), facePoints.Skip(60).Take(5))));
collection.Add(new(FacePart.BottomLip, Join(facePoints.Skip(55).Take(5), facePoints.Skip(65).Take(3))));
break;
case PredictorModel.Small:
results.AddRange(landmarkTuples.Select(landmarkTuple => new Dictionary<FacePart, IEnumerable<FacePoint>>
{
{ FacePart.NoseTip, Enumerable.Range(4,1).Select(i => landmarkTuple[i]).ToArray() },
{ FacePart.LeftEye, Enumerable.Range(2,2).Select(i => landmarkTuple[i]).ToArray() },
{ FacePart.RightEye, Enumerable.Range(0,2).Select(i => landmarkTuple[i]).ToArray() }
}));
if (facePoints.Length != 5)
continue;
collection.Add(new(FacePart.RightEye, facePoints.Skip(0).Take(2).ToArray()));
collection.Add(new(FacePart.LeftEye, facePoints.Skip(2).Take(2).ToArray()));
collection.Add(new(FacePart.NoseTip, facePoints.Skip(4).Take(1).ToArray()));
break;
case PredictorModel.Custom:
if (CustomFaceLandmarkDetector is null)
throw new Exception($"{nameof(CustomFaceLandmarkDetector)} is null");
results.AddRange(CustomFaceLandmarkDetector.GetLandmarks(landmarkTuples));
break;
default:
throw new ArgumentOutOfRangeException(nameof(predictorModel), predictorModel, null);
}
results.Add(collection.ToArray());
}
finally
{
foreach (FullObjectDetection? landmark in landmarks)
landmark.Dispose();
}

return results;
}
@@ -503,7 +471,7 @@ public sealed class FaceRecognition : DisposableObject
/// <returns>An enumerable collection of face location correspond to all faces in specified image.</returns>
/// <exception cref="ArgumentNullException"><paramref name="image"/> is null.</exception>
/// <exception cref="ObjectDisposedException"><paramref name="image"/> or this object is disposed.</exception>
-public List<Location> FaceLocations(Image image, int numberOfTimesToUpsample = 1, Model model = Model.Hog)
+public List<Location> FaceLocations(Model model, Image image, int numberOfTimesToUpsample, bool sortByPixelPercentage)
{
if (image == null)
throw new ArgumentNullException(nameof(image));
@@ -517,9 +485,10 @@ public sealed class FaceRecognition : DisposableObject
Location? ret = TrimBound(face.Rect, image.Width, image.Height);
double confidence = face.DetectionConfidence;
face.Dispose();
-results.Add(new Location(ret, confidence));
+results.Add(new Location(confidence, ret, image.Width, image.Height));
}

+if (sortByPixelPercentage)
+results = (from l in results orderby l.PixelPercentage select l).ToList();
return results;
}
@@ -763,36 +732,33 @@ public sealed class FaceRecognition : DisposableObject

#region Helpers

-private IEnumerable<FullObjectDetection> RawFaceLandmarks(Image faceImage,
-IEnumerable<Location>? faceLocations = null,
-PredictorModel predictorModel = PredictorModel.Large,
-Model model = Model.Hog)
+private List<FullObjectDetection> RawFaceLandmarks(Image faceImage, int numberOfTimesToUpsample, IEnumerable<Location>? faceLocations, PredictorModel predictorModel, Model model)
{
-IEnumerable<Location> rects;
+IEnumerable<Location> locations;

if (faceLocations == null)
{
List<Location>? list = new();
-IEnumerable<MModRect>? tmp = RawFaceLocations(faceImage, 1, model);
+IEnumerable<MModRect>? tmp = RawFaceLocations(faceImage, numberOfTimesToUpsample, model);
foreach (MModRect? mModRect in tmp)
{
-list.Add(new Location(mModRect.DetectionConfidence, mModRect.Rect.Bottom, mModRect.Rect.Left, mModRect.Rect.Right, mModRect.Rect.Top));
+list.Add(new Location(mModRect.DetectionConfidence, mModRect.Rect.Bottom, mModRect.Rect.Left, mModRect.Rect.Right, mModRect.Rect.Top, faceImage.Width, faceImage.Height));
mModRect.Dispose();
}

-rects = list;
+locations = list;
}
else
{
-rects = faceLocations;
+locations = faceLocations;
}

-List<FullObjectDetection>? results = new();
+List<FullObjectDetection> results = new();
if (predictorModel == PredictorModel.Custom)
{
if (CustomFaceLandmarkDetector is null)
-throw new Exception($"{nameof(CustomFaceLandmarkDetector)} is null");
-foreach (Location? rect in rects)
+throw new ArgumentNullException(nameof(CustomFaceLandmarkDetector));
+foreach (Location? rect in locations)
{
FullObjectDetection? ret = CustomFaceLandmarkDetector.Detect(faceImage, rect);
results.Add(ret);
@@ -808,7 +774,7 @@ public sealed class FaceRecognition : DisposableObject
break;
}

-foreach (Location? rect in rects)
+foreach (Location? rect in locations)
{
FullObjectDetection? ret = posePredictor.Detect(faceImage.Matrix, new DlibDotNet.Rectangle(rect.Left, rect.Top, rect.Right, rect.Bottom));
results.Add(ret);
@@ -818,7 +784,7 @@ public sealed class FaceRecognition : DisposableObject
return results;
}

-private IEnumerable<MModRect> RawFaceLocations(Image faceImage, int numberOfTimesToUpsample = 1, Model model = Model.Hog)
+private IEnumerable<MModRect> RawFaceLocations(Image faceImage, int numberOfTimesToUpsample, Model model)
{
switch (model)
{
@@ -838,9 +804,9 @@ public sealed class FaceRecognition : DisposableObject
}
}

-private IEnumerable<IEnumerable<MModRect>> RawFaceLocationsBatched(IEnumerable<Image> faceImages, int numberOfTimesToUpsample = 1, int batchSize = 128) => CnnFaceDetectionModelV1.DetectMulti(_CnnFaceDetector, faceImages, numberOfTimesToUpsample, batchSize);
+private IEnumerable<IEnumerable<MModRect>> RawFaceLocationsBatched(IEnumerable<Image> faceImages, int numberOfTimesToUpsample, int batchSize = 128) => CnnFaceDetectionModelV1.DetectMulti(_CnnFaceDetector, faceImages, numberOfTimesToUpsample, batchSize);

-private static Location TrimBound(DlibDotNet.Rectangle location, int width, int height) => new(Math.Max(location.Left, 0), Math.Max(location.Top, 0), Math.Min(location.Right, width), Math.Min(location.Bottom, height));
+private static Location TrimBound(DlibDotNet.Rectangle location, int width, int height) => new(Math.Max(location.Left, 0), Math.Max(location.Top, 0), Math.Min(location.Right, width), Math.Min(location.Bottom, height), width, height);

#endregion
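
For context, a minimal caller-side sketch of how the reshaped public API might be used after this commit. The signatures of FaceLocations, FaceEncodings, and GetFaceLandmarkCollection are taken from the diff above; FaceRecognition.Create, FaceRecognition.LoadImageFile, and the file paths are assumptions borrowed from upstream FaceRecognitionDotNet and may differ in this fork.

// Hypothetical usage sketch; "models" and "face.jpg" are placeholder paths.
using System.Collections.Generic;

internal static class UsageSketch
{
    private static void Run()
    {
        // Assumed factory and image loader, as in upstream FaceRecognitionDotNet.
        using FaceRecognition faceRecognition = FaceRecognition.Create("models");
        using Image image = FaceRecognition.LoadImageFile("face.jpg");

        // The upsample count is now an explicit argument, and results can be
        // ordered by Location.PixelPercentage when sortByPixelPercentage is true.
        List<Location> locations = faceRecognition.FaceLocations(Model.Hog, image, 1, sortByPixelPercentage: false);

        // FaceEncodings now takes the upsample count and jitter count without defaults.
        List<FaceEncoding> encodings = faceRecognition.FaceEncodings(image, 1, locations, 1, PredictorModel.Small, Model.Hog);

        // Landmarks come back as (FacePart, FacePoint[]) tuples instead of dictionaries.
        List<(FacePart, FacePoint[])[]> landmarks = faceRecognition.GetFaceLandmarkCollection(image, 1, locations, PredictorModel.Large, Model.Hog);

        foreach (FaceEncoding encoding in encodings)
            encoding.Dispose();
    }
}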