Commit 51d5d01d by 何阳

增加中文编码支持

parent c683a1ef

99.4 KB | W: | H:

99.4 KB | W: | H:

FaceServer/Face/heyang.jpg
FaceServer/Face/何阳.jpg
FaceServer/Face/heyang.jpg
FaceServer/Face/何阳.jpg
  • 2-up
  • Swipe
  • Onion skin
......@@ -213,7 +213,7 @@
<Content Include="App.png">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</Content>
<Content Include="Face\heyang.jpg">
<Content Include="Face\何阳.jpg">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</Content>
</ItemGroup>
......
......@@ -20,6 +20,7 @@ public class FaceRecognitionServer
public int RoiSize { set; get; } = default;
// Constructs the face-recognition server from a directory of known-face
// images (faceDataPath) and a model directory (modelPath).
public FaceRecognitionServer(string faceDataPath, string modelPath)
{
// Set the library's internal string encoding before creating the engine /
// loading faces, so that non-ASCII (Chinese) image file names are decoded
// correctly — this line is the whole point of the "增加中文编码支持" commit.
// NOTE(review): Encoding.Default is the system ANSI code page (e.g. GBK on
// Chinese Windows) under .NET Framework but UTF-8 under .NET Core/.NET 5+;
// confirm the target runtime gives the encoding the file names actually use.
FaceRecognition.InternalEncoding = Encoding.Default;
// Lazy one-time initialization: keep an already-created engine / cache if a
// previous construction set them, otherwise create and load now.
// NOTE(review): presumably FaceRecognition / KnownFaceEncodings are shared
// (static) members — verify, since this ?? pattern is a no-op for plain
// instance properties that start null on every construction.
FaceRecognition = FaceRecognition ?? FaceRecognition.Create(modelPath);
KnownFaceEncodings = KnownFaceEncodings ?? LoadKnownFaces(FaceRecognition, faceDataPath);
}
......@@ -51,10 +52,6 @@ public class FaceRecognitionServer
Mat roiMat = new Mat(originalMat, roiRectangle);
originalMat.Dispose();
// 转换为灰度图
//Mat grayMat = new Mat();
//Cv2.CvtColor(roiMat, grayMat, ColorConversionCodes.BGR2GRAY);
//roiMat.Dispose();
var bitmap = OpenCvSharp.Extensions.BitmapConverter.ToBitmap(roiMat);
......@@ -64,61 +61,29 @@ public class FaceRecognitionServer
bitmap.Dispose();
// Detect faces
var faceLocations = FaceRecognition.FaceLocations(image);
if (!faceLocations.Any())
if (faceLocations.Count() == 0)
{
image.Dispose();
return new List<FaceData>();
}
var faceLandmarks = FaceRecognition.FaceLandmark(image, faceLocations).ToList();
var faceEncodings = FaceRecognition.FaceEncodings(image, faceLocations);
List<FaceData> faces = new List<FaceData>();
for (int i = 0; i < faceLocations.Count(); i++)
{
var landmarks = faceLandmarks[i];
var upperFacePoints = new List<FacePoint>();
upperFacePoints.AddRange(landmarks[FacePart.LeftEyebrow]);
upperFacePoints.AddRange(landmarks[FacePart.RightEyebrow]);
upperFacePoints.AddRange(landmarks[FacePart.LeftEye]);
upperFacePoints.AddRange(landmarks[FacePart.RightEye]);
upperFacePoints.AddRange(landmarks[FacePart.NoseBridge]);
// 计算上半脸特征点的边界
var minX = upperFacePoints.Min(point => point.Point.X) + Roi.X;
var minY = upperFacePoints.Min(point => point.Point.Y) + Roi.Y;
var maxX = upperFacePoints.Max(point => point.Point.X) + Roi.X;
var maxY = upperFacePoints.Max(point => point.Point.Y) + Roi.Y;
// 创建一个新的面部区域Location,只包括上半脸的特征点
var upperFaceLocation = new Location(minX, minY, maxX, maxY);
// 确保宽度和高度是正数
var width = Math.Max(0, upperFaceLocation.Right - upperFaceLocation.Left);
var height = Math.Max(0, upperFaceLocation.Bottom - upperFaceLocation.Top);
// Get the encodings based on the adjusted location
var faceEncodings = FaceRecognition.FaceEncodings(image, new[] { upperFaceLocation });
var encoding = faceEncodings.FirstOrDefault();
if (encoding == null)
{
Log.Warning($"人脸编码计算失败!");
continue; // Skip if no encoding is found
}
for (int i = 0; i < faceEncodings.Count(); i++)
{
var encoding = faceEncodings.ElementAt(i);
// Find match for known faces
var bestMatch = FindBestMatch(encoding, KnownFaceEncodings);
var location = faceLocations.ElementAt(i);
// 创建一个新的RectangleSerializable对象
// 映射回整个图像的坐标空间
var faceRect = new RectangleSerializable
(
upperFaceLocation.Left, // 上半脸位置的左边界
upperFaceLocation.Top, // 上半脸位置的上边界
width, // 宽度
height // 高度
location.Left + Roi.X, // 加上ROI的X偏移
location.Top + Roi.Y, // 加上ROI的Y偏移
location.Right - location.Left,
location.Bottom - location.Top
);
faces.Add(new FaceData
{
......@@ -126,6 +91,7 @@ public class FaceRecognitionServer
Name = bestMatch,
IsIdentified = bestMatch != "Unknown"
});
}
return faces;
......@@ -139,43 +105,11 @@ public class FaceRecognitionServer
foreach (var imagePath in Directory.GetFiles(faceDataPath, "*.jpg"))
{
var name = Path.GetFileNameWithoutExtension(imagePath);
using (var image = FaceRecognition.LoadImageFile(imagePath))
var image = FaceRecognition.LoadImageFile(imagePath);
var faceEncoding = faceRecognition.FaceEncodings(image).FirstOrDefault();
if (faceEncoding != null)
{
// Detect the face location and face landmarks for upper face parts only
var faceLocations = faceRecognition.FaceLocations(image).FirstOrDefault();
if (faceLocations == null)
{
Log.Warning($"{faceDataPath} 人脸模型加载失败!");
continue;
} // Skip if no face is detected
var faceLandmarks = faceRecognition.FaceLandmark(image,new[] { faceLocations }).FirstOrDefault();
if (faceLandmarks == null)
{
Log.Warning($"{faceDataPath} 人脸模型加载失败!");
continue;
} // Skip if no face is detected
var upperFaceLandmarks = new List<FacePoint>();
upperFaceLandmarks.AddRange(faceLandmarks[FacePart.LeftEyebrow]);
upperFaceLandmarks.AddRange(faceLandmarks[FacePart.RightEyebrow]);
upperFaceLandmarks.AddRange(faceLandmarks[FacePart.LeftEye]);
upperFaceLandmarks.AddRange(faceLandmarks[FacePart.RightEye]);
// Calculate the bounding box for the upper face landmarks
var minX = upperFaceLandmarks.Min(point => point.Point.X);
var minY = upperFaceLandmarks.Min(point => point.Point.Y);
var maxX = upperFaceLandmarks.Max(point => point.Point.X);
var maxY = upperFaceLandmarks.Max(point => point.Point.Y);
// Create a Location object representing the upper face region
var upperFaceLocation = new Location(minX, minY, maxX - minX, maxY - minY);
// Get the face encoding based on the upper face landmarks
var faceEncoding = faceRecognition.FaceEncodings(image, new[] { upperFaceLocation }).FirstOrDefault();
if (faceEncoding != null)
{
knownFaceEncodings.Add(name, faceEncoding);
}
knownFaceEncodings.Add(name, faceEncoding);
}
}
return knownFaceEncodings;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment