aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorbobzel <zzzman@gmail.com>2024-08-22 15:29:45 -0400
committerbobzel <zzzman@gmail.com>2024-08-22 15:29:45 -0400
commitf18978c507dd4c9c0940a1ce17daa2e7000bccdd (patch)
tree6cb3d3fd0e88385cd444f670f4b68db8e37d8708
parent979bd048c3a55fb437352d01bdc5083d1a000084 (diff)
from last
-rw-r--r--src/client/views/search/FaceRecognitionHandler.tsx116
1 files changed, 55 insertions, 61 deletions
diff --git a/src/client/views/search/FaceRecognitionHandler.tsx b/src/client/views/search/FaceRecognitionHandler.tsx
index f00c3fdf1..9b97fdfbd 100644
--- a/src/client/views/search/FaceRecognitionHandler.tsx
+++ b/src/client/views/search/FaceRecognitionHandler.tsx
@@ -7,6 +7,7 @@ import { listSpec } from '../../../fields/Schema';
import { Cast, ImageCast, NumCast, StrCast } from '../../../fields/Types';
import { DocumentType } from '../../documents/DocumentTypes';
import { DocumentManager } from '../../util/DocumentManager';
+import { ImageField } from '../../../fields/URLField';
/**
* A singleton class that handles face recognition and manages face Doc collections for each face found.
@@ -16,7 +17,8 @@ import { DocumentManager } from '../../util/DocumentManager';
* collection along with the numerical representation of the face, its face descriptor.
*
* Image Doc's that are added to one or more face collection Docs will be given these metadata fields:
- * <image data field>_faceDescriptors - a list of all the numerical face representations found in the image.
+ * <image data field>_faceDescriptors - list of all the numerical face representations found in the image.
+ * <image data field>_faces - list of unique face Docs corresponding to recognized faces in the image.
*
* unique face Doc's are created for each person identified and are stored in the Dashboard's uniqueFaces field
*
@@ -27,14 +29,33 @@ import { DocumentManager } from '../../util/DocumentManager';
*/
export class FaceRecognitionHandler {
static _instance: FaceRecognitionHandler;
- private _loadedModels: boolean = false;
- private _pendingLoadDocs: Doc[] = [];
+ private _apiModelReady = false;
+ private _pendingAPIModelReadyDocs: Doc[] = [];
+ public static get Instance() {
+ return FaceRecognitionHandler._instance ?? new FaceRecognitionHandler();
+ }
+
+ /**
+ * Loads the full-resolution image (URL rewritten with an _o suffix) referenced by the given ImageField.
+ */
+ private static loadImage = (imgUrl: ImageField): Promise<HTMLImageElement> => {
+ const [name, type] = imgUrl.url.href.split('.');
+ const imageURL = `${name}_o.${type}`;
+
+ return new Promise((resolve, reject) => {
+ const img = new Image();
+ img.crossOrigin = 'anonymous';
+ img.onload = () => resolve(img);
+ img.onerror = err => reject(err);
+ img.src = imageURL;
+ });
+ };
/**
* return the metadata field name where unique face Docs are stored
* @param imgDoc image with faces
- * @returns name of field
+ * @returns name of field
*/
private static ImageDocFaceField = (imgDoc: Doc) => `${Doc.LayoutFieldKey(imgDoc)}_faces`;
@@ -68,7 +89,6 @@ export class FaceRecognitionHandler {
Cast(imgDoc[DocData][`${Doc.LayoutFieldKey(imgDoc)}_faceDescriptors`], listSpec('number'), null).push(faceDescriptor as unknown as number);
};
-
/**
* returns a list of all face collection Docs on the current dashboard
* @returns face collection Doc list
@@ -103,7 +123,8 @@ export class FaceRecognitionHandler {
public static UniqueFaceImages = (faceDoc: Doc) => DocListCast(faceDoc[DocData].face_images);
/**
- * Adds a face image to a unique face Doc, and updates the unique face's set of face image descriptors
+ * Adds a face image to a unique face Doc, adds the unique face Doc to the image's list of recognized faces,
+ * and updates the unique face's set of face image descriptors
* @param img - image with faces to add to a face collection Doc
* @param faceDescriptor - the face descriptor for the face in the image to add
* @param faceDoc - unique face Doc
@@ -121,37 +142,33 @@ export class FaceRecognitionHandler {
*/
public static UniqueFaceRemoveFaceImage = (img: Doc, faceDoc: Doc) => {
Doc.RemoveDocFromList(faceDoc[DocData], 'face_images', img);
- const descriptorsEqual = (a:List<number>, b:List<number>) => a === b ? true : a.length === b.length ? a.every((element, index) => element === b[index]) : false;
+ const descriptorsEqual = (a: List<number>, b: List<number>) => (a === b ? true : a.length === b.length ? a.every((element, index) => element === b[index]) : false);
faceDoc[DocData].face_descriptors = new List<List<number>>(FaceRecognitionHandler.UniqueFaceDescriptors(faceDoc).filter(fd => !FaceRecognitionHandler.ImageDocFaceDescriptors(img).some(desc => descriptorsEqual(fd, desc))));
Doc.RemoveDocFromList(img[DocData], FaceRecognitionHandler.ImageDocFaceField(img), faceDoc);
};
constructor() {
FaceRecognitionHandler._instance = this;
- this.loadModels().then(() => this._pendingLoadDocs.forEach(this.classifyFacesInImage));
+ this.loadAPIModels().then(() => this._pendingAPIModelReadyDocs.forEach(this.classifyFacesInImage));
DocumentManager.Instance.AddAnyViewRenderedCB(dv => FaceRecognitionHandler.Instance.classifyFacesInImage(dv.Document));
}
/**
* Loads the face detection models.
*/
- loadModels = async () => {
+ private loadAPIModels = async () => {
const MODEL_URL = `/models`;
await faceapi.loadFaceDetectionModel(MODEL_URL);
await faceapi.loadFaceLandmarkModel(MODEL_URL);
await faceapi.loadFaceRecognitionModel(MODEL_URL);
- this._loadedModels = true;
+ this._apiModelReady = true;
};
- public static get Instance() {
- return FaceRecognitionHandler._instance ?? new FaceRecognitionHandler();
- }
-
/**
* Creates a new, empty unique face Doc
* @returns a unique face Doc
*/
- createUniqueFaceDoc = (dashboard: Doc) => {
+ private createUniqueFaceDoc = (dashboard: Doc) => {
const faceDocNum = NumCast(dashboard.uniqueFaces_count) + 1;
dashboard.uniqueFaces_count = faceDocNum; // TODO: improve to a better name
@@ -171,40 +188,30 @@ export class FaceRecognitionHandler {
* match them to existing unique faces, otherwise new unique face(s) are created.
* @param imgDoc The document being analyzed.
*/
- public classifyFacesInImage = async (imgDoc: Doc) => {
- if (!this._loadedModels || !Doc.ActiveDashboard) {
- this._pendingLoadDocs.push(imgDoc);
- return;
- }
-
- if (imgDoc.type === DocumentType.LOADING && !imgDoc.loadingError) {
+ private classifyFacesInImage = async (imgDoc: Doc) => {
+ const activeDashboard = Doc.ActiveDashboard;
+ if (!this._apiModelReady || !activeDashboard) {
+ this._pendingAPIModelReadyDocs.push(imgDoc);
+ } else if (imgDoc.type === DocumentType.LOADING && !imgDoc.loadingError) {
setTimeout(() => this.classifyFacesInImage(imgDoc), 1000);
- return;
- }
-
- const imgUrl = ImageCast(imgDoc[Doc.LayoutFieldKey(imgDoc)]);
- // If the doc isn't an image or currently already been examined or is being processed, stop examining the document.
- if (!imgUrl || DocListCast(Doc.MyFaceCollection.examinedFaceDocs).includes(imgDoc)) {
- return;
- }
- Doc.AddDocToList(Doc.MyFaceCollection, 'examinedFaceDocs', imgDoc);
-
- FaceRecognitionHandler.initImageDocFaceDescriptors(imgDoc);
-
- // Get the image the document contains and analyze for faces.
- const [name, type] = imgUrl.url.href.split('.');
- const imageURL = `${name}_o.${type}`;
- const img = await this.loadImage(imageURL);
- const imgDocFaceDescriptions = await faceapi.detectAllFaces(img).withFaceLandmarks().withFaceDescriptors();
-
- // For each face detected, find a match.
- for (const fd of imgDocFaceDescriptions) {
- const faceDescriptor = new List<number>(Array.from(fd.descriptor));
- const matchedUniqueFace = this.findMatchingFaceDoc(fd.descriptor) ?? this.createUniqueFaceDoc(Doc.ActiveDashboard);
- // Add image to unique face's image collection, and assign image metadata referencing unique face
- FaceRecognitionHandler.UniqueFaceAddFaceImage(imgDoc, faceDescriptor, matchedUniqueFace);
- // save the descriptor for the image's list of faces
- FaceRecognitionHandler.ImageDocAddFaceDescriptor(imgDoc, faceDescriptor);
+ } else {
+ const imgUrl = ImageCast(imgDoc[Doc.LayoutFieldKey(imgDoc)]);
+ if (imgUrl && !DocListCast(Doc.MyFaceCollection.examinedFaceDocs).includes(imgDoc)) { // only examine Docs that have an image and that haven't already been examined.
+ Doc.AddDocToList(Doc.MyFaceCollection, 'examinedFaceDocs', imgDoc);
+ FaceRecognitionHandler.initImageDocFaceDescriptors(imgDoc);
+ FaceRecognitionHandler.loadImage(imgUrl).then( // load image and analyze faces
+ img => faceapi.detectAllFaces(img).withFaceLandmarks().withFaceDescriptors()
+ .then(imgDocFaceDescriptions => { // For each face detected, find a match.
+ for (const fd of imgDocFaceDescriptions) {
+ const faceDescriptor = new List<number>(Array.from(fd.descriptor));
+ FaceRecognitionHandler.ImageDocAddFaceDescriptor(imgDoc, faceDescriptor); // add face descriptor to image's list of descriptors
+ const matchedUniqueFace = this.findMatchingFaceDoc(fd.descriptor) ?? this.createUniqueFaceDoc(activeDashboard);
+ FaceRecognitionHandler.UniqueFaceAddFaceImage(imgDoc, faceDescriptor, matchedUniqueFace); // add image/faceDescriptor to matched unique face
+ } //
+ return imgDocFaceDescriptions;
+ })
+ );
+ } // prettier-ignore
}
};
@@ -233,17 +240,4 @@ export class FaceRecognitionHandler {
}
return undefined;
};
-
- /**
- * Loads an image
- */
- private loadImage = (src: string): Promise<HTMLImageElement> => {
- return new Promise((resolve, reject) => {
- const img = new Image();
- img.crossOrigin = 'anonymous';
- img.onload = () => resolve(img);
- img.onerror = err => reject(err);
- img.src = src;
- });
- };
}