aboutsummaryrefslogtreecommitdiff
path: root/src/client/views/search
diff options
context:
space:
mode:
authorbobzel <zzzman@gmail.com>2024-09-02 09:26:37 -0400
committerbobzel <zzzman@gmail.com>2024-09-02 09:26:37 -0400
commitcda69e48361fce8d71a4dc66edd9dd976a27f52d (patch)
tree82b9a1a5967ae88a9534f89f7eaed3aeb289652f /src/client/views/search
parentc01828308714874589d1f60c33ca59df4c656c0c (diff)
parenta958577d4c27b276aa37484e3f895e196138b17c (diff)
Merge branch 'master' into alyssa-starter
Diffstat (limited to 'src/client/views/search')
-rw-r--r--src/client/views/search/FaceRecognitionHandler.tsx249
1 file changed, 249 insertions, 0 deletions
diff --git a/src/client/views/search/FaceRecognitionHandler.tsx b/src/client/views/search/FaceRecognitionHandler.tsx
new file mode 100644
index 000000000..4f6f5d314
--- /dev/null
+++ b/src/client/views/search/FaceRecognitionHandler.tsx
@@ -0,0 +1,249 @@
+import * as faceapi from 'face-api.js';
+import { FaceMatcher } from 'face-api.js';
+import { Doc, DocListCast } from '../../../fields/Doc';
+import { DocData } from '../../../fields/DocSymbols';
+import { List } from '../../../fields/List';
+import { ComputedField } from '../../../fields/ScriptField';
+import { DocCast, ImageCast, NumCast, StrCast } from '../../../fields/Types';
+import { ImageField } from '../../../fields/URLField';
+import { DocumentType } from '../../documents/DocumentTypes';
+import { Docs } from '../../documents/Documents';
+import { DocumentManager } from '../../util/DocumentManager';
+
+/**
+ * A singleton class that handles face recognition and manages face Doc collections for each face found.
+ * Displaying an image doc anywhere will trigger this class to test if the image contains any faces.
+ * If it does, each recognized face will be compared to a stored, global set of faces (each face is represented
+ * as a face collection Doc). If the face matches a face collection Doc, then it will be added to that
+ * collection along with the numerical representation of the face, its face descriptor.
+ *
+ * Image Doc's that are added to one or more face collection Docs will be given an annotation rectangle that
+ * highlights where the face is, and the annotation will have these fields:
+ * faceDescriptor - the numerical face representations found in the image.
+ * face - the unique face Docs corresponding to recognized face in the image.
+ * annotationOn - the image where the face was found
+ *
+ * unique face Doc's are created for each person identified and are stored in the Dashboard's myUniqueFaces field
+ *
+ * Each unique face Doc represents a unique face and collects all matching face images for that person. It has these fields:
+ * face - a string label for the person that was recognized (TODO: currently it's just a 'face#')
+ * face_annos - a list of face annotations, where each anno has
+ */
+export class FaceRecognitionHandler {
+ static _instance: FaceRecognitionHandler;
+ private _apiModelReady = false;
+ private _pendingAPIModelReadyDocs: Doc[] = [];
+
+ public static get Instance() {
+ return FaceRecognitionHandler._instance ?? new FaceRecognitionHandler();
+ }
+
+ /**
+ * Loads an image
+ */
+ private static loadImage = (imgUrl: ImageField): Promise<HTMLImageElement> => {
+ const [name, type] = imgUrl.url.href.split('.');
+ const imageURL = `${name}_o.${type}`;
+
+ return new Promise((resolve, reject) => {
+ const img = new Image();
+ img.crossOrigin = 'anonymous';
+ img.onload = () => resolve(img);
+ img.onerror = err => reject(err);
+ img.src = imageURL;
+ });
+ };
+
+ /**
+ * Returns an array of faceDocs for each face recognized in the image
+ * @param imgDoc image with faces
+ * @returns faceDoc array
+ */
+ public static ImageDocFaceAnnos = (imgDoc: Doc) => DocListCast(imgDoc[`${Doc.LayoutFieldKey(imgDoc)}_annotations`]).filter(doc => doc.face);
+
+ /**
+ * returns a list of all face collection Docs on the current dashboard
+ * @returns face collection Doc list
+ */
+ public static UniqueFaces = () => DocListCast(Doc.ActiveDashboard?.[DocData].myUniqueFaces);
+
+ /**
+ * Find a unique face from its name
+ * @param name name of unique face
+ * @returns unique face or undefined
+ */
+ public static FindUniqueFaceByName = (name: string) => FaceRecognitionHandler.UniqueFaces().find(faceDoc => faceDoc.title === name);
+
+ /**
+ * Removes a unique face from the set of recognized unique faces
+ * @param faceDoc unique face Doc
+ * @returns
+ */
+ public static DeleteUniqueFace = (faceDoc: Doc) => Doc.ActiveDashboard && Doc.RemoveDocFromList(Doc.ActiveDashboard[DocData], 'myUniqueFaces', faceDoc);
+
+ /**
+ * returns the labels associated with a face collection Doc
+ * @param faceDoc unique face Doc
+ * @returns label string
+ */
+ public static UniqueFaceLabel = (faceDoc: Doc) => StrCast(faceDoc[DocData].face);
+
+ public static SetUniqueFaceLabel = (faceDoc: Doc, value: string) => (faceDoc[DocData].face = value);
+ /**
+ * Returns all the face descriptors associated with a unique face Doc
+ * @param faceDoc unique face Doc
+ * @returns face descriptors
+ */
+ public static UniqueFaceDescriptors = (faceDoc: Doc) => DocListCast(faceDoc[DocData].face_annos).map(face => face.faceDescriptor as List<number>);
+
+ /**
+ * Returns a list of all face image Docs associated with a unique face Doc
+ * @param faceDoc unique face Doc
+ * @returns image Docs
+ */
+ public static UniqueFaceImages = (faceDoc: Doc) => DocListCast(faceDoc[DocData].face_annos).map(face => DocCast(face.annotationOn, face));
+
+ /**
+ * Adds a face image to a unique face Doc, adds the unique face Doc to the images list of reognized faces,
+ * and updates the unique face's set of face image descriptors
+ * @param img - image with faces to add to a face collection Doc
+ * @param faceAnno - a face annotation
+ */
+ public static UniqueFaceAddFaceImage = (faceAnno: Doc, faceDoc: Doc) => {
+ Doc.AddDocToList(faceDoc, 'face_annos', faceAnno);
+ };
+
+ /**
+ * Removes a face from a unique Face Doc, and updates the unique face's set of face image descriptors
+ * @param img - image with faces to remove
+ * @param faceDoc - unique face Doc
+ */
+ public static UniqueFaceRemoveFaceImage = (faceAnno: Doc, faceDoc: Doc) => {
+ Doc.RemoveDocFromList(faceDoc[DocData], 'face_annos', faceAnno);
+ faceAnno.face = undefined;
+ };
+
+ constructor() {
+ FaceRecognitionHandler._instance = this;
+ this.loadAPIModels().then(() => this._pendingAPIModelReadyDocs.forEach(this.classifyFacesInImage));
+ DocumentManager.Instance.AddAnyViewRenderedCB(dv => FaceRecognitionHandler.Instance.classifyFacesInImage(dv.Document));
+ }
+
+ /**
+ * Loads the face detection models.
+ */
+ private loadAPIModels = async () => {
+ const MODEL_URL = `/models`;
+ await faceapi.loadFaceDetectionModel(MODEL_URL);
+ await faceapi.loadFaceLandmarkModel(MODEL_URL);
+ await faceapi.loadFaceRecognitionModel(MODEL_URL);
+ this._apiModelReady = true;
+ };
+
+ /**
+ * Creates a new, empty unique face Doc
+ * @returns a unique face Doc
+ */
+ private createUniqueFaceDoc = (dashboard: Doc) => {
+ const faceDocNum = NumCast(dashboard[DocData].myUniqueFaces_count) + 1;
+ dashboard[DocData].myUniqueFaces_count = faceDocNum; // TODO: improve to a better name
+
+ const uniqueFaceDoc = Docs.Create.UniqeFaceDocument({
+ title: ComputedField.MakeFunction('this.face', undefined, undefined, 'this.face = value') as unknown as string,
+ _layout_reflowHorizontal: true,
+ _layout_reflowVertical: true,
+ _layout_nativeDimEditable: true,
+ _layout_borderRounding: '20px',
+ _layout_fitWidth: true,
+ _layout_autoHeight: true,
+ _face_showImages: true,
+ _width: 400,
+ _height: 100,
+ });
+ const uface = uniqueFaceDoc[DocData];
+ uface.face = `Face${faceDocNum}`;
+ uface.face_annos = new List<Doc>();
+ Doc.SetContainer(uniqueFaceDoc, Doc.MyFaceCollection);
+
+ Doc.ActiveDashboard && Doc.AddDocToList(Doc.ActiveDashboard[DocData], 'myUniqueFaces', uniqueFaceDoc);
+ return uniqueFaceDoc;
+ };
+
+ /**
+ * Finds the most similar matching Face Document to a face descriptor
+ * @param faceDescriptor face descriptor number list
+ * @returns face Doc
+ */
+ private findMatchingFaceDoc = (faceDescriptor: Float32Array) => {
+ if (!Doc.ActiveDashboard || FaceRecognitionHandler.UniqueFaces().length < 1) {
+ return undefined;
+ }
+
+ const faceDescriptors = FaceRecognitionHandler.UniqueFaces().map(faceDoc => {
+ const float32Array = FaceRecognitionHandler.UniqueFaceDescriptors(faceDoc).map(fd => new Float32Array(Array.from(fd)));
+ return new faceapi.LabeledFaceDescriptors(FaceRecognitionHandler.UniqueFaceLabel(faceDoc), float32Array);
+ });
+ const faceMatcher = new FaceMatcher(faceDescriptors, 0.6);
+ const match = faceMatcher.findBestMatch(faceDescriptor);
+ if (match.label !== 'unknown') {
+ for (const faceDoc of FaceRecognitionHandler.UniqueFaces()) {
+ if (FaceRecognitionHandler.UniqueFaceLabel(faceDoc) === match.label) {
+ return faceDoc;
+ }
+ }
+ }
+ return undefined;
+ };
+
+ /**
+ * When a document is added, this finds faces in the images and tries to
+ * match them to existing unique faces, otherwise new unique face(s) are created.
+ * @param imgDoc The document being analyzed.
+ */
+ private classifyFacesInImage = async (imgDoc: Doc) => {
+ if (!Doc.UserDoc().recognizeFaceImages) return;
+ const activeDashboard = Doc.ActiveDashboard;
+ if (!this._apiModelReady || !activeDashboard) {
+ this._pendingAPIModelReadyDocs.push(imgDoc);
+ } else if (imgDoc.type === DocumentType.LOADING && !imgDoc.loadingError) {
+ setTimeout(() => this.classifyFacesInImage(imgDoc), 1000);
+ } else {
+ const imgUrl = ImageCast(imgDoc[Doc.LayoutFieldKey(imgDoc)]);
+ if (imgUrl && !DocListCast(Doc.MyFaceCollection.examinedFaceDocs).includes(imgDoc[DocData])) {
+ // only examine Docs that have an image and that haven't already been examined.
+ Doc.AddDocToList(Doc.MyFaceCollection, 'examinedFaceDocs', imgDoc[DocData]);
+ FaceRecognitionHandler.loadImage(imgUrl).then(
+ // load image and analyze faces
+ img => faceapi
+ .detectAllFaces(img)
+ .withFaceLandmarks()
+ .withFaceDescriptors()
+ .then(imgDocFaceDescriptions => { // For each face detected, find a match.
+ const annos = [] as Doc[];
+ const scale = NumCast(imgDoc.data_nativeWidth) / img.width;
+ imgDocFaceDescriptions.forEach((fd, i) => {
+ const faceDescriptor = new List<number>(Array.from(fd.descriptor));
+ const matchedUniqueFace = this.findMatchingFaceDoc(fd.descriptor) ?? this.createUniqueFaceDoc(activeDashboard);
+ const faceAnno = Docs.Create.FreeformDocument([], {
+ title: ComputedField.MakeFunction(`this.face.face`, undefined, undefined, 'this.face.face = value') as unknown as string, //
+ annotationOn: imgDoc,
+ face: matchedUniqueFace[DocData],
+ faceDescriptor: faceDescriptor,
+ backgroundColor: 'transparent',
+ x: fd.alignedRect.box.left * scale,
+ y: fd.alignedRect.box.top * scale,
+ _width: fd.alignedRect.box.width * scale,
+ _height: fd.alignedRect.box.height * scale,
+ })
+ FaceRecognitionHandler.UniqueFaceAddFaceImage(faceAnno, matchedUniqueFace); // add image/faceDescriptor to matched unique face
+ annos.push(faceAnno);
+ });
+
+ imgDoc[DocData].data_annotations = new List<Doc>(annos);
+ return imgDocFaceDescriptions;
+ })
+ ); // prettier-ignore
+ }
+ }
+ };
+}