path: root/src/client/views/search
author    bobzel <zzzman@gmail.com>    2024-08-27 13:49:07 -0400
committer bobzel <zzzman@gmail.com>    2024-08-27 13:49:07 -0400
commit    cf10fbb80a022ddb141e41203b9135b6f1ed9527 (patch)
tree      a07622cb43c0485eebb3b5011b74b236bdf957dd /src/client/views/search
parent    248f9fdced99a36c28fb34f39b78e1267ec02486 (diff)
unifying image labels => tags. made face tags distinguishable from other tags - and drag off to place face collection.
Diffstat (limited to 'src/client/views/search')
-rw-r--r--   src/client/views/search/FaceRecognitionHandler.tsx   112
1 file changed, 39 insertions, 73 deletions
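
For orientation, a minimal sketch of the reworked data model that the diff below introduces (illustrative only, not part of the commit; the helper names faceAnnosOnImage, imagesForUniqueFace, and descriptorsForUniqueFace are made up here, while the field names and Doc utilities mirror the code in the diff):

import { Doc, DocListCast } from '../../../fields/Doc';
import { DocData } from '../../../fields/DocSymbols';
import { List } from '../../../fields/List';
import { DocCast } from '../../../fields/Types';

// Each recognized face is now an annotation Doc stored on the image's
// `<layout field>_annotations` list, carrying faceDescriptor / face / annotationOn fields.
const faceAnnosOnImage = (imgDoc: Doc) =>
    DocListCast(imgDoc[`${Doc.LayoutFieldKey(imgDoc)}_annotations`]).filter(anno => anno.face);

// A unique face Doc keeps a single face_annos list; the person's images and face
// descriptors are derived from those annotations rather than stored in separate lists.
const imagesForUniqueFace = (faceDoc: Doc) =>
    DocListCast(faceDoc[DocData].face_annos).map(anno => DocCast(anno.annotationOn));
const descriptorsForUniqueFace = (faceDoc: Doc) =>
    DocListCast(faceDoc[DocData].face_annos).map(anno => anno.faceDescriptor as List<number>);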
diff --git a/src/client/views/search/FaceRecognitionHandler.tsx b/src/client/views/search/FaceRecognitionHandler.tsx
index 021802061..4c10307e6 100644
--- a/src/client/views/search/FaceRecognitionHandler.tsx
+++ b/src/client/views/search/FaceRecognitionHandler.tsx
@@ -1,15 +1,14 @@
import * as faceapi from 'face-api.js';
import { FaceMatcher } from 'face-api.js';
-import { Doc, DocListCast, Opt } from '../../../fields/Doc';
+import { Doc, DocListCast } from '../../../fields/Doc';
import { DocData } from '../../../fields/DocSymbols';
import { List } from '../../../fields/List';
-import { listSpec } from '../../../fields/Schema';
-import { Cast, ImageCast, NumCast, StrCast } from '../../../fields/Types';
+import { ComputedField } from '../../../fields/ScriptField';
+import { DocCast, ImageCast, NumCast, StrCast } from '../../../fields/Types';
import { ImageField } from '../../../fields/URLField';
import { DocumentType } from '../../documents/DocumentTypes';
import { Docs } from '../../documents/Documents';
import { DocumentManager } from '../../util/DocumentManager';
-import { ComputedField } from '../../../fields/ScriptField';
/**
* A singleton class that handles face recognition and manages face Doc collections for each face found.
@@ -18,16 +17,17 @@ import { ComputedField } from '../../../fields/ScriptField';
* as a face collection Doc). If the face matches a face collection Doc, then it will be added to that
* collection along with the numerical representation of the face, its face descriptor.
*
- * Image Doc's that are added to one or more face collection Docs will be given these metadata fields:
- * <image data field>_faceDescriptors - list of all the numerical face representations found in the image.
- * <image data field>_faces - list of unique face Docs corresponding to recognized faces in the image.
+ * Image Docs that are added to one or more face collection Docs will be given an annotation rectangle
+ * for each recognized face that highlights where the face is, and each annotation will have these fields:
+ * faceDescriptor - the numerical representation of the face found in the image.
+ * face - the unique face Doc corresponding to the recognized face in the image.
+ * annotationOn - the image where the face was found
*
* unique face Doc's are created for each person identified and are stored in the Dashboard's myUniqueFaces field
*
* Each unique face Doc represents a unique face and collects all matching face images for that person. It has these fields:
* face - a string label for the person that was recognized (TODO: currently it's just a 'face#')
- * face_descriptors - a list of all the face descriptors for different images of the person
- * face_docList - a list of all image Docs that contain a face for the person
+ * face_annos - a list of face annotations, where each anno has a faceDescriptor and the image it annotates (annotationOn)
*/
export class FaceRecognitionHandler {
static _instance: FaceRecognitionHandler;
@@ -55,41 +55,11 @@ export class FaceRecognitionHandler {
};
/**
- * return the metadata field name where unique face Docs are stored
- * @param imgDoc image with faces
- * @returns name of field
- */
- private static ImageDocFaceField = (imgDoc: Doc) => `${Doc.LayoutFieldKey(imgDoc)}_faces`;
-
- /**
* Returns an array of faceDocs for each face recognized in the image
* @param imgDoc image with faces
* @returns faceDoc array
*/
- private static ImageDocFaces = (imgDoc: Doc) => DocListCast(imgDoc[`${Doc.LayoutFieldKey(imgDoc)}_faces`]);
-
- /**
- * initializes an image with an empty list of face descriptors
- * @param imgDoc image to initialize
- */
- private static initImageDocFaceDescriptors = (imgDoc: Doc) => {
- imgDoc[DocData][`${Doc.LayoutFieldKey(imgDoc)}_faceDescriptors`] = new List<List<number>>();
- };
- /**
- * returns the face descriptors for each face found on an image Doc
- * @param imgDoc
- * @returns list of face descriptors
- */
- public static ImageDocFaceDescriptors = (imgDoc: Doc) => imgDoc[DocData][`${Doc.LayoutFieldKey(imgDoc)}_faceDescriptors`] as List<List<number>>;
-
- /**
- * Adds a face descriptor for a face found in an image
- * @param imgDoc image Doc with face
- * @param faceDescriptor descriptor of a face
- */
- public static ImageDocAddFaceDescriptor = (imgDoc: Doc, faceDescriptor: List<number>) => {
- Cast(imgDoc[DocData][`${Doc.LayoutFieldKey(imgDoc)}_faceDescriptors`], listSpec('number'), null).push(faceDescriptor as unknown as number);
- };
+ public static ImageDocFaceAnnos = (imgDoc: Doc) => DocListCast(imgDoc[`${Doc.LayoutFieldKey(imgDoc)}_annotations`]).filter(doc => doc.face);
/**
* returns a list of all face collection Docs on the current dashboard
@@ -98,6 +68,13 @@ export class FaceRecognitionHandler {
public static UniqueFaces = () => DocListCast(Doc.ActiveDashboard?.[DocData].myUniqueFaces);
/**
+ * Find a unique face from its name
+ * @param name name of unique face
+ * @returns unique face or undefined
+ */
+ public static FindUniqueFaceByName = (name: string) => FaceRecognitionHandler.UniqueFaces().find(faceDoc => faceDoc.title === name);
+
+ /**
* Removes a unique face from the set of recognized unique faces
* @param faceDoc unique face Doc
* @returns
@@ -117,28 +94,23 @@ export class FaceRecognitionHandler {
* @param faceDoc unique face Doc
* @returns face descriptors
*/
- public static UniqueFaceDescriptors = (faceDoc: Doc) => faceDoc[DocData].face_descriptors as List<List<number>>;
+ public static UniqueFaceDescriptors = (faceDoc: Doc) => DocListCast(faceDoc[DocData].face_annos).map(face => face.faceDescriptor as List<number>);
/**
* Returns a list of all face image Docs associated with a unique face Doc
* @param faceDoc unique face Doc
* @returns image Docs
*/
- public static UniqueFaceImages = (faceDoc: Doc) => DocListCast(faceDoc[DocData].face_images);
+ public static UniqueFaceImages = (faceDoc: Doc) => DocListCast(faceDoc[DocData].face_annos).map(face => DocCast(face.annotationOn));
/**
* Adds a face image to a unique face Doc, adds the unique face Doc to the image's list of recognized faces,
* and updates the unique face's set of face image descriptors
* @param img - image with faces to add to a face collection Doc
- * @param faceDescriptor - the face descriptor for the face in the image to add
- * @param faceDoc - unique face Doc
+ * @param faceAnno - the face annotation Doc to add
+ * @param faceDoc - unique face Doc
*/
- public static UniqueFaceAddFaceImage = (img: Doc, faceDescriptor: Opt<List<number>>, faceDoc: Doc) => {
- Doc.AddDocToList(faceDoc, 'face_images', img);
- if (faceDescriptor) {
- Cast(faceDoc.face_descriptors, listSpec('number'), null).push(faceDescriptor as unknown as number); // items are lists of numbers, not numbers, but type system can't handle that
- Doc.AddDocToList(img[DocData], FaceRecognitionHandler.ImageDocFaceField(img), faceDoc);
- }
+ public static UniqueFaceAddFaceImage = (faceAnno: Doc, faceDoc: Doc) => {
+ Doc.AddDocToList(faceDoc, 'face_annos', faceAnno);
};
/**
@@ -146,11 +118,9 @@ export class FaceRecognitionHandler {
* @param img - image with faces to remove
* @param faceDoc - unique face Doc
*/
- public static UniqueFaceRemoveFaceImage = (img: Doc, faceDoc: Doc) => {
- Doc.RemoveDocFromList(faceDoc[DocData], 'face_images', img);
- const descriptorsEqual = (a: List<number>, b: List<number>) => (a === b ? true : a.length === b.length ? a.every((element, index) => element === b[index]) : false);
- faceDoc[DocData].face_descriptors = new List<List<number>>(FaceRecognitionHandler.UniqueFaceDescriptors(faceDoc).filter(fd => !FaceRecognitionHandler.ImageDocFaceDescriptors(img).some(desc => descriptorsEqual(fd, desc))));
- Doc.RemoveDocFromList(img[DocData], FaceRecognitionHandler.ImageDocFaceField(img), faceDoc);
+ public static UniqueFaceRemoveFaceImage = (faceAnno: Doc, faceDoc: Doc) => {
+ Doc.RemoveDocFromList(faceDoc[DocData], 'face_annos', faceAnno);
+ faceAnno.face = undefined;
};
constructor() {
@@ -192,8 +162,7 @@ export class FaceRecognitionHandler {
});
const uface = uniqueFaceDoc[DocData];
uface.face = `Face${faceDocNum}`;
- uface.face_images = new List<Doc>();
- uface.face_descriptors = new List<List<number>>();
+ uface.face_annos = new List<Doc>();
Doc.SetContainer(uniqueFaceDoc, Doc.MyFaceCollection);
Doc.ActiveDashboard && Doc.AddDocToList(Doc.ActiveDashboard[DocData], 'myUniqueFaces', uniqueFaceDoc);
@@ -243,7 +212,6 @@ export class FaceRecognitionHandler {
if (imgUrl && !DocListCast(Doc.MyFaceCollection.examinedFaceDocs).includes(imgDoc[DocData])) {
// only examine Docs that have an image and that haven't already been examined.
Doc.AddDocToList(Doc.MyFaceCollection, 'examinedFaceDocs', imgDoc[DocData]);
- FaceRecognitionHandler.initImageDocFaceDescriptors(imgDoc);
FaceRecognitionHandler.loadImage(imgUrl).then(
// load image and analyze faces
img => faceapi
@@ -255,22 +223,20 @@ export class FaceRecognitionHandler {
const scale = NumCast(imgDoc.data_nativeWidth) / img.width;
imgDocFaceDescriptions.forEach((fd, i) => {
const faceDescriptor = new List<number>(Array.from(fd.descriptor));
- FaceRecognitionHandler.ImageDocAddFaceDescriptor(imgDoc, faceDescriptor); // add face descriptor to image's list of descriptors
const matchedUniqueFace = this.findMatchingFaceDoc(fd.descriptor) ?? this.createUniqueFaceDoc(activeDashboard);
- FaceRecognitionHandler.UniqueFaceAddFaceImage(imgDoc, faceDescriptor, matchedUniqueFace); // add image/faceDescriptor to matched unique face
- annos.push(
- Docs.Create.FreeformDocument([], {
- title: ComputedField.MakeFunction(`this.face.face`, undefined, undefined, 'this.face.face = value') as unknown as string, //
- annotationOn: imgDoc,
- face: matchedUniqueFace[DocData],
- backgroundColor: '#55555555',
- x: fd.alignedRect.box.left * scale,
- y: fd.alignedRect.box.top * scale,
- _width: fd.alignedRect.box.width * scale,
- _height: fd.alignedRect.box.height * scale,
- })
- );
- Doc.SetContainer(annos.lastElement(), imgDoc[DocData]);
+ const faceAnno = Docs.Create.FreeformDocument([], {
+ title: ComputedField.MakeFunction(`this.face.face`, undefined, undefined, 'this.face.face = value') as unknown as string, //
+ annotationOn: imgDoc,
+ face: matchedUniqueFace[DocData],
+ faceDescriptor: faceDescriptor,
+ backgroundColor: 'transparent',
+ x: fd.alignedRect.box.left * scale,
+ y: fd.alignedRect.box.top * scale,
+ _width: fd.alignedRect.box.width * scale,
+ _height: fd.alignedRect.box.height * scale,
+ });
+ FaceRecognitionHandler.UniqueFaceAddFaceImage(faceAnno, matchedUniqueFace); // add face annotation to matched unique face
+ annos.push(faceAnno);
});
imgDoc[DocData].data_annotations = new List<Doc>(annos);
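
For completeness, a brief caller-side sketch of the reworked static API (not from the commit; the import path, the variable names, and the 'Face1' title are assumptions for illustration):

import { FaceRecognitionHandler } from './FaceRecognitionHandler';

// Look up a unique face Doc by name, then gather the images it appears in and the
// face annotation rectangles on those images.
const person = FaceRecognitionHandler.FindUniqueFaceByName('Face1');
const images = person ? FaceRecognitionHandler.UniqueFaceImages(person) : [];
const faceRects = images.flatMap(img => FaceRecognitionHandler.ImageDocFaceAnnos(img));
// Each rectangle is a plain annotation Doc whose `face` field points at the person's unique face Doc.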