about summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  src/client/documents/Documents.ts                     |  1
-rw-r--r--  src/client/views/search/FaceRecognitionHandler.tsx    | 56
2 files changed, 38 insertions(+), 19 deletions(-)
diff --git a/src/client/documents/Documents.ts b/src/client/documents/Documents.ts
index ac271526f..235f54fe8 100644
--- a/src/client/documents/Documents.ts
+++ b/src/client/documents/Documents.ts
@@ -363,6 +363,7 @@ export class DocumentOptions {
data?: FieldType;
data_useCors?: BOOLt = new BoolInfo('whether CORS protocol should be used for web page');
_face_showImages?: BOOLt = new BoolInfo('whether to show images in uniqe face Doc');
+ face?: DOCt = new DocInfo('face document');
columnHeaders?: List<SchemaHeaderField>; // headers for stacking views
schemaHeaders?: List<SchemaHeaderField>; // headers for schema view
dockingConfig?: STRt = new StrInfo('configuration of golden layout windows (applies only if doc is rendered as a CollectionDockingView)', false);
diff --git a/src/client/views/search/FaceRecognitionHandler.tsx b/src/client/views/search/FaceRecognitionHandler.tsx
index 4e500d24e..4906ba4b6 100644
--- a/src/client/views/search/FaceRecognitionHandler.tsx
+++ b/src/client/views/search/FaceRecognitionHandler.tsx
@@ -9,6 +9,7 @@ import { ImageField } from '../../../fields/URLField';
import { DocumentType } from '../../documents/DocumentTypes';
import { Docs } from '../../documents/Documents';
import { DocumentManager } from '../../util/DocumentManager';
+import { ComputedField } from '../../../fields/ScriptField';
/**
* A singleton class that handles face recognition and manages face Doc collections for each face found.
@@ -176,7 +177,7 @@ export class FaceRecognitionHandler {
dashboard[DocData].myUniqueFaces_count = faceDocNum; // TODO: improve to a better name
const uniqueFaceDoc = Docs.Create.UniqeFaceDocument({
- title: `Face ${faceDocNum}`,
+ title: ComputedField.MakeFunction('this.face') as unknown as string,
_layout_reflowHorizontal: true,
_layout_reflowVertical: true,
_layout_nativeDimEditable: true,
@@ -237,27 +238,44 @@ export class FaceRecognitionHandler {
setTimeout(() => this.classifyFacesInImage(imgDoc), 1000);
} else {
const imgUrl = ImageCast(imgDoc[Doc.LayoutFieldKey(imgDoc)]);
- if (imgUrl && !DocListCast(Doc.MyFaceCollection.examinedFaceDocs).includes(imgDoc[DocData])) { // only examine Docs that have an image and that haven't already been examined.
+ if (imgUrl && !DocListCast(Doc.MyFaceCollection.examinedFaceDocs).includes(imgDoc[DocData])) {
+ // only examine Docs that have an image and that haven't already been examined.
Doc.AddDocToList(Doc.MyFaceCollection, 'examinedFaceDocs', imgDoc[DocData]);
FaceRecognitionHandler.initImageDocFaceDescriptors(imgDoc);
- FaceRecognitionHandler.loadImage(imgUrl).then( // load image and analyze faces
- img => faceapi.detectAllFaces(img).withFaceLandmarks().withFaceDescriptors()
- .then(imgDocFaceDescriptions => { // For each face detected, find a match.
- const annos = [] as Doc[];
- const scale = NumCast(imgDoc.data_nativeWidth) / img.width;
- for (const fd of imgDocFaceDescriptions) {
- const faceDescriptor = new List<number>(Array.from(fd.descriptor));
- annos.push(Docs.Create.FreeformDocument([], {backgroundColor: "#55555555", x: fd.alignedRect.box.left*scale, y: fd.alignedRect.box.top*scale, _width: fd.alignedRect.box.width*scale, _height: fd.alignedRect.box.height*scale}))
- FaceRecognitionHandler.ImageDocAddFaceDescriptor(imgDoc, faceDescriptor); // add face descriptor to image's list of descriptors
- const matchedUniqueFace = this.findMatchingFaceDoc(fd.descriptor) ?? this.createUniqueFaceDoc(activeDashboard);
- FaceRecognitionHandler.UniqueFaceAddFaceImage(imgDoc, faceDescriptor, matchedUniqueFace); // add image/faceDescriptor to matched unique face
- }
+ FaceRecognitionHandler.loadImage(imgUrl).then(
+ // load image and analyze faces
+ img => faceapi
+ .detectAllFaces(img)
+ .withFaceLandmarks()
+ .withFaceDescriptors()
+ .then(imgDocFaceDescriptions => { // For each face detected, find a match.
+ const annos = [] as Doc[];
+ const scale = NumCast(imgDoc.data_nativeWidth) / img.width;
+ imgDocFaceDescriptions.forEach((fd, i) => {
+ const faceDescriptor = new List<number>(Array.from(fd.descriptor));
+ FaceRecognitionHandler.ImageDocAddFaceDescriptor(imgDoc, faceDescriptor); // add face descriptor to image's list of descriptors
+ const matchedUniqueFace = this.findMatchingFaceDoc(fd.descriptor) ?? this.createUniqueFaceDoc(activeDashboard);
+ FaceRecognitionHandler.UniqueFaceAddFaceImage(imgDoc, faceDescriptor, matchedUniqueFace); // add image/faceDescriptor to matched unique face
+ annos.push(
+ Docs.Create.FreeformDocument([], {
+ title: ComputedField.MakeFunction(`this.face.face`) as unknown as string, //
+ annotationOn: imgDoc,
+ face: matchedUniqueFace,
+ backgroundColor: '#55555555',
+ x: fd.alignedRect.box.left * scale,
+ y: fd.alignedRect.box.top * scale,
+ _width: fd.alignedRect.box.width * scale,
+ _height: fd.alignedRect.box.height * scale,
+ })
+ );
+ Doc.SetContainer(annos.lastElement(), imgDoc[DocData]);
+ });
- imgDoc[DocData].data_annotations = new List<Doc>(annos);
- return imgDocFaceDescriptions;
- })
- );
- } // prettier-ignore
+ imgDoc[DocData].data_annotations = new List<Doc>(annos);
+ return imgDocFaceDescriptions;
+ })
+ ); // prettier-ignore
+ }
}
};
}