path: root/src/client/views/search/FaceRecognitionHandler.tsx
import * as faceapi from 'face-api.js';
import { FaceMatcher } from 'face-api.js';
import { Doc, DocListCast } from '../../../fields/Doc';
import { DocData } from '../../../fields/DocSymbols';
import { List } from '../../../fields/List';
import { ComputedField } from '../../../fields/ScriptField';
import { DocCast, ImageCast, ImageCastToNameType, NumCast, StrCast } from '../../../fields/Types';
import { ImageField } from '../../../fields/URLField';
import { DocumentType } from '../../documents/DocumentTypes';
import { Docs } from '../../documents/Documents';
import { DocumentManager } from '../../util/DocumentManager';
import { DocumentView } from '../nodes/DocumentView';

/**
 * A singleton class that handles face recognition and manages a face collection Doc for each face found.
 * Displaying an image Doc anywhere triggers this class to test whether the image contains any faces.
 * If it does, each recognized face is compared against a stored, global set of faces (each represented
 * as a face collection Doc, also called a unique face Doc). If the face matches an existing unique face Doc,
 * it is added to that collection along with the numerical representation of the face, its face descriptor.
 *
 * Image Docs that are added to one or more unique face Docs are given an annotation rectangle that
 * highlights where the face is. Each annotation has these fields:
 *     faceDescriptor - the numerical representation of the face found in the image.
 *     face - the unique face Doc corresponding to the recognized face in the image.
 *     annotationOn - the image Doc where the face was found.
 *
 * Unique face Docs are created for each person identified and are stored in the Dashboard's myUniqueFaces field.
 *
 * Each unique face Doc represents one person and collects all matching face annotations for that person. It has these fields:
 *     face - a string label for the person that was recognized                                     (TODO: currently it's just a 'face#')
 *     face_annos - a list of face annotation Docs (each carrying the faceDescriptor, face, and annotationOn fields above).
 *
 * See the usage sketch at the end of this file for an example of querying the recognized faces.
 */
export class FaceRecognitionHandler {
    static _instance: FaceRecognitionHandler;
    private _apiModelReady = false;
    private _pendingAPIModelReadyDocs: DocumentView[] = [];

    public static get Instance() {
        return FaceRecognitionHandler._instance ?? new FaceRecognitionHandler();
    }

    /**
     * Loads the full-resolution ('_o') version of an image given its ImageField
     * @param imgUrl the image's ImageField
     * @returns a Promise that resolves with the loaded HTMLImageElement
     */
    private static loadImage = (imgUrl: ImageField): Promise<HTMLImageElement> => {
        const [name, type] = ImageCastToNameType(imgUrl);
        const imageURL = `${name}_o.${type}`;

        return new Promise((resolve, reject) => {
            const img = new Image();
            img.crossOrigin = 'anonymous';
            img.onload = () => resolve(img);
            img.onerror = err => reject(err);
            img.src = imageURL;
        });
    };

    /**
     * Returns the face annotation Docs for each face recognized in an image
     * (the annotations on the image's layout data field that have a 'face' field)
     * @param imgDoc image with faces
     * @returns face annotation Doc array
     */
    public static ImageDocFaceAnnos = (imgDoc: Doc) => DocListCast(imgDoc[`${Doc.LayoutDataKey(imgDoc)}_annotations`]).filter(doc => doc.face);

    /**
     * Returns a list of all unique face Docs on the current dashboard
     * @returns unique face Doc list
     */
    public static UniqueFaces = () => DocListCast(Doc.ActiveDashboard?.$myUniqueFaces);

    /**
     * Find a unique face from its name
     * @param name name of unique face
     * @returns unique face or undefined
     */
    public static FindUniqueFaceByName = (name: string) => FaceRecognitionHandler.UniqueFaces().find(faceDoc => faceDoc.title === name);

    /**
     * Removes a unique face Doc from the dashboard's set of recognized unique faces
     * @param faceDoc unique face Doc
     */
    public static DeleteUniqueFace = (faceDoc: Doc) => Doc.ActiveDashboard && Doc.RemoveDocFromList(Doc.ActiveDashboard[DocData], 'myUniqueFaces', faceDoc);

    /**
     * Returns the label associated with a unique face Doc
     * @param faceDoc unique face Doc
     * @returns label string
     */
    public static UniqueFaceLabel = (faceDoc: Doc) => StrCast(faceDoc.$face);

    /**
     * Sets the label associated with a unique face Doc
     * @param faceDoc unique face Doc
     * @param value new label string
     */
    public static SetUniqueFaceLabel = (faceDoc: Doc, value: string) => (faceDoc.$face = value);

    /**
     * Returns all the face descriptors associated with a unique face Doc
     * @param faceDoc unique face Doc
     * @returns face descriptors
     */
    public static UniqueFaceDescriptors = (faceDoc: Doc) => DocListCast(faceDoc.$face_annos).map(face => face.faceDescriptor as List<number>);

    /**
     * Returns a list of all face image Docs associated with a unique face Doc
     * @param faceDoc unique face Doc
     * @returns image Docs
     */
    public static UniqueFaceImages = (faceDoc: Doc) => DocListCast(faceDoc.$face_annos).map(face => DocCast(face.annotationOn, face));

    /**
     * Adds a face annotation to a unique face Doc's list of recognized face annotations (face_annos)
     * @param faceAnno - a face annotation Doc created for a face found in an image
     * @param faceDoc - the unique face Doc the annotation is added to
     */
    public static UniqueFaceAddFaceImage = (faceAnno: Doc, faceDoc: Doc) => {
        Doc.AddDocToList(faceDoc, 'face_annos', faceAnno);
    };

    /**
     * Removes an image Doc's face annotations from a unique face Doc and clears each annotation's 'face' field
     * @param faceAnno - the image Doc whose face annotations should be removed
     * @param faceDoc - unique face Doc
     */
    public static UniqueFaceRemoveFaceImage = (faceAnno: Doc, faceDoc: Doc) => {
        FaceRecognitionHandler.ImageDocFaceAnnos(faceAnno).forEach(face => Doc.RemoveDocFromList(faceDoc[DocData], 'face_annos', face) && (face.face = undefined));
    };

    /**
     * Constructs the singleton: kicks off loading the face-api.js models, classifies any document views queued
     * while the models were loading, and registers a callback so every newly rendered view gets classified.
     */
    constructor() {
        FaceRecognitionHandler._instance = this;
        this.loadAPIModels().then(() => this._pendingAPIModelReadyDocs.forEach(this.classifyFacesInImage));
        DocumentManager.Instance.AddAnyViewRenderedCB(dv => FaceRecognitionHandler.Instance.classifyFacesInImage(dv));
    }

    /**
     * Loads the face detection models.
     */
    private loadAPIModels = async () => {
        const MODEL_URL = `/models`;
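        // The detection, landmark, and recognition weight files are assumed to be hosted under this path;
        // loadFaceDetectionModel loads face-api.js's default SSD MobileNet V1 detector.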
        await faceapi.loadFaceDetectionModel(MODEL_URL);
        await faceapi.loadFaceLandmarkModel(MODEL_URL);
        await faceapi.loadFaceRecognitionModel(MODEL_URL);
        this._apiModelReady = true;
    };

    /**
     * Creates a new, empty unique face Doc and adds it to the dashboard's myUniqueFaces list
     * @param dashboard the active dashboard (its myUniqueFaces_count is incremented to label the new face)
     * @returns a unique face Doc
     */
    private createUniqueFaceDoc = (dashboard: Doc) => {
        const faceDocNum = NumCast(dashboard.$myUniqueFaces_count) + 1;
        dashboard.$myUniqueFaces_count = faceDocNum; // TODO: improve to a better name

        const uniqueFaceDoc = Docs.Create.UniqeFaceDocument({
            title: ComputedField.MakeFunction('this.face', undefined, undefined, 'this.face = value') as unknown as string,
            _layout_reflowHorizontal: true,
            _layout_reflowVertical: true,
            _layout_nativeDimEditable: true,
            _layout_borderRounding: '20px',
            _layout_fitWidth: true,
            _layout_autoHeight: true,
            _face_showImages: true,
            _width: 400,
            _height: 100,
        });
        uniqueFaceDoc.$face = `Face${faceDocNum}`;
        uniqueFaceDoc.$face_annos = new List<Doc>();
        Doc.MyFaceCollection && Doc.SetContainer(uniqueFaceDoc, Doc.MyFaceCollection);

        Doc.ActiveDashboard && Doc.AddDocToList(Doc.ActiveDashboard[DocData], 'myUniqueFaces', uniqueFaceDoc);
        return uniqueFaceDoc;
    };

    /**
     * Finds the unique face Doc that best matches a face descriptor
     * @param faceDescriptor face descriptor (Float32Array)
     * @returns the matching unique face Doc, or undefined if no stored face is close enough
     */
    private findMatchingFaceDoc = (faceDescriptor: Float32Array) => {
        if (!Doc.ActiveDashboard || FaceRecognitionHandler.UniqueFaces().length < 1) {
            return undefined;
        }

        const faceDescriptors = FaceRecognitionHandler.UniqueFaces().map(faceDoc => {
            const float32Array = FaceRecognitionHandler.UniqueFaceDescriptors(faceDoc).map(fd => new Float32Array(Array.from(fd)));
            return new faceapi.LabeledFaceDescriptors(FaceRecognitionHandler.UniqueFaceLabel(faceDoc), float32Array);
        });
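        // 0.6 is face-api.js's default euclidean-distance threshold for FaceMatcher; matches farther than this are labeled 'unknown'.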
        const faceMatcher = new FaceMatcher(faceDescriptors, 0.6);
        const match = faceMatcher.findBestMatch(faceDescriptor);
        if (match.label !== 'unknown') {
            return FaceRecognitionHandler.UniqueFaces().find(faceDoc => FaceRecognitionHandler.UniqueFaceLabel(faceDoc) === match.label);
        }
        return undefined;
    };

    /**
     * When a document view is rendered, this finds faces in its image and tries to
     * match them to existing unique faces; otherwise new unique face(s) are created.
     * @param imgDocView The document view whose image is being analyzed.
     */
    private classifyFacesInImage = async (imgDocView: DocumentView) => {
        const imgDoc = imgDocView.Document;
        if (!Doc.UserDoc().recognizeFaceImages) return;
        const activeDashboard = Doc.ActiveDashboard;
        if (!this._apiModelReady || !activeDashboard) {
            this._pendingAPIModelReadyDocs.push(imgDocView);
        } else if (imgDoc.type === DocumentType.LOADING && !imgDoc.loadingError) {
            setTimeout(() => this.classifyFacesInImage(imgDocView), 1000);
        } else {
            const imgUrl = ImageCast(imgDoc[Doc.LayoutDataKey(imgDoc)]);
            // only examine Docs that have an image and that haven't already been examined.
            if (imgUrl && !DocListCast(Doc.MyFaceCollection?.examinedFaceDocs).includes(imgDoc[DocData])) {
                imgDocView.ComponentView?.autoTag?.();
                Doc.MyFaceCollection && Doc.AddDocToList(Doc.MyFaceCollection, 'examinedFaceDocs', imgDoc[DocData]);
                FaceRecognitionHandler.loadImage(imgUrl).then(
                    // load image and analyze faces
                    img => faceapi
                            .detectAllFaces(img)
                            .withFaceLandmarks()
                            .withFaceDescriptors()
                            .then(imgDocFaceDescriptions => {  // For each face detected, find a match.
                                const annos = [] as Doc[];
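                                // scale maps face-api.js detection coordinates (loaded image pixels) into the image Doc's native coordinate space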
                                const scale = NumCast(imgDoc.data_nativeWidth) / img.width;
                                const showTags = imgDocFaceDescriptions.length > 1; // show face tags when more than one face is detected
                                imgDocFaceDescriptions.forEach(fd => {
                                    const faceDescriptor = new List<number>(Array.from(fd.descriptor));
                                    const matchedUniqueFace = this.findMatchingFaceDoc(fd.descriptor) ?? this.createUniqueFaceDoc(activeDashboard);
                                    const faceAnno = Docs.Create.FreeformDocument([], {
                                        title: ComputedField.MakeFunction(`this.face.face`, undefined, undefined, 'this.face.face = value') as unknown as string, //
                                        annotationOn: imgDoc,
                                        face: matchedUniqueFace[DocData],
                                        faceDescriptor: faceDescriptor,
                                        backgroundColor: 'transparent',
                                        x: fd.alignedRect.box.left * scale,
                                        y: fd.alignedRect.box.top * scale,
                                        _width: fd.alignedRect.box.width * scale,
                                        _height: fd.alignedRect.box.height * scale,
                                        _layout_showTags: showTags,
                                    });
                                    FaceRecognitionHandler.UniqueFaceAddFaceImage(faceAnno, matchedUniqueFace); // add image/faceDescriptor to matched unique face
                                    annos.push(faceAnno);
                                });

                                imgDoc.$data_annotations = new List<Doc>(annos);
                                imgDoc._layout_showTags = annos.length > 0;
                                return imgDocFaceDescriptions;
                            })
                ); // prettier-ignore
            }
        }
    };
}
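
/**
 * Usage sketch (illustrative comment only, not executed). Once the singleton exists, every rendered image
 * Doc is classified automatically; other code can then query the results through the static helpers above.
 * The face label 'Face1' and the replacement name 'Alice' below are hypothetical example values.
 *
 *     FaceRecognitionHandler.Instance; // ensure the singleton (and its face-api.js models) is constructed
 *
 *     // list each recognized person on the active dashboard with the number of images their face appears in
 *     FaceRecognitionHandler.UniqueFaces().forEach(faceDoc =>
 *         console.log(FaceRecognitionHandler.UniqueFaceLabel(faceDoc), FaceRecognitionHandler.UniqueFaceImages(faceDoc).length));
 *
 *     // rename an automatically generated label to a person's name (assumes the Doc's computed title resolves to its 'face' label)
 *     const matchedFace = FaceRecognitionHandler.FindUniqueFaceByName('Face1');
 *     matchedFace && FaceRecognitionHandler.SetUniqueFaceLabel(matchedFace, 'Alice');
 */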