4 files changed, 48 insertions, 19 deletions
diff --git a/src/client/views/TagsView.tsx b/src/client/views/TagsView.tsx
index d2ca77446..0ac015b36 100644
--- a/src/client/views/TagsView.tsx
+++ b/src/client/views/TagsView.tsx
@@ -1,6 +1,6 @@
 import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
 import { Button, Colors, IconButton } from 'browndash-components';
-import { action, computed, makeObservable, observable } from 'mobx';
+import { IReactionDisposer, action, computed, makeObservable, observable, reaction } from 'mobx';
 import { observer } from 'mobx-react';
 import React from 'react';
 import ResizeObserver from 'resize-observer-polyfill';
@@ -248,11 +248,25 @@ export class TagsView extends ObservableReactComponent<TagViewProps> {
         makeObservable(this);
     }
 
+    @observable _panelHeightDirty = 0;
     @observable _currentInput = '';
     @observable _isEditing = !StrListCast(this._props.View.dataDoc.tags).length;
+    _heightDisposer: IReactionDisposer | undefined;
+
+    componentDidMount() {
+        this._heightDisposer = reaction(
+            () => this._props.View.screenToContentsTransform(),
+            xf => {
+                this._panelHeightDirty = this._panelHeightDirty + 1;
+            }
+        );
+    }
+    componentWillUnmount() {
+        this._heightDisposer?.();
+    }
 
     @computed get currentScale() {
-        return NumCast((this._props.View.Document.embedContainer as Doc)?._freeform_scale, 1);
+        return NumCast(DocCast(this._props.View.Document.embedContainer)?._freeform_scale, 1);
     }
     @computed get isEditing() {
         return this._isEditing && DocumentView.SelectedDocs().includes(this._props.View.Document);
@@ -289,11 +303,14 @@ export class TagsView extends ObservableReactComponent<TagViewProps> {
      */
     render() {
         const tagsList = new Set<string>(StrListCast(this._props.View.dataDoc.tags));
+        const chatTagsList = new Set<string>(StrListCast(this._props.View.dataDoc.tags_chat));
         const facesList = new Set<string>(
             DocListCast(this._props.View.dataDoc[Doc.LayoutFieldKey(this._props.View.Document) + '_annotations'])
+                .concat(this._props.View.Document)
                 .filter(d => d.face)
                 .map(doc => StrCast(DocCast(doc.face)?.title))
         );
+        this._panelHeightDirty;
 
         return !this._props.View.Document.showTags ? null : (
             <div
@@ -351,6 +368,18 @@ export class TagsView extends ObservableReactComponent<TagViewProps> {
                                 />
                             );
                         })}
+                        {Array.from(chatTagsList).map(tag => {
+                            return (
+                                <Button
+                                    style={{ margin: '2px 2px', border: '1px solid black', backgroundColor: 'lightpink', color: 'black' }}
+                                    text={tag}
+                                    color={SnappingManager.userVariantColor}
+                                    tooltip="Add existing tag"
+                                    onClick={() => this.submitTag(tag)}
+                                    key={tag}
+                                />
+                            );
+                        })}
                     </div>
                 </div>
             ) : null}
diff --git a/src/client/views/collections/collectionFreeForm/FaceCollectionBox.tsx b/src/client/views/collections/collectionFreeForm/FaceCollectionBox.tsx
index c62303dc0..717081666 100644
--- a/src/client/views/collections/collectionFreeForm/FaceCollectionBox.tsx
+++ b/src/client/views/collections/collectionFreeForm/FaceCollectionBox.tsx
@@ -95,17 +95,18 @@ export class UniqueFaceBox extends ViewBoxBaseComponent<FieldViewProps>() {
         const faceDescriptorsAsFloat32Array = FaceRecognitionHandler.UniqueFaceDescriptors(this.Document).map(fd => new Float32Array(Array.from(fd)));
         const labeledFaceDescriptor = new faceapi.LabeledFaceDescriptors(FaceRecognitionHandler.UniqueFaceLabel(this.Document), faceDescriptorsAsFloat32Array);
         const faceMatcher = new FaceMatcher([labeledFaceDescriptor], 1);
-        const { faceAnno } = FaceRecognitionHandler.ImageDocFaceAnnos(imgDoc).reduce(
-            (prev, faceAnno) => {
-                const match = faceMatcher.matchDescriptor(new Float32Array(Array.from(faceAnno.faceDescriptor as List<number>)));
-                return match.distance < prev.dist ? { dist: match.distance, faceAnno } : prev;
-            },
-            { dist: 1, faceAnno: undefined as Opt<Doc> }
-        );
+        const faceAnno =
+            FaceRecognitionHandler.ImageDocFaceAnnos(imgDoc).reduce(
+                (prev, faceAnno) => {
+                    const match = faceMatcher.matchDescriptor(new Float32Array(Array.from(faceAnno.faceDescriptor as List<number>)));
+                    return match.distance < prev.dist ? { dist: match.distance, faceAnno } : prev;
+                },
+                { dist: 1, faceAnno: undefined as Opt<Doc> }
+            ).faceAnno ?? imgDoc;
 
         // assign the face in the image that's closest to the face collection's face
         if (faceAnno) {
-            FaceRecognitionHandler.UniqueFaceRemoveFaceImage(faceAnno, DocCast(faceAnno.face));
+            faceAnno.face && FaceRecognitionHandler.UniqueFaceRemoveFaceImage(faceAnno, DocCast(faceAnno.face));
             FaceRecognitionHandler.UniqueFaceAddFaceImage(faceAnno, this.Document);
             faceAnno.face = this.Document;
         }
diff --git a/src/client/views/collections/collectionFreeForm/ImageLabelBox.tsx b/src/client/views/collections/collectionFreeForm/ImageLabelBox.tsx
index 5d3154e3c..e419e522c 100644
--- a/src/client/views/collections/collectionFreeForm/ImageLabelBox.tsx
+++ b/src/client/views/collections/collectionFreeForm/ImageLabelBox.tsx
@@ -163,7 +163,7 @@ export class ImageLabelBox extends ViewBoxBaseComponent<FieldViewProps>() {
         // Converts the images into a Base64 format, afterwhich the information is sent to GPT to label them.
         const imageInfos = this._selectedImages.map(async doc => {
-            if (!doc[DocData].tags) {
+            if (!doc[DocData].tags_chat) {
                 const [name, type] = ImageCast(doc[Doc.LayoutFieldKey(doc)]).url.href.split('.');
                 return CollectionCardView.imageUrlToBase64(`${name}_o.${type}`).then(hrefBase64 =>
                     !hrefBase64 ? undefined :
@@ -174,7 +174,7 @@ export class ImageLabelBox extends ViewBoxBaseComponent<FieldViewProps>() {
 
         (await Promise.all(imageInfos)).forEach(imageInfo => {
             if (imageInfo) {
-                imageInfo.doc[DocData].tags = (imageInfo.doc[DocData].tags as List<string>) ?? new List<string>();
+                imageInfo.doc[DocData].tags_chat = (imageInfo.doc[DocData].tags_chat as List<string>) ?? new List<string>();
 
                 const labels = imageInfo.labels.split('\n');
                 labels.forEach(label => {
@@ -184,8 +184,7 @@ export class ImageLabelBox extends ViewBoxBaseComponent<FieldViewProps>() {
                         .replace(/^\d+\.\s*|-|f\*/, '')
                         .replace(/^#/, '')
                         .trim();
-                    imageInfo.doc[DocData][label] = true;
-                    (imageInfo.doc[DocData].tags as List<string>).push(label);
+                    (imageInfo.doc[DocData].tags_chat as List<string>).push(label);
                 });
             }
         });
@@ -200,8 +199,8 @@ export class ImageLabelBox extends ViewBoxBaseComponent<FieldViewProps>() {
         this.startLoading();
 
         for (const doc of this._selectedImages) {
-            for (let index = 0; index < (doc[DocData].tags as List<string>).length; index++) {
-                const label = (doc[DocData].tags as List<string>)[index];
+            for (let index = 0; index < (doc[DocData].tags_chat as List<string>).length; index++) {
+                const label = (doc[DocData].tags_chat as List<string>)[index];
                 const embedding = await gptGetEmbedding(label);
                 doc[DocData][`tags_embedding_${index + 1}`] = new List<number>(embedding);
             }
@@ -214,7 +213,7 @@ export class ImageLabelBox extends ViewBoxBaseComponent<FieldViewProps>() {
         // For each image, loop through the labels, and calculate similarity. Associate it with the
         // most similar one.
         this._selectedImages.forEach(doc => {
-            const embedLists = numberRange((doc[DocData].tags as List<string>).length).map(n => Array.from(NumListCast(doc[DocData][`tags_embedding_${n + 1}`])));
+            const embedLists = numberRange((doc[DocData].tags_chat as List<string>).length).map(n => Array.from(NumListCast(doc[DocData][`tags_embedding_${n + 1}`])));
             const bestEmbedScore = (embedding: Opt<number[]>) => Math.max(...embedLists.map((l, index) => (embedding && similarity(Array.from(embedding), l)!) || 0));
             const {label: mostSimilarLabelCollect} = this._labelGroups.map(label => ({ label, similarityScore: bestEmbedScore(labelToEmbedding.get(label)) }))
@@ -321,7 +320,7 @@ export class ImageLabelBox extends ViewBoxBaseComponent<FieldViewProps>() {
                                 await DocumentView.showDocument(doc, { willZoomCentered: true });
                             }}></img>
                         <div className="image-information-labels" onClick={() => this._props.addDocTab(doc, OpenWhere.addRightKeyvalue)}>
-                            {(doc[DocData].tags as List<string>).map(label => {
+                            {(doc[DocData].tags_chat as List<string>).map(label => {
                                 return (
                                     <div key={Utils.GenerateGuid()} className="image-label" style={{ backgroundColor: SettingsManager.userVariantColor, borderColor: SettingsManager.userColor }}>
                                         {label}
diff --git a/src/client/views/search/FaceRecognitionHandler.tsx b/src/client/views/search/FaceRecognitionHandler.tsx
index 4c10307e6..4f6f5d314 100644
--- a/src/client/views/search/FaceRecognitionHandler.tsx
+++ b/src/client/views/search/FaceRecognitionHandler.tsx
@@ -101,7 +101,7 @@ export class FaceRecognitionHandler {
      * @param faceDoc unique face Doc
      * @returns image Docs
      */
-    public static UniqueFaceImages = (faceDoc: Doc) => DocListCast(faceDoc[DocData].face_annos).map(face => DocCast(face.annotationOn));
+    public static UniqueFaceImages = (faceDoc: Doc) => DocListCast(faceDoc[DocData].face_annos).map(face => DocCast(face.annotationOn, face));
 
     /**
      * Adds a face image to a unique face Doc, adds the unique face Doc to the images list of reognized faces,
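
Note on the TagsView hunks above: they use a standard MobX invalidation idiom. An observable counter (_panelHeightDirty) is bumped by a reaction that watches the view's screen-to-contents transform, and render() reads the counter so MobX re-runs the render whenever the transform changes. The sketch below shows that idiom in isolation; it is not Dash code, and viewTransform and TagPanelSketch are hypothetical stand-ins for the observables and component in the diff.

import { IReactionDisposer, action, makeObservable, observable, reaction } from 'mobx';

// Hypothetical stand-in for this._props.View.screenToContentsTransform():
// any observable value the panel's layout depends on.
const viewTransform = observable({ scale: 1, panX: 0, panY: 0 });

class TagPanelSketch {
    // Dummy counter: its only purpose is to invalidate whatever reads it.
    dirty = 0;
    private disposer: IReactionDisposer | undefined;

    constructor() {
        makeObservable(this, { dirty: observable, bump: action });
    }

    bump() {
        this.dirty++;
    }

    // Mirrors componentDidMount in the diff: watch the transform, bump on change.
    mount() {
        this.disposer = reaction(
            () => ({ ...viewTransform }),
            () => this.bump()
        );
    }

    // Mirrors componentWillUnmount: dispose the reaction so it cannot leak.
    unmount() {
        this.disposer?.();
    }

    // Mirrors render(): reading `dirty` registers it as a dependency, so MobX
    // re-runs this computation every time the reaction fires.
    render() {
        this.dirty;
        return `scale is now ${viewTransform.scale}`;
    }
}

Reading an otherwise unused observable inside render() is a deliberately blunt but cheap way to force re-evaluation when the value that actually drives the layout (screen geometry here) is not itself read during rendering.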