author    bobzel <zzzman@gmail.com>  2025-06-11 11:50:45 -0400
committer bobzel <zzzman@gmail.com>  2025-06-11 11:50:45 -0400
commit    bf33580a66c1f8ce87e85bea701415788a887401 (patch)
tree      ed027d8ecd0cfb190ed672d6f5a84425d906656e /src
parent    403dcfb5e8b659f62ed51212ede3f5807caa58c6 (diff)
change how autoTag is triggered for images to not use a reaction
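In outline, the commit replaces a deselection-triggered MobX reaction with a direct, idempotent call into the image view. A minimal sketch of the two trigger patterns follows; the ImageView interface and the triggerViaReaction/triggerDirectly helpers are simplified stand-ins for illustration, not the actual Dash classes, while reaction, fireImmediately, isSelected, autoTag, and the $tags_chat guard come from the diff below.

import { reaction } from 'mobx';

interface ImageView {
    isSelected(): boolean;
    autoTag(): Promise<void>;
}

// Old trigger: a MobX reaction fires autoTag once the view is deselected.
// Every classified image leaves a live reaction registered, and fireImmediately
// means the effect also runs as soon as the reaction is created.
function triggerViaReaction(view: ImageView) {
    reaction(
        () => ({ sel: view.isSelected() }),
        ({ sel }) => { if (!sel) void view.autoTag(); },
        { fireImmediately: true }
    );
}

// New trigger: call autoTag directly. autoTag itself returns early when
// $tags_chat is already set, so repeated calls are harmless and no observer
// stays registered per image.
function triggerDirectly(view: ImageView) {
    void view.autoTag();
}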
Diffstat (limited to 'src')
-rw-r--r--  src/client/views/nodes/ImageBox.tsx                | 83
-rw-r--r--  src/client/views/search/FaceRecognitionHandler.tsx |  5
2 files changed, 34 insertions(+), 54 deletions(-)
diff --git a/src/client/views/nodes/ImageBox.tsx b/src/client/views/nodes/ImageBox.tsx
index 1e16bbfc9..d7e21b0a6 100644
--- a/src/client/views/nodes/ImageBox.tsx
+++ b/src/client/views/nodes/ImageBox.tsx
@@ -142,56 +142,39 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
};
autoTag = async () => {
-
- try {
- // 1) grab the full-size URL
- const layoutKey = Doc.LayoutDataKey(this.Document);
- const url = ImageCastWithSuffix(this.Document[layoutKey], '_o') ?? '';
- if (!url) throw new Error('No image URL found');
-
- // 2) convert to base64
- const base64 = await imageUrlToBase64(url);
- if (!base64) throw new Error('Failed to load image data');
-
- // 3) ask GPT for labels one label: PERSON or LANDSCAPE
- const raw = await gptImageLabel(
- base64,
- `Classify this image as PERSON or LANDSCAPE. You may only respond with one of these two options. Then
- provide five additional descriptive tags to describe the image for a total of 6 words outputted,
- delimited by spaces. For example: "LANDSCAPE BUNNY NATURE FOREST PEACEFUL OUTDOORS". Then add one final lengthier summary tag (separated by underscores)
- that describes the image.`
- );
-
- const { nativeWidth, nativeHeight } = this.nativeSize;
- const aspectRatio = nativeWidth && nativeHeight
- ? (nativeWidth / nativeHeight).toFixed(2)
- : '1.00';
-
- // 4) normalize and prefix
- const label = raw
- .trim()
- .toUpperCase()
-
- // 5) stash it on the Doc
- // overwrite any old tags so re-runs still work
- const tokens = label.split(/\s+/);
- this.Document.$tags_chat = new List<string>();
- tokens.forEach(tok => {
- (this.Document.$tags_chat as List<string>).push(tok)});
- (this.Document.$tags_chat as List<string>).push(`ASPECT_${aspectRatio}`);
-
- // 6) flip on “show tags” in the layout
- // (same flag that ImageLabelBox.toggleDisplayInformation uses)
- this.Document._layout_showTags = true;
-
- } catch (err) {
- console.error('autoTag failed:', err);
- } finally {
- }
- };
-
-
-
+ if (this.Document.$tags_chat) return;
+ try {
+ // 1) grab the full-size URL
+ const layoutKey = Doc.LayoutDataKey(this.Document);
+ const url = ImageCastWithSuffix(this.Document[layoutKey], '_o');
+ if (!url) throw new Error('No image URL found');
+
+ // 2) convert to base64
+ const base64 = await imageUrlToBase64(url);
+ if (!base64) throw new Error('Failed to load image data');
+
+ // 3) ask GPT for one primary label (PERSON or LANDSCAPE) plus descriptive tags
+ const label = await gptImageLabel(
+ base64,
+ `Classify this image as PERSON or LANDSCAPE. You may only respond with one of these two options.
+ Then provide five additional descriptive tags to describe the image for a total of 6 words outputted, delimited by spaces.
+ For example: "LANDSCAPE BUNNY NATURE FOREST PEACEFUL OUTDOORS".
+ Then add one final lengthier summary tag (separated by underscores) that describes the image.`
+ ).then(raw => raw.trim().toUpperCase());
+
+ const { nativeWidth, nativeHeight } = this.nativeSize;
+ const aspectRatio = ((nativeWidth || 1) / (nativeHeight || 1)).toFixed(2);
+
+ // 5) stash it on the Doc
+ // overwrite any old tags so re-runs still work
+ this.Document.$tags_chat = new List<string>([...label.split(/\s+/), `ASPECT_${aspectRatio}`]);
+
+ // 6) flip on “show tags” in the layout
+ this.Document._layout_showTags = true;
+ } catch (err) {
+ console.error('autoTag failed:', err);
+ }
+ };
getAnchor = (addAsAnnotation: boolean, pinProps?: PinProps) => {
const visibleAnchor = this._getAnchor?.(this._savedAnnotations, true); // use marquee anchor, otherwise, save zoom/pan as anchor
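For reference, the tag-building step of the rewritten autoTag reduces to a small pure transformation: normalize the GPT reply, split it on whitespace, and append an aspect-ratio tag. A sketch under the assumption that the reply is the whitespace-delimited string the prompt asks for; buildTags is a hypothetical helper, not part of the codebase.

// Hypothetical helper mirroring the new autoTag logic.
function buildTags(raw: string, nativeWidth?: number, nativeHeight?: number): string[] {
    const aspectRatio = ((nativeWidth || 1) / (nativeHeight || 1)).toFixed(2);
    return [...raw.trim().toUpperCase().split(/\s+/), `ASPECT_${aspectRatio}`];
}

// Example: a reply for a 1600x900 photo.
console.log(buildTags('landscape bunny nature forest peaceful outdoors a_quiet_forest_meadow', 1600, 900));
// ['LANDSCAPE', 'BUNNY', 'NATURE', 'FOREST', 'PEACEFUL', 'OUTDOORS', 'A_QUIET_FOREST_MEADOW', 'ASPECT_1.78']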
diff --git a/src/client/views/search/FaceRecognitionHandler.tsx b/src/client/views/search/FaceRecognitionHandler.tsx
index 256e68afd..dac91b89a 100644
--- a/src/client/views/search/FaceRecognitionHandler.tsx
+++ b/src/client/views/search/FaceRecognitionHandler.tsx
@@ -9,7 +9,6 @@ import { ImageField } from '../../../fields/URLField';
import { DocumentType } from '../../documents/DocumentTypes';
import { Docs } from '../../documents/Documents';
import { DocumentManager } from '../../util/DocumentManager';
-import { reaction } from 'mobx';
import { DocumentView } from '../nodes/DocumentView';
/**
@@ -210,9 +209,7 @@ export class FaceRecognitionHandler {
} else if (imgDoc.type === DocumentType.LOADING && !imgDoc.loadingError) {
setTimeout(() => this.classifyFacesInImage(imgDocView), 1000);
} else {
- reaction(() => ({sel:imgDocView.isSelected()}), ({sel}) => !sel &&
- imgDocView.ComponentView?.autoTag?.(), {fireImmediately: true}
- )
+ imgDocView.ComponentView?.autoTag?.();
const imgUrl = ImageCast(imgDoc[Doc.LayoutDataKey(imgDoc)]);
if (imgUrl && !DocListCast(Doc.MyFaceCollection?.examinedFaceDocs).includes(imgDoc[DocData])) {
// only examine Docs that have an image and that haven't already been examined.
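With the reaction gone, classifyFacesInImage now dispatches autoTag inline: if the document is still a LOADING placeholder it retries a second later, otherwise it tags immediately. A condensed sketch of that dispatch shape; the 1-second retry and the LOADING check are taken from the hunk above, while the DocState/TaggableDocView types and the classify helper are simplified assumptions.

type DocState = 'LOADING' | 'READY' | 'ERROR';

interface TaggableDocView {
    state(): DocState;
    autoTag(): Promise<void>;
}

// Simplified dispatch: poll while the image is still loading, then tag once,
// directly, instead of deferring the call to a deselection reaction.
function classify(view: TaggableDocView) {
    if (view.state() === 'LOADING') {
        setTimeout(() => classify(view), 1000); // same 1s retry as classifyFacesInImage
    } else if (view.state() === 'READY') {
        void view.autoTag();
    }
}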