Diffstat (limited to 'src/client/views/nodes/ImageBox.tsx')
-rw-r--r--  src/client/views/nodes/ImageBox.tsx  |  83
1 file changed, 33 insertions(+), 50 deletions(-)
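This commit simplifies ImageBox.autoTag: it adds an early-return guard when tags already exist, folds the normalize step into the gptImageLabel call, and replaces the tokenize-and-push loop with a single List<string> construction. A minimal TypeScript sketch of the tag list the new code produces, assuming the model follows the prompt format (the sample response and image dimensions below are hypothetical):

    // Hypothetical model output: class word, five descriptors, underscore-joined summary.
    const raw = 'LANDSCAPE BUNNY NATURE FOREST PEACEFUL OUTDOORS RABBIT_RESTING_IN_A_SUNLIT_FOREST_CLEARING';
    // autoTag appends an ASPECT_<ratio> tag derived from the image's native size.
    const aspectRatio = (1600 / 1067).toFixed(2); // "1.50"
    const tags = [...raw.trim().toUpperCase().split(/\s+/), `ASPECT_${aspectRatio}`];
    // tags === ['LANDSCAPE', 'BUNNY', 'NATURE', 'FOREST', 'PEACEFUL', 'OUTDOORS',
    //           'RABBIT_RESTING_IN_A_SUNLIT_FOREST_CLEARING', 'ASPECT_1.50']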
diff --git a/src/client/views/nodes/ImageBox.tsx b/src/client/views/nodes/ImageBox.tsx
index 1e16bbfc9..d7e21b0a6 100644
--- a/src/client/views/nodes/ImageBox.tsx
+++ b/src/client/views/nodes/ImageBox.tsx
@@ -142,56 +142,39 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
     };
     autoTag = async () => {
-
-        try {
-            // 1) grab the full-size URL
-            const layoutKey = Doc.LayoutDataKey(this.Document);
-            const url = ImageCastWithSuffix(this.Document[layoutKey], '_o') ?? '';
-            if (!url) throw new Error('No image URL found');
-
-            // 2) convert to base64
-            const base64 = await imageUrlToBase64(url);
-            if (!base64) throw new Error('Failed to load image data');
-
-            // 3) ask GPT for labels one label: PERSON or LANDSCAPE
-            const raw = await gptImageLabel(
-                base64,
-                `Classify this image as PERSON or LANDSCAPE. You may only respond with one of these two options. Then
-                 provide five additional descriptive tags to describe the image for a total of 6 words outputted,
-                 delimited by spaces. For example: "LANDSCAPE BUNNY NATURE FOREST PEACEFUL OUTDOORS". Then add one final lengthier summary tag (separated by underscores)
-                 that describes the image.`
-            );
-
-            const { nativeWidth, nativeHeight } = this.nativeSize;
-            const aspectRatio = nativeWidth && nativeHeight
-                ? (nativeWidth / nativeHeight).toFixed(2)
-                : '1.00';
-
-            // 4) normalize and prefix
-            const label = raw
-                .trim()
-                .toUpperCase()
-
-            // 5) stash it on the Doc
-            // overwrite any old tags so re-runs still work
-            const tokens = label.split(/\s+/);
-            this.Document.$tags_chat = new List<string>();
-            tokens.forEach(tok => {
-                (this.Document.$tags_chat as List<string>).push(tok)});
-            (this.Document.$tags_chat as List<string>).push(`ASPECT_${aspectRatio}`);
-
-            // 6) flip on “show tags” in the layout
-            // (same flag that ImageLabelBox.toggleDisplayInformation uses)
-            this.Document._layout_showTags = true;
-
-        } catch (err) {
-            console.error('autoTag failed:', err);
-        } finally {
-        }
-    };
-
-
-
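+        // skip images that already have tags, so repeated calls are cheap no-ops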
+        if (this.Document.$tags_chat) return;
+        try {
+            // 1) grab the full-size URL
+            const layoutKey = Doc.LayoutDataKey(this.Document);
+            const url = ImageCastWithSuffix(this.Document[layoutKey], '_o');
+            if (!url) throw new Error('No image URL found');
+
+            // 2) convert to base64
+            const base64 = await imageUrlToBase64(url);
+            if (!base64) throw new Error('Failed to load image data');
+
+            // 3) ask GPT for one label (PERSON or LANDSCAPE) plus descriptive tags
+            const label = (
+                await gptImageLabel(
+                    base64,
+                    `Classify this image as PERSON or LANDSCAPE; respond with exactly one of these two options first.
+                     Then provide five additional descriptive tags for the image, for a total of six words, delimited by spaces.
+                     For example: "LANDSCAPE BUNNY NATURE FOREST PEACEFUL OUTDOORS".
+                     Finally, add one lengthier summary tag (words separated by underscores) that describes the image.`
+                )
+            ).trim().toUpperCase();
+
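+            // 4) compute the aspect ratio (missing native dimensions fall back to 1)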
+            const { nativeWidth, nativeHeight } = this.nativeSize;
+            const aspectRatio = ((nativeWidth || 1) / (nativeHeight || 1)).toFixed(2);
+
+            // 5) stash the tags on the Doc
+            //    (the early-return guard above keeps re-runs from overwriting them)
+            this.Document.$tags_chat = new List<string>([...label.split(/\s+/), `ASPECT_${aspectRatio}`]);
+
+            // 6) flip on “show tags” in the layout
+            this.Document._layout_showTags = true;
+        } catch (err) {
+            console.error('autoTag failed:', err);
+        }
+    };
     getAnchor = (addAsAnnotation: boolean, pinProps?: PinProps) => {
         const visibleAnchor = this._getAnchor?.(this._savedAnnotations, true); // use marquee anchor, otherwise, save zoom/pan as anchor