author    bobzel <zzzman@gmail.com>    2024-10-07 18:13:38 -0400
committer bobzel <zzzman@gmail.com>    2024-10-07 18:13:38 -0400
commit    11244a698bce594982ee5dca55b9695bb774451c (patch)
tree      428d056c6b759c0d955c93ea8da2bd07d88ab42d /src/client/views/nodes/ImageBox.tsx
parent    4364cb0db3988537f6b9485146a16bc15d55e3cc (diff)
Moved all quiz code out of LabelBox and ImageBox and into StyleProviderQuiz. Changed quizBoxes and quizMode to be stored as Doc metadata. Extended styles to cover contextMenuItems. Removed this.setListening() from comparisonBox until contextMenu selectedVal is fixed.
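The gist of the ImageBox side of this change, as reflected in the diff below: quiz state is no longer kept in component observables but is read back from Doc metadata through computed getters, and anchor-menu items are delegated to the style provider. A minimal sketch, assuming the mobx/Dash helpers ImageBox already imports; StyleProviderQuiz itself is not part of this diff, so only the consumer side is shown.

// Quiz state is now stored on the Doc and read back via computed getters
// (mirrors the added lines in the hunk at @@ -110,6 +99,13 @@).
@computed get quizBoxes() {
    return DocListCast(this.Document.quizBoxes); // label boxes persisted as Doc metadata
}
@computed get quizMode() {
    return StrCast(this.Document._quizMode); // e.g. 'smart' | 'normal', or '' when not quizzing
}

// Anchor-menu wiring is delegated to the style provider instead of being set inline,
// so StyleProviderQuiz can contribute flashcard/label items without ImageBox knowing about them.
finishMarquee = () => {
    this._props.styleProvider?.(this.Document, this._props, StyleProp.AnchorMenuItems);
    // ...remaining marquee bookkeeping unchanged (see the hunk at @@ -823,12 +497,11 @@)
};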
Diffstat (limited to 'src/client/views/nodes/ImageBox.tsx')
-rw-r--r--  src/client/views/nodes/ImageBox.tsx  |  365
1 file changed, 18 insertions, 347 deletions
diff --git a/src/client/views/nodes/ImageBox.tsx b/src/client/views/nodes/ImageBox.tsx
index 31f6df2ea..0b474076b 100644
--- a/src/client/views/nodes/ImageBox.tsx
+++ b/src/client/views/nodes/ImageBox.tsx
@@ -17,13 +17,11 @@ import { Cast, ImageCast, NumCast, RTFCast, StrCast } from '../../../fields/Type
import { ImageField } from '../../../fields/URLField';
import { TraceMobx } from '../../../fields/util';
import { emptyFunction } from '../../../Utils';
-import { gptAPICall, GPTCallType, gptImageLabel } from '../../apis/gpt/GPT';
import { Docs } from '../../documents/Documents';
import { DocumentType } from '../../documents/DocumentTypes';
import { DocUtils, FollowLinkScript } from '../../documents/DocUtils';
import { Networking } from '../../Network';
import { DragManager } from '../../util/DragManager';
-import { dropActionType } from '../../util/DropActionTypes';
import { SnappingManager } from '../../util/SnappingManager';
import { undoable, undoBatch } from '../../util/UndoManager';
import { CollectionFreeFormView } from '../collections/collectionFreeForm/CollectionFreeFormView';
@@ -38,16 +36,8 @@ import { StyleProp } from '../StyleProp';
import { DocumentView } from './DocumentView';
import { FieldView, FieldViewProps } from './FieldView';
import { FocusViewOptions } from './FocusViewOptions';
-import { ImageUtility } from './generativeFill/generativeFillUtils/ImageHandler';
import './ImageBox.scss';
import { OpenWhere } from './OpenWhere';
-// import stringSimilarity from 'string-similarity';
-
-enum quizMode {
- SMART = 'smart',
- NORMAL = 'normal',
- NONE = 'none',
-}
export class ImageEditorData {
// eslint-disable-next-line no-use-before-define
@@ -79,25 +69,24 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
public static LayoutString(fieldKey: string) {
return FieldView.LayoutString(ImageBox, fieldKey);
}
+ _ffref = React.createRef<CollectionFreeFormView>();
private _ignoreScroll = false;
private _forcedScroll = false;
private _dropDisposer?: DragManager.DragDropDisposer;
private _disposers: { [name: string]: IReactionDisposer } = {};
private _getAnchor: (savedAnnotations: Opt<ObservableMap<number, HTMLDivElement[]>>, addAsAnnotation: boolean) => Opt<Doc> = () => undefined;
private _overlayIconRef = React.createRef<HTMLDivElement>();
- private _marqueeref = React.createRef<MarqueeAnnotator>();
private _mainCont: React.RefObject<HTMLDivElement> = React.createRef();
private _annotationLayer: React.RefObject<HTMLDivElement> = React.createRef();
- private _imageRef: HTMLImageElement | null = null; // <video> ref
- @observable private _quizBoxes: Doc[] = [];
+ imageRef: HTMLImageElement | null = null; // <video> ref
+ marqueeref = React.createRef<MarqueeAnnotator>();
+
@observable private _searchInput = '';
- @observable private _quizMode = quizMode.NONE;
@observable private _savedAnnotations = new ObservableMap<number, (HTMLDivElement & { marqueeing?: boolean })[]>();
@observable private _curSuffix = '';
@observable private _error = '';
@observable private _loading = false;
@observable private _isHovering = false; // flag to switch between primary and alternate images on hover
- _ffref = React.createRef<CollectionFreeFormView>();
constructor(props: FieldViewProps) {
super(props);
@@ -110,6 +99,13 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
ele && (this._dropDisposer = DragManager.MakeDropTarget(ele, this.drop.bind(this), this.Document));
};
+ @computed get quizBoxes() {
+ return DocListCast(this.Document.quizBoxes);
+ }
+ @computed get quizMode() {
+ return StrCast(this.Document._quizMode);
+ }
+
getAnchor = (addAsAnnotation: boolean, pinProps?: PinProps) => {
const visibleAnchor = this._getAnchor?.(this._savedAnnotations, true); // use marquee anchor, otherwise, save zoom/pan as anchor
const anchor =
@@ -315,331 +311,10 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
return cropping;
};
- createCanvas = async () => {
- const canvas = document.createElement('canvas');
- const scaling = 1 / (this._props.NativeDimScaling?.() || 1);
- const w = AnchorMenu.Instance.marqueeWidth * scaling;
- const h = AnchorMenu.Instance.marqueeHeight * scaling;
- canvas.width = w;
- canvas.height = h;
- const ctx = canvas.getContext('2d'); // draw image to canvas. scale to target dimensions
- if (ctx) {
- this._imageRef && ctx.drawImage(this._imageRef, NumCast(this._marqueeref.current?.left) * scaling, NumCast(this._marqueeref.current?.top) * scaling, w, h, 0, 0, w, h);
- }
- const blob = await ImageUtility.canvasToBlob(canvas);
- return ImageBox.selectUrlToBase64(blob);
- };
-
- createSnapshotLink = (imagePath: string, downX?: number, downY?: number) => {
- const url = !imagePath.startsWith('/') ? ClientUtils.CorsProxy(imagePath) : imagePath;
- const width = NumCast(this.layoutDoc._width) || 1;
- const height = NumCast(this.layoutDoc._height);
- const imageSnapshot = Docs.Create.ImageDocument(url, {
- _nativeWidth: Doc.NativeWidth(this.layoutDoc),
- _nativeHeight: Doc.NativeHeight(this.layoutDoc),
- x: NumCast(this.layoutDoc.x) + width,
- y: NumCast(this.layoutDoc.y),
- onClick: FollowLinkScript(),
- _width: 150,
- _height: (height / width) * 150,
- title: '--snapshot' + NumCast(this.layoutDoc._layout_currentTimecode) + ' image-',
- });
- Doc.SetNativeWidth(imageSnapshot[DocData], Doc.NativeWidth(this.layoutDoc));
- Doc.SetNativeHeight(imageSnapshot[DocData], Doc.NativeHeight(this.layoutDoc));
- this._props.addDocument?.(imageSnapshot);
- DocUtils.MakeLink(imageSnapshot, this.getAnchor(true), { link_relationship: 'video snapshot' });
- setTimeout(() => downX !== undefined && downY !== undefined && DocumentView.getFirstDocumentView(imageSnapshot)?.startDragging(downX, downY, dropActionType.move, true));
- };
-
- static selectUrlToBase64 = async (blob: Blob): Promise<string> => {
- try {
- return new Promise((resolve, reject) => {
- const reader = new FileReader();
- reader.readAsDataURL(blob);
- reader.onloadend = () => resolve(reader.result as string);
- reader.onerror = error => reject(error);
- });
- } catch (error) {
- console.error('Error:', error);
- throw error;
- }
- };
-
- /**
- * Calls backend to find any text on an image. Gets the text and the
- * coordinates of the text and creates label boxes at those locations.
- * @param quiz
- * @param i
- */
- pushInfo = async (quiz: quizMode, i?: string) => {
- this._quizMode = quiz;
- this._loading = true;
-
- const img = {
- file: i ? i : this.paths[0],
- drag: i ? 'drag' : 'full',
- smart: quiz,
- };
- const response = await axios.post('http://localhost:105/labels/', img, {
- headers: {
- 'Content-Type': 'application/json',
- },
- });
- if (response.data['boxes'].length != 0) {
- this.createBoxes(response.data['boxes'], response.data['text']);
- } else {
- this._loading = false;
- }
- };
-
- /**
- * Creates label boxes over text on the image to be filled in.
- * @param boxes
- * @param texts
- */
- createBoxes = (boxes: [[[number, number]]], texts: [string]) => {
- for (let i = 0; i < boxes.length; i++) {
- const coords = boxes[i] ? boxes[i] : [];
- const width = coords[1][0] - coords[0][0];
- const height = coords[2][1] - coords[0][1];
- const text = texts[i];
-
- const newCol = Docs.Create.LabelDocument({
- _width: width,
- _height: height,
- _layout_fitWidth: true,
- title: '',
- });
- const scaling = 1 / (this._props.NativeDimScaling?.() || 1);
- newCol.x = coords[0][0] + NumCast(this._marqueeref.current?.left) * scaling;
- newCol.y = coords[0][1] + NumCast(this._marqueeref.current?.top) * scaling;
-
- newCol.zIndex = 1000;
- newCol.forceActive = true;
- newCol.quiz = text;
- newCol.showQuiz = false;
- newCol[DocData].textTransform = 'none';
- this._quizBoxes.push(newCol);
- this.addDocument(newCol);
- this._loading = false;
- }
- };
-
- /**
- * Create flashcards from an image.
- */
- getImageDesc = async () => {
- this._loading = true;
- try {
- const hrefBase64 = await this.createCanvas();
- const response = await gptImageLabel(hrefBase64, 'Make flashcards out of this image with each question and answer labeled as "question" and "answer". Do not label each flashcard and do not include asterisks: ');
- AnchorMenu.Instance.transferToFlashcard(response, NumCast(this.layoutDoc['x']), NumCast(this.layoutDoc['y']));
- } catch (error) {
- console.log('Error', error);
- }
- this._loading = false;
- };
-
- /**
- * Calls the createCanvas and pushInfo methods to convert the
- * image to a form that can be passed to GPT and find the locations
- * of the text.
- */
- makeLabels = async () => {
- try {
- const hrefBase64 = await this.createCanvas();
- this.pushInfo(quizMode.NORMAL, hrefBase64);
- } catch (error) {
- console.log('Error', error);
- }
- };
-
- /**
- * Determines whether two words should be considered
- * the same, allowing minor typos.
- * @param str1
- * @param str2
- * @returns
- */
- levenshteinDistance = (str1: string, str2: string) => {
- const len1 = str1.length;
- const len2 = str2.length;
- const dp = Array.from(Array(len1 + 1), () => Array(len2 + 1).fill(0));
-
- if (len1 === 0) return len2;
- if (len2 === 0) return len1;
-
- for (let i = 0; i <= len1; i++) dp[i][0] = i;
- for (let j = 0; j <= len2; j++) dp[0][j] = j;
-
- for (let i = 1; i <= len1; i++) {
- for (let j = 1; j <= len2; j++) {
- const cost = str1[i - 1] === str2[j - 1] ? 0 : 1;
- dp[i][j] = Math.min(
- dp[i - 1][j] + 1, // deletion
- dp[i][j - 1] + 1, // insertion
- dp[i - 1][j - 1] + cost // substitution
- );
- }
- }
-
- return dp[len1][len2];
- };
-
- /**
- * Different algorithm for determining string similarity.
- * @param str1
- * @param str2
- * @returns
- */
- jaccardSimilarity = (str1: string, str2: string) => {
- const set1 = new Set(str1.split(' '));
- const set2 = new Set(str2.split(' '));
-
- const intersection = new Set([...set1].filter(x => set2.has(x)));
- const union = new Set([...set1, ...set2]);
-
- return intersection.size / union.size;
- };
-
- /**
- * Averages the jaccardSimilarity and levenshteinDistance scores
- * to determine string similarity for the labelboxes answers and
- * the users response.
- * @param str1
- * @param str2
- * @returns
- */
- stringSimilarity(str1: string, str2: string) {
- const levenshteinDist = this.levenshteinDistance(str1, str2);
- const levenshteinScore = 1 - levenshteinDist / Math.max(str1.length, str2.length);
-
- const jaccardScore = this.jaccardSimilarity(str1, str2);
-
- // Combine the scores with a higher weight on Jaccard similarity
- return 0.5 * levenshteinScore + 0.5 * jaccardScore;
- }
-
- @computed get checkIcon() {
- return (
- <Tooltip title={<div className="dash-tooltip">Check</div>}>
- <div className="check-icon" onPointerDown={this.check}>
- <FontAwesomeIcon icon="circle-check" size="lg" />
- </div>
- </Tooltip>
- );
- }
-
- @computed get redoIcon() {
- return (
- <Tooltip title={<div className="dash-tooltip">Redo</div>}>
- <div className="redo-icon" onPointerDown={this.redo}>
- <FontAwesomeIcon icon="redo-alt" size="lg" />
- </div>
- </Tooltip>
- );
- }
-
- /**
- * Returns whether two strings are similar
- * @param input
- * @param target
- * @returns
- */
- compareWords = (input: string, target: string) => {
- const distance = this.stringSimilarity(input.toLowerCase(), target.toLowerCase());
- return distance >= 0.7;
- };
-
- /**
- * GPT returns a hex color for what color the label box should be based on
- * the correctness of the users answer.
- * @param inputString
- * @returns
- */
- extractHexAndSentences = (inputString: string) => {
- // Regular expression to match a hexadecimal number at the beginning followed by a period and sentences
- const regex = /^#([0-9A-Fa-f]+)\.\s*(.+)$/s;
- const match = inputString.match(regex);
-
- if (match) {
- const hexNumber = match[1];
- const sentences = match[2].trim();
- return { hexNumber, sentences };
- } else {
- return { error: 'The input string does not match the expected format.' };
- }
- };
-
- /**
- * Check whether the contents of the label boxes on an image are correct.
- */
- check = () => {
- this._loading = true;
- this._quizBoxes.forEach(async doc => {
- const input = StrCast(doc[DocData].title);
- if (this._quizMode == quizMode.SMART && input) {
- const questionText = 'Question: What was labeled in this image?';
- const rubricText = ' Rubric: ' + StrCast(doc.quiz);
- const queryText =
- questionText +
- ' UserAnswer: ' +
- input +
- '. ' +
- rubricText +
- '. One sentence and evaluate based on meaning, not wording. Provide a hex color at the beginning with a period after it on a scale of green (minor details missed) to red (big error) for how correct the answer is. Example: "#FFFFFF. Pasta is delicious."';
- const response = await gptAPICall(queryText, GPTCallType.QUIZ);
- const hexSent = this.extractHexAndSentences(response);
- doc.quiz = hexSent.sentences?.replace(/UserAnswer/g, "user's answer").replace(/Rubric/g, 'rubric');
- doc.backgroundColor = '#' + hexSent.hexNumber;
- } else {
- const match = this.compareWords(input, StrCast(doc.quiz));
- doc.backgroundColor = match ? '#11c249' : '#eb2d2d';
- }
- doc.showQuiz = true;
- });
- this._loading = false;
- };
-
- redo = () => {
- this._quizBoxes.forEach(doc => {
- doc[DocData].title = '';
- doc.backgroundColor = '#e4e4e4';
- doc.showQuiz = false;
- });
- };
-
- /**
- * Get rid of all the label boxes on the images.
- */
- exitQuizMode = () => {
- this._quizMode = quizMode.NONE;
- this._quizBoxes.forEach(doc => {
- this.removeDocument?.(doc);
- });
- this._quizBoxes = [];
- };
-
- @action
- setRef = (iref: HTMLImageElement | null) => {
- this._imageRef = iref;
- };
-
specificContextMenu = (): void => {
const field = Cast(this.dataDoc[this.fieldKey], ImageField);
if (field) {
const funcs: ContextMenuProps[] = [];
- const quizes: ContextMenuProps[] = [];
- quizes.push({
- description: 'Smart Check',
- event: this._quizMode == quizMode.NONE ? () => this.pushInfo(quizMode.SMART) : this.exitQuizMode,
- icon: 'pen-to-square',
- });
- quizes.push({
- description: 'Normal',
- event: this._quizMode == quizMode.NONE ? () => this.pushInfo(quizMode.NORMAL) : this.exitQuizMode,
- icon: 'pencil',
- });
funcs.push({ description: 'Rotate Clockwise 90', event: this.rotate, icon: 'redo-alt' });
funcs.push({ description: `Show ${this.layoutDoc._showFullRes ? 'Dynamic Res' : 'Full Res'}`, event: this.resolution, icon: 'expand' });
funcs.push({ description: 'Set Native Pixel Size', event: this.setNativeSize, icon: 'expand-arrows-alt' });
@@ -654,7 +329,6 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
}),
icon: 'pencil-alt',
});
- ContextMenu.Instance?.addItem({ description: 'Quiz Mode', subitems: quizes, icon: 'file-pen' });
ContextMenu.Instance?.addItem({ description: 'Options...', subitems: funcs, icon: 'asterisk' });
}
};
@@ -770,7 +444,7 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
<div className="imageBox-fader" style={{ opacity: backAlpha }}>
<img
alt=""
- ref={this.setRef}
+ ref={action((r: HTMLImageElement | null) => (this.imageRef = r))}
key="paths"
src={srcpath}
style={{ transform, transformOrigin }}
@@ -811,7 +485,7 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
e,
action(moveEv => {
MarqueeAnnotator.clearAnnotations(this._savedAnnotations);
- this._marqueeref.current?.onInitiateSelection([moveEv.clientX, moveEv.clientY]);
+ this.marqueeref.current?.onInitiateSelection([moveEv.clientX, moveEv.clientY]);
return true;
}),
returnFalse,
@@ -823,12 +497,11 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
@action
finishMarquee = () => {
this._getAnchor = AnchorMenu.Instance?.GetAnchor;
- AnchorMenu.Instance.gptFlashcards = this.getImageDesc;
+ this._props.styleProvider?.(this.Document, this._props, StyleProp.AnchorMenuItems);
AnchorMenu.Instance.addToCollection = this._props.DocumentView?.()._props.addDocument;
- AnchorMenu.Instance.makeLabels = this.makeLabels;
- AnchorMenu.Instance.marqueeWidth = this._marqueeref.current?.Width ?? 0;
- AnchorMenu.Instance.marqueeHeight = this._marqueeref.current?.Height ?? 0;
- this._marqueeref.current?.onTerminateSelection();
+ AnchorMenu.Instance.marqueeWidth = this.marqueeref.current?.Width ?? 0;
+ AnchorMenu.Instance.marqueeHeight = this.marqueeref.current?.Height ?? 0;
+ this.marqueeref.current?.onTerminateSelection();
this._props.select(false);
};
focus = (anchor: Doc, options: FocusViewOptions) => (anchor.type === DocumentType.CONFIG ? undefined : this._ffref.current?.focus(anchor, options));
@@ -898,7 +571,7 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
{!this._mainCont.current || !this.DocumentView || !this._annotationLayer.current ? null : (
<MarqueeAnnotator
Document={this.Document}
- ref={this._marqueeref}
+ ref={this.marqueeref}
scrollTop={0}
annotationLayerScrollTop={0}
scaling={returnOne}
@@ -915,8 +588,6 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
// anchorMenuFlashcard={() => this.getImageDesc()}
/>
)}
- {this._quizMode != quizMode.NONE ? this.checkIcon : null}
- {this._quizMode != quizMode.NONE ? this.redoIcon : null}
</div>
);
}