Diffstat (limited to 'src/client/views/nodes')
-rw-r--r--  src/client/views/nodes/ImageBox.tsx | 6
-rw-r--r--  src/client/views/nodes/formattedText/FormattedTextBox.tsx | 6
-rw-r--r--  src/client/views/nodes/generativeFill/generativeFillUtils/generativeFillInterfaces.ts | 20
-rw-r--r--  src/client/views/nodes/imageEditor/GenerativeFillButtons.scss (renamed from src/client/views/nodes/generativeFill/GenerativeFillButtons.scss) | 0
-rw-r--r--  src/client/views/nodes/imageEditor/GenerativeFillButtons.tsx (renamed from src/client/views/nodes/generativeFill/GenerativeFillButtons.tsx) | 2
-rw-r--r--  src/client/views/nodes/imageEditor/ImageEditor.scss (renamed from src/client/views/nodes/generativeFill/GenerativeFill.scss) | 68
-rw-r--r--  src/client/views/nodes/imageEditor/ImageEditor.tsx (renamed from src/client/views/nodes/generativeFill/GenerativeFill.tsx) | 582
-rw-r--r--  src/client/views/nodes/imageEditor/ImageEditorButtons.tsx | 69
-rw-r--r--  src/client/views/nodes/imageEditor/imageEditingUtils/ImageHandler.ts (renamed from src/client/views/nodes/generativeFill/generativeFillUtils/ImageHandler.ts) | 2
-rw-r--r--  src/client/views/nodes/imageEditor/imageEditorUtils/BrushHandler.ts | 29
-rw-r--r--  src/client/views/nodes/imageEditor/imageEditorUtils/GenerativeFillMathHelpers.ts (renamed from src/client/views/nodes/generativeFill/generativeFillUtils/GenerativeFillMathHelpers.ts) | 2
-rw-r--r--  src/client/views/nodes/imageEditor/imageEditorUtils/ImageHandler.ts | 312
-rw-r--r--  src/client/views/nodes/imageEditor/imageEditorUtils/PointerHandler.ts (renamed from src/client/views/nodes/generativeFill/generativeFillUtils/PointerHandler.ts) | 2
-rw-r--r--  src/client/views/nodes/imageEditor/imageEditorUtils/imageEditorConstants.ts (renamed from src/client/views/nodes/generativeFill/generativeFillUtils/generativeFillConstants.ts) | 1
-rw-r--r--  src/client/views/nodes/imageEditor/imageEditorUtils/imageEditorInterfaces.ts | 43
-rw-r--r--  src/client/views/nodes/imageEditor/imageToolUtils/BrushHandler.ts (renamed from src/client/views/nodes/generativeFill/generativeFillUtils/BrushHandler.ts) | 6
-rw-r--r--  src/client/views/nodes/imageEditor/imageToolUtils/ImageHandler.ts | 312
17 files changed, 1214 insertions(+), 248 deletions(-)
diff --git a/src/client/views/nodes/ImageBox.tsx b/src/client/views/nodes/ImageBox.tsx
index 8c7ec959e..d847b7940 100644
--- a/src/client/views/nodes/ImageBox.tsx
+++ b/src/client/views/nodes/ImageBox.tsx
@@ -32,6 +32,7 @@ import { MarqueeAnnotator } from '../MarqueeAnnotator';
import { OverlayView } from '../OverlayView';
import { AnchorMenu } from '../pdf/AnchorMenu';
import { PinDocView, PinProps } from '../PinFuncs';
+import { StickerPalette } from '../smartdraw/StickerPalette';
import { StyleProp } from '../StyleProp';
import { DocumentView } from './DocumentView';
import { FieldView, FieldViewProps } from './FieldView';
@@ -351,6 +352,11 @@ export class ImageBox extends ViewBoxAnnotatableComponent<FieldViewProps>() {
}),
icon: 'pencil-alt',
});
+ funcs.push({
+ description: this.Document.savedAsSticker ? 'Sticker Saved!' : 'Save to Stickers',
+ event: action(undoable(async () => await StickerPalette.addToPalette(this.Document), 'save to palette')),
+ icon: this.Document.savedAsSticker ? 'clipboard-check' : 'file-arrow-down',
+ });
ContextMenu.Instance?.addItem({ description: 'Options...', subitems: funcs, icon: 'asterisk' });
}
};
diff --git a/src/client/views/nodes/formattedText/FormattedTextBox.tsx b/src/client/views/nodes/formattedText/FormattedTextBox.tsx
index 35f1ec9ef..55ad543ca 100644
--- a/src/client/views/nodes/formattedText/FormattedTextBox.tsx
+++ b/src/client/views/nodes/formattedText/FormattedTextBox.tsx
@@ -66,6 +66,7 @@ import { RichTextRules } from './RichTextRules';
import { schema } from './schema_rts';
import { Property } from 'csstype';
import { LabelBox } from '../LabelBox';
+import { StickerPalette } from '../../smartdraw/StickerPalette';
// import * as applyDevTools from 'prosemirror-dev-tools';
export interface FormattedTextBoxProps extends FieldViewProps {
@@ -989,6 +990,11 @@ export class FormattedTextBox extends ViewBoxAnnotatableComponent<FormattedTextB
},
icon: this.Document._layout_autoHeight ? 'lock' : 'unlock',
});
+ optionItems.push({
+ description: this.Document.savedAsSticker ? 'Sticker Saved!' : 'Save to Stickers',
+ event: action(undoable(async () => await StickerPalette.addToPalette(this.Document), 'save to palette')),
+ icon: this.Document.savedAsSticker ? 'clipboard-check' : 'file-arrow-down',
+ });
!options && cm.addItem({ description: 'Options...', subitems: optionItems, icon: 'eye' });
const help = cm.findByDescription('Help...');
const helpItems = help?.subitems ?? [];
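Both hunks above add the same "Save to Stickers" context-menu entry. As a rough sketch of the pattern (the MenuItem and StickerDoc types below are hypothetical stand-ins, not Dash's actual ContextMenuProps), the item pairs a state-dependent label and icon with an async handler that the real code wraps in action(undoable(...)):

```typescript
// Editor's sketch, not project code: minimal stand-ins for the context-menu item
// shape and the StickerPalette.addToPalette call used in the two hunks above.
interface MenuItem {
    description: string;
    icon: string;
    event: () => void | Promise<void>;
}

interface StickerDoc {
    savedAsSticker?: boolean; // flag the real code reads to toggle the label and icon
}

// Builds the same kind of entry ImageBox and FormattedTextBox push onto their option lists.
function makeSaveToStickersItem(doc: StickerDoc, addToPalette: (d: StickerDoc) => Promise<void>): MenuItem {
    return {
        description: doc.savedAsSticker ? 'Sticker Saved!' : 'Save to Stickers',
        icon: doc.savedAsSticker ? 'clipboard-check' : 'file-arrow-down',
        // the real code wraps this in action(undoable(...)) so the save is undoable and MobX-safe
        event: () => addToPalette(doc),
    };
}
```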
diff --git a/src/client/views/nodes/generativeFill/generativeFillUtils/generativeFillInterfaces.ts b/src/client/views/nodes/generativeFill/generativeFillUtils/generativeFillInterfaces.ts
deleted file mode 100644
index 1e7801056..000000000
--- a/src/client/views/nodes/generativeFill/generativeFillUtils/generativeFillInterfaces.ts
+++ /dev/null
@@ -1,20 +0,0 @@
-export interface CursorData {
- x: number;
- y: number;
- width: number;
-}
-
-export interface Point {
- x: number;
- y: number;
-}
-
-export enum BrushMode {
- ADD,
- SUBTRACT,
-}
-
-export interface ImageDimensions {
- width: number;
- height: number;
-}
diff --git a/src/client/views/nodes/generativeFill/GenerativeFillButtons.scss b/src/client/views/nodes/imageEditor/GenerativeFillButtons.scss
index 0180ef904..0180ef904 100644
--- a/src/client/views/nodes/generativeFill/GenerativeFillButtons.scss
+++ b/src/client/views/nodes/imageEditor/GenerativeFillButtons.scss
diff --git a/src/client/views/nodes/generativeFill/GenerativeFillButtons.tsx b/src/client/views/nodes/imageEditor/GenerativeFillButtons.tsx
index fe22b273d..32ed6b307 100644
--- a/src/client/views/nodes/generativeFill/GenerativeFillButtons.tsx
+++ b/src/client/views/nodes/imageEditor/GenerativeFillButtons.tsx
@@ -3,7 +3,7 @@ import * as React from 'react';
import ReactLoading from 'react-loading';
import { Button, IconButton, Type } from 'browndash-components';
import { AiOutlineInfo } from 'react-icons/ai';
-import { activeColor } from './generativeFillUtils/generativeFillConstants';
+import { activeColor } from './imageEditorUtils/imageEditorConstants';
interface ButtonContainerProps {
onClick: () => Promise<void>;
diff --git a/src/client/views/nodes/generativeFill/GenerativeFill.scss b/src/client/views/nodes/imageEditor/ImageEditor.scss
index c2669a950..c691e6a18 100644
--- a/src/client/views/nodes/generativeFill/GenerativeFill.scss
+++ b/src/client/views/nodes/imageEditor/ImageEditor.scss
@@ -2,7 +2,7 @@ $navHeight: 5rem;
$canvasSize: 1024px;
$scale: 0.5;
-.generativeFillContainer {
+.imageEditorContainer {
position: absolute;
top: 0;
left: 0;
@@ -13,7 +13,7 @@ $scale: 0.5;
flex-direction: column;
overflow: hidden;
- .generativeFillControls {
+ .imageEditorTopBar {
flex-shrink: 0;
height: $navHeight;
color: #000000;
@@ -27,6 +27,12 @@ $scale: 0.5;
border-bottom: 1px solid #c7cdd0;
padding: 0 2rem;
+ .imageEditorControls {
+ display: flex;
+ align-items: center;
+ gap: 1.5rem;
+ }
+
h1 {
font-size: 1.5rem;
}
@@ -69,13 +75,48 @@ $scale: 0.5;
}
}
- .iconContainer {
+ .sideControlsContainer {
+ width: 160px;
position: absolute;
- top: 2rem;
- left: 2rem;
- display: flex;
- flex-direction: column;
- gap: 2rem;
+ left: 0;
+ height: 100%;
+
+ .sideControls {
+ position: absolute;
+ width: 120px;
+ top: 3rem;
+ left: 2rem;
+ display: flex;
+ flex-direction: column;
+ gap: 1rem;
+
+ .imageToolsContainer {
+ display: flex;
+ flex-direction: column;
+ gap: 10px;
+ }
+
+ .cutToolsContainer {
+ display: grid;
+ gap: 5px;
+ grid-template-columns: 1fr 1fr;
+ }
+
+ .undoRedoContainer {
+ justify-content: center;
+ display: flex;
+ flex-direction: row;
+ }
+
+ .sliderContainer {
+ margin: 3rem 0;
+ height: 225px;
+ width: 100%;
+ display: flex;
+ justify-content: center;
+ cursor: pointer;
+ }
+ }
}
.editsBox {
@@ -86,7 +127,18 @@ $scale: 0.5;
flex-direction: column;
gap: 1rem;
+ .originalImageLabel {
+ position: absolute;
+ bottom: 10;
+ left: 10;
+ color: #ffffff;
+ font-size: 0.8rem;
+ letter-spacing: 1px;
+ text-transform: uppercase;
+ }
+
img {
+ cursor: pointer;
transition: all 0.2s ease-in-out;
&:hover {
opacity: 0.8;
diff --git a/src/client/views/nodes/generativeFill/GenerativeFill.tsx b/src/client/views/nodes/imageEditor/ImageEditor.tsx
index 261eb4bb4..a39878924 100644
--- a/src/client/views/nodes/generativeFill/GenerativeFill.tsx
+++ b/src/client/views/nodes/imageEditor/ImageEditor.tsx
@@ -4,7 +4,7 @@
/* eslint-disable jsx-a11y/click-events-have-key-events */
/* eslint-disable react/function-component-definition */
import { Checkbox, FormControlLabel, Slider, TextField } from '@mui/material';
-import { IconButton } from 'browndash-components';
+import { Button, IconButton, Type } from 'browndash-components';
import * as React from 'react';
import { useEffect, useRef, useState } from 'react';
import { CgClose } from 'react-icons/cg';
@@ -20,17 +20,16 @@ import { CollectionDockingView } from '../../collections/CollectionDockingView';
import { CollectionFreeFormView } from '../../collections/collectionFreeForm';
import { ImageEditorData } from '../ImageBox';
import { OpenWhereMod } from '../OpenWhere';
-import './GenerativeFill.scss';
-import { EditButtons, CutButtons } from './GenerativeFillButtons';
-import { BrushHandler, BrushType } from './generativeFillUtils/BrushHandler';
-import { APISuccess, ImageUtility } from './generativeFillUtils/ImageHandler';
-import { PointerHandler } from './generativeFillUtils/PointerHandler';
-import { activeColor, canvasSize, eraserColor, freeformRenderSize, newCollectionSize, offsetDistanceY, offsetX } from './generativeFillUtils/generativeFillConstants';
-import { CursorData, ImageDimensions, Point } from './generativeFillUtils/generativeFillInterfaces';
+import './ImageEditor.scss';
+import { ApplyFuncButtons, ImageToolButton } from './ImageEditorButtons';
+import { BrushHandler } from './imageEditorUtils/BrushHandler';
+import { APISuccess, ImageUtility } from './imageEditorUtils/ImageHandler';
+import { PointerHandler } from './imageEditorUtils/PointerHandler';
+import { activeColor, bgColor, brushWidthOffset, canvasSize, eraserColor, freeformRenderSize, newCollectionSize, offsetDistanceY, offsetX } from './imageEditorUtils/imageEditorConstants';
+import { CutMode, CursorData, ImageDimensions, ImageEditTool, ImageToolType, Point } from './imageEditorUtils/imageEditorInterfaces';
import { DocumentView } from '../DocumentView';
-import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
-import { ImageField } from '../../../../fields/URLField';
-import { resolve } from 'url';
+import { DocData } from '../../../../fields/DocSymbols';
+import { SettingsManager } from '../../../util/SettingsManager';
interface GenerativeFillProps {
imageEditorOpen: boolean;
@@ -41,7 +40,14 @@ interface GenerativeFillProps {
// Added field on image doc: gen_fill_children: List of children Docs
-const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addDoc }: GenerativeFillProps) => {
+/**
+ * The image editor is opened from a document's context menu via Options --> Open Image Editor. It supports
+ * several operations on images. The Generative Fill tool lets users erase part of an image, add an optional
+ * prompt, and send the result to OpenAI's image-edits endpoint, which returns a newly generated image that
+ * fills the erased region based on that prompt. The Cut tool lets users cut images in different ways to
+ * reshape them, remove portions, and generally use them more creatively (see the header comment on cutImage() for details).
+ */
+const ImageEditor = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addDoc }: GenerativeFillProps) => {
const canvasRef = useRef<HTMLCanvasElement>(null);
const canvasBackgroundRef = useRef<HTMLCanvasElement>(null);
const drawingAreaRef = useRef<HTMLDivElement>(null);
@@ -55,13 +61,14 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
// format: array of [image source, corresponding image Doc]
const [edits, setEdits] = useState<{ url: string; saveRes: Doc | undefined }[]>([]);
const [edited, setEdited] = useState(false);
- // const [brushStyle] = useState<BrushStyle>(BrushStyle.ADD);
+ const [isFirstDoc, setIsFirstDoc] = useState<boolean>(true);
const [input, setInput] = useState('');
const [loading, setLoading] = useState(false);
const [canvasDims, setCanvasDims] = useState<ImageDimensions>({
width: canvasSize,
height: canvasSize,
});
+ const [cutType, setCutType] = useState<CutMode>(CutMode.IN);
// whether to create a new collection or not
const [isNewCollection, setIsNewCollection] = useState(true);
// the current image in the main canvas
@@ -82,6 +89,24 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
// constants for image cutting
const cutPts = useRef<Point[]>([]);
+ /**
+ * Switches the active image editing tool and resets the cursor width to that tool's default size.
+ * @param type The new tool type to switch to
+ */
+ const changeTool = (type: ImageToolType) => {
+ switch (type) {
+ case ImageToolType.GenerativeFill:
+ setCurrTool(genFillTool);
+ setCursorData(prev => ({ ...prev, width: genFillTool.sliderDefault as number }));
+ break;
+ case ImageToolType.Cut:
+ setCurrTool(cutTool);
+ setCursorData(prev => ({ ...prev, width: cutTool.sliderDefault as number }));
+ break;
+ default:
+ break;
+ }
+ };
// Undo and Redo
const handleUndo = () => {
const ctx = ImageUtility.getCanvasContext(canvasRef);
@@ -121,6 +146,7 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
ctx.clearRect(0, 0, canvasSize, canvasSize);
undoStack.current = [];
redoStack.current = [];
+ cutPts.current.length = 0;
ImageUtility.drawImgToCanvas(currImg.current, canvasRef, canvasDims.width, canvasDims.height);
};
@@ -161,7 +187,7 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
x: currPoint.x - e.movementX / canvasScale,
y: currPoint.y - e.movementY / canvasScale,
};
- const pts = BrushHandler.createBrushPathOverlay(lastPoint, currPoint, cursorData.width / 2 / canvasScale, ctx, eraserColor, BrushType.CUT);
+ const pts = BrushHandler.createBrushPathOverlay(lastPoint, currPoint, cursorData.width / 2 / canvasScale, ctx, eraserColor);
cutPts.current.push(...pts);
};
@@ -261,7 +287,7 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
}));
};
- // Get AI Edit
+ // Get AI Edit for Generative Fill
const getEdit = async () => {
const img = currImg.current;
if (!img) return;
@@ -282,32 +308,14 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
// create first image
if (!newCollectionRef.current) {
- if (!isNewCollection && imageRootDoc) {
- // if the parent hasn't been set yet
- if (!parentDoc.current) parentDoc.current = imageRootDoc;
- } else {
- if (!(originalImg.current && imageRootDoc)) return;
- // create new collection and add it to the view
- newCollectionRef.current = Docs.Create.FreeformDocument([], {
- x: NumCast(imageRootDoc.x) + NumCast(imageRootDoc._width) + offsetX,
- y: NumCast(imageRootDoc.y),
- _width: newCollectionSize,
- _height: newCollectionSize,
- title: 'Image edit collection',
- });
- DocUtils.MakeLink(imageRootDoc, newCollectionRef.current, { link_relationship: 'Image Edit Version History' });
-
- // opening new tab
- CollectionDockingView.AddSplit(newCollectionRef.current, OpenWhereMod.right);
-
- // add the doc to the main freeform
- // eslint-disable-next-line no-use-before-define
- await createNewImgDoc(originalImg.current, true);
- }
+ createNewCollection();
} else {
childrenDocs.current = [];
}
-
+ if (!(originalImg.current && imageRootDoc)) return;
+ // add the doc to the main freeform
+ // eslint-disable-next-line no-use-before-define
+ await createNewImgDoc(originalImg.current, true);
originalImg.current = currImg.current;
originalDoc.current = parentDoc.current;
const { urls } = res as APISuccess;
@@ -334,66 +342,137 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
setLoading(false);
};
- const cutImage = async () => {
+ /**
+ * Performs image cutting based on the given CutMode. There are currently four ways to cut images:
+ * 1. By outlining the area that should be kept (CutMode.IN)
+ * 2. By outlining the area that should be removed (CutMode.OUT)
+ * 3. By drawing over the area that should be kept: where the image is brushed it remains, and everything else is removed (CutMode.DRAW_IN)
+ * 4. By drawing over the area that should be removed, so this operates as an eraser (CutMode.ERASE)
+ * @param currCutType CutMode enum that determines what kind of cutting operation to perform
+ * @param brushWidth the current brush width, used as the stroke width for CutMode.DRAW_IN
+ * @param prevEdits the edits made so far, so the new result can be appended to the thumbnail list
+ * @param firstDoc whether this is the first edited image; used to position the edited images when they render on the canvas
+ */
+ const cutImage = async (currCutType: CutMode, brushWidth: number, prevEdits: { url: string; saveRes: Doc | undefined }[], firstDoc: boolean) => {
const img = currImg.current;
const canvas = canvasRef.current;
if (!canvas || !img) return;
- canvas.width = img.naturalWidth;
- canvas.height = img.naturalHeight;
const ctx = ImageUtility.getCanvasContext(canvasRef);
if (!ctx) return;
- ctx.globalCompositeOperation = 'source-over';
- setLoading(true);
- setEdited(true);
// get the original image
const canvasOriginalImg = ImageUtility.getCanvasImg(img);
if (!canvasOriginalImg) return;
- // draw the image onto the canvas
- ctx.drawImage(img, 0, 0);
- // get the mask which i assume is the thing the user draws on
- // const canvasMask = ImageUtility.getCanvasMask(canvas, canvasOriginalImg);
- // if (!canvasMask) return;
- // canvasMask.width = canvas.width;
- // canvasMask.height = canvas.height;
- // now put the user's path around the mask
- if (cutPts.current.length) {
+ setLoading(true);
+ const currPts = [...cutPts.current];
+ if (currCutType !== CutMode.ERASE) handleReset(); // clears the visible brush strokes (mostly needed for CutMode.DRAW_IN) unless we're erasing, which depends on those strokes
+ let minX = img.width;
+ let maxX = 0;
+ let minY = img.height;
+ let maxY = 0;
+ // currPts is populated by the brush strokes' points, so this code is drawing a path along the points
+ if (currPts.length) {
ctx.beginPath();
- ctx.moveTo(cutPts.current[0].x, cutPts.current[0].y); // later check edge case where cutPts is empty
- for (let i = 0; i < cutPts.current.length; i++) {
- ctx.lineTo(cutPts.current[i].x, cutPts.current[i].y);
+ ctx.moveTo(currPts[0].x, currPts[0].y);
+ for (let i = 0; i < currPts.length; i++) {
+ ctx.lineTo(currPts[i].x, currPts[i].y);
+ minX = Math.min(currPts[i].x, minX);
+ minY = Math.min(currPts[i].y, minY);
+ maxX = Math.max(currPts[i].x, maxX);
+ maxY = Math.max(currPts[i].y, maxY);
+ }
+ // use different canvas operations depending on the type of cutting we're applying
+ switch (currCutType) {
+ case CutMode.IN:
+ ctx.closePath();
+ ctx.globalCompositeOperation = 'destination-in';
+ ctx.fill();
+ break;
+ case CutMode.OUT:
+ ctx.closePath();
+ ctx.globalCompositeOperation = 'destination-out';
+ ctx.fill();
+ break;
+ case CutMode.DRAW_IN:
+ ctx.globalCompositeOperation = 'destination-in';
+ ctx.lineWidth = brushWidth + brushWidthOffset; // added offset because width gets cut off a little bit
+ ctx.stroke();
+ break;
}
- ctx.closePath();
- ctx.stroke();
- ctx.fill();
- // ctx.clip();
}
- const url = canvas.toDataURL(); // this does the same thing as convert img to canvasurl
+
+ const url = canvas.toDataURL();
if (!newCollectionRef.current) {
- if (!isNewCollection && imageRootDoc) {
- // if the parent hasn't been set yet
- if (!parentDoc.current) parentDoc.current = imageRootDoc;
- } else {
- if (!(originalImg.current && imageRootDoc)) return;
- // create new collection and add it to the view
- newCollectionRef.current = Docs.Create.FreeformDocument([], {
- x: NumCast(imageRootDoc.x) + NumCast(imageRootDoc._width) + offsetX,
- y: NumCast(imageRootDoc.y),
- _width: newCollectionSize,
- _height: newCollectionSize,
- title: 'Image edit collection',
- });
- DocUtils.MakeLink(imageRootDoc, newCollectionRef.current, { link_relationship: 'Image Edit Version History' });
- // opening new tab
- CollectionDockingView.AddSplit(newCollectionRef.current, OpenWhereMod.right);
- }
+ createNewCollection();
}
+
const image = new Image();
image.src = url;
- await createNewImgDoc(image, true);
- // add the doc to the main freeform
- // eslint-disable-next-line no-use-before-define
- setLoading(false);
- cutPts.current.length = 0;
+ image.onload = async () => {
+ let finalImg: HTMLImageElement | undefined = image;
+ let finalImgURL: string = url;
+ // crop the image for these brush modes to remove excess blank space around the image contents
+ if (currCutType == CutMode.IN || currCutType == CutMode.DRAW_IN) {
+ const croppedData = cropImage(image, minX, maxX, minY, maxY);
+ finalImg = croppedData;
+ finalImgURL = croppedData.src;
+ }
+ currImg.current = finalImg;
+ const newImgDoc = await createNewImgDoc(finalImg, firstDoc);
+ if (newImgDoc) {
+ // set the image to transparent to remove the background / brushstrokes
+ const docData = newImgDoc[DocData];
+ docData.backgroundColor = 'transparent';
+ docData.disableMixBlend = true;
+ if (firstDoc) setIsFirstDoc(false);
+ setEdits([...prevEdits, { url: finalImgURL, saveRes: undefined }]);
+ }
+ setLoading(false);
+ cutPts.current.length = 0;
+ };
+ };
+
+ /**
+ * Creates a new collection to hold the image edits. If "Create New Collection" is checked, the collection is
+ * linked to the source image and opened in a new tab on the right; otherwise edits are parented to the original image doc.
+ */
+ const createNewCollection = () => {
+ if (!isNewCollection && imageRootDoc) {
+ // if the parent hasn't been set yet
+ if (!parentDoc.current) parentDoc.current = imageRootDoc;
+ } else {
+ if (!(originalImg.current && imageRootDoc)) return;
+ // create new collection and add it to the view
+ newCollectionRef.current = Docs.Create.FreeformDocument([], {
+ x: NumCast(imageRootDoc.x) + NumCast(imageRootDoc._width) + offsetX,
+ y: NumCast(imageRootDoc.y),
+ _width: newCollectionSize,
+ _height: newCollectionSize,
+ title: 'Image edit collection',
+ });
+ DocUtils.MakeLink(imageRootDoc, newCollectionRef.current, { link_relationship: 'Image Edit Version History' });
+ // opening new tab
+ CollectionDockingView.AddSplit(newCollectionRef.current, OpenWhereMod.right);
+ }
+ };
+
+ /**
+ * Crops an image to the given bounding box. This is used to automatically shrink edited images that end up
+ * smaller than the original (e.g. when cutting out a small part of the image).
+ */
+ const cropImage = (image: HTMLImageElement, minX: number, maxX: number, minY: number, maxY: number) => {
+ const croppedCanvas = document.createElement('canvas');
+ const croppedCtx = croppedCanvas.getContext('2d');
+ if (!croppedCtx) return image;
+ const cropWidth = Math.abs(maxX - minX);
+ const cropHeight = Math.abs(maxY - minY);
+ croppedCanvas.width = cropWidth;
+ croppedCanvas.height = cropHeight;
+ croppedCtx.globalCompositeOperation = 'source-over';
+ croppedCtx.clearRect(0, 0, cropWidth, cropHeight);
+ croppedCtx.drawImage(image, minX, minY, cropWidth, cropHeight, 0, 0, cropWidth, cropHeight);
+ const croppedURL = croppedCanvas.toDataURL();
+ const croppedImage = new Image();
+ croppedImage.src = croppedURL;
+ return croppedImage;
};
// adjusts all the img positions to be aligned
@@ -416,7 +495,7 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
};
// creates a new image document and returns its reference
- const createNewImgDoc = async (img: HTMLImageElement, firstDoc: boolean): Promise<Doc | undefined> => {
+ const createNewImgDoc = async (img: HTMLImageElement, firstDoc: boolean, parent?: Doc): Promise<Doc | undefined> => {
if (!imageRootDoc) return undefined;
const { src } = img;
const [result] = await Networking.PostToServer('/uploadRemoteImage', { sources: [src] });
@@ -495,14 +574,22 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
DocumentView.addViewRenderedCb(newCollectionRef.current, dv => (dv.ComponentView as CollectionFreeFormView)?.fitContentOnce());
}
setEdits([]);
+ setIsFirstDoc(true);
};
- return (
- <div className="generativeFillContainer" style={{ display: imageEditorOpen ? 'flex' : 'none' }}>
- <div className="generativeFillControls">
+ // defines the tools and sets current tool
+ const genFillTool: ImageEditTool = { type: ImageToolType.GenerativeFill, name: 'Generative Fill', btnText: 'GET EDITS', icon: 'fill', applyFunc: getEdit, sliderMin: 25, sliderMax: 500, sliderDefault: 150 };
+ const cutTool: ImageEditTool = { type: ImageToolType.Cut, name: 'Cut', btnText: 'CUT IMAGE', icon: 'scissors', applyFunc: cutImage, sliderMin: 1, sliderMax: 50, sliderDefault: 5 };
+ const imageEditTools: ImageEditTool[] = [genFillTool, cutTool];
+ const [currTool, setCurrTool] = useState<ImageEditTool>(genFillTool);
+
+ // the top bar controls for toggling a new collection, resetting, applying edits, and closing the editor
+ function renderControls() {
+ return (
+ <div className="imageEditorTopBar">
<h1>Image Editor</h1>
{/* <IconButton text="Cut out" icon={<FontAwesomeIcon icon="scissors" />} /> */}
- <div style={{ display: 'flex', alignItems: 'center', gap: '1.5rem' }}>
+ <div className="imageEditorControls">
<FormControlLabel
control={
<Checkbox
@@ -518,153 +605,201 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
labelPlacement="end"
sx={{ whiteSpace: 'nowrap' }}
/>
- <EditButtons onClick={getEdit} loading={loading} onReset={handleReset} />
- <CutButtons onClick={cutImage} loading={loading} onReset={handleReset} />
+ <ApplyFuncButtons onClick={() => currTool.applyFunc(cutType, cursorData.width, edits, isFirstDoc)} loading={loading} onReset={handleReset} btnText={currTool.btnText} />
<IconButton color={activeColor} tooltip="close" icon={<CgClose size="16px" />} onClick={handleViewClose} />
</div>
</div>
- {/* Main canvas for editing */}
- <div
- className="drawingArea" // this only works if pointerevents: none is set on the custom pointer
- ref={drawingAreaRef}
- onPointerOver={updateCursorData}
- onPointerMove={updateCursorData}
- onPointerDown={handlePointerDown}
- onPointerUp={handlePointerUp}>
- <canvas ref={canvasRef} width={canvasDims.width} height={canvasDims.height} style={{ transform: `scale(${canvasScale})` }} />
- <canvas ref={canvasBackgroundRef} width={canvasDims.width} height={canvasDims.height} style={{ transform: `scale(${canvasScale})` }} />
- <div
- className="pointer"
- style={{
- left: cursorData.x,
- top: cursorData.y,
- width: cursorData.width,
- height: cursorData.width,
- }}>
- <div className="innerPointer" />
- </div>
- {/* Icons */}
- <div className="iconContainer">
+ );
+ }
+
+ // the side icons including tool type, the slider, and undo/redo
+ function renderSideIcons() {
+ return (
+ <div className="sideControlsContainer" style={{ backgroundColor: bgColor }}>
+ <div className="sideControls">
+ <div className="imageToolsContainer">
+ {imageEditTools.map(tool => {
+ return ImageToolButton(tool, tool.type === currTool.type, changeTool);
+ })}
+ </div>
+ {currTool.type == ImageToolType.Cut && (
+ <div className="cutToolsContainer">
+ <Button
+ style={{ width: '100%' }}
+ text="Keep in"
+ type={Type.TERT}
+ color={cutType == CutMode.IN ? SettingsManager.userColor : bgColor}
+ onClick={() => {
+ setCutType(CutMode.IN);
+ }}
+ />
+ <Button
+ style={{ width: '100%' }}
+ text="Keep out"
+ type={Type.TERT}
+ color={cutType == CutMode.OUT ? SettingsManager.userColor : bgColor}
+ onClick={() => {
+ setCutType(CutMode.OUT);
+ }}
+ />
+ <Button
+ style={{ width: '100%' }}
+ text="Draw in"
+ type={Type.TERT}
+ color={cutType == CutMode.DRAW_IN ? SettingsManager.userColor : bgColor}
+ onClick={() => {
+ setCutType(CutMode.DRAW_IN);
+ }}
+ />
+ <Button
+ style={{ width: '100%' }}
+ text="Erase"
+ type={Type.TERT}
+ color={cutType == CutMode.ERASE ? SettingsManager.userColor : bgColor}
+ onClick={() => {
+ setCutType(CutMode.ERASE);
+ }}
+ />
+ </div>
+ )}
+ <div className="sliderContainer" onPointerDown={e => e.stopPropagation()}>
+ {currTool.type === ImageToolType.GenerativeFill && (
+ <Slider
+ sx={{
+ '& input[type="range"]': {
+ WebkitAppearance: 'slider-vertical',
+ },
+ }}
+ orientation="vertical"
+ min={genFillTool.sliderMin}
+ max={genFillTool.sliderMax}
+ defaultValue={genFillTool.sliderDefault}
+ size="small"
+ valueLabelDisplay="auto"
+ onChange={(e, val) => {
+ setCursorData(prev => ({ ...prev, width: val as number }));
+ }}
+ />
+ )}
+ {currTool.type === ImageToolType.Cut && (
+ <Slider
+ sx={{
+ '& input[type="range"]': {
+ WebkitAppearance: 'slider-vertical',
+ },
+ }}
+ orientation="vertical"
+ min={cutTool.sliderMin}
+ max={cutTool.sliderMax}
+ defaultValue={cutTool.sliderDefault}
+ size="small"
+ valueLabelDisplay="auto"
+ onChange={(e, val) => {
+ setCursorData(prev => ({ ...prev, width: val as number }));
+ }}
+ />
+ )}
+ </div>
{/* Undo and Redo */}
- <IconButton
- style={{ cursor: 'pointer' }}
- onPointerDown={e => {
- e.stopPropagation();
- handleUndo();
- }}
- onPointerUp={e => {
- e.stopPropagation();
- }}
- color={activeColor}
- tooltip="Undo"
- icon={<IoMdUndo />}
- />
- <IconButton
- style={{ cursor: 'pointer' }}
- onPointerDown={e => {
- e.stopPropagation();
- handleRedo();
- }}
- onPointerUp={e => {
- e.stopPropagation();
- }}
- color={activeColor}
- tooltip="Redo"
- icon={<IoMdRedo />}
- />
- <div onPointerDown={e => e.stopPropagation()} style={{ height: 225, width: '100%', display: 'flex', justifyContent: 'center', cursor: 'pointer' }}>
- <Slider
- sx={{
- '& input[type="range"]': {
- WebkitAppearance: 'slider-vertical',
- },
+ <div className="undoRedoContainer">
+ <IconButton
+ style={{ cursor: 'pointer' }}
+ onPointerDown={e => {
+ e.stopPropagation();
+ handleUndo();
}}
- orientation="vertical"
- min={25}
- max={500}
- defaultValue={150}
- size="small"
- valueLabelDisplay="auto"
- onChange={(e: any, val: any) => {
- setCursorData(prev => ({ ...prev, width: val as number }));
+ onPointerUp={e => {
+ e.stopPropagation();
}}
+ color={activeColor}
+ tooltip="Undo"
+ icon={<IoMdUndo />}
/>
- </div>
- <div onPointerDown={e => e.stopPropagation()} style={{ height: 225, width: '100%', display: 'flex', justifyContent: 'center', cursor: 'pointer' }}>
- <Slider
- sx={{
- '& input[type="range"]': {
- WebkitAppearance: 'slider-vertical',
- },
+ <IconButton
+ style={{ cursor: 'pointer' }}
+ onPointerDown={e => {
+ e.stopPropagation();
+ handleRedo();
}}
- orientation="vertical"
- min={1}
- max={500}
- defaultValue={150}
- size="small"
- valueLabelDisplay="auto"
- onChange={(e: any, val: any) => {
- setCursorData(prev => ({ ...prev, width: val as number }));
+ onPointerUp={e => {
+ e.stopPropagation();
}}
+ color={activeColor}
+ tooltip="Redo"
+ icon={<IoMdRedo />}
/>
</div>
</div>
- {/* Edits thumbnails */}
- <div className="editsBox">
- {edits.map((edit, i) => (
+ </div>
+ );
+ }
+
+ // circular pointer for drawing/erasing
+ function renderPointer() {
+ return (
+ <div
+ className="pointer"
+ style={{
+ left: cursorData.x,
+ top: cursorData.y,
+ width: cursorData.width,
+ height: cursorData.width,
+ }}>
+ <div className="innerPointer" />
+ </div>
+ );
+ }
+
+ // the previews for each edit
+ function renderEditThumbnails() {
+ return (
+ <div className="editsBox">
+ {edits.map((edit, i) => (
+ <img
+ // eslint-disable-next-line react/no-array-index-key
+ key={i}
+ alt="image edits"
+ width={75}
+ src={edit.url}
+ onClick={async () => {
+ const img = new Image();
+ img.src = edit.url;
+ ImageUtility.drawImgToCanvas(img, canvasRef, img.width, img.height);
+ currImg.current = img;
+ parentDoc.current = edit.saveRes ?? null;
+ }}
+ />
+ ))}
+ {/* Original img thumbnail */}
+ {edits.length > 0 && (
+ <div style={{ position: 'relative' }}>
+ <label className="originalImageLabel">Original</label>
<img
- // eslint-disable-next-line react/no-array-index-key
- key={i}
- alt="image edits"
+ alt="image stuff"
width={75}
- src={edit.url}
- style={{ cursor: 'pointer' }}
- onClick={async () => {
+ src={originalImg.current?.src}
+ onClick={() => {
+ if (!originalImg.current) return;
const img = new Image();
- img.src = edit.url;
+ img.src = originalImg.current.src;
ImageUtility.drawImgToCanvas(img, canvasRef, canvasDims.width, canvasDims.height);
currImg.current = img;
- parentDoc.current = edit.saveRes ?? null;
+ if (!parentDoc.current) parentDoc.current = originalDoc.current;
}}
/>
- ))}
- {/* Original img thumbnail */}
- {edits.length > 0 && (
- <div style={{ position: 'relative' }}>
- <label
- style={{
- position: 'absolute',
- bottom: 10,
- left: 10,
- color: '#ffffff',
- fontSize: '0.8rem',
- letterSpacing: '1px',
- textTransform: 'uppercase',
- }}>
- Original
- </label>
- <img
- alt="image stuff"
- width={75}
- src={originalImg.current?.src}
- style={{ cursor: 'pointer' }}
- onClick={() => {
- if (!originalImg.current) return;
- const img = new Image();
- img.src = originalImg.current.src;
- ImageUtility.drawImgToCanvas(img, canvasRef, canvasDims.width, canvasDims.height);
- currImg.current = img;
- parentDoc.current = originalDoc.current;
- }}
- />
- </div>
- )}
- </div>
+ </div>
+ )}
</div>
+ );
+ }
+
+ // the prompt box for generative fill
+ function renderPromptBox() {
+ return (
<div>
<TextField
value={input}
- onChange={(e: any) => setInput(e.target.value)}
+ onChange={e => setInput(e.target.value)}
disabled={isBrushing}
type="text"
label="Prompt"
@@ -680,8 +815,29 @@ const GenerativeFill = ({ imageEditorOpen, imageEditorSource, imageRootDoc, addD
}}
/>
</div>
+ );
+ }
+
+ return (
+ <div className="imageEditorContainer" style={{ display: imageEditorOpen ? 'flex' : 'none' }}>
+ {renderControls()}
+ {/* Main canvas for editing */}
+ <div
+ className="drawingArea" // this only works if pointerevents: none is set on the custom pointer
+ ref={drawingAreaRef}
+ onPointerOver={updateCursorData}
+ onPointerMove={updateCursorData}
+ onPointerDown={handlePointerDown}
+ onPointerUp={handlePointerUp}>
+ <canvas ref={canvasRef} width={canvasDims.width} height={canvasDims.height} style={{ transform: `scale(${canvasScale})` }} />
+ <canvas ref={canvasBackgroundRef} width={canvasDims.width} height={canvasDims.height} style={{ transform: `scale(${canvasScale})` }} />
+ {renderPointer()}
+ {renderSideIcons()}
+ {renderEditThumbnails()}
+ </div>
+ {currTool.type === ImageToolType.GenerativeFill && renderPromptBox()}
</div>
);
};
-export default GenerativeFill;
+export default ImageEditor;
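The cutImage() header comment above describes four CutMode behaviors. Below is a minimal, standalone sketch of the core compositing logic only (plain canvas API; the editor's refs, cropping, and Doc creation are omitted), assuming the canvas already contains the image and `pts` holds the recorded brush-path points:

```typescript
enum CutMode { IN, OUT, DRAW_IN, ERASE }
interface Point { x: number; y: number }

// Applies one cut to a canvas that already shows the image; `pts` is the recorded brush path.
function applyCut(ctx: CanvasRenderingContext2D, pts: Point[], mode: CutMode, brushWidth: number) {
    if (!pts.length) return;
    ctx.beginPath();
    ctx.moveTo(pts[0].x, pts[0].y);
    pts.forEach(p => ctx.lineTo(p.x, p.y));
    switch (mode) {
        case CutMode.IN: // keep only the outlined region
            ctx.closePath();
            ctx.globalCompositeOperation = 'destination-in';
            ctx.fill();
            break;
        case CutMode.OUT: // remove the outlined region
            ctx.closePath();
            ctx.globalCompositeOperation = 'destination-out';
            ctx.fill();
            break;
        case CutMode.DRAW_IN: // keep only what the brush stroke covers
            ctx.globalCompositeOperation = 'destination-in';
            ctx.lineWidth = brushWidth;
            ctx.stroke();
            break;
        case CutMode.ERASE: // erasing already happened while brushing (destination-out circles)
            break;
    }
}
```

In the actual component, IN and DRAW_IN results are additionally cropped to the path's bounding box (cropImage) before a new image Doc is created.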
diff --git a/src/client/views/nodes/imageEditor/ImageEditorButtons.tsx b/src/client/views/nodes/imageEditor/ImageEditorButtons.tsx
new file mode 100644
index 000000000..cb963616b
--- /dev/null
+++ b/src/client/views/nodes/imageEditor/ImageEditorButtons.tsx
@@ -0,0 +1,69 @@
+import './GenerativeFillButtons.scss';
+import * as React from 'react';
+import ReactLoading from 'react-loading';
+import { Button, IconButton, Type } from 'browndash-components';
+import { AiOutlineInfo } from 'react-icons/ai';
+import { bgColor } from './imageEditorUtils/imageEditorConstants';
+import { ImageEditTool, ImageToolType } from './imageEditorUtils/imageEditorInterfaces';
+import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
+import { SettingsManager } from '../../../util/SettingsManager';
+
+interface ButtonContainerProps {
+ onClick: () => Promise<void>;
+ loading: boolean;
+ onReset: () => void;
+ btnText: string;
+}
+
+export function ApplyFuncButtons({ loading, onClick: getEdit, onReset, btnText }: ButtonContainerProps) {
+ return (
+ <div className="generativeFillBtnContainer">
+ <Button text="RESET" type={Type.PRIM} color={SettingsManager.userVariantColor} onClick={onReset} />
+ {loading ? (
+ <Button
+ text={btnText}
+ type={Type.TERT}
+ color={SettingsManager.userVariantColor}
+ icon={<ReactLoading type="spin" color="#ffffff" width={20} height={20} />}
+ iconPlacement="right"
+ onClick={() => {
+ if (!loading) getEdit();
+ }}
+ />
+ ) : (
+ <Button
+ text={btnText}
+ type={Type.TERT}
+ color={SettingsManager.userVariantColor}
+ onClick={() => {
+ if (!loading) getEdit();
+ }}
+ />
+ )}
+ <IconButton
+ type={Type.SEC}
+ color={SettingsManager.userVariantColor}
+ tooltip="Open Documentation"
+ icon={<AiOutlineInfo size="16px" />}
+ onClick={() => window.open('https://brown-dash.github.io/Dash-Documentation/features/generativeai/#editing', '_blank')}
+ />
+ </div>
+ );
+}
+
+export function ImageToolButton(tool: ImageEditTool, isActive: boolean, selectTool: (type: ImageToolType) => void) {
+ return (
+ <div className="imageEditorButtonContainer">
+ <Button
+ style={{ width: '100%' }}
+ text={tool.name}
+ type={Type.TERT}
+ color={isActive ? SettingsManager.userVariantColor : bgColor}
+ icon={<FontAwesomeIcon icon={tool.icon} />}
+ onClick={() => {
+ selectTool(tool.type);
+ }}
+ />
+ </div>
+ );
+}
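ApplyFuncButtons above expects an async onClick plus a loading flag supplied by its caller. A minimal, hypothetical usage sketch follows (the host component and its state wiring are illustrative, not the editor's exact code, which manages loading inside the tool's apply function):

```tsx
import * as React from 'react';
import { useState } from 'react';
import { ApplyFuncButtons } from './ImageEditorButtons';

// Hypothetical host component: wires loading state around a tool's apply function.
export function ApplyBarExample({ applyTool }: { applyTool: () => Promise<void> }) {
    const [loading, setLoading] = useState(false);
    const onClick = async () => {
        setLoading(true);
        try {
            await applyTool(); // e.g. currTool.applyFunc(...) in ImageEditor.tsx
        } finally {
            setLoading(false);
        }
    };
    return <ApplyFuncButtons btnText="CUT IMAGE" loading={loading} onClick={onClick} onReset={() => { /* clear brush strokes */ }} />;
}
```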
diff --git a/src/client/views/nodes/generativeFill/generativeFillUtils/ImageHandler.ts b/src/client/views/nodes/imageEditor/imageEditingUtils/ImageHandler.ts
index 24dba1778..514e8a94f 100644
--- a/src/client/views/nodes/generativeFill/generativeFillUtils/ImageHandler.ts
+++ b/src/client/views/nodes/imageEditor/imageEditingUtils/ImageHandler.ts
@@ -1,5 +1,5 @@
import { RefObject } from 'react';
-import { bgColor, canvasSize } from './generativeFillConstants';
+import { bgColor, canvasSize } from '../imageEditorUtils/imageEditorConstants';
export interface APISuccess {
status: 'success';
diff --git a/src/client/views/nodes/imageEditor/imageEditorUtils/BrushHandler.ts b/src/client/views/nodes/imageEditor/imageEditorUtils/BrushHandler.ts
new file mode 100644
index 000000000..ed39375e0
--- /dev/null
+++ b/src/client/views/nodes/imageEditor/imageEditorUtils/BrushHandler.ts
@@ -0,0 +1,29 @@
+import { GenerativeFillMathHelpers } from './GenerativeFillMathHelpers';
+import { eraserColor } from './imageEditorConstants';
+import { Point } from './imageEditorInterfaces';
+
+export class BrushHandler {
+ static brushCircleOverlay = (x: number, y: number, brushRadius: number, ctx: CanvasRenderingContext2D, fillColor: string /* , erase: boolean */) => {
+ ctx.globalCompositeOperation = 'destination-out';
+ ctx.fillStyle = fillColor;
+ ctx.shadowColor = eraserColor;
+ ctx.shadowBlur = 5;
+ ctx.beginPath();
+ ctx.arc(x, y, brushRadius, 0, 2 * Math.PI);
+ ctx.fill();
+ ctx.closePath();
+ };
+
+ static createBrushPathOverlay = (startPoint: Point, endPoint: Point, brushRadius: number, ctx: CanvasRenderingContext2D, fillColor: string) => {
+ const dist = GenerativeFillMathHelpers.distanceBetween(startPoint, endPoint);
+ const pts: Point[] = [];
+ for (let i = 0; i < dist; i += 5) {
+ const s = i / dist;
+ const x = startPoint.x * (1 - s) + endPoint.x * s;
+ const y = startPoint.y * (1 - s) + endPoint.y * s;
+ pts.push({ x, y }); // record the interpolated point along the stroke (not just the segment start)
+ BrushHandler.brushCircleOverlay(x, y, brushRadius, ctx, fillColor);
+ }
+ return pts;
+ };
+}
diff --git a/src/client/views/nodes/generativeFill/generativeFillUtils/GenerativeFillMathHelpers.ts b/src/client/views/nodes/imageEditor/imageEditorUtils/GenerativeFillMathHelpers.ts
index 6da8c3da0..f820300f3 100644
--- a/src/client/views/nodes/generativeFill/generativeFillUtils/GenerativeFillMathHelpers.ts
+++ b/src/client/views/nodes/imageEditor/imageEditorUtils/GenerativeFillMathHelpers.ts
@@ -1,4 +1,4 @@
-import { Point } from './generativeFillInterfaces';
+import { Point } from './imageEditorInterfaces';
export class GenerativeFillMathHelpers {
static distanceBetween = (p1: Point, p2: Point) => Math.sqrt((p2.x - p1.x) ** 2 + (p2.y - p1.y) ** 2);
diff --git a/src/client/views/nodes/imageEditor/imageEditorUtils/ImageHandler.ts b/src/client/views/nodes/imageEditor/imageEditorUtils/ImageHandler.ts
new file mode 100644
index 000000000..ece0f4d7f
--- /dev/null
+++ b/src/client/views/nodes/imageEditor/imageEditorUtils/ImageHandler.ts
@@ -0,0 +1,312 @@
+import { RefObject } from 'react';
+import { bgColor, canvasSize } from './imageEditorConstants';
+
+export interface APISuccess {
+ status: 'success';
+ urls: string[];
+}
+
+export interface APIError {
+ status: 'error';
+ message: string;
+}
+
+export class ImageUtility {
+ /**
+ * Converts a canvas to a PNG blob.
+ * @param canvas Canvas to convert
+ * @returns Blob of canvas
+ */
+ static canvasToBlob = (canvas: HTMLCanvasElement): Promise<Blob> =>
+ new Promise(resolve => {
+ canvas.toBlob(blob => {
+ if (blob) {
+ resolve(blob);
+ }
+ }, 'image/png');
+ });
+
+ // given a square api image, get the cropped img
+ static getCroppedImg = (img: HTMLImageElement, width: number, height: number): HTMLCanvasElement | undefined => {
+ // Create a new canvas element
+ const canvas = document.createElement('canvas');
+ canvas.width = width;
+ canvas.height = height;
+ const ctx = canvas.getContext('2d');
+ if (ctx) {
+ // Clear the canvas
+ ctx.clearRect(0, 0, canvas.width, canvas.height);
+ if (width < height) {
+ // horizontal padding, x offset
+ const xOffset = (canvasSize - width) / 2;
+ ctx.drawImage(img, xOffset, 0, canvas.width, canvas.height, 0, 0, canvas.width, canvas.height);
+ } else {
+ // vertical padding, y offset
+ const yOffset = (canvasSize - height) / 2;
+ ctx.drawImage(img, 0, yOffset, canvas.width, canvas.height, 0, 0, canvas.width, canvas.height);
+ }
+ return canvas;
+ }
+ return undefined;
+ };
+
+ // converts an image to a canvas data url
+ static convertImgToCanvasUrl = async (imageSrc: string, width: number, height: number): Promise<string> =>
+ new Promise<string>((resolve, reject) => {
+ const img = new Image();
+ img.onload = () => {
+ const canvas = this.getCroppedImg(img, width, height);
+ if (canvas) {
+ const dataUrl = canvas.toDataURL();
+ resolve(dataUrl);
+ }
+ };
+ img.onerror = error => {
+ reject(error);
+ };
+ img.src = imageSrc;
+ });
+
+ // calls the openai api to get image edits
+ static getEdit = async (imgBlob: Blob, maskBlob: Blob, prompt: string, n?: number): Promise<APISuccess | APIError> => {
+ const apiUrl = 'https://api.openai.com/v1/images/edits';
+ const fd = new FormData();
+ fd.append('image', imgBlob, 'image.png');
+ fd.append('mask', maskBlob, 'mask.png');
+ fd.append('prompt', prompt);
+ fd.append('size', '1024x1024');
+ fd.append('n', n ? JSON.stringify(n) : '1');
+ fd.append('response_format', 'b64_json');
+
+ try {
+ const res = await fetch(apiUrl, {
+ method: 'POST',
+ headers: {
+ Authorization: `Bearer ${process.env.OPENAI_KEY}`,
+ },
+ body: fd,
+ });
+ const data = await res.json();
+ console.log(data.data);
+ return {
+ status: 'success',
+ urls: (data.data as { b64_json: string }[]).map(urlData => `data:image/png;base64,${urlData.b64_json}`),
+ };
+ } catch (err) {
+ console.log(err);
+ return { status: 'error', message: 'API error.' };
+ }
+ };
+
+ // mock api call
+ static mockGetEdit = async (mockSrc: string): Promise<APISuccess | APIError> => ({
+ status: 'success',
+ urls: [mockSrc, mockSrc, mockSrc],
+ });
+
+ // Gets the canvas rendering context of a canvas
+ static getCanvasContext = (canvasRef: RefObject<HTMLCanvasElement>): CanvasRenderingContext2D | null => {
+ if (!canvasRef.current) return null;
+ const ctx = canvasRef.current.getContext('2d');
+ if (!ctx) return null;
+ return ctx;
+ };
+
+ // Helper for downloading the canvas (for debugging)
+ static downloadCanvas = (canvas: HTMLCanvasElement) => {
+ const url = canvas.toDataURL();
+ const downloadLink = document.createElement('a');
+ downloadLink.href = url;
+ downloadLink.download = 'canvas';
+
+ downloadLink.click();
+ downloadLink.remove();
+ };
+
+ // Download the canvas (for debugging)
+ static downloadImageCanvas = (imgUrl: string) => {
+ const img = new Image();
+ img.src = imgUrl;
+ img.onload = () => {
+ const canvas = document.createElement('canvas');
+ canvas.width = canvasSize;
+ canvas.height = canvasSize;
+ const ctx = canvas.getContext('2d');
+ ctx?.drawImage(img, 0, 0, canvasSize, canvasSize);
+
+ this.downloadCanvas(canvas);
+ };
+ };
+
+ // Clears the canvas
+ static clearCanvas = (canvasRef: React.RefObject<HTMLCanvasElement>) => {
+ const ctx = this.getCanvasContext(canvasRef);
+ if (!ctx || !canvasRef.current) return;
+ ctx.clearRect(0, 0, canvasRef.current.width, canvasRef.current.height);
+ };
+
+ // Draws the image to the current canvas
+ static drawImgToCanvas = (img: HTMLImageElement, canvasRef: React.RefObject<HTMLCanvasElement>, width: number, height: number) => {
+ const drawImg = (htmlImg: HTMLImageElement) => {
+ const ctx = this.getCanvasContext(canvasRef);
+ if (!ctx) return;
+ ctx.globalCompositeOperation = 'source-over';
+ ctx.clearRect(0, 0, canvasRef.current?.width || width, canvasRef.current?.height || height);
+ ctx.drawImage(htmlImg, 0, 0, width, height);
+ };
+
+ if (img.complete) {
+ drawImg(img);
+ } else {
+ img.onload = () => {
+ drawImg(img);
+ };
+ }
+ };
+
+ // Gets the image mask for the openai endpoint
+ static getCanvasMask = (srcCanvas: HTMLCanvasElement, paddedCanvas: HTMLCanvasElement): HTMLCanvasElement | undefined => {
+ const canvas = document.createElement('canvas');
+ canvas.width = canvasSize;
+ canvas.height = canvasSize;
+ const ctx = canvas.getContext('2d');
+ if (!ctx) return undefined;
+ ctx?.clearRect(0, 0, canvasSize, canvasSize);
+ ctx.drawImage(paddedCanvas, 0, 0);
+
+ // extract and set padding data
+ if (srcCanvas.height > srcCanvas.width) {
+ // horizontal padding, x offset
+ const xOffset = (canvasSize - srcCanvas.width) / 2;
+ ctx?.clearRect(xOffset, 0, srcCanvas.width, srcCanvas.height);
+ ctx.drawImage(srcCanvas, xOffset, 0, srcCanvas.width, srcCanvas.height);
+ } else {
+ // vertical padding, y offset
+ const yOffset = (canvasSize - srcCanvas.height) / 2;
+ ctx?.clearRect(0, yOffset, srcCanvas.width, srcCanvas.height);
+ ctx.drawImage(srcCanvas, 0, yOffset, srcCanvas.width, srcCanvas.height);
+ }
+ return canvas;
+ };
+
+ // Fills in the blank areas of the image with an image reflection (to fill in a square-shaped canvas)
+ static drawHorizontalReflection = (ctx: CanvasRenderingContext2D, canvas: HTMLCanvasElement, xOffset: number) => {
+ const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+ const { data } = imageData;
+ for (let i = 0; i < canvas.height; i++) {
+ for (let j = 0; j < xOffset; j++) {
+ const targetIdx = 4 * (i * canvas.width + j);
+ const sourceI = i;
+ const sourceJ = xOffset + (xOffset - j);
+ const sourceIdx = 4 * (sourceI * canvas.width + sourceJ);
+ data[targetIdx] = data[sourceIdx];
+ data[targetIdx + 1] = data[sourceIdx + 1];
+ data[targetIdx + 2] = data[sourceIdx + 2];
+ }
+ }
+ for (let i = 0; i < canvas.height; i++) {
+ for (let j = canvas.width - 1; j >= canvas.width - 1 - xOffset; j--) {
+ const targetIdx = 4 * (i * canvas.width + j);
+ const sourceI = i;
+ const sourceJ = canvas.width - 1 - xOffset - (xOffset - (canvas.width - j));
+ const sourceIdx = 4 * (sourceI * canvas.width + sourceJ);
+ data[targetIdx] = data[sourceIdx];
+ data[targetIdx + 1] = data[sourceIdx + 1];
+ data[targetIdx + 2] = data[sourceIdx + 2];
+ }
+ }
+ ctx.putImageData(imageData, 0, 0);
+ };
+
+ // Fills in the blank areas of the image with an image reflection (to fill in a square-shaped canvas)
+ static drawVerticalReflection = (ctx: CanvasRenderingContext2D, canvas: HTMLCanvasElement, yOffset: number) => {
+ const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+ const { data } = imageData;
+ for (let j = 0; j < canvas.width; j++) {
+ for (let i = 0; i < yOffset; i++) {
+ const targetIdx = 4 * (i * canvas.width + j);
+ const sourceJ = j;
+ const sourceI = yOffset + (yOffset - i);
+ const sourceIdx = 4 * (sourceI * canvas.width + sourceJ);
+ data[targetIdx] = data[sourceIdx];
+ data[targetIdx + 1] = data[sourceIdx + 1];
+ data[targetIdx + 2] = data[sourceIdx + 2];
+ }
+ }
+ for (let j = 0; j < canvas.width; j++) {
+ for (let i = canvas.height - 1; i >= canvas.height - 1 - yOffset; i--) {
+ const targetIdx = 4 * (i * canvas.width + j);
+ const sourceJ = j;
+ const sourceI = canvas.height - 1 - yOffset - (yOffset - (canvas.height - i));
+ const sourceIdx = 4 * (sourceI * canvas.width + sourceJ);
+ data[targetIdx] = data[sourceIdx];
+ data[targetIdx + 1] = data[sourceIdx + 1];
+ data[targetIdx + 2] = data[sourceIdx + 2];
+ }
+ }
+ ctx.putImageData(imageData, 0, 0);
+ };
+
+ // Gets the unaltered (besides filling in padding) version of the image for the api call
+ static getCanvasImg = (img: HTMLImageElement): HTMLCanvasElement | undefined => {
+ const canvas = document.createElement('canvas');
+ canvas.width = canvasSize;
+ canvas.height = canvasSize;
+ const ctx = canvas.getContext('2d');
+ if (!ctx) return undefined;
+ // fix scaling
+ const scale = Math.min(canvasSize / img.width, canvasSize / img.height);
+ const width = Math.floor(img.width * scale);
+ const height = Math.floor(img.height * scale);
+ ctx?.clearRect(0, 0, canvasSize, canvasSize);
+ ctx.fillStyle = bgColor;
+ ctx.fillRect(0, 0, canvasSize, canvasSize);
+
+ // extract and set padding data
+ if (img.naturalHeight > img.naturalWidth) {
+ // horizontal padding, x offset
+ const xOffset = Math.floor((canvasSize - width) / 2);
+ ctx.drawImage(img, xOffset, 0, width, height);
+
+ // draw reflected image padding
+ this.drawHorizontalReflection(ctx, canvas, xOffset);
+ } else {
+ // vertical padding, y offset
+ const yOffset = Math.floor((canvasSize - height) / 2);
+ ctx.drawImage(img, 0, yOffset, width, height);
+
+ // draw reflected image padding
+ this.drawVerticalReflection(ctx, canvas, yOffset);
+ }
+ return canvas;
+ };
+
+ /**
+ * Converts a url to base64 (tainted canvas workaround)
+ */
+ static urlToBase64 = async (imageUrl: string): Promise<string | undefined> => {
+ try {
+ const res = await fetch(imageUrl);
+ const blob = await res.blob();
+
+ return new Promise<string>((resolve, reject) => {
+ const reader = new FileReader();
+ reader.onload = () => {
+ const base64Data = reader.result?.toString().split(',')[1];
+ if (base64Data) {
+ resolve(base64Data);
+ } else {
+ reject(new Error('Failed to convert.'));
+ }
+ };
+ reader.onerror = () => {
+ reject(new Error('Error reading image data'));
+ };
+ reader.readAsDataURL(blob);
+ });
+ } catch (err) {
+ console.error(err);
+ }
+ return undefined;
+ };
+}
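ImageUtility.getEdit above posts the padded image and its mask to OpenAI's images/edits endpoint and returns base64 data URLs. As a rough sketch of how these helpers might be chained for a single generative-fill request (the driver function below is hypothetical; the component's real flow also rescales, pads, and creates Docs):

```typescript
import { ImageUtility } from './ImageHandler';

// Hypothetical driver: turn two prepared canvases into an edit request.
async function requestGenerativeFill(
    imgCanvas: HTMLCanvasElement,  // padded original image, e.g. from ImageUtility.getCanvasImg
    maskCanvas: HTMLCanvasElement, // padded mask with the erased region, e.g. from ImageUtility.getCanvasMask
    prompt: string
): Promise<string[]> {
    const imgBlob = await ImageUtility.canvasToBlob(imgCanvas);
    const maskBlob = await ImageUtility.canvasToBlob(maskCanvas);
    const res = await ImageUtility.getEdit(imgBlob, maskBlob, prompt, 3);
    if (res.status === 'error') throw new Error(res.message);
    return res.urls; // data:image/png;base64 strings, ready to draw or upload
}
```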
diff --git a/src/client/views/nodes/generativeFill/generativeFillUtils/PointerHandler.ts b/src/client/views/nodes/imageEditor/imageEditorUtils/PointerHandler.ts
index 260923a64..e86f46636 100644
--- a/src/client/views/nodes/generativeFill/generativeFillUtils/PointerHandler.ts
+++ b/src/client/views/nodes/imageEditor/imageEditorUtils/PointerHandler.ts
@@ -1,4 +1,4 @@
-import { Point } from './generativeFillInterfaces';
+import { Point } from './imageEditorInterfaces';
export class PointerHandler {
static getPointRelativeToElement = (element: HTMLElement, e: React.PointerEvent | PointerEvent, scale: number): Point => {
diff --git a/src/client/views/nodes/generativeFill/generativeFillUtils/generativeFillConstants.ts b/src/client/views/nodes/imageEditor/imageEditorUtils/imageEditorConstants.ts
index 4772304bc..594d6d9fc 100644
--- a/src/client/views/nodes/generativeFill/generativeFillUtils/generativeFillConstants.ts
+++ b/src/client/views/nodes/imageEditor/imageEditorUtils/imageEditorConstants.ts
@@ -3,6 +3,7 @@ export const freeformRenderSize = 300;
export const offsetDistanceY = freeformRenderSize + 400;
export const offsetX = 200;
export const newCollectionSize = 500;
+export const brushWidthOffset = 10;
export const activeColor = '#1976d2';
export const eraserColor = '#e1e9ec';
diff --git a/src/client/views/nodes/imageEditor/imageEditorUtils/imageEditorInterfaces.ts b/src/client/views/nodes/imageEditor/imageEditorUtils/imageEditorInterfaces.ts
new file mode 100644
index 000000000..a14b55439
--- /dev/null
+++ b/src/client/views/nodes/imageEditor/imageEditorUtils/imageEditorInterfaces.ts
@@ -0,0 +1,43 @@
+import { IconProp } from '@fortawesome/fontawesome-svg-core';
+import { Doc } from '../../../../../fields/Doc';
+
+export interface CursorData {
+ x: number;
+ y: number;
+ width: number;
+}
+
+export interface Point {
+ x: number;
+ y: number;
+}
+
+export enum ImageToolType {
+ GenerativeFill = 'genFill',
+ Cut = 'cut',
+}
+
+export enum CutMode {
+ IN,
+ OUT,
+ DRAW_IN,
+ ERASE,
+}
+
+export interface ImageEditTool {
+ type: ImageToolType;
+ name: string;
+ btnText: string;
+ icon: IconProp;
+ // the function the tool runs when its apply button is pressed; each tool supplies its own implementation
+ applyFunc: (currCutType: CutMode, brushWidth: number, prevEdits: { url: string; saveRes: Doc | undefined }[], isFirstDoc: boolean) => Promise<void>;
+ // optional slider bounds/defaults, since different tools need different brush sizes
+ sliderMin?: number;
+ sliderMax?: number;
+ sliderDefault?: number;
+}
+
+export interface ImageDimensions {
+ width: number;
+ height: number;
+}
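ImageEditTool above is the contract each editor tool implements: the side buttons render from its name/icon, and the apply bar calls its applyFunc with the editor's current UI state. A minimal declaration-and-dispatch sketch (this sketchCutTool mirrors, but is not, the cutTool defined in ImageEditor.tsx):

```typescript
import { CutMode, ImageEditTool, ImageToolType } from './imageEditorInterfaces';

// Hypothetical tool declaration matching the ImageEditTool shape above.
const sketchCutTool: ImageEditTool = {
    type: ImageToolType.Cut,
    name: 'Cut',
    btnText: 'CUT IMAGE',
    icon: 'scissors',
    sliderMin: 1,
    sliderMax: 50,
    sliderDefault: 5,
    // applyFunc receives the cut mode, brush width, prior edits, and a first-doc flag.
    applyFunc: async (mode, brushWidth, prevEdits, isFirstDoc) => {
        console.log(`cut mode ${CutMode[mode]}, width ${brushWidth}, ${prevEdits.length} prior edits, first=${isFirstDoc}`);
    },
};

// The editor invokes the active tool with its current UI state,
// e.g. currTool.applyFunc(cutType, cursorData.width, edits, isFirstDoc).
void sketchCutTool.applyFunc(CutMode.IN, 5, [], true);
```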
diff --git a/src/client/views/nodes/generativeFill/generativeFillUtils/BrushHandler.ts b/src/client/views/nodes/imageEditor/imageToolUtils/BrushHandler.ts
index 8a66d7347..7139bebc3 100644
--- a/src/client/views/nodes/generativeFill/generativeFillUtils/BrushHandler.ts
+++ b/src/client/views/nodes/imageEditor/imageToolUtils/BrushHandler.ts
@@ -1,6 +1,6 @@
-import { GenerativeFillMathHelpers } from './GenerativeFillMathHelpers';
-import { eraserColor } from './generativeFillConstants';
-import { Point } from './generativeFillInterfaces';
+import { GenerativeFillMathHelpers } from '../imageEditorUtils/GenerativeFillMathHelpers';
+import { eraserColor } from '../imageEditorUtils/imageEditorConstants';
+import { Point } from '../imageEditorUtils/imageEditorInterfaces';
import { points } from '@turf/turf';
export enum BrushType {
diff --git a/src/client/views/nodes/imageEditor/imageToolUtils/ImageHandler.ts b/src/client/views/nodes/imageEditor/imageToolUtils/ImageHandler.ts
new file mode 100644
index 000000000..b9723b5be
--- /dev/null
+++ b/src/client/views/nodes/imageEditor/imageToolUtils/ImageHandler.ts
@@ -0,0 +1,312 @@
+import { RefObject } from 'react';
+import { bgColor, canvasSize } from '../imageEditorUtils/imageEditorConstants';
+
+export interface APISuccess {
+ status: 'success';
+ urls: string[];
+}
+
+export interface APIError {
+ status: 'error';
+ message: string;
+}
+
+export class ImageUtility {
+ /**
+ * Converts a canvas to a PNG blob.
+ * @param canvas Canvas to convert
+ * @returns Blob of canvas
+ */
+ static canvasToBlob = (canvas: HTMLCanvasElement): Promise<Blob> =>
+ new Promise(resolve => {
+ canvas.toBlob(blob => {
+ if (blob) {
+ resolve(blob);
+ }
+ }, 'image/png');
+ });
+
+ // given a square api image, get the cropped img
+ static getCroppedImg = (img: HTMLImageElement, width: number, height: number): HTMLCanvasElement | undefined => {
+ // Create a new canvas element
+ const canvas = document.createElement('canvas');
+ canvas.width = width;
+ canvas.height = height;
+ const ctx = canvas.getContext('2d');
+ if (ctx) {
+ // Clear the canvas
+ ctx.clearRect(0, 0, canvas.width, canvas.height);
+ if (width < height) {
+ // horizontal padding, x offset
+ const xOffset = (canvasSize - width) / 2;
+ ctx.drawImage(img, xOffset, 0, canvas.width, canvas.height, 0, 0, canvas.width, canvas.height);
+ } else {
+ // vertical padding, y offset
+ const yOffset = (canvasSize - height) / 2;
+ ctx.drawImage(img, 0, yOffset, canvas.width, canvas.height, 0, 0, canvas.width, canvas.height);
+ }
+ return canvas;
+ }
+ return undefined;
+ };
+
+ // converts an image to a canvas data url
+ static convertImgToCanvasUrl = async (imageSrc: string, width: number, height: number): Promise<string> =>
+ new Promise<string>((resolve, reject) => {
+ const img = new Image();
+ img.onload = () => {
+ const canvas = this.getCroppedImg(img, width, height);
+ if (canvas) {
+ const dataUrl = canvas.toDataURL();
+ resolve(dataUrl);
+ }
+ };
+ img.onerror = error => {
+ reject(error);
+ };
+ img.src = imageSrc;
+ });
+
+ // calls the openai api to get image edits
+ static getEdit = async (imgBlob: Blob, maskBlob: Blob, prompt: string, n?: number): Promise<APISuccess | APIError> => {
+ const apiUrl = 'https://api.openai.com/v1/images/edits';
+ const fd = new FormData();
+ fd.append('image', imgBlob, 'image.png');
+ fd.append('mask', maskBlob, 'mask.png');
+ fd.append('prompt', prompt);
+ fd.append('size', '1024x1024');
+ fd.append('n', n ? JSON.stringify(n) : '1');
+ fd.append('response_format', 'b64_json');
+
+ try {
+ const res = await fetch(apiUrl, {
+ method: 'POST',
+ headers: {
+ Authorization: `Bearer ${process.env.OPENAI_KEY}`,
+ },
+ body: fd,
+ });
+ const data = await res.json();
+ if (!res.ok) {
+ // surface the API's own error message when the request is rejected
+ return { status: 'error', message: data.error?.message ?? 'API error.' };
+ }
+ return {
+ status: 'success',
+ urls: (data.data as { b64_json: string }[]).map(urlData => `data:image/png;base64,${urlData.b64_json}`),
+ };
+ } catch (err) {
+ console.error(err);
+ return { status: 'error', message: 'API error.' };
+ }
+ };
+
+ // mock api call
+ static mockGetEdit = async (mockSrc: string): Promise<APISuccess | APIError> => ({
+ status: 'success',
+ urls: [mockSrc, mockSrc, mockSrc],
+ });
+
+ // Gets the canvas rendering context of a canvas
+ static getCanvasContext = (canvasRef: RefObject<HTMLCanvasElement>): CanvasRenderingContext2D | null => {
+ if (!canvasRef.current) return null;
+ const ctx = canvasRef.current.getContext('2d');
+ if (!ctx) return null;
+ return ctx;
+ };
+
+ // Helper for downloading the canvas (for debugging)
+ static downloadCanvas = (canvas: HTMLCanvasElement) => {
+ const url = canvas.toDataURL();
+ const downloadLink = document.createElement('a');
+ downloadLink.href = url;
+ downloadLink.download = 'canvas';
+
+ downloadLink.click();
+ downloadLink.remove();
+ };
+
+ // Renders an image URL onto a square canvas and downloads it (for debugging)
+ static downloadImageCanvas = (imgUrl: string) => {
+ const img = new Image();
+ img.src = imgUrl;
+ img.onload = () => {
+ const canvas = document.createElement('canvas');
+ canvas.width = canvasSize;
+ canvas.height = canvasSize;
+ const ctx = canvas.getContext('2d');
+ ctx?.drawImage(img, 0, 0, canvasSize, canvasSize);
+
+ this.downloadCanvas(canvas);
+ };
+ };
+
+ // Clears the canvas
+ static clearCanvas = (canvasRef: React.RefObject<HTMLCanvasElement>) => {
+ const ctx = this.getCanvasContext(canvasRef);
+ if (!ctx || !canvasRef.current) return;
+ ctx.clearRect(0, 0, canvasRef.current.width, canvasRef.current.height);
+ };
+
+ // Draws the image to the current canvas
+ static drawImgToCanvas = (img: HTMLImageElement, canvasRef: React.RefObject<HTMLCanvasElement>, width: number, height: number) => {
+ const drawImg = (htmlImg: HTMLImageElement) => {
+ const ctx = this.getCanvasContext(canvasRef);
+ if (!ctx) return;
+ ctx.globalCompositeOperation = 'source-over';
+ ctx.clearRect(0, 0, width, height);
+ ctx.drawImage(htmlImg, 0, 0, width, height);
+ };
+
+ if (img.complete) {
+ drawImg(img);
+ } else {
+ img.onload = () => {
+ drawImg(img);
+ };
+ }
+ };
+
+ // Gets the image mask for the openai endpoint
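+ // The padded square canvas is drawn first, then the working canvas (typically with
+ // brushed pixels cleared to transparency) is composited into its centered slot, so
+ // only those cleared pixels remain transparent in the mask.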
+ static getCanvasMask = (srcCanvas: HTMLCanvasElement, paddedCanvas: HTMLCanvasElement): HTMLCanvasElement | undefined => {
+ const canvas = document.createElement('canvas');
+ canvas.width = canvasSize;
+ canvas.height = canvasSize;
+ const ctx = canvas.getContext('2d');
+ if (!ctx) return undefined;
+ ctx.clearRect(0, 0, canvasSize, canvasSize);
+ ctx.drawImage(paddedCanvas, 0, 0);
+
+ // extract and set padding data
+ if (srcCanvas.height > srcCanvas.width) {
+ // horizontal padding, x offset
+ const xOffset = (canvasSize - srcCanvas.width) / 2;
+ ctx.clearRect(xOffset, 0, srcCanvas.width, srcCanvas.height);
+ ctx.drawImage(srcCanvas, xOffset, 0, srcCanvas.width, srcCanvas.height);
+ } else {
+ // vertical padding, y offset
+ const yOffset = (canvasSize - srcCanvas.height) / 2;
+ ctx.clearRect(0, yOffset, srcCanvas.width, srcCanvas.height);
+ ctx.drawImage(srcCanvas, 0, yOffset, srcCanvas.width, srcCanvas.height);
+ }
+ return canvas;
+ };
+
+ // Fills the left/right padding bands of the square canvas with a mirrored reflection of the image
+ static drawHorizontalReflection = (ctx: CanvasRenderingContext2D, canvas: HTMLCanvasElement, xOffset: number) => {
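+ // Pixels are RGBA, so pixel (row i, col j) starts at index 4 * (i * width + j);
+ // each band copies columns mirrored across the adjacent image edge, leaving
+ // alpha untouched since the background fill already made the canvas opaque.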
+ const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+ const { data } = imageData;
+ for (let i = 0; i < canvas.height; i++) {
+ for (let j = 0; j < xOffset; j++) {
+ const targetIdx = 4 * (i * canvas.width + j);
+ const sourceI = i;
+ const sourceJ = xOffset + (xOffset - j);
+ const sourceIdx = 4 * (sourceI * canvas.width + sourceJ);
+ data[targetIdx] = data[sourceIdx];
+ data[targetIdx + 1] = data[sourceIdx + 1];
+ data[targetIdx + 2] = data[sourceIdx + 2];
+ }
+ }
+ for (let i = 0; i < canvas.height; i++) {
+ for (let j = canvas.width - 1; j >= canvas.width - 1 - xOffset; j--) {
+ const targetIdx = 4 * (i * canvas.width + j);
+ const sourceI = i;
+ const sourceJ = canvas.width - 1 - xOffset - (xOffset - (canvas.width - j));
+ const sourceIdx = 4 * (sourceI * canvas.width + sourceJ);
+ data[targetIdx] = data[sourceIdx];
+ data[targetIdx + 1] = data[sourceIdx + 1];
+ data[targetIdx + 2] = data[sourceIdx + 2];
+ }
+ }
+ ctx.putImageData(imageData, 0, 0);
+ };
+
+ // Vertical analogue of drawHorizontalReflection: fills the top/bottom padding bands with a mirrored reflection of the image
+ static drawVerticalReflection = (ctx: CanvasRenderingContext2D, canvas: HTMLCanvasElement, yOffset: number) => {
+ const imageData = ctx.getImageData(0, 0, canvas.width, canvas.height);
+ const { data } = imageData;
+ for (let j = 0; j < canvas.width; j++) {
+ for (let i = 0; i < yOffset; i++) {
+ const targetIdx = 4 * (i * canvas.width + j);
+ const sourceJ = j;
+ const sourceI = yOffset + (yOffset - i);
+ const sourceIdx = 4 * (sourceI * canvas.width + sourceJ);
+ data[targetIdx] = data[sourceIdx];
+ data[targetIdx + 1] = data[sourceIdx + 1];
+ data[targetIdx + 2] = data[sourceIdx + 2];
+ }
+ }
+ for (let j = 0; j < canvas.width; j++) {
+ for (let i = canvas.height - 1; i >= canvas.height - 1 - yOffset; i--) {
+ const targetIdx = 4 * (i * canvas.width + j);
+ const sourceJ = j;
+ const sourceI = canvas.height - 1 - yOffset - (yOffset - (canvas.height - i));
+ const sourceIdx = 4 * (sourceI * canvas.width + sourceJ);
+ data[targetIdx] = data[sourceIdx];
+ data[targetIdx + 1] = data[sourceIdx + 1];
+ data[targetIdx + 2] = data[sourceIdx + 2];
+ }
+ }
+ ctx.putImageData(imageData, 0, 0);
+ };
+
+ // Builds the square image canvas for the API call: the image is scaled to fit, centered, and the padding is filled with a reflection
+ static getCanvasImg = (img: HTMLImageElement): HTMLCanvasElement | undefined => {
+ const canvas = document.createElement('canvas');
+ canvas.width = canvasSize;
+ canvas.height = canvasSize;
+ const ctx = canvas.getContext('2d');
+ if (!ctx) return undefined;
+ // scale the image to fit inside the square canvas while preserving its aspect ratio
+ const scale = Math.min(canvasSize / img.width, canvasSize / img.height);
+ const width = Math.floor(img.width * scale);
+ const height = Math.floor(img.height * scale);
+ ctx.clearRect(0, 0, canvasSize, canvasSize);
+ ctx.fillStyle = bgColor;
+ ctx.fillRect(0, 0, canvasSize, canvasSize);
+
+ // extract and set padding data
+ if (img.naturalHeight > img.naturalWidth) {
+ // horizontal padding, x offset
+ const xOffset = Math.floor((canvasSize - width) / 2);
+ ctx.drawImage(img, xOffset, 0, width, height);
+
+ // draw reflected image padding
+ this.drawHorizontalReflection(ctx, canvas, xOffset);
+ } else {
+ // vertical padding, y offset
+ const yOffset = Math.floor((canvasSize - height) / 2);
+ ctx.drawImage(img, 0, yOffset, width, height);
+
+ // draw reflected image padding
+ this.drawVerticalReflection(ctx, canvas, yOffset);
+ }
+ return canvas;
+ };
+
+ /**
+ * Fetches an image URL and converts it to base64, working around tainted-canvas restrictions on cross-origin images
+ */
+ static urlToBase64 = async (imageUrl: string): Promise<string | undefined> => {
+ try {
+ const res = await fetch(imageUrl);
+ const blob = await res.blob();
+
+ return new Promise<string>((resolve, reject) => {
+ const reader = new FileReader();
+ reader.onload = () => {
+ const base64Data = reader.result?.toString().split(',')[1];
+ if (base64Data) {
+ resolve(base64Data);
+ } else {
+ reject(new Error('Failed to convert.'));
+ }
+ };
+ reader.onerror = () => {
+ reject(new Error('Error reading image data'));
+ };
+ reader.readAsDataURL(blob);
+ });
+ } catch (err) {
+ console.error(err);
+ }
+ return undefined;
+ };
+}