Diffstat (limited to 'src/client/views/nodes/DataVizBox/DocCreatorMenu/Backend/TemplateMenuAIUtils.ts')
-rw-r--r-- | src/client/views/nodes/DataVizBox/DocCreatorMenu/Backend/TemplateMenuAIUtils.ts | 122 |
1 file changed, 122 insertions, 0 deletions
diff --git a/src/client/views/nodes/DataVizBox/DocCreatorMenu/Backend/TemplateMenuAIUtils.ts b/src/client/views/nodes/DataVizBox/DocCreatorMenu/Backend/TemplateMenuAIUtils.ts
new file mode 100644
index 000000000..08818dd6c
--- /dev/null
+++ b/src/client/views/nodes/DataVizBox/DocCreatorMenu/Backend/TemplateMenuAIUtils.ts
@@ -0,0 +1,122 @@
+import { ClientUtils } from '../../../../../../ClientUtils';
+import { Networking } from '../../../../../Network';
+import { gptImageCall, gptAPICall, GPTCallType } from '../../../../../apis/gpt/GPT';
+import { Col } from '../DocCreatorMenu';
+import { TemplateFieldSize, TemplateFieldType } from '../TemplateBackend';
+import { ViewType } from '../TemplateFieldTypes/TemplateField';
+import { Template } from '../Template';
+import { Upload } from '../../../../../../server/SharedMediaTypes';
+
+export class TemplateMenuAIUtils {
+    public static generateGPTImage = async (prompt: string): Promise<string | undefined> => {
+        try {
+            const res = await gptImageCall(prompt);
+
+            if (res) {
+                const result = (await Networking.PostToServer('/uploadRemoteImage', { sources: res })) as Upload.FileInformation[];
+                const source = ClientUtils.prepend(result[0].accessPaths.agnostic.client);
+                return source;
+            }
+        } catch (e) {
+            console.log(e);
+        }
+    };
+
+    public static renderGPTImageCall = async (template: Template, col: Col, fieldNumber: number): Promise<boolean> => {
+        const generateAndLoadImage = async (id: number, prompt: string) => {
+            const field = template.getFieldByID(id);
+            const url = await this.generateGPTImage(prompt);
+            field?.setContent(url ?? '', ViewType.IMG);
+            field?.setTitle(col.title);
+        };
+
+        const fieldContent: string = template.compiledContent;
+
+        try {
+            const sysPrompt =
+                `#${Math.random() * 100}: Your job is to create a prompt for an AI image generator to help it generate an image based on existing content in a template and a user prompt. Your prompt should focus heavily on visual elements to help the image generator; avoid unnecessary info that might distract it. ONLY INCLUDE THE PROMPT, NO OTHER TEXT OR EXPLANATION. The existing content is as follows: ` +
+                fieldContent +
+                ' **** The user prompt is: ' +
+                col.desc;
+
+            const prompt = await gptAPICall(sysPrompt, GPTCallType.COMPLETEPROMPT);
+
+            await generateAndLoadImage(fieldNumber, prompt);
+        } catch (e) {
+            console.log(e);
+        }
+        return true;
+    };
+
+    public static renderGPTTextCall = async (template: Template, col: Col, fieldNum: number | undefined): Promise<boolean> => {
+        const wordLimit = (size: TemplateFieldSize) => {
+            switch (size) {
+                case TemplateFieldSize.TINY:
+                    return 2;
+                case TemplateFieldSize.SMALL:
+                    return 5;
+                case TemplateFieldSize.MEDIUM:
+                    return 20;
+                case TemplateFieldSize.LARGE:
+                    return 50;
+                case TemplateFieldSize.HUGE:
+                    return 100;
+                default:
+                    return 10;
+            }
+        };
+
+        const textAssignment = `--- title: ${col.title}, prompt: ${col.desc}, word limit: ${wordLimit(col.sizes[0])} words, assigned field: ${fieldNum} ---`;
+
+        const fieldContent: string = template.compiledContent;
+
+        try {
+            const prompt = fieldContent + textAssignment;
+
+            const res = await gptAPICall(`${Math.random() * 100000}: ${prompt}`, GPTCallType.FILL);
+
+            if (res) {
+                const assignments: { [title: string]: { number: string; content: string } } = JSON.parse(res);
+                Object.entries(assignments).forEach(([/* title */, info]) => {
+                    const field = template.getFieldByID(Number(info.number));
+
+                    field?.setContent(info.content ?? '', ViewType.TEXT);
+                    field?.setTitle(col.title);
+                });
+            }
+        } catch (err) {
+            console.log(err);
+        }
+
+        return true;
+    };
+
+    /**
+     * Populates a preset template framework with content from a DataVizBox or with AI-generated content.
+     * @param template the preloaded template framework being filled in
+     * @param assignments a map from template field numbers (top to bottom) to the columns assigned to them from the linked DataViz
+     * @returns the template with its assigned fields fully rendered
+     */
+    public static applyGPTContentToTemplate = async (template: Template, assignments: { [field: string]: Col }): Promise<Template | undefined> => {
+        const GPTTextCalls = Object.entries(assignments).filter(([, col]) => col.type === TemplateFieldType.TEXT && col.AIGenerated);
+        const GPTIMGCalls = Object.entries(assignments).filter(([, col]) => col.type === TemplateFieldType.VISUAL && col.AIGenerated);
+
+        if (GPTTextCalls.length) {
+            const promises = GPTTextCalls.map(([id, col]) => {
+                return TemplateMenuAIUtils.renderGPTTextCall(template, col, Number(id));
+            });
+
+            await Promise.all(promises);
+        }
+
+        if (GPTIMGCalls.length) {
+            const promises = GPTIMGCalls.map(async ([id, col]) => {
+                return TemplateMenuAIUtils.renderGPTImageCall(template, col, Number(id));
+            });
+
+            await Promise.all(promises);
+        }
+
+        return template;
+    };
+}
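
Usage note (not part of the commit): the sketch below shows how a caller might invoke applyGPTContentToTemplate. The fillTemplate function, the field IDs, and the column values are hypothetical; the Col fields used (title, desc, type, sizes, AIGenerated) are inferred from how this file reads them, and the Template instance is assumed to come from the existing DocCreatorMenu template loader.

// Hypothetical caller, assumed to live alongside TemplateMenuAIUtils in the Backend folder.
import { TemplateMenuAIUtils } from './TemplateMenuAIUtils';
import { TemplateFieldSize, TemplateFieldType } from '../TemplateBackend';
import { Col } from '../DocCreatorMenu';
import { Template } from '../Template';

async function fillTemplate(template: Template): Promise<Template | undefined> {
    // Keys are template field IDs (numbered top to bottom); values describe the column
    // assigned to each field. Only columns flagged AIGenerated trigger GPT calls.
    const assignments: { [field: string]: Col } = {
        1: { title: 'Summary', desc: 'One-paragraph overview of the dataset', type: TemplateFieldType.TEXT, sizes: [TemplateFieldSize.MEDIUM], AIGenerated: true } as Col,
        2: { title: 'Cover image', desc: 'A clean illustration of the data topic', type: TemplateFieldType.VISUAL, sizes: [TemplateFieldSize.LARGE], AIGenerated: true } as Col,
    };

    // Text fields and image fields are rendered in two batches, each awaited in parallel.
    return TemplateMenuAIUtils.applyGPTContentToTemplate(template, assignments);
}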