Diffstat (limited to 'src/client/apis/gpt')
-rw-r--r--   src/client/apis/gpt/GPT.ts                171
-rw-r--r--   src/client/apis/gpt/PresCustomization.ts  115
2 files changed, 203 insertions, 83 deletions
diff --git a/src/client/apis/gpt/GPT.ts b/src/client/apis/gpt/GPT.ts
index d64b38655..29b6ab989 100644
--- a/src/client/apis/gpt/GPT.ts
+++ b/src/client/apis/gpt/GPT.ts
@@ -1,28 +1,40 @@
import { ChatCompletionMessageParam, Image } from 'openai/resources';
import { openai } from './setup';
+export enum GPTDocCommand {
+ AssignTags = 1,
+ Filter = 2,
+ GetInfo = 3,
+ Sort = 4,
+}
+
+export const DescriptionSeperator = '======';
+export const DocSeperator = '------';
+
enum GPTCallType {
SUMMARY = 'summary',
COMPLETION = 'completion',
EDIT = 'edit',
- CHATCARD = 'chatcard',
- FLASHCARD = 'flashcard',
- QUIZ = 'quiz',
- SORT = 'sort',
+ CHATCARD = 'chatcard', // a single flashcard-style response to a question
+ FLASHCARD = 'flashcard', // a set of flashcard question/answer responses to a topic
DESCRIBE = 'describe',
MERMAID = 'mermaid',
DATA = 'data',
+ STACK = 'stack',
+ PRONUNCIATION = 'pronunciation',
DRAW = 'draw',
COLOR = 'color',
- RUBRIC = 'rubric',
- TYPE = 'type',
- SUBSET = 'subset',
- INFO = 'info',
TEMPLATE = 'template',
VIZSUM = 'vizsum',
VIZSUM2 = 'vizsum2',
FILL = 'fill',
COMPLETEPROMPT = 'completeprompt',
+ QUIZDOC = 'quiz_doc',
+ MAKERUBRIC = 'make_rubric', // create a definition rubric for a document to be used when quizzing the user
+ COMMANDTYPE = 'command_type', // determine the type of command being made (GPTDocCommand, e.g., AssignTags, Sort, Filter, GetInfo) and possibly some parameters (e.g., the tag for AssignTags)
+ SUBSETDOCS = 'subset_docs', // select a subset of documents based on their descriptions
+ DOCINFO = 'doc_info', // provide information about a document
+ SORTDOCS = 'sort_docs',
}
type GPTCallOpts = {
@@ -32,11 +44,16 @@ type GPTCallOpts = {
prompt: string;
};
-const callTypeMap: { [type: string]: GPTCallOpts } = {
+const callTypeMap: { [type in GPTCallType]: GPTCallOpts } = {
// newest model: gpt-4
summary: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: 'Summarize the text given in simpler terms.' },
edit: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: 'Reword the text.' },
- flashcard: { model: 'gpt-4-turbo', maxTokens: 512, temp: 0.5, prompt: 'Make flashcards out of this text with each question and answer labeled. Do not label each flashcard and do not include asterisks: ' },
+ stack: {
+ model: 'gpt-4o',
+ maxTokens: 2048,
+ temp: 0.7,
+ prompt: 'Create a stack of at least 10 flashcards out of this text with each question and answer labeled as question and answer. Each flashcard should have a title that represents the question in just a few words; label it "title". For some questions, ask "what is this image of?" tailored to the theme of the stack and the image, and write a keyword that represents the image, labeled "keyword". Otherwise, write none. Do not label each flashcard and do not include asterisks.',
+ },
completion: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: "You are a helpful assistant. Answer the user's prompt." },
mermaid: {
@@ -51,19 +68,37 @@ const callTypeMap: { [type: string]: GPTCallOpts } = {
temp: 0.5,
prompt: "You are a helpful resarch assistant. Analyze the user's data to find meaningful patterns and/or correlation. Please only return a JSON with a correlation column 1 propert, a correlation column 2 property, and an analysis property. ",
},
- sort: {
+ sort_docs: {
model: 'gpt-4o',
maxTokens: 2048,
temp: 0.25,
- prompt: "The user is going to give you a list of descriptions. Each one is separated by `======` on either side. Descriptions will vary in length, so make sure to only separate when you see `======`. Sort them by the user's specifications. Make sure each description is only in the list once. Each item should be separated by `======`. Immediately afterward, surrounded by `------` on BOTH SIDES, provide some insight into your reasoning for the way you sorted (and mention nothing about the formatting details given in this description). It is VERY important that you format it exactly as described, ensuring the proper number of `=` and `-` (6 of each) and NO commas",
+ prompt: `The user is going to give you a list of descriptions.
+ Each one is separated by '${DescriptionSeperator}' on either side.
+ Descriptions will vary in length, so make sure to only separate when you see '${DescriptionSeperator}'.
+ Sort them by the user's specifications.
+ Make sure each description is only in the list once. Each item should be separated by '${DescriptionSeperator}'.
+ Immediately afterward, surrounded by '${DocSeperator}' on BOTH SIDES, provide some insight into your reasoning for the way you sorted (and mention nothing about the formatting details given in this description).
+ It is VERY important that you format it exactly as described, ensuring the proper number of '${DescriptionSeperator[0]}' and '${DocSeperator[0]}' (${DescriptionSeperator.length} of each) and NO commas`,
},
describe: { model: 'gpt-4-vision-preview', maxTokens: 2048, temp: 0, prompt: 'Describe these images in 3-5 words' },
+ flashcard: {
+ model: 'gpt-4-turbo',
+ maxTokens: 512,
+ temp: 0.5,
+ prompt: 'Make flashcards out of this text with each question and answer labeled as question and answer. Create a title for each question and answer that is labeled as "title". Do not label each flashcard and do not include asterisks: ',
+ },
chatcard: { model: 'gpt-4-turbo', maxTokens: 512, temp: 0.5, prompt: 'Answer the following question as a short flashcard response. Do not include a label.' },
- quiz: {
+ quiz_doc: {
model: 'gpt-4-turbo',
maxTokens: 1024,
temp: 0,
- prompt: 'List unique differences between the content of the UserAnswer and Rubric. Before each difference, label it and provide any additional information the UserAnswer missed and explain it in second person without separating it into UserAnswer and Rubric content and additional information. If there are no differences, say correct',
+ prompt: 'List unique differences between the content of the UserAnswer and Rubric. Before each difference, label it and provide any additional information the UserAnswer missed, explaining it in the second person without separating it into UserAnswer and Rubric content and additional information. If the Rubric is incorrect, explain why. If there are no differences, say correct. If it is empty, say there is nothing for me to evaluate. If it is comparing two words, check spelling but ignore capitalization and punctuation.',
+ },
+ pronunciation: {
+ model: 'gpt-4-turbo',
+ maxTokens: 1024,
+ temp: 0.1, //0.3
+ prompt: "BRIEFLY (<50 words) describe any differences between the rubric and the user's answer answer in second person. If there are no differences, say correct",
},
template: {
model: 'gpt-4-turbo',
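
The sort_docs (and later subset_docs) prompts frame their output with DescriptionSeperator and DocSeperator, so a caller has to split the raw completion back apart. A minimal parsing sketch, assuming a hypothetical parseDocListResponse helper that is not part of this diff:

const parseDocListResponse = (resp: string) => {
    // the reasoning is wrapped in '------' on both sides; split it off first
    const [docsPart, reasoning = ''] = resp.split(DocSeperator);
    // everything before the reasoning is descriptions delimited by '======'
    const descriptions = docsPart
        .split(DescriptionSeperator)
        .map(d => d.trim())
        .filter(d => d.length > 0);
    return { descriptions, reasoning: reasoning.trim() };
};
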
@@ -102,6 +137,46 @@ const callTypeMap: { [type: string]: GPTCallOpts } = {
temp: 0.5,
prompt: 'You will be coloring drawings. You will be given what the drawing is, then a list of descriptions for parts of the drawing. Based on each description, respond with the stroke and fill color that it should be. Follow the rules: 1. Avoid using black for stroke color 2. Make the stroke color 1-3 shades darker than the fill color 3. Use the same colors when possible. Format as {#abcdef #abcdef}, making sure theres a color for each description, and do not include any additional text.',
},
+ command_type: {
+ model: 'gpt-4-turbo',
+ maxTokens: 1024,
+ temp: 0,
+ prompt: `I'm going to provide you with a question.
+ Based on the question, is the user asking you to
+ ${GPTDocCommand.AssignTags}. Assign tags/labels (like star, heart, etc.) to docs.
+ ${GPTDocCommand.GetInfo}. Provide information about a specific doc.
+ ${GPTDocCommand.Filter}. Filter docs based on a question/information.
+ ${GPTDocCommand.Sort}. Put docs in a specific order.
+ Answer with only the number for ${GPTDocCommand.Filter}-${GPTDocCommand.Sort}.
+ For number one, provide the number (${GPTDocCommand.AssignTags}) and the appropriate tag`,
+ },
+ subset_docs: {
+ model: 'gpt-4-turbo',
+ maxTokens: 1024,
+ temp: 0,
+ prompt: `I'm going to give you a list of descriptions.
+ Each one is separated by '${DescriptionSeperator}' on either side.
+ Descriptions will vary in length, so make sure to only separate when you see '${DescriptionSeperator}'.
+ Based on the question the user asks, provide a subset of the given descriptions that best matches the user's specifications.
+ Make sure each description is only in the list once. Each item should be separated by '${DescriptionSeperator}'.
+ Immediately afterward, surrounded by '${DocSeperator}' on BOTH SIDES, provide some insight into your reasoning in the 2nd person (and mention nothing about the formatting details given in this description).
+ It is VERY important that you format it exactly as described, ensuring the proper number of '${DescriptionSeperator[0]}' and '${DocSeperator[0]}' (${DescriptionSeperator.length} of each) and NO commas`,
+ },
+
+ doc_info: {
+ model: 'gpt-4-turbo',
+ maxTokens: 1024,
+ temp: 0,
+ prompt: `Answer the user's question with a short (<100 word) response.
+ If a particular document is selected, I will provide that information (which may help with your response).
+ },
+ make_rubric: {
+ model: 'gpt-4-turbo',
+ maxTokens: 1024,
+ temp: 0,
+ prompt: `BRIEFLY (<25 words) provide a definition for the following term.
+ It will be used as a rubric to evaluate the user's understanding of the topic`,
+ },
};
let lastCall = '';
let lastResp = '';
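
The command_type prompt asks GPT to answer with just a command number, plus the chosen tag when the command is AssignTags, so the response needs to be mapped back onto GPTDocCommand. A hedged sketch, assuming a hypothetical parseCommandResponse helper:

const parseCommandResponse = (resp: string): { command: GPTDocCommand; tag?: string } => {
    // the first token is the command number, e.g. '1 star' or '4'
    const [num, ...rest] = resp.trim().split(/\s+/);
    const command = Number(num) as GPTDocCommand;
    // AssignTags responses carry the chosen tag after the number
    return command === GPTDocCommand.AssignTags ? { command, tag: rest.join(' ') } : { command };
};
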
@@ -111,14 +186,16 @@ let lastResp = '';
* @param inputText Text to process
* @returns AI Output
*/
-const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: any, dontCache?: boolean) => {
- const inputText = [GPTCallType.SUMMARY, GPTCallType.FLASHCARD, GPTCallType.QUIZ].includes(callType) ? inputTextIn + '.' : inputTextIn;
- const opts: GPTCallOpts = callTypeMap[callType];
- if (lastCall === inputText && dontCache !== true) return lastResp;
+const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: string, dontCache?: boolean) => {
+ const inputText = inputTextIn + ([GPTCallType.SUMMARY, GPTCallType.FLASHCARD, GPTCallType.QUIZDOC, GPTCallType.STACK].includes(callType) ? '.' : '');
+ const opts = callTypeMap[callType];
+ if (!opts) {
+ console.log('The query type: ' + callType + ' requires a configuration.');
+ return 'Error connecting with API.';
+ }
+ if (lastCall === inputText && dontCache !== true && lastResp) return lastResp;
try {
- lastCall = inputText;
-
- const usePrompt = prompt ? prompt + opts.prompt : opts.prompt;
+ const usePrompt = prompt ? prompt + '.' + opts.prompt : opts.prompt;
const messages: ChatCompletionMessageParam[] = [
{ role: 'system', content: usePrompt },
{ role: 'user', content: inputText },
@@ -130,8 +207,12 @@ const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: a
temperature: opts.temp,
max_tokens: opts.maxTokens,
});
- lastResp = response.choices[0].message.content ?? '';
- return lastResp;
+ const result = response.choices[0].message.content ?? '';
+ if (!dontCache) {
+ lastResp = result;
+ lastCall = inputText;
+ }
+ return result;
} catch (err) {
console.log(err);
return 'Error connecting with API.';
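
With the reworked caching, gptAPICall memoizes the last prompt/response pair unless dontCache is set. A usage sketch (assumes an enclosing async context):

// cached: an identical repeat call returns lastResp without hitting the API
const summary = await gptAPICall('some long passage', GPTCallType.SUMMARY);
// pass dontCache to force a fresh completion (and skip updating the cache)
const fresh = await gptAPICall('some long passage', GPTCallType.SUMMARY, undefined, true);
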
@@ -168,7 +249,7 @@ const gptGetEmbedding = async (src: string): Promise<number[]> => {
return [];
}
};
-const gptImageLabel = async (src: string): Promise<string> => {
+const gptImageLabel = async (src: string, prompt: string): Promise<string> => {
try {
const response = await openai.chat.completions.create({
model: 'gpt-4o',
@@ -176,7 +257,7 @@ const gptImageLabel = async (src: string): Promise<string> => {
{
role: 'user',
content: [
- { type: 'text', text: 'Give three labels to describe this image.' },
+ { type: 'text', text: prompt },
{
type: 'image_url',
image_url: {
@@ -189,6 +270,7 @@ const gptImageLabel = async (src: string): Promise<string> => {
],
});
if (response.choices[0].message.content) {
+ console.log(response.choices[0].message.content);
return response.choices[0].message.content;
}
return 'Missing labels';
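
gptImageLabel now takes the labeling prompt from the caller instead of hardcoding it, so existing call sites have to pass the old default explicitly. A sketch of a migrated call (imageSrc is an assumed input):

// previous behavior, now expressed at the call site
const labels = await gptImageLabel(imageSrc, 'Give three labels to describe this image.');
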
@@ -228,6 +310,41 @@ const gptHandwriting = async (src: string): Promise<string> => {
}
};
+const gptDescribeImage = async (image: string): Promise<string> => {
+ try {
+ const response = await openai.chat.completions.create({
+ model: 'gpt-4o',
+ temperature: 0,
+ messages: [
+ {
+ role: 'user',
+ content: [
+ {
+ type: 'text',
+ text: `Very briefly identify what this drawing is and list all the drawing elements and their location within the image. Do not include anything about the drawing style.`,
+ },
+ {
+ type: 'image_url',
+ image_url: {
+ url: `${image}`,
+ detail: 'low',
+ },
+ },
+ ],
+ },
+ ],
+ });
+ if (response.choices[0].message.content) {
+ console.log('GPT DESCRIPTION', response.choices[0].message.content);
+ return response.choices[0].message.content;
+ }
+ return 'Unknown drawing';
+ } catch (err) {
+ console.log(err);
+ return 'Error connecting with API';
+ }
+};
+
const gptDrawingColor = async (image: string, coords: string[]): Promise<string> => {
try {
const response = await openai.chat.completions.create({
@@ -255,11 +372,11 @@ const gptDrawingColor = async (image: string, coords: string[]): Promise<string>
if (response.choices[0].message.content) {
return response.choices[0].message.content;
}
- return 'Missing labels';
+ return 'Unknown drawing';
} catch (err) {
console.log(err);
return 'Error connecting with API';
}
};
-export { gptAPICall, gptImageCall, GPTCallType, gptImageLabel, gptGetEmbedding, gptHandwriting, gptDrawingColor };
+export { gptAPICall, gptImageCall, GPTCallType, gptImageLabel, gptGetEmbedding, gptHandwriting, gptDescribeImage, gptDrawingColor };
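
The new gptDescribeImage pairs naturally with gptDrawingColor: the description identifies the drawing and its elements, and the color call then assigns stroke/fill pairs per part. A hedged sketch of that pipeline (imageDataUrl and partCoords are assumed inputs, and this helper is not part of the diff):

const colorizeDrawing = async (imageDataUrl: string, partCoords: string[]) => {
    // identify the drawing and its elements first
    const description = await gptDescribeImage(imageDataUrl);
    // then request {#stroke #fill} pairs for each described part
    const colors = await gptDrawingColor(imageDataUrl, partCoords);
    return { description, colors };
};
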
diff --git a/src/client/apis/gpt/PresCustomization.ts b/src/client/apis/gpt/PresCustomization.ts
index 2262886a2..c465f098f 100644
--- a/src/client/apis/gpt/PresCustomization.ts
+++ b/src/client/apis/gpt/PresCustomization.ts
@@ -1,3 +1,5 @@
+import { PresEffect, PresEffectDirection } from '../../views/nodes/trails/PresEnums';
+import { AnimationSettingsProperties, easeItems } from '../../views/nodes/trails/SpringUtils';
import { openai } from './setup';
export enum CustomizationType {
@@ -10,15 +12,17 @@ interface PromptInfo {
}
const prompts: { [key: string]: PromptInfo } = {
trails: {
- description:
- 'We are customizing the properties and transition of a slide in a presentation. You are given the current properties of the slide in a json with the fields [title, presentation_transition, presentation_effect, config_zoom, presentation_effectDirection], as well as the prompt for how the user wants to change it. Return a json with the required fields: [title, presentation_transition, presentation_effect, config_zoom, presentation_effectDirection] by applying the changes in the prompt to the current state of the slide.',
+ description: `We are customizing the properties and transition of a slide in a presentation.
+ You are given the current properties of the slide in a json with the fields [title, presentation_transition, presentation_effect, config_zoom, presentation_effectDirection],
+ as well as the prompt for how the user wants to change it.
+ Return a json with the required fields: [title, presentation_transition, presentation_effect, config_zoom, presentation_effectDirection] by applying the changes in the prompt to the current state of the slide.`,
features: [],
},
};
// Allows you to register properties that are customizable
export const addCustomizationProperty = (type: CustomizationType, name: string, description: string, values?: string[]) => {
- values ? prompts[type].features.push({ name, description, values }) : prompts[type].features.push({ name, description });
+ prompts[type].features.push({ name, description, ...(values ? { values } : {}) });
};
// All the registered fields, make sure to update during registration, this
@@ -41,35 +45,34 @@ export const gptSlideProperties = [
// Registers slide properties
const setupPresSlideCustomization = () => {
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'title', 'is the title/name of the slide.');
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'presentation_transition', 'is a number in milliseconds for how long it should take to transition/move to a slide.');
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'presentation_easeFunc', 'is the easing function for the movement to the slide.', ['Ease', 'Ease In', 'Ease Out', 'Ease Out', 'Ease In Out', 'Linear']);
-
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'presentation_effect', 'is an effect applied to the slide when we transition to it.', ['None', 'Expand', 'Fade in', 'Bounce', 'Flip', 'Rotate', 'Roll']);
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'presentation_effectDirection', 'is what direction the effect is applied.', ['Enter from left', 'Enter from right', 'Enter from bottom', 'Enter from Top', 'Enter from center']);
- addCustomizationProperty(
- CustomizationType.PRES_TRAIL_SLIDE,
- 'presentation_effectTiming',
- "is a json object of the format: {type: string, stiffness: number, damping: number, mass: number}. Type is always “custom”. Controls the spring-based timing of the presentation effect animation. Stiffness, damping, and mass control the physics-based properties of spring animations. This is used to create a more natural looking timing, bouncy effects, etc. Use spring physics to adjust these parameters to match the user's description of how they want to animate the effect."
- );
-
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'config_zoom', 'is a number from 0 to 1.0 indicating the percentage we should zoom into the slide.');
-
- // boolean values
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'presentation_playAudio', 'is a boolean value indicating if we should play audio when we go to the slide.');
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'presentation_zoomText', 'is a boolean value indicating if we should zoom into text selections when we go to the slide.');
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'presentation_hideBefore', 'is a boolean value indicating if we should hide the slide before going to it.');
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'presentation_hide', 'is a boolean value indicating if we should hide the slide during the presentation.');
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'presentation_hideAfter', 'is a boolean value indicating if we should hide the slide after going to it.');
- addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, 'presentation_openInLightbox', 'is a boolean value indicating if we should open the slide in an overlay or lightbox view during the presentation.');
-};
+ const add = (name: string, val: string, opts?: string[]) => addCustomizationProperty(CustomizationType.PRES_TRAIL_SLIDE, name, val, opts);
+ const addBool = (name: string, val: string) => add(name, 'is a boolean value indicating if we should ' + val);
+ add('title', 'is the title/name of the slide.');
+ add('config_zoom', 'is a number from 0 to 1.0 indicating the percentage we should zoom into the slide.');
+ add('presentation_transition', 'is a number in milliseconds for how long it should take to transition/move to a slide.');
+ add('presentation_easeFunc', 'is the easing function for the movement to the slide.', easeItems.filter(val => val.text !== 'Custom').map(val => val.text));
+ add('presentation_effect', 'is an effect applied to the slide when we transition to it.', Object.keys(PresEffect));
+ add('presentation_effectDirection', 'is what direction the effect is applied.', Object.keys(PresEffectDirection).filter(key => key !== PresEffectDirection.None));
+ add('presentation_effectTiming', `is a json object of the format: {type: string, ${AnimationSettingsProperties.stiffness}: number, ${AnimationSettingsProperties.damping}: number, ${AnimationSettingsProperties.mass}: number}.
+ Type is always “custom”. Controls the spring-based timing of the presentation effect animation.
+ Stiffness, damping, and mass control the physics-based properties of spring animations.
+ This is used to create a more natural looking timing, bouncy effects, etc.
+ Use spring physics to adjust these parameters to match the user's description of how they want to animate the effect.`);
+
+
+ addBool('presentation_playAudio', 'play audio when we go to the slide.');
+ addBool('presentation_zoomText', 'zoom into text selections when we go to the slide.');
+ addBool('presentation_hideBefore', 'hide the slide before going to it.');
+ addBool('presentation_hide', 'hide the slide during the presentation.');
+ addBool('presentation_hideAfter', 'hide the slide after going to it.');
+ addBool('presentation_openInLightbox', 'open the slide in an overlay or lightbox view during the presentation.');
+}; // prettier-ignore
setupPresSlideCustomization();
-export const getSlideTransitionSuggestions = async (inputText: string) => {
+export const getSlideTransitionSuggestions = (inputText: string) => {
/**
- * Prompt: Generate an entrance animations from slower and gentler
- * to bouncier and more high energy
+ * Prompt: Generate entrance animations from slower and gentler to bouncier and more high energy
*
* Format:
* {
@@ -81,13 +84,19 @@ export const getSlideTransitionSuggestions = async (inputText: string) => {
* }
*/
- const prompt =
- "I want to generate four distinct types of slide effect animations. Return a json of the form {effect: string, direction: string, stiffness: number, damping: number, mass: number}[] with four elements. Effect is the type of animation; its only possible values are ['Expand', 'Fade in', 'Bounce', 'Flip', 'Rotate', 'Roll']. Direction is the direction that the animation starts from; its only possible values are ['Enter from left', 'Enter from right', 'Enter from bottom', 'Enter from Top', 'Enter from center']. Stiffness, damping, and mass control the physics-based properties of spring animations. This is used to create a more natural-looking timing, bouncy effects, etc. Use spring physics to adjust these parameters to animate the effect.";
+ const prompt = `I want to generate four distinct types of slide effect animations.
+ Return a json of the form { ${AnimationSettingsProperties.effect}: string, ${AnimationSettingsProperties.direction}: string, ${AnimationSettingsProperties.stiffness}: number, ${AnimationSettingsProperties.damping}: number, ${AnimationSettingsProperties.mass}: number}[] with four elements.
+ ${AnimationSettingsProperties.effect} is the type of animation; its only possible values are [${Object.keys(PresEffect).filter(key => key !== PresEffect.None).join(',')}].
+ ${AnimationSettingsProperties.direction} is the direction that the animation starts from;
+ its only possible values are [${Object.values(PresEffectDirection).filter(key => key !== PresEffectDirection.None).join(',')}].
+ ${AnimationSettingsProperties.stiffness}, ${AnimationSettingsProperties.damping}, and ${AnimationSettingsProperties.mass} control the physics-based properties of spring animations.
+ This is used to create a more natural-looking timing, bouncy effects, etc.
+ Use spring physics to adjust these parameters to animate the effect.`; // prettier-ignore
const customInput = inputText ?? 'Make them as contrasting as possible with different effects and timings ranging from gentle to energetic.';
- try {
- const response = await openai.chat.completions.create({
+ return openai.chat.completions
+ .create({
model: 'gpt-4',
messages: [
{ role: 'system', content: prompt },
@@ -95,39 +104,33 @@ export const getSlideTransitionSuggestions = async (inputText: string) => {
],
temperature: 0,
max_tokens: 1000,
+ })
+ .then(response => response.choices[0].message?.content ?? '')
+ .catch(err => {
+ console.log(err);
+ return 'Error connecting with API.';
});
- return response.choices[0].message?.content;
- } catch (err) {
- console.log(err);
- return 'Error connecting with API.';
- }
};
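
Since getSlideTransitionSuggestions now returns the promise chain directly, callers receive either the JSON string or the error message. A usage sketch, assuming the model honors the four-element format described in the prompt:

getSlideTransitionSuggestions('from gentle to energetic').then(raw => {
    try {
        // expected shape: { effect, direction, stiffness, damping, mass }[]
        const animations: { effect: string; direction: string; stiffness: number; damping: number; mass: number }[] = JSON.parse(raw);
        animations.forEach(a => console.log(a.effect, a.direction));
    } catch {
        console.log('Could not parse suggestion response:', raw);
    }
});
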
-export const gptTrailSlideCustomization = async (inputText: string, properties: any | any[]) => {
- let prompt = prompts.trails.description;
-
- prompts.trails.features.forEach(feature => {
- prompt += feature.name + ' ' + feature.description;
- if (feature.values) {
- prompt += `Its only possible values are [${feature.values.join(', ')}].`;
- }
- });
+export const gptTrailSlideCustomization = (inputText: string, properties: string) => {
+ const preamble = prompts.trails.description + ' ' + prompts.trails.features.map(feature => feature.name + ' ' + feature.description + (feature.values ? ` Its only possible values are [${feature.values.join(', ')}]` : '')).join('. ');
- prompt += 'Set unchanged values to null and make sure you include new properties if they are specified in the prompt even if they do not exist in current properties. Please only return the json with the keys described and their values.';
+ const prompt = `Set unchanged values to null and make sure you include new properties if they are specified in the prompt even if they do not exist in current properties.
+ Please only return the json with the keys described and their values.`;
- try {
- const response = await openai.chat.completions.create({
+ return openai.chat.completions
+ .create({
model: 'gpt-4',
messages: [
- { role: 'system', content: prompt },
- { role: 'user', content: `Prompt: ${inputText}, Current properties: ${JSON.stringify(properties)}` },
+ { role: 'system', content: preamble + prompt },
+ { role: 'user', content: `Prompt: ${inputText}, Current properties: ${properties}` },
],
temperature: 0,
max_tokens: 1000,
+ })
+ .then(response => response.choices[0].message?.content ?? '')
+ .catch(err => {
+ console.log(err);
+ return 'Error connecting with API.';
});
- return response.choices[0].message?.content;
- } catch (err) {
- console.log(err);
- return 'Error connecting with API.';
- }
};
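
gptTrailSlideCustomization now takes the current properties pre-serialized as a string, so the JSON.stringify moves to the caller. A sketch of a migrated call (the property values shown are hypothetical):

const updated = await gptTrailSlideCustomization(
    'slow down the transition and fade in from the left',
    JSON.stringify({ title: 'Intro', presentation_transition: 500, config_zoom: 1 })
);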