aboutsummaryrefslogtreecommitdiff
path: root/src/client/apis/gpt/GPT.ts
diff options
context:
space:
mode:
authorbobzel <zzzman@gmail.com>2025-02-19 15:05:47 -0500
committerbobzel <zzzman@gmail.com>2025-02-19 15:05:47 -0500
commitf6581aebd603dcdd84e033acffe46352ce5a4485 (patch)
tree63c5735a015f6ba7a0c302d0d81a66488de5ae04 /src/client/apis/gpt/GPT.ts
parentcf791104c0b1608e37c3cf2d25dac7f6f58a1b66 (diff)
more gptpopup cleanup.
Diffstat (limited to 'src/client/apis/gpt/GPT.ts')
-rw-r--r--src/client/apis/gpt/GPT.ts59
1 files changed, 30 insertions, 29 deletions
diff --git a/src/client/apis/gpt/GPT.ts b/src/client/apis/gpt/GPT.ts
index e1ae99d97..6d9bc1d06 100644
--- a/src/client/apis/gpt/GPT.ts
+++ b/src/client/apis/gpt/GPT.ts
@@ -1,12 +1,11 @@
import { ChatCompletionMessageParam, Image } from 'openai/resources';
import { openai } from './setup';
-export enum GPTTypeStyle {
+export enum GPTDocCommand {
AssignTags = 1,
Filter = 2,
- DocInfo = 3,
- GeneralInfo = 4,
- SortDocs = 5,
+ GetInfo = 3,
+ Sort = 4,
}
export const DescriptionSeperator = '======';
@@ -18,8 +17,6 @@ enum GPTCallType {
EDIT = 'edit',
CHATCARD = 'chatcard', // a single flashcard style response to a question
    FLASHCARD = 'flashcard', // a set of flashcard question/answer responses to a topic
- QUIZ = 'quiz',
- SORT = 'sort',
DESCRIBE = 'describe',
MERMAID = 'mermaid',
DATA = 'data',
@@ -27,15 +24,17 @@ enum GPTCallType {
PRONUNCIATION = 'pronunciation',
DRAW = 'draw',
COLOR = 'color',
- RUBRIC = 'rubric', // needs to be filled in below
- TYPE = 'type', // needs to be filled in below
- SUBSET = 'subset', // needs to be filled in below
- INFO = 'info', // needs to be filled in below
TEMPLATE = 'template',
VIZSUM = 'vizsum',
VIZSUM2 = 'vizsum2',
FILL = 'fill',
COMPLETEPROMPT = 'completeprompt',
+ QUIZDOC = 'quiz_doc',
+ MAKERUBRIC = 'make_rubric', // create a definition rubric for a document to be used when quizzing the user
+ COMMANDTYPE = 'command_type', // Determine the type of command being made (GPTQueryType - eg., AssignTags, Sort, Filter, DocInfo, GenInfo) and possibly some parameters (eg, Tag type for Tags)
+ SUBSETDOCS = 'subset_docs', // select a subset of documents based on their descriptions
+ DOCINFO = 'doc_info', // provide information about a document
+ SORTDOCS = 'sort_docs',
}
type GPTCallOpts = {
@@ -69,7 +68,7 @@ const callTypeMap: { [type in GPTCallType]: GPTCallOpts } = {
temp: 0.5,
        prompt: "You are a helpful research assistant. Analyze the user's data to find meaningful patterns and/or correlation. Please only return a JSON with a correlation column 1 property, a correlation column 2 property, and an analysis property. ",
},
- sort: {
+ sort_docs: {
model: 'gpt-4o',
maxTokens: 2048,
temp: 0.25,
@@ -89,7 +88,7 @@ const callTypeMap: { [type in GPTCallType]: GPTCallOpts } = {
        prompt: 'Make flashcards out of this text with each question and answer labeled as question and answer. Create a title for each question and answer that is labeled as "title". Do not label each flashcard and do not include asterisks: ',
},
chatcard: { model: 'gpt-4-turbo', maxTokens: 512, temp: 0.5, prompt: 'Answer the following question as a short flashcard response. Do not include a label.' },
- quiz: {
+ quiz_doc: {
model: 'gpt-4-turbo',
maxTokens: 1024,
temp: 0,
@@ -138,20 +137,20 @@ const callTypeMap: { [type in GPTCallType]: GPTCallOpts } = {
temp: 0.5,
        prompt: 'You will be coloring drawings. You will be given what the drawing is, then a list of descriptions for parts of the drawing. Based on each description, respond with the stroke and fill color that it should be. Follow the rules: 1. Avoid using black for stroke color 2. Make the stroke color 1-3 shades darker than the fill color 3. Use the same colors when possible. Format as {#abcdef #abcdef}, making sure there is a color for each description, and do not include any additional text.',
},
- type: {
+ command_type: {
model: 'gpt-4-turbo',
maxTokens: 1024,
temp: 0,
prompt: `I'm going to provide you with a question.
Based on the question, is the user asking you to
- ${GPTTypeStyle.AssignTags}. Assigns docs with tags(like star / heart etc)/labels,
- ${GPTTypeStyle.DocInfo}. Provide information about a specific doc
- ${GPTTypeStyle.Filter}. Filter docs based on a question/information
- ${GPTTypeStyle.GeneralInfo}. Provide general information
- ${GPTTypeStyle.SortDocs}. Put cards in a specific order.
- Answer with only the number for 2-5. For number one, provide the number (1) and the appropriate tag`,
+ ${GPTDocCommand.AssignTags}. Assigns docs with tags(like star / heart etc)/labels.
+ ${GPTDocCommand.GetInfo}. Provide information about a specific doc.
+ ${GPTDocCommand.Filter}. Filter docs based on a question/information.
+ ${GPTDocCommand.Sort}. Put docs in a specific order.
+ Answer with only the number for ${GPTDocCommand.GetInfo}-${GPTDocCommand.Sort}.
+ For number one, provide the number (${GPTDocCommand.AssignTags}) and the appropriate tag`,
},
- subset: {
+ subset_docs: {
model: 'gpt-4-turbo',
maxTokens: 1024,
temp: 0,
@@ -164,14 +163,14 @@ const callTypeMap: { [type in GPTCallType]: GPTCallOpts } = {
It is VERY important that you format it exactly as described, ensuring the proper number of '${DescriptionSeperator[0]}' and '${DocSeperator[0]}' (${DescriptionSeperator.length} of each) and NO commas`,
},
- info: {
+ doc_info: {
model: 'gpt-4-turbo',
maxTokens: 1024,
temp: 0,
prompt: `Answer the user's question with a short (<100 word) response.
If a particular document is selected I will provide that information (which may help with your response)`,
},
- rubric: {
+ make_rubric: {
model: 'gpt-4-turbo',
maxTokens: 1024,
temp: 0,
@@ -188,17 +187,15 @@ let lastResp = '';
* @returns AI Output
*/
const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: string, dontCache?: boolean) => {
- const inputText = inputTextIn + ([GPTCallType.SUMMARY, GPTCallType.FLASHCARD, GPTCallType.QUIZ, GPTCallType.STACK].includes(callType) ? '.' : '');
+ const inputText = inputTextIn + ([GPTCallType.SUMMARY, GPTCallType.FLASHCARD, GPTCallType.QUIZDOC, GPTCallType.STACK].includes(callType) ? '.' : '');
const opts = callTypeMap[callType];
if (!opts) {
console.log('The query type:' + callType + ' requires a configuration.');
return 'Error connecting with API.';
}
- if (lastCall === inputText && dontCache !== true) return lastResp;
+ if (lastCall === inputText && dontCache !== true && lastResp) return lastResp;
try {
- lastCall = inputText;
-
- const usePrompt = prompt ? prompt + opts.prompt : opts.prompt;
+ const usePrompt = prompt ? prompt + '.' + opts.prompt : opts.prompt;
const messages: ChatCompletionMessageParam[] = [
{ role: 'system', content: usePrompt },
{ role: 'user', content: inputText },
@@ -210,8 +207,12 @@ const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: s
temperature: opts.temp,
max_tokens: opts.maxTokens,
});
- lastResp = response.choices[0].message.content ?? '';
- return lastResp;
+ const result = response.choices[0].message.content ?? '';
+ if (!dontCache) {
+ lastResp = result;
+ lastCall = inputText;
+ }
+ return result;
} catch (err) {
console.log(err);
return 'Error connecting with API.';