import { ChatCompletionMessageParam } from 'openai/resources';
import { openai } from './setup';

enum GPTCallType {
    SUMMARY = 'summary',
    COMPLETION = 'completion',
    EDIT = 'edit',
    CHATCARD = 'chatcard',
    FLASHCARD = 'flashcard',
    QUIZ = 'quiz',
    SORT = 'sort',
    DESCRIBE = 'describe',
    MERMAID = 'mermaid',
    DATA = 'data',
    TEMPLATE = 'template',
    VIZSUM = 'vizsum',
    VIZSUM2 = 'vizsum2',
}

type GPTCallOpts = {
    model: string;
    maxTokens: number;
    temp: number;
    prompt: string;
};

// Model, token limit, temperature, and base system prompt for each call type.
const callTypeMap: { [type: string]: GPTCallOpts } = {
    summary: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: 'Summarize the text given in simpler terms.' },
    edit: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: 'Reword the text.' },
    flashcard: {
        model: 'gpt-4-turbo',
        maxTokens: 512,
        temp: 0.5,
        prompt: 'Make flashcards out of this text with each question and answer labeled. Do not label each flashcard and do not include asterisks: ',
    },
    completion: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: "You are a helpful assistant. Answer the user's prompt." },
    mermaid: {
        model: 'gpt-4-turbo',
        maxTokens: 2048,
        temp: 0,
        prompt:
            "Here's an example of changing the color of a pie chart to help you: pie title Example \"Red\": 20 \"Blue\": 50 \"Green\": 30 " +
            "%%{init: {'theme': 'base', 'themeVariables': {'pie1': '#0000FF', 'pie2': '#00FF00', 'pie3': '#FF0000'}}}%% " +
            "Keep in mind that pie1 is the highest since it's sorted in descending order. " +
            "Here's an example of a mindmap: mindmap root((mindmap)) Origins Long history ::icon(fa fa-book) Popularisation British popular psychology author Tony Buzan Research On effectiveness and features On Automatic creation Uses Creative techniques Strategic planning Argument mapping Tools Pen and paper Mermaid. ",
    },
    data: {
        model: 'gpt-3.5-turbo',
        maxTokens: 256,
        temp: 0.5,
        prompt:
            "You are a helpful research assistant. Analyze the user's data to find meaningful patterns and/or correlations. " +
            'Please only return a JSON with a correlation column 1 property, a correlation column 2 property, and an analysis property. ',
    },
    sort: {
        model: 'gpt-4o',
        maxTokens: 2048,
        temp: 0.5,
        prompt:
            "I'm going to give you a list of descriptions. Each one is separated by ====== on either side. They will vary in length, so make sure to only separate when you see ======. " +
            'Sort them into lists by shared content. MAKE SURE EACH DESCRIPTOR IS IN ONLY ONE LIST. ' +
            'Generate only the list with each list separated by ====== with the elements separated by ~~~~~~. Try to do around 4 groups, but a little more or less is ok.',
    },
    describe: { model: 'gpt-4-vision-preview', maxTokens: 2048, temp: 0, prompt: 'Describe these images in 3-5 words' },
    chatcard: { model: 'gpt-4-turbo', maxTokens: 512, temp: 0.5, prompt: 'Answer the following question as a short flashcard response. Do not include a label.' },
    quiz: {
        model: 'gpt-4-turbo',
        maxTokens: 1024,
        temp: 0,
        prompt:
            'List unique differences between the content of the UserAnswer and Rubric. ' +
            'Before each difference, label it and provide any additional information the UserAnswer missed, and explain it in second person ' +
            'without separating it into UserAnswer and Rubric content and additional information. If there are no differences, say correct.',
    },
    template: {
        model: 'gpt-4-turbo',
        maxTokens: 512,
        temp: 0.5,
        prompt:
            'You will be given a list of field descriptions for multiple templates in the format {field #0: "description"}{field #1: "description"}{...}, ' +
            'and a list of column descriptions in the format {"title": "description"}{...}. Your job is to match columns with fields based on their descriptions. ' +
            'Your output should be in the following JSON format: {"Template title":{"#": "title", "#": "title", "#": "title" ...}, "Template title":{"#": "title", "#": "title", "#": "title" ...}}, ' +
            'where "Template title" represents the template, # represents the field #, and "title" the title of the column assigned to it. ' +
            'A filled out example might look like {"fivefield2":{0:"Name", 1:"Image", 2:"Caption", 3:"Position", 4:"Stats"}, "fivefield3":{0:"Image", 1:"Name", 2:"Caption", 3:"Stats", 4:"Position"}}. ' +
            'Include one object for each template. IT IS VERY IMPORTANT THAT YOU ONLY INCLUDE TEXT IN THE FORMAT ABOVE, WITH NO ADDITIONS WHATSOEVER. ' +
            "Do not include extraneous '#' characters, 'column' for columns, or 'template' for templates: ONLY THE TITLES AND NUMBERS. " +
            'Do this for each template whose fields are described. The descriptions are as follows: ',
    },
    vizsum: {
        model: 'gpt-4-turbo',
        maxTokens: 512,
        temp: 0.5,
        prompt:
            'Your job is to provide brief descriptions for columns in a dataset based on example rows. ' +
            "Your descriptions should be geared towards how each column's data might fit together into a visual template. " +
            'Would they make good titles, main focuses, captions, descriptions, etc.? ' +
            'Pay special attention to connections between columns, i.e. is there one column that specifically seems to describe/be related to another more than the rest? ' +
            'You should provide your analysis in JSON format like so: {"col1":"description", "col2":"description", ...}. ' +
            'DO NOT INCLUDE ANY OTHER TEXT, ONLY THE JSON.',
    },
    vizsum2: {
        model: 'gpt-4-turbo',
        maxTokens: 512,
        temp: 0.5,
        prompt:
            'Your job is to provide structured information on columns in a dataset based on example rows. You will categorize each column in two ways: by type and size. ' +
            'The size categories are as follows: tiny (one or two words), small (a sentence/multiple words), medium (a few sentences), large (a longer paragraph), and huge (a very long or multiple paragraphs). ' +
            "The type categories are as follows: visual (links/file paths to images, pdfs, maps, or any other visual media type), and text (plain text that isn't a link/file path). " +
            'Visual media should be assumed to have size "medium", "large", or "huge". ' +
            'You will give your responses in JSON format, like so: {"col1":{"type":"text", "size":"small"}, "col2":{"type":"visual", "size":"medium"}, ...}. ' +
            'DO NOT INCLUDE ANY OTHER TEXT, ONLY THE JSON.',
    },
};

// Most recent request and response, cached so an identical repeat call skips the API.
let lastCall = '';
let lastResp = '';

/**
 * Calls the OpenAI chat completions API with the options registered for the given call type.
 *
 * @param inputTextIn Text to process.
 * @param callType Kind of call; selects the model, token limit, temperature, and base system prompt.
 * @param prompt Optional text appended to the call type's base system prompt.
 * @returns The AI output, or an error message if the request fails.
 */
const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: string) => {
    // Summary, flashcard, and quiz inputs get a trailing period so the text ends as a complete sentence.
    const inputText = [GPTCallType.SUMMARY, GPTCallType.FLASHCARD, GPTCallType.QUIZ].includes(callType) ? inputTextIn + '.' : inputTextIn;
    const opts: GPTCallOpts = callTypeMap[callType];
    // Key the cache on both the call type and the input, so the same text sent
    // with a different call type is not answered with a stale response.
    const cacheKey = `${callType}:${inputText}`;
    if (lastCall === cacheKey) return lastResp;
    try {
        const usePrompt = prompt ? opts.prompt + prompt : opts.prompt;
        const messages: ChatCompletionMessageParam[] = [
            { role: 'system', content: usePrompt },
            { role: 'user', content: inputText },
        ];
        const response = await openai.chat.completions.create({
            model: opts.model,
            messages: messages,
            temperature: opts.temp,
            max_tokens: opts.maxTokens,
        });
        lastResp = response.choices[0].message.content ?? '';
        lastCall = cacheKey; // only cache once the request has succeeded
        return lastResp;
    } catch (err) {
        console.error(err);
        return 'Error connecting with API.';
    }
};

/**
 * Generates `n` images (default 1) for the given prompt.
 *
 * @returns The generated image URLs, or undefined if the request fails.
 */
const gptImageCall = async (prompt: string, n?: number) => {
    try {
        const response = await openai.images.generate({
            prompt: prompt,
            n: n ?? 1,
            size: '1024x1024',
        });
        return response.data.map((data: any) => data.url);
    } catch (err) {
        console.error(err);
    }
    return undefined;
};

/**
 * Computes a 256-dimensional embedding vector for the given text.
 *
 * @returns The embedding, or an empty array if the request fails.
 */
const gptGetEmbedding = async (src: string): Promise<number[]> => {
    try {
        const embeddingResponse = await openai.embeddings.create({
            model: 'text-embedding-3-large',
            input: [src],
            encoding_format: 'float',
            dimensions: 256,
        });
        // Assume the embeddingResponse structure is correct; adjust based on actual API response.
        const { embedding } = embeddingResponse.data[0];
        return embedding;
    } catch (err) {
        console.error(err);
        return [];
    }
};

/**
 * Labels the image at the given URL with three to five descriptive terms.
 */
const gptImageLabel = async (src: string): Promise<string> => {
    try {
        const response = await openai.chat.completions.create({
            model: 'gpt-4o',
            messages: [
                {
                    role: 'user',
                    content: [
                        { type: 'text', text: 'Give three to five labels to describe this image.' },
                        {
                            type: 'image_url',
                            image_url: {
                                url: src,
                                detail: 'low',
                            },
                        },
                    ],
                },
            ],
        });
        if (response.choices[0].message.content) {
            return response.choices[0].message.content;
        }
        return 'Missing labels';
    } catch (err) {
        console.error(err);
        return 'Error connecting with API';
    }
};

export { gptAPICall, gptImageCall, GPTCallType, gptImageLabel, gptGetEmbedding };
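
// Example usage (a sketch, not part of the module's public surface; it assumes
// the `openai` client exported from './setup' is configured with a valid API
// key and that the caller runs in an async context; `articleText` and the
// image URL below are placeholders, not values defined anywhere in this file):
//
//   const summary = await gptAPICall('Photosynthesis converts light energy into chemical energy...', GPTCallType.SUMMARY);
//   const cards = await gptAPICall(articleText, GPTCallType.FLASHCARD);
//   const urls = await gptImageCall('A watercolor painting of a fox', 2); // string[] | undefined
//   const vector = await gptGetEmbedding('text to embed');                // number[], length 256; [] on error
//   const labels = await gptImageLabel('https://example.com/photo.png');  // three to five descriptive labels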