import { ChatCompletionMessageParam } from 'openai/resources';
import { openai } from './setup';

enum GPTCallType {
    SUMMARY = 'summary',
    COMPLETION = 'completion',
    EDIT = 'edit',
    CHATCARD = 'chatcard',
    FLASHCARD = 'flashcard',
    QUIZ = 'quiz',
    SORT = 'sort',
    DESCRIBE = 'describe',
    MERMAID = 'mermaid',
    DATA = 'data',
    STACK = 'stack',
    PRONUNCIATION = 'pronunciation',
}

type GPTCallOpts = {
    model: string;
    maxTokens: number;
    temp: number;
    prompt: string;
};

// Per-call-type configuration: model, token limit, temperature, and system prompt.
const callTypeMap: { [type: string]: GPTCallOpts } = {
    summary: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: 'Summarize the text given in simpler terms.' },
    edit: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: 'Reword the text.' },
    flashcard: {
        model: 'gpt-4-turbo',
        maxTokens: 512,
        temp: 0.5,
        prompt: 'Make flashcards out of this text with each question and answer labeled as question and answer. Do not label each flashcard and do not include asterisks: ',
    },
    stack: {
        model: 'gpt-4o',
        maxTokens: 2048,
        temp: 0.7,
        prompt: 'Create a stack of flashcards out of this text with each question and answer labeled as question and answer. For some questions, ask "what is this image of" but tailored to the stack\'s theme and the image, and write a keyword that represents the image and label it "keyword". Otherwise, write none. Do not label each flashcard and do not include asterisks.',
    },
    completion: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: "You are a helpful assistant. Answer the user's prompt." },
    mermaid: {
        model: 'gpt-4-turbo',
        maxTokens: 2048,
        temp: 0,
        prompt: "(Here's an example of changing the color of a pie chart to help you: pie title Example \"Red\": 20 \"Blue\": 50 \"Green\": 30 %%{init: {'theme': 'base', 'themeVariables': {'pie1': '#0000FF', 'pie2': '#00FF00', 'pie3': '#FF0000'}}}%% Keep in mind that pie1 is the highest since it is sorted in descending order. Here's an example of a mindmap: mindmap root((mindmap)) Origins Long history ::icon(fa fa-book) Popularisation British popular psychology author Tony Buzan Research On effectiveness and features On Automatic creation Uses Creative techniques Strategic planning Argument mapping Tools Pen and paper Mermaid. ",
    },
    data: {
        model: 'gpt-3.5-turbo',
        maxTokens: 256,
        temp: 0.5,
        prompt: "You are a helpful research assistant. Analyze the user's data to find meaningful patterns and/or correlations. Please only return a JSON with a correlation column 1 property, a correlation column 2 property, and an analysis property. ",
    },
    sort: {
        model: 'gpt-4o',
        maxTokens: 2048,
        temp: 0.5,
        prompt: "I'm going to give you a list of descriptions. Each one is separated by ====== on either side. They will vary in length, so make sure to only separate when you see ======. Sort them into lists by shared content. MAKE SURE EACH DESCRIPTOR IS IN ONLY ONE LIST. Generate only the list with each list separated by ====== with the elements separated by ~~~~~~. Try to do around 4 groups, but a little more or less is ok.",
    },
    describe: { model: 'gpt-4-vision-preview', maxTokens: 2048, temp: 0, prompt: 'Describe these images in 3-5 words' },
    chatcard: { model: 'gpt-4-turbo', maxTokens: 512, temp: 0.5, prompt: 'Answer the following question as a short flashcard response. Do not include a label.' },
    quiz: {
        model: 'gpt-4-turbo',
        maxTokens: 1024,
        temp: 0,
        prompt: 'List unique differences between the content of the UserAnswer and Rubric. Before each difference, label it and provide any additional information the UserAnswer missed and explain it in second person without separating it into UserAnswer and Rubric content and additional information. If the Rubric is incorrect, explain why. If there are no differences, say correct. If it is empty, say there is nothing for me to evaluate. If it is comparing two words, look for spelling and not capitalization and not punctuation.',
    },
    pronunciation: {
        model: 'gpt-4-turbo',
        maxTokens: 1024,
        temp: 0.1,
        prompt: '',
    },
};

// Single-entry memo: repeating the exact same input returns the cached response
// instead of making another API call.
let lastCall = '';
let lastResp = '';

/**
 * Calls the OpenAI chat completions API using the preset configured for the given call type.
 *
 * @param inputTextIn Text to process
 * @param callType Which preset from callTypeMap to use
 * @param prompt Optional text appended to the preset system prompt
 * @returns AI output, or an error message if the request fails
 */
const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: string) => {
    // Append a period for call types whose prompts expect complete sentences.
    const inputText = [GPTCallType.SUMMARY, GPTCallType.FLASHCARD, GPTCallType.QUIZ, GPTCallType.STACK].includes(callType) ? inputTextIn + '.' : inputTextIn;
    const opts: GPTCallOpts = callTypeMap[callType];
    if (lastCall === inputText) return lastResp;
    try {
        lastCall = inputText;
        const usePrompt = prompt ? opts.prompt + prompt : opts.prompt;
        const messages: ChatCompletionMessageParam[] = [
            { role: 'system', content: usePrompt },
            { role: 'user', content: inputText },
        ];
        const response = await openai.chat.completions.create({
            model: opts.model,
            messages: messages,
            temperature: opts.temp,
            max_tokens: opts.maxTokens,
        });
        lastResp = response.choices[0].message.content ?? '';
        return lastResp;
    } catch (err) {
        console.log(err);
        return 'Error connecting with API.';
    }
};

/**
 * Generates n images (default 1) from a text prompt.
 *
 * @returns The generated image URLs, or undefined if the request fails
 */
const gptImageCall = async (prompt: string, n?: number) => {
    try {
        const response = await openai.images.generate({
            prompt: prompt,
            n: n ?? 1,
            size: '1024x1024',
        });
        return response.data.map((data: any) => data.url);
    } catch (err) {
        console.error(err);
    }
    return undefined;
};

/**
 * Computes a 256-dimensional embedding vector for the given text.
 *
 * @returns The embedding, or an empty array if the request fails
 */
const gptGetEmbedding = async (src: string): Promise<number[]> => {
    try {
        const embeddingResponse = await openai.embeddings.create({
            model: 'text-embedding-3-large',
            input: [src],
            encoding_format: 'float',
            dimensions: 256,
        });
        const { embedding } = embeddingResponse.data[0];
        return embedding;
    } catch (err) {
        console.log(err);
        return [];
    }
};

/**
 * Labels an image (passed by URL) using a multimodal chat completion.
 *
 * @returns The model's label text, or an error/fallback message
 */
const gptImageLabel = async (src: string, prompt: string): Promise<string> => {
    try {
        const response = await openai.chat.completions.create({
            model: 'gpt-4o',
            messages: [
                {
                    role: 'user',
                    content: [
                        { type: 'text', text: prompt },
                        {
                            type: 'image_url',
                            image_url: {
                                url: src,
                                detail: 'low',
                            },
                        },
                    ],
                },
            ],
        });
        if (response.choices[0].message.content) {
            return response.choices[0].message.content;
        }
        return 'Missing labels';
    } catch (err) {
        console.log(err);
        return 'Error connecting with API';
    }
};

export { gptAPICall, gptImageCall, GPTCallType, gptImageLabel, gptGetEmbedding };
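// Usage sketch (illustrative only, not part of this module): assuming `openai` in
// './setup' is a configured OpenAI client and these calls run inside an async
// function, a caller might use the exports like this. The input strings and the
// image URL below are hypothetical placeholders.
//
//   const summary = await gptAPICall('Long passage of text...', GPTCallType.SUMMARY);
//   const cards = await gptAPICall('Source material...', GPTCallType.FLASHCARD);
//   const urls = await gptImageCall('a watercolor fox', 2);   // string[] of image URLs, or undefined
//   const vec = await gptGetEmbedding('some text to embed');  // number[] of length 256, or []
//   const label = await gptImageLabel('https://example.com/photo.png', 'Describe this image');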