From 5ff0bef5d3c4825aa7210a26c98aae3b24f4a835 Mon Sep 17 00:00:00 2001
From: alyssaf16
Date: Fri, 17 May 2024 13:18:40 -0400
Subject: chatcards, quizcards, and ai flashcards

---
 src/client/apis/gpt/GPT.ts | 36 +++++++++++++++++++++++++++---------
 1 file changed, 27 insertions(+), 9 deletions(-)

diff --git a/src/client/apis/gpt/GPT.ts b/src/client/apis/gpt/GPT.ts
index 2757fc830..6600ddab2 100644
--- a/src/client/apis/gpt/GPT.ts
+++ b/src/client/apis/gpt/GPT.ts
@@ -1,10 +1,13 @@
 import { ClientOptions, OpenAI } from 'openai';
+import { ChatCompletionMessageParam } from 'openai/resources';
 
 enum GPTCallType {
     SUMMARY = 'summary',
     COMPLETION = 'completion',
     EDIT = 'edit',
+    CHATCARD = 'chatcard',
     FLASHCARD = 'flashcard',
+    QUIZ = 'quiz',
 }
 
 type GPTCallOpts = {
@@ -15,10 +18,17 @@
 };
 
 const callTypeMap: { [type: string]: GPTCallOpts } = {
-    summary: { model: 'gpt-3.5-turbo-instruct', maxTokens: 256, temp: 0.5, prompt: 'Summarize this text in simpler terms: ' },
-    edit: { model: 'gpt-3.5-turbo-instruct', maxTokens: 256, temp: 0.5, prompt: 'Reword this: ' },
-    flashcard: { model: 'gpt-3.5-turbo-instruct', maxTokens: 512, temp: 0.5, prompt: 'Make flashcards out of this text with questions and answers: ' },
-    completion: { model: 'gpt-3.5-turbo-instruct', maxTokens: 256, temp: 0.5, prompt: '' },
+    summary: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: 'Summarize this text in simpler terms: ' },
+    edit: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: 'Reword this: ' },
+    flashcard: { model: 'gpt-4-turbo', maxTokens: 512, temp: 0.5, prompt: 'Make flashcards out of this text with each question and answer labeled. Do not label each flashcard and do not include asterisks: ' },
+    completion: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: '' },
+    chatcard: { model: 'gpt-4-turbo', maxTokens: 512, temp: 0.5, prompt: 'Answer the following question as a short flashcard response. Do not include a label.' },
+    quiz: {
+        model: 'gpt-4-turbo',
+        maxTokens: 1024,
+        temp: 0,
+        prompt: 'List unique differences between the content of the UserAnswer and Rubric. Before each difference, label it and provide any additional information the UserAnswer missed and explain it in second person without separating it into UserAnswer and Rubric content and additional information. If there are no differences, say correct',
+    },
 };
 
 /**
@@ -28,7 +38,8 @@ const callTypeMap: { [type: string]: GPTCallOpts } = {
  * @returns AI Output
  */
 const gptAPICall = async (inputText: string, callType: GPTCallType) => {
-    if (callType === GPTCallType.SUMMARY || callType == GPTCallType.FLASHCARD) inputText += '.';
+    if (!inputText) return 'Please provide a response.';
+    if (callType === GPTCallType.SUMMARY || callType === GPTCallType.FLASHCARD || callType === GPTCallType.QUIZ) inputText += '.';
     const opts: GPTCallOpts = callTypeMap[callType];
     try {
         const configuration: ClientOptions = {
@@ -36,13 +47,20 @@ const gptAPICall = async (inputText: string, callType: GPTCallType) => {
             dangerouslyAllowBrowser: true,
         };
         const openai = new OpenAI(configuration);
-        const response = await openai.completions.create({
+
+        const messages: ChatCompletionMessageParam[] = [
+            { role: 'system', content: opts.prompt },
+            { role: 'user', content: inputText },
+        ];
+
+        const response = await openai.chat.completions.create({
             model: opts.model,
-            max_tokens: opts.maxTokens,
+            messages,
             temperature: opts.temp,
-            prompt: `${opts.prompt}${inputText}`,
+            max_tokens: opts.maxTokens,
         });
-        return response.choices[0].text;
+
+        return response.choices[0].message.content;
     } catch (err) {
         console.log(err);
         return 'Error connecting with API.';
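
A minimal usage sketch of the updated gptAPICall with the new call types, for reference. It assumes gptAPICall and GPTCallType are exported from src/client/apis/gpt/GPT.ts (the export statements are outside this hunk), the import path shown is hypothetical, and the labeled UserAnswer/Rubric input format is inferred from the quiz prompt rather than defined by this patch.

// Hypothetical import path; adjust to wherever GPT.ts exports these symbols.
import { gptAPICall, GPTCallType } from './apis/gpt/GPT';

const demo = async () => {
    // CHATCARD: answer a single question as a short flashcard-style response.
    const answer = await gptAPICall('What does the mitochondria do?', GPTCallType.CHATCARD);

    // QUIZ: compare a user's answer against a rubric. The labeled format below is an
    // assumption based on the prompt's references to "UserAnswer" and "Rubric".
    const feedback = await gptAPICall(
        'UserAnswer: Mitochondria make proteins.\nRubric: Mitochondria produce ATP through cellular respiration.',
        GPTCallType.QUIZ
    );

    // Each call resolves to the model's message content, or 'Error connecting with API.' on failure.
    console.log(answer, feedback);
};

demo();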