Diffstat (limited to 'src/client/apis')
-rw-r--r--   src/client/apis/gpt/GPT.ts   35
1 files changed, 16 insertions, 19 deletions
diff --git a/src/client/apis/gpt/GPT.ts b/src/client/apis/gpt/GPT.ts
index 30194f9f8..1f50ad0b8 100644
--- a/src/client/apis/gpt/GPT.ts
+++ b/src/client/apis/gpt/GPT.ts
@@ -5,7 +5,8 @@ enum GPTCallType {
     SUMMARY = 'summary',
     COMPLETION = 'completion',
     EDIT = 'edit',
-    DATA = 'data',
+    // MERMAID='mermaid'
+    SORT = 'sort'
 }
 
 type GPTCallOpts = {
@@ -15,45 +16,40 @@ type GPTCallOpts = {
     prompt: string;
 };
 
-/**
- * Replace completions (deprecated) with chat
- */
-
 const callTypeMap: { [type: string]: GPTCallOpts } = {
-    // newest model: gpt-4
-    summary: { model: 'gpt-3.5-turbo', maxTokens: 256, temp: 0.5, prompt: 'Summarize the text given in simpler terms.' },
-    edit: { model: 'gpt-3.5-turbo', maxTokens: 256, temp: 0.5, prompt: 'Reword the text.' },
-    completion: { model: 'gpt-3.5-turbo', maxTokens: 256, temp: 0.5, prompt: "You are a helpful assistant. Answer the user's prompt." },
-    data: { model: 'gpt-3.5-turbo', maxTokens: 256, temp: 0.5, prompt: "You are a helpful resarch assistant. Analyze the user's data to find meaningful patterns and/or correlation. Please keep your response short and to the point." },
+    summary: { model: 'gpt-3.5-turbo-instruct', maxTokens: 256, temp: 0.5, prompt: 'Summarize this text in simpler terms: ' },
+    edit: { model: 'gpt-3.5-turbo-instruct', maxTokens: 256, temp: 0.5, prompt: 'Reword this: ' },
+    completion: { model: 'gpt-3.5-turbo-instruct', maxTokens: 256, temp: 0.5, prompt: '' },
+    sort:{model:'gpt-4-turbo',maxTokens:2048,temp:0,prompt:"I'm going to give you a list of strings. Sort them into lists by shared content. The format of should be: '[string1, string2], [string 3, string4][string5, string6]' where the actual numbers of strings in each category / total categorys is arbritray and up to your interpretation"}
 };
 
-/**
+
+/**`
  * Calls the OpenAI API.
  *
  * @param inputText Text to process
  * @returns AI Output
  */
-const gptAPICall = async (inputText: string, callType: GPTCallType, prompt?: any) => {
+const gptAPICall = async (inputText: string, callType: GPTCallType) => {
     if (callType === GPTCallType.SUMMARY) inputText += '.';
     const opts: GPTCallOpts = callTypeMap[callType];
     try {
         const configuration: ClientOptions = {
-            apiKey: process.env.OPENAI_KEY,
+            apiKey: "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",
             dangerouslyAllowBrowser: true,
         };
         const openai = new OpenAI(configuration);
-        let usePrompt = prompt ? opts.prompt + prompt : opts.prompt;
         let messages: ChatCompletionMessageParam[] = [
-            { role: 'system', content: usePrompt },
+            { role: 'system', content: opts.prompt },
            { role: 'user', content: inputText },
         ];
         const response = await openai.chat.completions.create({
             model: opts.model,
-            max_tokens: opts.maxTokens,
+            messages: messages,
             temperature: opts.temp,
-            messages,
+            max_tokens: opts.maxTokens,
         });
         const content = response.choices[0].message.content;
         return content;
@@ -76,11 +72,12 @@ const gptImageCall = async (prompt: string, n?: number) => {
             n: n ?? 1,
             size: '1024x1024',
         });
-        return response.data.map(data => data.url);
+        return response.data.map((data: any) => data.url);
+        // return response.data.data[0].url;
     } catch (err) {
         console.error(err);
         return;
     }
 };
 
-export { gptAPICall, gptImageCall, GPTCallType };
+export { gptAPICall, gptImageCall, GPTCallType };
\ No newline at end of file
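
For context, here is a minimal usage sketch of how the module might be consumed after this change. It assumes the exports shown in the diff and a relative import path of './GPT'; the parseSortedGroups helper and the sample inputs are illustrative, not part of the commit.

// Sketch only: imports the exports shown in the diff above.
import { gptAPICall, gptImageCall, GPTCallType } from './GPT';

// Hypothetical helper: the 'sort' prompt asks the model for output like
// "[a, b], [c, d]", so this splits each bracketed group into a string array.
const parseSortedGroups = (raw: string): string[][] =>
    (raw.match(/\[[^\]]*\]/g) ?? []).map(group =>
        group
            .slice(1, -1)            // drop the surrounding brackets
            .split(',')
            .map(s => s.trim())
            .filter(s => s.length > 0)
    );

const demo = async () => {
    // The new SORT call type groups related strings; temperature 0 keeps it near-deterministic.
    const raw = await gptAPICall('apple, merlot, banana, chardonnay', GPTCallType.SORT);
    const groups = raw ? parseSortedGroups(raw) : [];
    console.log(groups); // e.g. [['apple', 'banana'], ['merlot', 'chardonnay']]

    // gptImageCall returns an array of image URLs (one per requested image), or undefined on error.
    const urls = await gptImageCall('a watercolor fox', 2);
    console.log(urls?.length);
};

demo();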
