Diffstat (limited to 'src/client/apis/gpt/GPT.ts')
-rw-r--r--  src/client/apis/gpt/GPT.ts  57
1 file changed, 50 insertions, 7 deletions
diff --git a/src/client/apis/gpt/GPT.ts b/src/client/apis/gpt/GPT.ts
index 078ac3e55..6857246da 100644
--- a/src/client/apis/gpt/GPT.ts
+++ b/src/client/apis/gpt/GPT.ts
@@ -5,6 +5,8 @@ enum GPTCallType {
SUMMARY = 'summary',
COMPLETION = 'completion',
EDIT = 'edit',
+ SORT = 'sort',
+ DESCRIBE = 'describe',
MERMAID = 'mermaid',
DATA = 'data',
}
@@ -16,10 +18,6 @@ type GPTCallOpts = {
prompt: string;
};
-/**
- * Replace completions (deprecated) with chat
- */
-
const callTypeMap: { [type: string]: GPTCallOpts } = {
// newest model: gpt-4
summary: { model: 'gpt-3.5-turbo', maxTokens: 256, temp: 0.5, prompt: 'Summarize the text given in simpler terms.' },
@@ -37,6 +35,13 @@ const callTypeMap: { [type: string]: GPTCallOpts } = {
temp: 0.5,
prompt: "You are a helpful resarch assistant. Analyze the user's data to find meaningful patterns and/or correlation. Please only return a JSON with a correlation column 1 propert, a correlation column 2 property, and an analysis property. ",
},
+    sort: {
+        model: 'gpt-4o',
+        maxTokens: 2048,
+        temp: 0.5,
+        prompt: "I'm going to give you a list of descriptions. Each one is separated by ====== on either side. They will vary in length, so make sure to only separate when you see ======. Sort them into lists by shared content. MAKE SURE EACH DESCRIPTOR IS IN ONLY ONE LIST. Generate only the list with each list separated by ====== with the elements separated by ~~~~~~. Try to do around 4 groups, but a little more or less is ok.",
+    },
+    describe: { model: 'gpt-4-vision-preview', maxTokens: 2048, temp: 0, prompt: 'Describe these images in 3-5 words' },
};
let lastCall = '';
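The sort prompt above asks the model to return groups delimited by ====== with the items inside each group delimited by ~~~~~~. A minimal sketch of how a caller could split that response back into string arrays (the parseSortGroups helper is an illustrative assumption, not part of this diff):

// Sketch only: parse the delimited response produced by the 'sort' prompt.
// Assumes the model followed the ====== / ~~~~~~ convention exactly.
const parseSortGroups = (response: string): string[][] =>
    response
        .split('======')                  // groups are separated by ======
        .map(group => group.trim())
        .filter(group => group.length > 0)
        .map(group =>
            group
                .split('~~~~~~')          // items within a group are separated by ~~~~~~
                .map(item => item.trim())
                .filter(item => item.length > 0)
        );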
@@ -53,7 +58,7 @@ const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: a
if (lastCall === inputText) return lastResp;
try {
const configuration: ClientOptions = {
- apiKey: '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',
+ apiKey: process.env.OPENAI_KEY,
dangerouslyAllowBrowser: true,
};
lastCall = inputText;
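Because this module runs in the browser (dangerouslyAllowBrowser is set), process.env.OPENAI_KEY has to be inlined when the client bundle is built. One common way to do that is webpack's DefinePlugin; this is only a sketch of that wiring and may not match the project's actual build setup:

// webpack.config.ts (sketch): inline OPENAI_KEY from the build environment.
// Assumes a webpack-based build; the real bundler/config may differ.
import { DefinePlugin } from 'webpack';

export default {
    // ...existing entry/output/loader configuration...
    plugins: [
        new DefinePlugin({
            'process.env.OPENAI_KEY': JSON.stringify(process.env.OPENAI_KEY ?? ''),
        }),
    ],
};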
@@ -69,6 +74,7 @@ const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: a
model: opts.model,
messages: messages,
temperature: opts.temp,
+ max_tokens: opts.maxTokens,
});
lastResp = response.choices[0].message.content ?? '';
return lastResp;
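With max_tokens now forwarded from the per-type options, a call site for the new SORT type would look roughly like this (import path and sample input are illustrative assumptions):

import { gptAPICall, GPTCallType } from './GPT'; // path assumed

// Sketch: group a batch of descriptions with the new SORT call type.
const sortDescriptions = async (descriptions: string[]) => {
    const input = descriptions.map(d => `======${d}======`).join('\n');
    // opts.maxTokens (2048 for 'sort') is now sent to the API as max_tokens
    return gptAPICall(input, GPTCallType.SORT);
};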
@@ -78,6 +84,42 @@ const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: a
}
};
+const gptImageLabel = async (imgUrl: string): Promise<string> => {
+    try {
+        const configuration: ClientOptions = {
+            apiKey: process.env.OPENAI_KEY,
+            dangerouslyAllowBrowser: true,
+        };
+
+        const openai = new OpenAI(configuration);
+        const response = await openai.chat.completions.create({
+            model: 'gpt-4o',
+            messages: [
+                {
+                    role: 'user',
+                    content: [
+                        { type: 'text', text: 'Describe this image in 3-5 words' },
+                        {
+                            type: 'image_url',
+                            image_url: {
+                                url: `${imgUrl}`,
+                            },
+                        },
+                    ],
+                },
+            ],
+        });
+        if (response.choices[0].message.content) {
+            return response.choices[0].message.content;
+        } else {
+            return ':(';
+        }
+    } catch (err) {
+        console.log(err);
+        return 'Error connecting with API';
+    }
+};
+
const gptImageCall = async (prompt: string, n?: number) => {
try {
const configuration: ClientOptions = {
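A usage sketch for the new gptImageLabel helper; the image URL and import path are placeholder assumptions:

import { gptImageLabel } from './GPT'; // path assumed

// Sketch: label a single image; resolves to a short description,
// or 'Error connecting with API' if the request fails.
const labelImage = async () => {
    const label = await gptImageLabel('https://example.com/photo.jpg'); // placeholder URL
    console.log(label);
};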
@@ -91,11 +133,12 @@ const gptImageCall = async (prompt: string, n?: number) => {
n: n ?? 1,
size: '1024x1024',
});
- return response.data.map(data => data.url);
+ return response.data.map((data: any) => data.url);
+ // return response.data.data[0].url;
} catch (err) {
console.error(err);
}
return undefined;
};
-export { gptAPICall, gptImageCall, GPTCallType };
+export { gptAPICall, gptImageCall, gptImageLabel, GPTCallType };
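Finally, a sketch of consuming gptImageCall's mapped URL list through the updated exports (prompt text and count are illustrative):

import { gptImageCall } from './GPT'; // path assumed

// Sketch: generate two 1024x1024 images and log their URLs.
// gptImageCall resolves to an array of URLs, or undefined if the request failed.
const generateImages = async () => {
    const urls = await gptImageCall('a watercolor map of a city', 2);
    urls?.forEach(url => console.log(url));
};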