Diffstat (limited to 'src/client/apis')
 src/client/apis/gpt/GPT.ts | 90 +++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 81 insertions(+), 9 deletions(-)
diff --git a/src/client/apis/gpt/GPT.ts b/src/client/apis/gpt/GPT.ts
index 55667684e..455352068 100644
--- a/src/client/apis/gpt/GPT.ts
+++ b/src/client/apis/gpt/GPT.ts
@@ -5,6 +5,9 @@ enum GPTCallType {
SUMMARY = 'summary',
COMPLETION = 'completion',
EDIT = 'edit',
+ SORT = 'sort',
+ DESCRIBE = 'describe',
+ MERMAID = 'mermaid',
DATA = 'data',
}
@@ -15,21 +18,30 @@ type GPTCallOpts = {
prompt: string;
};
-/**
- * Replace completions (deprecated) with chat
- */
-
const callTypeMap: { [type: string]: GPTCallOpts } = {
// newest model: gpt-4
summary: { model: 'gpt-3.5-turbo', maxTokens: 256, temp: 0.5, prompt: 'Summarize the text given in simpler terms.' },
edit: { model: 'gpt-3.5-turbo', maxTokens: 256, temp: 0.5, prompt: 'Reword the text.' },
completion: { model: 'gpt-3.5-turbo', maxTokens: 256, temp: 0.5, prompt: "You are a helpful assistant. Answer the user's prompt." },
+ mermaid: {
+ model: 'gpt-4-turbo',
+ maxTokens: 2048,
+ temp: 0,
+ prompt: "(Heres an example of changing color of a pie chart to help you pie title Example \"Red\": 20 \"Blue\": 50 \"Green\": 30 %%{init: {'theme': 'base', 'themeVariables': {'pie1': '#0000FF', 'pie2': '#00FF00', 'pie3': '#FF0000'}}}%% keep in mind that pie1 is the highest since its sorted in descending order. Heres an example of a mindmap: mindmap root((mindmap)) Origins Long history ::icon(fa fa-book) Popularisation British popular psychology author Tony Buzan Research On effectivness<br/>and features On Automatic creation Uses Creative techniques Strategic planning Argument mapping Tools Pen and paper Mermaid. ",
+ },
data: {
model: 'gpt-3.5-turbo',
maxTokens: 256,
temp: 0.5,
prompt: "You are a helpful resarch assistant. Analyze the user's data to find meaningful patterns and/or correlation. Please only return a JSON with a correlation column 1 propert, a correlation column 2 property, and an analysis property. ",
},
+ sort: {
+ model: 'gpt-4o',
+ maxTokens: 2048,
+ temp: 0.5,
+ prompt: "I'm going to give you a list of descriptions. Each one is seperated by ====== on either side. They will vary in length, so make sure to only seperate when you see ======. Sort them into lists by shared content. MAKE SURE EACH DESCRIPTOR IS IN ONLY ONE LIST. Generate only the list with each list seperated by ====== with the elements seperated by ~~~~~~. Try to do around 4 groups, but a little more or less is ok.",
+ },
+ describe: { model: 'gpt-4-vision-preview', maxTokens: 2048, temp: 0, prompt: 'Describe these images in 3-5 words' },
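+ // NB: 'gpt-4-vision-preview' is an older vision model; the gptImageLabel helper added below performs the same task with 'gpt-4o'.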
};
let lastCall = '';
@@ -60,9 +72,9 @@ const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: a
const response = await openai.chat.completions.create({
model: opts.model,
- max_tokens: opts.maxTokens,
+ messages: messages,
temperature: opts.temp,
- messages,
+ max_tokens: opts.maxTokens,
});
lastResp = response.choices[0].message.content ?? '';
return lastResp;
@@ -71,7 +83,6 @@ const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: a
return 'Error connecting with API.';
}
};
-
const gptImageCall = async (prompt: string, n?: number) => {
try {
const configuration: ClientOptions = {
@@ -85,11 +96,72 @@ const gptImageCall = async (prompt: string, n?: number) => {
n: n ?? 1,
size: '1024x1024',
});
- return response.data.map(data => data.url);
+ return response.data.map((data: any) => data.url);
} catch (err) {
console.error(err);
}
return undefined;
};
-export { gptAPICall, gptImageCall, GPTCallType };
+const gptGetEmbedding = async (src: string): Promise<number[]> => {
+ try {
+ const configuration: ClientOptions = {
+ apiKey: process.env.OPENAI_KEY,
+ dangerouslyAllowBrowser: true,
+ };
+ const openai = new OpenAI(configuration);
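+ // text-embedding-3-large natively returns 3072 dimensions; the 'dimensions' option below asks the API to shorten the vector to 256 floats.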
+ const embeddingResponse = await openai.embeddings.create({
+ model: 'text-embedding-3-large',
+ input: [src],
+ encoding_format: 'float',
+ dimensions: 256,
+ });
+
+ // Assume the embeddingResponse structure is correct; adjust based on actual API response
+ const embedding = embeddingResponse.data[0].embedding;
+ return embedding;
+ } catch (err) {
+ console.error(err);
+ return [];
+ }
+};
+
+const gptImageLabel = async (src: string): Promise<string> => {
+ try {
+ const configuration: ClientOptions = {
+ apiKey: process.env.OPENAI_KEY,
+ dangerouslyAllowBrowser: true,
+ };
+
+ const openai = new OpenAI(configuration);
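+ // Multimodal chat request: one text part plus one image_url part; detail: 'low' caps image token usage at the cost of fidelity.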
+ const response = await openai.chat.completions.create({
+ model: 'gpt-4o',
+ messages: [
+ {
+ role: 'user',
+ content: [
+ { type: 'text', text: 'Give three to five labels to describe this image.' },
+ {
+ type: 'image_url',
+ image_url: {
+ url: src,
+ detail: 'low',
+ },
+ },
+ ],
+ },
+ ],
+ });
+ return response.choices[0].message.content || 'Missing labels';
+ } catch (err) {
+ console.error(err);
+ return 'Error connecting with API';
+ }
+};
+
+export { gptAPICall, gptImageCall, GPTCallType, gptImageLabel, gptGetEmbedding };
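
For context, a minimal sketch of how a caller might wire the new exports together. The caller shape and the delimiter parsing are assumptions inferred from the SORT prompt above, not part of this patch:

// Hypothetical caller (not in this patch): label an image, embed the label,
// and group free-form descriptions using the new SORT call type.
import { gptAPICall, gptGetEmbedding, gptImageLabel, GPTCallType } from './GPT';

const labelAndEmbed = async (imageUrl: string) => {
    const labels = await gptImageLabel(imageUrl); // e.g. "dog, park, sunny, outdoors"
    const embedding = await gptGetEmbedding(labels); // 256-dim float vector, [] on error
    return { labels, embedding };
};

const groupDescriptions = async (descriptions: string[]) => {
    // The SORT prompt expects each description wrapped in ====== delimiters and
    // returns groups separated by ====== with elements separated by ~~~~~~.
    const input = descriptions.map(d => `======${d}======`).join('\n');
    const raw = await gptAPICall(input, GPTCallType.SORT);
    return raw
        .split('======')
        .map(group => group.split('~~~~~~').map(s => s.trim()).filter(Boolean))
        .filter(group => group.length > 0);
};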