aboutsummaryrefslogtreecommitdiff
path: root/src/client/apis/gpt/GPT.ts
blob: 20c810651bc120b7f7682d7fd11abdf832082531 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
import { ChatCompletionMessageParam } from 'openai/resources';
import { openai } from './setup';

/** The kinds of GPT-backed tasks this module knows how to run. */
enum GPTCallType {
    SUMMARY = 'summary',
    COMPLETION = 'completion',
    EDIT = 'edit',
    CHATCARD = 'chatcard',
    FLASHCARD = 'flashcard',
    QUIZ = 'quiz',
    SORT = 'sort',
    DESCRIBE = 'describe',
    MERMAID = 'mermaid',
    DATA = 'data',
    DRAW = 'draw',
    COLOR = 'color',
}

/** Per-call-type configuration passed to the chat-completions endpoint. */
type GPTCallOpts = {
    model: string; // OpenAI model id
    maxTokens: number; // max_tokens cap for the response
    temp: number; // sampling temperature (0 = deterministic)
    prompt: string; // system prompt; gptAPICall may append a caller-supplied suffix
};

// Typed as Record<GPTCallType, GPTCallOpts> (instead of a loose string index
// signature) so the compiler guarantees every enum member has an entry —
// a missing entry would otherwise surface only as a runtime crash in gptAPICall.
// NOTE(review): the prompt strings below contain typos ("resarch", "seperated",
// "propert", ...). They are sent verbatim to the model and downstream code may
// depend on the model's resulting output format, so they are left byte-identical.
const callTypeMap: Record<GPTCallType, GPTCallOpts> = {
    // newest model: gpt-4
    summary: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: 'Summarize the text given in simpler terms.' },
    edit: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: 'Reword the text.' },
    flashcard: { model: 'gpt-4-turbo', maxTokens: 512, temp: 0.5, prompt: 'Make flashcards out of this text with each question and answer labeled. Do not label each flashcard and do not include asterisks: ' },
    completion: { model: 'gpt-4-turbo', maxTokens: 256, temp: 0.5, prompt: "You are a helpful assistant. Answer the user's prompt." },
    mermaid: {
        model: 'gpt-4-turbo',
        maxTokens: 2048,
        temp: 0,
        prompt: "(Heres an example of changing color of a pie chart to help you pie title Example \"Red\": 20 \"Blue\": 50 \"Green\": 30 %%{init: {'theme': 'base', 'themeVariables': {'pie1': '#0000FF', 'pie2': '#00FF00', 'pie3': '#FF0000'}}}%% keep in mind that pie1 is the highest since its sorted in descending order. Heres an example of a mindmap: mindmap  root((mindmap))    Origins      Long history      ::icon(fa fa-book)      Popularisation        British popular psychology author Tony Buzan    Research      On effectivness<br/>and features      On Automatic creation       Uses            Creative techniques            Strategic planning            Argument mapping    Tools      Pen and paper     Mermaid.  ",
    },
    data: {
        model: 'gpt-3.5-turbo',
        maxTokens: 256,
        temp: 0.5,
        prompt: "You are a helpful resarch assistant. Analyze the user's data to find meaningful patterns and/or correlation. Please only return a JSON with a correlation column 1 propert, a correlation column 2 property, and an analysis property. ",
    },
    sort: {
        model: 'gpt-4o',
        maxTokens: 2048,
        temp: 0.5,
        prompt: "I'm going to give you a list of descriptions. Each one is seperated by ====== on either side. They will vary in length, so make sure to only seperate when you see ======. Sort them into lists by shared content. MAKE SURE EACH DESCRIPTOR IS IN ONLY ONE LIST. Generate only the list with each list seperated by ====== with the elements seperated by ~~~~~~. Try to do around 4 groups, but a little more or less is ok.",
    },
    describe: { model: 'gpt-4-vision-preview', maxTokens: 2048, temp: 0, prompt: 'Describe these images in 3-5 words' },
    chatcard: { model: 'gpt-4-turbo', maxTokens: 512, temp: 0.5, prompt: 'Answer the following question as a short flashcard response. Do not include a label.' },
    quiz: {
        model: 'gpt-4-turbo',
        maxTokens: 1024,
        temp: 0,
        prompt: 'List unique differences between the content of the UserAnswer and Rubric. Before each difference, label it and provide any additional information the UserAnswer missed and explain it in second person without separating it into UserAnswer and Rubric content and additional information. If there are no differences, say correct',
    },
    draw: {
        model: 'gpt-4o',
        maxTokens: 1024,
        temp: 0.8,
        prompt: 'Given an item, a level of complexity from 1-10, and a size in pixels, generate a detailed and colored line drawing representation of it. Make sure every element has the stroke field filled out. More complex drawings will have much more detail and strokes. The drawing should be in SVG format with no additional text or comments. For path coordinates, make sure you format with a comma between numbers, like M100,200 C150,250 etc. The only supported commands are line, ellipse, circle, rect, polygon, and path with M, Q, C, and L so only use those.',
    },
    color: {
        model: 'gpt-4o',
        maxTokens: 1024,
        temp: 0.5,
        prompt: 'You will be coloring drawings. You will be given what the drawing is, then a list of descriptions for parts of the drawing. Based on each description, respond with the stroke and fill color that it should be. Follow the rules: 1. Avoid using black for stroke color 2. Make the stroke color 1-3 shades darker than the fill color 3. Use the same colors when possible. Format as {#abcdef #abcdef}, making sure theres a color for each description, and do not include any additional text.',
    },
};

// Single-entry response cache. The key combines call type, extra prompt, and
// input text; it is only committed after a successful API call.
let lastCall = '';
let lastResp = '';
/**
 * Calls the OpenAI chat-completions API with the prompt configured for `callType`.
 *
 * @param inputTextIn Text to process (a '.' is appended for summary/flashcard/quiz calls).
 * @param callType Which configured task to run (selects model, temperature, system prompt).
 * @param prompt Optional extra text appended to the configured system prompt.
 * @param dontCache When true, bypasses the single-entry response cache.
 * @returns AI output, or a fixed error string if the API call fails.
 */
const gptAPICall = async (inputTextIn: string, callType: GPTCallType, prompt?: any, dontCache?: boolean) => {
    const inputText = [GPTCallType.SUMMARY, GPTCallType.FLASHCARD, GPTCallType.QUIZ].includes(callType) ? inputTextIn + '.' : inputTextIn;
    const opts: GPTCallOpts = callTypeMap[callType];
    // The cache key includes callType and prompt — keying on inputText alone
    // would return a stale answer when the same text is re-sent for a
    // different task or with a different prompt suffix.
    const cacheKey = `${callType}|${prompt ?? ''}|${inputText}`;
    if (lastCall === cacheKey && dontCache !== true) return lastResp;
    try {
        const usePrompt = prompt ? opts.prompt + prompt : opts.prompt;
        const messages: ChatCompletionMessageParam[] = [
            { role: 'system', content: usePrompt },
            { role: 'user', content: inputText },
        ];

        const response = await openai.chat.completions.create({
            model: opts.model,
            messages: messages,
            temperature: opts.temp,
            max_tokens: opts.maxTokens,
        });
        lastResp = response.choices[0].message.content ?? '';
        // Commit the cache key only after success; setting it before the call
        // (as the previous code did) made a failed call poison the cache and
        // serve an unrelated earlier response on the next identical request.
        lastCall = cacheKey;
        return lastResp;
    } catch (err) {
        console.log(err);
        return 'Error connecting with API.';
    }
};
/**
 * Generates image(s) from a text prompt via the OpenAI images endpoint.
 *
 * @param prompt Text description of the desired image.
 * @param n Number of images to generate (defaults to 1).
 * @returns An array of image URLs, or undefined if the API call fails.
 */
const gptImageCall = async (prompt: string, n?: number) => {
    try {
        const result = await openai.images.generate({
            prompt: prompt,
            n: n ?? 1,
            size: '1024x1024',
        });
        const urls = result.data.map((entry: any) => entry.url);
        return urls;
    } catch (err) {
        console.error(err);
        return undefined;
    }
};
/**
 * Computes a 256-dimension float embedding for `src` using the
 * text-embedding-3-large model.
 *
 * @param src Text to embed.
 * @returns The embedding vector, or an empty array if the API call fails.
 */
const gptGetEmbedding = async (src: string): Promise<number[]> => {
    try {
        const result = await openai.embeddings.create({
            model: 'text-embedding-3-large',
            input: [src],
            encoding_format: 'float',
            dimensions: 256,
        });
        // Assume the embeddingResponse structure is correct; adjust based on actual API response
        return result.data[0].embedding;
    } catch (err) {
        console.log(err);
        return [];
    }
};
/**
 * Asks GPT-4o vision for three short labels describing the given image.
 *
 * @param src Image URL (or data URL) to label.
 * @returns The model's labels, 'Missing labels' if the response is empty,
 *          or an error string if the API call fails.
 */
const gptImageLabel = async (src: string): Promise<string> => {
    try {
        const completion = await openai.chat.completions.create({
            model: 'gpt-4o',
            messages: [
                {
                    role: 'user',
                    content: [
                        { type: 'text', text: 'Give three labels to describe this image.' },
                        // 'low' detail keeps token usage down for a short labeling task
                        { type: 'image_url', image_url: { url: `${src}`, detail: 'low' } },
                    ],
                },
            ],
        });
        const labels = completion.choices[0].message.content;
        return labels ? labels : 'Missing labels';
    } catch (err) {
        console.log(err);
        return 'Error connecting with API';
    }
};
/**
 * Transcribes handwriting in an image using GPT-4o vision (temperature 0 for
 * deterministic transcription).
 *
 * @param src Image URL (or data URL) containing handwriting.
 * @returns The transcribed text, 'Missing labels' if the response is empty,
 *          or an error string if the API call fails.
 */
const gptHandwriting = async (src: string): Promise<string> => {
    try {
        const response = await openai.chat.completions.create({
            model: 'gpt-4o',
            temperature: 0,
            messages: [
                {
                    role: 'user',
                    content: [
                        // Fixed garbled instruction ("What is this does this handwriting say")
                        { type: 'text', text: 'What does this handwriting say? Only return the text.' },
                        {
                            type: 'image_url',
                            image_url: {
                                url: `${src}`,
                                detail: 'low',
                            },
                        },
                    ],
                },
            ],
        });
        if (response.choices[0].message.content) {
            return response.choices[0].message.content;
        }
        // NOTE(review): 'Missing labels' is copy-pasted from gptImageLabel; kept
        // byte-identical in case callers match on it — confirm before renaming.
        return 'Missing labels';
    } catch (err) {
        console.log(err);
        return 'Error connecting with API';
    }
};

/**
 * Asks GPT-4o vision to identify a drawing and describe each stroke, given the
 * drawing image plus per-stroke coordinate lists.
 *
 * @param image Image URL (or data URL) of the drawing.
 * @param coords One coordinate list per stroke (interpolated into the prompt).
 * @returns '<item> ~~~ [description, ...]' from the model, 'Missing labels' if
 *          the response is empty, or an error string if the API call fails.
 */
const gptDrawingColor = async (image: string, coords: string[]): Promise<string> => {
    const instructions = `Identify what the drawing in the image represents in 1-5 words. Then, given a list of a list of coordinates, where each list is the coordinates for one stroke of the drawing, determine which part of the drawing it is. Return just what the item it is, followed by ~~~ then only your descriptions in a list like [description, description, ...]. Here are the coordinates: ${coords}`;
    try {
        const completion = await openai.chat.completions.create({
            model: 'gpt-4o',
            temperature: 0,
            messages: [
                {
                    role: 'user',
                    content: [
                        { type: 'text', text: instructions },
                        { type: 'image_url', image_url: { url: `${image}`, detail: 'low' } },
                    ],
                },
            ],
        });
        const answer = completion.choices[0].message.content;
        return answer ? answer : 'Missing labels';
    } catch (err) {
        console.log(err);
        return 'Error connecting with API';
    }
};

export { gptAPICall, gptImageCall, GPTCallType, gptImageLabel, gptGetEmbedding, gptHandwriting, gptDrawingColor };