1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
|
import { ClientOptions, OpenAI } from 'openai';
import { ChatCompletionMessageParam } from 'openai/resources';
// Identifies which request preset a GPT call should use.
// NOTE: each value is a lowercase key of `callTypeMap` — keep the two in sync.
enum GPTCallType {
SUMMARY = 'summary',
COMPLETION = 'completion',
EDIT = 'edit',
SORT = 'sort',
DESCRIBE = 'describe'
}
// Per-call-type configuration for an OpenAI chat-completion request.
type GPTCallOpts = {
model: string; // OpenAI model identifier passed to the API
maxTokens: number; // upper bound on generated tokens (`max_tokens`)
temp: number; // sampling temperature (`temperature`)
prompt: string; // system prompt prepended to the user's input
};
/**
 * Request presets, keyed by the string values of `GPTCallType`.
 * Each entry fixes the model, token budget, temperature, and system prompt
 * used by `gptAPICall` for that call type.
 */
const callTypeMap: { [type: string]: GPTCallOpts } = {
  summary: {
    model: 'gpt-3.5-turbo-instruct',
    maxTokens: 256,
    temp: 0.5,
    prompt: 'Summarize this text in simpler terms: ',
  },
  edit: {
    model: 'gpt-3.5-turbo-instruct',
    maxTokens: 256,
    temp: 0.5,
    prompt: 'Reword this: ',
  },
  completion: {
    model: 'gpt-3.5-turbo-instruct',
    maxTokens: 256,
    temp: 0.5,
    prompt: '',
  },
  sort: {
    model: 'gpt-4o',
    maxTokens: 2048,
    temp: 0.5,
    // Prompt text (including its spelling) is preserved exactly as shipped.
    prompt:
      "I'm going to give you a list of descriptions. Each one is seperated by ====== on either side. They will vary in length, so make sure to only seperate when you see ======. Sort them into lists by shared content. MAKE SURE EACH DESCRIPTOR IS IN ONLY ONE LIST. Generate only the list with each list seperated by ====== with the elements seperated by ~~~~~~. Try to do around 4 groups, but a little more or less is ok.",
  },
  describe: {
    model: 'gpt-4-vision-preview',
    maxTokens: 2048,
    temp: 0,
    prompt: 'Describe these images in 3-5 words',
  },
};
/**
 * Calls the OpenAI chat-completions API using the preset for `callType`.
 *
 * @param inputText Text to process (a trailing '.' is appended for SUMMARY calls).
 * @param callType Which preset from `callTypeMap` supplies model/prompt/limits.
 * @returns The model's reply text (may be null per the SDK types), or the
 *          string 'Error connecting with API.' if the request failed.
 */
const gptAPICall = async (inputText: string, callType: GPTCallType) => {
  if (callType === GPTCallType.SUMMARY) inputText += '.';
  const opts: GPTCallOpts = callTypeMap[callType];
  try {
    // SECURITY: an OpenAI secret key was previously hard-coded here. It now
    // comes from the environment (matching gptImageCall); the exposed key
    // must be rotated since it was committed to source.
    const configuration: ClientOptions = {
      apiKey: process.env.OPENAI_KEY,
      // NOTE(review): dangerouslyAllowBrowser ships the key to browser
      // clients — confirm this is intentional.
      dangerouslyAllowBrowser: true,
    };
    const openai = new OpenAI(configuration);
    const messages: ChatCompletionMessageParam[] = [
      { role: 'system', content: opts.prompt },
      { role: 'user', content: inputText },
    ];
    const response = await openai.chat.completions.create({
      model: opts.model,
      messages: messages,
      temperature: opts.temp,
      max_tokens: opts.maxTokens,
    });
    // Only the first choice is used.
    return response.choices[0].message.content;
  } catch (err) {
    console.error(err);
    return 'Error connecting with API.';
  }
};
/**
 * Asks a vision-capable chat model for a short (3-5 word) label for an image.
 *
 * @param imgUrl URL (or data URL) of the image to describe.
 * @returns The generated label; ':(' when the model returns empty content;
 *          'Error connecting with API' if the request failed.
 */
const gptImageLabel = async (imgUrl: string): Promise<string> => {
  try {
    // SECURITY: an OpenAI secret key was previously hard-coded here. It now
    // comes from the environment (matching gptImageCall); the exposed key
    // must be rotated since it was committed to source.
    const configuration: ClientOptions = {
      apiKey: process.env.OPENAI_KEY,
      dangerouslyAllowBrowser: true,
    };
    const openai = new OpenAI(configuration);
    const response = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages: [
        {
          role: 'user',
          content: [
            { type: 'text', text: 'Describe this image in 3-5 words' },
            { type: 'image_url', image_url: { url: imgUrl } },
          ],
        },
      ],
    });
    // content may be null/empty per the SDK types; keep the legacy fallback.
    return response.choices[0].message.content || ':(';
  } catch (err) {
    console.error(err);
    return 'Error connecting with API';
  }
};
/**
 * Generates one or more images from a text prompt via the OpenAI images API.
 *
 * @param prompt Text description of the desired image(s).
 * @param n Number of images to generate; defaults to 1.
 * @returns An array of generated image URLs, or undefined if the request failed.
 */
const gptImageCall = async (prompt: string, n?: number) => {
  try {
    const configuration: ClientOptions = {
      apiKey: process.env.OPENAI_KEY,
      dangerouslyAllowBrowser: true,
    };
    const openai = new OpenAI(configuration);
    const response = await openai.images.generate({
      prompt: prompt,
      n: n ?? 1,
      size: '1024x1024',
    });
    // Let the SDK's Image type flow through instead of widening to `any`.
    return response.data.map(image => image.url);
  } catch (err) {
    console.error(err);
    return undefined;
  }
};
// Public API of this module; GPTCallType is exported so callers can select a preset.
export { gptAPICall, gptImageCall, gptImageLabel, GPTCallType };
|