forked from shanginn/git-aicommit
-
Notifications
You must be signed in to change notification settings - Fork 0
/
count_tokens.js
78 lines (78 loc) · 2.34 KB
/
count_tokens.js
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
// langchain/dist/base_language/count_tokens.js
// Normalize a dated/variant OpenAI model name (e.g. "gpt-4-0314") to the base
// model name tiktoken knows about. Prefixes are ordered most-specific-first so
// the "-16k"/"-32k" variants match before their base models.
export const getModelNameForTiktoken = (modelName) => {
    const prefixToCanonical = [
        ["gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k"],
        ["gpt-3.5-turbo-", "gpt-3.5-turbo"],
        ["gpt-4-32k-", "gpt-4-32k"],
        ["gpt-4-", "gpt-4"],
    ];
    for (const [prefix, canonical] of prefixToCanonical) {
        if (modelName.startsWith(prefix)) {
            return canonical;
        }
    }
    // Unrecognized names pass through unchanged.
    return modelName;
};
// Maximum input tokens for an embedding model: 8191 for
// text-embedding-ada-002, 2046 for everything else.
export const getEmbeddingContextSize = (modelName) => {
    if (modelName === "text-embedding-ada-002") {
        return 8191;
    }
    return 2046;
};
// Context-window size (in tokens) for a model, after normalizing variant
// names via getModelNameForTiktoken. Unknown models fall back to 4097.
export const getModelContextSize = (modelName) => {
    // Map (rather than a plain object) so lookups can't collide with
    // Object.prototype keys; missing keys yield undefined → the 4097 default.
    const contextSizes = new Map([
        ["gpt-3.5-turbo-16k", 16384],
        ["gpt-3.5-turbo", 4096],
        ["gpt-4-32k", 32768],
        ["gpt-4", 8192],
        ["text-davinci-003", 4097],
        ["text-curie-001", 2048],
        ["text-babbage-001", 2048],
        ["text-ada-001", 2048],
        ["code-davinci-002", 8000],
        ["code-cushman-001", 2048],
    ]);
    return contextSizes.get(getModelNameForTiktoken(modelName)) ?? 4097;
};
// Best-effort dynamic import of the optional @dqbd/tiktoken package.
// Returns { encoding_for_model: null } (after logging the error) when the
// package is unavailable, so callers can fall back to approximate counting.
export const importTiktoken = async () => {
    let encoding_for_model = null;
    try {
        ({ encoding_for_model } = await import("@dqbd/tiktoken"));
    }
    catch (error) {
        console.log(error);
    }
    return { encoding_for_model };
};
// Number of tokens still available in the model's context window after the
// given prompt: getModelContextSize(modelName) minus the prompt's token count.
// Uses tiktoken when available; otherwise approximates ~4 characters/token.
export const calculateMaxTokens = async ({ prompt, modelName, }) => {
    const { encoding_for_model } = await importTiktoken();
    // fallback to approximate calculation if tiktoken is not available
    let numTokens = Math.ceil(prompt.length / 4);
    try {
        if (encoding_for_model) {
            const encoding = encoding_for_model(getModelNameForTiktoken(modelName));
            try {
                numTokens = encoding.encode(prompt).length;
            }
            finally {
                // BUG FIX: the encoder is backed by WASM memory; the original
                // leaked it when encode() threw. Always release it.
                encoding.free();
            }
        }
    }
    catch (error) {
        console.warn("Failed to calculate number of tokens with tiktoken, falling back to approximate count", error);
    }
    const maxTokens = getModelContextSize(modelName);
    return maxTokens - numTokens;
};