fix(token-estimation): refine logic #921
base: main
Changes from all commits
```diff
@@ -3011,25 +3011,11 @@ chat.openapi(completions, async (c) => {
 		images,
 	} = parseProviderResponse(usedProvider, json, messages);
 
-	// Debug: Log images found in response
-	logger.debug("Gateway - parseProviderResponse extracted images", { images });
-	logger.debug("Gateway - Used provider", { usedProvider });
-	logger.debug("Gateway - Used model", { usedModel });
-
-	// Estimate tokens if not provided by the API
-	const { calculatedPromptTokens, calculatedCompletionTokens } = estimateTokens(
-		usedProvider,
-		messages,
-		content,
-		promptTokens,
-		completionTokens,
-	);
-
 	const costs = calculateCosts(
 		usedModel,
 		usedProvider,
-		calculatedPromptTokens,
-		calculatedCompletionTokens,
+		promptTokens,
+		completionTokens,
 		cachedTokens,
 		{
 			prompt: messages.map((m) => m.content).join("\n"),
```
```diff
@@ -3046,11 +3032,9 @@ chat.openapi(completions, async (c) => {
 		content,
 		reasoningContent,
 		finishReason,
-		calculatedPromptTokens,
-		calculatedCompletionTokens,
-		(calculatedPromptTokens || 0) +
-			(calculatedCompletionTokens || 0) +
-			(reasoningTokens || 0),
+		promptTokens,
+		completionTokens,
+		(promptTokens || 0) + (completionTokens || 0) + (reasoningTokens || 0),
 		reasoningTokens,
 		cachedTokens,
 		toolResults,
```
```diff
@@ -3097,13 +3081,10 @@ chat.openapi(completions, async (c) => {
 		content: content,
 		reasoningContent: reasoningContent,
 		finishReason: finishReason,
-		promptTokens: calculatedPromptTokens?.toString() || null,
-		completionTokens: calculatedCompletionTokens?.toString() || null,
+		promptTokens: promptTokens?.toString() || null,
+		completionTokens: completionTokens?.toString() || null,
 		totalTokens:
-			totalTokens ||
-			(
-				(calculatedPromptTokens || 0) + (calculatedCompletionTokens || 0)
-			).toString(),
+			totalTokens || ((promptTokens || 0) + (completionTokens || 0)).toString(),
 		reasoningTokens: reasoningTokens,
 		cachedTokens: cachedTokens?.toString() || null,
 		hasError: false,
```
Comment on lines +3084 to 3088:

Fix `totalTokens` type/consistency and include a reasoning fallback. Currently `totalTokens` may be a number (not a string) and the fallback ignores reasoning tokens; align it with the other fields. Apply this diff:

```diff
-		promptTokens: promptTokens?.toString() || null,
-		completionTokens: completionTokens?.toString() || null,
-		totalTokens:
-			totalTokens || ((promptTokens || 0) + (completionTokens || 0)).toString(),
+		promptTokens: (safePromptTokens ?? promptTokens)?.toString() || null,
+		completionTokens: (safeCompletionTokens ?? completionTokens)?.toString() || null,
+		totalTokens: (
+			totalTokens ??
+			((safePromptTokens ?? promptTokens ?? 0) +
+				(safeCompletionTokens ?? completionTokens ?? 0) +
+				(reasoningTokens ?? 0))
+		).toString(),
```
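To see the type flip the comment is pointing at, here is a standalone sketch (the token values are hypothetical):

```typescript
// `a || b` short-circuits on a truthy number, so .toString() is never
// reached when the provider reports totalTokens; the field's type then
// depends on which provider answered.
const totalTokens: number | null = 42; // hypothetical provider-reported value
const promptTokens = 10;
const completionTokens = 20;

const value =
	totalTokens || ((promptTokens || 0) + (completionTokens || 0)).toString();
console.log(typeof value); // "number" here; "string" when totalTokens is null or 0
```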
```diff
@@ -28,7 +28,7 @@ export async function processImageUrl(
 	const base64Data = isBase64 ? data : btoa(data);
 
 	// Validate size (estimate: base64 adds ~33% overhead)
-	const estimatedSize = (base64Data.length * 3) / 4;
+	const estimatedSize = Math.round((base64Data.length * 3) / 4);
 	if (estimatedSize > 20 * 1024 * 1024) {
 		logger.warn("Data URL image size exceeds limit", { estimatedSize });
 		throw new Error("Image size exceeds 20MB limit");
```
Compute the base64 byte size precisely (handle padding and whitespace). `Math.round(len * 3 / 4)` can over- or under-estimate and cause false 20MB-limit rejections. Account for `=` padding and possible whitespace without decoding the payload. Apply this diff:

```diff
-	const estimatedSize = Math.round((base64Data.length * 3) / 4);
+	const sanitized = base64Data.replace(/\s/g, "");
+	const padding = sanitized.endsWith("==") ? 2 : sanitized.endsWith("=") ? 1 : 0;
+	const estimatedSize = Math.floor((sanitized.length * 3) / 4) - padding;
```
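As a quick sanity check of that arithmetic, a standalone sketch (the strings are made-up test vectors):

```typescript
// "QQ==" encodes 1 byte ("A"):   floor(4 * 3 / 4) - 2 = 1
// "QUJD" encodes 3 bytes ("ABC"): floor(4 * 3 / 4) - 0 = 3
function base64ByteSize(b64: string): number {
	const sanitized = b64.replace(/\s/g, "");
	const padding = sanitized.endsWith("==") ? 2 : sanitized.endsWith("=") ? 1 : 0;
	return Math.floor((sanitized.length * 3) / 4) - padding;
}

console.log(base64ByteSize("QQ==")); // 1
console.log(base64ByteSize("QUJD")); // 3
```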
Ensure costs/response use tokens even when providers omit them. When prompt/completion tokens are null, derive them once and pass consistent numbers into `calculateCosts` and `transformResponseToOpenai`. Apply this diff to use safe values in the two calls:

```diff
 	const costs = calculateCosts(
 		usedModel,
 		usedProvider,
-		promptTokens,
-		completionTokens,
+		safePromptTokens,
+		safeCompletionTokens,
 		cachedTokens,
```

```diff
 	const transformedResponse = transformResponseToOpenai(
 		usedProvider,
 		usedModel,
 		json,
 		content,
 		reasoningContent,
 		finishReason,
-		promptTokens,
-		completionTokens,
-		(promptTokens || 0) + (completionTokens || 0) + (reasoningTokens || 0),
+		safePromptTokens ?? 0,
+		safeCompletionTokens ?? 0,
+		(safePromptTokens ?? 0) + (safeCompletionTokens ?? 0) + (reasoningTokens || 0),
```

Add these helpers right before the `calculateCosts` call:
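The helper definitions themselves are not spelled out in the comment. A minimal sketch of what they might look like, assuming the `estimateTokens` helper whose call this PR removed is still exported (its signature below is taken from the removed call):

```typescript
// Hypothetical helpers: derive fallback token counts once, so that
// calculateCosts and transformResponseToOpenai see the same numbers.
const { calculatedPromptTokens, calculatedCompletionTokens } = estimateTokens(
	usedProvider,
	messages,
	content,
	promptTokens,
	completionTokens,
);
const safePromptTokens = promptTokens ?? calculatedPromptTokens;
const safeCompletionTokens = completionTokens ?? calculatedCompletionTokens;
```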
Also applies to: 3035-3038