fix: gemini thinking tokens count #1014

Author: CaIon
Date: 2025-04-29 16:21:20 +08:00
parent da6423de33
commit b64480b750
3 changed files with 10 additions and 3 deletions


@@ -51,6 +51,7 @@ type GeneralOpenAIRequest struct {
 	Dimensions     int    `json:"dimensions,omitempty"`
 	Modalities     any    `json:"modalities,omitempty"`
 	Audio          any    `json:"audio,omitempty"`
+	EnableThinking any    `json:"enable_thinking,omitempty"` // ali
 	ExtraBody      any    `json:"extra_body,omitempty"`
 }
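For context, a minimal sketch of how the new enable_thinking flag serializes. The reduced struct and main function below are illustrative assumptions; only the EnableThinking field itself comes from the diff.

package main

import (
	"encoding/json"
	"fmt"
)

// Reduced stand-in for the repository's GeneralOpenAIRequest,
// keeping only the fields this example needs.
type GeneralOpenAIRequest struct {
	Model          string `json:"model,omitempty"`
	EnableThinking any    `json:"enable_thinking,omitempty"` // ali
}

func main() {
	req := GeneralOpenAIRequest{Model: "qwen3-235b-a22b", EnableThinking: true}
	b, _ := json.Marshal(req)
	// Prints: {"model":"qwen3-235b-a22b","enable_thinking":true}
	// With EnableThinking left nil, omitempty drops the key entirely.
	fmt.Println(string(b))
}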


@@ -1,7 +1,12 @@
 package ali
 
 var ModelList = []string{
-	"qwen-turbo", "qwen-plus", "qwen-max", "qwen-max-longcontext",
+	"qwen-turbo",
+	"qwen-plus",
+	"qwen-max",
+	"qwen-max-longcontext",
+	"qwq-32b",
+	"qwen3-235b-a22b",
 	"text-embedding-v1",
 }


@@ -670,6 +670,7 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
 			usage.PromptTokens = geminiResponse.UsageMetadata.PromptTokenCount
 			usage.CompletionTokens = geminiResponse.UsageMetadata.CandidatesTokenCount
 			usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
+			usage.TotalTokens = geminiResponse.UsageMetadata.TotalTokenCount
 		}
 		err = helper.ObjectData(c, response)
 		if err != nil {
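The hunk above copies Gemini's usage metadata into the OpenAI-style usage object, now including the upstream total. A self-contained sketch of that mapping; the struct shapes here are assumptions, while the four metadata field names are the ones the diff reads:

package main

import "fmt"

// Assumed shape of Gemini's usage metadata (field names as in the diff).
type GeminiUsageMetadata struct {
	PromptTokenCount     int
	CandidatesTokenCount int
	ThoughtsTokenCount   int // "thinking" tokens, reported separately
	TotalTokenCount      int // prompt + candidates + thoughts
}

// Assumed OpenAI-style usage fields, flattened for the example.
type Usage struct {
	PromptTokens     int
	CompletionTokens int
	ReasoningTokens  int
	TotalTokens      int
}

func fromGemini(m GeminiUsageMetadata) Usage {
	return Usage{
		PromptTokens:     m.PromptTokenCount,
		CompletionTokens: m.CandidatesTokenCount, // visible output only
		ReasoningTokens:  m.ThoughtsTokenCount,
		TotalTokens:      m.TotalTokenCount, // now taken from upstream
	}
}

func main() {
	fmt.Printf("%+v\n", fromGemini(GeminiUsageMetadata{100, 200, 50, 350}))
}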
@@ -690,9 +691,8 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
 		}
 	}
-	usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
 	usage.PromptTokensDetails.TextTokens = usage.PromptTokens
-	//usage.CompletionTokenDetails.TextTokens = usage.CompletionTokens
+	usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens
 	if info.ShouldIncludeUsage {
 		response = helper.GenerateFinalUsageResponse(id, createAt, info.UpstreamModelName, *usage)
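The recomputation above is the heart of the fix: CandidatesTokenCount excludes thought tokens, so the old sum of prompt + completion undercounted the billable total. Deriving completion as total - prompt folds the reasoning tokens back in. A runnable illustration with made-up numbers:

package main

import "fmt"

func main() {
	// Illustrative numbers, not from the commit.
	prompt, candidates, thoughts := 100, 200, 50
	total := prompt + candidates + thoughts // TotalTokenCount = 350

	oldCompletion := candidates     // 200: thought tokens never counted
	newCompletion := total - prompt // 250: thought tokens included

	fmt.Println(oldCompletion, newCompletion) // 200 250
}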
@@ -740,6 +740,7 @@ func GeminiChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
 	}
 	usage.CompletionTokenDetails.ReasoningTokens = geminiResponse.UsageMetadata.ThoughtsTokenCount
+	usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens
 	fullTextResponse.Usage = usage
 	jsonResponse, err := json.Marshal(fullTextResponse)
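The non-stream handler applies the same derivation. A test-style sketch of the invariant both paths now rely on; the test name and layout are assumptions, not from the repository:

package gemini_test

import "testing"

// Completion tokens derived as total - prompt must equal
// visible output plus reasoning tokens.
func TestCompletionIncludesThoughts(t *testing.T) {
	prompt, candidates, thoughts := 100, 200, 50
	total := prompt + candidates + thoughts

	completion := total - prompt
	if completion != candidates+thoughts {
		t.Fatalf("completion = %d, want %d", completion, candidates+thoughts)
	}
}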