fix: remove unnecessary error handling in token counting functions

Author: CaIon
Date: 2025-06-21 01:16:54 +08:00
commit 7afd3f97ee
parent 0708452939
5 changed files with 6 additions and 24 deletions
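
The pattern in every removed block below is the same: the preceding call assigns a single value, so the `if err != nil` check could only ever re-test a stale `err` left over from some earlier call, and the wrapped error path was unreachable. A minimal sketch of the helper shapes this relies on; the signatures match the call sites in the diff, but the bodies are illustrative assumptions, not code from the repository:

package service

// CountTextToken estimates the token count of plain text for a model.
// It returns only an int, never an error, which is why the callers'
// err checks were dead code. The ~4-characters-per-token heuristic is
// a stand-in for a real tokenizer such as tiktoken.
func CountTextToken(text string, model string) int {
    n := len([]rune(text)) / 4
    if n == 0 && len(text) > 0 {
        n = 1
    }
    return n
}

// CountTTSToken estimates billable tokens for a text-to-speech input;
// TTS is commonly billed per input character (again an assumption).
func CountTTSToken(input string, model string) int {
    return len([]rune(input))
}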

View File

@@ -67,9 +67,6 @@ func AudioHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
     preConsumedTokens := common.PreConsumedQuota
     if relayInfo.RelayMode == relayconstant.RelayModeAudioSpeech {
         promptTokens = service.CountTTSToken(audioRequest.Input, audioRequest.Model)
-        if err != nil {
-            return service.OpenAIErrorWrapper(err, "count_audio_token_failed", http.StatusInternalServerError)
-        }
         preConsumedTokens = promptTokens
         relayInfo.PromptTokens = promptTokens
     }

View File

@@ -619,9 +619,6 @@ func HandleClaudeResponseData(c *gin.Context, info *relaycommon.RelayInfo, claud
     }
     if requestMode == RequestModeCompletion {
         completionTokens := service.CountTextToken(claudeResponse.Completion, info.OriginModelName)
-        if err != nil {
-            return service.OpenAIErrorWrapper(err, "count_token_text_failed", http.StatusInternalServerError)
-        }
         claudeInfo.Usage.PromptTokens = info.PromptTokens
         claudeInfo.Usage.CompletionTokens = completionTokens
         claudeInfo.Usage.TotalTokens = info.PromptTokens + completionTokens

View File

@@ -106,7 +106,7 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
     var currentEvent string
     var currentData string
-    var usage dto.Usage
+    var usage = &dto.Usage{}
     for scanner.Scan() {
         line := scanner.Text()
@@ -114,7 +114,7 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
         if line == "" {
             if currentEvent != "" && currentData != "" {
                 // handle last event
-                handleCozeEvent(c, currentEvent, currentData, &responseText, &usage, id, info)
+                handleCozeEvent(c, currentEvent, currentData, &responseText, usage, id, info)
                 currentEvent = ""
                 currentData = ""
             }
@@ -134,7 +134,7 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
     // Last event
     if currentEvent != "" && currentData != "" {
-        handleCozeEvent(c, currentEvent, currentData, &responseText, &usage, id, info)
+        handleCozeEvent(c, currentEvent, currentData, &responseText, usage, id, info)
     }
     if err := scanner.Err(); err != nil {
@@ -143,12 +143,10 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
     helper.Done(c)
     if usage.TotalTokens == 0 {
-        usage.PromptTokens = info.PromptTokens
-        usage.CompletionTokens = service.CountTextToken("gpt-3.5-turbo", responseText)
-        usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
+        usage = service.ResponseText2Usage(responseText, info.UpstreamModelName, c.GetInt("coze_input_count"))
     }
-    return nil, &usage
+    return nil, usage
 }
 func handleCozeEvent(c *gin.Context, event string, data string, responseText *string, usage *dto.Usage, id string, info *relaycommon.RelayInfo) {
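
Here and in the dify handler below, the inline fallback (set prompt tokens, count completion tokens, sum the total) is replaced by one call to service.ResponseText2Usage, which also counts against the upstream model name instead of a hardcoded "gpt-3.5-turbo"; coze passes the input count stored on the gin context, dify passes info.PromptTokens. Note as well that usage is now a *dto.Usage from the start, so the mutations handleCozeEvent makes are visible in the struct the function returns. A minimal sketch of what ResponseText2Usage plausibly does, inferred from the removed lines and continuing the assumed service package above (the real implementation may differ):

// Usage mirrors the assumed shape of dto.Usage so the sketch stands alone.
type Usage struct {
    PromptTokens     int
    CompletionTokens int
    TotalTokens      int
}

// ResponseText2Usage rebuilds usage from streamed response text when the
// upstream did not report usage itself. Inferred from the inline code this
// commit removes; treat it as an assumption, not the repository's source.
func ResponseText2Usage(responseText string, modelName string, promptTokens int) *Usage {
    usage := &Usage{}
    usage.PromptTokens = promptTokens
    usage.CompletionTokens = CountTextToken(responseText, modelName)
    usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
    return usage
}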

View File

@@ -243,15 +243,8 @@ func difyStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
         return true
     })
     helper.Done(c)
-    err := resp.Body.Close()
-    if err != nil {
-        // return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-        common.SysError("close_response_body_failed: " + err.Error())
-    }
     if usage.TotalTokens == 0 {
-        usage.PromptTokens = info.PromptTokens
-        usage.CompletionTokens = service.CountTextToken("gpt-3.5-turbo", responseText)
-        usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
+        usage = service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
     }
     usage.CompletionTokens += nodeToken
     return nil, usage

View File

@@ -73,9 +73,6 @@ func ResponsesHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode)
         relayInfo.SetPromptTokens(promptTokens)
     } else {
         promptTokens := getInputTokens(req, relayInfo)
-        if err != nil {
-            return service.OpenAIErrorWrapperLocal(err, "count_input_tokens_error", http.StatusBadRequest)
-        }
         c.Set("prompt_tokens", promptTokens)
     }