From 7afd3f97eec60111f18d231dcc9f9a6bc20045f5 Mon Sep 17 00:00:00 2001
From: CaIon <1808837298@qq.com>
Date: Sat, 21 Jun 2025 01:16:54 +0800
Subject: [PATCH] fix: remove unnecessary error handling in token counting
 functions

---
 relay/audio_handler.go               |  3 ---
 relay/channel/claude/relay-claude.go |  3 ---
 relay/channel/coze/relay-coze.go     | 12 +++++-------
 relay/channel/dify/relay-dify.go     |  9 +--------
 relay/responses_handler.go           |  3 ---
 5 files changed, 6 insertions(+), 24 deletions(-)

diff --git a/relay/audio_handler.go b/relay/audio_handler.go
index 96cf1019..c1ce1a02 100644
--- a/relay/audio_handler.go
+++ b/relay/audio_handler.go
@@ -67,9 +67,6 @@ func AudioHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 	preConsumedTokens := common.PreConsumedQuota
 	if relayInfo.RelayMode == relayconstant.RelayModeAudioSpeech {
 		promptTokens = service.CountTTSToken(audioRequest.Input, audioRequest.Model)
-		if err != nil {
-			return service.OpenAIErrorWrapper(err, "count_audio_token_failed", http.StatusInternalServerError)
-		}
 		preConsumedTokens = promptTokens
 		relayInfo.PromptTokens = promptTokens
 	}
diff --git a/relay/channel/claude/relay-claude.go b/relay/channel/claude/relay-claude.go
index 5e15d3a2..406ebc8a 100644
--- a/relay/channel/claude/relay-claude.go
+++ b/relay/channel/claude/relay-claude.go
@@ -619,9 +619,6 @@ func HandleClaudeResponseData(c *gin.Context, info *relaycommon.RelayInfo, claud
 	}
 	if requestMode == RequestModeCompletion {
 		completionTokens := service.CountTextToken(claudeResponse.Completion, info.OriginModelName)
-		if err != nil {
-			return service.OpenAIErrorWrapper(err, "count_token_text_failed", http.StatusInternalServerError)
-		}
 		claudeInfo.Usage.PromptTokens = info.PromptTokens
 		claudeInfo.Usage.CompletionTokens = completionTokens
 		claudeInfo.Usage.TotalTokens = info.PromptTokens + completionTokens
diff --git a/relay/channel/coze/relay-coze.go b/relay/channel/coze/relay-coze.go
index e9719cb9..ac76476f 100644
--- a/relay/channel/coze/relay-coze.go
+++ b/relay/channel/coze/relay-coze.go
@@ -106,7 +106,7 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
 
 	var currentEvent string
 	var currentData string
-	var usage dto.Usage
+	var usage = &dto.Usage{}
 
 	for scanner.Scan() {
 		line := scanner.Text()
@@ -114,7 +114,7 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
 		if line == "" {
 			if currentEvent != "" && currentData != "" {
 				// handle last event
-				handleCozeEvent(c, currentEvent, currentData, &responseText, &usage, id, info)
+				handleCozeEvent(c, currentEvent, currentData, &responseText, usage, id, info)
 				currentEvent = ""
 				currentData = ""
 			}
@@ -134,7 +134,7 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
 
 	// Last event
 	if currentEvent != "" && currentData != "" {
-		handleCozeEvent(c, currentEvent, currentData, &responseText, &usage, id, info)
+		handleCozeEvent(c, currentEvent, currentData, &responseText, usage, id, info)
 	}
 
 	if err := scanner.Err(); err != nil {
@@ -143,12 +143,10 @@ func cozeChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommo
 	helper.Done(c)
 
 	if usage.TotalTokens == 0 {
-		usage.PromptTokens = info.PromptTokens
-		usage.CompletionTokens = service.CountTextToken("gpt-3.5-turbo", responseText)
-		usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
+		usage = service.ResponseText2Usage(responseText, info.UpstreamModelName, c.GetInt("coze_input_count"))
 	}
 
-	return nil, &usage
+	return nil, usage
 }
 
 func handleCozeEvent(c *gin.Context, event string, data string, responseText *string, usage *dto.Usage, id string, info *relaycommon.RelayInfo) {
diff --git a/relay/channel/dify/relay-dify.go b/relay/channel/dify/relay-dify.go
index b3ae5927..115aed1b 100644
--- a/relay/channel/dify/relay-dify.go
+++ b/relay/channel/dify/relay-dify.go
@@ -243,15 +243,8 @@ func difyStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
 		return true
 	})
 	helper.Done(c)
-	err := resp.Body.Close()
-	if err != nil {
-		// return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
-		common.SysError("close_response_body_failed: " + err.Error())
-	}
 	if usage.TotalTokens == 0 {
-		usage.PromptTokens = info.PromptTokens
-		usage.CompletionTokens = service.CountTextToken("gpt-3.5-turbo", responseText)
-		usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
+		usage = service.ResponseText2Usage(responseText, info.UpstreamModelName, info.PromptTokens)
 	}
 	usage.CompletionTokens += nodeToken
 	return nil, usage
diff --git a/relay/responses_handler.go b/relay/responses_handler.go
index 9d4adf49..e744e354 100644
--- a/relay/responses_handler.go
+++ b/relay/responses_handler.go
@@ -73,9 +73,6 @@ func ResponsesHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode)
 		relayInfo.SetPromptTokens(promptTokens)
 	} else {
 		promptTokens := getInputTokens(req, relayInfo)
-		if err != nil {
-			return service.OpenAIErrorWrapperLocal(err, "count_input_tokens_error", http.StatusBadRequest)
-		}
 		c.Set("prompt_tokens", promptTokens)
 	}
 
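
Note: both stream handlers now fall back to service.ResponseText2Usage when the
upstream stream reports no usage. That helper is not shown in this patch; the
following is a minimal sketch of its presumed behavior, assuming CountTextToken
takes the text first and the model name second (matching the call order in the
relay-claude.go hunk above), and assuming the third argument carries the prompt
token count (info.PromptTokens for dify, the "coze_input_count" context value
for coze).

	// Sketch only: presumed shape of service.ResponseText2Usage,
	// not the actual implementation from this repository.
	func ResponseText2Usage(responseText string, modelName string, promptTokens int) *dto.Usage {
		usage := &dto.Usage{}
		usage.PromptTokens = promptTokens
		// Count completion tokens from the accumulated response text,
		// mirroring CountTextToken(text, model) as used in relay-claude.go.
		usage.CompletionTokens = CountTextToken(responseText, modelName)
		usage.TotalTokens = usage.PromptTokens + usage.CompletionTokens
		return usage
	}

Returning a freshly built *dto.Usage (rather than mutating the caller's value)
is consistent with the coze hunk, which switches usage to a pointer and
reassigns it from the helper's return value.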