refactor: Introduce standardized API error

This commit refactors the application's error handling by introducing a new standardized error type, `types.NewAPIError`. It also renames the JSON utility functions in the `common` package for clarity.

Previously, internal error handling was tightly coupled to the `dto.OpenAIError` format. This change decouples the internal logic from the external API representation.

Key changes:
- A new `types.NewAPIError` struct is introduced as the canonical internal representation for all API errors (see the sketch below).
- All relay adapters (OpenAI, Claude, Gemini, etc.) are updated to return `*types.NewAPIError`.
- Controllers now convert the internal `NewAPIError` to the client-facing `OpenAIError` format at the API boundary, ensuring backward compatibility (a sketch of this conversion appears at the end of the diff).
- Channel auto-disable/enable logic is updated to use the new standardized error type.
- JSON utility functions are renamed to align with Go's standard library conventions (e.g., `UnmarshalJson` -> `Unmarshal`, `EncodeJson` -> `Marshal`).
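
A minimal sketch of the shape such an internal error type might take. Only `types.NewError(err, code)` and `types.ErrorCodeBadResponseBody` appear in the hunks below; the field names, the `ErrorCode` type, and the default status code here are assumptions for illustration, not the actual implementation.

package types

import "net/http"

// ErrorCode identifies the class of failure; only ErrorCodeBadResponseBody
// is visible in the hunks below, other codes would be defined alongside it.
type ErrorCode string

const ErrorCodeBadResponseBody ErrorCode = "bad_response_body"

// NewAPIError is the canonical internal error carried through the relay layer.
// Field names are illustrative.
type NewAPIError struct {
	Err        error
	Code       ErrorCode
	StatusCode int
}

func (e *NewAPIError) Error() string { return e.Err.Error() }

// NewError wraps an underlying error with an error code, matching the
// call sites in the hunks (types.NewError(err, types.ErrorCodeBadResponseBody)).
func NewError(err error, code ErrorCode) *NewAPIError {
	return &NewAPIError{Err: err, Code: code, StatusCode: http.StatusInternalServerError}
}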

Author: CaIon
Date:   2025-07-10 15:02:40 +08:00
Commit: 98952198bb (parent: 0e6b608f91)
77 changed files with 1071 additions and 930 deletions


@@ -11,8 +11,8 @@ import (
"one-api/relay/channel"
relaycommon "one-api/relay/common"
"one-api/relay/constant"
"one-api/service"
"one-api/setting/model_setting"
"one-api/types"
"strings"
"github.com/gin-gonic/gin"
@@ -168,30 +168,30 @@ func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, request
return channel.DoApiRequest(a, c, info, requestBody)
}
-func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
+func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *types.NewAPIError) {
if info.RelayMode == constant.RelayModeGemini {
if info.IsStream {
-return GeminiTextGenerationStreamHandler(c, resp, info)
+return GeminiTextGenerationStreamHandler(c, info, resp)
} else {
-return GeminiTextGenerationHandler(c, resp, info)
+return GeminiTextGenerationHandler(c, info, resp)
}
}
if strings.HasPrefix(info.UpstreamModelName, "imagen") {
-return GeminiImageHandler(c, resp, info)
+return GeminiImageHandler(c, info, resp)
}
// check if the model is an embedding model
if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
strings.HasPrefix(info.UpstreamModelName, "embedding") ||
strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
-return GeminiEmbeddingHandler(c, resp, info)
+return GeminiEmbeddingHandler(c, info, resp)
}
if info.IsStream {
-err, usage = GeminiChatStreamHandler(c, resp, info)
+return GeminiChatStreamHandler(c, info, resp)
} else {
-err, usage = GeminiChatHandler(c, resp, info)
+return GeminiChatHandler(c, info, resp)
}
//if usage.(*dto.Usage).CompletionTokenDetails.ReasoningTokens > 100 {
@@ -205,23 +205,23 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
// }
//}
-return
+return nil, types.NewError(errors.New("not implemented"), types.ErrorCodeBadResponseBody)
}
-func GeminiImageHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
+func GeminiImageHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
responseBody, readErr := io.ReadAll(resp.Body)
if readErr != nil {
-return nil, service.OpenAIErrorWrapper(readErr, "read_response_body_failed", http.StatusInternalServerError)
+return nil, types.NewError(readErr, types.ErrorCodeBadResponseBody)
}
_ = resp.Body.Close()
var geminiResponse GeminiImageResponse
if jsonErr := json.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
-return nil, service.OpenAIErrorWrapper(jsonErr, "unmarshal_response_body_failed", http.StatusInternalServerError)
+return nil, types.NewError(jsonErr, types.ErrorCodeBadResponseBody)
}
if len(geminiResponse.Predictions) == 0 {
-return nil, service.OpenAIErrorWrapper(errors.New("no images generated"), "no_images", http.StatusBadRequest)
+return nil, types.NewError(errors.New("no images generated"), types.ErrorCodeBadResponseBody)
}
// convert to openai format response
@@ -241,7 +241,7 @@ func GeminiImageHandler(c *gin.Context, resp *http.Response, info *relaycommon.R
jsonResponse, jsonErr := json.Marshal(openAIResponse)
if jsonErr != nil {
-return nil, service.OpenAIErrorWrapper(jsonErr, "marshal_response_failed", http.StatusInternalServerError)
+return nil, types.NewError(jsonErr, types.ErrorCodeBadResponseBody)
}
c.Writer.Header().Set("Content-Type", "application/json")
@@ -253,7 +253,7 @@ func GeminiImageHandler(c *gin.Context, resp *http.Response, info *relaycommon.R
const imageTokens = 258
generatedImages := len(openAIResponse.Data)
-usage = &dto.Usage{
+usage := &dto.Usage{
PromptTokens: imageTokens * generatedImages, // each generated image has fixed 258 tokens
CompletionTokens: 0, // image generation does not calculate completion tokens
TotalTokens: imageTokens * generatedImages,
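
// Worked example (illustrative, not part of the diff): the handler above bills a flat
// 258 prompt tokens per generated image, so a response with two images in
// openAIResponse.Data is charged as:
//   PromptTokens     = 258 * 2 = 516
//   CompletionTokens = 0
//   TotalTokens      = 258 * 2 = 516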


@@ -8,18 +8,19 @@ import (
relaycommon "one-api/relay/common"
"one-api/relay/helper"
"one-api/service"
"one-api/types"
"strings"
"github.com/gin-gonic/gin"
)
-func GeminiTextGenerationHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.Usage, *dto.OpenAIErrorWithStatusCode) {
+func GeminiTextGenerationHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
defer common.CloseResponseBodyGracefully(resp)
// read the response body
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
-return nil, service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
+return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
}
if common.DebugEnabled {
@@ -28,9 +29,9 @@ func GeminiTextGenerationHandler(c *gin.Context, resp *http.Response, info *rela
// parse into the native Gemini response format
var geminiResponse GeminiChatResponse
-err = common.UnmarshalJson(responseBody, &geminiResponse)
+err = common.Unmarshal(responseBody, &geminiResponse)
if err != nil {
-return nil, service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
+return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
}
// calculate usage (based on UsageMetadata)
@@ -51,9 +52,9 @@ func GeminiTextGenerationHandler(c *gin.Context, resp *http.Response, info *rela
}
// return the Gemini-native JSON response directly
-jsonResponse, err := common.EncodeJson(geminiResponse)
+jsonResponse, err := common.Marshal(geminiResponse)
if err != nil {
-return nil, service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError)
+return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
}
common.IOCopyBytesGracefully(c, resp, jsonResponse)
@@ -61,7 +62,7 @@ func GeminiTextGenerationHandler(c *gin.Context, resp *http.Response, info *rela
return &usage, nil
}
-func GeminiTextGenerationStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.Usage, *dto.OpenAIErrorWithStatusCode) {
+func GeminiTextGenerationStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
var usage = &dto.Usage{}
var imageCount int
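
The hunks above replace `common.UnmarshalJson` and `common.EncodeJson` with `common.Unmarshal` and `common.Marshal`. A minimal sketch of what the renamed helpers could look like, assuming they are thin wrappers over the standard library (the real implementation may delegate to a different JSON backend):

package common

import "encoding/json"

// Unmarshal replaces the old UnmarshalJson helper.
func Unmarshal(data []byte, v any) error {
	return json.Unmarshal(data, v)
}

// Marshal replaces the old EncodeJson helper.
func Marshal(v any) ([]byte, error) {
	return json.Marshal(v)
}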


@@ -2,6 +2,7 @@ package gemini
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
@@ -12,6 +13,7 @@ import (
"one-api/relay/helper"
"one-api/service"
"one-api/setting/model_setting"
"one-api/types"
"strconv"
"strings"
"unicode/utf8"
@@ -792,7 +794,7 @@ func streamResponseGeminiChat2OpenAI(geminiResponse *GeminiChatResponse) (*dto.C
return &response, isStop, hasImage
}
-func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+func GeminiChatStreamHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
// responseText := ""
id := helper.GetResponseID(c)
createAt := common.GetTimestamp()
@@ -858,33 +860,25 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
}
helper.Done(c)
//resp.Body.Close()
-return nil, usage
+return usage, nil
}
-func GeminiChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+func GeminiChatHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
-return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
+return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
}
common.CloseResponseBodyGracefully(resp)
if common.DebugEnabled {
println(string(responseBody))
}
var geminiResponse GeminiChatResponse
-err = common.UnmarshalJson(responseBody, &geminiResponse)
+err = common.Unmarshal(responseBody, &geminiResponse)
if err != nil {
-return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
+return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
}
if len(geminiResponse.Candidates) == 0 {
-return &dto.OpenAIErrorWithStatusCode{
-Error: dto.OpenAIError{
-Message: "No candidates returned",
-Type: "server_error",
-Param: "",
-Code: 500,
-},
-StatusCode: resp.StatusCode,
-}, nil
+return nil, types.NewError(errors.New("no candidates returned"), types.ErrorCodeBadResponseBody)
}
fullTextResponse := responseGeminiChat2OpenAI(c, &geminiResponse)
fullTextResponse.Model = info.UpstreamModelName
@@ -908,25 +902,25 @@ func GeminiChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.Re
fullTextResponse.Usage = usage
jsonResponse, err := json.Marshal(fullTextResponse)
if err != nil {
-return service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError), nil
+return nil, types.NewError(err, types.ErrorCodeBadResponseBody)
}
c.Writer.Header().Set("Content-Type", "application/json")
c.Writer.WriteHeader(resp.StatusCode)
-_, err = c.Writer.Write(jsonResponse)
-return nil, &usage
+c.Writer.Write(jsonResponse)
+return &usage, nil
}
-func GeminiEmbeddingHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
+func GeminiEmbeddingHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *http.Response) (*dto.Usage, *types.NewAPIError) {
defer common.CloseResponseBodyGracefully(resp)
responseBody, readErr := io.ReadAll(resp.Body)
if readErr != nil {
-return nil, service.OpenAIErrorWrapper(readErr, "read_response_body_failed", http.StatusInternalServerError)
+return nil, types.NewError(readErr, types.ErrorCodeBadResponseBody)
}
var geminiResponse GeminiEmbeddingResponse
-if jsonErr := json.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
-return nil, service.OpenAIErrorWrapper(jsonErr, "unmarshal_response_body_failed", http.StatusInternalServerError)
+if jsonErr := common.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
+return nil, types.NewError(jsonErr, types.ErrorCodeBadResponseBody)
}
// convert to openai format response
@@ -947,16 +941,16 @@ func GeminiEmbeddingHandler(c *gin.Context, resp *http.Response, info *relaycomm
// Google has not yet clarified how embedding models will be billed
// refer to openai billing method to use input tokens billing
// https://platform.openai.com/docs/guides/embeddings#what-are-embeddings
-usage = &dto.Usage{
+usage := &dto.Usage{
PromptTokens: info.PromptTokens,
CompletionTokens: 0,
TotalTokens: info.PromptTokens,
}
-openAIResponse.Usage = *usage.(*dto.Usage)
+openAIResponse.Usage = *usage
-jsonResponse, jsonErr := common.EncodeJson(openAIResponse)
+jsonResponse, jsonErr := common.Marshal(openAIResponse)
if jsonErr != nil {
-return nil, service.OpenAIErrorWrapper(jsonErr, "marshal_response_failed", http.StatusInternalServerError)
+return nil, types.NewError(jsonErr, types.ErrorCodeBadResponseBody)
}
common.IOCopyBytesGracefully(c, resp, jsonResponse)
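
The controller-side conversion described in the commit message is not part of the hunks shown here. A hedged sketch of how a controller might map the internal error back to the client-facing OpenAI format; the helper name, and the `NewAPIError` fields and method reused from the hypothetical shape sketched after the commit message, are assumptions:

package controller

import (
	"one-api/dto"
	"one-api/types"

	"github.com/gin-gonic/gin"
)

// writeAPIError is a hypothetical boundary helper: it converts the internal
// *types.NewAPIError into the dto.OpenAIError wire format for the client.
func writeAPIError(c *gin.Context, apiErr *types.NewAPIError) {
	c.JSON(apiErr.StatusCode, gin.H{
		"error": dto.OpenAIError{
			Message: apiErr.Error(),
			Type:    "server_error",
			Param:   "",
		},
	})
}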