refactor: Enhance error handling in Gemini request conversion

- Updated the `CovertGemini2OpenAI` function to return an error alongside the `GeminiChatRequest`, improving error reporting for image processing (too many images, or base64 decode failures).
- Modified `ConvertRequest` methods in both `adaptor.go` files to handle potential errors from the Gemini conversion, ensuring robust request handling.
- Improved clarity and maintainability by explicitly propagating error cases during request conversion instead of silently skipping invalid image parts.
This commit is contained in:
CalciumIon
2024-12-20 21:50:58 +08:00
parent f9a7f6085e
commit 03256dbdad
3 changed files with 13 additions and 6 deletions

View File

@@ -57,7 +57,11 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, re
if request == nil { if request == nil {
return nil, errors.New("request is nil") return nil, errors.New("request is nil")
} }
return CovertGemini2OpenAI(*request), nil ai, err := CovertGemini2OpenAI(*request)
if err != nil {
return nil, err
}
return ai, nil
} }
func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) { func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {

View File

@@ -17,7 +17,7 @@ import (
) )
// Setting safety to the lowest possible values since Gemini is already powerless enough // Setting safety to the lowest possible values since Gemini is already powerless enough
func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest) *GeminiChatRequest { func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest) (*GeminiChatRequest, error) {
geminiRequest := GeminiChatRequest{ geminiRequest := GeminiChatRequest{
Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)), Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
SafetySettings: []GeminiChatSafetySettings{ SafetySettings: []GeminiChatSafetySettings{
@@ -110,7 +110,7 @@ func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest) *GeminiChatReques
imageNum += 1 imageNum += 1
if constant.GeminiVisionMaxImageNum != -1 && imageNum > constant.GeminiVisionMaxImageNum { if constant.GeminiVisionMaxImageNum != -1 && imageNum > constant.GeminiVisionMaxImageNum {
continue return nil, fmt.Errorf("too many images in the message, max allowed is %d", constant.GeminiVisionMaxImageNum)
} }
// 判断是否是url // 判断是否是url
if strings.HasPrefix(part.ImageUrl.(dto.MessageImageUrl).Url, "http") { if strings.HasPrefix(part.ImageUrl.(dto.MessageImageUrl).Url, "http") {
@@ -125,7 +125,7 @@ func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest) *GeminiChatReques
} else { } else {
_, format, base64String, err := service.DecodeBase64ImageData(part.ImageUrl.(dto.MessageImageUrl).Url) _, format, base64String, err := service.DecodeBase64ImageData(part.ImageUrl.(dto.MessageImageUrl).Url)
if err != nil { if err != nil {
continue return nil, fmt.Errorf("decode base64 image data failed: %s", err.Error())
} }
parts = append(parts, GeminiPart{ parts = append(parts, GeminiPart{
InlineData: &GeminiInlineData{ InlineData: &GeminiInlineData{
@@ -162,7 +162,7 @@ func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest) *GeminiChatReques
// shouldAddDummyModelMessage = false // shouldAddDummyModelMessage = false
//} //}
} }
return &geminiRequest return &geminiRequest, nil
} }
func (g *GeminiChatResponse) GetResponseText() string { func (g *GeminiChatResponse) GetResponseText() string {

View File

@@ -135,7 +135,10 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, re
c.Set("request_model", request.Model) c.Set("request_model", request.Model)
return vertexClaudeReq, nil return vertexClaudeReq, nil
} else if a.RequestMode == RequestModeGemini { } else if a.RequestMode == RequestModeGemini {
geminiRequest := gemini.CovertGemini2OpenAI(*request) geminiRequest, err := gemini.CovertGemini2OpenAI(*request)
if err != nil {
return nil, err
}
c.Set("request_model", request.Model) c.Set("request_model", request.Model)
return geminiRequest, nil return geminiRequest, nil
} else if a.RequestMode == RequestModeLlama { } else if a.RequestMode == RequestModeLlama {