From 58fac129d69ef831ed552e4544768ffd99f5cc5f Mon Sep 17 00:00:00 2001
From: CalciumIon <1808837298@qq.com>
Date: Mon, 23 Dec 2024 00:02:15 +0800
Subject: [PATCH] feat: Enhance GeminiChatHandler to include RelayInfo

- Updated the GeminiChatHandler function to accept an additional
  parameter, info *relaycommon.RelayInfo, giving the non-stream handler
  the same per-request context the stream handler already receives.
- Modified the DoResponse methods in the Gemini and Vertex adaptors to
  pass RelayInfo through to GeminiChatHandler.
- Updated GeminiChatStreamHandler and GeminiChatHandler to set the
  response Model field from RelayInfo's upstream model name, so Gemini
  responses consistently report the upstream model.
---
 relay/channel/gemini/adaptor.go      | 2 +-
 relay/channel/gemini/relay-gemini.go | 4 +++-
 relay/channel/vertex/adaptor.go      | 2 +-
 3 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/relay/channel/gemini/adaptor.go b/relay/channel/gemini/adaptor.go
index 8e9dfd1e..9a5bc251 100644
--- a/relay/channel/gemini/adaptor.go
+++ b/relay/channel/gemini/adaptor.go
@@ -76,7 +76,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	if info.IsStream {
 		err, usage = GeminiChatStreamHandler(c, resp, info)
 	} else {
-		err, usage = GeminiChatHandler(c, resp)
+		err, usage = GeminiChatHandler(c, resp, info)
 	}
 	return
 }
diff --git a/relay/channel/gemini/relay-gemini.go b/relay/channel/gemini/relay-gemini.go
index 3791fd44..8c914aac 100644
--- a/relay/channel/gemini/relay-gemini.go
+++ b/relay/channel/gemini/relay-gemini.go
@@ -355,6 +355,7 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
 		}
 		response.Id = id
 		response.Created = createAt
+		response.Model = info.UpstreamModelName
 		responseText += response.Choices[0].Delta.GetContentString()
 		if geminiResponse.UsageMetadata.TotalTokenCount != 0 {
 			usage.PromptTokens = geminiResponse.UsageMetadata.PromptTokenCount
@@ -383,7 +384,7 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
 	return nil, usage
 }
 
-func GeminiChatHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+func GeminiChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
@@ -409,6 +410,7 @@ func GeminiChatHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWit
 		}, nil
 	}
 	fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
+	fullTextResponse.Model = info.UpstreamModelName
 	usage := dto.Usage{
 		PromptTokens:     geminiResponse.UsageMetadata.PromptTokenCount,
 		CompletionTokens: geminiResponse.UsageMetadata.CandidatesTokenCount,
diff --git a/relay/channel/vertex/adaptor.go b/relay/channel/vertex/adaptor.go
index ebff8207..764e5c4b 100644
--- a/relay/channel/vertex/adaptor.go
+++ b/relay/channel/vertex/adaptor.go
@@ -170,7 +170,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	case RequestModeClaude:
 		err, usage = claude.ClaudeHandler(c, resp, claude.RequestModeMessage, info)
 	case RequestModeGemini:
-		err, usage = gemini.GeminiChatHandler(c, resp)
+		err, usage = gemini.GeminiChatHandler(c, resp, info)
 	case RequestModeLlama:
 		err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.OriginModelName)
 	}
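
For reviewers, the pattern both handlers now share, condensed into a
standalone Go sketch. The UpstreamModelName field and the assignment line
mirror the diff; RelayInfo and the response type below are simplified
stand-ins for the project's relaycommon and dto definitions, and the model
name in main is a hypothetical value for illustration:

package main

import "fmt"

// RelayInfo is a simplified stand-in for relaycommon.RelayInfo; only
// the field this patch reads is modeled here.
type RelayInfo struct {
	UpstreamModelName string
}

// TextResponse is a simplified stand-in for the OpenAI-style response
// DTO whose Model field the patch now fills in.
type TextResponse struct {
	Model string
}

// geminiChatHandler condenses the patched behavior: with RelayInfo
// threaded in, the handler can stamp the upstream model name onto the
// converted response before it is returned to the client.
func geminiChatHandler(info *RelayInfo) *TextResponse {
	resp := &TextResponse{}
	resp.Model = info.UpstreamModelName
	return resp
}

func main() {
	// Hypothetical model name, for illustration only.
	info := &RelayInfo{UpstreamModelName: "gemini-1.5-pro"}
	fmt.Println(geminiChatHandler(info).Model) // prints: gemini-1.5-pro
}

Passing the full *RelayInfo rather than just a model string keeps
GeminiChatHandler's signature consistent with GeminiChatStreamHandler and
claude.ClaudeHandler, and leaves room for the handler to read other
per-request context later without another signature change.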