feat: Enhance GeminiChatHandler to include RelayInfo
- Updated the GeminiChatHandler function to accept an additional parameter, RelayInfo, allowing for better context handling during chat operations.
- Modified the DoResponse method in the Adaptor to pass RelayInfo to GeminiChatHandler, ensuring consistent use of upstream model information.
- Enhanced GeminiChatStreamHandler to use the upstream model name from RelayInfo, improving the accuracy of the model name reported in Gemini responses.
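For context, a minimal, self-contained sketch of the behavior the diff introduces. The UpstreamModelName field and the Model assignment are taken from the diff; everything else (the simplified type shapes, the stampUpstreamModel helper, the example model name) is stand-in code for illustration, not the repository's actual types.

package main

import "fmt"

// RelayInfo is a simplified stand-in for the project's relaycommon.RelayInfo;
// only UpstreamModelName is taken from the diff, the rest is assumed.
type RelayInfo struct {
	UpstreamModelName string
}

// textResponse is a simplified stand-in for the dto response types whose
// Model field both handlers now overwrite.
type textResponse struct {
	Id      string
	Created int64
	Model   string
}

// stampUpstreamModel mirrors the one-line change made in both
// GeminiChatStreamHandler and GeminiChatHandler: the response reports the
// upstream model name carried in RelayInfo rather than whatever the
// converted Gemini payload contained.
func stampUpstreamModel(resp *textResponse, info *RelayInfo) {
	resp.Model = info.UpstreamModelName
}

func main() {
	info := &RelayInfo{UpstreamModelName: "gemini-1.5-pro"} // hypothetical model name
	resp := &textResponse{Id: "chatcmpl-123", Created: 1700000000}
	stampUpstreamModel(resp, info)
	fmt.Println(resp.Model) // prints: gemini-1.5-pro
}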
@@ -76,7 +76,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	if info.IsStream {
 		err, usage = GeminiChatStreamHandler(c, resp, info)
 	} else {
-		err, usage = GeminiChatHandler(c, resp)
+		err, usage = GeminiChatHandler(c, resp, info)
 	}
 	return
 }

@@ -355,6 +355,7 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
 	}
 	response.Id = id
 	response.Created = createAt
+	response.Model = info.UpstreamModelName
 	responseText += response.Choices[0].Delta.GetContentString()
 	if geminiResponse.UsageMetadata.TotalTokenCount != 0 {
 		usage.PromptTokens = geminiResponse.UsageMetadata.PromptTokenCount
@@ -383,7 +384,7 @@ func GeminiChatStreamHandler(c *gin.Context, resp *http.Response, info *relaycom
 	return nil, usage
 }
 
-func GeminiChatHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
+func GeminiChatHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
 	responseBody, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
@@ -409,6 +410,7 @@ func GeminiChatHandler(c *gin.Context, resp *http.Response) (*dto.OpenAIErrorWit
 		}, nil
 	}
 	fullTextResponse := responseGeminiChat2OpenAI(&geminiResponse)
+	fullTextResponse.Model = info.UpstreamModelName
 	usage := dto.Usage{
 		PromptTokens:     geminiResponse.UsageMetadata.PromptTokenCount,
 		CompletionTokens: geminiResponse.UsageMetadata.CandidatesTokenCount,

@@ -170,7 +170,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
 	case RequestModeClaude:
 		err, usage = claude.ClaudeHandler(c, resp, claude.RequestModeMessage, info)
 	case RequestModeGemini:
-		err, usage = gemini.GeminiChatHandler(c, resp)
+		err, usage = gemini.GeminiChatHandler(c, resp, info)
 	case RequestModeLlama:
 		err, usage = openai.OpenaiHandler(c, resp, info.PromptTokens, info.OriginModelName)
 	}
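One practical note: the new info parameter makes the GeminiChatHandler signature a breaking change for any other adaptor that calls it directly, as the multi-mode adaptor in the last hunk does. A hypothetical third-party call site would be updated the same way:

	// before: err, usage = gemini.GeminiChatHandler(c, resp)
	// after: thread through the *relaycommon.RelayInfo that DoResponse already receives
	err, usage = gemini.GeminiChatHandler(c, resp, info)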