Merge pull request #1107 from QuantumNous/gemini-relay

Gemini format
IcedTangerine authored on 2025-06-03 10:50:50 +08:00 (committed by GitHub)
9 changed files with 365 additions and 4 deletions

@@ -10,6 +10,7 @@ import (
	"one-api/dto"
	"one-api/relay/channel"
	relaycommon "one-api/relay/common"
	"one-api/relay/constant"
	"one-api/service"
	"one-api/setting/model_setting"
	"strings"
@@ -165,6 +166,14 @@ func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, request
}

func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *dto.OpenAIErrorWithStatusCode) {
	if info.RelayMode == constant.RelayModeGemini {
		if info.IsStream {
			return GeminiTextGenerationStreamHandler(c, resp, info)
		} else {
			return GeminiTextGenerationHandler(c, resp, info)
		}
	}
	if strings.HasPrefix(info.UpstreamModelName, "imagen") {
		return GeminiImageHandler(c, resp, info)
	}
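For context, the branch added above handles requests that arrive in Gemini's native generateContent format and passes the upstream response through unconverted, instead of translating it to the OpenAI format. Below is a minimal client sketch of such a request; the relay base URL, route, model name, and auth header are illustrative assumptions, not confirmed by this diff.

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Native Gemini generateContent body (shape per Google's v1beta API).
	body := []byte(`{"contents":[{"role":"user","parts":[{"text":"Hello"}]}]}`)
	// Hypothetical relay route; the actual path depends on the deployment.
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:3000/v1beta/models/gemini-2.0-flash:generateContent",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("x-goog-api-key", "sk-...") // relay token; header name is an assumption
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // native GeminiChatResponse JSON, passed through as-is
}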

@@ -0,0 +1,128 @@
package gemini

import (
	"encoding/json"
	"io"
	"net/http"
	"one-api/common"
	"one-api/dto"
	relaycommon "one-api/relay/common"
	"one-api/relay/helper"
	"one-api/service"

	"github.com/gin-gonic/gin"
)
func GeminiTextGenerationHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.Usage, *dto.OpenAIErrorWithStatusCode) {
	// Read the response body
	responseBody, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError)
	}
	err = resp.Body.Close()
	if err != nil {
		return nil, service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError)
	}
	if common.DebugEnabled {
		println(string(responseBody))
	}
	// Parse the body as a native Gemini response
	var geminiResponse GeminiChatResponse
	err = common.DecodeJson(responseBody, &geminiResponse)
	if err != nil {
		return nil, service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError)
	}
	// Make sure at least one candidate was returned
	if len(geminiResponse.Candidates) == 0 {
		return nil, &dto.OpenAIErrorWithStatusCode{
			Error: dto.OpenAIError{
				Message: "No candidates returned",
				Type:    "server_error",
				Param:   "",
				Code:    500,
			},
			StatusCode: resp.StatusCode,
		}
	}
	// Compute usage (based on UsageMetadata)
	usage := dto.Usage{
		PromptTokens:     geminiResponse.UsageMetadata.PromptTokenCount,
		CompletionTokens: geminiResponse.UsageMetadata.CandidatesTokenCount,
		TotalTokens:      geminiResponse.UsageMetadata.TotalTokenCount,
	}
	// Return the JSON response in Gemini's native format, unmodified
	jsonResponse, err := json.Marshal(geminiResponse)
	if err != nil {
		return nil, service.OpenAIErrorWrapper(err, "marshal_response_body_failed", http.StatusInternalServerError)
	}
	// Set response headers and write the response
	c.Writer.Header().Set("Content-Type", "application/json")
	c.Writer.WriteHeader(resp.StatusCode)
	_, err = c.Writer.Write(jsonResponse)
	if err != nil {
		return nil, service.OpenAIErrorWrapper(err, "write_response_failed", http.StatusInternalServerError)
	}
	return &usage, nil
}

func GeminiTextGenerationStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.Usage, *dto.OpenAIErrorWithStatusCode) {
	var usage = &dto.Usage{}
	var imageCount int
	helper.SetEventStreamHeaders(c)
	helper.StreamScannerHandler(c, resp, info, func(data string) bool {
		var geminiResponse GeminiChatResponse
		err := common.DecodeJsonStr(data, &geminiResponse)
		if err != nil {
			common.LogError(c, "error unmarshalling stream response: "+err.Error())
			return false
		}
		// Count inline images across candidates
		for _, candidate := range geminiResponse.Candidates {
			for _, part := range candidate.Content.Parts {
				if part.InlineData != nil && part.InlineData.MimeType != "" {
					imageCount++
				}
			}
		}
		// Update usage statistics from the chunk's UsageMetadata
		if geminiResponse.UsageMetadata.TotalTokenCount != 0 {
			usage.PromptTokens = geminiResponse.UsageMetadata.PromptTokenCount
			usage.CompletionTokens = geminiResponse.UsageMetadata.CandidatesTokenCount
			usage.TotalTokens = geminiResponse.UsageMetadata.TotalTokenCount
		}
		// Forward the GeminiChatResponse chunk as-is
		err = helper.ObjectData(c, geminiResponse)
		if err != nil {
			common.LogError(c, err.Error())
		}
		return true
	})
	// Compute final usage: derive completion tokens from the reported totals
	usage.PromptTokensDetails.TextTokens = usage.PromptTokens
	usage.CompletionTokens = usage.TotalTokens - usage.PromptTokens
	// Fall back to an estimate of 258 tokens per generated image when the
	// upstream reported no completion tokens
	if imageCount != 0 && usage.CompletionTokens == 0 {
		usage.CompletionTokens = imageCount * 258
	}
	// End the streaming response
	helper.Done(c)
	return usage, nil
}
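The accounting at the end of the stream handler is easy to sanity-check in isolation. The sketch below mirrors that arithmetic with the dto types stubbed out as local stand-ins; the 258-tokens-per-image figure comes from the handler above.

package main

import "fmt"

// Minimal stand-in for the dto.Usage fields the handler touches.
type usage struct {
	PromptTokens, CompletionTokens, TotalTokens int
}

// finalizeUsage mirrors the stream handler's accounting: completion tokens
// are derived from the upstream totals, with a fallback of 258 tokens per
// generated image when none were reported.
func finalizeUsage(u *usage, imageCount int) {
	u.CompletionTokens = u.TotalTokens - u.PromptTokens
	if imageCount != 0 && u.CompletionTokens == 0 {
		u.CompletionTokens = imageCount * 258
	}
}

func main() {
	// Text-only stream: usageMetadata reported 10 prompt / 25 total tokens.
	u := &usage{PromptTokens: 10, TotalTokens: 25}
	finalizeUsage(u, 0)
	fmt.Println(u.CompletionTokens) // 15

	// Image stream where the upstream reported no completion tokens.
	u = &usage{PromptTokens: 10, TotalTokens: 10}
	finalizeUsage(u, 2)
	fmt.Println(u.CompletionTokens) // 516 (2 × 258)
}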

@@ -12,6 +12,7 @@ import (
	"one-api/relay/channel/gemini"
	"one-api/relay/channel/openai"
	relaycommon "one-api/relay/common"
	"one-api/relay/constant"
	"one-api/setting/model_setting"
	"strings"
@@ -201,7 +202,11 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
	case RequestModeClaude:
		err, usage = claude.ClaudeStreamHandler(c, resp, info, claude.RequestModeMessage)
	case RequestModeGemini:
		if info.RelayMode == constant.RelayModeGemini {
			usage, err = gemini.GeminiTextGenerationStreamHandler(c, resp, info)
		} else {
			err, usage = gemini.GeminiChatStreamHandler(c, resp, info)
		}
	case RequestModeLlama:
		err, usage = openai.OaiStreamHandler(c, resp, info)
	}
@@ -210,7 +215,11 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
	case RequestModeClaude:
		err, usage = claude.ClaudeHandler(c, resp, claude.RequestModeMessage, info)
	case RequestModeGemini:
		if info.RelayMode == constant.RelayModeGemini {
			usage, err = gemini.GeminiTextGenerationHandler(c, resp, info)
		} else {
			err, usage = gemini.GeminiChatHandler(c, resp, info)
		}
	case RequestModeLlama:
		err, usage = openai.OpenaiHandler(c, resp, info)
	}
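On the wire, each chunk the stream handler forwards via helper.ObjectData arrives as a server-sent `data:` event. Below is a minimal consumer sketch, assuming standard SSE framing, a `data: [DONE]` terminator from helper.Done, and a hypothetical relay route and auth header; none of these specifics are confirmed by this diff.

package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strings"
)

func main() {
	// The ":streamGenerateContent?alt=sse" suffix follows Google's native
	// API; the relay base URL and token header are assumptions.
	body := `{"contents":[{"role":"user","parts":[{"text":"Hello"}]}]}`
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:3000/v1beta/models/gemini-2.0-flash:streamGenerateContent?alt=sse",
		strings.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("x-goog-api-key", "sk-...") // hypothetical relay token header

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if !strings.HasPrefix(line, "data: ") {
			continue // skip blank separator lines between SSE events
		}
		payload := strings.TrimPrefix(line, "data: ")
		if payload == "[DONE]" { // assumed terminator written by helper.Done
			break
		}
		fmt.Println(payload) // one native GeminiChatResponse chunk per event
	}
}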