This commit refactors the logging mechanism across the application by replacing direct logger calls with a centralized approach using the `common` package. Key changes:
- Replaced `logger.SysLog` and `logger.FatalLog` with `common.SysLog` and `common.FatalLog` for consistent logging practices.
- Updated resource-initialization error handling to use the new logging structure, improving maintainability and readability.
- Minor adjustments for code clarity and organization across several modules.
These changes streamline logging and improve the overall architecture of the codebase.
248 lines
7.4 KiB
Go
package gemini
|
|
|
|
import (
|
|
"errors"
|
|
"fmt"
|
|
"io"
|
|
"net/http"
|
|
"one-api/dto"
|
|
"one-api/relay/channel"
|
|
"one-api/relay/channel/openai"
|
|
relaycommon "one-api/relay/common"
|
|
"one-api/relay/constant"
|
|
"one-api/setting/model_setting"
|
|
"one-api/types"
|
|
"strings"
|
|
|
|
"github.com/gin-gonic/gin"
|
|
)
|
|
|
|
// Adaptor implements the relay channel adaptor interface for the Gemini
// upstream API (generateContent, embedContent/batchEmbedContents, and the
// imagen :predict endpoint). It is stateless; all per-request state is
// carried in *relaycommon.RelayInfo.
type Adaptor struct {
}
|
|
|
|
func (a *Adaptor) ConvertGeminiRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeminiChatRequest) (any, error) {
|
|
if len(request.Contents) > 0 {
|
|
for i, content := range request.Contents {
|
|
if i == 0 {
|
|
if request.Contents[0].Role == "" {
|
|
request.Contents[0].Role = "user"
|
|
}
|
|
}
|
|
for _, part := range content.Parts {
|
|
if part.FileData != nil {
|
|
if part.FileData.MimeType == "" && strings.Contains(part.FileData.FileUri, "www.youtube.com") {
|
|
part.FileData.MimeType = "video/webm"
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
return request, nil
|
|
}
|
|
|
|
func (a *Adaptor) ConvertClaudeRequest(c *gin.Context, info *relaycommon.RelayInfo, req *dto.ClaudeRequest) (any, error) {
|
|
adaptor := openai.Adaptor{}
|
|
oaiReq, err := adaptor.ConvertClaudeRequest(c, info, req)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return a.ConvertOpenAIRequest(c, info, oaiReq.(*dto.GeneralOpenAIRequest))
|
|
}
|
|
|
|
// ConvertAudioRequest is not supported for the Gemini channel; it always
// returns a "not implemented" error.
//
// TODO: implement audio (speech) request conversion for Gemini.
func (a *Adaptor) ConvertAudioRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.AudioRequest) (io.Reader, error) {
	//TODO implement me
	return nil, errors.New("not implemented")
}
|
|
|
|
func (a *Adaptor) ConvertImageRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.ImageRequest) (any, error) {
|
|
if !strings.HasPrefix(info.UpstreamModelName, "imagen") {
|
|
return nil, errors.New("not supported model for image generation")
|
|
}
|
|
|
|
// convert size to aspect ratio
|
|
aspectRatio := "1:1" // default aspect ratio
|
|
switch request.Size {
|
|
case "1024x1024":
|
|
aspectRatio = "1:1"
|
|
case "1024x1792":
|
|
aspectRatio = "9:16"
|
|
case "1792x1024":
|
|
aspectRatio = "16:9"
|
|
}
|
|
|
|
// build gemini imagen request
|
|
geminiRequest := dto.GeminiImageRequest{
|
|
Instances: []dto.GeminiImageInstance{
|
|
{
|
|
Prompt: request.Prompt,
|
|
},
|
|
},
|
|
Parameters: dto.GeminiImageParameters{
|
|
SampleCount: int(request.N),
|
|
AspectRatio: aspectRatio,
|
|
PersonGeneration: "allow_adult", // default allow adult
|
|
},
|
|
}
|
|
|
|
return geminiRequest, nil
|
|
}
|
|
|
|
// Init is a no-op for the Gemini adaptor; all per-request state is carried
// in the RelayInfo passed to the other methods.
func (a *Adaptor) Init(info *relaycommon.RelayInfo) {

}
|
|
|
|
func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
|
|
|
|
if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
|
|
// 新增逻辑:处理 -thinking-<budget> 格式
|
|
if strings.Contains(info.UpstreamModelName, "-thinking-") {
|
|
parts := strings.Split(info.UpstreamModelName, "-thinking-")
|
|
info.UpstreamModelName = parts[0]
|
|
} else if strings.HasSuffix(info.UpstreamModelName, "-thinking") { // 旧的适配
|
|
info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-thinking")
|
|
} else if strings.HasSuffix(info.UpstreamModelName, "-nothinking") {
|
|
info.UpstreamModelName = strings.TrimSuffix(info.UpstreamModelName, "-nothinking")
|
|
}
|
|
}
|
|
|
|
version := model_setting.GetGeminiVersionSetting(info.UpstreamModelName)
|
|
|
|
if strings.HasPrefix(info.UpstreamModelName, "imagen") {
|
|
return fmt.Sprintf("%s/%s/models/%s:predict", info.ChannelBaseUrl, version, info.UpstreamModelName), nil
|
|
}
|
|
|
|
if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
|
|
strings.HasPrefix(info.UpstreamModelName, "embedding") ||
|
|
strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
|
|
action := "embedContent"
|
|
if info.IsGeminiBatchEmbedding {
|
|
action = "batchEmbedContents"
|
|
}
|
|
return fmt.Sprintf("%s/%s/models/%s:%s", info.ChannelBaseUrl, version, info.UpstreamModelName, action), nil
|
|
}
|
|
|
|
action := "generateContent"
|
|
if info.IsStream {
|
|
action = "streamGenerateContent?alt=sse"
|
|
if info.RelayMode == constant.RelayModeGemini {
|
|
info.DisablePing = true
|
|
}
|
|
}
|
|
return fmt.Sprintf("%s/%s/models/%s:%s", info.ChannelBaseUrl, version, info.UpstreamModelName, action), nil
|
|
}
|
|
|
|
// SetupRequestHeader applies the common relay headers and then attaches the
// channel's API key via the x-goog-api-key header, which is how the Gemini
// API authenticates requests. It never fails.
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
	channel.SetupApiRequestHeader(info, c, req)
	req.Set("x-goog-api-key", info.ApiKey)
	return nil
}
|
|
|
|
func (a *Adaptor) ConvertOpenAIRequest(c *gin.Context, info *relaycommon.RelayInfo, request *dto.GeneralOpenAIRequest) (any, error) {
|
|
if request == nil {
|
|
return nil, errors.New("request is nil")
|
|
}
|
|
|
|
geminiRequest, err := CovertGemini2OpenAI(*request, info)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return geminiRequest, nil
|
|
}
|
|
|
|
// ConvertRerankRequest is not supported for the Gemini channel; it returns
// (nil, nil), i.e. no payload and no error.
// NOTE(review): returning nil without an error looks intentional
// (rerank is silently skipped), but confirm callers handle a nil payload.
func (a *Adaptor) ConvertRerankRequest(c *gin.Context, relayMode int, request dto.RerankRequest) (any, error) {
	return nil, nil
}
|
|
|
|
func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.EmbeddingRequest) (any, error) {
|
|
if request.Input == nil {
|
|
return nil, errors.New("input is required")
|
|
}
|
|
|
|
inputs := request.ParseInput()
|
|
if len(inputs) == 0 {
|
|
return nil, errors.New("input is empty")
|
|
}
|
|
// We always build a batch-style payload with `requests`, so ensure we call the
|
|
// batch endpoint upstream to avoid payload/endpoint mismatches.
|
|
info.IsGeminiBatchEmbedding = true
|
|
// process all inputs
|
|
geminiRequests := make([]map[string]interface{}, 0, len(inputs))
|
|
for _, input := range inputs {
|
|
geminiRequest := map[string]interface{}{
|
|
"model": fmt.Sprintf("models/%s", info.UpstreamModelName),
|
|
"content": dto.GeminiChatContent{
|
|
Parts: []dto.GeminiPart{
|
|
{
|
|
Text: input,
|
|
},
|
|
},
|
|
},
|
|
}
|
|
|
|
// set specific parameters for different models
|
|
// https://ai.google.dev/api/embeddings?hl=zh-cn#method:-models.embedcontent
|
|
switch info.UpstreamModelName {
|
|
case "text-embedding-004", "gemini-embedding-exp-03-07", "gemini-embedding-001":
|
|
// Only newer models introduced after 2024 support OutputDimensionality
|
|
if request.Dimensions > 0 {
|
|
geminiRequest["outputDimensionality"] = request.Dimensions
|
|
}
|
|
}
|
|
geminiRequests = append(geminiRequests, geminiRequest)
|
|
}
|
|
|
|
return map[string]interface{}{
|
|
"requests": geminiRequests,
|
|
}, nil
|
|
}
|
|
|
|
// ConvertOpenAIResponsesRequest is not supported for the Gemini channel yet;
// it always returns a "not implemented" error.
//
// TODO: implement OpenAI Responses API conversion for Gemini.
func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) {
	// TODO implement me
	return nil, errors.New("not implemented")
}
|
|
|
|
// DoRequest delegates to the shared channel helper, which builds and sends
// the HTTP request using this adaptor's GetRequestURL and SetupRequestHeader.
func (a *Adaptor) DoRequest(c *gin.Context, info *relaycommon.RelayInfo, requestBody io.Reader) (any, error) {
	return channel.DoApiRequest(a, c, info, requestBody)
}
|
|
|
|
func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (usage any, err *types.NewAPIError) {
|
|
if info.RelayMode == constant.RelayModeGemini {
|
|
if strings.HasSuffix(info.RequestURLPath, ":embedContent") ||
|
|
strings.HasSuffix(info.RequestURLPath, ":batchEmbedContents") {
|
|
return NativeGeminiEmbeddingHandler(c, resp, info)
|
|
}
|
|
if info.IsStream {
|
|
return GeminiTextGenerationStreamHandler(c, info, resp)
|
|
} else {
|
|
return GeminiTextGenerationHandler(c, info, resp)
|
|
}
|
|
}
|
|
|
|
if strings.HasPrefix(info.UpstreamModelName, "imagen") {
|
|
return GeminiImageHandler(c, info, resp)
|
|
}
|
|
|
|
// check if the model is an embedding model
|
|
if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
|
|
strings.HasPrefix(info.UpstreamModelName, "embedding") ||
|
|
strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
|
|
return GeminiEmbeddingHandler(c, info, resp)
|
|
}
|
|
|
|
if info.IsStream {
|
|
return GeminiChatStreamHandler(c, info, resp)
|
|
} else {
|
|
return GeminiChatHandler(c, info, resp)
|
|
}
|
|
|
|
}
|
|
|
|
// GetModelList returns the package-level list of models this channel serves.
func (a *Adaptor) GetModelList() []string {
	return ModelList
}
|
|
|
|
// GetChannelName returns the package-level display name of this channel.
func (a *Adaptor) GetChannelName() string {
	return ChannelName
}
|