✨ feat(gemini): enhance ThinkingAdapter and model handling
- Introduced `isNoThinkingRequest` and `trimModelThinking` functions to manage model names and thinking configurations.
- Updated `GeminiHelper` to conditionally adjust the model name based on the thinking budget and request settings.
- Refactored `ThinkingAdaptor` to streamline the integration of thinking capabilities into Gemini requests.
- Cleaned up commented-out code in `FetchUpstreamModels` for clarity.

These changes improve the handling of model configurations and enhance the adaptability of the Gemini relay system.
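For example (model names illustrative): with the adapter enabled, a request that sets `thinkingBudget: 0` on `gemini-2.5-pro` is re-billed as `gemini-2.5-pro-nothinking` when that price entry exists, while `trimModelThinking` strips `-thinking`/`-nothinking` style suffixes from such names (e.g. `gemini-2.5-flash-thinking-128` → `gemini-2.5-flash-thinking`).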
@@ -134,13 +134,6 @@ func FetchUpstreamModels(c *gin.Context) {
 		return
 	}
 
-	//if channel.Type != common.ChannelTypeOpenAI {
-	//	c.JSON(http.StatusOK, gin.H{
-	//		"success": false,
-	//		"message": "仅支持 OpenAI 类型渠道", // i.e. "only OpenAI-type channels are supported"
-	//	})
-	//	return
-	//}
 	baseURL := common.ChannelBaseURLs[channel.Type]
 	if channel.GetBaseURL() != "" {
 		baseURL = channel.GetBaseURL()
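Aside from the cleanup, the surviving context above shows the default-then-override pattern used for upstream base URLs. A minimal self-contained sketch, where the map contents and channel-type value are placeholders rather than the real `common.ChannelBaseURLs` table:

```go
package main

import "fmt"

// Illustrative defaults; the real table lives in common.ChannelBaseURLs.
var channelBaseURLs = map[int]string{1: "https://api.openai.com"}

func resolveBaseURL(channelType int, override string) string {
	baseURL := channelBaseURLs[channelType] // per-type default
	if override != "" {                     // channel-level override wins
		baseURL = override
	}
	return baseURL
}

func main() {
	fmt.Println(resolveBaseURL(1, ""))                      // default
	fmt.Println(resolveBaseURL(1, "https://example.local")) // override
}
```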
@@ -78,26 +78,7 @@ func clampThinkingBudget(modelName string, budget int) int {
 	return budget
 }
 
-// Setting safety to the lowest possible values since Gemini is already powerless enough
-func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (*GeminiChatRequest, error) {
-
-	geminiRequest := GeminiChatRequest{
-		Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
-		GenerationConfig: GeminiChatGenerationConfig{
-			Temperature:     textRequest.Temperature,
-			TopP:            textRequest.TopP,
-			MaxOutputTokens: textRequest.MaxTokens,
-			Seed:            int64(textRequest.Seed),
-		},
-	}
-
-	if model_setting.IsGeminiModelSupportImagine(info.UpstreamModelName) {
-		geminiRequest.GenerationConfig.ResponseModalities = []string{
-			"TEXT",
-			"IMAGE",
-		}
-	}
-
+func ThinkingAdaptor(geminiRequest *GeminiChatRequest, info *relaycommon.RelayInfo) {
+	if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
+		modelName := info.UpstreamModelName
+		isNew25Pro := strings.HasPrefix(modelName, "gemini-2.5-pro") &&
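`ThinkingAdaptor` and `isNoThinkingRequest` (further down) both dereference `GenerationConfig.ThinkingConfig.ThinkingBudget`, which only works if the budget is a pointer so that "unset" and "explicitly 0" stay distinguishable. A sketch of the shapes this implies; the field names come from the diff, while the concrete types and JSON tags are assumptions:

```go
package gemini

// Sketch only: not the project's actual definitions.
type GeminiThinkingConfig struct {
	// nil means the client did not specify a budget; an explicit value <= 0
	// is what isNoThinkingRequest treats as "thinking disabled".
	ThinkingBudget *int `json:"thinkingBudget,omitempty"`
}

type GeminiChatGenerationConfig struct {
	Temperature     *float64 `json:"temperature,omitempty"` // pointer: 0 is a valid temperature
	TopP            float64  `json:"topP,omitempty"`
	MaxOutputTokens uint     `json:"maxOutputTokens,omitempty"`
	Seed            int64    `json:"seed,omitempty"`
	// "TEXT"/"IMAGE", set when the model supports image output.
	ResponseModalities []string              `json:"responseModalities,omitempty"`
	ThinkingConfig     *GeminiThinkingConfig `json:"thinkingConfig,omitempty"`
}
```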
@@ -150,6 +131,29 @@ func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest, info *relaycommon
 			}
 		}
 	}
+}
+
+// Setting safety to the lowest possible values since Gemini is already powerless enough
+func CovertGemini2OpenAI(textRequest dto.GeneralOpenAIRequest, info *relaycommon.RelayInfo) (*GeminiChatRequest, error) {
+
+	geminiRequest := GeminiChatRequest{
+		Contents: make([]GeminiChatContent, 0, len(textRequest.Messages)),
+		GenerationConfig: GeminiChatGenerationConfig{
+			Temperature:     textRequest.Temperature,
+			TopP:            textRequest.TopP,
+			MaxOutputTokens: textRequest.MaxTokens,
+			Seed:            int64(textRequest.Seed),
+		},
+	}
+
+	if model_setting.IsGeminiModelSupportImagine(info.UpstreamModelName) {
+		geminiRequest.GenerationConfig.ResponseModalities = []string{
+			"TEXT",
+			"IMAGE",
+		}
+	}
+
+	ThinkingAdaptor(&geminiRequest, info)
+
 	safetySettings := make([]GeminiChatSafetySettings, 0, len(SafetySettingList))
 	for _, category := range SafetySettingList {
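The diff elides most of `ThinkingAdaptor`'s body. As a rough, assumed illustration of the kind of suffix-to-budget mapping such an adapter performs (conventions inferred from `trimModelThinking` and `clampThinkingBudget`; none of this is the committed implementation):

```go
package gemini

import (
	"strconv"
	"strings"
)

// adaptThinkingSketch is a hypothetical stand-in for ThinkingAdaptor's
// core decision: derive a thinking budget from the requested model name.
func adaptThinkingSketch(modelName string) *int {
	switch {
	case strings.HasSuffix(modelName, "-nothinking"):
		zero := 0 // explicit zero budget: thinking disabled
		return &zero
	case strings.Contains(modelName, "-thinking-"):
		// e.g. "gemini-2.5-flash-thinking-128" carries an explicit budget
		parts := strings.Split(modelName, "-thinking-")
		if n, err := strconv.Atoi(parts[len(parts)-1]); err == nil {
			return &n
		}
	}
	return nil // leave the upstream default untouched
}
```

A real implementation would presumably also clamp the parsed value via `clampThinkingBudget` and handle the bare `-thinking` suffix.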
@@ -13,6 +13,7 @@ import (
 	"one-api/relay/helper"
 	"one-api/service"
 	"one-api/setting"
 	"one-api/setting/model_setting"
+	"strings"
 
 	"github.com/gin-gonic/gin"
@@ -76,6 +77,33 @@ func getGeminiInputTokens(req *gemini.GeminiChatRequest, info *relaycommon.Relay
 	return inputTokens
 }
 
+func isNoThinkingRequest(req *gemini.GeminiChatRequest) bool {
+	if req.GenerationConfig.ThinkingConfig != nil && req.GenerationConfig.ThinkingConfig.ThinkingBudget != nil {
+		return *req.GenerationConfig.ThinkingConfig.ThinkingBudget <= 0
+	}
+	return false
+}
+
+func trimModelThinking(modelName string) string {
+	// strip a -nothinking suffix from the model name
+	if strings.HasSuffix(modelName, "-nothinking") {
+		return strings.TrimSuffix(modelName, "-nothinking")
+	}
+	// strip a -thinking suffix from the model name
+	if strings.HasSuffix(modelName, "-thinking") {
+		return strings.TrimSuffix(modelName, "-thinking")
+	}
+
+	// normalize a -thinking-<number> suffix to -thinking
+	if strings.Contains(modelName, "-thinking-") {
+		parts := strings.Split(modelName, "-thinking-")
+		if len(parts) > 1 {
+			return parts[0] + "-thinking"
+		}
+	}
+	return modelName
+}
+
 func GeminiHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 	req, err := getAndValidateGeminiRequest(c)
 	if err != nil {
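A standalone demo of the new helper's behavior; `trimModelThinking` is copied verbatim from the hunk above so the snippet runs on its own, and the model names are just examples:

```go
package main

import (
	"fmt"
	"strings"
)

// Copied from the diff above so this demo is self-contained.
func trimModelThinking(modelName string) string {
	if strings.HasSuffix(modelName, "-nothinking") {
		return strings.TrimSuffix(modelName, "-nothinking")
	}
	if strings.HasSuffix(modelName, "-thinking") {
		return strings.TrimSuffix(modelName, "-thinking")
	}
	if strings.Contains(modelName, "-thinking-") {
		parts := strings.Split(modelName, "-thinking-")
		if len(parts) > 1 {
			return parts[0] + "-thinking"
		}
	}
	return modelName
}

func main() {
	for _, name := range []string{
		"gemini-2.5-flash-nothinking",   // -> gemini-2.5-flash
		"gemini-2.5-flash-thinking",     // -> gemini-2.5-flash
		"gemini-2.5-flash-thinking-128", // -> gemini-2.5-flash-thinking
		"gemini-2.5-pro",                // unchanged
	} {
		fmt.Printf("%-32s -> %s\n", name, trimModelThinking(name))
	}
}
```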
@@ -107,12 +135,27 @@ func GeminiHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 		relayInfo.SetPromptTokens(promptTokens)
 	} else {
 		promptTokens := getGeminiInputTokens(req, relayInfo)
 		if err != nil {
 			return service.OpenAIErrorWrapperLocal(err, "count_input_tokens_error", http.StatusBadRequest)
 		}
 		c.Set("prompt_tokens", promptTokens)
 	}
 
+	if model_setting.GetGeminiSettings().ThinkingAdapterEnabled {
+		if isNoThinkingRequest(req) {
+			// check whether the origin model is already a -nothinking variant
+			if !strings.Contains(relayInfo.OriginModelName, "-nothinking") {
+				// try to price the request under the -nothinking model name
+				noThinkingModelName := relayInfo.OriginModelName + "-nothinking"
+				containPrice := helper.ContainPriceOrRatio(noThinkingModelName)
+				if containPrice {
+					relayInfo.OriginModelName = noThinkingModelName
+					relayInfo.UpstreamModelName = noThinkingModelName
+				}
+			}
+		}
+		if req.GenerationConfig.ThinkingConfig == nil {
+			gemini.ThinkingAdaptor(req, relayInfo)
+		}
+	}
+
 	priceData, err := helper.ModelPriceHelper(c, relayInfo, relayInfo.PromptTokens, int(req.GenerationConfig.MaxOutputTokens))
 	if err != nil {
 		return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
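Putting the `GeminiHelper` branch above into words: when thinking is disabled (budget ≤ 0) and the origin model name does not already carry `-nothinking`, the request is re-billed under the `-nothinking` variant, but only if a price entry exists for it. A minimal sketch with a stubbed price table, where `hasPrice` stands in for `helper.ContainPriceOrRatio`:

```go
package main

import (
	"fmt"
	"strings"
)

// Stub for helper.ContainPriceOrRatio; real pricing comes from settings.
func hasPrice(model string) bool {
	priced := map[string]bool{"gemini-2.5-pro-nothinking": true}
	return priced[model]
}

// Mirrors the model-name fallback in GeminiHelper: rename only when the
// request disabled thinking and a -nothinking price entry actually exists.
func billingModel(origin string, noThinking bool) string {
	if noThinking && !strings.Contains(origin, "-nothinking") {
		if candidate := origin + "-nothinking"; hasPrice(candidate) {
			return candidate
		}
	}
	return origin
}

func main() {
	fmt.Println(billingModel("gemini-2.5-pro", true))   // gemini-2.5-pro-nothinking
	fmt.Println(billingModel("gemini-2.5-flash", true)) // gemini-2.5-flash (no price entry)
	fmt.Println(billingModel("gemini-2.5-pro", false))  // gemini-2.5-pro
}
```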