Merge remote-tracking branch 'origin/alpha' into refactor/model-pricing

Author: t0ng7u
Date:   2025-08-08 02:34:44 +08:00

25 changed files with 126 additions and 153 deletions


@@ -161,7 +161,7 @@ func testChannel(channel *model.Channel, testModel string) testResult {
     logInfo.ApiKey = ""
     common.SysLog(fmt.Sprintf("testing channel %d with model %s , info %+v ", channel.Id, testModel, logInfo))
-    priceData, err := helper.ModelPriceHelper(c, info, 0, int(request.MaxTokens))
+    priceData, err := helper.ModelPriceHelper(c, info, 0, int(request.GetMaxTokens()))
     if err != nil {
         return testResult{
             context: c,
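The first hunk swaps a direct read of request.MaxTokens for a getter. Below is a minimal sketch of the accessor pattern this implies, assuming the getter exists to stay nil-safe and to centralize how the field is read; the receiver type and field layout are illustrative, since only the names request.MaxTokens and GetMaxTokens() appear in the diff.

    // Hypothetical sketch; the real request type lives in the project's dto
    // package and may store the field differently.
    package dto

    type GeneralOpenAIRequest struct {
        MaxTokens uint `json:"max_tokens,omitempty"`
    }

    // GetMaxTokens lets call sites like ModelPriceHelper stop reading the field
    // directly, so its representation can change without touching every caller.
    func (r *GeneralOpenAIRequest) GetMaxTokens() uint {
        if r == nil {
            return 0
        }
        return r.MaxTokens
    }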
@@ -275,7 +275,7 @@ func testChannel(channel *model.Channel, testModel string) testResult {
         Quota:          quota,
         Content:        "模型测试",
         UseTimeSeconds: int(consumedTime),
-        IsStream:       false,
+        IsStream:       info.IsStream,
         Group:          info.UsingGroup,
         Other:          other,
     })


@@ -36,30 +36,11 @@ type OpenAIModel struct {
     Parent     string `json:"parent"`
 }

-type GoogleOpenAICompatibleModels []struct {
-    Name                       string   `json:"name"`
-    Version                    string   `json:"version"`
-    DisplayName                string   `json:"displayName"`
-    Description                string   `json:"description,omitempty"`
-    InputTokenLimit            int      `json:"inputTokenLimit"`
-    OutputTokenLimit           int      `json:"outputTokenLimit"`
-    SupportedGenerationMethods []string `json:"supportedGenerationMethods"`
-    Temperature                float64  `json:"temperature,omitempty"`
-    TopP                       float64  `json:"topP,omitempty"`
-    TopK                       int      `json:"topK,omitempty"`
-    MaxTemperature             int      `json:"maxTemperature,omitempty"`
-}
-
 type OpenAIModelsResponse struct {
     Data    []OpenAIModel `json:"data"`
     Success bool          `json:"success"`
 }

-type GoogleOpenAICompatibleResponse struct {
-    Models        []GoogleOpenAICompatibleModels `json:"models"`
-    NextPageToken string                         `json:"nextPageToken"`
-}
-
 func parseStatusFilter(statusParam string) int {
     switch strings.ToLower(statusParam) {
     case "enabled", "1":
@@ -203,7 +184,7 @@ func FetchUpstreamModels(c *gin.Context) {
     switch channel.Type {
     case constant.ChannelTypeGemini:
         // curl https://example.com/v1beta/models?key=$GEMINI_API_KEY
-        url = fmt.Sprintf("%s/v1beta/openai/models?key=%s", baseURL, channel.Key)
+        url = fmt.Sprintf("%s/v1beta/openai/models", baseURL) // Remove the key from the URL since we need to use AuthHeader
     case constant.ChannelTypeAli:
         url = fmt.Sprintf("%s/compatible-mode/v1/models", baseURL)
     default:
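This hunk moves the Gemini key out of the query string; the hunk that follows sends it through GetAuthHeader instead. A sketch of what that helper plausibly produces, assuming the standard Bearer scheme that Gemini's OpenAI-compatible API accepts; the real GetAuthHeader lives elsewhere in this package and may differ:

    package controller // assumed package, matching the calls in the diff

    import (
        "fmt"
        "net/http"
    )

    // Hypothetical stand-in for the project's GetAuthHeader helper: the key that
    // used to ride in the query string (?key=...) now travels as a Bearer token,
    // equivalent to: curl $BASE_URL/v1beta/openai/models -H "Authorization: Bearer $KEY"
    func GetAuthHeader(token string) http.Header {
        h := http.Header{}
        h.Set("Authorization", fmt.Sprintf("Bearer %s", token))
        return h
    }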
@@ -212,10 +193,11 @@ func FetchUpstreamModels(c *gin.Context) {
     // Fetch the response body - whether to add an AuthHeader depends on the channel type
     var body []byte
+    key := strings.Split(channel.Key, "\n")[0]
     if channel.Type == constant.ChannelTypeGemini {
-        body, err = GetResponseBody("GET", url, channel, nil) // I don't know why, but Gemini requires no AuthHeader
+        body, err = GetResponseBody("GET", url, channel, GetAuthHeader(key)) // Use AuthHeader since Gemini now forces it
     } else {
-        body, err = GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
+        body, err = GetResponseBody("GET", url, channel, GetAuthHeader(key))
     }
     if err != nil {
         common.ApiError(c, err)
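The added key := line matters because the channel's Key field can hold several keys, one per line, and index 0 after the split selects the first; the snippet below demonstrates only the split itself, with hypothetical key values:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // A channel's Key field holding three keys, one per line (sample values).
        channelKey := "sk-first\nsk-second\nsk-third"
        key := strings.Split(channelKey, "\n")[0]
        fmt.Println(key) // prints: sk-first
    }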
@@ -223,34 +205,12 @@ func FetchUpstreamModels(c *gin.Context) {
     }
     var result OpenAIModelsResponse
-    var parseSuccess bool
-
-    // Adapt special response formats
-    switch channel.Type {
-    case constant.ChannelTypeGemini:
-        var googleResult GoogleOpenAICompatibleResponse
-        if err = json.Unmarshal(body, &googleResult); err == nil {
-            // Convert the Google format to the OpenAI format
-            for _, model := range googleResult.Models {
-                for _, gModel := range model {
-                    result.Data = append(result.Data, OpenAIModel{
-                        ID: gModel.Name,
-                    })
-                }
-            }
-            parseSuccess = true
-        }
-    }
-
-    // If that parse fails, fall back to the OpenAI format
-    if !parseSuccess {
-        if err = json.Unmarshal(body, &result); err != nil {
-            c.JSON(http.StatusOK, gin.H{
-                "success": false,
-                "message": fmt.Sprintf("解析响应失败: %s", err.Error()),
-            })
-            return
-        }
-    }
+    if err = json.Unmarshal(body, &result); err != nil {
+        c.JSON(http.StatusOK, gin.H{
+            "success": false,
+            "message": fmt.Sprintf("解析响应失败: %s", err.Error()),
+        })
+        return
+    }
     var ids []string