feat: add new GPT-4.5 preview model ratios

Author: 1808837298@qq.com
Date: 2025-02-28 19:17:15 +08:00
parent cfd3f6c073
commit d6fd50e382
11 changed files with 61 additions and 36 deletions

View File

@@ -68,6 +68,8 @@ var defaultModelRatio = map[string]float64{
"gpt-4o-mini-2024-07-18": 0.075,
"gpt-4-turbo": 5, // $0.01 / 1K tokens
"gpt-4-turbo-2024-04-09": 5, // $0.01 / 1K tokens
"gpt-4.5-preview": 37.5,
"gpt-4.5-preview-2025-02-27": 37.5,
//"gpt-3.5-turbo-0301": 0.75, //deprecated
"gpt-3.5-turbo": 0.25,
"gpt-3.5-turbo-0613": 0.75,
@@ -315,7 +317,7 @@ func UpdateModelRatioByJSONString(jsonStr string) error {
return json.Unmarshal([]byte(jsonStr), &modelRatioMap)
}
- func GetModelRatio(name string) float64 {
+ func GetModelRatio(name string) (float64, bool) {
GetModelRatioMap()
if strings.HasPrefix(name, "gpt-4-gizmo") {
name = "gpt-4-gizmo-*"
@@ -323,9 +325,9 @@ func GetModelRatio(name string) float64 {
ratio, ok := modelRatioMap[name]
if !ok {
SysError("model ratio not found: " + name)
- return 30
+ return 37.5, false
}
- return ratio
+ return ratio, true
}
func DefaultModelRatio2JSONString() string {
@@ -387,6 +389,9 @@ func GetCompletionRatio(name string) float64 {
}
return 4
}
if strings.HasPrefix(name, "gpt-4.5") {
return 2
}
if strings.HasPrefix(name, "gpt-4-turbo") || strings.HasSuffix(name, "preview") {
return 3
}
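A quick sanity check of the new numbers (a sketch, not part of the diff), assuming the convention the in-file comments imply: gpt-4-turbo's ratio of 5 is documented as $0.01 / 1K tokens, so one ratio unit is $0.002 per 1K prompt tokens. Note also that the new gpt-4.5 branch sits above the generic "preview" suffix rule, which would otherwise bill gpt-4.5-preview completions at 3x, and that the not-found fallback in GetModelRatio moves from 30 to 37.5 while now reporting false so callers can reject unknown models.

package main

import "fmt"

// Back-of-the-envelope check of the new gpt-4.5-preview pricing, assuming one
// ratio unit == $0.002 per 1K prompt tokens as the gpt-4-turbo comment implies.
func main() {
    const usdPerRatioUnitPer1K = 0.002

    modelRatio := 37.5     // new defaultModelRatio entry for gpt-4.5-preview
    completionRatio := 2.0 // new gpt-4.5 branch in GetCompletionRatio

    promptUSDPer1M := modelRatio * usdPerRatioUnitPer1K * 1000
    completionUSDPer1M := promptUSDPer1M * completionRatio

    fmt.Printf("gpt-4.5-preview prompt:     $%.0f / 1M tokens\n", promptUSDPer1M)     // $75
    fmt.Printf("gpt-4.5-preview completion: $%.0f / 1M tokens\n", completionUSDPer1M) // $150
}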

View File

@@ -146,7 +146,10 @@ func testChannel(channel *model.Channel, testModel string) (err error, openAIErr
return err, nil
}
modelPrice, usePrice := common.GetModelPrice(testModel, false)
- modelRatio := common.GetModelRatio(testModel)
+ modelRatio, success := common.GetModelRatio(testModel)
+ if !success {
+     return fmt.Errorf("model %s ratio not set", testModel), nil
+ }
completionRatio := common.GetCompletionRatio(testModel)
ratio := modelRatio
quota := 0

View File

@@ -69,7 +69,8 @@ func updatePricing() {
pricing.ModelPrice = modelPrice
pricing.QuotaType = 1
} else {
- pricing.ModelRatio = common.GetModelRatio(model)
+ modelRatio, _ := common.GetModelRatio(model)
+ pricing.ModelRatio = modelRatio
pricing.CompletionRatio = common.GetCompletionRatio(model)
pricing.QuotaType = 0
}

View File

@@ -1,6 +1,7 @@
package helper
import (
"fmt"
"github.com/gin-gonic/gin"
"one-api/common"
relaycommon "one-api/relay/common"
@@ -15,7 +16,7 @@ type PriceData struct {
ShouldPreConsumedQuota int
}
- func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens int, maxTokens int) PriceData {
+ func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens int, maxTokens int) (PriceData, error) {
modelPrice, usePrice := common.GetModelPrice(info.OriginModelName, false)
groupRatio := setting.GetGroupRatio(info.Group)
var preConsumedQuota int
@@ -25,7 +26,10 @@ func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens
if maxTokens != 0 {
preConsumedTokens = promptTokens + maxTokens
}
- modelRatio = common.GetModelRatio(info.OriginModelName)
+ modelRatio, success := common.GetModelRatio(info.OriginModelName)
+ if !success {
+     return PriceData{}, fmt.Errorf("model %s ratio not found", info.OriginModelName)
+ }
ratio := modelRatio * groupRatio
preConsumedQuota = int(float64(preConsumedTokens) * ratio)
} else {
@@ -37,5 +41,5 @@ func ModelPriceHelper(c *gin.Context, info *relaycommon.RelayInfo, promptTokens
GroupRatio: groupRatio,
UsePrice: usePrice,
ShouldPreConsumedQuota: preConsumedQuota,
- }
+ }, nil
}
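ModelPriceHelper now fails with an error when the model ratio is not configured, instead of silently pricing with the fallback ratio; every relay entry point below adds the same check. A minimal sketch of the new calling contract, using simplified stand-in types rather than the real helper/relaycommon/service packages:

package main

import "fmt"

// Illustrative stand-ins only: the real PriceData, ModelPriceHelper and error
// wrapping live in one-api's helper/service packages and take gin/relay types.
type PriceData struct {
    ModelRatio             float64
    ShouldPreConsumedQuota int
}

func modelPriceHelper(model string, promptTokens int) (PriceData, error) {
    ratios := map[string]float64{"gpt-4.5-preview": 37.5}
    ratio, ok := ratios[model]
    if !ok {
        // new behaviour: fail loudly instead of falling back to a default ratio
        return PriceData{}, fmt.Errorf("model %s ratio not found", model)
    }
    return PriceData{
        ModelRatio:             ratio,
        ShouldPreConsumedQuota: int(float64(promptTokens) * ratio),
    }, nil
}

func main() {
    if _, err := modelPriceHelper("not-a-configured-model", 1000); err != nil {
        // the real handlers wrap this via service.OpenAIErrorWrapperLocal
        // with the "model_price_error" code and HTTP 500
        fmt.Println("rejected:", err)
    }
}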

View File

@@ -75,7 +75,10 @@ func AudioHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
relayInfo.PromptTokens = promptTokens
}
- priceData := helper.ModelPriceHelper(c, relayInfo, preConsumedTokens, 0)
+ priceData, err := helper.ModelPriceHelper(c, relayInfo, preConsumedTokens, 0)
+ if err != nil {
+     return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+ }
userQuota, err := model.GetUserQuota(relayInfo.UserId, false)
if err != nil {

View File

@@ -86,7 +86,10 @@ func ImageHelper(c *gin.Context) *dto.OpenAIErrorWithStatusCode {
imageRequest.Model = relayInfo.UpstreamModelName
- priceData := helper.ModelPriceHelper(c, relayInfo, 0, 0)
+ priceData, err := helper.ModelPriceHelper(c, relayInfo, 0, 0)
+ if err != nil {
+     return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+ }
if !priceData.UsePrice {
// modelRatio 16 = modelPrice $0.04
// per 1 modelRatio = $0.04 / 16

View File

@@ -106,8 +106,10 @@ func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
c.Set("prompt_tokens", promptTokens)
}
- priceData := helper.ModelPriceHelper(c, relayInfo, promptTokens, int(textRequest.MaxTokens))
+ priceData, err := helper.ModelPriceHelper(c, relayInfo, promptTokens, int(textRequest.MaxTokens))
+ if err != nil {
+     return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+ }
// pre-consume quota 预消耗配额
preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
if openaiErr != nil {

View File

@@ -57,8 +57,10 @@ func EmbeddingHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode)
promptToken := getEmbeddingPromptToken(*embeddingRequest)
relayInfo.PromptTokens = promptToken
- priceData := helper.ModelPriceHelper(c, relayInfo, promptToken, 0)
+ priceData, err := helper.ModelPriceHelper(c, relayInfo, promptToken, 0)
+ if err != nil {
+     return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+ }
// pre-consume quota 预消耗配额
preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
if openaiErr != nil {

View File

@@ -50,8 +50,10 @@ func RerankHelper(c *gin.Context, relayMode int) (openaiErr *dto.OpenAIErrorWith
promptToken := getRerankPromptToken(*rerankRequest)
relayInfo.PromptTokens = promptToken
- priceData := helper.ModelPriceHelper(c, relayInfo, promptToken, 0)
+ priceData, err := helper.ModelPriceHelper(c, relayInfo, promptToken, 0)
+ if err != nil {
+     return service.OpenAIErrorWrapperLocal(err, "model_price_error", http.StatusInternalServerError)
+ }
// pre-consume quota 预消耗配额
preConsumedQuota, userQuota, openaiErr := preConsumeQuota(c, priceData.ShouldPreConsumedQuota, relayInfo)
if openaiErr != nil {

View File

@@ -65,7 +65,7 @@ func WssHelper(c *gin.Context, ws *websocket.Conn) (openaiErr *dto.OpenAIErrorWi
//if realtimeEvent.Session.MaxResponseOutputTokens != 0 {
// preConsumedTokens = promptTokens + int(realtimeEvent.Session.MaxResponseOutputTokens)
//}
- modelRatio = common.GetModelRatio(relayInfo.UpstreamModelName)
+ modelRatio, _ = common.GetModelRatio(relayInfo.UpstreamModelName)
ratio = modelRatio * groupRatio
preConsumedQuota = int(float64(preConsumedTokens) * ratio)
} else {

View File

@@ -75,7 +75,7 @@ func PreWssConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, usag
audioInputTokens := usage.InputTokenDetails.AudioTokens
audioOutTokens := usage.OutputTokenDetails.AudioTokens
groupRatio := setting.GetGroupRatio(relayInfo.Group)
- modelRatio := common.GetModelRatio(modelName)
+ modelRatio, _ := common.GetModelRatio(modelName)
quotaInfo := QuotaInfo{
InputDetails: TokenDetails{