diff --git a/backend/internal/pkg/openai/constants.go b/backend/internal/pkg/openai/constants.go index 4bbc68e7..b0a31a5f 100644 --- a/backend/internal/pkg/openai/constants.go +++ b/backend/internal/pkg/openai/constants.go @@ -15,6 +15,7 @@ type Model struct { // DefaultModels OpenAI models list var DefaultModels = []Model{ + {ID: "gpt-5.4", Object: "model", Created: 1738368000, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.4"}, {ID: "gpt-5.3-codex", Object: "model", Created: 1735689600, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.3 Codex"}, {ID: "gpt-5.3-codex-spark", Object: "model", Created: 1735689600, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.3 Codex Spark"}, {ID: "gpt-5.2", Object: "model", Created: 1733875200, OwnedBy: "openai", Type: "model", DisplayName: "GPT-5.2"}, diff --git a/backend/internal/service/billing_service.go b/backend/internal/service/billing_service.go index 5d67c808..d058c25a 100644 --- a/backend/internal/service/billing_service.go +++ b/backend/internal/service/billing_service.go @@ -43,15 +43,24 @@ type BillingCache interface { // ModelPricing 模型价格配置(per-token价格,与LiteLLM格式一致) type ModelPricing struct { - InputPricePerToken float64 // 每token输入价格 (USD) - OutputPricePerToken float64 // 每token输出价格 (USD) - CacheCreationPricePerToken float64 // 缓存创建每token价格 (USD) - CacheReadPricePerToken float64 // 缓存读取每token价格 (USD) - CacheCreation5mPrice float64 // 5分钟缓存创建每token价格 (USD) - CacheCreation1hPrice float64 // 1小时缓存创建每token价格 (USD) - SupportsCacheBreakdown bool // 是否支持详细的缓存分类 + InputPricePerToken float64 // 每token输入价格 (USD) + OutputPricePerToken float64 // 每token输出价格 (USD) + CacheCreationPricePerToken float64 // 缓存创建每token价格 (USD) + CacheReadPricePerToken float64 // 缓存读取每token价格 (USD) + CacheCreation5mPrice float64 // 5分钟缓存创建每token价格 (USD) + CacheCreation1hPrice float64 // 1小时缓存创建每token价格 (USD) + SupportsCacheBreakdown bool // 是否支持详细的缓存分类 + LongContextInputThreshold int // 超过阈值后按整次会话提升输入价格 + LongContextInputMultiplier 
float64 // 长上下文整次会话输入倍率 + LongContextOutputMultiplier float64 // 长上下文整次会话输出倍率 } +const ( + openAIGPT54LongContextInputThreshold = 272000 + openAIGPT54LongContextInputMultiplier = 2.0 + openAIGPT54LongContextOutputMultiplier = 1.5 +) + // UsageTokens 使用的token数量 type UsageTokens struct { InputTokens int @@ -161,6 +170,35 @@ func (s *BillingService) initFallbackPricing() { CacheReadPricePerToken: 0.2e-6, // $0.20 per MTok SupportsCacheBreakdown: false, } + + // OpenAI GPT-5.1(本地兜底,防止动态定价不可用时拒绝计费) + s.fallbackPrices["gpt-5.1"] = &ModelPricing{ + InputPricePerToken: 1.25e-6, // $1.25 per MTok + OutputPricePerToken: 10e-6, // $10 per MTok + CacheCreationPricePerToken: 1.25e-6, // $1.25 per MTok + CacheReadPricePerToken: 0.125e-6, + SupportsCacheBreakdown: false, + } + // OpenAI GPT-5.4(业务指定价格) + s.fallbackPrices["gpt-5.4"] = &ModelPricing{ + InputPricePerToken: 2.5e-6, // $2.5 per MTok + OutputPricePerToken: 15e-6, // $15 per MTok + CacheCreationPricePerToken: 2.5e-6, // $2.5 per MTok + CacheReadPricePerToken: 0.25e-6, // $0.25 per MTok + SupportsCacheBreakdown: false, + LongContextInputThreshold: openAIGPT54LongContextInputThreshold, + LongContextInputMultiplier: openAIGPT54LongContextInputMultiplier, + LongContextOutputMultiplier: openAIGPT54LongContextOutputMultiplier, + } + // Codex 族兜底统一按 GPT-5.1 Codex 价格计费 + s.fallbackPrices["gpt-5.1-codex"] = &ModelPricing{ + InputPricePerToken: 1.5e-6, // $1.5 per MTok + OutputPricePerToken: 12e-6, // $12 per MTok + CacheCreationPricePerToken: 1.5e-6, // $1.5 per MTok + CacheReadPricePerToken: 0.15e-6, + SupportsCacheBreakdown: false, + } + s.fallbackPrices["gpt-5.3-codex"] = s.fallbackPrices["gpt-5.1-codex"] } // getFallbackPricing 根据模型系列获取回退价格 @@ -189,12 +227,30 @@ func (s *BillingService) getFallbackPricing(model string) *ModelPricing { } return s.fallbackPrices["claude-3-haiku"] } + // Claude 未知型号统一回退到 Sonnet,避免计费中断。 + if strings.Contains(modelLower, "claude") { + return s.fallbackPrices["claude-sonnet-4"] + } if 
strings.Contains(modelLower, "gemini-3.1-pro") || strings.Contains(modelLower, "gemini-3-1-pro") { return s.fallbackPrices["gemini-3.1-pro"] } - // 默认使用Sonnet价格 - return s.fallbackPrices["claude-sonnet-4"] + // OpenAI 仅匹配已知 GPT-5/Codex 族,避免未知 OpenAI 型号误计价。 + if strings.Contains(modelLower, "gpt-5") || strings.Contains(modelLower, "codex") { + normalized := normalizeCodexModel(modelLower) + switch normalized { + case "gpt-5.4": + return s.fallbackPrices["gpt-5.4"] + case "gpt-5.3-codex": + return s.fallbackPrices["gpt-5.3-codex"] + case "gpt-5.1-codex", "gpt-5.1-codex-max", "gpt-5.1-codex-mini", "codex-mini-latest": + return s.fallbackPrices["gpt-5.1-codex"] + case "gpt-5.1": + return s.fallbackPrices["gpt-5.1"] + } + } + + return nil } // GetModelPricing 获取模型价格配置 @@ -212,15 +268,18 @@ func (s *BillingService) GetModelPricing(model string) (*ModelPricing, error) { price5m := litellmPricing.CacheCreationInputTokenCost price1h := litellmPricing.CacheCreationInputTokenCostAbove1hr enableBreakdown := price1h > 0 && price1h > price5m - return &ModelPricing{ - InputPricePerToken: litellmPricing.InputCostPerToken, - OutputPricePerToken: litellmPricing.OutputCostPerToken, - CacheCreationPricePerToken: litellmPricing.CacheCreationInputTokenCost, - CacheReadPricePerToken: litellmPricing.CacheReadInputTokenCost, - CacheCreation5mPrice: price5m, - CacheCreation1hPrice: price1h, - SupportsCacheBreakdown: enableBreakdown, - }, nil + return s.applyModelSpecificPricingPolicy(model, &ModelPricing{ + InputPricePerToken: litellmPricing.InputCostPerToken, + OutputPricePerToken: litellmPricing.OutputCostPerToken, + CacheCreationPricePerToken: litellmPricing.CacheCreationInputTokenCost, + CacheReadPricePerToken: litellmPricing.CacheReadInputTokenCost, + CacheCreation5mPrice: price5m, + CacheCreation1hPrice: price1h, + SupportsCacheBreakdown: enableBreakdown, + LongContextInputThreshold: litellmPricing.LongContextInputTokenThreshold, + LongContextInputMultiplier: 
litellmPricing.LongContextInputCostMultiplier, + LongContextOutputMultiplier: litellmPricing.LongContextOutputCostMultiplier, + }), nil } } @@ -228,7 +287,7 @@ func (s *BillingService) GetModelPricing(model string) (*ModelPricing, error) { fallback := s.getFallbackPricing(model) if fallback != nil { log.Printf("[Billing] Using fallback pricing for model: %s", model) - return fallback, nil + return s.applyModelSpecificPricingPolicy(model, fallback), nil } return nil, fmt.Errorf("pricing not found for model: %s", model) @@ -242,12 +301,18 @@ func (s *BillingService) CalculateCost(model string, tokens UsageTokens, rateMul } breakdown := &CostBreakdown{} + inputPricePerToken := pricing.InputPricePerToken + outputPricePerToken := pricing.OutputPricePerToken + if s.shouldApplySessionLongContextPricing(tokens, pricing) { + inputPricePerToken *= pricing.LongContextInputMultiplier + outputPricePerToken *= pricing.LongContextOutputMultiplier + } // 计算输入token费用(使用per-token价格) - breakdown.InputCost = float64(tokens.InputTokens) * pricing.InputPricePerToken + breakdown.InputCost = float64(tokens.InputTokens) * inputPricePerToken // 计算输出token费用 - breakdown.OutputCost = float64(tokens.OutputTokens) * pricing.OutputPricePerToken + breakdown.OutputCost = float64(tokens.OutputTokens) * outputPricePerToken // 计算缓存费用 if pricing.SupportsCacheBreakdown && (pricing.CacheCreation5mPrice > 0 || pricing.CacheCreation1hPrice > 0) { @@ -279,6 +344,45 @@ func (s *BillingService) CalculateCost(model string, tokens UsageTokens, rateMul return breakdown, nil } +func (s *BillingService) applyModelSpecificPricingPolicy(model string, pricing *ModelPricing) *ModelPricing { + if pricing == nil { + return nil + } + if !isOpenAIGPT54Model(model) { + return pricing + } + if pricing.LongContextInputThreshold > 0 && pricing.LongContextInputMultiplier > 0 && pricing.LongContextOutputMultiplier > 0 { + return pricing + } + cloned := *pricing + if cloned.LongContextInputThreshold <= 0 { + 
cloned.LongContextInputThreshold = openAIGPT54LongContextInputThreshold + } + if cloned.LongContextInputMultiplier <= 0 { + cloned.LongContextInputMultiplier = openAIGPT54LongContextInputMultiplier + } + if cloned.LongContextOutputMultiplier <= 0 { + cloned.LongContextOutputMultiplier = openAIGPT54LongContextOutputMultiplier + } + return &cloned +} + +func (s *BillingService) shouldApplySessionLongContextPricing(tokens UsageTokens, pricing *ModelPricing) bool { + if pricing == nil || pricing.LongContextInputThreshold <= 0 { + return false + } + if pricing.LongContextInputMultiplier < 1 || pricing.LongContextOutputMultiplier < 1 { + return false + } + totalInputTokens := tokens.InputTokens + tokens.CacheReadTokens + return totalInputTokens > pricing.LongContextInputThreshold +} + +func isOpenAIGPT54Model(model string) bool { + normalized := normalizeCodexModel(strings.TrimSpace(strings.ToLower(model))) + return normalized == "gpt-5.4" +} + // CalculateCostWithConfig 使用配置中的默认倍率计算费用 func (s *BillingService) CalculateCostWithConfig(model string, tokens UsageTokens) (*CostBreakdown, error) { multiplier := s.cfg.Default.RateMultiplier diff --git a/backend/internal/service/billing_service_test.go b/backend/internal/service/billing_service_test.go index 5eb278f6..0ba52e56 100644 --- a/backend/internal/service/billing_service_test.go +++ b/backend/internal/service/billing_service_test.go @@ -133,7 +133,7 @@ func TestGetModelPricing_CaseInsensitive(t *testing.T) { require.Equal(t, p1.InputPricePerToken, p2.InputPricePerToken) } -func TestGetModelPricing_UnknownModelFallsBackToSonnet(t *testing.T) { +func TestGetModelPricing_UnknownClaudeModelFallsBackToSonnet(t *testing.T) { svc := newTestBillingService() // 不包含 opus/sonnet/haiku 关键词的 Claude 模型会走默认 Sonnet 价格 @@ -142,6 +142,93 @@ func TestGetModelPricing_UnknownModelFallsBackToSonnet(t *testing.T) { require.InDelta(t, 3e-6, pricing.InputPricePerToken, 1e-12) } +func TestGetModelPricing_UnknownOpenAIModelReturnsError(t
*testing.T) { + svc := newTestBillingService() + + pricing, err := svc.GetModelPricing("gpt-unknown-model") + require.Error(t, err) + require.Nil(t, pricing) + require.Contains(t, err.Error(), "pricing not found") +} + +func TestGetModelPricing_OpenAIGPT51Fallback(t *testing.T) { + svc := newTestBillingService() + + pricing, err := svc.GetModelPricing("gpt-5.1") + require.NoError(t, err) + require.NotNil(t, pricing) + require.InDelta(t, 1.25e-6, pricing.InputPricePerToken, 1e-12) +} + +func TestGetModelPricing_OpenAIGPT54Fallback(t *testing.T) { + svc := newTestBillingService() + + pricing, err := svc.GetModelPricing("gpt-5.4") + require.NoError(t, err) + require.NotNil(t, pricing) + require.InDelta(t, 2.5e-6, pricing.InputPricePerToken, 1e-12) + require.InDelta(t, 15e-6, pricing.OutputPricePerToken, 1e-12) + require.InDelta(t, 0.25e-6, pricing.CacheReadPricePerToken, 1e-12) + require.Equal(t, 272000, pricing.LongContextInputThreshold) + require.InDelta(t, 2.0, pricing.LongContextInputMultiplier, 1e-12) + require.InDelta(t, 1.5, pricing.LongContextOutputMultiplier, 1e-12) +} + +func TestCalculateCost_OpenAIGPT54LongContextAppliesWholeSessionMultipliers(t *testing.T) { + svc := newTestBillingService() + + tokens := UsageTokens{ + InputTokens: 300000, + OutputTokens: 4000, + } + + cost, err := svc.CalculateCost("gpt-5.4-2026-03-05", tokens, 1.0) + require.NoError(t, err) + + expectedInput := float64(tokens.InputTokens) * 2.5e-6 * 2.0 + expectedOutput := float64(tokens.OutputTokens) * 15e-6 * 1.5 + require.InDelta(t, expectedInput, cost.InputCost, 1e-10) + require.InDelta(t, expectedOutput, cost.OutputCost, 1e-10) + require.InDelta(t, expectedInput+expectedOutput, cost.TotalCost, 1e-10) + require.InDelta(t, expectedInput+expectedOutput, cost.ActualCost, 1e-10) +} + +func TestGetFallbackPricing_FamilyMatching(t *testing.T) { + svc := newTestBillingService() + + tests := []struct { + name string + model string + expectedInput float64 + expectNilPricing bool + }{ + 
{name: "empty model", model: " ", expectNilPricing: true}, + {name: "claude opus 4.6", model: "claude-opus-4.6-20260201", expectedInput: 5e-6}, + {name: "claude opus 4.5 alt separator", model: "claude-opus-4-5-20260101", expectedInput: 5e-6}, + {name: "claude generic model fallback sonnet", model: "claude-foo-bar", expectedInput: 3e-6}, + {name: "gemini explicit fallback", model: "gemini-3-1-pro", expectedInput: 2e-6}, + {name: "gemini unknown no fallback", model: "gemini-2.0-pro", expectNilPricing: true}, + {name: "openai gpt5.1", model: "gpt-5.1", expectedInput: 1.25e-6}, + {name: "openai gpt5.4", model: "gpt-5.4", expectedInput: 2.5e-6}, + {name: "openai gpt5.3 codex", model: "gpt-5.3-codex", expectedInput: 1.5e-6}, + {name: "openai gpt5.1 codex max alias", model: "gpt-5.1-codex-max", expectedInput: 1.5e-6}, + {name: "openai codex mini latest alias", model: "codex-mini-latest", expectedInput: 1.5e-6}, + {name: "openai unknown no fallback", model: "gpt-unknown-model", expectNilPricing: true}, + {name: "non supported family", model: "qwen-max", expectNilPricing: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + pricing := svc.getFallbackPricing(tt.model) + if tt.expectNilPricing { + require.Nil(t, pricing) + return + } + require.NotNil(t, pricing) + require.InDelta(t, tt.expectedInput, pricing.InputPricePerToken, 1e-12) + }) + } +} func TestCalculateCostWithLongContext_BelowThreshold(t *testing.T) { svc := newTestBillingService() diff --git a/backend/internal/service/openai_codex_transform.go b/backend/internal/service/openai_codex_transform.go index 16befb82..9bc48cf6 100644 --- a/backend/internal/service/openai_codex_transform.go +++ b/backend/internal/service/openai_codex_transform.go @@ -9,6 +9,13 @@ import ( var codexCLIInstructions string var codexModelMap = map[string]string{ + "gpt-5.4": "gpt-5.4", + "gpt-5.4-none": "gpt-5.4", + "gpt-5.4-low": "gpt-5.4", + "gpt-5.4-medium": "gpt-5.4", + "gpt-5.4-high": "gpt-5.4", + 
"gpt-5.4-xhigh": "gpt-5.4", + "gpt-5.4-chat-latest": "gpt-5.4", "gpt-5.3": "gpt-5.3-codex", "gpt-5.3-none": "gpt-5.3-codex", "gpt-5.3-low": "gpt-5.3-codex", @@ -154,6 +161,9 @@ func normalizeCodexModel(model string) string { normalized := strings.ToLower(modelID) + if strings.Contains(normalized, "gpt-5.4") || strings.Contains(normalized, "gpt 5.4") { + return "gpt-5.4" + } if strings.Contains(normalized, "gpt-5.2-codex") || strings.Contains(normalized, "gpt 5.2 codex") { return "gpt-5.2-codex" } diff --git a/backend/internal/service/openai_codex_transform_test.go b/backend/internal/service/openai_codex_transform_test.go index 27093f6c..7ee4bbc8 100644 --- a/backend/internal/service/openai_codex_transform_test.go +++ b/backend/internal/service/openai_codex_transform_test.go @@ -167,6 +167,10 @@ func TestApplyCodexOAuthTransform_EmptyInput(t *testing.T) { func TestNormalizeCodexModel_Gpt53(t *testing.T) { cases := map[string]string{ + "gpt-5.4": "gpt-5.4", + "gpt-5.4-high": "gpt-5.4", + "gpt-5.4-chat-latest": "gpt-5.4", + "gpt 5.4": "gpt-5.4", "gpt-5.3": "gpt-5.3-codex", "gpt-5.3-codex": "gpt-5.3-codex", "gpt-5.3-codex-xhigh": "gpt-5.3-codex", diff --git a/backend/internal/service/pricing_service.go b/backend/internal/service/pricing_service.go index 41e8b5eb..897623d6 100644 --- a/backend/internal/service/pricing_service.go +++ b/backend/internal/service/pricing_service.go @@ -21,8 +21,19 @@ import ( ) var ( - openAIModelDatePattern = regexp.MustCompile(`-\d{8}$`) - openAIModelBasePattern = regexp.MustCompile(`^(gpt-\d+(?:\.\d+)?)(?:-|$)`) + openAIModelDatePattern = regexp.MustCompile(`-\d{8}$`) + openAIModelBasePattern = regexp.MustCompile(`^(gpt-\d+(?:\.\d+)?)(?:-|$)`) + openAIGPT54FallbackPricing = &LiteLLMModelPricing{ + InputCostPerToken: 2.5e-06, // $2.5 per MTok + OutputCostPerToken: 1.5e-05, // $15 per MTok + CacheReadInputTokenCost: 2.5e-07, // $0.25 per MTok + LongContextInputTokenThreshold: 272000, + LongContextInputCostMultiplier: 2.0, + 
LongContextOutputCostMultiplier: 1.5, + LiteLLMProvider: "openai", + Mode: "chat", + SupportsPromptCaching: true, + } ) // LiteLLMModelPricing LiteLLM价格数据结构 @@ -33,6 +44,9 @@ type LiteLLMModelPricing struct { CacheCreationInputTokenCost float64 `json:"cache_creation_input_token_cost"` CacheCreationInputTokenCostAbove1hr float64 `json:"cache_creation_input_token_cost_above_1hr"` CacheReadInputTokenCost float64 `json:"cache_read_input_token_cost"` + LongContextInputTokenThreshold int `json:"long_context_input_token_threshold,omitempty"` + LongContextInputCostMultiplier float64 `json:"long_context_input_cost_multiplier,omitempty"` + LongContextOutputCostMultiplier float64 `json:"long_context_output_cost_multiplier,omitempty"` LiteLLMProvider string `json:"litellm_provider"` Mode string `json:"mode"` SupportsPromptCaching bool `json:"supports_prompt_caching"` @@ -660,7 +674,8 @@ func (s *PricingService) matchByModelFamily(model string) *LiteLLMModelPricing { // 2. gpt-5.2-codex -> gpt-5.2(去掉后缀如 -codex, -mini, -max 等) // 3. gpt-5.2-20251222 -> gpt-5.2(去掉日期版本号) // 4. gpt-5.3-codex -> gpt-5.2-codex -// 5. 最终回退到 DefaultTestModel (gpt-5.1-codex) +// 5. gpt-5.4* -> 业务静态兜底价 +// 6. 最终回退到 DefaultTestModel (gpt-5.1-codex) func (s *PricingService) matchOpenAIModel(model string) *LiteLLMModelPricing { if strings.HasPrefix(model, "gpt-5.3-codex-spark") { if pricing, ok := s.pricingData["gpt-5.1-codex"]; ok { @@ -690,6 +705,12 @@ func (s *PricingService) matchOpenAIModel(model string) *LiteLLMModelPricing { } } + if strings.HasPrefix(model, "gpt-5.4") { + logger.With(zap.String("component", "service.pricing")). 
+ Info(fmt.Sprintf("[Pricing] OpenAI fallback matched %s -> %s", model, "gpt-5.4(static)")) + return openAIGPT54FallbackPricing + } + // 最终回退到 DefaultTestModel defaultModel := strings.ToLower(openai.DefaultTestModel) if pricing, ok := s.pricingData[defaultModel]; ok { diff --git a/backend/internal/service/pricing_service_test.go b/backend/internal/service/pricing_service_test.go index 127ff342..6b67c55a 100644 --- a/backend/internal/service/pricing_service_test.go +++ b/backend/internal/service/pricing_service_test.go @@ -51,3 +51,20 @@ func TestGetModelPricing_OpenAIFallbackMatchedLoggedAsInfo(t *testing.T) { require.True(t, logSink.ContainsMessageAtLevel("[Pricing] OpenAI fallback matched gpt-5.3-codex -> gpt-5.2-codex", "info")) require.False(t, logSink.ContainsMessageAtLevel("[Pricing] OpenAI fallback matched gpt-5.3-codex -> gpt-5.2-codex", "warn")) } + +func TestGetModelPricing_Gpt54UsesStaticFallbackWhenRemoteMissing(t *testing.T) { + svc := &PricingService{ + pricingData: map[string]*LiteLLMModelPricing{ + "gpt-5.1-codex": &LiteLLMModelPricing{InputCostPerToken: 1.25e-6}, + }, + } + + got := svc.GetModelPricing("gpt-5.4") + require.NotNil(t, got) + require.InDelta(t, 2.5e-6, got.InputCostPerToken, 1e-12) + require.InDelta(t, 1.5e-5, got.OutputCostPerToken, 1e-12) + require.InDelta(t, 2.5e-7, got.CacheReadInputTokenCost, 1e-12) + require.Equal(t, 272000, got.LongContextInputTokenThreshold) + require.InDelta(t, 2.0, got.LongContextInputCostMultiplier, 1e-12) + require.InDelta(t, 1.5, got.LongContextOutputCostMultiplier, 1e-12) +} diff --git a/backend/resources/model-pricing/model_prices_and_context_window.json b/backend/resources/model-pricing/model_prices_and_context_window.json index 650e128e..72860bf9 100644 --- a/backend/resources/model-pricing/model_prices_and_context_window.json +++ b/backend/resources/model-pricing/model_prices_and_context_window.json @@ -5140,6 +5140,39 @@ "supports_vision": true, "supports_web_search": true }, + "gpt-5.4": { + 
"cache_read_input_token_cost": 2.5e-07, + "input_cost_per_token": 2.5e-06, + "litellm_provider": "openai", + "max_input_tokens": 1050000, + "max_output_tokens": 128000, + "max_tokens": 128000, + "mode": "chat", + "output_cost_per_token": 1.5e-05, + "supported_endpoints": [ + "/v1/chat/completions", + "/v1/responses" + ], + "supported_modalities": [ + "text", + "image" + ], + "supported_output_modalities": [ + "text", + "image" + ], + "supports_function_calling": true, + "supports_native_streaming": true, + "supports_parallel_function_calling": true, + "supports_pdf_input": true, + "supports_prompt_caching": true, + "supports_reasoning": true, + "supports_response_schema": true, + "supports_service_tier": true, + "supports_system_messages": true, + "supports_tool_choice": true, + "supports_vision": true + }, "gpt-5.3-codex": { "cache_read_input_token_cost": 1.75e-07, "cache_read_input_token_cost_priority": 3.5e-07, diff --git a/frontend/src/components/account/BulkEditAccountModal.vue b/frontend/src/components/account/BulkEditAccountModal.vue index 5bc338f7..1d6f32fe 100644 --- a/frontend/src/components/account/BulkEditAccountModal.vue +++ b/frontend/src/components/account/BulkEditAccountModal.vue @@ -951,6 +951,7 @@ const allModels = [ { value: 'claude-3-haiku-20240307', label: 'Claude 3 Haiku' }, { value: 'gpt-5.3-codex', label: 'GPT-5.3 Codex' }, { value: 'gpt-5.3-codex-spark', label: 'GPT-5.3 Codex Spark' }, + { value: 'gpt-5.4', label: 'GPT-5.4' }, { value: 'gpt-5.2-2025-12-11', label: 'GPT-5.2' }, { value: 'gpt-5.2-codex', label: 'GPT-5.2 Codex' }, { value: 'gpt-5.1-codex-max', label: 'GPT-5.1 Codex Max' }, @@ -1065,6 +1066,12 @@ const presetMappings = [ to: 'gpt-5.3-codex-spark', color: 'bg-emerald-100 text-emerald-700 hover:bg-emerald-200 dark:bg-emerald-900/30 dark:text-emerald-400' }, + { + label: 'GPT-5.4', + from: 'gpt-5.4', + to: 'gpt-5.4', + color: 'bg-rose-100 text-rose-700 hover:bg-rose-200 dark:bg-rose-900/30 dark:text-rose-400' + }, { label: 
'5.2→5.3', from: 'gpt-5.2-codex', diff --git a/frontend/src/components/keys/UseKeyModal.vue b/frontend/src/components/keys/UseKeyModal.vue index a0233742..a61ce6b4 100644 --- a/frontend/src/components/keys/UseKeyModal.vue +++ b/frontend/src/components/keys/UseKeyModal.vue @@ -670,6 +670,22 @@ function generateOpenCodeConfig(platform: string, baseUrl: string, apiKey: strin xhigh: {} } }, + 'gpt-5.4': { + name: 'GPT-5.4', + limit: { + context: 1050000, + output: 128000 + }, + options: { + store: false + }, + variants: { + low: {}, + medium: {}, + high: {}, + xhigh: {} + } + }, 'gpt-5.3-codex-spark': { name: 'GPT-5.3 Codex Spark', limit: { diff --git a/frontend/src/composables/__tests__/useModelWhitelist.spec.ts b/frontend/src/composables/__tests__/useModelWhitelist.spec.ts index 4088e5a4..79c88a29 100644 --- a/frontend/src/composables/__tests__/useModelWhitelist.spec.ts +++ b/frontend/src/composables/__tests__/useModelWhitelist.spec.ts @@ -2,6 +2,13 @@ import { describe, expect, it } from 'vitest' import { buildModelMappingObject, getModelsByPlatform } from '../useModelWhitelist' describe('useModelWhitelist', () => { + it('openai 模型列表包含 GPT-5.4 官方快照', () => { + const models = getModelsByPlatform('openai') + + expect(models).toContain('gpt-5.4') + expect(models).toContain('gpt-5.4-2026-03-05') + }) + it('antigravity 模型列表包含图片模型兼容项', () => { const models = getModelsByPlatform('antigravity') @@ -15,4 +22,12 @@ describe('useModelWhitelist', () => { 'gemini-3.1-flash-image': 'gemini-3.1-flash-image' }) }) + + it('whitelist 模式会保留 GPT-5.4 官方快照的精确映射', () => { + const mapping = buildModelMappingObject('whitelist', ['gpt-5.4-2026-03-05'], []) + + expect(mapping).toEqual({ + 'gpt-5.4-2026-03-05': 'gpt-5.4-2026-03-05' + }) + }) }) diff --git a/frontend/src/composables/useModelWhitelist.ts b/frontend/src/composables/useModelWhitelist.ts index 444e4b91..986a99f4 100644 --- a/frontend/src/composables/useModelWhitelist.ts +++ b/frontend/src/composables/useModelWhitelist.ts @@ -24,6 
+24,8 @@ const openaiModels = [ // GPT-5.2 系列 'gpt-5.2', 'gpt-5.2-2025-12-11', 'gpt-5.2-chat-latest', 'gpt-5.2-codex', 'gpt-5.2-pro', 'gpt-5.2-pro-2025-12-11', + // GPT-5.4 系列 + 'gpt-5.4', 'gpt-5.4-2026-03-05', // GPT-5.3 系列 'gpt-5.3-codex', 'gpt-5.3-codex-spark', 'chatgpt-4o-latest', @@ -277,6 +279,7 @@ const openaiPresetMappings = [ { label: 'GPT-5.3 Codex Spark', from: 'gpt-5.3-codex-spark', to: 'gpt-5.3-codex-spark', color: 'bg-teal-100 text-teal-700 hover:bg-teal-200 dark:bg-teal-900/30 dark:text-teal-400' }, { label: 'GPT-5.1', from: 'gpt-5.1', to: 'gpt-5.1', color: 'bg-orange-100 text-orange-700 hover:bg-orange-200 dark:bg-orange-900/30 dark:text-orange-400' }, { label: 'GPT-5.2', from: 'gpt-5.2', to: 'gpt-5.2', color: 'bg-red-100 text-red-700 hover:bg-red-200 dark:bg-red-900/30 dark:text-red-400' }, + { label: 'GPT-5.4', from: 'gpt-5.4', to: 'gpt-5.4', color: 'bg-rose-100 text-rose-700 hover:bg-rose-200 dark:bg-rose-900/30 dark:text-rose-400' }, { label: 'GPT-5.1 Codex', from: 'gpt-5.1-codex', to: 'gpt-5.1-codex', color: 'bg-cyan-100 text-cyan-700 hover:bg-cyan-200 dark:bg-cyan-900/30 dark:text-cyan-400' } ]