fix(openai): 修复 gpt-5.4 长上下文计费与快照白名单

补齐 gpt-5.4 fallback 的长上下文计费元信息,
确保超过 272000 输入 token 时对整次会话应用
2x 输入与 1.5x 输出计费规则。

同时将官方快照 gpt-5.4-2026-03-05 加入前端
白名单候选与回归测试,避免 whitelist 模式误拦截。

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

(cherry picked from commit d95497af87f608c6dadcbe7d6e851de9413ae147)
This commit is contained in:
yangjianbo
2026-03-06 09:04:58 +08:00
parent 1a0d4ed668
commit f366026435
6 changed files with 137 additions and 31 deletions

View File

@@ -24,12 +24,15 @@ var (
openAIModelDatePattern = regexp.MustCompile(`-\d{8}$`)
openAIModelBasePattern = regexp.MustCompile(`^(gpt-\d+(?:\.\d+)?)(?:-|$)`)
openAIGPT54FallbackPricing = &LiteLLMModelPricing{
InputCostPerToken: 2.5e-06, // $2.5 per MTok
OutputCostPerToken: 1.5e-05, // $15 per MTok
CacheReadInputTokenCost: 2.5e-07, // $0.25 per MTok
LiteLLMProvider: "openai",
Mode: "chat",
SupportsPromptCaching: true,
InputCostPerToken: 2.5e-06, // $2.5 per MTok
OutputCostPerToken: 1.5e-05, // $15 per MTok
CacheReadInputTokenCost: 2.5e-07, // $0.25 per MTok
LongContextInputTokenThreshold: 272000,
LongContextInputCostMultiplier: 2.0,
LongContextOutputCostMultiplier: 1.5,
LiteLLMProvider: "openai",
Mode: "chat",
SupportsPromptCaching: true,
}
)
@@ -41,6 +44,9 @@ type LiteLLMModelPricing struct {
CacheCreationInputTokenCost float64 `json:"cache_creation_input_token_cost"`
CacheCreationInputTokenCostAbove1hr float64 `json:"cache_creation_input_token_cost_above_1hr"`
CacheReadInputTokenCost float64 `json:"cache_read_input_token_cost"`
LongContextInputTokenThreshold int `json:"long_context_input_token_threshold,omitempty"`
LongContextInputCostMultiplier float64 `json:"long_context_input_cost_multiplier,omitempty"`
LongContextOutputCostMultiplier float64 `json:"long_context_output_cost_multiplier,omitempty"`
LiteLLMProvider string `json:"litellm_provider"`
Mode string `json:"mode"`
SupportsPromptCaching bool `json:"supports_prompt_caching"`