refactor: optimize billing flow for OpenAI-to-Anthropic conversion

Seefs
2026-03-23 14:22:12 +08:00
parent deff59a5be
commit 9ecad90652
16 changed files with 809 additions and 433 deletions


@@ -555,6 +555,35 @@ type ClaudeResponseInfo struct {
	Done bool
}

func cacheCreationTokensForOpenAIUsage(usage *dto.Usage) int {
	if usage == nil {
		return 0
	}
	splitCacheCreationTokens := usage.ClaudeCacheCreation5mTokens + usage.ClaudeCacheCreation1hTokens
	if splitCacheCreationTokens == 0 {
		return usage.PromptTokensDetails.CachedCreationTokens
	}
	if usage.PromptTokensDetails.CachedCreationTokens > splitCacheCreationTokens {
		return usage.PromptTokensDetails.CachedCreationTokens
	}
	return splitCacheCreationTokens
}

func buildOpenAIStyleUsageFromClaudeUsage(usage *dto.Usage) dto.Usage {
	if usage == nil {
		return dto.Usage{}
	}
	clone := *usage
	cacheCreationTokens := cacheCreationTokensForOpenAIUsage(usage)
	totalInputTokens := usage.PromptTokens + usage.PromptTokensDetails.CachedTokens + cacheCreationTokens
	clone.PromptTokens = totalInputTokens
	clone.InputTokens = totalInputTokens
	clone.TotalTokens = totalInputTokens + usage.CompletionTokens
	clone.UsageSemantic = "openai"
	clone.UsageSource = "anthropic"
	return clone
}

func buildMessageDeltaPatchUsage(claudeResponse *dto.ClaudeResponse, claudeInfo *ClaudeResponseInfo) *dto.ClaudeUsage {
	usage := &dto.ClaudeUsage{}
	if claudeResponse != nil && claudeResponse.Usage != nil {
@@ -643,6 +672,7 @@ func FormatClaudeResponseInfo(claudeResponse *dto.ClaudeResponse, oaiResponse *d
		// message_start: capture the initial usage
		if claudeResponse.Message != nil && claudeResponse.Message.Usage != nil {
			claudeInfo.Usage.PromptTokens = claudeResponse.Message.Usage.InputTokens
			claudeInfo.Usage.UsageSemantic = "anthropic"
			claudeInfo.Usage.PromptTokensDetails.CachedTokens = claudeResponse.Message.Usage.CacheReadInputTokens
			claudeInfo.Usage.PromptTokensDetails.CachedCreationTokens = claudeResponse.Message.Usage.CacheCreationInputTokens
			claudeInfo.Usage.ClaudeCacheCreation5mTokens = claudeResponse.Message.Usage.GetCacheCreation5mTokens()
@@ -661,6 +691,7 @@ func FormatClaudeResponseInfo(claudeResponse *dto.ClaudeResponse, oaiResponse *d
	} else if claudeResponse.Type == "message_delta" {
		// capture the final usage for the stream
		if claudeResponse.Usage != nil {
			claudeInfo.Usage.UsageSemantic = "anthropic"
			if claudeResponse.Usage.InputTokens > 0 {
				// do not accumulate; take only the latest value
				claudeInfo.Usage.PromptTokens = claudeResponse.Usage.InputTokens
@@ -754,12 +785,16 @@ func HandleStreamFinalResponse(c *gin.Context, info *relaycommon.RelayInfo, clau
		}
		claudeInfo.Usage = service.ResponseText2Usage(c, claudeInfo.ResponseText.String(), info.UpstreamModelName, claudeInfo.Usage.PromptTokens)
	}
	if claudeInfo.Usage != nil {
		claudeInfo.Usage.UsageSemantic = "anthropic"
	}
	if info.RelayFormat == types.RelayFormatClaude {
		//
	} else if info.RelayFormat == types.RelayFormatOpenAI {
		if info.ShouldIncludeUsage {
			response := helper.GenerateFinalUsageResponse(claudeInfo.ResponseId, claudeInfo.Created, info.UpstreamModelName, *claudeInfo.Usage)
			openAIUsage := buildOpenAIStyleUsageFromClaudeUsage(claudeInfo.Usage)
			response := helper.GenerateFinalUsageResponse(claudeInfo.ResponseId, claudeInfo.Created, info.UpstreamModelName, openAIUsage)
			err := helper.ObjectData(c, response)
			if err != nil {
				common.SysLog("send final response failed: " + err.Error())
@@ -810,6 +845,7 @@ func HandleClaudeResponseData(c *gin.Context, info *relaycommon.RelayInfo, claud
		claudeInfo.Usage.PromptTokens = claudeResponse.Usage.InputTokens
		claudeInfo.Usage.CompletionTokens = claudeResponse.Usage.OutputTokens
		claudeInfo.Usage.TotalTokens = claudeResponse.Usage.InputTokens + claudeResponse.Usage.OutputTokens
		claudeInfo.Usage.UsageSemantic = "anthropic"
		claudeInfo.Usage.PromptTokensDetails.CachedTokens = claudeResponse.Usage.CacheReadInputTokens
		claudeInfo.Usage.PromptTokensDetails.CachedCreationTokens = claudeResponse.Usage.CacheCreationInputTokens
		claudeInfo.Usage.ClaudeCacheCreation5mTokens = claudeResponse.Usage.GetCacheCreation5mTokens()
@@ -819,7 +855,7 @@ func HandleClaudeResponseData(c *gin.Context, info *relaycommon.RelayInfo, claud
	switch info.RelayFormat {
	case types.RelayFormatOpenAI:
		openaiResponse := ResponseClaude2OpenAI(&claudeResponse)
		openaiResponse.Usage = *claudeInfo.Usage
		openaiResponse.Usage = buildOpenAIStyleUsageFromClaudeUsage(claudeInfo.Usage)
		responseData, err = json.Marshal(openaiResponse)
		if err != nil {
			return types.NewError(err, types.ErrorCodeBadResponseBody)
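
A minimal standalone sketch of the token arithmetic behind the new helpers, using the same figures as the tests added below; dto.Usage is stood in for by plain ints, so the variable names here are illustrative only.

package main

import "fmt"

func main() {
	// Anthropic-style usage splits input into non-cached, cache-read and
	// cache-creation buckets; OpenAI-style prompt_tokens covers all input.
	promptTokens := 100          // non-cached input tokens
	cachedTokens := 30           // cache_read_input_tokens
	cacheCreation5m := 10        // 5-minute cache creation tokens
	cacheCreation1h := 20        // 1-hour cache creation tokens
	aggregateCacheCreation := 50 // cache_creation_input_tokens (aggregate)
	completionTokens := 20

	// Prefer the aggregate figure when it exceeds the 5m+1h split so any
	// remainder outside the split buckets is still counted; otherwise fall
	// back to the split sum.
	cacheCreation := cacheCreation5m + cacheCreation1h
	if aggregateCacheCreation > cacheCreation {
		cacheCreation = aggregateCacheCreation
	}

	totalInput := promptTokens + cachedTokens + cacheCreation
	fmt.Println(totalInput, totalInput+completionTokens) // prints: 180 200
}

Anthropic reports input_tokens exclusive of cache reads and cache creation, whereas OpenAI's prompt_tokens is the all-inclusive input figure, which is why the conversion re-sums the buckets before the response leaves in OpenAI format.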


@@ -173,3 +173,85 @@ func TestFormatClaudeResponseInfo_ContentBlockDelta(t *testing.T) {
		t.Errorf("ResponseText = %q, want %q", claudeInfo.ResponseText.String(), "hello")
	}
}

func TestBuildOpenAIStyleUsageFromClaudeUsage(t *testing.T) {
	usage := &dto.Usage{
		PromptTokens:     100,
		CompletionTokens: 20,
		PromptTokensDetails: dto.InputTokenDetails{
			CachedTokens:         30,
			CachedCreationTokens: 50,
		},
		ClaudeCacheCreation5mTokens: 10,
		ClaudeCacheCreation1hTokens: 20,
		UsageSemantic:               "anthropic",
	}
	openAIUsage := buildOpenAIStyleUsageFromClaudeUsage(usage)
	if openAIUsage.PromptTokens != 180 {
		t.Fatalf("PromptTokens = %d, want 180", openAIUsage.PromptTokens)
	}
	if openAIUsage.InputTokens != 180 {
		t.Fatalf("InputTokens = %d, want 180", openAIUsage.InputTokens)
	}
	if openAIUsage.TotalTokens != 200 {
		t.Fatalf("TotalTokens = %d, want 200", openAIUsage.TotalTokens)
	}
	if openAIUsage.UsageSemantic != "openai" {
		t.Fatalf("UsageSemantic = %s, want openai", openAIUsage.UsageSemantic)
	}
	if openAIUsage.UsageSource != "anthropic" {
		t.Fatalf("UsageSource = %s, want anthropic", openAIUsage.UsageSource)
	}
}

func TestBuildOpenAIStyleUsageFromClaudeUsagePreservesCacheCreationRemainder(t *testing.T) {
	tests := []struct {
		name                    string
		cachedCreationTokens    int
		cacheCreationTokens5m   int
		cacheCreationTokens1h   int
		expectedTotalInputToken int
	}{
		{
			name:                    "prefers aggregate when it includes remainder",
			cachedCreationTokens:    50,
			cacheCreationTokens5m:   10,
			cacheCreationTokens1h:   20,
			expectedTotalInputToken: 180,
		},
		{
			name:                    "falls back to split tokens when aggregate missing",
			cachedCreationTokens:    0,
			cacheCreationTokens5m:   10,
			cacheCreationTokens1h:   20,
			expectedTotalInputToken: 160,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			usage := &dto.Usage{
				PromptTokens:     100,
				CompletionTokens: 20,
				PromptTokensDetails: dto.InputTokenDetails{
					CachedTokens:         30,
					CachedCreationTokens: tt.cachedCreationTokens,
				},
				ClaudeCacheCreation5mTokens: tt.cacheCreationTokens5m,
				ClaudeCacheCreation1hTokens: tt.cacheCreationTokens1h,
				UsageSemantic:               "anthropic",
			}
			openAIUsage := buildOpenAIStyleUsageFromClaudeUsage(usage)
			if openAIUsage.PromptTokens != tt.expectedTotalInputToken {
				t.Fatalf("PromptTokens = %d, want %d", openAIUsage.PromptTokens, tt.expectedTotalInputToken)
			}
			if openAIUsage.InputTokens != tt.expectedTotalInputToken {
				t.Fatalf("InputTokens = %d, want %d", openAIUsage.InputTokens, tt.expectedTotalInputToken)
			}
		})
	}
}