feat: Add prompt cache hit tokens support for DeepSeek channel #406
@@ -166,6 +166,7 @@ type Usage struct {
 	PromptTokens         int                `json:"prompt_tokens"`
 	CompletionTokens     int                `json:"completion_tokens"`
 	TotalTokens          int                `json:"total_tokens"`
+	PromptCacheHitTokens int                `json:"prompt_cache_hit_tokens,omitempty"`
 	PromptTokensDetails    InputTokenDetails  `json:"prompt_tokens_details"`
 	CompletionTokenDetails OutputTokenDetails `json:"completion_tokens_details"`
 }
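Note on the new field: DeepSeek's chat completions API reports cached-prefix usage via a `prompt_cache_hit_tokens` counter alongside the standard token counts, which is what this struct field captures. A minimal, self-contained sketch of how such a payload unmarshals — the `Usage` type below is a trimmed stand-in for the one in the diff, and the token counts are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed stand-in for the Usage struct above; only the fields
// relevant to this example are kept.
type Usage struct {
	PromptTokens         int `json:"prompt_tokens"`
	CompletionTokens     int `json:"completion_tokens"`
	TotalTokens          int `json:"total_tokens"`
	PromptCacheHitTokens int `json:"prompt_cache_hit_tokens,omitempty"`
}

func main() {
	// Example payload in the shape DeepSeek returns; counts are made up.
	raw := `{"prompt_tokens":120,"completion_tokens":30,"total_tokens":150,"prompt_cache_hit_tokens":100}`
	var u Usage
	if err := json.Unmarshal([]byte(raw), &u); err != nil {
		panic(err)
	}
	fmt.Printf("cache hits: %d of %d prompt tokens\n", u.PromptCacheHitTokens, u.PromptTokens)
}
```

Because the tag carries `omitempty`, the field also stays out of serialized responses for channels that never set it.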
@@ -254,6 +254,12 @@ func OaiStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.Rel
 	if !containStreamUsage {
 		usage, _ = service.ResponseText2Usage(responseTextBuilder.String(), info.UpstreamModelName, info.PromptTokens)
 		usage.CompletionTokens += toolCount * 7
+	} else {
+		if info.ChannelType == common.ChannelTypeDeepSeek {
+			if usage.PromptCacheHitTokens != 0 {
+				usage.PromptTokensDetails.CachedTokens = usage.PromptCacheHitTokens
+			}
+		}
 	}
 
 	if info.ShouldIncludeUsage && !containStreamUsage {
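The new `else` branch only runs when the upstream stream already carried a usage object; for the DeepSeek channel it copies the vendor-specific counter into the OpenAI-style `prompt_tokens_details.cached_tokens` so downstream consumers read a single canonical field. A minimal sketch of that mapping in isolation — the types are trimmed stand-ins, and `normalizeDeepSeekUsage` is a hypothetical helper, not a function in the repository:

```go
package main

import "fmt"

type InputTokenDetails struct {
	CachedTokens int `json:"cached_tokens"`
}

type Usage struct {
	PromptTokens         int               `json:"prompt_tokens"`
	PromptCacheHitTokens int               `json:"prompt_cache_hit_tokens,omitempty"`
	PromptTokensDetails  InputTokenDetails `json:"prompt_tokens_details"`
}

// normalizeDeepSeekUsage mirrors the diff's logic: only touch cached_tokens
// when the upstream actually reported a non-zero cache-hit count, so a
// zero value never clobbers details another code path may have filled in.
func normalizeDeepSeekUsage(u *Usage) {
	if u.PromptCacheHitTokens != 0 {
		u.PromptTokensDetails.CachedTokens = u.PromptCacheHitTokens
	}
}

func main() {
	u := Usage{PromptTokens: 120, PromptCacheHitTokens: 100}
	normalizeDeepSeekUsage(&u)
	fmt.Println(u.PromptTokensDetails.CachedTokens) // 100
}
```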