Merge pull request #1025 from QuantumNous/responses_buildin_tools

feat: implement OpenAI responses built-in tool tracking
Calcium-Ion authored 2025-05-07 02:25:47 +08:00, committed by GitHub
12 changed files with 451 additions and 177 deletions

View File

@@ -195,28 +195,28 @@ type OutputTokenDetails struct {
}
type OpenAIResponsesResponse struct {
ID string `json:"id"`
Object string `json:"object"`
CreatedAt int `json:"created_at"`
Status string `json:"status"`
Error *OpenAIError `json:"error,omitempty"`
IncompleteDetails *IncompleteDetails `json:"incomplete_details,omitempty"`
Instructions string `json:"instructions"`
MaxOutputTokens int `json:"max_output_tokens"`
Model string `json:"model"`
Output []ResponsesOutput `json:"output"`
ParallelToolCalls bool `json:"parallel_tool_calls"`
PreviousResponseID string `json:"previous_response_id"`
Reasoning *Reasoning `json:"reasoning"`
Store bool `json:"store"`
Temperature float64 `json:"temperature"`
ToolChoice string `json:"tool_choice"`
Tools []interface{} `json:"tools"`
TopP float64 `json:"top_p"`
Truncation string `json:"truncation"`
Usage *Usage `json:"usage"`
User json.RawMessage `json:"user"`
Metadata json.RawMessage `json:"metadata"`
ID string `json:"id"`
Object string `json:"object"`
CreatedAt int `json:"created_at"`
Status string `json:"status"`
Error *OpenAIError `json:"error,omitempty"`
IncompleteDetails *IncompleteDetails `json:"incomplete_details,omitempty"`
Instructions string `json:"instructions"`
MaxOutputTokens int `json:"max_output_tokens"`
Model string `json:"model"`
Output []ResponsesOutput `json:"output"`
ParallelToolCalls bool `json:"parallel_tool_calls"`
PreviousResponseID string `json:"previous_response_id"`
Reasoning *Reasoning `json:"reasoning"`
Store bool `json:"store"`
Temperature float64 `json:"temperature"`
ToolChoice string `json:"tool_choice"`
Tools []ResponsesToolsCall `json:"tools"`
TopP float64 `json:"top_p"`
Truncation string `json:"truncation"`
Usage *Usage `json:"usage"`
User json.RawMessage `json:"user"`
Metadata json.RawMessage `json:"metadata"`
}
type IncompleteDetails struct {
@@ -238,8 +238,12 @@ type ResponsesOutputContent struct {
}
const (
BuildInTools_WebSearch = "web_search_preview"
BuildInTools_FileSearch = "file_search"
BuildInToolWebSearchPreview = "web_search_preview"
BuildInToolFileSearch = "file_search"
)
const (
BuildInCallWebSearchCall = "web_search_call"
)
const (
@@ -250,6 +254,7 @@ const (
// ResponsesStreamResponse handles streaming responses for /v1/responses
type ResponsesStreamResponse struct {
Type string `json:"type"`
Response *OpenAIResponsesResponse `json:"response"`
Response *OpenAIResponsesResponse `json:"response,omitempty"`
Delta string `json:"delta,omitempty"`
Item *ResponsesOutput `json:"item,omitempty"`
}
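For context, these stream chunks decode as tagged events. Below is a minimal standalone sketch (with local stand-ins for the dto types, since that package is internal) of how a finished web-search call arrives; the exact "response.output_item.done" event name is an assumption inferred from the ResponsesOutputTypeItemDone constant used in the stream handler later in this commit, while "web_search_call" matches BuildInCallWebSearchCall above.

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of ResponsesStreamResponse / ResponsesOutput, for illustration only.
type outputItem struct {
	Type string `json:"type"`
}

type streamEvent struct {
	Type  string      `json:"type"`
	Delta string      `json:"delta,omitempty"`
	Item  *outputItem `json:"item,omitempty"`
}

func main() {
	// Hypothetical chunk marking a completed built-in web search call.
	data := `{"type":"response.output_item.done","item":{"type":"web_search_call"}}`
	var ev streamEvent
	if err := json.Unmarshal([]byte(data), &ev); err != nil {
		panic(err)
	}
	if ev.Item != nil && ev.Item.Type == "web_search_call" {
		fmt.Println("one web search call to bill") // what the stream handler tallies
	}
}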

View File

@@ -429,7 +429,7 @@ func (a *Adaptor) DoResponse(c *gin.Context, resp *http.Response, info *relaycom
if info.IsStream {
err, usage = OaiResponsesStreamHandler(c, resp, info)
} else {
err, usage = OpenaiResponsesHandler(c, resp, info)
err, usage = OaiResponsesHandler(c, resp, info)
}
default:
if info.IsStream {

View File

@@ -187,3 +187,10 @@ func handleFinalResponse(c *gin.Context, info *relaycommon.RelayInfo, lastStream
}
}
}
func sendResponsesStreamData(c *gin.Context, streamResponse dto.ResponsesStreamResponse, data string) {
if data == "" {
return
}
helper.ResponseChunkData(c, streamResponse, data)
}

View File

@@ -644,102 +644,3 @@ func OpenaiHandlerWithUsage(c *gin.Context, resp *http.Response, info *relaycomm
}
return nil, &usageResp.Usage
}
func OpenaiResponsesHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
// read response body
var responsesResponse dto.OpenAIResponsesResponse
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
err = common.DecodeJson(responseBody, &responsesResponse)
if err != nil {
return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if responsesResponse.Error != nil {
return &dto.OpenAIErrorWithStatusCode{
Error: dto.OpenAIError{
Message: responsesResponse.Error.Message,
Type: "openai_error",
Code: responsesResponse.Error.Code,
},
StatusCode: resp.StatusCode,
}, nil
}
// reset response body
resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
// We shouldn't set the headers before parsing the response body, because parsing may fail,
// and we would then have to send an error response after the headers were already written.
// That confuses HTTP clients: Postman, for example, reports an error and the response cannot be inspected at all.
for k, v := range resp.Header {
c.Writer.Header().Set(k, v[0])
}
c.Writer.WriteHeader(resp.StatusCode)
// copy response body
_, err = io.Copy(c.Writer, resp.Body)
if err != nil {
common.SysError("error copying response body: " + err.Error())
}
resp.Body.Close()
// compute usage
usage := dto.Usage{}
usage.PromptTokens = responsesResponse.Usage.InputTokens
usage.CompletionTokens = responsesResponse.Usage.OutputTokens
usage.TotalTokens = responsesResponse.Usage.TotalTokens
return nil, &usage
}
func OaiResponsesStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
if resp == nil || resp.Body == nil {
common.LogError(c, "invalid response or response body")
return service.OpenAIErrorWrapper(fmt.Errorf("invalid response"), "invalid_response", http.StatusInternalServerError), nil
}
var usage = &dto.Usage{}
var responseTextBuilder strings.Builder
helper.StreamScannerHandler(c, resp, info, func(data string) bool {
// Check whether this chunk carries the completed status and usage info
var streamResponse dto.ResponsesStreamResponse
if err := common.DecodeJsonStr(data, &streamResponse); err == nil {
sendResponsesStreamData(c, streamResponse, data)
switch streamResponse.Type {
case "response.completed":
usage.PromptTokens = streamResponse.Response.Usage.InputTokens
usage.CompletionTokens = streamResponse.Response.Usage.OutputTokens
usage.TotalTokens = streamResponse.Response.Usage.TotalTokens
case "response.output_text.delta":
// Accumulate the output text delta
responseTextBuilder.WriteString(streamResponse.Delta)
}
}
return true
})
if usage.CompletionTokens == 0 {
// Count tokens in the accumulated output text
tempStr := responseTextBuilder.String()
if len(tempStr) > 0 {
// The stream ended without a usage report; fall back to counting the output text
completionTokens, _ := service.CountTextToken(tempStr, info.UpstreamModelName)
usage.CompletionTokens = completionTokens
}
}
return nil, usage
}
func sendResponsesStreamData(c *gin.Context, streamResponse dto.ResponsesStreamResponse, data string) {
if data == "" {
return
}
helper.ResponseChunkData(c, streamResponse, data)
}

View File

@@ -0,0 +1,119 @@
package openai
import (
"bytes"
"fmt"
"io"
"net/http"
"one-api/common"
"one-api/dto"
relaycommon "one-api/relay/common"
"one-api/relay/helper"
"one-api/service"
"strings"
"github.com/gin-gonic/gin"
)
func OaiResponsesHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
// read response body
var responsesResponse dto.OpenAIResponsesResponse
responseBody, err := io.ReadAll(resp.Body)
if err != nil {
return service.OpenAIErrorWrapper(err, "read_response_body_failed", http.StatusInternalServerError), nil
}
err = resp.Body.Close()
if err != nil {
return service.OpenAIErrorWrapper(err, "close_response_body_failed", http.StatusInternalServerError), nil
}
err = common.DecodeJson(responseBody, &responsesResponse)
if err != nil {
return service.OpenAIErrorWrapper(err, "unmarshal_response_body_failed", http.StatusInternalServerError), nil
}
if responsesResponse.Error != nil {
return &dto.OpenAIErrorWithStatusCode{
Error: dto.OpenAIError{
Message: responsesResponse.Error.Message,
Type: "openai_error",
Code: responsesResponse.Error.Code,
},
StatusCode: resp.StatusCode,
}, nil
}
// reset response body
resp.Body = io.NopCloser(bytes.NewBuffer(responseBody))
// We shouldn't set the headers before parsing the response body, because parsing may fail,
// and we would then have to send an error response after the headers were already written.
// That confuses HTTP clients: Postman, for example, reports an error and the response cannot be inspected at all.
for k, v := range resp.Header {
c.Writer.Header().Set(k, v[0])
}
c.Writer.WriteHeader(resp.StatusCode)
// copy response body
_, err = io.Copy(c.Writer, resp.Body)
if err != nil {
common.SysError("error copying response body: " + err.Error())
}
resp.Body.Close()
// compute usage (guard against a missing usage block)
usage := dto.Usage{}
if responsesResponse.Usage != nil {
usage.PromptTokens = responsesResponse.Usage.InputTokens
usage.CompletionTokens = responsesResponse.Usage.OutputTokens
usage.TotalTokens = responsesResponse.Usage.TotalTokens
}
// Count built-in tool usage declared on the response; skip tool types
// that were never seeded into the map, to avoid a nil-pointer panic
for _, tool := range responsesResponse.Tools {
if toolInfo, ok := info.ResponsesUsageInfo.BuiltInTools[tool.Type]; ok && toolInfo != nil {
toolInfo.CallCount++
}
}
return nil, &usage
}
func OaiResponsesStreamHandler(c *gin.Context, resp *http.Response, info *relaycommon.RelayInfo) (*dto.OpenAIErrorWithStatusCode, *dto.Usage) {
if resp == nil || resp.Body == nil {
common.LogError(c, "invalid response or response body")
return service.OpenAIErrorWrapper(fmt.Errorf("invalid response"), "invalid_response", http.StatusInternalServerError), nil
}
var usage = &dto.Usage{}
var responseTextBuilder strings.Builder
helper.StreamScannerHandler(c, resp, info, func(data string) bool {
// Check whether this chunk carries the completed status and usage info
var streamResponse dto.ResponsesStreamResponse
if err := common.DecodeJsonStr(data, &streamResponse); err == nil {
sendResponsesStreamData(c, streamResponse, data)
switch streamResponse.Type {
case "response.completed":
if streamResponse.Response != nil && streamResponse.Response.Usage != nil {
usage.PromptTokens = streamResponse.Response.Usage.InputTokens
usage.CompletionTokens = streamResponse.Response.Usage.OutputTokens
usage.TotalTokens = streamResponse.Response.Usage.TotalTokens
}
case "response.output_text.delta":
// Accumulate the output text delta
responseTextBuilder.WriteString(streamResponse.Delta)
case dto.ResponsesOutputTypeItemDone:
// A completed output item; count finished built-in tool calls
if streamResponse.Item != nil {
switch streamResponse.Item.Type {
case dto.BuildInCallWebSearchCall:
// Guard against tool types that were never seeded from the request
if toolInfo, ok := info.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolWebSearchPreview]; ok && toolInfo != nil {
toolInfo.CallCount++
}
}
}
}
}
return true
})
if usage.CompletionTokens == 0 {
// Count tokens in the accumulated output text
tempStr := responseTextBuilder.String()
if len(tempStr) > 0 {
// The stream ended without a usage report; fall back to counting the output text
completionTokens, _ := service.CountTextToken(tempStr, info.UpstreamModelName)
usage.CompletionTokens = completionTokens
}
}
return nil, usage
}

View File

@@ -11,8 +11,8 @@ import (
"one-api/relay/channel/claude"
"one-api/relay/channel/gemini"
"one-api/relay/channel/openai"
"one-api/setting/model_setting"
relaycommon "one-api/relay/common"
"one-api/setting/model_setting"
"strings"
"github.com/gin-gonic/gin"

View File

@@ -36,6 +36,7 @@ type ClaudeConvertInfo struct {
const (
RelayFormatOpenAI = "openai"
RelayFormatClaude = "claude"
RelayFormatGemini = "gemini"
)
type RerankerInfo struct {
@@ -43,6 +44,16 @@ type RerankerInfo struct {
ReturnDocuments bool
}
type BuildInToolInfo struct {
ToolName string
CallCount int
SearchContextSize string
}
type ResponsesUsageInfo struct {
BuiltInTools map[string]*BuildInToolInfo
}
type RelayInfo struct {
ChannelType int
ChannelId int
@@ -90,6 +101,7 @@ type RelayInfo struct {
ThinkingContentInfo
*ClaudeConvertInfo
*RerankerInfo
*ResponsesUsageInfo
}
// 定义支持流式选项的通道类型
@@ -134,6 +146,31 @@ func GenRelayInfoRerank(c *gin.Context, req *dto.RerankRequest) *RelayInfo {
return info
}
func GenRelayInfoResponses(c *gin.Context, req *dto.OpenAIResponsesRequest) *RelayInfo {
info := GenRelayInfo(c)
info.RelayMode = relayconstant.RelayModeResponses
info.ResponsesUsageInfo = &ResponsesUsageInfo{
BuiltInTools: make(map[string]*BuildInToolInfo),
}
if len(req.Tools) > 0 {
for _, tool := range req.Tools {
info.ResponsesUsageInfo.BuiltInTools[tool.Type] = &BuildInToolInfo{
ToolName: tool.Type,
CallCount: 0,
}
switch tool.Type {
case dto.BuildInToolWebSearchPreview:
if tool.SearchContextSize == "" {
tool.SearchContextSize = "medium"
}
info.ResponsesUsageInfo.BuiltInTools[tool.Type].SearchContextSize = tool.SearchContextSize
}
}
}
info.IsStream = req.Stream
return info
}
func GenRelayInfo(c *gin.Context) *RelayInfo {
channelType := c.GetInt("channel_type")
channelId := c.GetInt("channel_id")
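A minimal sketch of the seeding logic in GenRelayInfoResponses above, with local stand-ins for the dto and relay types (illustration only, not the repo's API): a request that declares web_search_preview without a context size is tracked with the "medium" default.

package main

import "fmt"

type toolCall struct { // stands in for dto.ResponsesToolsCall
	Type              string
	SearchContextSize string
}

type builtInToolInfo struct { // stands in for BuildInToolInfo above
	ToolName          string
	CallCount         int
	SearchContextSize string
}

func seedBuiltInTools(tools []toolCall) map[string]*builtInToolInfo {
	m := make(map[string]*builtInToolInfo)
	for _, tool := range tools {
		m[tool.Type] = &builtInToolInfo{ToolName: tool.Type}
		if tool.Type == "web_search_preview" {
			if tool.SearchContextSize == "" {
				tool.SearchContextSize = "medium" // default, as above
			}
			m[tool.Type].SearchContextSize = tool.SearchContextSize
		}
	}
	return m
}

func main() {
	m := seedBuiltInTools([]toolCall{{Type: "web_search_preview"}})
	fmt.Println(m["web_search_preview"].SearchContextSize) // medium
}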

View File

@@ -19,7 +19,7 @@ import (
"github.com/gin-gonic/gin"
)
func getAndValidateResponsesRequest(c *gin.Context, relayInfo *relaycommon.RelayInfo) (*dto.OpenAIResponsesRequest, error) {
func getAndValidateResponsesRequest(c *gin.Context) (*dto.OpenAIResponsesRequest, error) {
request := &dto.OpenAIResponsesRequest{}
err := common.UnmarshalBodyReusable(c, request)
if err != nil {
@@ -31,13 +31,11 @@ func getAndValidateResponsesRequest(c *gin.Context, relayInfo *relaycommon.Relay
if len(request.Input) == 0 {
return nil, errors.New("input is required")
}
relayInfo.IsStream = request.Stream
return request, nil
}
func checkInputSensitive(textRequest *dto.OpenAIResponsesRequest, info *relaycommon.RelayInfo) ([]string, error) {
sensitiveWords, err := service.CheckSensitiveInput(textRequest.Input)
return sensitiveWords, err
}
@@ -49,12 +47,14 @@ func getInputTokens(req *dto.OpenAIResponsesRequest, info *relaycommon.RelayInfo
}
func ResponsesHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
relayInfo := relaycommon.GenRelayInfo(c)
req, err := getAndValidateResponsesRequest(c, relayInfo)
req, err := getAndValidateResponsesRequest(c)
if err != nil {
common.LogError(c, fmt.Sprintf("getAndValidateResponsesRequest error: %s", err.Error()))
return service.OpenAIErrorWrapperLocal(err, "invalid_responses_request", http.StatusBadRequest)
}
relayInfo := relaycommon.GenRelayInfoResponses(c, req)
if setting.ShouldCheckPromptSensitive() {
sensitiveWords, err := checkInputSensitive(req, relayInfo)
if err != nil {

View File

@@ -18,6 +18,7 @@ import (
"one-api/service"
"one-api/setting"
"one-api/setting/model_setting"
"one-api/setting/operation_setting"
"strings"
"time"
@@ -358,6 +359,34 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
ratio := dModelRatio.Mul(dGroupRatio)
// Billing for the OpenAI web search tool
var dWebSearchQuota decimal.Decimal
var webSearchPrice float64
if relayInfo.ResponsesUsageInfo != nil {
if webSearchTool, exists := relayInfo.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolWebSearchPreview]; exists && webSearchTool.CallCount > 0 {
// Quota for web search calls (quota = price * call count / 1000)
webSearchPrice = operation_setting.GetWebSearchPricePerThousand(modelName, webSearchTool.SearchContextSize)
dWebSearchQuota = decimal.NewFromFloat(webSearchPrice).
Mul(decimal.NewFromInt(int64(webSearchTool.CallCount))).
Div(decimal.NewFromInt(1000))
extraContent += fmt.Sprintf("Web Search 调用 %d 次,上下文大小 %s调用花费 $%s",
webSearchTool.CallCount, webSearchTool.SearchContextSize, dWebSearchQuota.String())
}
}
// Billing for the file search tool
var dFileSearchQuota decimal.Decimal
var fileSearchPrice float64
if relayInfo.ResponsesUsageInfo != nil {
if fileSearchTool, exists := relayInfo.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolFileSearch]; exists && fileSearchTool.CallCount > 0 {
fileSearchPrice = operation_setting.GetFileSearchPricePerThousand()
dFileSearchQuota = decimal.NewFromFloat(fileSearchPrice).
Mul(decimal.NewFromInt(int64(fileSearchTool.CallCount))).
Div(decimal.NewFromInt(1000))
extraContent += fmt.Sprintf("File Search 调用 %d 次,调用花费 $%s",
fileSearchTool.CallCount, dFileSearchQuota.String())
}
}
var quotaCalculateDecimal decimal.Decimal
if !priceData.UsePrice {
nonCachedTokens := dPromptTokens.Sub(dCacheTokens)
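The tool surcharge is linear in call count: quota = price_per_1K_calls * calls / 1000. A sketch of the same arithmetic with the shopspring/decimal API used above, assuming a high-tier model with medium context ($35.00 per 1K calls, per the pricing table added later in this commit) and 3 tracked calls:

package main

import (
	"fmt"

	"github.com/shopspring/decimal"
)

func main() {
	webSearchPrice := 35.00 // $ per 1K calls: high-tier model, medium context
	callCount := 3

	quota := decimal.NewFromFloat(webSearchPrice).
		Mul(decimal.NewFromInt(int64(callCount))).
		Div(decimal.NewFromInt(1000))

	fmt.Println(quota.String()) // 0.105, i.e. $0.105 added to the request's cost
}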
@@ -380,6 +409,9 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
} else {
quotaCalculateDecimal = dModelPrice.Mul(dQuotaPerUnit).Mul(dGroupRatio)
}
// Add quota consumed by responses built-in tool calls
quotaCalculateDecimal = quotaCalculateDecimal.Add(dWebSearchQuota)
quotaCalculateDecimal = quotaCalculateDecimal.Add(dFileSearchQuota)
quota := int(quotaCalculateDecimal.Round(0).IntPart())
totalTokens := promptTokens + completionTokens
@@ -430,6 +462,20 @@ func postConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
other["image_ratio"] = imageRatio
other["image_output"] = imageTokens
}
if !dWebSearchQuota.IsZero() && relayInfo.ResponsesUsageInfo != nil {
if webSearchTool, exists := relayInfo.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolWebSearchPreview]; exists {
other["web_search"] = true
other["web_search_call_count"] = webSearchTool.CallCount
other["web_search_price"] = webSearchPrice
}
}
if !dFileSearchQuota.IsZero() && relayInfo.ResponsesUsageInfo != nil {
if fileSearchTool, exists := relayInfo.ResponsesUsageInfo.BuiltInTools[dto.BuildInToolFileSearch]; exists {
other["file_search"] = true
other["file_search_call_count"] = fileSearchTool.CallCount
other["file_search_price"] = fileSearchPrice
}
}
model.RecordConsumeLog(ctx, relayInfo.UserId, relayInfo.ChannelId, promptTokens, completionTokens, logModel,
tokenName, quota, logContent, relayInfo.TokenId, userQuota, int(useTimeSeconds), relayInfo.IsStream, relayInfo.Group, other)
}
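The keys written into other here are what the frontend reads back when rendering a log row (see the LogsTable and renderModelPrice changes below). A hypothetical payload for a request that made two web search calls on a high-tier model:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical "other" blob; key names match those set above.
	other := map[string]interface{}{
		"web_search":            true,
		"web_search_call_count": 2,
		"web_search_price":      35.00,
	}
	b, _ := json.Marshal(other)
	fmt.Println(string(b)) // {"web_search":true,"web_search_call_count":2,"web_search_price":35}
}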

View File

@@ -0,0 +1,57 @@
package operation_setting
import "strings"
const (
// Web search
WebSearchHighTierModelPriceLow = 30.00
WebSearchHighTierModelPriceMedium = 35.00
WebSearchHighTierModelPriceHigh = 50.00
WebSearchPriceLow = 25.00
WebSearchPriceMedium = 27.50
WebSearchPriceHigh = 30.00
// File search
FileSearchPrice = 2.5
)
func GetWebSearchPricePerThousand(modelName string, contextSize string) float64 {
// Determine the model tier.
// https://platform.openai.com/docs/pricing: web search is billed by model tier and search context size.
// gpt-4.1, gpt-4o, and gpt-4o-search-preview are the pricier tier; gpt-4.1-mini, gpt-4o-mini, and gpt-4o-mini-search-preview are cheaper.
isHighTierModel := (strings.HasPrefix(modelName, "gpt-4.1") || strings.HasPrefix(modelName, "gpt-4o")) &&
!strings.Contains(modelName, "mini")
// Pick the price for the given search context size
var priceWebSearchPerThousandCalls float64
switch contextSize {
case "low":
if isHighTierModel {
priceWebSearchPerThousandCalls = WebSearchHighTierModelPriceLow
} else {
priceWebSearchPerThousandCalls = WebSearchPriceLow
}
case "medium":
if isHighTierModel {
priceWebSearchPerThousandCalls = WebSearchHighTierModelPriceMedium
} else {
priceWebSearchPerThousandCalls = WebSearchPriceMedium
}
case "high":
if isHighTierModel {
priceWebSearchPerThousandCalls = WebSearchHighTierModelPriceHigh
} else {
priceWebSearchPerThousandCalls = WebSearchPriceHigh
}
default:
// search context size defaults to medium
if isHighTierModel {
priceWebSearchPerThousandCalls = WebSearchHighTierModelPriceMedium
} else {
priceWebSearchPerThousandCalls = WebSearchPriceMedium
}
}
return priceWebSearchPerThousandCalls
}
func GetFileSearchPricePerThousand() float64 {
return FileSearchPrice
}
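Illustrative calls against the two helpers above; the printed values follow directly from the constants and the tier check (runnable only inside the one-api module, since the import path is internal):

package main

import (
	"fmt"

	"one-api/setting/operation_setting"
)

func main() {
	// gpt-4o without "mini" is high tier; medium context -> 35.00
	fmt.Println(operation_setting.GetWebSearchPricePerThousand("gpt-4o", "medium"))
	// gpt-4o-mini contains "mini" -> standard tier; low context -> 25.00
	fmt.Println(operation_setting.GetWebSearchPricePerThousand("gpt-4o-mini", "low"))
	// Any other size string falls back to medium pricing -> 27.50 for standard-tier models
	fmt.Println(operation_setting.GetWebSearchPricePerThousand("o3", ""))
	// File search is a flat $2.50 per 1K calls
	fmt.Println(operation_setting.GetFileSearchPricePerThousand())
}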

View File

@@ -618,7 +618,6 @@ const LogsTable = () => {
</Paragraph>
);
}
let content = other?.claude
? renderClaudeModelPriceSimple(
other.model_ratio,
@@ -935,6 +934,13 @@ const LogsTable = () => {
other.model_price,
other.group_ratio,
other?.user_group_ratio,
false,
1.0,
undefined,
other.web_search || false,
other.web_search_call_count || 0,
other.file_search || false,
other.file_search_call_count || 0,
),
});
}
@@ -995,6 +1001,12 @@ const LogsTable = () => {
other?.image || false,
other?.image_ratio || 0,
other?.image_output || 0,
other?.web_search || false,
other?.web_search_call_count || 0,
other?.web_search_price || 0,
other?.file_search || false,
other?.file_search_call_count || 0,
other?.file_search_price || 0,
);
}
expandDataLocal.push({

View File

@@ -317,6 +317,12 @@ export function renderModelPrice(
image = false,
imageRatio = 1.0,
imageOutputTokens = 0,
webSearch = false,
webSearchCallCount = 0,
webSearchPrice = 0,
fileSearch = false,
fileSearchCallCount = 0,
fileSearchPrice = 0,
) {
if (modelPrice !== -1) {
return i18next.t(
@@ -339,14 +345,17 @@ export function renderModelPrice(
// Calculate effective input tokens (non-cached + cached with ratio applied)
let effectiveInputTokens =
inputTokens - cacheTokens + cacheTokens * cacheRatio;
// Handle image tokens if present
// Handle image tokens if present
if (image && imageOutputTokens > 0) {
effectiveInputTokens = inputTokens - imageOutputTokens + imageOutputTokens * imageRatio;
effectiveInputTokens =
inputTokens - imageOutputTokens + imageOutputTokens * imageRatio;
}
let price =
(effectiveInputTokens / 1000000) * inputRatioPrice * groupRatio +
(completionTokens / 1000000) * completionRatioPrice * groupRatio;
(completionTokens / 1000000) * completionRatioPrice * groupRatio +
(webSearchCallCount / 1000) * webSearchPrice +
(fileSearchCallCount / 1000) * fileSearchPrice;
return (
<>
@@ -391,9 +400,23 @@ export function renderModelPrice(
)}
</p>
)}
{webSearch && webSearchCallCount > 0 && (
<p>
{i18next.t('Web搜索价格:${{price}} / 1K 次', {
price: webSearchPrice,
})}
</p>
)}
{fileSearch && fileSearchCallCount > 0 && (
<p>
{i18next.t('文件搜索价格:${{price}} / 1K 次', {
price: fileSearchPrice,
})}
</p>
)}
<p></p>
<p>
{cacheTokens > 0 && !image
{cacheTokens > 0 && !image && !webSearch && !fileSearch
? i18next.t(
'输入 {{nonCacheInput}} tokens / 1M tokens * ${{price}} + 缓存 {{cacheInput}} tokens / 1M tokens * ${{cachePrice}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}',
{
@@ -407,31 +430,75 @@ export function renderModelPrice(
total: price.toFixed(6),
},
)
: image && imageOutputTokens > 0
? i18next.t(
'输入 {{nonImageInput}} tokens + 图片输入 {{imageInput}} tokens * {{imageRatio}} / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}',
{
nonImageInput: inputTokens - imageOutputTokens,
imageInput: imageOutputTokens,
imageRatio: imageRatio,
price: inputRatioPrice,
completion: completionTokens,
compPrice: completionRatioPrice,
ratio: groupRatio,
total: price.toFixed(6),
},
)
: i18next.t(
'输入 {{input}} tokens / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}',
{
input: inputTokens,
price: inputRatioPrice,
completion: completionTokens,
compPrice: completionRatioPrice,
ratio: groupRatio,
total: price.toFixed(6),
},
)}
: image && imageOutputTokens > 0 && !webSearch && !fileSearch
? i18next.t(
'输入 {{nonImageInput}} tokens + 图片输入 {{imageInput}} tokens * {{imageRatio}} / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}',
{
nonImageInput: inputTokens - imageOutputTokens,
imageInput: imageOutputTokens,
imageRatio: imageRatio,
price: inputRatioPrice,
completion: completionTokens,
compPrice: completionRatioPrice,
ratio: groupRatio,
total: price.toFixed(6),
},
)
: webSearch && webSearchCallCount > 0 && !image && !fileSearch
? i18next.t(
'输入 {{input}} tokens / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} + Web搜索 {{webSearchCallCount}}次 / 1K 次 * ${{webSearchPrice}} = ${{total}}',
{
input: inputTokens,
price: inputRatioPrice,
completion: completionTokens,
compPrice: completionRatioPrice,
ratio: groupRatio,
webSearchCallCount,
webSearchPrice,
total: price.toFixed(6),
},
)
: fileSearch && fileSearchCallCount > 0 && !image && !webSearch
? i18next.t(
'输入 {{input}} tokens / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} + 文件搜索 {{fileSearchCallCount}}次 / 1K 次 * ${{fileSearchPrice}} = ${{total}}',
{
input: inputTokens,
price: inputRatioPrice,
completion: completionTokens,
compPrice: completionRatioPrice,
ratio: groupRatio,
fileSearchCallCount,
fileSearchPrice,
total: price.toFixed(6),
},
)
: webSearch && webSearchCallCount > 0 && fileSearch && fileSearchCallCount > 0 && !image
? i18next.t(
'输入 {{input}} tokens / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} + Web搜索 {{webSearchCallCount}}次 / 1K 次 * ${{webSearchPrice}} + 文件搜索 {{fileSearchCallCount}}次 / 1K 次 * ${{fileSearchPrice}} = ${{total}}',
{
input: inputTokens,
price: inputRatioPrice,
completion: completionTokens,
compPrice: completionRatioPrice,
ratio: groupRatio,
webSearchCallCount,
webSearchPrice,
fileSearchCallCount,
fileSearchPrice,
total: price.toFixed(6),
},
)
: i18next.t(
'输入 {{input}} tokens / 1M tokens * ${{price}} + 输出 {{completion}} tokens / 1M tokens * ${{compPrice}} * 分组 {{ratio}} = ${{total}}',
{
input: inputTokens,
price: inputRatioPrice,
completion: completionTokens,
compPrice: completionRatioPrice,
ratio: groupRatio,
total: price.toFixed(6),
},
)}
</p>
<p>{i18next.t('仅供参考,以实际扣费为准')}</p>
</article>
@@ -448,33 +515,56 @@ export function renderLogContent(
user_group_ratio,
image = false,
imageRatio = 1.0,
useUserGroupRatio = undefined
useUserGroupRatio = undefined,
webSearch = false,
webSearchCallCount = 0,
fileSearch = false,
fileSearchCallCount = 0,
) {
const ratioLabel = useUserGroupRatio ? i18next.t('专属倍率') : i18next.t('分组倍率');
const ratioLabel = useUserGroupRatio
? i18next.t('专属倍率')
: i18next.t('分组倍率');
const ratio = useUserGroupRatio ? user_group_ratio : groupRatio;
if (modelPrice !== -1) {
return i18next.t('模型价格 ${{price}}{{ratioType}} {{ratio}}', {
price: modelPrice,
ratioType: ratioLabel,
ratio
ratio,
});
} else {
if (image) {
return i18next.t('模型倍率 {{modelRatio}},输出倍率 {{completionRatio}},图片输入倍率 {{imageRatio}}{{ratioType}} {{ratio}}', {
modelRatio: modelRatio,
completionRatio: completionRatio,
imageRatio: imageRatio,
ratioType: ratioLabel,
ratio
});
return i18next.t(
'模型倍率 {{modelRatio}},输出倍率 {{completionRatio}},图片输入倍率 {{imageRatio}}{{ratioType}} {{ratio}}',
{
modelRatio: modelRatio,
completionRatio: completionRatio,
imageRatio: imageRatio,
ratioType: ratioLabel,
ratio,
},
);
} else if (webSearch) {
return i18next.t(
'模型倍率 {{modelRatio}},输出倍率 {{completionRatio}}{{ratioType}} {{ratio}},Web 搜索调用 {{webSearchCallCount}} 次',
{
modelRatio: modelRatio,
completionRatio: completionRatio,
ratioType: ratioLabel,
ratio,
webSearchCallCount,
},
);
} else {
return i18next.t('模型倍率 {{modelRatio}},输出倍率 {{completionRatio}}{{ratioType}} {{ratio}}', {
modelRatio: modelRatio,
completionRatio: completionRatio,
ratioType: ratioLabel,
ratio
});
return i18next.t(
'模型倍率 {{modelRatio}},输出倍率 {{completionRatio}}{{ratioType}} {{ratio}}',
{
modelRatio: modelRatio,
completionRatio: completionRatio,
ratioType: ratioLabel,
ratio,
},
);
}
}
}