Merge branch 'main' into main
.github/workflows/linux-release.yml
@@ -17,7 +17,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-node@v3
         with:
-          node-version: 16
+          node-version: 18
       - name: Build Frontend
         env:
           CI: ""
.github/workflows/macos-release.yml
@@ -17,7 +17,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-node@v3
         with:
-          node-version: 16
+          node-version: 18
       - name: Build Frontend
         env:
           CI: ""
.github/workflows/windows-release.yml
@@ -20,7 +20,7 @@ jobs:
           fetch-depth: 0
       - uses: actions/setup-node@v3
         with:
-          node-version: 16
+          node-version: 18
       - name: Build Frontend
         env:
           CI: ""
@@ -24,8 +24,8 @@ FROM alpine
 
 RUN apk update \
     && apk upgrade \
-    && apk add --no-cache ca-certificates tzdata ffmpeg\
-    && update-ca-certificates 2>/dev/null || true
+    && apk add --no-cache ca-certificates tzdata ffmpeg \
+    && update-ca-certificates
 
 COPY --from=builder2 /build/one-api /
 EXPOSE 3000
@@ -59,6 +59,10 @@
 13. 🎵 Added [Suno API](https://github.com/Suno-API/Suno-API) interface support, [Integration Guide](Suno.md)
 14. 🔄 Support for Rerank models, compatible with Cohere and Jina, can integrate with Dify, [Integration Guide](Rerank.md)
 15. ⚡ **[OpenAI Realtime API](https://platform.openai.com/docs/guides/realtime/integration)** - Support for OpenAI's Realtime API, including Azure channels
+16. 🧠 Support for setting reasoning effort through the model name suffix:
+    - Add suffix `-high` to set high reasoning effort (e.g., `o3-mini-high`)
+    - Add suffix `-medium` to set medium reasoning effort
+    - Add suffix `-low` to set low reasoning effort
 
 ## Model Support
 This version additionally supports:
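A minimal sketch of exercising the suffix from a client, assuming a local new-api deployment at `http://localhost:3000` (port 3000 matches the Dockerfile's `EXPOSE 3000` above) and a placeholder token, both hypothetical; the gateway strips `-high` and relays the request upstream as `o3-mini` with `reasoning_effort` set to `high`:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// The "-high" suffix requests high reasoning effort; the gateway strips it
	// before relaying, so the upstream provider sees model "o3-mini".
	body := []byte(`{"model":"o3-mini-high","messages":[{"role":"user","content":"ping"}]}`)
	req, err := http.NewRequest("POST", "http://localhost:3000/v1/chat/completions", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer sk-xxx") // placeholder token
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(b))
}
```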
@@ -84,6 +88,7 @@ You can add custom models gpt-4-gizmo-* in channels. These are third-party models
 - `GEMINI_VISION_MAX_IMAGE_NUM`: Gemini model maximum image number, default `16`, set to `-1` to disable
 - `MAX_FILE_DOWNLOAD_MB`: Maximum file download size in MB, default `20`
 - `CRYPTO_SECRET`: Encryption key for encrypting database content
+- `AZURE_DEFAULT_API_VERSION`: Azure channel default API version, used when the channel settings do not specify one, default `2024-12-01-preview`
 
 ## Deployment
 > [!TIP]
@@ -65,6 +65,10 @@
 14. 🔄 Support for Rerank models, currently compatible with Cohere and Jina, can integrate with Dify, [Integration Guide](Rerank.md)
 15. ⚡ **[OpenAI Realtime API](https://platform.openai.com/docs/guides/realtime/integration)** - Support for OpenAI's Realtime API, including Azure channels
 16. Support for entering the chat UI via the /chat2link route
+17. 🧠 Support for setting reasoning effort through the model name suffix:
+    - Add suffix `-high` to set high reasoning effort (e.g., `o3-mini-high`)
+    - Add suffix `-medium` to set medium reasoning effort (e.g., `o3-mini-medium`)
+    - Add suffix `-low` to set low reasoning effort (e.g., `o3-mini-low`)
 
 ## Model Support
 This version additionally supports the following models:
@@ -85,11 +89,12 @@
 - `GET_MEDIA_TOKEN`: whether to count image tokens, default `true`; when disabled, image tokens are no longer computed locally, which may differ from upstream billing. This option overrides `GET_MEDIA_TOKEN_NOT_STREAM`.
 - `GET_MEDIA_TOKEN_NOT_STREAM`: whether to count image tokens in non-stream (`stream=false`) requests, default `true`.
 - `UPDATE_TASK`: whether to update async tasks (Midjourney, Suno), default `true`; when disabled, task progress is not updated.
-- `GEMINI_MODEL_MAP`: pin Gemini models to an API version (v1/v1beta), written as “model:version” pairs separated by ",", e.g. -e GEMINI_MODEL_MAP="gemini-1.5-pro-latest:v1beta,gemini-1.5-pro-001:v1beta"; if empty, the default (v1beta) is used
-- `COHERE_SAFETY_SETTING`: Cohere model [safety setting](https://docs.cohere.com/docs/safety-modes#overview), one of `NONE`, `CONTEXTUAL`,`STRICT`, default `NONE`.
+- `GEMINI_MODEL_MAP`: pin Gemini models to an API version (v1/v1beta), written as "model:version" pairs separated by ",", e.g. -e GEMINI_MODEL_MAP="gemini-1.5-pro-latest:v1beta,gemini-1.5-pro-001:v1beta"; if empty, the default (v1beta) is used
+- `COHERE_SAFETY_SETTING`: Cohere model [safety setting](https://docs.cohere.com/docs/safety-modes#overview), one of `NONE`, `CONTEXTUAL`, `STRICT`, default `NONE`.
 - `GEMINI_VISION_MAX_IMAGE_NUM`: Gemini model maximum image number, default `16`, set to `-1` to disable the limit.
 - `MAX_FILE_DOWNLOAD_MB`: maximum file download size in MB, default `20`.
 - `CRYPTO_SECRET`: encryption key, used to encrypt database content.
+- `AZURE_DEFAULT_API_VERSION`: Azure channel default API version, used when the channel settings do not specify one, default `2024-12-01-preview`
 ## Deployment
 > [!TIP]
 > Latest Docker image: `calciumion/new-api:latest`
@@ -36,7 +36,7 @@ func SetupLogger() {
 		setupLogLock.Unlock()
 		setupLogWorking = false
 	}()
-	logPath := filepath.Join(*LogDir, fmt.Sprintf("oneapi-%s.log", time.Now().Format("20060102")))
+	logPath := filepath.Join(*LogDir, fmt.Sprintf("oneapi-%s.log", time.Now().Format("20060102150405")))
 	fd, err := os.OpenFile(logPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
 		log.Fatal("failed to open log file")
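For context, Go time layouts are written against the reference time `2006-01-02 15:04:05`, so widening the layout from `20060102` to `20060102150405` timestamps the log file to the second: each SetupLogger call now opens a fresh file instead of appending to one file per day. A standalone illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Date(2025, 2, 7, 9, 30, 15, 0, time.UTC)
	fmt.Println(now.Format("20060102"))       // 20250207       -> one log file per day
	fmt.Println(now.Format("20060102150405")) // 20250207093015 -> one file per call
}
```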
@@ -50,16 +50,24 @@ var defaultModelRatio = map[string]float64{
 	"gpt-4o-realtime-preview-2024-12-17":      2.5,
 	"gpt-4o-mini-realtime-preview":            0.3,
 	"gpt-4o-mini-realtime-preview-2024-12-17": 0.3,
-	"o1":                    7.5,
-	"o1-2024-12-17":         7.5,
-	"o1-preview":            7.5,
-	"o1-preview-2024-09-12": 7.5,
-	"o1-mini":               1.5,
-	"o1-mini-2024-09-12":    1.5,
-	"gpt-4o-mini":            0.075,
-	"gpt-4o-mini-2024-07-18": 0.075,
-	"gpt-4-turbo":            5, // $0.01 / 1K tokens
-	"gpt-4-turbo-2024-04-09": 5, // $0.01 / 1K tokens
+	"o1":                        7.5,
+	"o1-2024-12-17":             7.5,
+	"o1-preview":                7.5,
+	"o1-preview-2024-09-12":     7.5,
+	"o1-mini":                   0.55,
+	"o1-mini-2024-09-12":        0.55,
+	"o3-mini":                   0.55,
+	"o3-mini-2025-01-31":        0.55,
+	"o3-mini-high":              0.55,
+	"o3-mini-2025-01-31-high":   0.55,
+	"o3-mini-low":               0.55,
+	"o3-mini-2025-01-31-low":    0.55,
+	"o3-mini-medium":            0.55,
+	"o3-mini-2025-01-31-medium": 0.55,
+	"gpt-4o-mini":               0.075,
+	"gpt-4o-mini-2024-07-18":    0.075,
+	"gpt-4-turbo":               5, // $0.01 / 1K tokens
+	"gpt-4-turbo-2024-04-09":    5, // $0.01 / 1K tokens
 	//"gpt-3.5-turbo-0301": 0.75, //deprecated
 	"gpt-3.5-turbo":      0.25,
 	"gpt-3.5-turbo-0613": 0.75,
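Going by the in-file comment that a ratio of 5 equals $0.01 / 1K tokens, one ratio unit corresponds to $0.002 / 1K. The new `o3-mini` entries at 0.55 therefore price prompts at 0.55 x $0.002 = $0.0011 / 1K tokens (and `o1-mini` drops from $0.003 to the same figure); with the completion ratio of 4 applied to `o1`/`o3` models in the hunks below, completions come out at $0.0044 / 1K tokens.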
@@ -342,6 +350,12 @@ func UpdateCompletionRatioByJSONString(jsonStr string) error {
 }
 
 func GetCompletionRatio(name string) float64 {
+	if strings.Contains(name, "/") {
+		if ratio, ok := CompletionRatio[name]; ok {
+			return ratio
+		}
+	}
+	lowercaseName := strings.ToLower(name)
 	if strings.HasPrefix(name, "gpt-4-gizmo") {
 		name = "gpt-4-gizmo-*"
 	}
@@ -360,7 +374,7 @@ func GetCompletionRatio(name string) float64 {
 		}
 		return 2
 	}
-	if strings.HasPrefix(name, "o1") {
+	if strings.HasPrefix(name, "o1") || strings.HasPrefix(name, "o3") {
 		return 4
 	}
 	if name == "chatgpt-4o-latest" {
@@ -404,8 +418,8 @@ func GetCompletionRatio(name string) float64 {
 			return 4
 		}
 	}
-	if strings.HasPrefix(name, "deepseek") {
-		if name == "deepseek-reasoner" {
+	if strings.HasPrefix(lowercaseName, "deepseek") {
+		if strings.HasSuffix(lowercaseName, "reasoner") || strings.HasSuffix(lowercaseName, "r1") {
 			return 4
 		}
 		return 2
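With the lowercased comparison, mixed-case third-party names now match as well: any model under the `deepseek` prefix ending in `reasoner` or `r1` (for example `DeepSeek-R1`) gets completion ratio 4, and everything else under the prefix stays at 2.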
@@ -1,5 +1,6 @@
 package constant
 
 var (
-	ForceFormat = "force_format" // ForceFormat forces responses into the OpenAI format
+	ForceFormat        = "force_format" // ForceFormat forces responses into the OpenAI format
+	ChanelSettingProxy = "proxy"        // Proxy network proxy
 )
@@ -21,6 +21,8 @@ var GetMediaTokenNotStream = common.GetEnvOrDefaultBool("GET_MEDIA_TOKEN_NOT_STREAM", true)
 
 var UpdateTask = common.GetEnvOrDefaultBool("UPDATE_TASK", true)
 
+var AzureDefaultAPIVersion = common.GetEnvOrDefaultString("AZURE_DEFAULT_API_VERSION", "2024-12-01-preview")
+
 var GeminiModelMap = map[string]string{
 	"gemini-1.0-pro": "v1",
 }
@@ -78,6 +78,36 @@ type APGC2DGPTUsageResponse struct {
 	TotalUsed float64 `json:"total_used"`
 }
 
+type SiliconFlowUsageResponse struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+	Status  bool   `json:"status"`
+	Data    struct {
+		ID            string `json:"id"`
+		Name          string `json:"name"`
+		Image         string `json:"image"`
+		Email         string `json:"email"`
+		IsAdmin       bool   `json:"isAdmin"`
+		Balance       string `json:"balance"`
+		Status        string `json:"status"`
+		Introduction  string `json:"introduction"`
+		Role          string `json:"role"`
+		ChargeBalance string `json:"chargeBalance"`
+		TotalBalance  string `json:"totalBalance"`
+		Category      string `json:"category"`
+	} `json:"data"`
+}
+
+type DeepSeekUsageResponse struct {
+	IsAvailable  bool `json:"is_available"`
+	BalanceInfos []struct {
+		Currency        string `json:"currency"`
+		TotalBalance    string `json:"total_balance"`
+		GrantedBalance  string `json:"granted_balance"`
+		ToppedUpBalance string `json:"topped_up_balance"`
+	} `json:"balance_infos"`
+}
+
 // GetAuthHeader get auth header
 func GetAuthHeader(token string) http.Header {
 	h := http.Header{}
@@ -185,6 +215,57 @@ func updateChannelAPI2GPTBalance(channel *model.Channel) (float64, error) {
 	return response.TotalRemaining, nil
 }
 
+func updateChannelSiliconFlowBalance(channel *model.Channel) (float64, error) {
+	url := "https://api.siliconflow.cn/v1/user/info"
+	body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
+	if err != nil {
+		return 0, err
+	}
+	response := SiliconFlowUsageResponse{}
+	err = json.Unmarshal(body, &response)
+	if err != nil {
+		return 0, err
+	}
+	if response.Code != 20000 {
+		return 0, fmt.Errorf("code: %d, message: %s", response.Code, response.Message)
+	}
+	balance, err := strconv.ParseFloat(response.Data.TotalBalance, 64)
+	if err != nil {
+		return 0, err
+	}
+	channel.UpdateBalance(balance)
+	return balance, nil
+}
+
+func updateChannelDeepSeekBalance(channel *model.Channel) (float64, error) {
+	url := "https://api.deepseek.com/user/balance"
+	body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
+	if err != nil {
+		return 0, err
+	}
+	response := DeepSeekUsageResponse{}
+	err = json.Unmarshal(body, &response)
+	if err != nil {
+		return 0, err
+	}
+	index := -1
+	for i, balanceInfo := range response.BalanceInfos {
+		if balanceInfo.Currency == "CNY" {
+			index = i
+			break
+		}
+	}
+	if index == -1 {
+		return 0, errors.New("currency CNY not found")
+	}
+	balance, err := strconv.ParseFloat(response.BalanceInfos[index].TotalBalance, 64)
+	if err != nil {
+		return 0, err
+	}
+	channel.UpdateBalance(balance)
+	return balance, nil
+}
+
 func updateChannelAIGC2DBalance(channel *model.Channel) (float64, error) {
 	url := "https://api.aigc2d.com/dashboard/billing/credit_grants"
 	body, err := GetResponseBody("GET", url, channel, GetAuthHeader(channel.Key))
@@ -222,6 +303,10 @@ func updateChannelBalance(channel *model.Channel) (float64, error) {
 		return updateChannelAPI2GPTBalance(channel)
 	case common.ChannelTypeAIGC2D:
 		return updateChannelAIGC2DBalance(channel)
+	case common.ChannelTypeSiliconFlow:
+		return updateChannelSiliconFlowBalance(channel)
+	case common.ChannelTypeDeepSeek:
+		return updateChannelDeepSeekBalance(channel)
 	default:
 		return 0, errors.New("尚未实现")
 	}
@@ -300,9 +385,9 @@ func updateAllChannelsBalance() error {
 			continue
 		}
 		// TODO: support Azure
-		if channel.Type != common.ChannelTypeOpenAI && channel.Type != common.ChannelTypeCustom {
-			continue
-		}
+		//if channel.Type != common.ChannelTypeOpenAI && channel.Type != common.ChannelTypeCustom {
+		//	continue
+		//}
 		balance, err := updateChannelBalance(channel)
 		if err != nil {
 			continue
@@ -170,6 +170,7 @@ func buildTestRequest(model string) *dto.GeneralOpenAIRequest {
 		Model:  "", // this will be set later
 		Stream: false,
 	}
+	// first check whether this is an Embedding model
 	if strings.Contains(strings.ToLower(model), "embedding") ||
 		strings.HasPrefix(model, "m3e") || // m3e family models
@@ -180,10 +181,10 @@ func buildTestRequest(model string) *dto.GeneralOpenAIRequest {
 		return testRequest
 	}
 	// not an Embedding model
-	if strings.HasPrefix(model, "o1") {
+	if strings.HasPrefix(model, "o1") || strings.HasPrefix(model, "o3") {
 		testRequest.MaxCompletionTokens = 10
 	} else if strings.HasPrefix(model, "gemini-2.0-flash-thinking") {
-		testRequest.MaxTokens = 2
+		testRequest.MaxTokens = 10
 	} else {
 		testRequest.MaxTokens = 1
 	}
@@ -510,6 +510,7 @@ func UpdateChannel(c *gin.Context) {
 func FetchModels(c *gin.Context) {
 	var req struct {
 		BaseURL string `json:"base_url"`
+		Type    int    `json:"type"`
 		Key     string `json:"key"`
 	}
@@ -523,7 +524,7 @@ func FetchModels(c *gin.Context) {
 
 	baseURL := req.BaseURL
 	if baseURL == "" {
-		baseURL = "https://api.openai.com"
+		baseURL = common.ChannelBaseURLs[req.Type]
 	}
 
 	client := &http.Client{}
@@ -538,7 +539,11 @@ func FetchModels(c *gin.Context) {
 		return
 	}
 
-	request.Header.Set("Authorization", "Bearer "+req.Key)
+	// remove line breaks and extra spaces.
+	key := strings.TrimSpace(req.Key)
+	// If the key contains a line break, only take the first part.
+	key = strings.Split(key, "\n")[0]
+	request.Header.Set("Authorization", "Bearer "+key)
 
 	response, err := client.Do(request)
 	if err != nil {
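Presumably this guards against keys pasted with stray whitespace or extra lines: only the first line of the key is sent, so an embedded line break can no longer produce a malformed Authorization header.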
@@ -66,6 +66,7 @@ func GetStatus(c *gin.Context) {
 			"enable_online_topup": setting.PayAddress != "" && setting.EpayId != "" && setting.EpayKey != "",
 			"mj_notify_enabled":   setting.MjNotifyEnabled,
 			"chats":               setting.Chats,
+			"demo_site_enabled":   setting.DemoSiteEnabled,
 		},
 	})
 	return
docs/channel/other_setting.md (new file)
@@ -0,0 +1,28 @@
+# Channel extra settings
+
+This configuration sets additional channel parameters via a JSON object. It currently covers two settings:
+
+1. force_format
+   - Marks whether responses should be force-formatted into the OpenAI format
+   - Boolean; set to true to enable force formatting
+
+2. proxy
+   - Configures a network proxy
+   - String; fill in the proxy address (for example, a socks5 proxy address)
+
+--------------------------------------------------------------
+
+## JSON example
+
+An example configuration that enables force formatting and sets a proxy address:
+
+```json
+{
+    "force_format": true,
+    "proxy": "socks5://xxxxxxx"
+}
+```
+
+--------------------------------------------------------------
+
+By adjusting the values in the JSON above, you can flexibly control a channel's extra behavior, such as whether to reformat responses and which network proxy to use.
@@ -23,7 +23,7 @@ type GeneralOpenAIRequest struct {
 	MaxTokens           uint     `json:"max_tokens,omitempty"`
 	MaxCompletionTokens uint     `json:"max_completion_tokens,omitempty"`
 	ReasoningEffort     string   `json:"reasoning_effort,omitempty"`
-	Temperature         float64  `json:"temperature,omitempty"`
+	Temperature         *float64 `json:"temperature,omitempty"`
 	TopP                float64  `json:"top_p,omitempty"`
 	TopK                int      `json:"top_k,omitempty"`
 	Stop                any      `json:"stop,omitempty"`
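Switching `Temperature` to a pointer (here and in the per-channel request types below) distinguishes "not set" from an explicit `0`: with a plain `float64` and `omitempty`, a deliberate `temperature: 0` is dropped on re-serialization, while a pointer preserves it and also lets the relay hard-unset the field, as the o3 handling further down does with `request.Temperature = nil`. A minimal self-contained illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type valueReq struct {
	Temperature float64 `json:"temperature,omitempty"`
}

type pointerReq struct {
	Temperature *float64 `json:"temperature,omitempty"`
}

func main() {
	zero := 0.0
	v, _ := json.Marshal(valueReq{Temperature: 0})
	p, _ := json.Marshal(pointerReq{Temperature: &zero})
	n, _ := json.Marshal(pointerReq{Temperature: nil})
	fmt.Println(string(v)) // {}                -> an explicit 0 is lost
	fmt.Println(string(p)) // {"temperature":0} -> an explicit 0 survives
	fmt.Println(string(n)) // {}                -> unset stays unset
}
```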
model/log.go
@@ -43,6 +43,7 @@ const (
 
 func formatUserLogs(logs []*Log) {
 	for i := range logs {
+		logs[i].ChannelName = ""
 		var otherMap map[string]interface{}
 		otherMap = common.StrToMap(logs[i].Other)
 		if otherMap != nil {
@@ -129,38 +130,38 @@ func GetAllLogs(logType int, startTimestamp int64, endTimestamp int64, modelName
 	if logType == LogTypeUnknown {
 		tx = LOG_DB
 	} else {
-		tx = LOG_DB.Where("type = ?", logType)
+		tx = LOG_DB.Where("logs.type = ?", logType)
 	}
 
 	tx = tx.Joins("LEFT JOIN channels ON logs.channel_id = channels.id")
 	tx = tx.Select("logs.*, channels.name as channel_name")
 
 	if modelName != "" {
-		tx = tx.Where("model_name like ?", modelName)
+		tx = tx.Where("logs.model_name like ?", modelName)
 	}
 	if username != "" {
-		tx = tx.Where("username = ?", username)
+		tx = tx.Where("logs.username = ?", username)
 	}
 	if tokenName != "" {
-		tx = tx.Where("token_name = ?", tokenName)
+		tx = tx.Where("logs.token_name = ?", tokenName)
 	}
 	if startTimestamp != 0 {
-		tx = tx.Where("created_at >= ?", startTimestamp)
+		tx = tx.Where("logs.created_at >= ?", startTimestamp)
 	}
 	if endTimestamp != 0 {
-		tx = tx.Where("created_at <= ?", endTimestamp)
+		tx = tx.Where("logs.created_at <= ?", endTimestamp)
 	}
 	if channel != 0 {
-		tx = tx.Where("channel_id = ?", channel)
+		tx = tx.Where("logs.channel_id = ?", channel)
 	}
 	if group != "" {
-		tx = tx.Where(groupCol+" = ?", group)
+		tx = tx.Where("logs."+groupCol+" = ?", group)
 	}
 	err = tx.Model(&Log{}).Count(&total).Error
 	if err != nil {
 		return nil, 0, err
 	}
-	err = tx.Order("id desc").Limit(num).Offset(startIdx).Find(&logs).Error
+	err = tx.Order("logs.id desc").Limit(num).Offset(startIdx).Find(&logs).Error
 	if err != nil {
 		return nil, 0, err
 	}
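The `logs.` qualifiers go with the `LEFT JOIN channels` used to pull `channel_name`: with two tables in the query, bare column names that exist in both would be ambiguous, so every filter and the `ORDER BY` are pinned to the `logs` table.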
@@ -170,34 +171,34 @@ func GetAllLogs(logType int, startTimestamp int64, endTimestamp int64, modelName
 func GetUserLogs(userId int, logType int, startTimestamp int64, endTimestamp int64, modelName string, tokenName string, startIdx int, num int, group string) (logs []*Log, total int64, err error) {
 	var tx *gorm.DB
 	if logType == LogTypeUnknown {
-		tx = LOG_DB.Where("user_id = ?", userId)
+		tx = LOG_DB.Where("logs.user_id = ?", userId)
 	} else {
-		tx = LOG_DB.Where("user_id = ? and type = ?", userId, logType)
+		tx = LOG_DB.Where("logs.user_id = ? and logs.type = ?", userId, logType)
 	}
 
 	tx = tx.Joins("LEFT JOIN channels ON logs.channel_id = channels.id")
 	tx = tx.Select("logs.*, channels.name as channel_name")
 
 	if modelName != "" {
-		tx = tx.Where("model_name like ?", modelName)
+		tx = tx.Where("logs.model_name like ?", modelName)
 	}
 	if tokenName != "" {
-		tx = tx.Where("token_name = ?", tokenName)
+		tx = tx.Where("logs.token_name = ?", tokenName)
 	}
 	if startTimestamp != 0 {
-		tx = tx.Where("created_at >= ?", startTimestamp)
+		tx = tx.Where("logs.created_at >= ?", startTimestamp)
 	}
 	if endTimestamp != 0 {
-		tx = tx.Where("created_at <= ?", endTimestamp)
+		tx = tx.Where("logs.created_at <= ?", endTimestamp)
 	}
 	if group != "" {
-		tx = tx.Where(groupCol+" = ?", group)
+		tx = tx.Where("logs."+groupCol+" = ?", group)
 	}
 	err = tx.Model(&Log{}).Count(&total).Error
 	if err != nil {
 		return nil, 0, err
 	}
-	err = tx.Order("id desc").Limit(num).Offset(startIdx).Find(&logs).Error
+	err = tx.Order("logs.id desc").Limit(num).Offset(startIdx).Find(&logs).Error
 	formatUserLogs(logs)
 	return logs, total, err
 }
@@ -104,6 +104,7 @@ func InitOptionMap() {
 	common.OptionMap["MjForwardUrlEnabled"] = strconv.FormatBool(setting.MjForwardUrlEnabled)
 	common.OptionMap["MjActionCheckSuccessEnabled"] = strconv.FormatBool(setting.MjActionCheckSuccessEnabled)
 	common.OptionMap["CheckSensitiveEnabled"] = strconv.FormatBool(setting.CheckSensitiveEnabled)
+	common.OptionMap["DemoSiteEnabled"] = strconv.FormatBool(setting.DemoSiteEnabled)
 	common.OptionMap["CheckSensitiveOnPromptEnabled"] = strconv.FormatBool(setting.CheckSensitiveOnPromptEnabled)
 	//common.OptionMap["CheckSensitiveOnCompletionEnabled"] = strconv.FormatBool(constant.CheckSensitiveOnCompletionEnabled)
 	common.OptionMap["StopOnSensitiveEnabled"] = strconv.FormatBool(setting.StopOnSensitiveEnabled)
@@ -220,6 +221,8 @@ func updateOptionMap(key string, value string) (err error) {
 		setting.MjActionCheckSuccessEnabled = boolValue
 	case "CheckSensitiveEnabled":
 		setting.CheckSensitiveEnabled = boolValue
+	case "DemoSiteEnabled":
+		setting.DemoSiteEnabled = boolValue
 	case "CheckSensitiveOnPromptEnabled":
 		setting.CheckSensitiveOnPromptEnabled = boolValue
 	//case "CheckSensitiveOnCompletionEnabled":
@@ -39,7 +39,7 @@ func DoApiRequest(a Adaptor, c *gin.Context, info *common.RelayInfo, requestBody
 	if err != nil {
 		return nil, fmt.Errorf("setup request header failed: %w", err)
 	}
-	resp, err := doRequest(c, req)
+	resp, err := doRequest(c, req, info)
 	if err != nil {
 		return nil, fmt.Errorf("do request failed: %w", err)
 	}
@@ -62,7 +62,7 @@ func DoFormRequest(a Adaptor, c *gin.Context, info *common.RelayInfo, requestBody
 	if err != nil {
 		return nil, fmt.Errorf("setup request header failed: %w", err)
 	}
-	resp, err := doRequest(c, req)
+	resp, err := doRequest(c, req, info)
 	if err != nil {
 		return nil, fmt.Errorf("do request failed: %w", err)
 	}
@@ -90,8 +90,18 @@ func DoWssRequest(a Adaptor, c *gin.Context, info *common.RelayInfo, requestBody
 	return targetConn, nil
 }
 
-func doRequest(c *gin.Context, req *http.Request) (*http.Response, error) {
-	resp, err := service.GetHttpClient().Do(req)
+func doRequest(c *gin.Context, req *http.Request, info *common.RelayInfo) (*http.Response, error) {
+	var client *http.Client
+	var err error
+	if proxyURL, ok := info.ChannelSetting["proxy"]; ok {
+		client, err = service.NewProxyHttpClient(proxyURL.(string))
+		if err != nil {
+			return nil, fmt.Errorf("new proxy http client failed: %w", err)
+		}
+	} else {
+		client = service.GetHttpClient()
+	}
+	resp, err := client.Do(req)
 	if err != nil {
 		return nil, err
 	}
@@ -120,7 +130,7 @@ func DoTaskApiRequest(a TaskAdaptor, c *gin.Context, info *common.TaskRelayInfo,
 	if err != nil {
 		return nil, fmt.Errorf("setup request header failed: %w", err)
 	}
-	resp, err := doRequest(c, req)
+	resp, err := doRequest(c, req, info.ToRelayInfo())
 	if err != nil {
 		return nil, fmt.Errorf("do request failed: %w", err)
 	}
@@ -10,7 +10,7 @@ type AwsClaudeRequest struct {
 	System        string                 `json:"system,omitempty"`
 	Messages      []claude.ClaudeMessage `json:"messages"`
 	MaxTokens     uint                   `json:"max_tokens,omitempty"`
-	Temperature   float64                `json:"temperature,omitempty"`
+	Temperature   *float64               `json:"temperature,omitempty"`
 	TopP          float64                `json:"top_p,omitempty"`
 	TopK          int                    `json:"top_k,omitempty"`
 	StopSequences []string               `json:"stop_sequences,omitempty"`
@@ -12,7 +12,7 @@ type BaiduMessage struct {
 
 type BaiduChatRequest struct {
 	Messages     []BaiduMessage `json:"messages"`
-	Temperature  float64        `json:"temperature,omitempty"`
+	Temperature  *float64       `json:"temperature,omitempty"`
 	TopP         float64        `json:"top_p,omitempty"`
 	PenaltyScore float64        `json:"penalty_score,omitempty"`
 	Stream       bool           `json:"stream,omitempty"`
@@ -50,7 +50,7 @@ type ClaudeRequest struct {
 	MaxTokens         uint     `json:"max_tokens,omitempty"`
 	MaxTokensToSample uint     `json:"max_tokens_to_sample,omitempty"`
 	StopSequences     []string `json:"stop_sequences,omitempty"`
-	Temperature       float64  `json:"temperature,omitempty"`
+	Temperature       *float64 `json:"temperature,omitempty"`
 	TopP              float64  `json:"top_p,omitempty"`
 	TopK              int      `json:"top_k,omitempty"`
 	//ClaudeMetadata    `json:"metadata,omitempty"`
@@ -9,7 +9,7 @@ type CfRequest struct {
 	Prompt      string   `json:"prompt,omitempty"`
 	Raw         bool     `json:"raw,omitempty"`
 	Stream      bool     `json:"stream,omitempty"`
-	Temperature float64  `json:"temperature,omitempty"`
+	Temperature *float64 `json:"temperature,omitempty"`
 }
 
 type CfAudioResponse struct {
@@ -71,7 +71,7 @@ type GeminiChatTool struct {
 }
 
 type GeminiChatGenerationConfig struct {
-	Temperature     float64  `json:"temperature,omitempty"`
+	Temperature     *float64 `json:"temperature,omitempty"`
 	TopP            float64  `json:"topP,omitempty"`
 	TopK            float64  `json:"topK,omitempty"`
 	MaxOutputTokens uint     `json:"maxOutputTokens,omitempty"`
@@ -6,7 +6,7 @@ type OllamaRequest struct {
 	Model       string        `json:"model,omitempty"`
 	Messages    []dto.Message `json:"messages,omitempty"`
 	Stream      bool          `json:"stream,omitempty"`
-	Temperature float64       `json:"temperature,omitempty"`
+	Temperature *float64      `json:"temperature,omitempty"`
 	Seed        float64       `json:"seed,omitempty"`
 	Topp        float64       `json:"top_p,omitempty"`
 	TopK        int           `json:"top_k,omitempty"`
@@ -18,14 +18,14 @@ type OllamaRequest struct {
 }
 
 type Options struct {
-	Seed             int     `json:"seed,omitempty"`
-	Temperature      float64 `json:"temperature,omitempty"`
-	TopK             int     `json:"top_k,omitempty"`
-	TopP             float64 `json:"top_p,omitempty"`
-	FrequencyPenalty float64 `json:"frequency_penalty,omitempty"`
-	PresencePenalty  float64 `json:"presence_penalty,omitempty"`
-	NumPredict       int     `json:"num_predict,omitempty"`
-	NumCtx           int     `json:"num_ctx,omitempty"`
+	Seed             int      `json:"seed,omitempty"`
+	Temperature      *float64 `json:"temperature,omitempty"`
+	TopK             int      `json:"top_k,omitempty"`
+	TopP             float64  `json:"top_p,omitempty"`
+	FrequencyPenalty float64  `json:"frequency_penalty,omitempty"`
+	PresencePenalty  float64  `json:"presence_penalty,omitempty"`
+	NumPredict       int      `json:"num_predict,omitempty"`
+	NumCtx           int      `json:"num_ctx,omitempty"`
 }
 
 type OllamaEmbeddingRequest struct {
@@ -10,6 +10,7 @@ import (
 	"mime/multipart"
 	"net/http"
 	"one-api/common"
+	constant2 "one-api/constant"
 	"one-api/dto"
 	"one-api/relay/channel"
 	"one-api/relay/channel/ai360"
@@ -44,16 +45,20 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
 	}
 	switch info.ChannelType {
 	case common.ChannelTypeAzure:
+		apiVersion := info.ApiVersion
+		if apiVersion == "" {
+			apiVersion = constant2.AzureDefaultAPIVersion
+		}
 		// https://learn.microsoft.com/en-us/azure/cognitive-services/openai/chatgpt-quickstart?pivots=rest-api&tabs=command-line#rest-api
 		requestURL := strings.Split(info.RequestURLPath, "?")[0]
-		requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, info.ApiVersion)
+		requestURL = fmt.Sprintf("%s?api-version=%s", requestURL, apiVersion)
 		task := strings.TrimPrefix(requestURL, "/v1/")
 		model_ := info.UpstreamModelName
 		model_ = strings.Replace(model_, ".", "", -1)
 		// https://github.com/songquanpeng/one-api/issues/67
 		requestURL = fmt.Sprintf("/openai/deployments/%s/%s", model_, task)
 		if info.RelayMode == constant.RelayModeRealtime {
-			requestURL = fmt.Sprintf("/openai/realtime?deployment=%s&api-version=%s", model_, info.ApiVersion)
+			requestURL = fmt.Sprintf("/openai/realtime?deployment=%s&api-version=%s", model_, apiVersion)
 		}
 		return relaycommon.GetFullRequestURL(info.BaseUrl, requestURL, info.ChannelType), nil
 	case common.ChannelTypeMiniMax:
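For example, with deployment `gpt-4o` and no per-channel API version configured, a `/v1/chat/completions` call is now rewritten to `/openai/deployments/gpt-4o/chat/completions?api-version=2024-12-01-preview`, the fallback coming from `AZURE_DEFAULT_API_VERSION` rather than an empty `info.ApiVersion`.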
@@ -109,13 +114,28 @@ func (a *Adaptor) ConvertRequest(c *gin.Context, info *relaycommon.RelayInfo, request
 	if info.ChannelType != common.ChannelTypeOpenAI && info.ChannelType != common.ChannelTypeAzure {
 		request.StreamOptions = nil
 	}
-	if strings.HasPrefix(request.Model, "o1") {
+	if strings.HasPrefix(request.Model, "o1") || strings.HasPrefix(request.Model, "o3") {
 		if request.MaxCompletionTokens == 0 && request.MaxTokens != 0 {
 			request.MaxCompletionTokens = request.MaxTokens
 			request.MaxTokens = 0
 		}
+		if strings.HasPrefix(request.Model, "o3") {
+			request.Temperature = nil
+		}
+		if strings.HasSuffix(request.Model, "-high") {
+			request.ReasoningEffort = "high"
+			request.Model = strings.TrimSuffix(request.Model, "-high")
+		} else if strings.HasSuffix(request.Model, "-low") {
+			request.ReasoningEffort = "low"
+			request.Model = strings.TrimSuffix(request.Model, "-low")
+		} else if strings.HasSuffix(request.Model, "-medium") {
+			request.ReasoningEffort = "medium"
+			request.Model = strings.TrimSuffix(request.Model, "-medium")
+		}
+		info.ReasoningEffort = request.ReasoningEffort
+		info.UpstreamModelName = request.Model
 	}
-	if request.Model == "o1" || request.Model == "o1-2024-12-17" {
+	if request.Model == "o1" || request.Model == "o1-2024-12-17" || strings.HasPrefix(request.Model, "o3") {
 		// rewrite the first message, changing its role from system to developer
 		if len(request.Messages) > 0 && request.Messages[0].Role == "system" {
 			request.Messages[0].Role = "developer"
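Taken together: a request for `o3-mini-high` leaves this block as model `o3-mini` with `reasoning_effort` set to `high`, `info.UpstreamModelName` tracking the stripped name, and any leading `system` message relabeled `developer`, which the o1/o3 API expects in place of `system`.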
@@ -13,6 +13,10 @@ var ModelList = []string{
 	"gpt-4o-mini", "gpt-4o-mini-2024-07-18",
 	"o1-preview", "o1-preview-2024-09-12",
 	"o1-mini", "o1-mini-2024-09-12",
+	"o3-mini", "o3-mini-2025-01-31",
+	"o3-mini-high", "o3-mini-2025-01-31-high",
+	"o3-mini-low", "o3-mini-2025-01-31-low",
+	"o3-mini-medium", "o3-mini-2025-01-31-medium",
 	"o1", "o1-2024-12-17",
 	"gpt-4o-audio-preview", "gpt-4o-audio-preview-2024-10-01",
 	"gpt-4o-realtime-preview", "gpt-4o-realtime-preview-2024-10-01", "gpt-4o-realtime-preview-2024-12-17",
@@ -18,7 +18,7 @@ type PaLMPrompt struct {
 
 type PaLMChatRequest struct {
 	Prompt         PaLMPrompt `json:"prompt"`
-	Temperature    float64    `json:"temperature,omitempty"`
+	Temperature    *float64   `json:"temperature,omitempty"`
 	CandidateCount int        `json:"candidateCount,omitempty"`
 	TopP           float64    `json:"topP,omitempty"`
 	TopK           uint       `json:"topK,omitempty"`
@@ -39,9 +39,7 @@ func requestOpenAI2Tencent(a *Adaptor, request dto.GeneralOpenAIRequest) *TencentChatRequest {
 	if request.TopP != 0 {
 		req.TopP = &request.TopP
 	}
-	if request.Temperature != 0 {
-		req.Temperature = &request.Temperature
-	}
+	req.Temperature = request.Temperature
 	return &req
 }
@@ -9,7 +9,7 @@ type VertexAIClaudeRequest struct {
 	MaxTokens     int           `json:"max_tokens,omitempty"`
 	StopSequences []string      `json:"stop_sequences,omitempty"`
 	Stream        bool          `json:"stream,omitempty"`
-	Temperature   float64       `json:"temperature,omitempty"`
+	Temperature   *float64      `json:"temperature,omitempty"`
 	TopP          float64       `json:"top_p,omitempty"`
 	TopK          int           `json:"top_k,omitempty"`
 	Tools         []claude.Tool `json:"tools,omitempty"`
@@ -13,11 +13,11 @@ type XunfeiChatRequest struct {
 	} `json:"header"`
 	Parameter struct {
 		Chat struct {
-			Domain      string  `json:"domain,omitempty"`
-			Temperature float64 `json:"temperature,omitempty"`
-			TopK        int     `json:"top_k,omitempty"`
-			MaxTokens   uint    `json:"max_tokens,omitempty"`
-			Auditing    bool    `json:"auditing,omitempty"`
+			Domain      string   `json:"domain,omitempty"`
+			Temperature *float64 `json:"temperature,omitempty"`
+			TopK        int      `json:"top_k,omitempty"`
+			MaxTokens   uint     `json:"max_tokens,omitempty"`
+			Auditing    bool     `json:"auditing,omitempty"`
 		} `json:"chat"`
 	} `json:"parameter"`
 	Payload struct {
@@ -12,7 +12,7 @@ type ZhipuMessage struct {
 
 type ZhipuRequest struct {
 	Prompt      []ZhipuMessage `json:"prompt"`
-	Temperature float64        `json:"temperature,omitempty"`
+	Temperature *float64       `json:"temperature,omitempty"`
 	TopP        float64        `json:"top_p,omitempty"`
 	RequestId   string         `json:"request_id,omitempty"`
 	Incremental bool           `json:"incremental,omitempty"`
@@ -30,6 +30,7 @@ type RelayInfo struct {
 	RelayMode         int
 	UpstreamModelName string
 	OriginModelName   string
+	RecodeModelName   string
 	RequestURLPath    string
 	ApiVersion        string
 	PromptTokens      int
@@ -45,6 +46,7 @@ type RelayInfo struct {
 	RealtimeTools   []dto.RealTimeTool
 	IsFirstRequest  bool
 	AudioUsage      bool
+	ReasoningEffort string
 	ChannelSetting  map[string]interface{}
 }
@@ -87,6 +89,7 @@ func GenRelayInfo(c *gin.Context) *RelayInfo {
 		FirstResponseTime: startTime.Add(-time.Second),
 		OriginModelName:   c.GetString("original_model"),
 		UpstreamModelName: c.GetString("original_model"),
+		RecodeModelName:   c.GetString("recode_model"),
 		ApiType:           apiType,
 		ApiVersion:        c.GetString("api_version"),
 		ApiKey:            strings.TrimPrefix(c.Request.Header.Get("Authorization"), "Bearer "),
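The relay now tracks three model names: `OriginModelName` (what the client sent), `UpstreamModelName` (what is actually forwarded, e.g. the suffix-stripped name after the OpenAI adaptor rewrites it), and the new `RecodeModelName`, which the billing and logging paths below switch to so that usage is recorded under the client-facing name even after the upstream name is rewritten mid-request.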
@@ -93,6 +93,7 @@ func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 		}
 	}
 	relayInfo.UpstreamModelName = textRequest.Model
+	relayInfo.RecodeModelName = textRequest.Model
 	modelPrice, getModelPriceSuccess := common.GetModelPrice(textRequest.Model, false)
 	groupRatio := setting.GetGroupRatio(relayInfo.Group)
@@ -218,10 +219,10 @@ func TextHelper(c *gin.Context) (openaiErr *dto.OpenAIErrorWithStatusCode) {
 		return openaiErr
 	}
 
-	if strings.HasPrefix(relayInfo.UpstreamModelName, "gpt-4o-audio") {
+	if strings.HasPrefix(relayInfo.RecodeModelName, "gpt-4o-audio") {
 		service.PostAudioConsumeQuota(c, relayInfo, usage.(*dto.Usage), preConsumedQuota, userQuota, modelRatio, groupRatio, modelPrice, getModelPriceSuccess, "")
 	} else {
-		postConsumeQuota(c, relayInfo, textRequest.Model, usage.(*dto.Usage), ratio, preConsumedQuota, userQuota, modelRatio, groupRatio, modelPrice, getModelPriceSuccess, "")
+		postConsumeQuota(c, relayInfo, relayInfo.RecodeModelName, usage.(*dto.Usage), ratio, preConsumedQuota, userQuota, modelRatio, groupRatio, modelPrice, getModelPriceSuccess, "")
 	}
 	return nil
 }
@@ -1,7 +1,12 @@
 package service
 
 import (
+	"context"
+	"fmt"
+	"golang.org/x/net/proxy"
+	"net"
 	"net/http"
+	"net/url"
 	"one-api/common"
 	"time"
 )
@@ -30,3 +35,43 @@ func GetHttpClient() *http.Client {
 func GetImpatientHttpClient() *http.Client {
 	return impatientHTTPClient
 }
+
+// NewProxyHttpClient creates an HTTP client with proxy support
+func NewProxyHttpClient(proxyURL string) (*http.Client, error) {
+	if proxyURL == "" {
+		return http.DefaultClient, nil
+	}
+
+	// parse the proxy URL
+	parsedURL, err := url.Parse(proxyURL)
+	if err != nil {
+		return nil, err
+	}
+
+	switch parsedURL.Scheme {
+	case "http", "https":
+		return &http.Client{
+			Transport: &http.Transport{
+				Proxy: http.ProxyURL(parsedURL),
+			},
+		}, nil
+
+	case "socks5":
+		// create a SOCKS5 proxy dialer
+		dialer, err := proxy.SOCKS5("tcp", parsedURL.Host, nil, proxy.Direct)
+		if err != nil {
+			return nil, err
+		}
+
+		return &http.Client{
+			Transport: &http.Transport{
+				DialContext: func(ctx context.Context, network, addr string) (net.Conn, error) {
+					return dialer.Dial(network, addr)
+				},
+			},
+		}, nil
+
+	default:
+		return nil, fmt.Errorf("unsupported proxy scheme: %s", parsedURL.Scheme)
+	}
+}
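A usage sketch with a placeholder proxy address; the import path assumes the caller lives inside the one-api module:

```go
package main

import (
	"fmt"
	"log"

	"one-api/service"
)

func main() {
	// http, https, and socks5 schemes are accepted; anything else returns an error.
	client, err := service.NewProxyHttpClient("socks5://127.0.0.1:1080") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get("https://api.github.com")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```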
@@ -13,6 +13,9 @@ func GenerateTextOtherInfo(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, m
 	other["completion_ratio"] = completionRatio
 	other["model_price"] = modelPrice
 	other["frt"] = float64(relayInfo.FirstResponseTime.UnixMilli() - relayInfo.StartTime.UnixMilli())
+	if relayInfo.ReasoningEffort != "" {
+		other["reasoning_effort"] = relayInfo.ReasoningEffort
+	}
 	adminInfo := make(map[string]interface{})
 	adminInfo["use_channel"] = ctx.GetStringSlice("use_channel")
 	other["admin_info"] = adminInfo
@@ -182,9 +182,9 @@ func PostAudioConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
 	audioOutTokens := usage.CompletionTokenDetails.AudioTokens
 
 	tokenName := ctx.GetString("token_name")
-	completionRatio := common.GetCompletionRatio(relayInfo.UpstreamModelName)
-	audioRatio := common.GetAudioRatio(relayInfo.UpstreamModelName)
-	audioCompletionRatio := common.GetAudioCompletionRatio(relayInfo.UpstreamModelName)
+	completionRatio := common.GetCompletionRatio(relayInfo.RecodeModelName)
+	audioRatio := common.GetAudioRatio(relayInfo.RecodeModelName)
+	audioCompletionRatio := common.GetAudioCompletionRatio(relayInfo.RecodeModelName)
 
 	quotaInfo := QuotaInfo{
 		InputDetails: TokenDetails{
@@ -195,7 +195,7 @@ func PostAudioConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
 			TextTokens:  textOutTokens,
 			AudioTokens: audioOutTokens,
 		},
-		ModelName:  relayInfo.UpstreamModelName,
+		ModelName:  relayInfo.RecodeModelName,
 		UsePrice:   usePrice,
 		ModelRatio: modelRatio,
 		GroupRatio: groupRatio,
@@ -218,7 +218,7 @@ func PostAudioConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
 		quota = 0
 		logContent += fmt.Sprintf("(可能是上游超时)")
 		common.LogError(ctx, fmt.Sprintf("total tokens is 0, cannot consume quota, userId %d, channelId %d, "+
-			"tokenId %d, model %s, pre-consumed quota %d", relayInfo.UserId, relayInfo.ChannelId, relayInfo.TokenId, relayInfo.UpstreamModelName, preConsumedQuota))
+			"tokenId %d, model %s, pre-consumed quota %d", relayInfo.UserId, relayInfo.ChannelId, relayInfo.TokenId, relayInfo.RecodeModelName, preConsumedQuota))
 	} else {
 		quotaDelta := quota - preConsumedQuota
 		if quotaDelta != 0 {
@@ -231,7 +231,7 @@ func PostAudioConsumeQuota(ctx *gin.Context, relayInfo *relaycommon.RelayInfo,
 		model.UpdateChannelUsedQuota(relayInfo.ChannelId, quota)
 	}
 
-	logModel := relayInfo.UpstreamModelName
+	logModel := relayInfo.RecodeModelName
 	if extraContent != "" {
 		logContent += ", " + extraContent
 	}
setting/operation_setting.go (new file)
@@ -0,0 +1,3 @@
+package setting
+
+var DemoSiteEnabled = false
@@ -608,7 +608,12 @@ const LogsTable = () => {
             key: t('计费过程'),
             value: content,
           });
+          if (other?.reasoning_effort) {
+            expandDataLocal.push({
+              key: t('Reasoning Effort'),
+              value: other.reasoning_effort,
+            });
+          }
         }
         expandDatesLocal[logs[i].key] = expandDataLocal;
       }
@@ -58,6 +58,7 @@ const OperationSetting = () => {
     DefaultCollapseSidebar: false, // collapse the sidebar by default
     RetryTimes: 0,
     Chats: "[]",
+    DemoSiteEnabled: false,
   });
 
   let [loading, setLoading] = useState(false);
@@ -218,6 +218,7 @@ const EditChannel = (props) => {
     try {
       const res = await API.post('/api/channel/fetch_models', {
         base_url: inputs['base_url'],
+        type: inputs['type'],
        key: inputs['key']
       });
@@ -885,7 +886,7 @@ const EditChannel = (props) => {
           </Typography.Text>
         </div>
         <TextArea
-          placeholder={t('此项可选,用于复写返回的状态码,比如将claude渠道的400错误复写为500(用于重试),请勿滥用该功能,例如:') +
+          placeholder={t('此项可选,用于复写返回的状态码,比如将claude渠道的400错误复写为500(用于重试),请勿滥用该功能,例如:') +
             '\n' + JSON.stringify(STATUS_CODE_MAPPING_EXAMPLE, null, 2)}
           name="status_code_mapping"
           onChange={(value) => {
@@ -965,7 +966,6 @@ const EditChannel = (props) => {
           value={inputs.weight}
           autoComplete="new-password"
         />
         {inputs.type === 8 && (
           <>
             <div style={{ marginTop: 10 }}>
               <Typography.Text strong>
@@ -982,25 +982,38 @@ const EditChannel = (props) => {
           value={inputs.setting}
           autoComplete="new-password"
         />
-        <Typography.Text
-          style={{
-            color: 'rgba(var(--semi-blue-5), 1)',
-            userSelect: 'none',
-            cursor: 'pointer'
-          }}
-          onClick={() => {
-            handleInputChange(
-              'setting',
-              JSON.stringify({
-                force_format: true
-              }, null, 2)
-            );
-          }}
-        >
-          {t('填入模板')}
-        </Typography.Text>
+        <Space>
+          <Typography.Text
+            style={{
+              color: 'rgba(var(--semi-blue-5), 1)',
+              userSelect: 'none',
+              cursor: 'pointer'
+            }}
+            onClick={() => {
+              handleInputChange(
+                'setting',
+                JSON.stringify({
+                  force_format: true
+                }, null, 2)
+              );
+            }}
+          >
+            {t('填入模板')}
+          </Typography.Text>
+          <Typography.Text
+            style={{
+              color: 'rgba(var(--semi-blue-5), 1)',
+              userSelect: 'none',
+              cursor: 'pointer'
+            }}
+            onClick={() => {
+              window.open('https://github.com/Calcium-Ion/new-api/blob/main/docs/channel/other_setting.md');
+            }}
+          >
+            {t('设置说明')}
+          </Typography.Text>
+        </Space>
       </>
     )}
   </Spin>
 </SideSheet>
 </>
@@ -21,6 +21,7 @@ export default function GeneralSettings(props) {
     DisplayInCurrencyEnabled: false,
     DisplayTokenStatEnabled: false,
     DefaultCollapseSidebar: false,
+    DemoSiteEnabled: false,
   });
   const refForm = useRef();
   const [inputsRow, setInputsRow] = useState(inputs);
@@ -188,6 +189,23 @@ export default function GeneralSettings(props) {
           />
         </Col>
       </Row>
+      <Row>
+        <Col span={8}>
+          <Form.Switch
+            field={'DemoSiteEnabled'}
+            label={t('演示站点模式')}
+            size='default'
+            checkedText='|'
+            uncheckedText='〇'
+            onChange={(value) =>
+              setInputs({
+                ...inputs,
+                DemoSiteEnabled: value
+              })
+            }
+          />
+        </Col>
+      </Row>
       <Row>
         <Button size='default' onClick={onSubmit}>
           {t('保存通用设置')}