Merge remote-tracking branch 'origin/alpha' into alpha

t0ng7u
2025-08-09 13:10:20 +08:00
22 changed files with 276 additions and 193 deletions

View File

@@ -40,4 +40,6 @@ const (
ContextKeyUserGroup ContextKey = "user_group"
ContextKeyUsingGroup ContextKey = "group"
ContextKeyUserName ContextKey = "username"
ContextKeySystemPromptOverride ContextKey = "system_prompt_override"
)

View File

@@ -145,6 +145,22 @@ func UpdateMidjourneyTaskBulk() {
buttonStr, _ := json.Marshal(responseItem.Buttons)
task.Buttons = string(buttonStr)
}
// Map VideoUrl
task.VideoUrl = responseItem.VideoUrl
// Map VideoUrls: serialize the array to a JSON string
if len(responseItem.VideoUrls) > 0 {
	videoUrlsStr, err := json.Marshal(responseItem.VideoUrls)
	if err != nil {
		common.LogError(ctx, fmt.Sprintf("failed to marshal VideoUrls: %v", err))
		task.VideoUrls = "[]" // fall back to an empty JSON array on failure
	} else {
		task.VideoUrls = string(videoUrlsStr)
	}
} else {
	task.VideoUrls = "" // clear the field when there are no video URLs
}
shouldReturnQuota := false
if (task.Progress != "100%" && responseItem.FailReason != "") || (task.Progress == "100%" && task.Status == "FAILURE") {
common.LogInfo(ctx, task.MjId+" task failed: "+task.FailReason)
@@ -208,6 +224,20 @@ func checkMjTaskNeedUpdate(oldTask *model.Midjourney, newTask dto.MidjourneyDto)
if oldTask.Progress != "100%" && newTask.FailReason != "" {
return true
}
// Check whether VideoUrl needs updating
if oldTask.VideoUrl != newTask.VideoUrl {
	return true
}
// Check whether VideoUrls needs updating
if len(newTask.VideoUrls) > 0 {
	newVideoUrlsStr, _ := json.Marshal(newTask.VideoUrls)
	if oldTask.VideoUrls != string(newVideoUrlsStr) {
		return true
	}
} else if oldTask.VideoUrls != "" {
	// the new data has no VideoUrls but the old data does, so update (clear) it
	return true
}
}
return false
}
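Both hunks rely on the same serialize-then-compare rule: VideoUrls is stored on the model as a JSON string, so the incoming slice is marshaled before comparison, and an empty slice only forces an update when the stored value is non-empty. A minimal standalone sketch of that rule (videoUrlsChanged is a hypothetical helper, not part of the codebase):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // videoUrlsChanged reports whether the stored JSON string differs from the
    // incoming slice, using the same rules as checkMjTaskNeedUpdate above.
    func videoUrlsChanged(stored string, incoming []string) bool {
    	if len(incoming) > 0 {
    		b, _ := json.Marshal(incoming) // marshal errors are ignored here, as above
    		return stored != string(b)
    	}
    	// no incoming URLs: only a change if the stored field still holds something
    	return stored != ""
    }

    func main() {
    	fmt.Println(videoUrlsChanged(`["https://a/v.mp4"]`, []string{"https://a/v.mp4"})) // false
    	fmt.Println(videoUrlsChanged("", []string{"https://a/v.mp4"}))                    // true
    	fmt.Println(videoUrlsChanged(`["https://a/v.mp4"]`, nil))                         // true
    }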

View File

@@ -6,4 +6,5 @@ type ChannelSettings struct {
Proxy string `json:"proxy"`
PassThroughBodyEnabled bool `json:"pass_through_body_enabled,omitempty"`
SystemPrompt string `json:"system_prompt,omitempty"`
SystemPromptOverride bool `json:"system_prompt_override,omitempty"`
}

View File

@@ -216,10 +216,14 @@ type GeminiEmbeddingRequest struct {
OutputDimensionality int `json:"outputDimensionality,omitempty"`
}
-type GeminiEmbeddingResponse struct {
-	Embedding ContentEmbedding `json:"embedding"`
+type GeminiBatchEmbeddingRequest struct {
+	Requests []*GeminiEmbeddingRequest `json:"requests"`
 }
-type ContentEmbedding struct {
+type GeminiEmbedding struct {
 	Values []float64 `json:"values"`
 }
+type GeminiBatchEmbeddingResponse struct {
+	Embeddings []*GeminiEmbedding `json:"embeddings"`
+}

View File

@@ -78,6 +78,8 @@ func (r *GeneralOpenAIRequest) GetSystemRoleName() string {
if !strings.HasPrefix(r.Model, "o1-mini") && !strings.HasPrefix(r.Model, "o1-preview") {
return "developer"
}
} else if strings.HasPrefix(r.Model, "gpt-5") {
return "developer"
}
return "system"
}
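For reference, a compilable sketch of the role-selection rule after this change. The enclosing o-series branch is not visible in the hunk, so the outer prefix check here is an assumption:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // systemRoleName mirrors GetSystemRoleName: o-series models other than
    // o1-mini/o1-preview, and now all gpt-5 models, take the "developer" role;
    // everything else keeps "system".
    func systemRoleName(model string) string {
    	if strings.HasPrefix(model, "o") { // assumption: outer branch checks the o-series prefix
    		if !strings.HasPrefix(model, "o1-mini") && !strings.HasPrefix(model, "o1-preview") {
    			return "developer"
    		}
    	} else if strings.HasPrefix(model, "gpt-5") {
    		return "developer"
    	}
    	return "system"
    }

    func main() {
    	for _, m := range []string{"o3", "o1-mini", "gpt-5-mini", "gpt-4o"} {
    		fmt.Println(m, "->", systemRoleName(m))
    	}
    	// o3 -> developer, o1-mini -> system, gpt-5-mini -> developer, gpt-4o -> system
    }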

View File

@@ -267,6 +267,8 @@ func SetupContextForSelectedChannel(c *gin.Context, channel *model.Channel, mode
common.SetContextKey(c, constant.ContextKeyChannelKey, key)
common.SetContextKey(c, constant.ContextKeyChannelBaseUrl, channel.GetBaseURL())
common.SetContextKey(c, constant.ContextKeySystemPromptOverride, false)
// TODO: unify api_version
switch channel.Type {
case constant.ChannelTypeAzure:

View File

@@ -114,7 +114,7 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
if strings.HasPrefix(info.UpstreamModelName, "text-embedding") ||
strings.HasPrefix(info.UpstreamModelName, "embedding") ||
strings.HasPrefix(info.UpstreamModelName, "gemini-embedding") {
return fmt.Sprintf("%s/%s/models/%s:embedContent", info.BaseUrl, version, info.UpstreamModelName), nil
return fmt.Sprintf("%s/%s/models/%s:batchEmbedContents", info.BaseUrl, version, info.UpstreamModelName), nil
}
action := "generateContent"
@@ -159,29 +159,35 @@ func (a *Adaptor) ConvertEmbeddingRequest(c *gin.Context, info *relaycommon.Rela
if len(inputs) == 0 {
return nil, errors.New("input is empty")
}
-	// only process the first input
-	geminiRequest := dto.GeminiEmbeddingRequest{
-		Content: dto.GeminiChatContent{
-			Parts: []dto.GeminiPart{
-				{
-					Text: inputs[0],
+	// process all inputs
+	geminiRequests := make([]map[string]interface{}, 0, len(inputs))
+	for _, input := range inputs {
+		geminiRequest := map[string]interface{}{
+			"model": fmt.Sprintf("models/%s", info.UpstreamModelName),
+			"content": dto.GeminiChatContent{
+				Parts: []dto.GeminiPart{
+					{
+						Text: input,
+					},
 				},
 			},
-		},
-	}
-	// set specific parameters for different models
-	// https://ai.google.dev/api/embeddings?hl=zh-cn#method:-models.embedcontent
-	switch info.UpstreamModelName {
-	case "text-embedding-004":
-		// every model except embedding-001 supports setting `OutputDimensionality`
-		if request.Dimensions > 0 {
-			geminiRequest.OutputDimensionality = request.Dimensions
-		}
+		}
+		// set specific parameters for different models
+		// https://ai.google.dev/api/embeddings?hl=zh-cn#method:-models.embedcontent
+		switch info.UpstreamModelName {
+		case "text-embedding-004", "gemini-embedding-exp-03-07", "gemini-embedding-001":
+			// only newer models introduced after 2024 support OutputDimensionality
+			if request.Dimensions > 0 {
+				geminiRequest["outputDimensionality"] = request.Dimensions
+			}
+		}
+		geminiRequests = append(geminiRequests, geminiRequest)
 	}
-	return geminiRequest, nil
+	return map[string]interface{}{
+		"requests": geminiRequests,
+	}, nil
}
func (a *Adaptor) ConvertOpenAIResponsesRequest(c *gin.Context, info *relaycommon.RelayInfo, request dto.OpenAIResponsesRequest) (any, error) {
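Together with the URL change above (embedContent to batchEmbedContents), every input of an OpenAI embedding request now becomes one entry in a single batch request. A sketch of the body this conversion produces for two inputs against text-embedding-004 with dimensions=256, using plain maps as stand-ins for the dto types:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	inputs := []string{"hello", "world"}
    	requests := make([]map[string]interface{}, 0, len(inputs))
    	for _, in := range inputs {
    		requests = append(requests, map[string]interface{}{
    			"model": "models/text-embedding-004",
    			"content": map[string]interface{}{
    				"parts": []map[string]string{{"text": in}},
    			},
    			"outputDimensionality": 256,
    		})
    	}
    	body, _ := json.MarshalIndent(map[string]interface{}{"requests": requests}, "", "  ")
    	fmt.Println(string(body))
    	// {"requests":[{"content":{"parts":[{"text":"hello"}]},"model":"models/text-embedding-004",
    	//   "outputDimensionality":256}, ...]}
    }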

View File

@@ -1071,7 +1071,7 @@ func GeminiEmbeddingHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *h
return nil, types.NewOpenAIError(readErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
}
-	var geminiResponse dto.GeminiEmbeddingResponse
+	var geminiResponse dto.GeminiBatchEmbeddingResponse
if jsonErr := common.Unmarshal(responseBody, &geminiResponse); jsonErr != nil {
return nil, types.NewOpenAIError(jsonErr, types.ErrorCodeBadResponseBody, http.StatusInternalServerError)
}
@@ -1079,14 +1079,16 @@ func GeminiEmbeddingHandler(c *gin.Context, info *relaycommon.RelayInfo, resp *h
// convert to openai format response
openAIResponse := dto.OpenAIEmbeddingResponse{
Object: "list",
-		Data: []dto.OpenAIEmbeddingResponseItem{
-			{
-				Object:    "embedding",
-				Embedding: geminiResponse.Embedding.Values,
-				Index:     0,
-			},
-		},
-		Model: info.UpstreamModelName,
+		Data:  make([]dto.OpenAIEmbeddingResponseItem, 0, len(geminiResponse.Embeddings)),
+		Model: info.UpstreamModelName,
 	}
+	for i, embedding := range geminiResponse.Embeddings {
+		openAIResponse.Data = append(openAIResponse.Data, dto.OpenAIEmbeddingResponseItem{
+			Object:    "embedding",
+			Embedding: embedding.Values,
+			Index:     i,
+		})
+	}
// calculate usage
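The handler now fans the batch response back out into OpenAI list format, preserving input order through the index field. A self-contained sketch of that mapping, with local stand-ins for the dto types:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type geminiBatchEmbeddingResponse struct {
    	Embeddings []struct {
    		Values []float64 `json:"values"`
    	} `json:"embeddings"`
    }

    type openAIEmbeddingItem struct {
    	Object    string    `json:"object"`
    	Embedding []float64 `json:"embedding"`
    	Index     int       `json:"index"`
    }

    func main() {
    	raw := []byte(`{"embeddings":[{"values":[0.1,0.2]},{"values":[0.3,0.4]}]}`)
    	var g geminiBatchEmbeddingResponse
    	if err := json.Unmarshal(raw, &g); err != nil {
    		panic(err)
    	}
    	// one OpenAI item per Gemini embedding, index = position in the batch
    	items := make([]openAIEmbeddingItem, 0, len(g.Embeddings))
    	for i, e := range g.Embeddings {
    		items = append(items, openAIEmbeddingItem{Object: "embedding", Embedding: e.Values, Index: i})
    	}
    	out, _ := json.Marshal(items)
    	fmt.Println(string(out))
    	// [{"object":"embedding","embedding":[0.1,0.2],"index":0},
    	//  {"object":"embedding","embedding":[0.3,0.4],"index":1}]
    }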

View File

@@ -54,8 +54,7 @@ func (a *Adaptor) GetRequestURL(info *relaycommon.RelayInfo) (string, error) {
func (a *Adaptor) SetupRequestHeader(c *gin.Context, req *http.Header, info *relaycommon.RelayInfo) error {
channel.SetupApiRequestHeader(info, c, req)
-	token := getZhipuToken(info.ApiKey)
-	req.Set("Authorization", token)
+	req.Set("Authorization", "Bearer "+info.ApiKey)
return nil
}
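This swaps the self-signed HS256 JWT for the raw key sent as a standard Bearer token, which is what allows the whole getZhipuToken cache in the next file to be deleted. A minimal sketch of the new header, assuming a plain http.Request (the endpoint URL is shown for illustration only):

    package main

    import (
    	"fmt"
    	"net/http"
    	"strings"
    )

    func main() {
    	apiKey := "id.secret" // Zhipu keys are "id.secret" pairs; now sent verbatim
    	req, err := http.NewRequest(http.MethodPost,
    		"https://open.bigmodel.cn/api/paas/v4/chat/completions",
    		strings.NewReader("{}"))
    	if err != nil {
    		panic(err)
    	}
    	// No JWT signing, no token cache, no clock handling:
    	req.Header.Set("Authorization", "Bearer "+apiKey)
    	fmt.Println(req.Header.Get("Authorization"))
    }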

View File

@@ -1,69 +1,10 @@
package zhipu_4v
import (
"github.com/golang-jwt/jwt"
"one-api/common"
"one-api/dto"
"strings"
"sync"
"time"
)
// https://open.bigmodel.cn/doc/api#chatglm_std
// chatglm_std, chatglm_lite
// https://open.bigmodel.cn/api/paas/v3/model-api/chatglm_std/invoke
// https://open.bigmodel.cn/api/paas/v3/model-api/chatglm_std/sse-invoke
var zhipuTokens sync.Map
var expSeconds int64 = 24 * 3600
func getZhipuToken(apikey string) string {
data, ok := zhipuTokens.Load(apikey)
if ok {
tokenData := data.(tokenData)
if time.Now().Before(tokenData.ExpiryTime) {
return tokenData.Token
}
}
split := strings.Split(apikey, ".")
if len(split) != 2 {
common.SysError("invalid zhipu key: " + apikey)
return ""
}
id := split[0]
secret := split[1]
expMillis := time.Now().Add(time.Duration(expSeconds)*time.Second).UnixNano() / 1e6
expiryTime := time.Now().Add(time.Duration(expSeconds) * time.Second)
timestamp := time.Now().UnixNano() / 1e6
payload := jwt.MapClaims{
"api_key": id,
"exp": expMillis,
"timestamp": timestamp,
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, payload)
token.Header["alg"] = "HS256"
token.Header["sign_type"] = "SIGN"
tokenString, err := token.SignedString([]byte(secret))
if err != nil {
return ""
}
zhipuTokens.Store(apikey, tokenData{
Token: tokenString,
ExpiryTime: expiryTime,
})
return tokenString
}
func requestOpenAI2Zhipu(request dto.GeneralOpenAIRequest) *dto.GeneralOpenAIRequest {
messages := make([]dto.Message, 0, len(request.Messages))
for _, message := range request.Messages {

View File

@@ -201,6 +201,26 @@ func TextHelper(c *gin.Context) (newAPIError *types.NewAPIError) {
Content: relayInfo.ChannelSetting.SystemPrompt,
}
request.Messages = append([]dto.Message{systemMessage}, request.Messages...)
} else if relayInfo.ChannelSetting.SystemPromptOverride {
common.SetContextKey(c, constant.ContextKeySystemPromptOverride, true)
// if a system prompt is configured and override is allowed, prepend it to the user's system prompt
for i, message := range request.Messages {
if message.Role == request.GetSystemRoleName() {
if message.IsStringContent() {
request.Messages[i].SetStringContent(relayInfo.ChannelSetting.SystemPrompt + "\n" + message.StringContent())
} else {
contents := message.ParseContent()
contents = append([]dto.MediaContent{
{
Type: dto.ContentTypeText,
Text: relayInfo.ChannelSetting.SystemPrompt,
},
}, contents...)
request.Messages[i].Content = contents
}
break
}
}
}
}
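Unlike the existing branch above, which inserts the channel prompt as a separate leading system message, the override path prepends it to the user's own first system-role message and stops at the first match. A standalone sketch of the string-content case (message is a local stand-in for dto.Message):

    package main

    import "fmt"

    // Local stand-in for dto.Message, string content only.
    type message struct {
    	Role, Content string
    }

    // prependToSystemMessage mirrors the override branch above: it prepends the
    // channel prompt to the first system-role message and leaves the rest untouched.
    func prependToSystemMessage(msgs []message, systemRole, channelPrompt string) {
    	for i := range msgs {
    		if msgs[i].Role == systemRole {
    			msgs[i].Content = channelPrompt + "\n" + msgs[i].Content
    			break
    		}
    	}
    }

    func main() {
    	msgs := []message{
    		{Role: "system", Content: "You are a helpful assistant."},
    		{Role: "user", Content: "Hi"},
    	}
    	prependToSystemMessage(msgs, "system", "Always answer in English.")
    	fmt.Println(msgs[0].Content) // prints the channel prompt, then the original on the next line
    }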

View File

@@ -28,6 +28,12 @@ func GenerateTextOtherInfo(ctx *gin.Context, relayInfo *relaycommon.RelayInfo, m
other["is_model_mapped"] = true
other["upstream_model_name"] = relayInfo.UpstreamModelName
}
isSystemPromptOverwritten := common.GetContextKeyBool(ctx, constant.ContextKeySystemPromptOverride)
if isSystemPromptOverwritten {
other["is_system_prompt_overwritten"] = true
}
adminInfo := make(map[string]interface{})
adminInfo["use_channel"] = ctx.GetStringSlice("use_channel")
isMultiKey := common.GetContextKeyBool(ctx, constant.ContextKeyChannelIsMultiKey)

View File

@@ -131,6 +131,7 @@ const EditChannelModal = (props) => {
proxy: '',
pass_through_body_enabled: false,
system_prompt: '',
system_prompt_override: false,
};
const [batch, setBatch] = useState(false);
const [multiToSingle, setMultiToSingle] = useState(false);
@@ -340,12 +341,15 @@ const EditChannelModal = (props) => {
data.proxy = parsedSettings.proxy || '';
data.pass_through_body_enabled = parsedSettings.pass_through_body_enabled || false;
data.system_prompt = parsedSettings.system_prompt || '';
data.system_prompt_override = parsedSettings.system_prompt_override || false;
} catch (error) {
console.error('Failed to parse channel settings:', error);
data.force_format = false;
data.thinking_to_content = false;
data.proxy = '';
data.pass_through_body_enabled = false;
data.system_prompt = '';
data.system_prompt_override = false;
}
} else {
data.force_format = false;
@@ -353,6 +357,7 @@ const EditChannelModal = (props) => {
data.proxy = '';
data.pass_through_body_enabled = false;
data.system_prompt = '';
data.system_prompt_override = false;
}
setInputs(data);
@@ -372,6 +377,7 @@ const EditChannelModal = (props) => {
proxy: data.proxy,
pass_through_body_enabled: data.pass_through_body_enabled,
system_prompt: data.system_prompt,
system_prompt_override: data.system_prompt_override || false,
});
// console.log(data);
} else {
@@ -573,6 +579,7 @@ const EditChannelModal = (props) => {
proxy: '',
pass_through_body_enabled: false,
system_prompt: '',
system_prompt_override: false,
});
// reset key mode state
setKeyMode('append');
@@ -721,6 +728,7 @@ const EditChannelModal = (props) => {
proxy: localInputs.proxy || '',
pass_through_body_enabled: localInputs.pass_through_body_enabled || false,
system_prompt: localInputs.system_prompt || '',
system_prompt_override: localInputs.system_prompt_override || false,
};
localInputs.setting = JSON.stringify(channelExtraSettings);
@@ -730,6 +738,7 @@ const EditChannelModal = (props) => {
delete localInputs.proxy;
delete localInputs.pass_through_body_enabled;
delete localInputs.system_prompt;
delete localInputs.system_prompt_override;
let res;
localInputs.auto_ban = localInputs.auto_ban ? 1 : 0;
@@ -1722,6 +1731,14 @@ const EditChannelModal = (props) => {
showClear
extraText={t('用户优先:如果用户在请求中指定了系统提示词,将优先使用用户的设置')}
/>
<Form.Switch
field='system_prompt_override'
label={t('系统提示词拼接')}
checkedText={t('开')}
uncheckedText={t('关')}
onChange={(value) => handleChannelSettingsChange('system_prompt_override', value)}
extraText={t('如果用户请求中包含系统提示词,则使用此设置拼接到用户的系统提示词前面')}
/>
</Card>
</div>
</Spin>

View File

@@ -211,6 +211,7 @@ export const getTaskLogsColumns = ({
copyText,
openContentModal,
isAdminUser,
openVideoModal,
}) => {
return [
{
@@ -342,7 +343,13 @@ export const getTaskLogsColumns = ({
const isUrl = typeof text === 'string' && /^https?:\/\//.test(text);
if (isSuccess && isVideoTask && isUrl) {
return (
<a href={text} target="_blank" rel="noopener noreferrer">
<a
href="#"
onClick={e => {
e.preventDefault();
openVideoModal(text);
}}
>
{t('点击预览视频')}
</a>
);

View File

@@ -39,6 +39,7 @@ const TaskLogsTable = (taskLogsData) => {
handlePageSizeChange,
copyText,
openContentModal,
openVideoModal,
isAdminUser,
t,
COLUMN_KEYS,
@@ -51,6 +52,7 @@ const TaskLogsTable = (taskLogsData) => {
COLUMN_KEYS,
copyText,
openContentModal,
openVideoModal,
isAdminUser,
});
}, [
@@ -58,6 +60,7 @@ const TaskLogsTable = (taskLogsData) => {
COLUMN_KEYS,
copyText,
openContentModal,
openVideoModal,
isAdminUser,
]);

View File

@@ -37,7 +37,14 @@ const TaskLogsPage = () => {
<>
{/* Modals */}
<ColumnSelectorModal {...taskLogsData} />
<ContentModal {...taskLogsData} />
<ContentModal {...taskLogsData} isVideo={false} />
{/* 新增:视频预览弹窗 */}
<ContentModal
isModalOpen={taskLogsData.isVideoModalOpen}
setIsModalOpen={taskLogsData.setIsVideoModalOpen}
modalContent={taskLogsData.videoUrl}
isVideo={true}
/>
<Layout>
<CardPro

View File

@@ -24,6 +24,7 @@ const ContentModal = ({
isModalOpen,
setIsModalOpen,
modalContent,
isVideo,
}) => {
return (
<Modal
@@ -34,7 +35,11 @@ const ContentModal = ({
bodyStyle={{ height: '400px', overflow: 'auto' }}
width={800}
>
<p style={{ whiteSpace: 'pre-line' }}>{modalContent}</p>
{isVideo ? (
<video src={modalContent} controls style={{ width: '100%' }} autoPlay />
) : (
<p style={{ whiteSpace: 'pre-line' }}>{modalContent}</p>
)}
</Modal>
);
};

View File

@@ -34,7 +34,6 @@ import {
getLogOther,
renderModelTag,
renderClaudeLogContent,
-  renderClaudeModelPriceSimple,
renderLogContent,
renderModelPriceSimple,
renderAudioModelPrice,
@@ -538,7 +537,7 @@ export const getLogsColumns = ({
);
}
let content = other?.claude
-      ? renderClaudeModelPriceSimple(
+      ? renderModelPriceSimple(
other.model_ratio,
other.model_price,
other.group_ratio,
@@ -547,6 +546,10 @@ export const getLogsColumns = ({
other.cache_ratio || 1.0,
other.cache_creation_tokens || 0,
other.cache_creation_ratio || 1.0,
false,
1.0,
other?.is_system_prompt_overwritten,
'claude'
)
: renderModelPriceSimple(
other.model_ratio,
@@ -555,13 +558,19 @@ export const getLogsColumns = ({
other?.user_group_ratio,
other.cache_tokens || 0,
other.cache_ratio || 1.0,
0,
1.0,
false,
1.0,
other?.is_system_prompt_overwritten,
'openai'
);
return (
<Typography.Paragraph
ellipsis={{
-          rows: 2,
+          rows: 3,
         }}
-        style={{ maxWidth: 240 }}
+        style={{ maxWidth: 240, whiteSpace: 'pre-line' }}
>
{content}
</Typography.Paragraph>

View File

@@ -215,14 +215,16 @@ export async function getOAuthState() {
export async function onOIDCClicked(auth_url, client_id, openInNewTab = false) {
const state = await getOAuthState();
if (!state) return;
-  const redirect_uri = `${window.location.origin}/oauth/oidc`;
-  const response_type = 'code';
-  const scope = 'openid profile email';
-  const url = `${auth_url}?client_id=${client_id}&redirect_uri=${redirect_uri}&response_type=${response_type}&scope=${scope}&state=${state}`;
+  const url = new URL(auth_url);
+  url.searchParams.set('client_id', client_id);
+  url.searchParams.set('redirect_uri', `${window.location.origin}/oauth/oidc`);
+  url.searchParams.set('response_type', 'code');
+  url.searchParams.set('scope', 'openid profile email');
+  url.searchParams.set('state', state);
   if (openInNewTab) {
-    window.open(url);
+    window.open(url.toString(), '_blank');
   } else {
-    window.location.href = url;
+    window.location.href = url.toString();
   }
}
}

View File

@@ -953,6 +953,71 @@ function getEffectiveRatio(groupRatio, user_group_ratio) {
};
}
// Shared core for simple price rendering (used by OpenAI-like and Claude-like variants)
function renderPriceSimpleCore({
modelRatio,
modelPrice = -1,
groupRatio,
user_group_ratio,
cacheTokens = 0,
cacheRatio = 1.0,
cacheCreationTokens = 0,
cacheCreationRatio = 1.0,
image = false,
imageRatio = 1.0,
isSystemPromptOverride = false
}) {
const { ratio: effectiveGroupRatio, label: ratioLabel } = getEffectiveRatio(
groupRatio,
user_group_ratio,
);
const finalGroupRatio = effectiveGroupRatio;
if (modelPrice !== -1) {
return i18next.t('价格:${{price}} * {{ratioType}}{{ratio}}', {
price: modelPrice,
ratioType: ratioLabel,
ratio: finalGroupRatio,
});
}
const parts = [];
// base: model ratio
parts.push(i18next.t('模型: {{ratio}}'));
// cache part (label differs when with image)
if (cacheTokens !== 0) {
parts.push(i18next.t('缓存: {{cacheRatio}}'));
}
// cache creation part (Claude specific if passed)
if (cacheCreationTokens !== 0) {
parts.push(i18next.t('缓存创建: {{cacheCreationRatio}}'));
}
// image part
if (image) {
parts.push(i18next.t('图片输入: {{imageRatio}}'));
}
parts.push(`{{ratioType}}: {{groupRatio}}`);
let result = i18next.t(parts.join(' * '), {
ratio: modelRatio,
ratioType: ratioLabel,
groupRatio: finalGroupRatio,
cacheRatio: cacheRatio,
cacheCreationRatio: cacheCreationRatio,
imageRatio: imageRatio,
})
if (isSystemPromptOverride) {
result += '\n' + i18next.t('系统提示覆盖');
}
return result;
}
export function renderModelPrice(
inputTokens,
completionTokens,
@@ -1245,56 +1310,26 @@ export function renderModelPriceSimple(
   user_group_ratio,
   cacheTokens = 0,
   cacheRatio = 1.0,
+  cacheCreationTokens = 0,
+  cacheCreationRatio = 1.0,
   image = false,
   imageRatio = 1.0,
+  isSystemPromptOverride = false,
+  provider = 'openai',
 ) {
-  const { ratio: effectiveGroupRatio, label: ratioLabel } = getEffectiveRatio(groupRatio, user_group_ratio);
-  groupRatio = effectiveGroupRatio;
-  if (modelPrice !== -1) {
-    return i18next.t('价格:${{price}} * {{ratioType}}{{ratio}}', {
-      price: modelPrice,
-      ratioType: ratioLabel,
-      ratio: groupRatio,
-    });
-  } else {
-    if (image && cacheTokens !== 0) {
-      return i18next.t(
-        '模型: {{ratio}} * {{ratioType}}: {{groupRatio}} * 缓存倍率: {{cacheRatio}} * 图片输入倍率: {{imageRatio}}',
-        {
-          ratio: modelRatio,
-          ratioType: ratioLabel,
-          groupRatio: groupRatio,
-          cacheRatio: cacheRatio,
-          imageRatio: imageRatio,
-        },
-      );
-    } else if (image) {
-      return i18next.t(
-        '模型: {{ratio}} * {{ratioType}}: {{groupRatio}} * 图片输入倍率: {{imageRatio}}',
-        {
-          ratio: modelRatio,
-          ratioType: ratioLabel,
-          groupRatio: groupRatio,
-          imageRatio: imageRatio,
-        },
-      );
-    } else if (cacheTokens !== 0) {
-      return i18next.t(
-        '模型: {{ratio}} * 分组: {{groupRatio}} * 缓存: {{cacheRatio}}',
-        {
-          ratio: modelRatio,
-          groupRatio: groupRatio,
-          cacheRatio: cacheRatio,
-        },
-      );
-    } else {
-      return i18next.t('模型: {{ratio}} * {{ratioType}}{{groupRatio}}', {
-        ratio: modelRatio,
-        ratioType: ratioLabel,
-        groupRatio: groupRatio,
-      });
-    }
-  }
+  return renderPriceSimpleCore({
+    modelRatio,
+    modelPrice,
+    groupRatio,
+    user_group_ratio,
+    cacheTokens,
+    cacheRatio,
+    cacheCreationTokens,
+    cacheCreationRatio,
+    image,
+    imageRatio,
+    isSystemPromptOverride
+  });
}
export function renderAudioModelPrice(
@@ -1635,46 +1670,7 @@ export function renderClaudeLogContent(
}
}
-export function renderClaudeModelPriceSimple(
-  modelRatio,
-  modelPrice = -1,
-  groupRatio,
-  user_group_ratio,
-  cacheTokens = 0,
-  cacheRatio = 1.0,
-  cacheCreationTokens = 0,
-  cacheCreationRatio = 1.0,
-) {
-  const { ratio: effectiveGroupRatio, label: ratioLabel } = getEffectiveRatio(groupRatio, user_group_ratio);
-  groupRatio = effectiveGroupRatio;
-  if (modelPrice !== -1) {
-    return i18next.t('价格:${{price}} * {{ratioType}}{{ratio}}', {
-      price: modelPrice,
-      ratioType: ratioLabel,
-      ratio: groupRatio,
-    });
-  } else {
-    if (cacheTokens !== 0 || cacheCreationTokens !== 0) {
-      return i18next.t(
-        '模型: {{ratio}} * {{ratioType}}: {{groupRatio}} * 缓存: {{cacheRatio}}',
-        {
-          ratio: modelRatio,
-          ratioType: ratioLabel,
-          groupRatio: groupRatio,
-          cacheRatio: cacheRatio,
-          cacheCreationRatio: cacheCreationRatio,
-        },
-      );
-    } else {
-      return i18next.t('模型: {{ratio}} * {{ratioType}}: {{groupRatio}}', {
-        ratio: modelRatio,
-        ratioType: ratioLabel,
-        groupRatio: groupRatio,
-      });
-    }
-  }
-}
+// Unified into renderModelPriceSimple; switch any remaining references to pass provider='claude'.
/**
* rehype 插件:将段落等文本节点拆分为逐词 <span>,并添加淡入动画 class。

View File

@@ -65,6 +65,10 @@ export const useTaskLogsData = () => {
const [isModalOpen, setIsModalOpen] = useState(false);
const [modalContent, setModalContent] = useState('');
// New: video preview modal state
const [isVideoModalOpen, setIsVideoModalOpen] = useState(false);
const [videoUrl, setVideoUrl] = useState('');
// Form state
const [formApi, setFormApi] = useState(null);
let now = new Date();
@@ -250,6 +254,12 @@ export const useTaskLogsData = () => {
setIsModalOpen(true);
};
// New: open the video preview modal
const openVideoModal = (url) => {
setVideoUrl(url);
setIsVideoModalOpen(true);
};
// Initialize data
useEffect(() => {
const localPageSize = parseInt(localStorage.getItem('task-page-size')) || ITEMS_PER_PAGE;
@@ -271,6 +281,11 @@ export const useTaskLogsData = () => {
setIsModalOpen,
modalContent,
// New: video modal state
isVideoModalOpen,
setIsVideoModalOpen,
videoUrl,
// Form state
formApi,
setFormApi,
@@ -297,6 +312,7 @@ export const useTaskLogsData = () => {
refresh,
copyText,
openContentModal,
openVideoModal, // new
enrichLogs,
syncPageData,

View File

@@ -1804,5 +1804,11 @@
"已选择 {{selected}} / {{total}}": "Selected {{selected}} / {{total}}",
"新获取的模型": "New models",
"已有的模型": "Existing models",
"搜索模型": "Search models"
"搜索模型": "Search models",
"缓存: {{cacheRatio}}": "Cache: {{cacheRatio}}",
"缓存创建: {{cacheCreationRatio}}": "Cache creation: {{cacheCreationRatio}}",
"图片输入: {{imageRatio}}": "Image input: {{imageRatio}}",
"系统提示覆盖": "System prompt override",
"模型: {{ratio}}": "Model: {{ratio}}",
"专属倍率": "Exclusive group ratio"
}