feat: ionet integrate (#2105)

* wip ionet integrate

* wip ionet integrate

* wip ionet integrate

* ollama wip

* wip

* feat: ionet integration & ollama manage

* fix merge conflict

* wip

* fix: test conn cors

* wip

* fix ionet

* fix ionet

* wip

* fix model select

* refactor: Remove `pkg/ionet` test files and update related Go source and web UI model deployment components.

* feat: Enhance model deployment UI with styling improvements, updated text, and a new description component.

* Revert "feat: Enhance model deployment UI with styling improvements, updated text, and a new description component."

This reverts commit 8b75cb5bf0d1a534b339df8c033be9a6c7df7964.
This commit is contained in:
Seefs
2025-12-28 15:55:35 +08:00
committed by GitHub
parent 984ae32667
commit b10f1f7b85
51 changed files with 11895 additions and 369 deletions

View File

@@ -47,7 +47,8 @@ import {
import { FaRandom } from 'react-icons/fa';
// Render functions
const renderType = (type, channelInfo = undefined, t) => {
const renderType = (type, record = {}, t) => {
const channelInfo = record?.channel_info;
let type2label = new Map();
for (let i = 0; i < CHANNEL_OPTIONS.length; i++) {
type2label[CHANNEL_OPTIONS[i].value] = CHANNEL_OPTIONS[i];
@@ -71,11 +72,65 @@ const renderType = (type, channelInfo = undefined, t) => {
);
}
return (
const typeTag = (
<Tag color={type2label[type]?.color} shape='circle' prefixIcon={icon}>
{type2label[type]?.label}
</Tag>
);
let ionetMeta = null;
if (record?.other_info) {
try {
const parsed = JSON.parse(record.other_info);
if (parsed && typeof parsed === 'object' && parsed.source === 'ionet') {
ionetMeta = parsed;
}
} catch (error) {
// ignore invalid metadata
}
}
if (!ionetMeta) {
return typeTag;
}
const handleNavigate = (event) => {
event?.stopPropagation?.();
if (!ionetMeta?.deployment_id) {
return;
}
const targetUrl = `/console/deployment?deployment_id=${ionetMeta.deployment_id}`;
window.open(targetUrl, '_blank', 'noopener');
};
return (
<Space spacing={6}>
{typeTag}
<Tooltip
content={
<div className='max-w-xs'>
<div className='text-xs text-gray-600'>{t('来源于 IO.NET 部署')}</div>
{ionetMeta?.deployment_id && (
<div className='text-xs text-gray-500 mt-1'>
{t('部署 ID')}: {ionetMeta.deployment_id}
</div>
)}
</div>
}
>
<span>
<Tag
color='purple'
type='light'
className='cursor-pointer'
onClick={handleNavigate}
>
IO.NET
</Tag>
</span>
</Tooltip>
</Space>
);
};
const renderTagType = (t) => {
@@ -231,6 +286,7 @@ export const getChannelsColumns = ({
refresh,
activePage,
channels,
checkOllamaVersion,
setShowMultiKeyManageModal,
setCurrentMultiKeyChannel,
}) => {
@@ -330,12 +386,7 @@ export const getChannelsColumns = ({
dataIndex: 'type',
render: (text, record, index) => {
if (record.children === undefined) {
if (record.channel_info) {
if (record.channel_info.is_multi_key) {
return <>{renderType(text, record.channel_info, t)}</>;
}
}
return <>{renderType(text, undefined, t)}</>;
return <>{renderType(text, record, t)}</>;
} else {
return <>{renderTagType(t)}</>;
}
@@ -569,6 +620,15 @@ export const getChannelsColumns = ({
},
];
if (record.type === 4) {
moreMenuItems.unshift({
node: 'item',
name: t('测活'),
type: 'tertiary',
onClick: () => checkOllamaVersion(record),
});
}
return (
<Space wrap>
<SplitButtonGroup

View File

@@ -57,6 +57,7 @@ const ChannelsTable = (channelsData) => {
setEditingTag,
copySelectedChannel,
refresh,
checkOllamaVersion,
// Multi-key management
setShowMultiKeyManageModal,
setCurrentMultiKeyChannel,
@@ -82,6 +83,7 @@ const ChannelsTable = (channelsData) => {
refresh,
activePage,
channels,
checkOllamaVersion,
setShowMultiKeyManageModal,
setCurrentMultiKeyChannel,
});
@@ -103,6 +105,7 @@ const ChannelsTable = (channelsData) => {
refresh,
activePage,
channels,
checkOllamaVersion,
setShowMultiKeyManageModal,
setCurrentMultiKeyChannel,
]);

View File

@@ -55,6 +55,7 @@ import {
selectFilter,
} from '../../../../helpers';
import ModelSelectModal from './ModelSelectModal';
import OllamaModelModal from './OllamaModelModal';
import JSONEditor from '../../../common/ui/JSONEditor';
import SecureVerificationModal from '../../../common/modals/SecureVerificationModal';
import ChannelKeyDisplay from '../../../common/ui/ChannelKeyDisplay';
@@ -180,6 +181,7 @@ const EditChannelModal = (props) => {
const [isModalOpenurl, setIsModalOpenurl] = useState(false);
const [modelModalVisible, setModelModalVisible] = useState(false);
const [fetchedModels, setFetchedModels] = useState([]);
const [ollamaModalVisible, setOllamaModalVisible] = useState(false);
const formApiRef = useRef(null);
const [vertexKeys, setVertexKeys] = useState([]);
const [vertexFileList, setVertexFileList] = useState([]);
@@ -214,6 +216,8 @@ const EditChannelModal = (props) => {
return [];
}
}, [inputs.model_mapping]);
const [isIonetChannel, setIsIonetChannel] = useState(false);
const [ionetMetadata, setIonetMetadata] = useState(null);
// 密钥显示状态
const [keyDisplayState, setKeyDisplayState] = useState({
@@ -224,6 +228,21 @@ const EditChannelModal = (props) => {
// 专门的2FA验证状态用于TwoFactorAuthModal
const [show2FAVerifyModal, setShow2FAVerifyModal] = useState(false);
const [verifyCode, setVerifyCode] = useState('');
useEffect(() => {
if (!isEdit) {
setIsIonetChannel(false);
setIonetMetadata(null);
}
}, [isEdit]);
const handleOpenIonetDeployment = () => {
if (!ionetMetadata?.deployment_id) {
return;
}
const targetUrl = `/console/deployment?deployment_id=${ionetMetadata.deployment_id}`;
window.open(targetUrl, '_blank', 'noopener');
};
const [verifyLoading, setVerifyLoading] = useState(false);
// 表单块导航相关状态
@@ -404,7 +423,12 @@ const EditChannelModal = (props) => {
handleInputChange('settings', settingsJson);
};
const isIonetLocked = isIonetChannel && isEdit;
const handleInputChange = (name, value) => {
if (isIonetChannel && isEdit && ['type', 'key', 'base_url'].includes(name)) {
return;
}
if (formApiRef.current) {
formApiRef.current.setValue(name, value);
}
@@ -625,6 +649,25 @@ const EditChannelModal = (props) => {
.map((model) => (model || '').trim())
.filter(Boolean);
initialModelMappingRef.current = data.model_mapping || '';
let parsedIonet = null;
if (data.other_info) {
try {
const maybeMeta = JSON.parse(data.other_info);
if (
maybeMeta &&
typeof maybeMeta === 'object' &&
maybeMeta.source === 'ionet'
) {
parsedIonet = maybeMeta;
}
} catch (error) {
// ignore parse error
}
}
const managedByIonet = !!parsedIonet;
setIsIonetChannel(managedByIonet);
setIonetMetadata(parsedIonet);
// console.log(data);
} else {
showError(message);
@@ -632,7 +675,8 @@ const EditChannelModal = (props) => {
setLoading(false);
};
const fetchUpstreamModelList = async (name) => {
const fetchUpstreamModelList = async (name, options = {}) => {
const silent = !!options.silent;
// if (inputs['type'] !== 1) {
// showError(t('仅支持 OpenAI 接口格式'));
// return;
@@ -683,7 +727,9 @@ const EditChannelModal = (props) => {
if (!err) {
const uniqueModels = Array.from(new Set(models));
setFetchedModels(uniqueModels);
setModelModalVisible(true);
if (!silent) {
setModelModalVisible(true);
}
} else {
showError(t('获取模型列表失败'));
}
@@ -1626,20 +1672,44 @@ const EditChannelModal = (props) => {
</div>
</div>
<Form.Select
field='type'
label={t('类型')}
placeholder={t('请选择渠道类型')}
rules={[{ required: true, message: t('请选择渠道类型') }]}
optionList={channelOptionList}
style={{ width: '100%' }}
filter={selectFilter}
autoClearSearchValue={false}
searchPosition='dropdown'
onSearch={(value) => setChannelSearchValue(value)}
renderOptionItem={renderChannelOption}
onChange={(value) => handleInputChange('type', value)}
/>
{isIonetChannel && (
<Banner
type='info'
closeIcon={null}
className='mb-4 rounded-xl'
description={t('此渠道由 IO.NET 自动同步,类型、密钥和 API 地址已锁定。')}
>
<Space>
{ionetMetadata?.deployment_id && (
<Button
size='small'
theme='light'
type='primary'
icon={<IconGlobe />}
onClick={handleOpenIonetDeployment}
>
{t('查看关联部署')}
</Button>
)}
</Space>
</Banner>
)}
<Form.Select
field='type'
label={t('类型')}
placeholder={t('请选择渠道类型')}
rules={[{ required: true, message: t('请选择渠道类型') }]}
optionList={channelOptionList}
style={{ width: '100%' }}
filter={selectFilter}
autoClearSearchValue={false}
searchPosition='dropdown'
onSearch={(value) => setChannelSearchValue(value)}
renderOptionItem={renderChannelOption}
onChange={(value) => handleInputChange('type', value)}
disabled={isIonetLocked}
/>
{inputs.type === 20 && (
<Form.Switch
@@ -1778,87 +1848,86 @@ const EditChannelModal = (props) => {
autosize
autoComplete='new-password'
onChange={(value) => handleInputChange('key', value)}
extraText={
<div className='flex items-center gap-2 flex-wrap'>
{isEdit &&
isMultiKeyChannel &&
keyMode === 'append' && (
<Text type='warning' size='small'>
{t(
'追加模式:新密钥将添加到现有密钥列表的末尾',
)}
</Text>
)}
{isEdit && (
disabled={isIonetLocked}
extraText={
<div className='flex items-center gap-2 flex-wrap'>
{isEdit &&
isMultiKeyChannel &&
keyMode === 'append' && (
<Text type='warning' size='small'>
{t(
'追加模式:新密钥将添加到现有密钥列表的末尾',
)}
</Text>
)}
{isEdit && (
<Button
size='small'
type='primary'
theme='outline'
onClick={handleShow2FAModal}
>
{t('查看密钥')}
</Button>
)}
{batchExtra}
</div>
}
showClear
/>
)
) : (
<>
{inputs.type === 41 &&
(inputs.vertex_key_type || 'json') === 'json' ? (
<>
{!batch && (
<div className='flex items-center justify-between mb-3'>
<Text className='text-sm font-medium'>
{t('密钥输入方式')}
</Text>
<Space>
<Button
size='small'
type='primary'
theme='outline'
onClick={handleShow2FAModal}
type={
!useManualInput ? 'primary' : 'tertiary'
}
onClick={() => {
setUseManualInput(false);
// 切换到文件上传模式时清空手动输入的密钥
if (formApiRef.current) {
formApiRef.current.setValue('key', '');
}
handleInputChange('key', '');
}}
>
{t('查看密钥')}
{t('文件上传')}
</Button>
)}
{batchExtra}
<Button
size='small'
type={useManualInput ? 'primary' : 'tertiary'}
onClick={() => {
setUseManualInput(true);
// 切换到手动输入模式时清空文件上传相关状态
setVertexKeys([]);
setVertexFileList([]);
if (formApiRef.current) {
formApiRef.current.setValue(
'vertex_files',
[],
);
}
setInputs((prev) => ({
...prev,
vertex_files: [],
}));
}}
>
{t('手动输入')}
</Button>
</Space>
</div>
}
showClear
/>
)
) : (
<>
{inputs.type === 41 &&
(inputs.vertex_key_type || 'json') === 'json' ? (
<>
{!batch && (
<div className='flex items-center justify-between mb-3'>
<Text className='text-sm font-medium'>
{t('密钥输入方式')}
</Text>
<Space>
<Button
size='small'
type={
!useManualInput ? 'primary' : 'tertiary'
}
onClick={() => {
setUseManualInput(false);
// 切换到文件上传模式时清空手动输入的密钥
if (formApiRef.current) {
formApiRef.current.setValue('key', '');
}
handleInputChange('key', '');
}}
>
{t('文件上传')}
</Button>
<Button
size='small'
type={
useManualInput ? 'primary' : 'tertiary'
}
onClick={() => {
setUseManualInput(true);
// 切换到手动输入模式时清空文件上传相关状态
setVertexKeys([]);
setVertexFileList([]);
if (formApiRef.current) {
formApiRef.current.setValue(
'vertex_files',
[],
);
}
setInputs((prev) => ({
...prev,
vertex_files: [],
}));
}}
>
{t('手动输入')}
</Button>
</Space>
</div>
)}
)}
{batch && (
<Banner
@@ -2189,84 +2258,86 @@ const EditChannelModal = (props) => {
/>
)}
{inputs.type === 3 && (
<>
<Banner
type='warning'
description={t(
'2025年5月10日后添加的渠道不需要再在部署的时候移除模型名称中的"."',
{inputs.type === 3 && (
<>
<Banner
type='warning'
description={t(
'2025年5月10日后添加的渠道不需要再在部署的时候移除模型名称中的"."',
)}
className='!rounded-lg'
/>
<div>
<Form.Input
field='base_url'
label='AZURE_OPENAI_ENDPOINT'
placeholder={t(
'请输入 AZURE_OPENAI_ENDPOINT例如https://docs-test-001.openai.azure.com',
)}
className='!rounded-lg'
onChange={(value) =>
handleInputChange('base_url', value)
}
showClear
disabled={isIonetLocked}
/>
<div>
<Form.Input
field='base_url'
label='AZURE_OPENAI_ENDPOINT'
placeholder={t(
'请输入 AZURE_OPENAI_ENDPOINT例如https://docs-test-001.openai.azure.com',
)}
onChange={(value) =>
handleInputChange('base_url', value)
}
showClear
/>
</div>
<div>
<Form.Input
field='other'
label={t('默认 API 版本')}
placeholder={t(
'请输入默认 API 版本例如2025-04-01-preview',
)}
onChange={(value) =>
handleInputChange('other', value)
}
showClear
/>
</div>
<div>
<Form.Input
field='azure_responses_version'
label={t(
'默认 Responses API 版本,为空则使用上方版本',
)}
placeholder={t('例如preview')}
onChange={(value) =>
handleChannelOtherSettingsChange(
'azure_responses_version',
value,
)
}
showClear
/>
</div>
</>
)}
</div>
<div>
<Form.Input
field='other'
label={t('默认 API 版本')}
placeholder={t(
'请输入默认 API 版本例如2025-04-01-preview',
)}
onChange={(value) =>
handleInputChange('other', value)
}
showClear
/>
</div>
<div>
<Form.Input
field='azure_responses_version'
label={t(
'默认 Responses API 版本,为空则使用上方版本',
)}
placeholder={t('例如preview')}
onChange={(value) =>
handleChannelOtherSettingsChange(
'azure_responses_version',
value,
)
}
showClear
/>
</div>
</>
)}
{inputs.type === 8 && (
<>
<Banner
type='warning'
description={t(
'如果你对接的是上游One API或者New API等转发项目请使用OpenAI类型不要使用此类型除非你知道你在做什么。',
{inputs.type === 8 && (
<>
<Banner
type='warning'
description={t(
'如果你对接的是上游One API或者New API等转发项目请使用OpenAI类型不要使用此类型除非你知道你在做什么。',
)}
className='!rounded-lg'
/>
<div>
<Form.Input
field='base_url'
label={t('完整的 Base URL支持变量{model}')}
placeholder={t(
'请输入完整的URL例如https://api.openai.com/v1/chat/completions',
)}
className='!rounded-lg'
onChange={(value) =>
handleInputChange('base_url', value)
}
showClear
disabled={isIonetLocked}
/>
<div>
<Form.Input
field='base_url'
label={t('完整的 Base URL支持变量{model}')}
placeholder={t(
'请输入完整的URL例如https://api.openai.com/v1/chat/completions',
)}
onChange={(value) =>
handleInputChange('base_url', value)
}
showClear
/>
</div>
</>
)}
</div>
</>
)}
{inputs.type === 37 && (
<Banner
@@ -2294,76 +2365,77 @@ const EditChannelModal = (props) => {
handleInputChange('base_url', value)
}
showClear
extraText={t(
'对于官方渠道new-api已经内置地址除非是第三方代理站点或者Azure的特殊接入地址否则不需要填写',
)}
/>
</div>
)}
{inputs.type === 22 && (
<div>
<Form.Input
field='base_url'
label={t('私有部署地址')}
placeholder={t(
'请输入私有部署地址格式为https://fastgpt.run/api/openapi',
disabled={isIonetLocked}
extraText={t(
'对于官方渠道new-api已经内置地址除非是第三方代理站点或者Azure的特殊接入地址否则不需要填写',
)}
onChange={(value) =>
handleInputChange('base_url', value)
}
showClear
/>
</div>
)}
{inputs.type === 36 && (
<div>
<Form.Input
field='base_url'
label={t(
'注意非Chat API请务必填写正确的API地址否则可能导致无法使用',
)}
placeholder={t(
'请输入到 /suno 前的路径通常就是域名例如https://api.example.com',
)}
onChange={(value) =>
handleInputChange('base_url', value)
}
showClear
/>
</div>
)}
{inputs.type === 22 && (
<div>
<Form.Input
field='base_url'
label={t('私有部署地址')}
placeholder={t(
'请输入私有部署地址格式为https://fastgpt.run/api/openapi',
)}
onChange={(value) =>
handleInputChange('base_url', value)
}
showClear
disabled={isIonetLocked}
/>
</div>
)}
{inputs.type === 45 && !doubaoApiEditUnlocked && (
<div>
<Form.Select
field='base_url'
label={t('API地址')}
placeholder={t('请选择API地址')}
onChange={(value) =>
{inputs.type === 36 && (
<div>
<Form.Input
field='base_url'
label={t(
'注意非Chat API请务必填写正确的API地址否则可能导致无法使用',
)}
placeholder={t(
'请输入到 /suno 前的路径通常就是域名例如https://api.example.com',
)}
onChange={(value) =>
handleInputChange('base_url', value)
}
showClear
disabled={isIonetLocked}
/>
</div>
)}
{inputs.type === 45 && !doubaoApiEditUnlocked && (
<div>
<Form.Select
field='base_url'
label={t('API地址')}
placeholder={t('请选择API地址')}
onChange={(value) =>
handleInputChange('base_url', value)
}
optionList={[
{
value: 'https://ark.cn-beijing.volces.com',
label: 'https://ark.cn-beijing.volces.com',
},
{
value:
'https://ark.ap-southeast.bytepluses.com',
label:
'https://ark.ap-southeast.bytepluses.com',
},
{
value: 'doubao-coding-plan',
}
optionList={[
{
value: 'https://ark.cn-beijing.volces.com',
label: 'https://ark.cn-beijing.volces.com',
},
{
value: 'https://ark.ap-southeast.bytepluses.com',
label: 'https://ark.ap-southeast.bytepluses.com',
},
{
value: 'doubao-coding-plan',
label: 'Doubao Coding Plan',
},
]}
defaultValue='https://ark.cn-beijing.volces.com'
/>
</div>
)}
]}defaultValue='https://ark.cn-beijing.volces.com'
disabled={isIonetLocked}
/>
</div>
)}
</Card>
</div>
)}
@@ -2458,72 +2530,80 @@ const EditChannelModal = (props) => {
{t('获取模型列表')}
</Button>
)}
{inputs.type === 4 && isEdit && (
<Button
size='small'
type='warning'
onClick={() => handleInputChange('models', [])}
type='primary'
theme='light'
onClick={() => setOllamaModalVisible(true)}
>
{t('清除所有模型')}
{t('Ollama 模型管理')}
</Button>
<Button
size='small'
type='tertiary'
onClick={() => {
if (inputs.models.length === 0) {
showInfo(t('没有模型可以复制'));
return;
}
try {
copy(inputs.models.join(','));
showSuccess(t('模型列表已复制到剪贴板'));
} catch (error) {
showError(t('复制失败'));
}
}}
>
{t('复制所有模型')}
</Button>
{modelGroups &&
modelGroups.length > 0 &&
modelGroups.map((group) => (
<Button
key={group.id}
size='small'
type='primary'
onClick={() => {
let items = [];
try {
if (Array.isArray(group.items)) {
items = group.items;
} else if (
typeof group.items === 'string'
) {
const parsed = JSON.parse(
group.items || '[]',
);
if (Array.isArray(parsed)) items = parsed;
}
} catch {}
const current =
formApiRef.current?.getValue('models') ||
inputs.models ||
[];
const merged = Array.from(
new Set(
[...current, ...items]
.map((m) => (m || '').trim())
.filter(Boolean),
),
);
handleInputChange('models', merged);
}}
>
{group.name}
</Button>
))}
</Space>
}
/>
)}
<Button
size='small'
type='warning'
onClick={() => handleInputChange('models', [])}
>
{t('清除所有模型')}
</Button>
<Button
size='small'
type='tertiary'
onClick={() => {
if (inputs.models.length === 0) {
showInfo(t('没有模型可以复制'));
return;
}
try {
copy(inputs.models.join(','));
showSuccess(t('模型列表已复制到剪贴板'));
} catch (error) {
showError(t('复制失败'));
}
}}
>
{t('复制所有模型')}
</Button>
{modelGroups &&
modelGroups.length > 0 &&
modelGroups.map((group) => (
<Button
key={group.id}
size='small'
type='primary'
onClick={() => {
let items = [];
try {
if (Array.isArray(group.items)) {
items = group.items;
} else if (typeof group.items === 'string') {
const parsed = JSON.parse(
group.items || '[]',
);
if (Array.isArray(parsed)) items = parsed;
}
} catch {}
const current =
formApiRef.current?.getValue('models') ||
inputs.models ||
[];
const merged = Array.from(
new Set(
[...current, ...items]
.map((m) => (m || '').trim())
.filter(Boolean),
),
);
handleInputChange('models', merged);
}}
>
{group.name}
</Button>
))}
</Space>
}
/>
<Form.Input
field='custom_model'
@@ -3083,6 +3163,33 @@ const EditChannelModal = (props) => {
}}
onCancel={() => setModelModalVisible(false)}
/>
<OllamaModelModal
visible={ollamaModalVisible}
onCancel={() => setOllamaModalVisible(false)}
channelId={channelId}
channelInfo={inputs}
onModelsUpdate={(options = {}) => {
// 当模型更新后,重新获取模型列表以更新表单
fetchUpstreamModelList('models', { silent: !!options.silent });
}}
onApplyModels={({ mode, modelIds } = {}) => {
if (!Array.isArray(modelIds) || modelIds.length === 0) {
return;
}
const existingModels = Array.isArray(inputs.models)
? inputs.models.map(String)
: [];
const incoming = modelIds.map(String);
const nextModels = Array.from(new Set([...existingModels, ...incoming]));
handleInputChange('models', nextModels);
if (formApiRef.current) {
formApiRef.current.setValue('models', nextModels);
}
showSuccess(t('模型列表已追加更新'));
}}
/>
</>
);
};

View File

@@ -47,7 +47,20 @@ const ModelSelectModal = ({
onCancel,
}) => {
const { t } = useTranslation();
const [checkedList, setCheckedList] = useState(selected);
const getModelName = (model) => {
if (!model) return '';
if (typeof model === 'string') return model;
if (typeof model === 'object' && model.model_name) return model.model_name;
return String(model ?? '');
};
const normalizedSelected = useMemo(
() => (selected || []).map(getModelName),
[selected],
);
const [checkedList, setCheckedList] = useState(normalizedSelected);
const [keyword, setKeyword] = useState('');
const [activeTab, setActiveTab] = useState('new');
@@ -105,9 +118,9 @@ const ModelSelectModal = ({
// 同步外部选中值
useEffect(() => {
if (visible) {
setCheckedList(selected);
setCheckedList(normalizedSelected);
}
}, [visible, selected]);
}, [visible, normalizedSelected]);
// 当模型列表变化时设置默认tab
useEffect(() => {

View File

@@ -0,0 +1,806 @@
/*
Copyright (C) 2025 QuantumNous
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
For commercial licensing, please contact support@quantumnous.com
*/
import React, { useState, useEffect } from 'react';
import { useTranslation } from 'react-i18next';
import {
Modal,
Button,
Typography,
Card,
List,
Space,
Input,
Spin,
Popconfirm,
Tag,
Avatar,
Empty,
Divider,
Row,
Col,
Progress,
Checkbox,
Radio,
} from '@douyinfe/semi-ui';
import {
IconClose,
IconDownload,
IconDelete,
IconRefresh,
IconSearch,
IconPlus,
IconServer,
} from '@douyinfe/semi-icons';
import {
API,
authHeader,
getUserIdFromLocalStorage,
showError,
showInfo,
showSuccess,
} from '../../../../helpers';
// Semi UI typography shorthands used throughout this modal.
const { Text, Title } = Typography;
// Channel type id that identifies an Ollama channel.
const CHANNEL_TYPE_OLLAMA = 4;
// Best-effort JSON helper: objects pass straight through, strings are
// parsed, and anything falsy, non-parseable, or of another type yields null.
const parseMaybeJSON = (value) => {
  if (!value) {
    return null;
  }
  switch (typeof value) {
    case 'object':
      return value;
    case 'string':
      try {
        return JSON.parse(value);
      } catch (error) {
        return null;
      }
    default:
      return null;
  }
};
// Determine the Ollama endpoint for a channel: the explicit base_url field
// wins, then ollama_base_url, then any URL hint embedded in the channel's
// other_info JSON metadata. Returns '' when nothing usable is found.
const resolveOllamaBaseUrl = (info) => {
  if (!info) {
    return '';
  }
  // Trim string values; any other type counts as absent.
  const asTrimmed = (value) => (typeof value === 'string' ? value.trim() : '');
  const direct = asTrimmed(info.base_url);
  if (direct) {
    return direct;
  }
  const alt = asTrimmed(info.ollama_base_url);
  if (alt) {
    return alt;
  }
  // Fall back to URL hints stored in the JSON metadata blob, in priority order.
  const meta = parseMaybeJSON(info.other_info);
  if (meta && typeof meta === 'object') {
    for (const field of ['base_url', 'public_url', 'api_url']) {
      const candidate = asTrimmed(meta[field]);
      if (candidate) {
        return candidate;
      }
    }
  }
  return '';
};
// Coerce a fetch_models response (plain strings or loosely shaped objects)
// into a uniform list of { id, owned_by, ... } records. Entries without a
// usable identifier are dropped; size/digest/modified_at/details are
// backfilled from a nested metadata object when present.
const normalizeModels = (items) => {
  if (!Array.isArray(items)) {
    return [];
  }
  // Normalize one raw entry, or return null to have it filtered out.
  const toRecord = (item) => {
    if (!item) {
      return null;
    }
    if (typeof item === 'string') {
      return { id: item, owned_by: 'ollama' };
    }
    if (typeof item !== 'object') {
      return null;
    }
    const id = item.id || item.ID || item.name || item.model || item.Model;
    if (!id) {
      return null;
    }
    const record = {
      ...item,
      id,
      owned_by: item.owned_by || item.ownedBy || 'ollama',
    };
    if (typeof item.size === 'number' && !record.size) {
      record.size = item.size;
    }
    const meta = item.metadata || item.Metadata;
    if (meta && typeof meta === 'object') {
      if (typeof meta.size === 'number' && !record.size) {
        record.size = meta.size;
      }
      if (!record.digest && typeof meta.digest === 'string') {
        record.digest = meta.digest;
      }
      if (!record.modified_at && typeof meta.modified_at === 'string') {
        record.modified_at = meta.modified_at;
      }
      if (meta.details && !record.details) {
        record.details = meta.details;
      }
    }
    return record;
  };
  return items.map(toRecord).filter(Boolean);
};
// Modal for managing models on an Ollama channel: pull new models with
// streaming progress, delete existing ones, and hand the selected model
// ids back to the parent channel form via onApplyModels.
const OllamaModelModal = ({
visible,
onCancel,
channelId,
channelInfo,
onModelsUpdate,
onApplyModels,
}) => {
const { t } = useTranslation();
// Remote model list plus its search-filtered projection.
const [loading, setLoading] = useState(false);
const [models, setModels] = useState([]);
const [filteredModels, setFilteredModels] = useState([]);
const [searchValue, setSearchValue] = useState('');
// Pull form state; pullProgress holds the latest SSE progress payload.
const [pullModelName, setPullModelName] = useState('');
const [pullLoading, setPullLoading] = useState(false);
const [pullProgress, setPullProgress] = useState(null);
// Closable handle for the active pull stream (AbortController wrapper,
// not a real EventSource — see pullModel).
const [eventSource, setEventSource] = useState(null);
// Model ids currently ticked for applying back to the channel form.
const [selectedModelIds, setSelectedModelIds] = useState([]);
// Forward the current selection to the parent form (append mode only);
// no-op when nothing is selected or no callback was provided.
const handleApplyAllModels = () => {
if (!onApplyModels || selectedModelIds.length === 0) {
return;
}
onApplyModels({ mode: 'append', modelIds: selectedModelIds });
};
// Toggle a single model id in the selection, avoiding duplicate entries.
const handleToggleModel = (modelId, checked) => {
if (!modelId) {
return;
}
setSelectedModelIds((prev) => {
if (checked) {
if (prev.includes(modelId)) {
return prev;
}
return [...prev, modelId];
}
return prev.filter((id) => id !== modelId);
});
};
// Select every model that has a usable id.
const handleSelectAll = () => {
setSelectedModelIds(models.map((item) => item?.id).filter(Boolean));
};
// Clear the selection entirely.
const handleClearSelection = () => {
setSelectedModelIds([]);
};
// Fetch the model list. For Ollama channels with a resolvable base URL we
// query the instance live via POST /api/channel/fetch_models; when that
// fails (or returns nothing) and a channel id exists, we fall back to the
// server-side GET endpoint. Errors are only surfaced if both paths fail.
const fetchModels = async () => {
const channelType = Number(channelInfo?.type ?? CHANNEL_TYPE_OLLAMA);
const shouldTryLiveFetch = channelType === CHANNEL_TYPE_OLLAMA;
const resolvedBaseUrl = resolveOllamaBaseUrl(channelInfo);
setLoading(true);
let liveFetchSucceeded = false;
let fallbackSucceeded = false;
let lastError = '';
let nextModels = [];
try {
// Path 1: query the Ollama instance directly using the form's base URL.
if (shouldTryLiveFetch && resolvedBaseUrl) {
try {
const payload = {
base_url: resolvedBaseUrl,
type: CHANNEL_TYPE_OLLAMA,
key: channelInfo?.key || '',
};
const res = await API.post('/api/channel/fetch_models', payload, {
skipErrorHandler: true,
});
if (res?.data?.success) {
nextModels = normalizeModels(res.data.data);
liveFetchSucceeded = true;
} else if (res?.data?.message) {
lastError = res.data.message;
}
} catch (error) {
const message = error?.response?.data?.message || error.message;
if (message) {
lastError = message;
}
}
} else if (shouldTryLiveFetch && !resolvedBaseUrl && !channelId) {
// No URL to query and no saved channel to fall back to.
lastError = t('请先填写 Ollama API 地址');
}
// Path 2: fall back to the saved channel's server-side fetch endpoint.
if ((!liveFetchSucceeded || nextModels.length === 0) && channelId) {
try {
const res = await API.get(`/api/channel/fetch_models/${channelId}`, {
skipErrorHandler: true,
});
if (res?.data?.success) {
nextModels = normalizeModels(res.data.data);
fallbackSucceeded = true;
lastError = '';
} else if (res?.data?.message) {
lastError = res.data.message;
}
} catch (error) {
const message = error?.response?.data?.message || error.message;
if (message) {
lastError = message;
}
}
}
// Only report an error when neither path produced a result.
if (!liveFetchSucceeded && !fallbackSucceeded && lastError) {
showError(`${t('获取模型列表失败')}: ${lastError}`);
}
const normalized = nextModels;
setModels(normalized);
setFilteredModels(normalized);
// Reconcile the selection with the refreshed list: default to selecting
// everything when the list is new or the previous selection vanished.
setSelectedModelIds((prev) => {
if (!normalized || normalized.length === 0) {
return [];
}
if (!prev || prev.length === 0) {
return normalized.map((item) => item.id).filter(Boolean);
}
const available = prev.filter((id) =>
normalized.some((item) => item.id === id),
);
return available.length > 0
? available
: normalized.map((item) => item.id).filter(Boolean);
});
} finally {
setLoading(false);
}
};
// Pull a model via the streaming endpoint, reporting progress over SSE.
// Uses fetch + ReadableStream rather than EventSource because the endpoint
// requires a POST body; an EventSource-like { close } wrapper around the
// AbortController is stored in `eventSource` so existing cleanup works.
const pullModel = async () => {
  if (!pullModelName.trim()) {
    showError(t('请输入模型名称'));
    return;
  }
  setPullLoading(true);
  setPullProgress({ status: 'starting', completed: 0, total: 0 });
  // Guard so the model list is refreshed at most once per pull attempt.
  let hasRefreshed = false;
  const refreshModels = async () => {
    if (hasRefreshed) return;
    hasRefreshed = true;
    await fetchModels();
    if (onModelsUpdate) {
      onModelsUpdate({ silent: true });
    }
  };
  try {
    // Close any previous stream before starting a new one.
    if (eventSource) {
      eventSource.close();
      setEventSource(null);
    }
    const controller = new AbortController();
    const closable = {
      close: () => controller.abort(),
    };
    setEventSource(closable);
    // Request the SSE stream with fetch (EventSource cannot send a POST body).
    const authHeaders = authHeader();
    const userId = getUserIdFromLocalStorage();
    const fetchHeaders = {
      'Content-Type': 'application/json',
      Accept: 'text/event-stream',
      'New-API-User': String(userId),
      ...authHeaders,
    };
    const response = await fetch('/api/channel/ollama/pull/stream', {
      method: 'POST',
      headers: fetchHeaders,
      body: JSON.stringify({
        channel_id: channelId,
        model_name: pullModelName.trim(),
      }),
      signal: controller.signal,
    });
    if (!response.ok) {
      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
    }
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    // Read and parse the SSE stream line by line.
    const processStream = async () => {
      try {
        while (true) {
          const { done, value } = await reader.read();
          if (done) break;
          buffer += decoder.decode(value, { stream: true });
          const lines = buffer.split('\n');
          buffer = lines.pop() || '';
          for (const line of lines) {
            if (!line.startsWith('data: ')) {
              continue;
            }
            try {
              const eventData = line.substring(6);
              if (eventData === '[DONE]') {
                setPullLoading(false);
                setPullProgress(null);
                setEventSource(null);
                return;
              }
              const data = JSON.parse(eventData);
              if (data.status) {
                // Progress update.
                setPullProgress(data);
              } else if (data.error) {
                // Server-side error: surface it and stop.
                showError(data.error);
                setPullProgress(null);
                setPullLoading(false);
                setEventSource(null);
                return;
              } else if (data.message) {
                // Success: reset the form and refresh the list exactly once.
                // BUGFIX: previously this branch called fetchModels() and
                // onModelsUpdate() directly AND then awaited refreshModels(),
                // which re-ran both (hasRefreshed was still false), causing a
                // duplicate refresh. Route through refreshModels() only.
                showSuccess(data.message);
                setPullModelName('');
                setPullProgress(null);
                setPullLoading(false);
                setEventSource(null);
                await refreshModels();
                return;
              }
            } catch (e) {
              console.error('Failed to parse SSE data:', e);
            }
          }
        }
        // Stream ended without an explicit terminal event.
        setPullLoading(false);
        setPullProgress(null);
        setEventSource(null);
        await refreshModels();
      } catch (error) {
        if (error?.name === 'AbortError') {
          // User-initiated cancel: just reset state, no refresh.
          setPullProgress(null);
          setPullLoading(false);
          setEventSource(null);
          return;
        }
        console.error('Stream processing error:', error);
        showError(t('数据传输中断'));
        setPullProgress(null);
        setPullLoading(false);
        setEventSource(null);
        await refreshModels();
      }
    };
    await processStream();
  } catch (error) {
    if (error?.name !== 'AbortError') {
      showError(t('模型拉取失败: {{error}}', { error: error.message }));
    }
    setPullLoading(false);
    setPullProgress(null);
    setEventSource(null);
    await refreshModels();
  }
};
// Delete a model from the Ollama instance, then refresh this modal's list
// and notify the parent form so its model field can be re-synced.
const deleteModel = async (modelName) => {
  try {
    const payload = {
      channel_id: channelId,
      model_name: modelName,
    };
    const res = await API.delete('/api/channel/ollama/delete', { data: payload });
    if (!res.data.success) {
      showError(res.data.message || t('模型删除失败'));
      return;
    }
    showSuccess(t('模型删除成功'));
    // Re-fetch the list shown here, then tell the parent to update silently.
    await fetchModels();
    if (onModelsUpdate) {
      onModelsUpdate({ silent: true });
    }
  } catch (error) {
    showError(t('模型删除失败: {{error}}', { error: error.message }));
  }
};
// Search filter: recompute the visible list (case-insensitive id match)
// whenever the model list or the keyword changes.
useEffect(() => {
if (!searchValue) {
setFilteredModels(models);
} else {
const filtered = models.filter(model =>
model.id.toLowerCase().includes(searchValue.toLowerCase())
);
setFilteredModels(filtered);
}
}, [models, searchValue]);
// Reset transient pull/selection state whenever the modal closes.
useEffect(() => {
if (!visible) {
setSelectedModelIds([]);
setPullModelName('');
setPullProgress(null);
setPullLoading(false);
}
}, [visible]);
// Fetch the model list when the modal opens or its channel context changes.
useEffect(() => {
if (!visible) {
return;
}
if (channelId || Number(channelInfo?.type) === CHANNEL_TYPE_OLLAMA) {
fetchModels();
}
}, [
visible,
channelId,
channelInfo?.type,
channelInfo?.base_url,
channelInfo?.other_info,
channelInfo?.ollama_base_url,
]);
// Close the active stream handle on unmount or when it is replaced.
useEffect(() => {
return () => {
if (eventSource) {
eventSource.close();
}
};
}, [eventSource]);
// Human-readable byte size: '-' for missing values, GB with one decimal
// when at least 1 GiB, otherwise whole MB.
const formatModelSize = (size) => {
  if (!size) {
    return '-';
  }
  const GIB = 1024 * 1024 * 1024;
  const MIB = 1024 * 1024;
  if (size >= GIB) {
    return `${(size / GIB).toFixed(1)} GB`;
  }
  return `${(size / MIB).toFixed(0)} MB`;
};
  // Render the management modal: a header with channel info, a "pull model"
  // card (name input + live progress), and a card listing installed models
  // with search, multi-select, add-to-channel, refresh, and delete actions.
  return (
    <Modal
      title={
        <div className='flex items-center'>
          <Avatar
            size='small'
            color='blue'
            className='mr-3 shadow-md'
          >
            <IconServer size={16} />
          </Avatar>
          <div>
            <Title heading={4} className='m-0'>
              {t('Ollama 模型管理')}
            </Title>
            <Text type='tertiary' size='small'>
              {channelInfo?.name && `${channelInfo.name} - `}
              {t('管理 Ollama 模型的拉取和删除')}
            </Text>
          </div>
        </div>
      }
      visible={visible}
      onCancel={onCancel}
      width={800}
      style={{ maxWidth: '95vw' }}
      footer={
        <div className='flex justify-end'>
          <Button
            theme='light'
            type='primary'
            onClick={onCancel}
            icon={<IconClose />}
          >
            {t('关闭')}
          </Button>
        </div>
      }
    >
      <div className='space-y-6'>
        {/* Pull a new model */}
        <Card className='!rounded-2xl shadow-sm border-0'>
          <div className='flex items-center mb-4'>
            <Avatar size='small' color='green' className='mr-2'>
              <IconPlus size={16} />
            </Avatar>
            <Title heading={5} className='m-0'>
              {t('拉取新模型')}
            </Title>
          </div>
          <Row gutter={12} align='middle'>
            <Col span={16}>
              <Input
                placeholder={t('请输入模型名称,例如: llama3.2, qwen2.5:7b')}
                value={pullModelName}
                onChange={(value) => setPullModelName(value)}
                onEnterPress={pullModel}
                disabled={pullLoading}
                showClear
              />
            </Col>
            <Col span={8}>
              <Button
                theme='solid'
                type='primary'
                onClick={pullModel}
                loading={pullLoading}
                disabled={!pullModelName.trim()}
                icon={<IconDownload />}
                block
              >
                {pullLoading ? t('拉取中...') : t('拉取模型')}
              </Button>
            </Col>
          </Row>
          {/* Pull progress display — an IIFE keeps the derived values local */}
          {pullProgress && (() => {
            const completedBytes = Number(pullProgress.completed) || 0;
            const totalBytes = Number(pullProgress.total) || 0;
            // A percentage bar is only meaningful once the server reports a
            // positive total size; before that, show a spinner instead.
            const hasTotal = Number.isFinite(totalBytes) && totalBytes > 0;
            const safePercent = hasTotal
              ? Math.min(
                  100,
                  Math.max(0, Math.round((completedBytes / totalBytes) * 100)),
                )
              : null;
            const percentText = hasTotal && safePercent !== null
              ? `${safePercent.toFixed(0)}%`
              : pullProgress.status || t('处理中');
            return (
              <div className='mt-3 p-3 bg-gray-50 rounded-lg'>
                <div className='flex items-center justify-between mb-2'>
                  <Text strong>{t('拉取进度')}</Text>
                  <Text type='tertiary' size='small'>{percentText}</Text>
                </div>
                {hasTotal && safePercent !== null ? (
                  <div>
                    <Progress
                      percent={safePercent}
                      showInfo={false}
                      stroke='#1890ff'
                      size='small'
                    />
                    <div className='flex justify-between mt-1'>
                      <Text type='tertiary' size='small'>
                        {(completedBytes / (1024 * 1024 * 1024)).toFixed(2)} GB
                      </Text>
                      <Text type='tertiary' size='small'>
                        {(totalBytes / (1024 * 1024 * 1024)).toFixed(2)} GB
                      </Text>
                    </div>
                  </div>
                ) : (
                  <div className='flex items-center gap-2 text-xs text-[var(--semi-color-text-2)]'>
                    <Spin size='small' />
                    <span>{t('准备中...')}</span>
                  </div>
                )}
              </div>
            );
          })()}
          <Text type='tertiary' size='small' className='mt-2 block'>
            {t('支持拉取 Ollama 官方模型库中的所有模型,拉取过程可能需要几分钟时间')}
          </Text>
        </Card>
        {/* Installed model list */}
        <Card className='!rounded-2xl shadow-sm border-0'>
          <div className='flex items-center justify-between mb-4'>
            <div className='flex items-center'>
              <Avatar size='small' color='purple' className='mr-2'>
                <IconServer size={16} />
              </Avatar>
              <Title heading={5} className='m-0'>
                {t('已有模型')}
                {models.length > 0 && (
                  <Tag color='blue' className='ml-2'>
                    {models.length}
                  </Tag>
                )}
              </Title>
            </div>
            {/* Toolbar: search, bulk select/clear, add-to-channel, refresh */}
            <Space wrap>
              <Input
                prefix={<IconSearch />}
                placeholder={t('搜索模型...')}
                value={searchValue}
                onChange={(value) => setSearchValue(value)}
                style={{ width: 200 }}
                showClear
              />
              <Button
                size='small'
                theme='borderless'
                onClick={handleSelectAll}
                disabled={models.length === 0}
              >
                {t('全选')}
              </Button>
              <Button
                size='small'
                theme='borderless'
                onClick={handleClearSelection}
                disabled={selectedModelIds.length === 0}
              >
                {t('清空')}
              </Button>
              <Button
                theme='solid'
                type='primary'
                icon={<IconPlus />}
                onClick={handleApplyAllModels}
                disabled={selectedModelIds.length === 0}
                size='small'
              >
                {t('加入渠道')}
              </Button>
              <Button
                theme='light'
                type='primary'
                onClick={fetchModels}
                loading={loading}
                icon={<IconRefresh />}
                size='small'
              >
                {t('刷新')}
              </Button>
            </Space>
          </div>
          <Spin spinning={loading}>
            {filteredModels.length === 0 ? (
              <Empty
                image={<IconServer size={60} />}
                title={searchValue ? t('未找到匹配的模型') : t('暂无模型')}
                description={
                  searchValue
                    ? t('请尝试其他搜索关键词')
                    : t('您可以在上方拉取需要的模型')
                }
                style={{ padding: '40px 0' }}
              />
            ) : (
              <List
                dataSource={filteredModels}
                split={false}
                renderItem={(model, index) => (
                  <List.Item
                    key={model.id}
                    className='hover:bg-gray-50 rounded-lg p-3 transition-colors'
                  >
                    <div className='flex items-center justify-between w-full'>
                      <div className='flex items-center flex-1 min-w-0 gap-3'>
                        <Checkbox
                          checked={selectedModelIds.includes(model.id)}
                          onChange={(checked) => handleToggleModel(model.id, checked)}
                        />
                        <Avatar
                          size='small'
                          color='blue'
                          className='flex-shrink-0'
                        >
                          {model.id.charAt(0).toUpperCase()}
                        </Avatar>
                        <div className='flex-1 min-w-0'>
                          <Text strong className='block truncate'>
                            {model.id}
                          </Text>
                          <div className='flex items-center space-x-2 mt-1'>
                            <Tag color='cyan' size='small'>
                              {model.owned_by || 'ollama'}
                            </Tag>
                            {model.size && (
                              <Text type='tertiary' size='small'>
                                {formatModelSize(model.size)}
                              </Text>
                            )}
                          </div>
                        </div>
                      </div>
                      <div className='flex items-center space-x-2 ml-4'>
                        {/* Deletion is irreversible, so require explicit confirmation */}
                        <Popconfirm
                          title={t('确认删除模型')}
                          content={t('删除后无法恢复,确定要删除模型 "{{name}}" 吗?', { name: model.id })}
                          onConfirm={() => deleteModel(model.id)}
                          okText={t('确认')}
                          cancelText={t('取消')}
                        >
                          <Button
                            theme='borderless'
                            type='danger'
                            size='small'
                            icon={<IconDelete />}
                          />
                        </Popconfirm>
                      </div>
                    </div>
                  </List.Item>
                )}
              />
            )}
          </Spin>
        </Card>
      </div>
    </Modal>
  );
};
export default OllamaModelModal;