feat(ops): 运维监控新增 OpenAI Token 请求统计表

- 新增管理端接口 /api/v1/admin/ops/dashboard/openai-token-stats,按模型聚合统计 gpt% 请求

- 支持 time_range=30m|1h|1d|15d|30d(默认 30d),支持 platform/group_id 过滤

- 支持分页(page/page_size)或 TopN(top_n)互斥查询

- 前端运维监控页新增统计表卡片,包含空态/错误态与分页/TopN 交互

- 补齐后端与前端测试
This commit is contained in:
yangjianbo
2026-02-12 14:20:14 +08:00
parent ed2eba9028
commit 65661f24e2
15 changed files with 1335 additions and 0 deletions

View File

@@ -259,6 +259,40 @@ export interface OpsErrorDistributionResponse {
items: OpsErrorDistributionItem[]
}
// Supported aggregation windows for the OpenAI token stats endpoint (default: '30d').
export type OpsOpenAITokenStatsTimeRange = '30m' | '1h' | '1d' | '15d' | '30d'

// One aggregated row of the stats table: all requests for a single model name.
export interface OpsOpenAITokenStatsItem {
  model: string
  request_count: number
  // Nullable: backend omits these averages when no request produced tokens.
  avg_tokens_per_sec?: number | null
  avg_first_token_ms?: number | null
  total_output_tokens: number
  avg_duration_ms: number
  // Number of requests that reported a first-token timestamp (sample size for avg_first_token_ms).
  requests_with_first_token: number
}
// Response of GET /admin/ops/dashboard/openai-token-stats.
export interface OpsOpenAITokenStatsResponse {
  time_range: OpsOpenAITokenStatsTimeRange
  // Resolved absolute window boundaries (ISO timestamps) for the requested time_range.
  start_time: string
  end_time: string
  // Echo of the filters that were applied, when present.
  platform?: string
  group_id?: number | null
  items: OpsOpenAITokenStatsItem[]
  // Total number of distinct models matching the filters (not just the returned page).
  total: number
  // Present in pagination mode; top_n is set instead in TopN mode.
  page?: number
  page_size?: number
  top_n?: number | null
}
// Query params for getOpenAITokenStats.
// page/page_size and top_n are mutually exclusive query styles — send one or the other.
export interface OpsOpenAITokenStatsParams {
  time_range?: OpsOpenAITokenStatsTimeRange
  platform?: string
  group_id?: number | null
  page?: number
  page_size?: number
  top_n?: number
}
export interface OpsSystemMetricsSnapshot {
id: number
created_at: string
@@ -971,6 +1005,17 @@ export async function getErrorDistribution(
return data
}
/**
 * Fetch per-model OpenAI token/request statistics for the ops dashboard.
 *
 * @param params  Time window, optional platform/group filters, and either
 *                pagination (page/page_size) or TopN (top_n) params.
 * @param options Optional AbortSignal for request cancellation.
 * @returns The aggregated stats response from the admin ops endpoint.
 */
export async function getOpenAITokenStats(
  params: OpsOpenAITokenStatsParams,
  options: OpsRequestOptions = {}
): Promise<OpsOpenAITokenStatsResponse> {
  const response = await apiClient.get<OpsOpenAITokenStatsResponse>(
    '/admin/ops/dashboard/openai-token-stats',
    { params, signal: options.signal }
  )
  return response.data
}
export type OpsErrorListView = 'errors' | 'excluded' | 'all'
export type OpsErrorListQueryParams = {
@@ -1188,6 +1233,7 @@ export const opsAPI = {
getLatencyHistogram,
getErrorTrend,
getErrorDistribution,
getOpenAITokenStats,
getConcurrencyStats,
getUserConcurrencyStats,
getAccountAvailabilityStats,

View File

@@ -2508,11 +2508,33 @@ export default {
'5m': 'Last 5 minutes',
'30m': 'Last 30 minutes',
'1h': 'Last 1 hour',
'1d': 'Last 1 day',
'15d': 'Last 15 days',
'6h': 'Last 6 hours',
'24h': 'Last 24 hours',
'7d': 'Last 7 days',
'30d': 'Last 30 days'
},
// i18n strings (English) for the OpenAI token request statistics card on the ops dashboard.
openaiTokenStats: {
  title: 'OpenAI Token Request Stats',
  // Labels for the TopN / pagination view-mode selector.
  viewModeTopN: 'TopN',
  viewModePagination: 'Pagination',
  prevPage: 'Previous',
  nextPage: 'Next',
  // {page} and {total} are interpolated by vue-i18n.
  pageInfo: 'Page {page}/{total}',
  totalModels: 'Total models: {total}',
  failedToLoad: 'Failed to load OpenAI token stats',
  empty: 'No OpenAI token stats for the current filters',
  // Column headers of the stats table.
  table: {
    model: 'Model',
    requestCount: 'Requests',
    avgTokensPerSec: 'Avg Tokens/sec',
    avgFirstTokenMs: 'Avg First Token Latency (ms)',
    totalOutputTokens: 'Total Output Tokens',
    avgDurationMs: 'Avg Duration (ms)',
    requestsWithFirstToken: 'Requests With First Token'
  }
},
fullscreen: {
enter: 'Enter Fullscreen'
},

View File

@@ -2675,12 +2675,34 @@ export default {
'5m': '近5分钟',
'30m': '近30分钟',
'1h': '近1小时',
'1d': '近1天',
'15d': '近15天',
'6h': '近6小时',
'24h': '近24小时',
'7d': '近7天',
'30d': '近30天',
custom: '自定义'
},
// i18n strings (Simplified Chinese) for the OpenAI token request statistics card.
// Keys must stay in sync with the English locale's openaiTokenStats block.
openaiTokenStats: {
  title: 'OpenAI Token 请求统计',
  // Labels for the TopN / pagination view-mode selector.
  viewModeTopN: 'TopN',
  viewModePagination: '分页',
  prevPage: '上一页',
  nextPage: '下一页',
  // {page} and {total} are interpolated by vue-i18n.
  pageInfo: '第 {page}/{total} 页',
  totalModels: '模型总数:{total}',
  failedToLoad: '加载 OpenAI Token 统计失败',
  empty: '当前筛选条件下暂无 OpenAI Token 请求统计数据',
  // Column headers of the stats table.
  table: {
    model: '模型',
    requestCount: '请求数',
    avgTokensPerSec: '平均 Tokens/秒',
    avgFirstTokenMs: '平均首 Token 延迟(ms)',
    totalOutputTokens: '输出 Token 总数',
    avgDurationMs: '平均时长(ms)',
    requestsWithFirstToken: '首 Token 样本数'
  }
},
customTimeRange: {
startTime: '开始时间',
endTime: '结束时间'

View File

@@ -84,6 +84,15 @@
/>
</div>
<!-- Row: OpenAI Token Stats -->
<div v-if="opsEnabled && !(loading && !hasLoadedOnce)" class="grid grid-cols-1 gap-6">
<OpsOpenAITokenStatsCard
:platform-filter="platform"
:group-id-filter="groupId"
:refresh-token="dashboardRefreshToken"
/>
</div>
<!-- Alert Events -->
<OpsAlertEventsCard v-if="opsEnabled && !(loading && !hasLoadedOnce)" />
@@ -148,6 +157,7 @@ import OpsLatencyChart from './components/OpsLatencyChart.vue'
import OpsThroughputTrendChart from './components/OpsThroughputTrendChart.vue'
import OpsSwitchRateTrendChart from './components/OpsSwitchRateTrendChart.vue'
import OpsAlertEventsCard from './components/OpsAlertEventsCard.vue'
import OpsOpenAITokenStatsCard from './components/OpsOpenAITokenStatsCard.vue'
import OpsRequestDetailsModal, { type OpsRequestDetailsPreset } from './components/OpsRequestDetailsModal.vue'
import OpsSettingsDialog from './components/OpsSettingsDialog.vue'
import OpsAlertRulesCard from './components/OpsAlertRulesCard.vue'

View File

@@ -0,0 +1,246 @@
<script setup lang="ts">
import { computed, ref, watch } from 'vue'
import { useI18n } from 'vue-i18n'
import Select from '@/components/common/Select.vue'
import EmptyState from '@/components/common/EmptyState.vue'
import { opsAPI, type OpsOpenAITokenStatsResponse, type OpsOpenAITokenStatsTimeRange } from '@/api/admin/ops'
import { formatNumber } from '@/utils/format'
// Props supplied by the parent ops dashboard page.
interface Props {
  // Platform filter forwarded to the stats API; empty string means "all platforms".
  platformFilter?: string
  // Group filter forwarded to the stats API; null / non-positive values are dropped in buildParams.
  groupIdFilter?: number | null
  // Incremented by the parent to force this card to reload its data.
  refreshToken: number
}

// The card renders either a fixed TopN list or a paginated table.
type ViewMode = 'topn' | 'pagination'

const props = withDefaults(defineProps<Props>(), {
  platformFilter: '',
  groupIdFilter: null
})
const { t } = useI18n()

// --- Local UI state ---
const loading = ref(false)
// Non-empty when the most recent fetch failed; rendered as an inline error banner.
const errorMessage = ref('')
const response = ref<OpsOpenAITokenStatsResponse | null>(null)
// Default aggregation window: last 30 days.
const timeRange = ref<OpsOpenAITokenStatsTimeRange>('30d')
const viewMode = ref<ViewMode>('topn')
const topN = ref<number>(20)
const page = ref<number>(1)
const pageSize = ref<number>(20)

// --- Derived state ---
const items = computed(() => response.value?.items ?? [])
const total = computed(() => response.value?.total ?? 0)
// Total page count; only meaningful in pagination mode (1 otherwise).
const totalPages = computed(() => {
  if (viewMode.value !== 'pagination') return 1
  // Guard against a non-positive page size; fall back to the default of 20.
  const size = pageSize.value > 0 ? pageSize.value : 20
  return Math.max(1, Math.ceil(total.value / size))
})

// --- Select options (computed so labels react to locale switches) ---
const timeRangeOptions = computed(() => [
  { value: '30m', label: t('admin.ops.timeRange.30m') },
  { value: '1h', label: t('admin.ops.timeRange.1h') },
  { value: '1d', label: t('admin.ops.timeRange.1d') },
  { value: '15d', label: t('admin.ops.timeRange.15d') },
  { value: '30d', label: t('admin.ops.timeRange.30d') }
])
const viewModeOptions = computed(() => [
  { value: 'topn', label: t('admin.ops.openaiTokenStats.viewModeTopN') },
  { value: 'pagination', label: t('admin.ops.openaiTokenStats.viewModePagination') }
])
const topNOptions = computed(() => [
  { value: 10, label: 'Top 10' },
  { value: 20, label: 'Top 20' },
  { value: 50, label: 'Top 50' },
  { value: 100, label: 'Top 100' }
])
const pageSizeOptions = computed(() => [
  { value: 10, label: '10' },
  { value: 20, label: '20' },
  { value: 50, label: '50' },
  { value: 100, label: '100' }
])
/** Render a rate/latency value with two decimals, or '-' for null/undefined/NaN/Infinity. */
function formatRate(v?: number | null): string {
  return typeof v === 'number' && Number.isFinite(v) ? v.toFixed(2) : '-'
}
/** Render an integer with thousands grouping via formatNumber, or '-' for non-finite input. */
function formatInt(v?: number | null): string {
  if (typeof v === 'number' && Number.isFinite(v)) {
    return formatNumber(Math.round(v))
  }
  return '-'
}
// Monotonically increasing request id. A response whose id no longer matches
// requestSeq is stale (a newer request was issued meanwhile) and is discarded.
let requestSeq = 0

/**
 * Build the query params for the stats endpoint from current UI state.
 * TopN (top_n) and pagination (page/page_size) params are mutually exclusive.
 */
function buildParams() {
  const params: Record<string, any> = {
    time_range: timeRange.value,
    // Empty platform / non-positive group id mean "no filter" and are omitted.
    platform: props.platformFilter || undefined,
    group_id: typeof props.groupIdFilter === 'number' && props.groupIdFilter > 0 ? props.groupIdFilter : undefined
  }
  if (viewMode.value === 'topn') {
    params.top_n = topN.value
  } else {
    params.page = page.value
    params.page_size = pageSize.value
  }
  return params
}

/**
 * Fetch stats for the current filters and store them in `response`.
 *
 * Fix: a sequence guard drops out-of-order responses, so a slow earlier
 * request can no longer overwrite the data / error / loading state of a
 * newer one when filters change in quick succession.
 */
async function loadData() {
  const seq = ++requestSeq
  loading.value = true
  errorMessage.value = ''
  try {
    let data = await opsAPI.getOpenAITokenStats(buildParams())
    if (seq !== requestSeq) return // superseded by a newer request
    response.value = data
    // Defensive: if `total` shrank and the current page now exceeds the last
    // page, fall back to the last page and fetch once more.
    if (viewMode.value === 'pagination' && page.value > totalPages.value) {
      page.value = totalPages.value
      data = await opsAPI.getOpenAITokenStats(buildParams())
      if (seq !== requestSeq) return
      response.value = data
    }
  } catch (err) {
    if (seq !== requestSeq) return // ignore failures of superseded requests
    console.error('[OpsOpenAITokenStatsCard] Failed to load data', err)
    response.value = null
    errorMessage.value = (err instanceof Error && err.message) || t('admin.ops.openaiTokenStats.failedToLoad')
  } finally {
    // Only the most recent request may clear the shared loading flag.
    if (seq === requestSeq) loading.value = false
  }
}
// Re-fetch whenever any query input changes. A single object-returning getter
// is used so one watcher can diff all inputs at once.
watch(
  () => ({
    timeRange: timeRange.value,
    viewMode: viewMode.value,
    topN: topN.value,
    page: page.value,
    pageSize: pageSize.value,
    platform: props.platformFilter,
    groupId: props.groupIdFilter,
    refreshToken: props.refreshToken
  }),
  (next, prev) => {
    // Avoid "filter changed -> page reset -> two requests in a row":
    // when a filter changes, only reset the page here and let the next watch
    // invocation (where only `page` changed) actually issue the request.
    // `prev` is undefined on the immediate first run, which counts as changed,
    // but the page is already 1 then, so the first load fires directly below.
    const filtersChanged = !prev ||
      next.timeRange !== prev.timeRange ||
      next.viewMode !== prev.viewMode ||
      next.pageSize !== prev.pageSize ||
      next.platform !== prev.platform ||
      next.groupId !== prev.groupId ||
      next.refreshToken !== prev.refreshToken
    if (next.viewMode === 'pagination' && filtersChanged && next.page !== 1) {
      page.value = 1
      return
    }
    void loadData()
  },
  { immediate: true }
)
/** Step back one page; no-op outside pagination mode or on the first page. */
function onPrevPage() {
  if (viewMode.value !== 'pagination' || page.value <= 1) return
  page.value -= 1
}

/** Step forward one page; no-op outside pagination mode or on the last page. */
function onNextPage() {
  if (viewMode.value !== 'pagination' || page.value >= totalPages.value) return
  page.value += 1
}
</script>
<template>
  <!-- Card: per-model OpenAI token/request statistics with TopN or paginated views. -->
  <section class="card p-4 md:p-5">
    <!-- Header: title plus time-range / view-mode controls; the trailing control
         switches between a TopN size selector and pagination widgets. -->
    <div class="mb-4 flex flex-wrap items-center justify-between gap-3">
      <h3 class="text-sm font-bold text-gray-900 dark:text-white">
        {{ t('admin.ops.openaiTokenStats.title') }}
      </h3>
      <div class="flex flex-wrap items-center gap-2">
        <div class="w-36">
          <Select v-model="timeRange" :options="timeRangeOptions" />
        </div>
        <div class="w-36">
          <Select v-model="viewMode" :options="viewModeOptions" />
        </div>
        <!-- TopN mode: a single result-count selector. -->
        <div v-if="viewMode === 'topn'" class="w-28">
          <Select v-model="topN" :options="topNOptions" />
        </div>
        <!-- Pagination mode: page-size selector, prev/next buttons, page indicator. -->
        <template v-else>
          <div class="w-24">
            <Select v-model="pageSize" :options="pageSizeOptions" />
          </div>
          <button
            class="btn btn-secondary btn-sm"
            :disabled="loading || page <= 1"
            @click="onPrevPage"
          >
            {{ t('admin.ops.openaiTokenStats.prevPage') }}
          </button>
          <button
            class="btn btn-secondary btn-sm"
            :disabled="loading || page >= totalPages"
            @click="onNextPage"
          >
            {{ t('admin.ops.openaiTokenStats.nextPage') }}
          </button>
          <span class="text-xs text-gray-500 dark:text-gray-400">
            {{ t('admin.ops.openaiTokenStats.pageInfo', { page, total: totalPages }) }}
          </span>
        </template>
      </div>
    </div>
    <!-- Error banner: shown whenever the last fetch failed. -->
    <div v-if="errorMessage" class="mb-4 rounded-lg bg-red-50 px-3 py-2 text-xs text-red-600 dark:bg-red-900/20 dark:text-red-400">
      {{ errorMessage }}
    </div>
    <!-- Body: loading text, empty state, or the stats table. -->
    <div v-if="loading" class="py-8 text-center text-sm text-gray-500 dark:text-gray-400">
      {{ t('admin.ops.loadingText') }}
    </div>
    <EmptyState
      v-else-if="items.length === 0"
      :title="t('common.noData')"
      :description="t('admin.ops.openaiTokenStats.empty')"
    />
    <div v-else class="overflow-x-auto">
      <table class="min-w-full text-left text-xs md:text-sm">
        <thead>
          <tr class="border-b border-gray-200 text-gray-500 dark:border-dark-700 dark:text-gray-400">
            <th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.model') }}</th>
            <th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.requestCount') }}</th>
            <th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.avgTokensPerSec') }}</th>
            <th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.avgFirstTokenMs') }}</th>
            <th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.totalOutputTokens') }}</th>
            <th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.avgDurationMs') }}</th>
            <th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.requestsWithFirstToken') }}</th>
          </tr>
        </thead>
        <tbody>
          <!-- One row per model; model names are unique within a response, so they key the rows. -->
          <tr
            v-for="row in items"
            :key="row.model"
            class="border-b border-gray-100 text-gray-700 dark:border-dark-800 dark:text-gray-200"
          >
            <td class="px-2 py-2 font-medium">{{ row.model }}</td>
            <td class="px-2 py-2">{{ formatInt(row.request_count) }}</td>
            <td class="px-2 py-2">{{ formatRate(row.avg_tokens_per_sec) }}</td>
            <td class="px-2 py-2">{{ formatRate(row.avg_first_token_ms) }}</td>
            <td class="px-2 py-2">{{ formatInt(row.total_output_tokens) }}</td>
            <td class="px-2 py-2">{{ formatInt(row.avg_duration_ms) }}</td>
            <td class="px-2 py-2">{{ formatInt(row.requests_with_first_token) }}</td>
          </tr>
        </tbody>
      </table>
      <!-- TopN footer: total distinct models matching the filters (not just the shown rows). -->
      <div v-if="viewMode === 'topn'" class="mt-3 text-xs text-gray-500 dark:text-gray-400">
        {{ t('admin.ops.openaiTokenStats.totalModels', { total }) }}
      </div>
    </div>
  </section>
</template>

View File

@@ -0,0 +1,215 @@
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { defineComponent } from 'vue'
import { flushPromises, mount } from '@vue/test-utils'
import OpsOpenAITokenStatsCard from '../OpsOpenAITokenStatsCard.vue'
// Spy backing the mocked ops API; each test configures its resolved/rejected value.
const mockGetOpenAITokenStats = vi.fn()

// Replace the real API module so no HTTP requests are made.
vi.mock('@/api/admin/ops', () => ({
  opsAPI: {
    getOpenAITokenStats: (...args: any[]) => mockGetOpenAITokenStats(...args),
  },
}))

// Stub vue-i18n's t() to echo keys; pageInfo is special-cased so the page
// indicator can be asserted against interpolated values.
vi.mock('vue-i18n', async (importOriginal) => {
  const actual = await importOriginal<typeof import('vue-i18n')>()
  return {
    ...actual,
    useI18n: () => ({
      t: (key: string, params?: Record<string, any>) => {
        if (key === 'admin.ops.openaiTokenStats.pageInfo' && params) {
          return `${params.page}/${params.total}`
        }
        return key
      },
    }),
  }
})

// Minimal Select replacement: renders nothing, but exposes the v-model contract
// so tests can drive the card by emitting update:modelValue.
const SelectStub = defineComponent({
  name: 'SelectControlStub',
  props: {
    modelValue: {
      type: [String, Number],
      default: '',
    },
  },
  emits: ['update:modelValue'],
  template: '<div class="select-stub" />',
})

// EmptyState replacement that surfaces title/description as plain text for assertions.
const EmptyStateStub = defineComponent({
  name: 'EmptyState',
  props: {
    title: { type: String, default: '' },
    description: { type: String, default: '' },
  },
  template: '<div class="empty-state">{{ title }}|{{ description }}</div>',
})
// Canonical successful API payload: one model row, total of 40 models so that
// pagination tests have multiple pages at the default page size of 20.
const sampleResponse = {
  time_range: '30d' as const,
  start_time: '2026-01-01T00:00:00Z',
  end_time: '2026-01-31T00:00:00Z',
  platform: 'openai',
  group_id: 7,
  items: [
    {
      model: 'gpt-4o-mini',
      request_count: 12,
      avg_tokens_per_sec: 22.5,
      avg_first_token_ms: 123.45,
      total_output_tokens: 1234,
      avg_duration_ms: 321,
      requests_with_first_token: 10,
    },
  ],
  total: 40,
  page: 1,
  page_size: 20,
  top_n: null,
}
describe('OpsOpenAITokenStatsCard', () => {
  beforeEach(() => {
    // Reset call history and configured behaviour between tests.
    vi.clearAllMocks()
  })

  // Initial load forwards platform/group filters and the default TopN params;
  // changing the time-range Select triggers a refetch with the new window.
  it('默认加载并透传 platform/group 过滤,支持时间窗口切换', async () => {
    mockGetOpenAITokenStats.mockResolvedValue(sampleResponse)
    const wrapper = mount(OpsOpenAITokenStatsCard, {
      props: {
        platformFilter: 'openai',
        groupIdFilter: 7,
        refreshToken: 0,
      },
      global: {
        stubs: {
          Select: SelectStub,
          EmptyState: EmptyStateStub,
        },
      },
    })
    await flushPromises()
    // First request: default 30d window, TopN mode with top_n=20, filters passed through.
    expect(mockGetOpenAITokenStats).toHaveBeenCalledWith(
      expect.objectContaining({
        time_range: '30d',
        platform: 'openai',
        group_id: 7,
        top_n: 20,
      })
    )
    // Selects render in declaration order: [0]=timeRange, [1]=viewMode, [2]=topN/pageSize.
    const selects = wrapper.findAllComponents(SelectStub)
    await selects[0].vm.$emit('update:modelValue', '1h')
    await flushPromises()
    expect(mockGetOpenAITokenStats).toHaveBeenCalledWith(
      expect.objectContaining({
        time_range: '1h',
        platform: 'openai',
        group_id: 7,
      })
    )
  })

  // Switching view modes swaps between page/page_size and top_n request params,
  // and the next-page button advances the page.
  it('支持分页与 TopN 模式切换并按参数请求', async () => {
    // Echo request params back so the component's pagination state stays consistent.
    mockGetOpenAITokenStats.mockImplementation(async (params: Record<string, any>) => ({
      ...sampleResponse,
      time_range: params.time_range ?? '30d',
      page: params.page ?? 1,
      page_size: params.page_size ?? 20,
      top_n: params.top_n ?? null,
      total: 40,
    }))
    const wrapper = mount(OpsOpenAITokenStatsCard, {
      props: {
        refreshToken: 0,
      },
      global: {
        stubs: {
          Select: SelectStub,
          EmptyState: EmptyStateStub,
        },
      },
    })
    await flushPromises()
    let selects = wrapper.findAllComponents(SelectStub)
    // Switch to pagination mode -> expect page/page_size params.
    await selects[1].vm.$emit('update:modelValue', 'pagination')
    await flushPromises()
    expect(mockGetOpenAITokenStats).toHaveBeenCalledWith(
      expect.objectContaining({
        page: 1,
        page_size: 20,
      })
    )
    // buttons[0]=prev, buttons[1]=next (prev is disabled on page 1).
    const buttons = wrapper.findAll('button')
    expect(buttons.length).toBeGreaterThanOrEqual(2)
    await buttons[1].trigger('click')
    await flushPromises()
    expect(mockGetOpenAITokenStats).toHaveBeenCalledWith(
      expect.objectContaining({
        page: 2,
        page_size: 20,
      })
    )
    // Back to TopN mode, then change the TopN size -> expect top_n param.
    selects = wrapper.findAllComponents(SelectStub)
    await selects[1].vm.$emit('update:modelValue', 'topn')
    await flushPromises()
    selects = wrapper.findAllComponents(SelectStub)
    await selects[2].vm.$emit('update:modelValue', 50)
    await flushPromises()
    expect(mockGetOpenAITokenStats).toHaveBeenCalledWith(
      expect.objectContaining({
        top_n: 50,
      })
    )
  })

  // An empty items array renders the EmptyState stub instead of the table.
  it('接口返回空数据时显示空态', async () => {
    mockGetOpenAITokenStats.mockResolvedValue({
      ...sampleResponse,
      items: [],
      total: 0,
    })
    const wrapper = mount(OpsOpenAITokenStatsCard, {
      props: { refreshToken: 0 },
      global: {
        stubs: {
          Select: SelectStub,
          EmptyState: EmptyStateStub,
        },
      },
    })
    await flushPromises()
    expect(wrapper.find('.empty-state').exists()).toBe(true)
  })

  // A rejected request surfaces the error's message in the error banner.
  it('接口异常时显示错误提示', async () => {
    mockGetOpenAITokenStats.mockRejectedValue(new Error('加载失败'))
    const wrapper = mount(OpsOpenAITokenStatsCard, {
      props: { refreshToken: 0 },
      global: {
        stubs: {
          Select: SelectStub,
          EmptyState: EmptyStateStub,
        },
      },
    })
    await flushPromises()
    expect(wrapper.text()).toContain('加载失败')
  })
})