feat(ops): add OpenAI token request stats table to ops monitoring

- Add admin endpoint /api/v1/admin/ops/dashboard/openai-token-stats, aggregating gpt% request stats by model

- Support time_range=30m|1h|1d|15d|30d (default 30d) and platform/group_id filters

- Support mutually exclusive pagination (page/page_size) and TopN (top_n) query modes; see the illustrative requests below

- Add a stats table card to the frontend ops monitoring page, with empty/error states and pagination/TopN interactions

- Add backend and frontend tests
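
Illustrative requests against the new endpoint (host and auth omitted; parameter values are examples only, not part of this diff):

GET /api/v1/admin/ops/dashboard/openai-token-stats?time_range=1h&platform=openai&top_n=10
GET /api/v1/admin/ops/dashboard/openai-token-stats?time_range=30d&page=2&page_size=50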
Author: yangjianbo
Date: 2026-02-12 14:20:14 +08:00
parent ed2eba9028
commit 65661f24e2
15 changed files with 1335 additions and 0 deletions

View File

@@ -58,6 +58,96 @@ func TestParseOpsDuration(t *testing.T) {
require.False(t, ok)
}
func TestParseOpsOpenAITokenStatsDuration(t *testing.T) {
tests := []struct {
input string
want time.Duration
ok bool
}{
{input: "30m", want: 30 * time.Minute, ok: true},
{input: "1h", want: time.Hour, ok: true},
{input: "1d", want: 24 * time.Hour, ok: true},
{input: "15d", want: 15 * 24 * time.Hour, ok: true},
{input: "30d", want: 30 * 24 * time.Hour, ok: true},
{input: "7d", want: 0, ok: false},
}
for _, tt := range tests {
got, ok := parseOpsOpenAITokenStatsDuration(tt.input)
require.Equal(t, tt.ok, ok, "input=%s", tt.input)
require.Equal(t, tt.want, got, "input=%s", tt.input)
}
}
func TestParseOpsOpenAITokenStatsFilter_Defaults(t *testing.T) {
gin.SetMode(gin.TestMode)
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request = httptest.NewRequest(http.MethodGet, "/", nil)
before := time.Now().UTC()
filter, err := parseOpsOpenAITokenStatsFilter(c)
after := time.Now().UTC()
require.NoError(t, err)
require.NotNil(t, filter)
require.Equal(t, "30d", filter.TimeRange)
require.Equal(t, 1, filter.Page)
require.Equal(t, 20, filter.PageSize)
require.Equal(t, 0, filter.TopN)
require.Nil(t, filter.GroupID)
require.Equal(t, "", filter.Platform)
require.True(t, filter.StartTime.Before(filter.EndTime))
require.WithinDuration(t, before.Add(-30*24*time.Hour), filter.StartTime, 2*time.Second)
require.WithinDuration(t, after, filter.EndTime, 2*time.Second)
}
func TestParseOpsOpenAITokenStatsFilter_WithTopN(t *testing.T) {
gin.SetMode(gin.TestMode)
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request = httptest.NewRequest(
http.MethodGet,
"/?time_range=1h&platform=openai&group_id=12&top_n=50",
nil,
)
filter, err := parseOpsOpenAITokenStatsFilter(c)
require.NoError(t, err)
require.Equal(t, "1h", filter.TimeRange)
require.Equal(t, "openai", filter.Platform)
require.NotNil(t, filter.GroupID)
require.Equal(t, int64(12), *filter.GroupID)
require.Equal(t, 50, filter.TopN)
require.Equal(t, 0, filter.Page)
require.Equal(t, 0, filter.PageSize)
}
func TestParseOpsOpenAITokenStatsFilter_InvalidParams(t *testing.T) {
tests := []string{
"/?time_range=7d",
"/?group_id=0",
"/?group_id=abc",
"/?top_n=0",
"/?top_n=101",
"/?top_n=10&page=1",
"/?top_n=10&page_size=20",
"/?page=0",
"/?page_size=0",
"/?page_size=101",
}
gin.SetMode(gin.TestMode)
for _, rawURL := range tests {
w := httptest.NewRecorder()
c, _ := gin.CreateTestContext(w)
c.Request = httptest.NewRequest(http.MethodGet, rawURL, nil)
_, err := parseOpsOpenAITokenStatsFilter(c)
require.Error(t, err, "url=%s", rawURL)
}
}
func TestParseOpsTimeRange(t *testing.T) {
gin.SetMode(gin.TestMode)
w := httptest.NewRecorder()

View File

@@ -1,6 +1,7 @@
package admin
import (
"fmt"
"net/http"
"strconv"
"strings"
@@ -218,6 +219,115 @@ func (h *OpsHandler) GetDashboardErrorDistribution(c *gin.Context) {
response.Success(c, data)
}
// GetDashboardOpenAITokenStats returns OpenAI token efficiency stats grouped by model.
// GET /api/v1/admin/ops/dashboard/openai-token-stats
func (h *OpsHandler) GetDashboardOpenAITokenStats(c *gin.Context) {
if h.opsService == nil {
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
return
}
if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
response.ErrorFrom(c, err)
return
}
filter, err := parseOpsOpenAITokenStatsFilter(c)
if err != nil {
response.BadRequest(c, err.Error())
return
}
data, err := h.opsService.GetOpenAITokenStats(c.Request.Context(), filter)
if err != nil {
response.ErrorFrom(c, err)
return
}
response.Success(c, data)
}
func parseOpsOpenAITokenStatsFilter(c *gin.Context) (*service.OpsOpenAITokenStatsFilter, error) {
if c == nil {
return nil, fmt.Errorf("invalid request")
}
timeRange := strings.TrimSpace(c.Query("time_range"))
if timeRange == "" {
timeRange = "30d"
}
dur, ok := parseOpsOpenAITokenStatsDuration(timeRange)
if !ok {
return nil, fmt.Errorf("invalid time_range")
}
end := time.Now().UTC()
start := end.Add(-dur)
filter := &service.OpsOpenAITokenStatsFilter{
TimeRange: timeRange,
StartTime: start,
EndTime: end,
Platform: strings.TrimSpace(c.Query("platform")),
}
if v := strings.TrimSpace(c.Query("group_id")); v != "" {
id, err := strconv.ParseInt(v, 10, 64)
if err != nil || id <= 0 {
return nil, fmt.Errorf("invalid group_id")
}
filter.GroupID = &id
}
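// top_n selects ranking mode and page/page_size select paging mode; the two are mutually exclusive.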
topNRaw := strings.TrimSpace(c.Query("top_n"))
pageRaw := strings.TrimSpace(c.Query("page"))
pageSizeRaw := strings.TrimSpace(c.Query("page_size"))
if topNRaw != "" && (pageRaw != "" || pageSizeRaw != "") {
return nil, fmt.Errorf("invalid query: top_n cannot be used with page/page_size")
}
if topNRaw != "" {
topN, err := strconv.Atoi(topNRaw)
if err != nil || topN < 1 || topN > 100 {
return nil, fmt.Errorf("invalid top_n")
}
filter.TopN = topN
return filter, nil
}
filter.Page = 1
filter.PageSize = 20
if pageRaw != "" {
page, err := strconv.Atoi(pageRaw)
if err != nil || page < 1 {
return nil, fmt.Errorf("invalid page")
}
filter.Page = page
}
if pageSizeRaw != "" {
pageSize, err := strconv.Atoi(pageSizeRaw)
if err != nil || pageSize < 1 || pageSize > 100 {
return nil, fmt.Errorf("invalid page_size")
}
filter.PageSize = pageSize
}
return filter, nil
}
func parseOpsOpenAITokenStatsDuration(v string) (time.Duration, bool) {
switch strings.TrimSpace(v) {
case "30m":
return 30 * time.Minute, true
case "1h":
return time.Hour, true
case "1d":
return 24 * time.Hour, true
case "15d":
return 15 * 24 * time.Hour, true
case "30d":
return 30 * 24 * time.Hour, true
default:
return 0, false
}
}
func pickThroughputBucketSeconds(window time.Duration) int {
// Keep buckets predictable and avoid huge responses.
switch {

View File

@@ -0,0 +1,145 @@
package repository
import (
"context"
"database/sql"
"fmt"
"strings"
"github.com/Wei-Shaw/sub2api/internal/service"
)
func (r *opsRepository) GetOpenAITokenStats(ctx context.Context, filter *service.OpsOpenAITokenStatsFilter) (*service.OpsOpenAITokenStatsResponse, error) {
if r == nil || r.db == nil {
return nil, fmt.Errorf("nil ops repository")
}
if filter == nil {
return nil, fmt.Errorf("nil filter")
}
if filter.StartTime.IsZero() || filter.EndTime.IsZero() {
return nil, fmt.Errorf("start_time/end_time required")
}
// Allow start_time == end_time (yields an empty result), consistent with the service-layer validation.
if filter.StartTime.After(filter.EndTime) {
return nil, fmt.Errorf("start_time must be <= end_time")
}
dashboardFilter := &service.OpsDashboardFilter{
StartTime: filter.StartTime.UTC(),
EndTime: filter.EndTime.UTC(),
Platform: strings.TrimSpace(strings.ToLower(filter.Platform)),
GroupID: filter.GroupID,
}
join, where, baseArgs, next := buildUsageWhere(dashboardFilter, dashboardFilter.StartTime, dashboardFilter.EndTime, 1)
where += " AND ul.model LIKE 'gpt%'"
baseCTE := `
WITH stats AS (
SELECT
ul.model AS model,
COUNT(*)::bigint AS request_count,
ROUND(
AVG(
CASE
WHEN ul.duration_ms > 0 AND ul.output_tokens > 0
THEN ul.output_tokens * 1000.0 / ul.duration_ms
END
)::numeric,
2
)::float8 AS avg_tokens_per_sec,
ROUND(AVG(ul.first_token_ms)::numeric, 2)::float8 AS avg_first_token_ms,
COALESCE(SUM(ul.output_tokens), 0)::bigint AS total_output_tokens,
COALESCE(ROUND(AVG(ul.duration_ms)::numeric, 0), 0)::bigint AS avg_duration_ms,
COUNT(CASE WHEN ul.first_token_ms IS NOT NULL THEN 1 END)::bigint AS requests_with_first_token
FROM usage_logs ul
` + join + `
` + where + `
GROUP BY ul.model
)
`
countSQL := baseCTE + `SELECT COUNT(*) FROM stats`
var total int64
if err := r.db.QueryRowContext(ctx, countSQL, baseArgs...).Scan(&total); err != nil {
return nil, err
}
querySQL := baseCTE + `
SELECT
model,
request_count,
avg_tokens_per_sec,
avg_first_token_ms,
total_output_tokens,
avg_duration_ms,
requests_with_first_token
FROM stats
ORDER BY request_count DESC, model ASC`
args := make([]any, 0, len(baseArgs)+2)
args = append(args, baseArgs...)
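// buildUsageWhere consumed placeholders $1..$(next-1), so LIMIT/OFFSET bind from $next onward.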
if filter.IsTopNMode() {
querySQL += fmt.Sprintf("\nLIMIT $%d", next)
args = append(args, filter.TopN)
} else {
offset := (filter.Page - 1) * filter.PageSize
querySQL += fmt.Sprintf("\nLIMIT $%d OFFSET $%d", next, next+1)
args = append(args, filter.PageSize, offset)
}
rows, err := r.db.QueryContext(ctx, querySQL, args...)
if err != nil {
return nil, err
}
defer func() { _ = rows.Close() }()
items := make([]*service.OpsOpenAITokenStatsItem, 0, 32)
for rows.Next() {
item := &service.OpsOpenAITokenStatsItem{}
var avgTPS sql.NullFloat64
var avgFirstToken sql.NullFloat64
if err := rows.Scan(
&item.Model,
&item.RequestCount,
&avgTPS,
&avgFirstToken,
&item.TotalOutputTokens,
&item.AvgDurationMs,
&item.RequestsWithFirstToken,
); err != nil {
return nil, err
}
if avgTPS.Valid {
v := avgTPS.Float64
item.AvgTokensPerSec = &v
}
if avgFirstToken.Valid {
v := avgFirstToken.Float64
item.AvgFirstTokenMs = &v
}
items = append(items, item)
}
if err := rows.Err(); err != nil {
return nil, err
}
resp := &service.OpsOpenAITokenStatsResponse{
TimeRange: strings.TrimSpace(filter.TimeRange),
StartTime: dashboardFilter.StartTime,
EndTime: dashboardFilter.EndTime,
Platform: dashboardFilter.Platform,
GroupID: dashboardFilter.GroupID,
Items: items,
Total: total,
}
if filter.IsTopNMode() {
topN := filter.TopN
resp.TopN = &topN
} else {
resp.Page = filter.Page
resp.PageSize = filter.PageSize
}
return resp, nil
}

View File

@@ -0,0 +1,156 @@
package repository
import (
"context"
"testing"
"time"
"github.com/DATA-DOG/go-sqlmock"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/stretchr/testify/require"
)
func TestOpsRepositoryGetOpenAITokenStats_PaginationMode(t *testing.T) {
db, mock := newSQLMock(t)
repo := &opsRepository{db: db}
start := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
end := start.Add(24 * time.Hour)
groupID := int64(9)
filter := &service.OpsOpenAITokenStatsFilter{
TimeRange: "1d",
StartTime: start,
EndTime: end,
Platform: " OpenAI ",
GroupID: &groupID,
Page: 2,
PageSize: 10,
}
mock.ExpectQuery(`SELECT COUNT\(\*\) FROM stats`).
WithArgs(start, end, groupID, "openai").
WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(3)))
rows := sqlmock.NewRows([]string{
"model",
"request_count",
"avg_tokens_per_sec",
"avg_first_token_ms",
"total_output_tokens",
"avg_duration_ms",
"requests_with_first_token",
}).
AddRow("gpt-4o-mini", int64(20), 21.56, 120.34, int64(3000), int64(850), int64(18)).
AddRow("gpt-4.1", int64(20), 10.2, 240.0, int64(2500), int64(900), int64(20))
mock.ExpectQuery(`ORDER BY request_count DESC, model ASC\s+LIMIT \$5 OFFSET \$6`).
WithArgs(start, end, groupID, "openai", 10, 10).
WillReturnRows(rows)
resp, err := repo.GetOpenAITokenStats(context.Background(), filter)
require.NoError(t, err)
require.NotNil(t, resp)
require.Equal(t, int64(3), resp.Total)
require.Equal(t, 2, resp.Page)
require.Equal(t, 10, resp.PageSize)
require.Nil(t, resp.TopN)
require.Equal(t, "openai", resp.Platform)
require.NotNil(t, resp.GroupID)
require.Equal(t, groupID, *resp.GroupID)
require.Len(t, resp.Items, 2)
require.Equal(t, "gpt-4o-mini", resp.Items[0].Model)
require.NotNil(t, resp.Items[0].AvgTokensPerSec)
require.InDelta(t, 21.56, *resp.Items[0].AvgTokensPerSec, 0.0001)
require.NotNil(t, resp.Items[0].AvgFirstTokenMs)
require.InDelta(t, 120.34, *resp.Items[0].AvgFirstTokenMs, 0.0001)
require.NoError(t, mock.ExpectationsWereMet())
}
func TestOpsRepositoryGetOpenAITokenStats_TopNMode(t *testing.T) {
db, mock := newSQLMock(t)
repo := &opsRepository{db: db}
start := time.Date(2026, 1, 1, 10, 0, 0, 0, time.UTC)
end := start.Add(time.Hour)
filter := &service.OpsOpenAITokenStatsFilter{
TimeRange: "1h",
StartTime: start,
EndTime: end,
TopN: 5,
}
mock.ExpectQuery(`SELECT COUNT\(\*\) FROM stats`).
WithArgs(start, end).
WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(1)))
rows := sqlmock.NewRows([]string{
"model",
"request_count",
"avg_tokens_per_sec",
"avg_first_token_ms",
"total_output_tokens",
"avg_duration_ms",
"requests_with_first_token",
}).
AddRow("gpt-4o", int64(5), nil, nil, int64(0), int64(0), int64(0))
mock.ExpectQuery(`ORDER BY request_count DESC, model ASC\s+LIMIT \$3`).
WithArgs(start, end, 5).
WillReturnRows(rows)
resp, err := repo.GetOpenAITokenStats(context.Background(), filter)
require.NoError(t, err)
require.NotNil(t, resp)
require.NotNil(t, resp.TopN)
require.Equal(t, 5, *resp.TopN)
require.Equal(t, 0, resp.Page)
require.Equal(t, 0, resp.PageSize)
require.Len(t, resp.Items, 1)
require.Nil(t, resp.Items[0].AvgTokensPerSec)
require.Nil(t, resp.Items[0].AvgFirstTokenMs)
require.NoError(t, mock.ExpectationsWereMet())
}
func TestOpsRepositoryGetOpenAITokenStats_EmptyResult(t *testing.T) {
db, mock := newSQLMock(t)
repo := &opsRepository{db: db}
start := time.Date(2026, 1, 2, 0, 0, 0, 0, time.UTC)
end := start.Add(30 * time.Minute)
filter := &service.OpsOpenAITokenStatsFilter{
TimeRange: "30m",
StartTime: start,
EndTime: end,
Page: 1,
PageSize: 20,
}
mock.ExpectQuery(`SELECT COUNT\(\*\) FROM stats`).
WithArgs(start, end).
WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(0)))
mock.ExpectQuery(`ORDER BY request_count DESC, model ASC\s+LIMIT \$3 OFFSET \$4`).
WithArgs(start, end, 20, 0).
WillReturnRows(sqlmock.NewRows([]string{
"model",
"request_count",
"avg_tokens_per_sec",
"avg_first_token_ms",
"total_output_tokens",
"avg_duration_ms",
"requests_with_first_token",
}))
resp, err := repo.GetOpenAITokenStats(context.Background(), filter)
require.NoError(t, err)
require.NotNil(t, resp)
require.Equal(t, int64(0), resp.Total)
require.Len(t, resp.Items, 0)
require.Equal(t, 1, resp.Page)
require.Equal(t, 20, resp.PageSize)
require.NoError(t, mock.ExpectationsWereMet())
}

View File

@@ -150,6 +150,7 @@ func registerOpsRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
ops.GET("/dashboard/latency-histogram", h.Admin.Ops.GetDashboardLatencyHistogram)
ops.GET("/dashboard/error-trend", h.Admin.Ops.GetDashboardErrorTrend)
ops.GET("/dashboard/error-distribution", h.Admin.Ops.GetDashboardErrorDistribution)
ops.GET("/dashboard/openai-token-stats", h.Admin.Ops.GetDashboardOpenAITokenStats)
}
}

View File

@@ -0,0 +1,55 @@
package service
import (
"context"
infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
)
func (s *OpsService) GetOpenAITokenStats(ctx context.Context, filter *OpsOpenAITokenStatsFilter) (*OpsOpenAITokenStatsResponse, error) {
if err := s.RequireMonitoringEnabled(ctx); err != nil {
return nil, err
}
if s.opsRepo == nil {
return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
}
if filter == nil {
return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required")
}
if filter.StartTime.IsZero() || filter.EndTime.IsZero() {
return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required")
}
if filter.StartTime.After(filter.EndTime) {
return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time")
}
if filter.GroupID != nil && *filter.GroupID <= 0 {
return nil, infraerrors.BadRequest("OPS_GROUP_ID_INVALID", "group_id must be > 0")
}
// top_n cannot be mixed with page/page_size params.
if filter.TopN > 0 && (filter.Page > 0 || filter.PageSize > 0) {
return nil, infraerrors.BadRequest("OPS_PAGINATION_CONFLICT", "top_n cannot be used with page/page_size")
}
if filter.TopN > 0 {
if filter.TopN > 100 {
return nil, infraerrors.BadRequest("OPS_TOPN_INVALID", "top_n must be between 1 and 100")
}
} else {
if filter.Page <= 0 {
filter.Page = 1
}
if filter.PageSize <= 0 {
filter.PageSize = 20
}
if filter.PageSize > 100 {
return nil, infraerrors.BadRequest("OPS_PAGE_SIZE_INVALID", "page_size must be between 1 and 100")
}
}
return s.opsRepo.GetOpenAITokenStats(ctx, filter)
}

View File

@@ -0,0 +1,54 @@
package service
import "time"
type OpsOpenAITokenStatsFilter struct {
TimeRange string
StartTime time.Time
EndTime time.Time
Platform string
GroupID *int64
// Pagination mode (default): page/page_size
Page int
PageSize int
// TopN mode: top_n
TopN int
}
func (f *OpsOpenAITokenStatsFilter) IsTopNMode() bool {
return f != nil && f.TopN > 0
}
type OpsOpenAITokenStatsItem struct {
Model string `json:"model"`
RequestCount int64 `json:"request_count"`
AvgTokensPerSec *float64 `json:"avg_tokens_per_sec"`
AvgFirstTokenMs *float64 `json:"avg_first_token_ms"`
TotalOutputTokens int64 `json:"total_output_tokens"`
AvgDurationMs int64 `json:"avg_duration_ms"`
RequestsWithFirstToken int64 `json:"requests_with_first_token"`
}
type OpsOpenAITokenStatsResponse struct {
TimeRange string `json:"time_range"`
StartTime time.Time `json:"start_time"`
EndTime time.Time `json:"end_time"`
Platform string `json:"platform,omitempty"`
GroupID *int64 `json:"group_id,omitempty"`
Items []*OpsOpenAITokenStatsItem `json:"items"`
// Total model rows before pagination/topN trimming.
Total int64 `json:"total"`
// Pagination mode metadata.
Page int `json:"page,omitempty"`
PageSize int `json:"page_size,omitempty"`
// TopN mode metadata.
TopN *int `json:"top_n,omitempty"`
}

View File

@@ -0,0 +1,162 @@
package service
import (
"context"
"testing"
"time"
infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
"github.com/stretchr/testify/require"
)
type openAITokenStatsRepoStub struct {
OpsRepository
resp *OpsOpenAITokenStatsResponse
err error
captured *OpsOpenAITokenStatsFilter
}
func (s *openAITokenStatsRepoStub) GetOpenAITokenStats(ctx context.Context, filter *OpsOpenAITokenStatsFilter) (*OpsOpenAITokenStatsResponse, error) {
s.captured = filter
if s.err != nil {
return nil, s.err
}
if s.resp != nil {
return s.resp, nil
}
return &OpsOpenAITokenStatsResponse{}, nil
}
func TestOpsServiceGetOpenAITokenStats_Validation(t *testing.T) {
now := time.Now().UTC()
tests := []struct {
name string
filter *OpsOpenAITokenStatsFilter
wantCode int
wantReason string
}{
{
name: "filter 不能为空",
filter: nil,
wantCode: 400,
wantReason: "OPS_FILTER_REQUIRED",
},
{
name: "start_time/end_time 必填",
filter: &OpsOpenAITokenStatsFilter{
StartTime: time.Time{},
EndTime: now,
},
wantCode: 400,
wantReason: "OPS_TIME_RANGE_REQUIRED",
},
{
name: "start_time 不能晚于 end_time",
filter: &OpsOpenAITokenStatsFilter{
StartTime: now,
EndTime: now.Add(-1 * time.Minute),
},
wantCode: 400,
wantReason: "OPS_TIME_RANGE_INVALID",
},
{
name: "group_id 必须大于 0",
filter: &OpsOpenAITokenStatsFilter{
StartTime: now.Add(-time.Hour),
EndTime: now,
GroupID: int64Ptr(0),
},
wantCode: 400,
wantReason: "OPS_GROUP_ID_INVALID",
},
{
name: "top_n 与分页参数互斥",
filter: &OpsOpenAITokenStatsFilter{
StartTime: now.Add(-time.Hour),
EndTime: now,
TopN: 10,
Page: 1,
},
wantCode: 400,
wantReason: "OPS_PAGINATION_CONFLICT",
},
{
name: "top_n 参数越界",
filter: &OpsOpenAITokenStatsFilter{
StartTime: now.Add(-time.Hour),
EndTime: now,
TopN: 101,
},
wantCode: 400,
wantReason: "OPS_TOPN_INVALID",
},
{
name: "page_size 参数越界",
filter: &OpsOpenAITokenStatsFilter{
StartTime: now.Add(-time.Hour),
EndTime: now,
Page: 1,
PageSize: 101,
},
wantCode: 400,
wantReason: "OPS_PAGE_SIZE_INVALID",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
svc := &OpsService{
opsRepo: &openAITokenStatsRepoStub{},
}
_, err := svc.GetOpenAITokenStats(context.Background(), tt.filter)
require.Error(t, err)
require.Equal(t, tt.wantCode, infraerrors.Code(err))
require.Equal(t, tt.wantReason, infraerrors.Reason(err))
})
}
}
func TestOpsServiceGetOpenAITokenStats_DefaultPagination(t *testing.T) {
now := time.Now().UTC()
repo := &openAITokenStatsRepoStub{
resp: &OpsOpenAITokenStatsResponse{
Items: []*OpsOpenAITokenStatsItem{
{Model: "gpt-4o-mini", RequestCount: 10},
},
Total: 1,
},
}
svc := &OpsService{opsRepo: repo}
filter := &OpsOpenAITokenStatsFilter{
TimeRange: "30d",
StartTime: now.Add(-30 * 24 * time.Hour),
EndTime: now,
}
resp, err := svc.GetOpenAITokenStats(context.Background(), filter)
require.NoError(t, err)
require.NotNil(t, resp)
require.NotNil(t, repo.captured)
require.Equal(t, 1, repo.captured.Page)
require.Equal(t, 20, repo.captured.PageSize)
require.Equal(t, 0, repo.captured.TopN)
}
func TestOpsServiceGetOpenAITokenStats_RepoUnavailable(t *testing.T) {
now := time.Now().UTC()
svc := &OpsService{}
_, err := svc.GetOpenAITokenStats(context.Background(), &OpsOpenAITokenStatsFilter{
TimeRange: "1h",
StartTime: now.Add(-time.Hour),
EndTime: now,
TopN: 10,
})
require.Error(t, err)
require.Equal(t, 503, infraerrors.Code(err))
require.Equal(t, "OPS_REPO_UNAVAILABLE", infraerrors.Reason(err))
}
func int64Ptr(v int64) *int64 { return &v }

View File

@@ -27,6 +27,7 @@ type OpsRepository interface {
GetLatencyHistogram(ctx context.Context, filter *OpsDashboardFilter) (*OpsLatencyHistogramResponse, error)
GetErrorTrend(ctx context.Context, filter *OpsDashboardFilter, bucketSeconds int) (*OpsErrorTrendResponse, error)
GetErrorDistribution(ctx context.Context, filter *OpsDashboardFilter) (*OpsErrorDistributionResponse, error)
GetOpenAITokenStats(ctx context.Context, filter *OpsOpenAITokenStatsFilter) (*OpsOpenAITokenStatsResponse, error)
InsertSystemMetrics(ctx context.Context, input *OpsInsertSystemMetricsInput) error
GetLatestSystemMetrics(ctx context.Context, windowMinutes int) (*OpsSystemMetricsSnapshot, error)

View File

@@ -259,6 +259,40 @@ export interface OpsErrorDistributionResponse {
items: OpsErrorDistributionItem[]
}
export type OpsOpenAITokenStatsTimeRange = '30m' | '1h' | '1d' | '15d' | '30d'
export interface OpsOpenAITokenStatsItem {
model: string
request_count: number
avg_tokens_per_sec?: number | null
avg_first_token_ms?: number | null
total_output_tokens: number
avg_duration_ms: number
requests_with_first_token: number
}
export interface OpsOpenAITokenStatsResponse {
time_range: OpsOpenAITokenStatsTimeRange
start_time: string
end_time: string
platform?: string
group_id?: number | null
items: OpsOpenAITokenStatsItem[]
total: number
page?: number
page_size?: number
top_n?: number | null
}
export interface OpsOpenAITokenStatsParams {
time_range?: OpsOpenAITokenStatsTimeRange
platform?: string
group_id?: number | null
page?: number
page_size?: number
top_n?: number
}
export interface OpsSystemMetricsSnapshot {
id: number
created_at: string
@@ -971,6 +1005,17 @@ export async function getErrorDistribution(
return data
}
export async function getOpenAITokenStats(
params: OpsOpenAITokenStatsParams,
options: OpsRequestOptions = {}
): Promise<OpsOpenAITokenStatsResponse> {
const { data } = await apiClient.get<OpsOpenAITokenStatsResponse>('/admin/ops/dashboard/openai-token-stats', {
params,
signal: options.signal
})
return data
}
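// Example call (illustrative parameter values):
//   const stats = await getOpenAITokenStats({ time_range: '1h', top_n: 10 })
//   console.log(stats.items.map((i) => i.model))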
export type OpsErrorListView = 'errors' | 'excluded' | 'all'
export type OpsErrorListQueryParams = {
@@ -1188,6 +1233,7 @@ export const opsAPI = {
getLatencyHistogram,
getErrorTrend,
getErrorDistribution,
getOpenAITokenStats,
getConcurrencyStats,
getUserConcurrencyStats,
getAccountAvailabilityStats,

View File

@@ -2508,11 +2508,33 @@ export default {
'5m': 'Last 5 minutes',
'30m': 'Last 30 minutes',
'1h': 'Last 1 hour',
'1d': 'Last 1 day',
'15d': 'Last 15 days',
'6h': 'Last 6 hours',
'24h': 'Last 24 hours',
'7d': 'Last 7 days',
'30d': 'Last 30 days'
},
openaiTokenStats: {
title: 'OpenAI Token Request Stats',
viewModeTopN: 'TopN',
viewModePagination: 'Pagination',
prevPage: 'Previous',
nextPage: 'Next',
pageInfo: 'Page {page}/{total}',
totalModels: 'Total models: {total}',
failedToLoad: 'Failed to load OpenAI token stats',
empty: 'No OpenAI token stats for the current filters',
table: {
model: 'Model',
requestCount: 'Requests',
avgTokensPerSec: 'Avg Tokens/sec',
avgFirstTokenMs: 'Avg First Token Latency (ms)',
totalOutputTokens: 'Total Output Tokens',
avgDurationMs: 'Avg Duration (ms)',
requestsWithFirstToken: 'Requests With First Token'
}
},
fullscreen: {
enter: 'Enter Fullscreen'
},

View File

@@ -2675,12 +2675,34 @@ export default {
'5m': '近5分钟',
'30m': '近30分钟',
'1h': '近1小时',
'1d': '近1天',
'15d': '近15天',
'6h': '近6小时',
'24h': '近24小时',
'7d': '近7天',
'30d': '近30天',
custom: '自定义'
},
openaiTokenStats: {
title: 'OpenAI Token 请求统计',
viewModeTopN: 'TopN',
viewModePagination: '分页',
prevPage: '上一页',
nextPage: '下一页',
pageInfo: '第 {page}/{total} 页',
totalModels: '模型总数:{total}',
failedToLoad: '加载 OpenAI Token 统计失败',
empty: '当前筛选条件下暂无 OpenAI Token 请求统计数据',
table: {
model: '模型',
requestCount: '请求数',
avgTokensPerSec: '平均 Tokens/秒',
avgFirstTokenMs: '平均首 Token 延迟(ms)',
totalOutputTokens: '输出 Token 总数',
avgDurationMs: '平均时长(ms)',
requestsWithFirstToken: '首 Token 样本数'
}
},
customTimeRange: {
startTime: '开始时间',
endTime: '结束时间'

View File

@@ -84,6 +84,15 @@
/>
</div>
<!-- Row: OpenAI Token Stats -->
<div v-if="opsEnabled && !(loading && !hasLoadedOnce)" class="grid grid-cols-1 gap-6">
<OpsOpenAITokenStatsCard
:platform-filter="platform"
:group-id-filter="groupId"
:refresh-token="dashboardRefreshToken"
/>
</div>
<!-- Alert Events -->
<OpsAlertEventsCard v-if="opsEnabled && !(loading && !hasLoadedOnce)" />
@@ -148,6 +157,7 @@ import OpsLatencyChart from './components/OpsLatencyChart.vue'
import OpsThroughputTrendChart from './components/OpsThroughputTrendChart.vue'
import OpsSwitchRateTrendChart from './components/OpsSwitchRateTrendChart.vue'
import OpsAlertEventsCard from './components/OpsAlertEventsCard.vue'
import OpsOpenAITokenStatsCard from './components/OpsOpenAITokenStatsCard.vue'
import OpsRequestDetailsModal, { type OpsRequestDetailsPreset } from './components/OpsRequestDetailsModal.vue'
import OpsSettingsDialog from './components/OpsSettingsDialog.vue'
import OpsAlertRulesCard from './components/OpsAlertRulesCard.vue'

View File

@@ -0,0 +1,246 @@
<script setup lang="ts">
import { computed, ref, watch } from 'vue'
import { useI18n } from 'vue-i18n'
import Select from '@/components/common/Select.vue'
import EmptyState from '@/components/common/EmptyState.vue'
import { opsAPI, type OpsOpenAITokenStatsResponse, type OpsOpenAITokenStatsTimeRange } from '@/api/admin/ops'
import { formatNumber } from '@/utils/format'
interface Props {
platformFilter?: string
groupIdFilter?: number | null
refreshToken: number
}
type ViewMode = 'topn' | 'pagination'
const props = withDefaults(defineProps<Props>(), {
platformFilter: '',
groupIdFilter: null
})
const { t } = useI18n()
const loading = ref(false)
const errorMessage = ref('')
const response = ref<OpsOpenAITokenStatsResponse | null>(null)
const timeRange = ref<OpsOpenAITokenStatsTimeRange>('30d')
const viewMode = ref<ViewMode>('topn')
const topN = ref<number>(20)
const page = ref<number>(1)
const pageSize = ref<number>(20)
const items = computed(() => response.value?.items ?? [])
const total = computed(() => response.value?.total ?? 0)
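// TopN mode always renders a single page; pagination derives the page count from the server-reported total.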
const totalPages = computed(() => {
if (viewMode.value !== 'pagination') return 1
const size = pageSize.value > 0 ? pageSize.value : 20
return Math.max(1, Math.ceil(total.value / size))
})
const timeRangeOptions = computed(() => [
{ value: '30m', label: t('admin.ops.timeRange.30m') },
{ value: '1h', label: t('admin.ops.timeRange.1h') },
{ value: '1d', label: t('admin.ops.timeRange.1d') },
{ value: '15d', label: t('admin.ops.timeRange.15d') },
{ value: '30d', label: t('admin.ops.timeRange.30d') }
])
const viewModeOptions = computed(() => [
{ value: 'topn', label: t('admin.ops.openaiTokenStats.viewModeTopN') },
{ value: 'pagination', label: t('admin.ops.openaiTokenStats.viewModePagination') }
])
const topNOptions = computed(() => [
{ value: 10, label: 'Top 10' },
{ value: 20, label: 'Top 20' },
{ value: 50, label: 'Top 50' },
{ value: 100, label: 'Top 100' }
])
const pageSizeOptions = computed(() => [
{ value: 10, label: '10' },
{ value: 20, label: '20' },
{ value: 50, label: '50' },
{ value: 100, label: '100' }
])
function formatRate(v?: number | null): string {
if (typeof v !== 'number' || !Number.isFinite(v)) return '-'
return v.toFixed(2)
}
function formatInt(v?: number | null): string {
if (typeof v !== 'number' || !Number.isFinite(v)) return '-'
return formatNumber(Math.round(v))
}
function buildParams() {
const params: Record<string, any> = {
time_range: timeRange.value,
platform: props.platformFilter || undefined,
group_id: typeof props.groupIdFilter === 'number' && props.groupIdFilter > 0 ? props.groupIdFilter : undefined
}
if (viewMode.value === 'topn') {
params.top_n = topN.value
} else {
params.page = page.value
params.page_size = pageSize.value
}
return params
}
async function loadData() {
loading.value = true
errorMessage.value = ''
try {
response.value = await opsAPI.getOpenAITokenStats(buildParams())
// Defensive: if a changed total pushes the current page past the last page, fall back to the last page and refetch once.
if (viewMode.value === 'pagination' && page.value > totalPages.value) {
page.value = totalPages.value
response.value = await opsAPI.getOpenAITokenStats(buildParams())
}
} catch (err: any) {
console.error('[OpsOpenAITokenStatsCard] Failed to load data', err)
response.value = null
errorMessage.value = err?.message || t('admin.ops.openaiTokenStats.failedToLoad')
} finally {
loading.value = false
}
}
watch(
() => ({
timeRange: timeRange.value,
viewMode: viewMode.value,
topN: topN.value,
page: page.value,
pageSize: pageSize.value,
platform: props.platformFilter,
groupId: props.groupIdFilter,
refreshToken: props.refreshToken
}),
(next, prev) => {
// Avoid "filter change -> page reset -> two requests":
// first reset only the page, then let the next watch tick (page change only) issue the request.
const filtersChanged = !prev ||
next.timeRange !== prev.timeRange ||
next.viewMode !== prev.viewMode ||
next.pageSize !== prev.pageSize ||
next.platform !== prev.platform ||
next.groupId !== prev.groupId ||
next.refreshToken !== prev.refreshToken
if (next.viewMode === 'pagination' && filtersChanged && next.page !== 1) {
page.value = 1
return
}
void loadData()
},
{ immediate: true }
)
function onPrevPage() {
if (viewMode.value !== 'pagination') return
if (page.value > 1) page.value -= 1
}
function onNextPage() {
if (viewMode.value !== 'pagination') return
if (page.value < totalPages.value) page.value += 1
}
</script>
<template>
<section class="card p-4 md:p-5">
<div class="mb-4 flex flex-wrap items-center justify-between gap-3">
<h3 class="text-sm font-bold text-gray-900 dark:text-white">
{{ t('admin.ops.openaiTokenStats.title') }}
</h3>
<div class="flex flex-wrap items-center gap-2">
<div class="w-36">
<Select v-model="timeRange" :options="timeRangeOptions" />
</div>
<div class="w-36">
<Select v-model="viewMode" :options="viewModeOptions" />
</div>
<div v-if="viewMode === 'topn'" class="w-28">
<Select v-model="topN" :options="topNOptions" />
</div>
<template v-else>
<div class="w-24">
<Select v-model="pageSize" :options="pageSizeOptions" />
</div>
<button
class="btn btn-secondary btn-sm"
:disabled="loading || page <= 1"
@click="onPrevPage"
>
{{ t('admin.ops.openaiTokenStats.prevPage') }}
</button>
<button
class="btn btn-secondary btn-sm"
:disabled="loading || page >= totalPages"
@click="onNextPage"
>
{{ t('admin.ops.openaiTokenStats.nextPage') }}
</button>
<span class="text-xs text-gray-500 dark:text-gray-400">
{{ t('admin.ops.openaiTokenStats.pageInfo', { page, total: totalPages }) }}
</span>
</template>
</div>
</div>
<div v-if="errorMessage" class="mb-4 rounded-lg bg-red-50 px-3 py-2 text-xs text-red-600 dark:bg-red-900/20 dark:text-red-400">
{{ errorMessage }}
</div>
<div v-if="loading" class="py-8 text-center text-sm text-gray-500 dark:text-gray-400">
{{ t('admin.ops.loadingText') }}
</div>
<EmptyState
v-else-if="items.length === 0"
:title="t('common.noData')"
:description="t('admin.ops.openaiTokenStats.empty')"
/>
<div v-else class="overflow-x-auto">
<table class="min-w-full text-left text-xs md:text-sm">
<thead>
<tr class="border-b border-gray-200 text-gray-500 dark:border-dark-700 dark:text-gray-400">
<th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.model') }}</th>
<th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.requestCount') }}</th>
<th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.avgTokensPerSec') }}</th>
<th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.avgFirstTokenMs') }}</th>
<th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.totalOutputTokens') }}</th>
<th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.avgDurationMs') }}</th>
<th class="px-2 py-2 font-semibold">{{ t('admin.ops.openaiTokenStats.table.requestsWithFirstToken') }}</th>
</tr>
</thead>
<tbody>
<tr
v-for="row in items"
:key="row.model"
class="border-b border-gray-100 text-gray-700 dark:border-dark-800 dark:text-gray-200"
>
<td class="px-2 py-2 font-medium">{{ row.model }}</td>
<td class="px-2 py-2">{{ formatInt(row.request_count) }}</td>
<td class="px-2 py-2">{{ formatRate(row.avg_tokens_per_sec) }}</td>
<td class="px-2 py-2">{{ formatRate(row.avg_first_token_ms) }}</td>
<td class="px-2 py-2">{{ formatInt(row.total_output_tokens) }}</td>
<td class="px-2 py-2">{{ formatInt(row.avg_duration_ms) }}</td>
<td class="px-2 py-2">{{ formatInt(row.requests_with_first_token) }}</td>
</tr>
</tbody>
</table>
<div v-if="viewMode === 'topn'" class="mt-3 text-xs text-gray-500 dark:text-gray-400">
{{ t('admin.ops.openaiTokenStats.totalModels', { total }) }}
</div>
</div>
</section>
</template>

View File

@@ -0,0 +1,215 @@
import { describe, it, expect, beforeEach, vi } from 'vitest'
import { defineComponent } from 'vue'
import { flushPromises, mount } from '@vue/test-utils'
import OpsOpenAITokenStatsCard from '../OpsOpenAITokenStatsCard.vue'
const mockGetOpenAITokenStats = vi.fn()
vi.mock('@/api/admin/ops', () => ({
opsAPI: {
getOpenAITokenStats: (...args: any[]) => mockGetOpenAITokenStats(...args),
},
}))
vi.mock('vue-i18n', async (importOriginal) => {
const actual = await importOriginal<typeof import('vue-i18n')>()
return {
...actual,
useI18n: () => ({
t: (key: string, params?: Record<string, any>) => {
if (key === 'admin.ops.openaiTokenStats.pageInfo' && params) {
return `${params.page}/${params.total}`
}
return key
},
}),
}
})
const SelectStub = defineComponent({
name: 'SelectControlStub',
props: {
modelValue: {
type: [String, Number],
default: '',
},
},
emits: ['update:modelValue'],
template: '<div class="select-stub" />',
})
const EmptyStateStub = defineComponent({
name: 'EmptyState',
props: {
title: { type: String, default: '' },
description: { type: String, default: '' },
},
template: '<div class="empty-state">{{ title }}|{{ description }}</div>',
})
const sampleResponse = {
time_range: '30d' as const,
start_time: '2026-01-01T00:00:00Z',
end_time: '2026-01-31T00:00:00Z',
platform: 'openai',
group_id: 7,
items: [
{
model: 'gpt-4o-mini',
request_count: 12,
avg_tokens_per_sec: 22.5,
avg_first_token_ms: 123.45,
total_output_tokens: 1234,
avg_duration_ms: 321,
requests_with_first_token: 10,
},
],
total: 40,
page: 1,
page_size: 20,
top_n: null,
}
describe('OpsOpenAITokenStatsCard', () => {
beforeEach(() => {
vi.clearAllMocks()
})
it('loads by default, forwards platform/group filters, and supports switching the time window', async () => {
mockGetOpenAITokenStats.mockResolvedValue(sampleResponse)
const wrapper = mount(OpsOpenAITokenStatsCard, {
props: {
platformFilter: 'openai',
groupIdFilter: 7,
refreshToken: 0,
},
global: {
stubs: {
Select: SelectStub,
EmptyState: EmptyStateStub,
},
},
})
await flushPromises()
expect(mockGetOpenAITokenStats).toHaveBeenCalledWith(
expect.objectContaining({
time_range: '30d',
platform: 'openai',
group_id: 7,
top_n: 20,
})
)
const selects = wrapper.findAllComponents(SelectStub)
await selects[0].vm.$emit('update:modelValue', '1h')
await flushPromises()
expect(mockGetOpenAITokenStats).toHaveBeenCalledWith(
expect.objectContaining({
time_range: '1h',
platform: 'openai',
group_id: 7,
})
)
})
it('supports switching between pagination and TopN modes and requests with matching params', async () => {
mockGetOpenAITokenStats.mockImplementation(async (params: Record<string, any>) => ({
...sampleResponse,
time_range: params.time_range ?? '30d',
page: params.page ?? 1,
page_size: params.page_size ?? 20,
top_n: params.top_n ?? null,
total: 40,
}))
const wrapper = mount(OpsOpenAITokenStatsCard, {
props: {
refreshToken: 0,
},
global: {
stubs: {
Select: SelectStub,
EmptyState: EmptyStateStub,
},
},
})
await flushPromises()
let selects = wrapper.findAllComponents(SelectStub)
await selects[1].vm.$emit('update:modelValue', 'pagination')
await flushPromises()
expect(mockGetOpenAITokenStats).toHaveBeenCalledWith(
expect.objectContaining({
page: 1,
page_size: 20,
})
)
const buttons = wrapper.findAll('button')
expect(buttons.length).toBeGreaterThanOrEqual(2)
await buttons[1].trigger('click')
await flushPromises()
expect(mockGetOpenAITokenStats).toHaveBeenCalledWith(
expect.objectContaining({
page: 2,
page_size: 20,
})
)
selects = wrapper.findAllComponents(SelectStub)
await selects[1].vm.$emit('update:modelValue', 'topn')
await flushPromises()
selects = wrapper.findAllComponents(SelectStub)
await selects[2].vm.$emit('update:modelValue', 50)
await flushPromises()
expect(mockGetOpenAITokenStats).toHaveBeenCalledWith(
expect.objectContaining({
top_n: 50,
})
)
})
it('shows the empty state when the API returns no data', async () => {
mockGetOpenAITokenStats.mockResolvedValue({
...sampleResponse,
items: [],
total: 0,
})
const wrapper = mount(OpsOpenAITokenStatsCard, {
props: { refreshToken: 0 },
global: {
stubs: {
Select: SelectStub,
EmptyState: EmptyStateStub,
},
},
})
await flushPromises()
expect(wrapper.find('.empty-state').exists()).toBe(true)
})
it('shows an error message when the API call fails', async () => {
mockGetOpenAITokenStats.mockRejectedValue(new Error('load failed'))
const wrapper = mount(OpsOpenAITokenStatsCard, {
props: { refreshToken: 0 },
global: {
stubs: {
Select: SelectStub,
EmptyState: EmptyStateStub,
},
},
})
await flushPromises()
expect(wrapper.text()).toContain('load failed')
})
})