feat(ops): 运维监控新增 OpenAI Token 请求统计表
- 新增管理端接口 /api/v1/admin/ops/dashboard/openai-token-stats,按模型聚合统计 gpt% 请求
- 支持 time_range=30m|1h|1d|15d|30d(默认 30d),支持 platform/group_id 过滤
- 支持分页(page/page_size)或 TopN(top_n)互斥查询
- 前端运维监控页新增统计表卡片,包含空态/错误态与分页/TopN 交互
- 补齐后端与前端测试
This commit is contained in:
55
backend/internal/service/ops_openai_token_stats.go
Normal file
55
backend/internal/service/ops_openai_token_stats.go
Normal file
@@ -0,0 +1,55 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
|
||||
)
|
||||
|
||||
func (s *OpsService) GetOpenAITokenStats(ctx context.Context, filter *OpsOpenAITokenStatsFilter) (*OpsOpenAITokenStatsResponse, error) {
|
||||
if err := s.RequireMonitoringEnabled(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s.opsRepo == nil {
|
||||
return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
|
||||
}
|
||||
if filter == nil {
|
||||
return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required")
|
||||
}
|
||||
if filter.StartTime.IsZero() || filter.EndTime.IsZero() {
|
||||
return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required")
|
||||
}
|
||||
if filter.StartTime.After(filter.EndTime) {
|
||||
return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time")
|
||||
}
|
||||
|
||||
if filter.GroupID != nil && *filter.GroupID <= 0 {
|
||||
return nil, infraerrors.BadRequest("OPS_GROUP_ID_INVALID", "group_id must be > 0")
|
||||
}
|
||||
|
||||
// top_n cannot be mixed with page/page_size params.
|
||||
if filter.TopN > 0 && (filter.Page > 0 || filter.PageSize > 0) {
|
||||
return nil, infraerrors.BadRequest("OPS_PAGINATION_CONFLICT", "top_n cannot be used with page/page_size")
|
||||
}
|
||||
|
||||
if filter.TopN > 0 {
|
||||
if filter.TopN < 1 || filter.TopN > 100 {
|
||||
return nil, infraerrors.BadRequest("OPS_TOPN_INVALID", "top_n must be between 1 and 100")
|
||||
}
|
||||
} else {
|
||||
if filter.Page <= 0 {
|
||||
filter.Page = 1
|
||||
}
|
||||
if filter.PageSize <= 0 {
|
||||
filter.PageSize = 20
|
||||
}
|
||||
if filter.Page < 1 {
|
||||
return nil, infraerrors.BadRequest("OPS_PAGE_INVALID", "page must be >= 1")
|
||||
}
|
||||
if filter.PageSize < 1 || filter.PageSize > 100 {
|
||||
return nil, infraerrors.BadRequest("OPS_PAGE_SIZE_INVALID", "page_size must be between 1 and 100")
|
||||
}
|
||||
}
|
||||
|
||||
return s.opsRepo.GetOpenAITokenStats(ctx, filter)
|
||||
}
|
||||
54
backend/internal/service/ops_openai_token_stats_models.go
Normal file
54
backend/internal/service/ops_openai_token_stats_models.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package service
|
||||
|
||||
import "time"
|
||||
|
||||
// OpsOpenAITokenStatsFilter carries the query parameters for the OpenAI token
// statistics endpoint. Exactly one listing mode applies per query: pagination
// (Page/PageSize, the default) or TopN — the service rejects a mix of both.
type OpsOpenAITokenStatsFilter struct {
	TimeRange string
	StartTime time.Time
	EndTime   time.Time

	Platform string
	GroupID  *int64

	// Pagination mode (default): page/page_size
	Page     int
	PageSize int

	// TopN mode: top_n
	TopN int
}

// IsTopNMode reports whether the filter selects TopN mode instead of
// pagination. A nil receiver is safely treated as "not TopN".
func (f *OpsOpenAITokenStatsFilter) IsTopNMode() bool {
	if f == nil {
		return false
	}
	return f.TopN > 0
}
|
||||
|
||||
// OpsOpenAITokenStatsItem is one aggregated row of the statistics table:
// request/token counters for a single model within the queried time window.
type OpsOpenAITokenStatsItem struct {
	Model        string `json:"model"`
	RequestCount int64  `json:"request_count"`
	// Pointer-typed averages: presumably nil marks "no measurable requests"
	// as opposed to a literal zero average — confirm against the repository
	// aggregation query. NOTE(review): TODO confirm.
	AvgTokensPerSec   *float64 `json:"avg_tokens_per_sec"`
	AvgFirstTokenMs   *float64 `json:"avg_first_token_ms"`
	TotalOutputTokens int64    `json:"total_output_tokens"`
	AvgDurationMs     int64    `json:"avg_duration_ms"`
	// Count of requests that recorded a first-token measurement; may be
	// smaller than RequestCount.
	RequestsWithFirstToken int64 `json:"requests_with_first_token"`
}
|
||||
|
||||
// OpsOpenAITokenStatsResponse is the payload returned by the OpenAI token
// statistics endpoint: the effective query window/filters echoed back, the
// aggregated per-model rows, and the metadata of whichever listing mode
// (pagination or TopN) was used.
type OpsOpenAITokenStatsResponse struct {
	TimeRange string    `json:"time_range"`
	StartTime time.Time `json:"start_time"`
	EndTime   time.Time `json:"end_time"`

	// Optional filters echoed back; omitted from JSON when unset.
	Platform string `json:"platform,omitempty"`
	GroupID  *int64 `json:"group_id,omitempty"`

	Items []*OpsOpenAITokenStatsItem `json:"items"`

	// Total model rows before pagination/topN trimming.
	Total int64 `json:"total"`

	// Pagination mode metadata.
	Page     int `json:"page,omitempty"`
	PageSize int `json:"page_size,omitempty"`

	// TopN mode metadata. Pointer so the field is absent (not 0) in
	// pagination mode.
	TopN *int `json:"top_n,omitempty"`
}
|
||||
162
backend/internal/service/ops_openai_token_stats_test.go
Normal file
162
backend/internal/service/ops_openai_token_stats_test.go
Normal file
@@ -0,0 +1,162 @@
|
||||
package service
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type openAITokenStatsRepoStub struct {
|
||||
OpsRepository
|
||||
resp *OpsOpenAITokenStatsResponse
|
||||
err error
|
||||
captured *OpsOpenAITokenStatsFilter
|
||||
}
|
||||
|
||||
func (s *openAITokenStatsRepoStub) GetOpenAITokenStats(ctx context.Context, filter *OpsOpenAITokenStatsFilter) (*OpsOpenAITokenStatsResponse, error) {
|
||||
s.captured = filter
|
||||
if s.err != nil {
|
||||
return nil, s.err
|
||||
}
|
||||
if s.resp != nil {
|
||||
return s.resp, nil
|
||||
}
|
||||
return &OpsOpenAITokenStatsResponse{}, nil
|
||||
}
|
||||
|
||||
func TestOpsServiceGetOpenAITokenStats_Validation(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
filter *OpsOpenAITokenStatsFilter
|
||||
wantCode int
|
||||
wantReason string
|
||||
}{
|
||||
{
|
||||
name: "filter 不能为空",
|
||||
filter: nil,
|
||||
wantCode: 400,
|
||||
wantReason: "OPS_FILTER_REQUIRED",
|
||||
},
|
||||
{
|
||||
name: "start_time/end_time 必填",
|
||||
filter: &OpsOpenAITokenStatsFilter{
|
||||
StartTime: time.Time{},
|
||||
EndTime: now,
|
||||
},
|
||||
wantCode: 400,
|
||||
wantReason: "OPS_TIME_RANGE_REQUIRED",
|
||||
},
|
||||
{
|
||||
name: "start_time 不能晚于 end_time",
|
||||
filter: &OpsOpenAITokenStatsFilter{
|
||||
StartTime: now,
|
||||
EndTime: now.Add(-1 * time.Minute),
|
||||
},
|
||||
wantCode: 400,
|
||||
wantReason: "OPS_TIME_RANGE_INVALID",
|
||||
},
|
||||
{
|
||||
name: "group_id 必须大于 0",
|
||||
filter: &OpsOpenAITokenStatsFilter{
|
||||
StartTime: now.Add(-time.Hour),
|
||||
EndTime: now,
|
||||
GroupID: int64Ptr(0),
|
||||
},
|
||||
wantCode: 400,
|
||||
wantReason: "OPS_GROUP_ID_INVALID",
|
||||
},
|
||||
{
|
||||
name: "top_n 与分页参数互斥",
|
||||
filter: &OpsOpenAITokenStatsFilter{
|
||||
StartTime: now.Add(-time.Hour),
|
||||
EndTime: now,
|
||||
TopN: 10,
|
||||
Page: 1,
|
||||
},
|
||||
wantCode: 400,
|
||||
wantReason: "OPS_PAGINATION_CONFLICT",
|
||||
},
|
||||
{
|
||||
name: "top_n 参数越界",
|
||||
filter: &OpsOpenAITokenStatsFilter{
|
||||
StartTime: now.Add(-time.Hour),
|
||||
EndTime: now,
|
||||
TopN: 101,
|
||||
},
|
||||
wantCode: 400,
|
||||
wantReason: "OPS_TOPN_INVALID",
|
||||
},
|
||||
{
|
||||
name: "page_size 参数越界",
|
||||
filter: &OpsOpenAITokenStatsFilter{
|
||||
StartTime: now.Add(-time.Hour),
|
||||
EndTime: now,
|
||||
Page: 1,
|
||||
PageSize: 101,
|
||||
},
|
||||
wantCode: 400,
|
||||
wantReason: "OPS_PAGE_SIZE_INVALID",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
svc := &OpsService{
|
||||
opsRepo: &openAITokenStatsRepoStub{},
|
||||
}
|
||||
|
||||
_, err := svc.GetOpenAITokenStats(context.Background(), tt.filter)
|
||||
require.Error(t, err)
|
||||
require.Equal(t, tt.wantCode, infraerrors.Code(err))
|
||||
require.Equal(t, tt.wantReason, infraerrors.Reason(err))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOpsServiceGetOpenAITokenStats_DefaultPagination(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
repo := &openAITokenStatsRepoStub{
|
||||
resp: &OpsOpenAITokenStatsResponse{
|
||||
Items: []*OpsOpenAITokenStatsItem{
|
||||
{Model: "gpt-4o-mini", RequestCount: 10},
|
||||
},
|
||||
Total: 1,
|
||||
},
|
||||
}
|
||||
svc := &OpsService{opsRepo: repo}
|
||||
|
||||
filter := &OpsOpenAITokenStatsFilter{
|
||||
TimeRange: "30d",
|
||||
StartTime: now.Add(-30 * 24 * time.Hour),
|
||||
EndTime: now,
|
||||
}
|
||||
resp, err := svc.GetOpenAITokenStats(context.Background(), filter)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, resp)
|
||||
require.NotNil(t, repo.captured)
|
||||
require.Equal(t, 1, repo.captured.Page)
|
||||
require.Equal(t, 20, repo.captured.PageSize)
|
||||
require.Equal(t, 0, repo.captured.TopN)
|
||||
}
|
||||
|
||||
func TestOpsServiceGetOpenAITokenStats_RepoUnavailable(t *testing.T) {
|
||||
now := time.Now().UTC()
|
||||
svc := &OpsService{}
|
||||
|
||||
_, err := svc.GetOpenAITokenStats(context.Background(), &OpsOpenAITokenStatsFilter{
|
||||
TimeRange: "1h",
|
||||
StartTime: now.Add(-time.Hour),
|
||||
EndTime: now,
|
||||
TopN: 10,
|
||||
})
|
||||
require.Error(t, err)
|
||||
require.Equal(t, 503, infraerrors.Code(err))
|
||||
require.Equal(t, "OPS_REPO_UNAVAILABLE", infraerrors.Reason(err))
|
||||
}
|
||||
|
||||
// int64Ptr returns a pointer to a copy of v; handy for optional filter fields.
func int64Ptr(v int64) *int64 {
	p := v
	return &p
}
|
||||
@@ -27,6 +27,7 @@ type OpsRepository interface {
|
||||
GetLatencyHistogram(ctx context.Context, filter *OpsDashboardFilter) (*OpsLatencyHistogramResponse, error)
|
||||
GetErrorTrend(ctx context.Context, filter *OpsDashboardFilter, bucketSeconds int) (*OpsErrorTrendResponse, error)
|
||||
GetErrorDistribution(ctx context.Context, filter *OpsDashboardFilter) (*OpsErrorDistributionResponse, error)
|
||||
GetOpenAITokenStats(ctx context.Context, filter *OpsOpenAITokenStatsFilter) (*OpsOpenAITokenStatsResponse, error)
|
||||
|
||||
InsertSystemMetrics(ctx context.Context, input *OpsInsertSystemMetricsInput) error
|
||||
GetLatestSystemMetrics(ctx context.Context, windowMinutes int) (*OpsSystemMetricsSnapshot, error)
|
||||
|
||||
Reference in New Issue
Block a user