diff --git a/backend/internal/repository/ops_repo_metrics.go b/backend/internal/repository/ops_repo_metrics.go
index bc80ed6e..713e0eb9 100644
--- a/backend/internal/repository/ops_repo_metrics.go
+++ b/backend/internal/repository/ops_repo_metrics.go
@@ -296,9 +296,10 @@ INSERT INTO ops_job_heartbeats (
     last_error_at,
     last_error,
     last_duration_ms,
+    last_result,
     updated_at
 ) VALUES (
-    $1,$2,$3,$4,$5,$6,NOW()
+    $1,$2,$3,$4,$5,$6,$7,NOW()
 )
 ON CONFLICT (job_name) DO UPDATE SET
     last_run_at = COALESCE(EXCLUDED.last_run_at, ops_job_heartbeats.last_run_at),
@@ -312,6 +313,10 @@ ON CONFLICT (job_name) DO UPDATE SET
         ELSE COALESCE(EXCLUDED.last_error, ops_job_heartbeats.last_error)
     END,
     last_duration_ms = COALESCE(EXCLUDED.last_duration_ms, ops_job_heartbeats.last_duration_ms),
+    last_result = CASE
+        WHEN EXCLUDED.last_success_at IS NOT NULL THEN COALESCE(EXCLUDED.last_result, ops_job_heartbeats.last_result)
+        ELSE ops_job_heartbeats.last_result
+    END,
     updated_at = NOW()`
 
 	_, err := r.db.ExecContext(
@@ -323,6 +328,7 @@ ON CONFLICT (job_name) DO UPDATE SET
 		opsNullTime(input.LastErrorAt),
 		opsNullString(input.LastError),
 		opsNullInt(input.LastDurationMs),
+		opsNullString(input.LastResult),
 	)
 	return err
 }
@@ -340,6 +346,7 @@ SELECT
     last_error_at,
     last_error,
     last_duration_ms,
+    last_result,
     updated_at
 FROM ops_job_heartbeats
 ORDER BY job_name ASC`
@@ -359,6 +366,8 @@ ORDER BY job_name ASC`
 	var lastError sql.NullString
 	var lastDuration sql.NullInt64
+	var lastResult sql.NullString
+
 	if err := rows.Scan(
 		&item.JobName,
 		&lastRun,
@@ -366,6 +375,7 @@ ORDER BY job_name ASC`
 		&lastErrorAt,
 		&lastError,
 		&lastDuration,
+		&lastResult,
 		&item.UpdatedAt,
 	); err != nil {
 		return nil, err
@@ -391,6 +401,10 @@ ORDER BY job_name ASC`
 		v := lastDuration.Int64
 		item.LastDurationMs = &v
 	}
+	if lastResult.Valid {
+		v := lastResult.String
+		item.LastResult = &v
+	}
 
 	out = append(out, &item)
 }
diff --git a/backend/internal/service/ops_aggregation_service.go b/backend/internal/service/ops_aggregation_service.go
index 2a6afbba..972462ec 100644
--- a/backend/internal/service/ops_aggregation_service.go
+++ b/backend/internal/service/ops_aggregation_service.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"database/sql"
 	"errors"
+	"fmt"
 	"log"
 	"strings"
 	"sync"
@@ -235,11 +236,13 @@ func (s *OpsAggregationService) aggregateHourly() {
 	successAt := finishedAt
 	hbCtx, hbCancel := context.WithTimeout(context.Background(), 2*time.Second)
 	defer hbCancel()
+	result := truncateString(fmt.Sprintf("window=%s..%s", start.Format(time.RFC3339), end.Format(time.RFC3339)), 2048)
 	_ = s.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{
 		JobName:        opsAggHourlyJobName,
 		LastRunAt:      &runAt,
 		LastSuccessAt:  &successAt,
 		LastDurationMs: &dur,
+		LastResult:     &result,
 	})
 }
@@ -331,11 +334,13 @@ func (s *OpsAggregationService) aggregateDaily() {
 	successAt := finishedAt
 	hbCtx, hbCancel := context.WithTimeout(context.Background(), 2*time.Second)
 	defer hbCancel()
+	result := truncateString(fmt.Sprintf("window=%s..%s", start.Format(time.RFC3339), end.Format(time.RFC3339)), 2048)
 	_ = s.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{
 		JobName:        opsAggDailyJobName,
 		LastRunAt:      &runAt,
 		LastSuccessAt:  &successAt,
 		LastDurationMs: &dur,
+		LastResult:     &result,
 	})
 }
diff --git a/backend/internal/service/ops_alert_evaluator_service.go b/backend/internal/service/ops_alert_evaluator_service.go
index 2b619f4d..7c62e247 100644
--- a/backend/internal/service/ops_alert_evaluator_service.go
+++ b/backend/internal/service/ops_alert_evaluator_service.go
@@ -190,6 +190,13 @@ func (s *OpsAlertEvaluatorService) evaluateOnce(interval time.Duration) {
 		return
 	}
+	rulesTotal := len(rules)
+	rulesEnabled := 0
+	rulesEvaluated := 0
+	eventsCreated := 0
+	eventsResolved := 0
+	emailsSent := 0
+
 	now := time.Now().UTC()
 	safeEnd := now.Truncate(time.Minute)
 	if safeEnd.IsZero() {
@@ -205,6 +212,7 @@ func (s *OpsAlertEvaluatorService) evaluateOnce(interval time.Duration) {
 		if rule == nil || !rule.Enabled || rule.ID <= 0 {
 			continue
 		}
+		rulesEnabled++
 
 		scopePlatform, scopeGroupID, scopeRegion := parseOpsAlertRuleScope(rule.Filters)
 
@@ -220,6 +228,7 @@ func (s *OpsAlertEvaluatorService) evaluateOnce(interval time.Duration) {
 			s.resetRuleState(rule.ID, now)
 			continue
 		}
+		rulesEvaluated++
 
 		breachedNow := compareMetric(metricValue, rule.Operator, rule.Threshold)
 		required := requiredSustainedBreaches(rule.SustainedMinutes, interval)
@@ -278,8 +287,11 @@ func (s *OpsAlertEvaluatorService) evaluateOnce(interval time.Duration) {
 				continue
 			}
 
+			eventsCreated++
 			if created != nil && created.ID > 0 {
-				s.maybeSendAlertEmail(ctx, runtimeCfg, rule, created)
+				if s.maybeSendAlertEmail(ctx, runtimeCfg, rule, created) {
+					emailsSent++
+				}
 			}
 			continue
 		}
@@ -289,11 +301,14 @@ func (s *OpsAlertEvaluatorService) evaluateOnce(interval time.Duration) {
 			resolvedAt := now
 			if err := s.opsRepo.UpdateAlertEventStatus(ctx, activeEvent.ID, OpsAlertStatusResolved, &resolvedAt); err != nil {
 				log.Printf("[OpsAlertEvaluator] resolve event failed (event=%d): %v", activeEvent.ID, err)
+			} else {
+				eventsResolved++
 			}
 		}
 	}
 
-	s.recordHeartbeatSuccess(runAt, time.Since(startedAt))
+	result := truncateString(fmt.Sprintf("rules=%d enabled=%d evaluated=%d created=%d resolved=%d emails_sent=%d", rulesTotal, rulesEnabled, rulesEvaluated, eventsCreated, eventsResolved, emailsSent), 2048)
+	s.recordHeartbeatSuccess(runAt, time.Since(startedAt), result)
 }
 
 func (s *OpsAlertEvaluatorService) pruneRuleStates(rules []*OpsAlertRule) {
@@ -585,32 +600,32 @@ func buildOpsAlertDescription(rule *OpsAlertRule, value float64, windowMinutes i
 	)
 }
 
-func (s *OpsAlertEvaluatorService) maybeSendAlertEmail(ctx context.Context, runtimeCfg *OpsAlertRuntimeSettings, rule *OpsAlertRule, event *OpsAlertEvent) {
+func (s *OpsAlertEvaluatorService) maybeSendAlertEmail(ctx context.Context, runtimeCfg *OpsAlertRuntimeSettings, rule *OpsAlertRule, event *OpsAlertEvent) bool {
 	if s == nil || s.emailService == nil || s.opsService == nil || event == nil || rule == nil {
-		return
+		return false
 	}
 	if event.EmailSent {
-		return
+		return false
 	}
 	if !rule.NotifyEmail {
-		return
+		return false
 	}
 
 	emailCfg, err := s.opsService.GetEmailNotificationConfig(ctx)
 	if err != nil || emailCfg == nil || !emailCfg.Alert.Enabled {
-		return
+		return false
 	}
 	if len(emailCfg.Alert.Recipients) == 0 {
-		return
+		return false
 	}
 	if !shouldSendOpsAlertEmailByMinSeverity(strings.TrimSpace(emailCfg.Alert.MinSeverity), strings.TrimSpace(rule.Severity)) {
-		return
+		return false
 	}
 
 	if runtimeCfg != nil && runtimeCfg.Silencing.Enabled {
 		if isOpsAlertSilenced(time.Now().UTC(), rule, event, runtimeCfg.Silencing) {
-			return
+			return false
 		}
 	}
 
@@ -639,6 +654,7 @@ func (s *OpsAlertEvaluatorService) maybeSendAlertEmail(ctx context.Context, runt
 	if anySent {
 		_ = s.opsRepo.UpdateAlertEventEmailSent(context.Background(), event.ID, true)
 	}
+	return anySent
 }
 
 func buildOpsAlertEmailBody(rule *OpsAlertRule, event *OpsAlertEvent) string {
@@ -806,7 +822,7 @@ func (s *OpsAlertEvaluatorService) maybeLogSkip(key string) {
 	log.Printf("[OpsAlertEvaluator] leader lock held by another instance; skipping (key=%q)", key)
 }
 
-func (s *OpsAlertEvaluatorService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration) {
+func (s *OpsAlertEvaluatorService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration, result string) {
 	if s == nil || s.opsRepo == nil {
 		return
 	}
@@ -814,11 +830,17 @@ func (s *OpsAlertEvaluatorService) recordHeartbeatSuccess(runAt time.Time, durat
 	durMs := duration.Milliseconds()
 	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
 	defer cancel()
+	msg := strings.TrimSpace(result)
+	if msg == "" {
+		msg = "ok"
+	}
+	msg = truncateString(msg, 2048)
 	_ = s.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{
 		JobName:        opsAlertEvaluatorJobName,
 		LastRunAt:      &runAt,
 		LastSuccessAt:  &now,
 		LastDurationMs: &durMs,
+		LastResult:     &msg,
 	})
 }
diff --git a/backend/internal/service/ops_cleanup_service.go b/backend/internal/service/ops_cleanup_service.go
index afd2d22c..1ade7176 100644
--- a/backend/internal/service/ops_cleanup_service.go
+++ b/backend/internal/service/ops_cleanup_service.go
@@ -149,7 +149,7 @@ func (s *OpsCleanupService) runScheduled() {
 		log.Printf("[OpsCleanup] cleanup failed: %v", err)
 		return
 	}
-	s.recordHeartbeatSuccess(runAt, time.Since(startedAt))
+	s.recordHeartbeatSuccess(runAt, time.Since(startedAt), counts)
 	log.Printf("[OpsCleanup] cleanup complete: %s", counts)
 }
 
@@ -330,12 +330,13 @@ func (s *OpsCleanupService) tryAcquireLeaderLock(ctx context.Context) (func(), b
 	return release, true
 }
 
-func (s *OpsCleanupService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration) {
+func (s *OpsCleanupService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration, counts opsCleanupDeletedCounts) {
 	if s == nil || s.opsRepo == nil {
 		return
 	}
 	now := time.Now().UTC()
 	durMs := duration.Milliseconds()
+	result := truncateString(counts.String(), 2048)
 	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
 	defer cancel()
 	_ = s.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{
@@ -343,6 +344,7 @@ func (s *OpsCleanupService) recordHeartbeatSuccess(runAt time.Time, duration tim
 		LastRunAt:      &runAt,
 		LastSuccessAt:  &now,
 		LastDurationMs: &durMs,
+		LastResult:     &result,
 	})
 }
diff --git a/backend/internal/service/ops_port.go b/backend/internal/service/ops_port.go
index cdeea241..515b47bb 100644
--- a/backend/internal/service/ops_port.go
+++ b/backend/internal/service/ops_port.go
@@ -235,6 +235,9 @@ type OpsUpsertJobHeartbeatInput struct {
 	LastErrorAt    *time.Time
 	LastError      *string
 	LastDurationMs *int64
+
+	// LastResult is an optional human-readable summary of the last successful run.
+	LastResult *string
 }
 
 type OpsJobHeartbeat struct {
@@ -245,6 +248,7 @@ type OpsJobHeartbeat struct {
 	LastErrorAt    *time.Time `json:"last_error_at"`
 	LastError      *string    `json:"last_error"`
 	LastDurationMs *int64     `json:"last_duration_ms"`
+	LastResult     *string    `json:"last_result"`
 	UpdatedAt      time.Time  `json:"updated_at"`
 }
diff --git a/backend/internal/service/ops_scheduled_report_service.go b/backend/internal/service/ops_scheduled_report_service.go
index 28902cbc..98b2045d 100644
--- a/backend/internal/service/ops_scheduled_report_service.go
+++ b/backend/internal/service/ops_scheduled_report_service.go
@@ -177,6 +177,10 @@ func (s *OpsScheduledReportService) runOnce() {
 		return
 	}
 
+	reportsTotal := len(reports)
+	reportsDue := 0
+	sentAttempts := 0
+
 	for _, report := range reports {
 		if report == nil || !report.Enabled {
 			continue
 		}
@@ -184,14 +188,18 @@ func (s *OpsScheduledReportService) runOnce() {
 		if report.NextRunAt.After(now) {
 			continue
 		}
+		reportsDue++
 
-		if err := s.runReport(ctx, report, now); err != nil {
+		attempts, err := s.runReport(ctx, report, now)
+		if err != nil {
 			s.recordHeartbeatError(runAt, time.Since(startedAt), err)
 			return
 		}
+		sentAttempts += attempts
 	}
 
-	s.recordHeartbeatSuccess(runAt, time.Since(startedAt))
+	result := truncateString(fmt.Sprintf("reports=%d due=%d send_attempts=%d", reportsTotal, reportsDue, sentAttempts), 2048)
+	s.recordHeartbeatSuccess(runAt, time.Since(startedAt), result)
 }
 
 type opsScheduledReport struct {
@@ -297,9 +305,9 @@ func (s *OpsScheduledReportService) listScheduledReports(ctx context.Context, no
 	return out
 }
 
-func (s *OpsScheduledReportService) runReport(ctx context.Context, report *opsScheduledReport, now time.Time) error {
+func (s *OpsScheduledReportService) runReport(ctx context.Context, report *opsScheduledReport, now time.Time) (int, error) {
 	if s == nil || s.opsService == nil || s.emailService == nil || report == nil {
-		return nil
+		return 0, nil
 	}
 	if ctx == nil {
 		ctx = context.Background()
@@ -310,11 +318,11 @@ func (s *OpsScheduledReportService) runReport(ctx context.Context, report *opsSc
 
 	content, err := s.generateReportHTML(ctx, report, now)
 	if err != nil {
-		return err
+		return 0, err
 	}
 	if strings.TrimSpace(content) == "" {
 		// Skip sending when the report decides not to emit content (e.g., digest below min count).
-		return nil
+		return 0, nil
 	}
 
 	recipients := report.Recipients
@@ -325,22 +333,24 @@ func (s *OpsScheduledReportService) runReport(ctx context.Context, report *opsSc
 		}
 	}
 	if len(recipients) == 0 {
-		return nil
+		return 0, nil
 	}
 
 	subject := fmt.Sprintf("[Ops Report] %s", strings.TrimSpace(report.Name))
+	attempts := 0
 	for _, to := range recipients {
 		addr := strings.TrimSpace(to)
 		if addr == "" {
 			continue
 		}
+		attempts++
 		if err := s.emailService.SendEmail(ctx, addr, subject, content); err != nil {
 			// Ignore per-recipient failures; continue best-effort.
 			continue
 		}
 	}
 
-	return nil
+	return attempts, nil
 }
 
 func (s *OpsScheduledReportService) generateReportHTML(ctx context.Context, report *opsScheduledReport, now time.Time) (string, error) {
@@ -650,7 +660,7 @@ func (s *OpsScheduledReportService) setLastRunAt(ctx context.Context, reportType
 	_ = s.redisClient.Set(ctx, key, strconv.FormatInt(t.UTC().Unix(), 10), 14*24*time.Hour).Err()
 }
 
-func (s *OpsScheduledReportService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration) {
+func (s *OpsScheduledReportService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration, result string) {
 	if s == nil || s.opsService == nil || s.opsService.opsRepo == nil {
 		return
 	}
@@ -658,11 +668,17 @@ func (s *OpsScheduledReportService) recordHeartbeatSuccess(runAt time.Time, dura
 	durMs := duration.Milliseconds()
 	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
 	defer cancel()
+	msg := strings.TrimSpace(result)
+	if msg == "" {
+		msg = "ok"
+	}
+	msg = truncateString(msg, 2048)
 	_ = s.opsService.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{
 		JobName:        opsScheduledReportJobName,
 		LastRunAt:      &runAt,
 		LastSuccessAt:  &now,
 		LastDurationMs: &durMs,
+		LastResult:     &msg,
 	})
 }
diff --git a/backend/migrations/039_ops_job_heartbeats_add_last_result.sql b/backend/migrations/039_ops_job_heartbeats_add_last_result.sql
new file mode 100644
index 00000000..7d6dc743
--- /dev/null
+++ b/backend/migrations/039_ops_job_heartbeats_add_last_result.sql
@@ -0,0 +1,6 @@
+-- Add last_result to ops_job_heartbeats for UI job details.
+
+ALTER TABLE IF EXISTS ops_job_heartbeats
+    ADD COLUMN IF NOT EXISTS last_result TEXT;
+
+COMMENT ON COLUMN ops_job_heartbeats.last_result IS 'Last successful run result summary (human readable).';
diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts
index 63b12cfb..6e048436 100644
--- a/frontend/src/api/admin/ops.ts
+++ b/frontend/src/api/admin/ops.ts
@@ -293,6 +293,7 @@ export interface OpsJobHeartbeat {
   last_error_at?: string | null
   last_error?: string | null
   last_duration_ms?: number | null
+  last_result?: string | null
   updated_at: string
 }
diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts
index 3dfbd834..c31ddbc4 100644
--- a/frontend/src/i18n/locales/en.ts
+++ b/frontend/src/i18n/locales/en.ts
@@ -1925,7 +1925,7 @@ export default {
       errors: 'Errors',
       errorRate: 'error_rate:',
       upstreamRate: 'upstream_rate:',
-      latencyDuration: 'Request Duration (ms)',
+      latencyDuration: 'Request Duration',
       ttftLabel: 'TTFT (first_token_ms)',
       p50: 'p50:',
       p90: 'p90:',
@@ -2590,7 +2590,7 @@ export default {
      errors: 'Error statistics, including total errors, error rate, and upstream error rate.',
       upstreamErrors: 'Upstream error statistics, excluding rate limit errors (429/529).',
       latency: 'Request duration statistics, including p50, p90, p95, p99 percentiles.',
-      ttft: 'Time To First Token, measuring the speed of first byte return in streaming responses.',
+      ttft: 'Time To First Token, measuring the speed of first token return in streaming responses.',
       health: 'System health score (0-100), considering SLA, error rate, and resource usage.'
     },
     charts: {
diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts
index daf39939..43aeee41 100644
--- a/frontend/src/i18n/locales/zh.ts
+++ b/frontend/src/i18n/locales/zh.ts
@@ -2048,6 +2048,7 @@ export default {
       lastRun: '最近运行',
       lastSuccess: '最近成功',
       lastError: '最近错误',
+      result: '结果',
       noData: '暂无数据',
       loadingText: '加载中...',
       ready: '就绪',
@@ -2062,7 +2063,7 @@ export default {
       avgQps: '平均 QPS',
       avgTps: '平均 TPS',
       avgLatency: '平均请求时长',
-      avgTtft: '平均首字延迟',
+      avgTtft: '平均首 Token 延迟',
       exceptions: '异常数',
       requestErrors: '请求错误',
       errorCount: '错误数',
@@ -2073,8 +2074,8 @@ export default {
       errors: '错误',
       errorRate: '错误率:',
       upstreamRate: '上游错误率:',
-      latencyDuration: '请求时长(毫秒)',
-      ttftLabel: '首字延迟(毫秒)',
+      latencyDuration: '请求时长',
+      ttftLabel: '首 Token 延迟(毫秒)',
       p50: 'p50',
       p90: 'p90',
       p95: 'p95',
@@ -2117,7 +2118,12 @@ export default {
       '6h': '近6小时',
       '24h': '近24小时',
       '7d': '近7天',
-      '30d': '近30天'
+      '30d': '近30天',
+      custom: '自定义'
+    },
+    customTimeRange: {
+      startTime: '开始时间',
+      endTime: '结束时间'
     },
     fullscreen: {
       enter: '进入全屏'
     }
@@ -2146,7 +2152,7 @@ export default {
       memoryHigh: '内存使用率偏高 ({usage}%)',
       memoryHighImpact: '内存压力较大,需要关注',
      memoryHighAction: '监控内存趋势,检查是否有内存泄漏',
-      ttftHigh: '首字节时间偏高 ({ttft}ms)',
+      ttftHigh: '首 Token 时间偏高 ({ttft}ms)',
       ttftHighImpact: '用户感知时长增加',
       ttftHighAction: '优化请求处理流程,减少前置逻辑耗时',
       // Error rate diagnostics
@@ -2738,7 +2744,7 @@ export default {
       sla: '服务等级协议达成率,排除业务限制(如余额不足、配额超限)的成功请求占比。',
       errors: '错误统计,包括总错误数、错误率和上游错误率。',
       latency: '请求时长统计,包括 p50、p90、p95、p99 等百分位数。',
-      ttft: '首Token延迟(Time To First Token),衡量流式响应的首字节返回速度。',
+      ttft: '首 Token 延迟(Time To First Token),衡量流式响应的首 Token 返回速度。',
       health: '系统健康评分(0-100),综合考虑 SLA、错误率和资源使用情况。'
     },
     charts: {
diff --git a/frontend/src/views/admin/AccountsView.vue b/frontend/src/views/admin/AccountsView.vue
index cf484303..fbf3ee0a 100644
--- a/frontend/src/views/admin/AccountsView.vue
+++ b/frontend/src/views/admin/AccountsView.vue
@@ -414,7 +414,17 @@ const handleScroll = () => {
   menu.show = false
 }
 
-onMounted(async () => { load(); try { const [p, g] = await Promise.all([adminAPI.proxies.getAll(), adminAPI.groups.getAll()]); proxies.value = p; groups.value = g } catch (error) { console.error('Failed to load proxies/groups:', error) }; window.addEventListener('scroll', handleScroll, true) })
+onMounted(async () => {
+  load()
+  try {
+    const [p, g] = await Promise.all([adminAPI.proxies.getAll(), adminAPI.groups.getAll()])
+    proxies.value = p
+    groups.value = g
+  } catch (error) {
+    console.error('Failed to load proxies/groups:', error)
+  }
+  window.addEventListener('scroll', handleScroll, true)
+})
 
 onUnmounted(() => {
   window.removeEventListener('scroll', handleScroll, true)
diff --git a/frontend/src/views/admin/ops/OpsDashboard.vue b/frontend/src/views/admin/ops/OpsDashboard.vue
index ff2a434d..72cb2607 100644
--- a/frontend/src/views/admin/ops/OpsDashboard.vue
+++ b/frontend/src/views/admin/ops/OpsDashboard.vue
@@ -23,10 +23,13 @@
       :auto-refresh-enabled="autoRefreshEnabled"
       :auto-refresh-countdown="autoRefreshCountdown"
       :fullscreen="isFullscreen"
+      :custom-start-time="customStartTime"
+      :custom-end-time="customEndTime"
       @update:time-range="onTimeRangeChange"
       @update:platform="onPlatformChange"
       @update:group="onGroupChange"
       @update:query-mode="onQueryModeChange"
+      @update:custom-time-range="onCustomTimeRangeChange"
       @refresh="fetchData"
       @open-request-details="handleOpenRequestDetails"
       @open-error-details="openErrorDetails"
@@ -39,7 +42,7 @@
-
+
 adminSettingsStore.opsMonitoringEnabled)
 
-type TimeRange = '5m' | '30m' | '1h' | '6h' | '24h'
-const allowedTimeRanges = new Set(['5m', '30m', '1h', '6h', '24h'])
+type TimeRange = '5m' | '30m' | '1h' | '6h' | '24h' | 'custom'
+const allowedTimeRanges = new Set(['5m', '30m', '1h', '6h', '24h', 'custom'])
 
 type QueryMode = 'auto' | 'raw' | 'preagg'
 const allowedQueryModes = new Set(['auto', 'raw', 'preagg'])
@@ -163,6 +166,8 @@ const timeRange = ref('1h')
 const platform = ref('')
 const groupId = ref(null)
 const queryMode = ref('auto')
+const customStartTime = ref(null)
+const customEndTime = ref(null)
 
 const QUERY_KEYS = {
   timeRange: 'tr',
@@ -347,23 +352,24 @@
 const autoRefreshEnabled = ref(false)
 const autoRefreshIntervalMs = ref(30000) // default 30 seconds
 const autoRefreshCountdown = ref(0)
 
-// Auto refresh timer
-const { pause: pauseAutoRefresh, resume: resumeAutoRefresh } = useIntervalFn(
-  () => {
-    if (autoRefreshEnabled.value && opsEnabled.value && !loading.value) {
-      fetchData()
-    }
-  },
-  autoRefreshIntervalMs,
-  { immediate: false }
-)
+// Used to trigger child component refreshes in a single shared cadence.
+const dashboardRefreshToken = ref(0)
 
-// Countdown timer (updates every second)
+// Countdown timer (drives auto refresh; updates every second)
 const { pause: pauseCountdown, resume: resumeCountdown } = useIntervalFn(
   () => {
-    if (autoRefreshEnabled.value && autoRefreshCountdown.value > 0) {
-      autoRefreshCountdown.value--
+    if (!autoRefreshEnabled.value) return
+    if (!opsEnabled.value) return
+    if (loading.value) return
+
+    if (autoRefreshCountdown.value <= 0) {
+      // Fetch immediately when the countdown reaches 0.
+      // fetchData() will reset the countdown to the full interval.
+      fetchData()
+      return
     }
+
+    autoRefreshCountdown.value -= 1
   },
   1000,
   { immediate: false }
 )
@@ -420,6 +426,11 @@ function onTimeRangeChange(v: string | number | boolean | null) {
   timeRange.value = v as TimeRange
 }
 
+function onCustomTimeRangeChange(startTime: string, endTime: string) {
+  customStartTime.value = startTime
+  customEndTime.value = endTime
+}
+
 function onSettingsSaved() {
   loadThresholds()
   fetchData()
@@ -458,18 +469,32 @@ function openError(id: number) {
   showErrorModal.value = true
 }
 
+function buildApiParams() {
+  const params: any = {
+    platform: platform.value || undefined,
+    group_id: groupId.value ?? undefined,
+    mode: queryMode.value
+  }
+
+  if (timeRange.value === 'custom') {
+    if (customStartTime.value && customEndTime.value) {
+      params.start_time = customStartTime.value
+      params.end_time = customEndTime.value
+    } else {
+      // Safety fallback: avoid sending time_range=custom (backend may not support it)
+      params.time_range = '1h'
+    }
+  } else {
+    params.time_range = timeRange.value
+  }
+
+  return params
+}
+
 async function refreshOverviewWithCancel(fetchSeq: number, signal: AbortSignal) {
   if (!opsEnabled.value) return
   try {
-    const data = await opsAPI.getDashboardOverview(
-      {
-        time_range: timeRange.value,
-        platform: platform.value || undefined,
-        group_id: groupId.value ?? undefined,
-        mode: queryMode.value
-      },
-      { signal }
-    )
+    const data = await opsAPI.getDashboardOverview(buildApiParams(), { signal })
     if (fetchSeq !== dashboardFetchSeq) return
     overview.value = data
   } catch (err: any) {
@@ -483,15 +508,7 @@ async function refreshThroughputTrendWithCancel(fetchSeq: number, signal: AbortS
   if (!opsEnabled.value) return
   loadingTrend.value = true
   try {
-    const data = await opsAPI.getThroughputTrend(
-      {
-        time_range: timeRange.value,
-        platform: platform.value || undefined,
-        group_id: groupId.value ?? undefined,
-        mode: queryMode.value
-      },
-      { signal }
-    )
+    const data = await opsAPI.getThroughputTrend(buildApiParams(), { signal })
     if (fetchSeq !== dashboardFetchSeq) return
     throughputTrend.value = data
   } catch (err: any) {
@@ -509,15 +526,7 @@ async function refreshLatencyHistogramWithCancel(fetchSeq: number, signal: Abort
   if (!opsEnabled.value) return
   loadingLatency.value = true
   try {
-    const data = await opsAPI.getLatencyHistogram(
-      {
-        time_range: timeRange.value,
-        platform: platform.value || undefined,
-        group_id: groupId.value ?? undefined,
-        mode: queryMode.value
-      },
-      { signal }
-    )
+    const data = await opsAPI.getLatencyHistogram(buildApiParams(), { signal })
     if (fetchSeq !== dashboardFetchSeq) return
     latencyHistogram.value = data
   } catch (err: any) {
@@ -535,15 +544,7 @@ async function refreshErrorTrendWithCancel(fetchSeq: number, signal: AbortSignal
   if (!opsEnabled.value) return
   loadingErrorTrend.value = true
   try {
-    const data = await opsAPI.getErrorTrend(
-      {
-        time_range: timeRange.value,
-        platform: platform.value || undefined,
-        group_id: groupId.value ?? undefined,
-        mode: queryMode.value
-      },
-      { signal }
-    )
+    const data = await opsAPI.getErrorTrend(buildApiParams(), { signal })
     if (fetchSeq !== dashboardFetchSeq) return
     errorTrend.value = data
   } catch (err: any) {
@@ -561,15 +562,7 @@ async function refreshErrorDistributionWithCancel(fetchSeq: number, signal: Abor
   if (!opsEnabled.value) return
   loadingErrorDistribution.value = true
   try {
-    const data = await opsAPI.getErrorDistribution(
-      {
-        time_range: timeRange.value,
-        platform: platform.value || undefined,
-        group_id: groupId.value ?? undefined,
-        mode: queryMode.value
-      },
-      { signal }
-    )
+    const data = await opsAPI.getErrorDistribution(buildApiParams(), { signal })
     if (fetchSeq !== dashboardFetchSeq) return
     errorDistribution.value = data
   } catch (err: any) {
@@ -612,7 +605,12 @@ async function fetchData() {
     refreshErrorDistributionWithCancel(fetchSeq, dashboardFetchController.signal)
   ])
   if (fetchSeq !== dashboardFetchSeq) return
+
   lastUpdated.value = new Date()
+
+  // Trigger child component refreshes using the same cadence as the header.
+  dashboardRefreshToken.value += 1
+
   // Reset auto refresh countdown after successful fetch
   if (autoRefreshEnabled.value) {
     autoRefreshCountdown.value = Math.floor(autoRefreshIntervalMs.value / 1000)
@@ -686,15 +684,14 @@ onMounted(async () => {
 
   // Start auto refresh if enabled
   if (autoRefreshEnabled.value) {
-    resumeAutoRefresh()
     resumeCountdown()
   }
 })
 
 async function loadThresholds() {
   try {
-    const settings = await opsAPI.getAlertRuntimeSettings()
-    metricThresholds.value = settings.thresholds || null
+    const thresholds = await opsAPI.getMetricThresholds()
+    metricThresholds.value = thresholds || null
   } catch (err) {
     console.warn('[OpsDashboard] Failed to load thresholds', err)
     metricThresholds.value = null
@@ -704,7 +701,6 @@ async function loadThresholds() {
 onUnmounted(() => {
   window.removeEventListener('keydown', handleKeydown)
   abortDashboardFetch()
-  pauseAutoRefresh()
   pauseCountdown()
 })
 
@@ -712,10 +708,8 @@ onUnmounted(() => {
 watch(autoRefreshEnabled, (enabled) => {
   if (enabled) {
     autoRefreshCountdown.value = Math.floor(autoRefreshIntervalMs.value / 1000)
-    resumeAutoRefresh()
     resumeCountdown()
   } else {
-    pauseAutoRefresh()
     pauseCountdown()
     autoRefreshCountdown.value = 0
   }
diff --git a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue
index 2104d1f7..acb0de1b 100644
--- a/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue
+++ b/frontend/src/views/admin/ops/components/OpsConcurrencyCard.vue
@@ -1,12 +1,12 @@