refactor: 移除 Ops 监控模块
移除未完成的运维监控功能,简化系统架构:- 删除 ops_handler、ops_service、ops_repo 等后端代码 - 删除 ops 相关数据库迁移文件 - 删除前端 OpsDashboard 页面和 API
This commit is contained in:
@@ -1,402 +0,0 @@
|
|||||||
package admin
|
|
||||||
|
|
||||||
import (
|
|
||||||
"math"
|
|
||||||
"net/http"
|
|
||||||
"strconv"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OpsHandler handles ops dashboard endpoints (metrics, error logs,
// provider health, latency/error distributions) under /api/v1/admin/ops.
type OpsHandler struct {
	// opsService supplies all metrics/error-log queries; never nil once
	// constructed via NewOpsHandler.
	opsService *service.OpsService
}

// NewOpsHandler creates a new OpsHandler backed by the given OpsService.
func NewOpsHandler(opsService *service.OpsService) *OpsHandler {
	return &OpsHandler{opsService: opsService}
}
|
|
||||||
|
|
||||||
// GetMetrics returns the latest ops metrics snapshot.
|
|
||||||
// GET /api/v1/admin/ops/metrics
|
|
||||||
func (h *OpsHandler) GetMetrics(c *gin.Context) {
|
|
||||||
metrics, err := h.opsService.GetLatestMetrics(c.Request.Context())
|
|
||||||
if err != nil {
|
|
||||||
response.Error(c, http.StatusInternalServerError, "Failed to get ops metrics")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
response.Success(c, metrics)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListMetricsHistory returns a time-range slice of metrics for charts.
// GET /api/v1/admin/ops/metrics/history
//
// Query params:
//   - window_minutes: int (default 1) — aggregation window size
//   - minutes: int (lookback; optional; clamped to one week)
//   - start_time/end_time: RFC3339 timestamps (optional; override minutes when provided)
//   - limit: int (optional; 1-5000; default 300, but when no explicit limit
//     is given and the default 24h range applies, it expands to 1440)
func (h *OpsHandler) ListMetricsHistory(c *gin.Context) {
	// Aggregation window; must be a positive integer.
	windowMinutes := 1
	if v := c.Query("window_minutes"); v != "" {
		if parsed, err := strconv.Atoi(v); err == nil && parsed > 0 {
			windowMinutes = parsed
		} else {
			response.BadRequest(c, "Invalid window_minutes")
			return
		}
	}

	// Track whether the caller supplied an explicit limit so the 24h
	// default below can widen it without overriding a user choice.
	limit := 300
	limitProvided := false
	if v := c.Query("limit"); v != "" {
		parsed, err := strconv.Atoi(v)
		if err != nil || parsed <= 0 || parsed > 5000 {
			response.BadRequest(c, "Invalid limit (must be 1-5000)")
			return
		}
		limit = parsed
		limitProvided = true
	}

	// Resolve the time range. Precedence: explicit start/end beats the
	// "minutes" lookback, which beats the default last-24h window.
	endTime := time.Now()
	startTime := time.Time{}

	if startTimeStr := c.Query("start_time"); startTimeStr != "" {
		parsed, err := time.Parse(time.RFC3339, startTimeStr)
		if err != nil {
			response.BadRequest(c, "Invalid start_time format (RFC3339)")
			return
		}
		startTime = parsed
	}
	if endTimeStr := c.Query("end_time"); endTimeStr != "" {
		parsed, err := time.Parse(time.RFC3339, endTimeStr)
		if err != nil {
			response.BadRequest(c, "Invalid end_time format (RFC3339)")
			return
		}
		endTime = parsed
	}

	// If explicit range not provided, use lookback minutes.
	if startTime.IsZero() {
		if v := c.Query("minutes"); v != "" {
			minutes, err := strconv.Atoi(v)
			if err != nil || minutes <= 0 {
				response.BadRequest(c, "Invalid minutes")
				return
			}
			// Clamp the lookback to one week.
			if minutes > 60*24*7 {
				minutes = 60 * 24 * 7
			}
			startTime = endTime.Add(-time.Duration(minutes) * time.Minute)
		}
	}

	// Default time range: last 24 hours.
	if startTime.IsZero() {
		startTime = endTime.Add(-24 * time.Hour)
		if !limitProvided {
			// Metrics are collected at 1-minute cadence; 24h requires ~1440 points.
			limit = 24 * 60
		}
	}

	if startTime.After(endTime) {
		response.BadRequest(c, "Invalid time range: start_time must be <= end_time")
		return
	}

	items, err := h.opsService.ListMetricsHistory(c.Request.Context(), windowMinutes, startTime, endTime, limit)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to list ops metrics history")
		return
	}
	response.Success(c, gin.H{"items": items})
}
|
|
||||||
|
|
||||||
// ListErrorLogs lists recent error logs with optional filters.
|
|
||||||
// GET /api/v1/admin/ops/error-logs
|
|
||||||
//
|
|
||||||
// Query params:
|
|
||||||
// - start_time/end_time: RFC3339 timestamps (optional)
|
|
||||||
// - platform: string (optional)
|
|
||||||
// - phase: string (optional)
|
|
||||||
// - severity: string (optional)
|
|
||||||
// - q: string (optional; fuzzy match)
|
|
||||||
// - limit: int (optional; default 100; max 500)
|
|
||||||
func (h *OpsHandler) ListErrorLogs(c *gin.Context) {
|
|
||||||
var filters service.OpsErrorLogFilters
|
|
||||||
|
|
||||||
if startTimeStr := c.Query("start_time"); startTimeStr != "" {
|
|
||||||
startTime, err := time.Parse(time.RFC3339, startTimeStr)
|
|
||||||
if err != nil {
|
|
||||||
response.BadRequest(c, "Invalid start_time format (RFC3339)")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
filters.StartTime = &startTime
|
|
||||||
}
|
|
||||||
if endTimeStr := c.Query("end_time"); endTimeStr != "" {
|
|
||||||
endTime, err := time.Parse(time.RFC3339, endTimeStr)
|
|
||||||
if err != nil {
|
|
||||||
response.BadRequest(c, "Invalid end_time format (RFC3339)")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
filters.EndTime = &endTime
|
|
||||||
}
|
|
||||||
|
|
||||||
if filters.StartTime != nil && filters.EndTime != nil && filters.StartTime.After(*filters.EndTime) {
|
|
||||||
response.BadRequest(c, "Invalid time range: start_time must be <= end_time")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
filters.Platform = c.Query("platform")
|
|
||||||
filters.Phase = c.Query("phase")
|
|
||||||
filters.Severity = c.Query("severity")
|
|
||||||
filters.Query = c.Query("q")
|
|
||||||
|
|
||||||
filters.Limit = 100
|
|
||||||
if limitStr := c.Query("limit"); limitStr != "" {
|
|
||||||
limit, err := strconv.Atoi(limitStr)
|
|
||||||
if err != nil || limit <= 0 || limit > 500 {
|
|
||||||
response.BadRequest(c, "Invalid limit (must be 1-500)")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
filters.Limit = limit
|
|
||||||
}
|
|
||||||
|
|
||||||
items, total, err := h.opsService.ListErrorLogs(c.Request.Context(), filters)
|
|
||||||
if err != nil {
|
|
||||||
response.Error(c, http.StatusInternalServerError, "Failed to list error logs")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
response.Success(c, gin.H{
|
|
||||||
"items": items,
|
|
||||||
"total": total,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetDashboardOverview returns realtime ops dashboard overview.
|
|
||||||
// GET /api/v1/admin/ops/dashboard/overview
|
|
||||||
//
|
|
||||||
// Query params:
|
|
||||||
// - time_range: string (optional; default "1h") one of: 5m, 30m, 1h, 6h, 24h
|
|
||||||
func (h *OpsHandler) GetDashboardOverview(c *gin.Context) {
|
|
||||||
timeRange := c.Query("time_range")
|
|
||||||
if timeRange == "" {
|
|
||||||
timeRange = "1h"
|
|
||||||
}
|
|
||||||
|
|
||||||
switch timeRange {
|
|
||||||
case "5m", "30m", "1h", "6h", "24h":
|
|
||||||
default:
|
|
||||||
response.BadRequest(c, "Invalid time_range (supported: 5m, 30m, 1h, 6h, 24h)")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := h.opsService.GetDashboardOverview(c.Request.Context(), timeRange)
|
|
||||||
if err != nil {
|
|
||||||
response.Error(c, http.StatusInternalServerError, "Failed to get dashboard overview")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
response.Success(c, data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetProviderHealth returns upstream provider health comparison data.
// GET /api/v1/admin/ops/dashboard/providers
//
// Query params:
//   - time_range: string (optional; default "1h") one of: 5m, 30m, 1h, 6h, 24h
//
// The response wraps the raw per-provider rows with a computed summary:
// request-weighted average success rate plus best/worst provider names.
func (h *OpsHandler) GetProviderHealth(c *gin.Context) {
	timeRange := c.Query("time_range")
	if timeRange == "" {
		timeRange = "1h"
	}

	switch timeRange {
	case "5m", "30m", "1h", "6h", "24h":
	default:
		response.BadRequest(c, "Invalid time_range (supported: 5m, 30m, 1h, 6h, 24h)")
		return
	}

	providers, err := h.opsService.GetProviderHealth(c.Request.Context(), timeRange)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to get provider health")
		return
	}

	// Aggregate a summary across providers:
	//   - avg success rate is weighted by each provider's request count
	//   - best/worst are tracked only among providers that served traffic
	var totalRequests int64
	var weightedSuccess float64
	var bestProvider string
	var worstProvider string
	var bestRate float64
	var worstRate float64
	hasRate := false

	for _, p := range providers {
		if p == nil {
			continue
		}
		// SuccessRate is a percentage (0-100); divide by 100 to weight as a fraction.
		totalRequests += p.RequestCount
		weightedSuccess += (p.SuccessRate / 100) * float64(p.RequestCount)

		// Zero-traffic providers contribute nothing and cannot be best/worst.
		if p.RequestCount <= 0 {
			continue
		}
		// Seed best/worst from the first provider that has traffic.
		if !hasRate {
			bestProvider = p.Name
			worstProvider = p.Name
			bestRate = p.SuccessRate
			worstRate = p.SuccessRate
			hasRate = true
			continue
		}

		if p.SuccessRate > bestRate {
			bestProvider = p.Name
			bestRate = p.SuccessRate
		}
		if p.SuccessRate < worstRate {
			worstProvider = p.Name
			worstRate = p.SuccessRate
		}
	}

	// Convert the weighted fraction back to a percentage, rounded to 2 decimals.
	avgSuccessRate := 0.0
	if totalRequests > 0 {
		avgSuccessRate = (weightedSuccess / float64(totalRequests)) * 100
		avgSuccessRate = math.Round(avgSuccessRate*100) / 100
	}

	response.Success(c, gin.H{
		"providers": providers,
		"summary": gin.H{
			"total_requests":   totalRequests,
			"avg_success_rate": avgSuccessRate,
			"best_provider":    bestProvider,
			"worst_provider":   worstProvider,
		},
	})
}
|
|
||||||
|
|
||||||
// GetErrorLogs returns a paginated error log list with multi-dimensional filters.
// GET /api/v1/admin/ops/errors
//
// Query params: start_time/end_time (RFC3339), error_code (int >= 0),
// provider (or legacy alias "platform"), account_id (int64 > 0), plus the
// standard pagination parameters handled by response.ParsePagination.
func (h *OpsHandler) GetErrorLogs(c *gin.Context) {
	page, pageSize := response.ParsePagination(c)

	filter := &service.ErrorLogFilter{
		Page:     page,
		PageSize: pageSize,
	}

	// Optional RFC3339 time range.
	if startTimeStr := c.Query("start_time"); startTimeStr != "" {
		startTime, err := time.Parse(time.RFC3339, startTimeStr)
		if err != nil {
			response.BadRequest(c, "Invalid start_time format (RFC3339)")
			return
		}
		filter.StartTime = &startTime
	}
	if endTimeStr := c.Query("end_time"); endTimeStr != "" {
		endTime, err := time.Parse(time.RFC3339, endTimeStr)
		if err != nil {
			response.BadRequest(c, "Invalid end_time format (RFC3339)")
			return
		}
		filter.EndTime = &endTime
	}

	if filter.StartTime != nil && filter.EndTime != nil && filter.StartTime.After(*filter.EndTime) {
		response.BadRequest(c, "Invalid time range: start_time must be <= end_time")
		return
	}

	// Optional error-code filter; non-negative integers only.
	if errorCodeStr := c.Query("error_code"); errorCodeStr != "" {
		code, err := strconv.Atoi(errorCodeStr)
		if err != nil || code < 0 {
			response.BadRequest(c, "Invalid error_code")
			return
		}
		filter.ErrorCode = &code
	}

	// Keep both parameter names for compatibility: provider (docs) and platform (legacy).
	filter.Provider = c.Query("provider")
	if filter.Provider == "" {
		filter.Provider = c.Query("platform")
	}

	// Optional account filter; must be a positive int64.
	if accountIDStr := c.Query("account_id"); accountIDStr != "" {
		accountID, err := strconv.ParseInt(accountIDStr, 10, 64)
		if err != nil || accountID <= 0 {
			response.BadRequest(c, "Invalid account_id")
			return
		}
		filter.AccountID = &accountID
	}

	out, err := h.opsService.GetErrorLogs(c.Request.Context(), filter)
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to get error logs")
		return
	}

	response.Success(c, gin.H{
		"errors":    out.Errors,
		"total":     out.Total,
		"page":      out.Page,
		"page_size": out.PageSize,
	})
}
|
|
||||||
|
|
||||||
// GetLatencyHistogram returns the latency distribution histogram.
|
|
||||||
// GET /api/v1/admin/ops/dashboard/latency-histogram
|
|
||||||
func (h *OpsHandler) GetLatencyHistogram(c *gin.Context) {
|
|
||||||
timeRange := c.Query("time_range")
|
|
||||||
if timeRange == "" {
|
|
||||||
timeRange = "1h"
|
|
||||||
}
|
|
||||||
|
|
||||||
buckets, err := h.opsService.GetLatencyHistogram(c.Request.Context(), timeRange)
|
|
||||||
if err != nil {
|
|
||||||
response.Error(c, http.StatusInternalServerError, "Failed to get latency histogram")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
totalRequests := int64(0)
|
|
||||||
for _, b := range buckets {
|
|
||||||
totalRequests += b.Count
|
|
||||||
}
|
|
||||||
|
|
||||||
response.Success(c, gin.H{
|
|
||||||
"buckets": buckets,
|
|
||||||
"total_requests": totalRequests,
|
|
||||||
"slow_request_threshold": 1000,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetErrorDistribution returns the error distribution.
|
|
||||||
// GET /api/v1/admin/ops/dashboard/errors/distribution
|
|
||||||
func (h *OpsHandler) GetErrorDistribution(c *gin.Context) {
|
|
||||||
timeRange := c.Query("time_range")
|
|
||||||
if timeRange == "" {
|
|
||||||
timeRange = "1h"
|
|
||||||
}
|
|
||||||
|
|
||||||
items, err := h.opsService.GetErrorDistribution(c.Request.Context(), timeRange)
|
|
||||||
if err != nil {
|
|
||||||
response.Error(c, http.StatusInternalServerError, "Failed to get error distribution")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
response.Success(c, gin.H{
|
|
||||||
"items": items,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
@@ -1,286 +0,0 @@
|
|||||||
package admin
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/netip"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
"github.com/gorilla/websocket"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OpsWSProxyConfig controls how the ops WebSocket endpoint validates the
// Origin header when the service runs behind a reverse proxy.
type OpsWSProxyConfig struct {
	// TrustProxy enables honoring X-Forwarded-Host from trusted peers.
	TrustProxy bool
	// TrustedProxies lists CIDR ranges whose direct peers may set proxy headers.
	TrustedProxies []netip.Prefix
	// OriginPolicy decides requests without an Origin header:
	// "strict" rejects them, "permissive" (default) allows them.
	OriginPolicy string
}
|
|
||||||
|
|
||||||
// Environment variables that override the default WS proxy configuration.
const (
	envOpsWSTrustProxy     = "OPS_WS_TRUST_PROXY"
	envOpsWSTrustedProxies = "OPS_WS_TRUSTED_PROXIES"
	envOpsWSOriginPolicy   = "OPS_WS_ORIGIN_POLICY"
)

// Origin policies applied to requests that arrive without an Origin header.
const (
	OriginPolicyStrict     = "strict"     // reject when Origin is absent
	OriginPolicyPermissive = "permissive" // allow when Origin is absent (default)
)
|
|
||||||
|
|
||||||
// opsWSProxyConfig is resolved once from the environment at package init.
var opsWSProxyConfig = loadOpsWSProxyConfigFromEnv()

// upgrader performs the HTTP -> WebSocket upgrade; origin validation is
// delegated to isAllowedOpsWSOrigin.
var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return isAllowedOpsWSOrigin(r)
	},
}
|
|
||||||
|
|
||||||
// QPSWSHandler handles realtime QPS push via WebSocket.
// GET /api/v1/admin/ops/ws/qps
//
// After the upgrade it pushes a "qps_update" JSON message every 2 seconds
// and a ping frame every 30 seconds until a write fails or the request
// context is cancelled.
func (h *OpsHandler) QPSWSHandler(c *gin.Context) {
	conn, err := upgrader.Upgrade(c.Writer, c.Request, nil)
	if err != nil {
		log.Printf("[OpsWS] upgrade failed: %v", err)
		return
	}
	defer func() { _ = conn.Close() }()

	// Set pong handler
	// NOTE(review): pong handlers only fire while a read is pending, and
	// this handler never reads from conn — the read deadline / pong
	// machinery appears ineffective as written. Confirm whether a read
	// pump goroutine was intended.
	if err := conn.SetReadDeadline(time.Now().Add(60 * time.Second)); err != nil {
		log.Printf("[OpsWS] set read deadline failed: %v", err)
		return
	}
	conn.SetPongHandler(func(string) error {
		return conn.SetReadDeadline(time.Now().Add(60 * time.Second))
	})

	// Push QPS data every 2 seconds
	ticker := time.NewTicker(2 * time.Second)
	defer ticker.Stop()

	// Heartbeat ping every 30 seconds
	pingTicker := time.NewTicker(30 * time.Second)
	defer pingTicker.Stop()

	ctx, cancel := context.WithCancel(c.Request.Context())
	defer cancel()

	for {
		select {
		case <-ticker.C:
			// Fetch 1m window stats for current QPS
			data, err := h.opsService.GetDashboardOverview(ctx, "5m")
			if err != nil {
				// Transient failure: skip this tick, keep the connection.
				log.Printf("[OpsWS] get overview failed: %v", err)
				continue
			}

			payload := gin.H{
				"type":      "qps_update",
				"timestamp": time.Now().Format(time.RFC3339),
				"data": gin.H{
					"qps":           data.QPS.Current,
					"tps":           data.TPS.Current,
					"request_count": data.Errors.TotalCount + int64(data.QPS.Avg1h*60), // Rough estimate
				},
			}

			// Marshal error deliberately ignored: gin.H of numeric/string
			// values cannot fail to encode.
			msg, _ := json.Marshal(payload)
			if err := conn.WriteMessage(websocket.TextMessage, msg); err != nil {
				log.Printf("[OpsWS] write failed: %v", err)
				return
			}
		case <-pingTicker.C:
			if err := conn.WriteMessage(websocket.PingMessage, nil); err != nil {
				log.Printf("[OpsWS] ping failed: %v", err)
				return
			}
		case <-ctx.Done():
			return
		}
	}
}
|
|
||||||
|
|
||||||
func isAllowedOpsWSOrigin(r *http.Request) bool {
|
|
||||||
if r == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
origin := strings.TrimSpace(r.Header.Get("Origin"))
|
|
||||||
if origin == "" {
|
|
||||||
switch strings.ToLower(strings.TrimSpace(opsWSProxyConfig.OriginPolicy)) {
|
|
||||||
case OriginPolicyStrict:
|
|
||||||
return false
|
|
||||||
case OriginPolicyPermissive, "":
|
|
||||||
return true
|
|
||||||
default:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
parsed, err := url.Parse(origin)
|
|
||||||
if err != nil || parsed.Hostname() == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
originHost := strings.ToLower(parsed.Hostname())
|
|
||||||
|
|
||||||
trustProxyHeaders := shouldTrustOpsWSProxyHeaders(r)
|
|
||||||
reqHost := hostWithoutPort(r.Host)
|
|
||||||
if trustProxyHeaders {
|
|
||||||
xfHost := strings.TrimSpace(r.Header.Get("X-Forwarded-Host"))
|
|
||||||
if xfHost != "" {
|
|
||||||
xfHost = strings.TrimSpace(strings.Split(xfHost, ",")[0])
|
|
||||||
if xfHost != "" {
|
|
||||||
reqHost = hostWithoutPort(xfHost)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
reqHost = strings.ToLower(reqHost)
|
|
||||||
if reqHost == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return originHost == reqHost
|
|
||||||
}
|
|
||||||
|
|
||||||
func shouldTrustOpsWSProxyHeaders(r *http.Request) bool {
|
|
||||||
if r == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if !opsWSProxyConfig.TrustProxy {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
peerIP, ok := requestPeerIP(r)
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return isAddrInTrustedProxies(peerIP, opsWSProxyConfig.TrustedProxies)
|
|
||||||
}
|
|
||||||
|
|
||||||
func requestPeerIP(r *http.Request) (netip.Addr, bool) {
|
|
||||||
if r == nil {
|
|
||||||
return netip.Addr{}, false
|
|
||||||
}
|
|
||||||
host, _, err := net.SplitHostPort(strings.TrimSpace(r.RemoteAddr))
|
|
||||||
if err != nil {
|
|
||||||
host = strings.TrimSpace(r.RemoteAddr)
|
|
||||||
}
|
|
||||||
host = strings.TrimPrefix(host, "[")
|
|
||||||
host = strings.TrimSuffix(host, "]")
|
|
||||||
if host == "" {
|
|
||||||
return netip.Addr{}, false
|
|
||||||
}
|
|
||||||
addr, err := netip.ParseAddr(host)
|
|
||||||
if err != nil {
|
|
||||||
return netip.Addr{}, false
|
|
||||||
}
|
|
||||||
return addr.Unmap(), true
|
|
||||||
}
|
|
||||||
|
|
||||||
func isAddrInTrustedProxies(addr netip.Addr, trusted []netip.Prefix) bool {
|
|
||||||
if !addr.IsValid() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
for _, p := range trusted {
|
|
||||||
if p.Contains(addr) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func loadOpsWSProxyConfigFromEnv() OpsWSProxyConfig {
|
|
||||||
cfg := OpsWSProxyConfig{
|
|
||||||
TrustProxy: true,
|
|
||||||
TrustedProxies: defaultTrustedProxies(),
|
|
||||||
OriginPolicy: OriginPolicyPermissive,
|
|
||||||
}
|
|
||||||
|
|
||||||
if v := strings.TrimSpace(os.Getenv(envOpsWSTrustProxy)); v != "" {
|
|
||||||
if parsed, err := strconv.ParseBool(v); err == nil {
|
|
||||||
cfg.TrustProxy = parsed
|
|
||||||
} else {
|
|
||||||
log.Printf("[OpsWS] invalid %s=%q (expected bool); using default=%v", envOpsWSTrustProxy, v, cfg.TrustProxy)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if raw := strings.TrimSpace(os.Getenv(envOpsWSTrustedProxies)); raw != "" {
|
|
||||||
prefixes, invalid := parseTrustedProxyList(raw)
|
|
||||||
if len(invalid) > 0 {
|
|
||||||
log.Printf("[OpsWS] invalid %s entries ignored: %s", envOpsWSTrustedProxies, strings.Join(invalid, ", "))
|
|
||||||
}
|
|
||||||
cfg.TrustedProxies = prefixes
|
|
||||||
}
|
|
||||||
|
|
||||||
if v := strings.TrimSpace(os.Getenv(envOpsWSOriginPolicy)); v != "" {
|
|
||||||
normalized := strings.ToLower(v)
|
|
||||||
switch normalized {
|
|
||||||
case OriginPolicyStrict, OriginPolicyPermissive:
|
|
||||||
cfg.OriginPolicy = normalized
|
|
||||||
default:
|
|
||||||
log.Printf("[OpsWS] invalid %s=%q (expected %q or %q); using default=%q", envOpsWSOriginPolicy, v, OriginPolicyStrict, OriginPolicyPermissive, cfg.OriginPolicy)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return cfg
|
|
||||||
}
|
|
||||||
|
|
||||||
// defaultTrustedProxies returns the built-in trusted ranges: IPv4 and IPv6
// loopback only.
func defaultTrustedProxies() []netip.Prefix {
	return []netip.Prefix{
		netip.MustParsePrefix("127.0.0.0/8"),
		netip.MustParsePrefix("::1/128"),
	}
}
|
|
||||||
|
|
||||||
func parseTrustedProxyList(raw string) (prefixes []netip.Prefix, invalid []string) {
|
|
||||||
for _, token := range strings.Split(raw, ",") {
|
|
||||||
item := strings.TrimSpace(token)
|
|
||||||
if item == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
p netip.Prefix
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
if strings.Contains(item, "/") {
|
|
||||||
p, err = netip.ParsePrefix(item)
|
|
||||||
} else {
|
|
||||||
var addr netip.Addr
|
|
||||||
addr, err = netip.ParseAddr(item)
|
|
||||||
if err == nil {
|
|
||||||
addr = addr.Unmap()
|
|
||||||
bits := 128
|
|
||||||
if addr.Is4() {
|
|
||||||
bits = 32
|
|
||||||
}
|
|
||||||
p = netip.PrefixFrom(addr, bits)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil || !p.IsValid() {
|
|
||||||
invalid = append(invalid, item)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
prefixes = append(prefixes, p.Masked())
|
|
||||||
}
|
|
||||||
return prefixes, invalid
|
|
||||||
}
|
|
||||||
|
|
||||||
// hostWithoutPort strips an optional port from a host[:port] value,
// handling bracketed IPv6 literals. Unbracketed strings with multiple
// colons fall back to everything before the first colon.
func hostWithoutPort(hostport string) string {
	hostport = strings.TrimSpace(hostport)
	if hostport == "" {
		return ""
	}

	if host, _, err := net.SplitHostPort(hostport); err == nil {
		return host
	}
	if strings.HasPrefix(hostport, "[") && strings.HasSuffix(hostport, "]") {
		return strings.Trim(hostport, "[]")
	}
	if i := strings.IndexByte(hostport, ':'); i >= 0 {
		return hostport[:i]
	}
	return hostport
}
|
|
||||||
@@ -1,123 +0,0 @@
|
|||||||
package admin
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/http"
|
|
||||||
"net/netip"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TestIsAllowedOpsWSOrigin_AllowsEmptyOrigin verifies that a request with
// no Origin header is accepted under the permissive policy.
func TestIsAllowedOpsWSOrigin_AllowsEmptyOrigin(t *testing.T) {
	// Swap in a test config; restore the package global afterwards.
	original := opsWSProxyConfig
	t.Cleanup(func() { opsWSProxyConfig = original })
	opsWSProxyConfig = OpsWSProxyConfig{OriginPolicy: OriginPolicyPermissive}

	req, err := http.NewRequest(http.MethodGet, "http://example.test", nil)
	if err != nil {
		t.Fatalf("NewRequest: %v", err)
	}

	if !isAllowedOpsWSOrigin(req) {
		t.Fatalf("expected empty Origin to be allowed")
	}
}

// TestIsAllowedOpsWSOrigin_RejectsEmptyOrigin_WhenStrict verifies that the
// strict policy rejects requests that lack an Origin header.
func TestIsAllowedOpsWSOrigin_RejectsEmptyOrigin_WhenStrict(t *testing.T) {
	// Swap in a test config; restore the package global afterwards.
	original := opsWSProxyConfig
	t.Cleanup(func() { opsWSProxyConfig = original })
	opsWSProxyConfig = OpsWSProxyConfig{OriginPolicy: OriginPolicyStrict}

	req, err := http.NewRequest(http.MethodGet, "http://example.test", nil)
	if err != nil {
		t.Fatalf("NewRequest: %v", err)
	}

	if isAllowedOpsWSOrigin(req) {
		t.Fatalf("expected empty Origin to be rejected under strict policy")
	}
}
|
|
||||||
|
|
||||||
// TestIsAllowedOpsWSOrigin_UsesXForwardedHostOnlyFromTrustedProxy verifies
// that X-Forwarded-Host participates in Origin validation only when the
// direct peer address falls inside the configured trusted-proxy ranges.
func TestIsAllowedOpsWSOrigin_UsesXForwardedHostOnlyFromTrustedProxy(t *testing.T) {
	// Swap in a test config; restore the package global afterwards.
	original := opsWSProxyConfig
	t.Cleanup(func() { opsWSProxyConfig = original })

	opsWSProxyConfig = OpsWSProxyConfig{
		TrustProxy: true,
		TrustedProxies: []netip.Prefix{
			netip.MustParsePrefix("127.0.0.0/8"),
		},
	}

	// Untrusted peer: ignore X-Forwarded-Host and compare against r.Host.
	{
		req, err := http.NewRequest(http.MethodGet, "http://internal.service.local", nil)
		if err != nil {
			t.Fatalf("NewRequest: %v", err)
		}
		req.RemoteAddr = "192.0.2.1:12345"
		req.Host = "internal.service.local"
		req.Header.Set("Origin", "https://public.example.com")
		req.Header.Set("X-Forwarded-Host", "public.example.com")

		if isAllowedOpsWSOrigin(req) {
			t.Fatalf("expected Origin to be rejected when peer is not a trusted proxy")
		}
	}

	// Trusted peer: allow X-Forwarded-Host to participate in Origin validation.
	{
		req, err := http.NewRequest(http.MethodGet, "http://internal.service.local", nil)
		if err != nil {
			t.Fatalf("NewRequest: %v", err)
		}
		req.RemoteAddr = "127.0.0.1:23456"
		req.Host = "internal.service.local"
		req.Header.Set("Origin", "https://public.example.com")
		req.Header.Set("X-Forwarded-Host", "public.example.com")

		if !isAllowedOpsWSOrigin(req) {
			t.Fatalf("expected Origin to be accepted when peer is a trusted proxy")
		}
	}
}
|
|
||||||
|
|
||||||
// TestLoadOpsWSProxyConfigFromEnv_OriginPolicy checks that the origin
// policy env var is matched case-insensitively.
func TestLoadOpsWSProxyConfigFromEnv_OriginPolicy(t *testing.T) {
	t.Setenv(envOpsWSOriginPolicy, "STRICT")
	cfg := loadOpsWSProxyConfigFromEnv()
	if cfg.OriginPolicy != OriginPolicyStrict {
		t.Fatalf("OriginPolicy=%q, want %q", cfg.OriginPolicy, OriginPolicyStrict)
	}
}

// TestLoadOpsWSProxyConfigFromEnv_OriginPolicyInvalidUsesDefault checks
// that an unrecognized policy value falls back to the permissive default.
func TestLoadOpsWSProxyConfigFromEnv_OriginPolicyInvalidUsesDefault(t *testing.T) {
	t.Setenv(envOpsWSOriginPolicy, "nope")
	cfg := loadOpsWSProxyConfigFromEnv()
	if cfg.OriginPolicy != OriginPolicyPermissive {
		t.Fatalf("OriginPolicy=%q, want %q", cfg.OriginPolicy, OriginPolicyPermissive)
	}
}
|
|
||||||
|
|
||||||
// TestParseTrustedProxyList checks that bare IPs and CIDR blocks parse and
// that an unparsable entry is reported in the invalid list.
func TestParseTrustedProxyList(t *testing.T) {
	prefixes, invalid := parseTrustedProxyList("10.0.0.1, 10.0.0.0/8, bad, ::1/128")
	if len(prefixes) != 3 {
		t.Fatalf("prefixes=%d, want 3", len(prefixes))
	}
	if len(invalid) != 1 || invalid[0] != "bad" {
		t.Fatalf("invalid=%v, want [bad]", invalid)
	}
}
|
|
||||||
|
|
||||||
// TestRequestPeerIP_ParsesIPv6 checks that a bracketed IPv6 RemoteAddr is
// parsed down to the bare address.
func TestRequestPeerIP_ParsesIPv6(t *testing.T) {
	req, err := http.NewRequest(http.MethodGet, "http://example.test", nil)
	if err != nil {
		t.Fatalf("NewRequest: %v", err)
	}
	req.RemoteAddr = "[::1]:1234"

	addr, ok := requestPeerIP(req)
	if !ok {
		t.Fatalf("expected IPv6 peer IP to parse")
	}
	if addr != netip.MustParseAddr("::1") {
		t.Fatalf("addr=%s, want ::1", addr)
	}
}
|
|
||||||
@@ -1,166 +0,0 @@
|
|||||||
package handler
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware"
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
|
||||||
"github.com/gin-gonic/gin"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Gin-context keys used to stash per-request model/stream info for later
// ops error logging (see setOpsRequestContext / recordOpsError).
const (
	opsModelKey  = "ops_model"
	opsStreamKey = "ops_stream"
)

// Sizing of the asynchronous error-log pipeline.
const (
	opsErrorLogWorkerCount = 10              // concurrent persistence goroutines
	opsErrorLogQueueSize   = 256             // buffered jobs; beyond this, jobs are dropped
	opsErrorLogTimeout     = 2 * time.Second // per-record persistence deadline
)

// opsErrorLogJob pairs an error entry with the service that persists it.
type opsErrorLogJob struct {
	ops   *service.OpsService
	entry *service.OpsErrorLog
}

var (
	// opsErrorLogOnce guards one-time worker startup.
	opsErrorLogOnce sync.Once
	// opsErrorLogQueue feeds jobs to the background workers.
	opsErrorLogQueue chan opsErrorLogJob
)
|
|
||||||
|
|
||||||
func startOpsErrorLogWorkers() {
|
|
||||||
opsErrorLogQueue = make(chan opsErrorLogJob, opsErrorLogQueueSize)
|
|
||||||
for i := 0; i < opsErrorLogWorkerCount; i++ {
|
|
||||||
go func() {
|
|
||||||
for job := range opsErrorLogQueue {
|
|
||||||
if job.ops == nil || job.entry == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), opsErrorLogTimeout)
|
|
||||||
_ = job.ops.RecordError(ctx, job.entry)
|
|
||||||
cancel()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func enqueueOpsErrorLog(ops *service.OpsService, entry *service.OpsErrorLog) {
|
|
||||||
if ops == nil || entry == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
opsErrorLogOnce.Do(startOpsErrorLogWorkers)
|
|
||||||
|
|
||||||
select {
|
|
||||||
case opsErrorLogQueue <- opsErrorLogJob{ops: ops, entry: entry}:
|
|
||||||
default:
|
|
||||||
// Queue is full; drop to avoid blocking request handling.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// setOpsRequestContext stashes the requested model name and stream flag on
// the gin context so recordOpsError can attach them to error logs later.
func setOpsRequestContext(c *gin.Context, model string, stream bool) {
	c.Set(opsModelKey, model)
	c.Set(opsStreamKey, stream)
}
|
|
||||||
|
|
||||||
func recordOpsError(c *gin.Context, ops *service.OpsService, status int, errType, message, fallbackPlatform string) {
|
|
||||||
if ops == nil || c == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
model, _ := c.Get(opsModelKey)
|
|
||||||
stream, _ := c.Get(opsStreamKey)
|
|
||||||
|
|
||||||
var modelName string
|
|
||||||
if m, ok := model.(string); ok {
|
|
||||||
modelName = m
|
|
||||||
}
|
|
||||||
streaming, _ := stream.(bool)
|
|
||||||
|
|
||||||
apiKey, _ := middleware2.GetAPIKeyFromContext(c)
|
|
||||||
|
|
||||||
logEntry := &service.OpsErrorLog{
|
|
||||||
Phase: classifyOpsPhase(errType, message),
|
|
||||||
Type: errType,
|
|
||||||
Severity: classifyOpsSeverity(errType, status),
|
|
||||||
StatusCode: status,
|
|
||||||
Platform: resolveOpsPlatform(apiKey, fallbackPlatform),
|
|
||||||
Model: modelName,
|
|
||||||
RequestID: c.Writer.Header().Get("x-request-id"),
|
|
||||||
Message: message,
|
|
||||||
ClientIP: c.ClientIP(),
|
|
||||||
RequestPath: func() string {
|
|
||||||
if c.Request != nil && c.Request.URL != nil {
|
|
||||||
return c.Request.URL.Path
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}(),
|
|
||||||
Stream: streaming,
|
|
||||||
}
|
|
||||||
|
|
||||||
if apiKey != nil {
|
|
||||||
logEntry.APIKeyID = &apiKey.ID
|
|
||||||
if apiKey.User != nil {
|
|
||||||
logEntry.UserID = &apiKey.User.ID
|
|
||||||
}
|
|
||||||
if apiKey.GroupID != nil {
|
|
||||||
logEntry.GroupID = apiKey.GroupID
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
enqueueOpsErrorLog(ops, logEntry)
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolveOpsPlatform(apiKey *service.APIKey, fallback string) string {
|
|
||||||
if apiKey != nil && apiKey.Group != nil && apiKey.Group.Platform != "" {
|
|
||||||
return apiKey.Group.Platform
|
|
||||||
}
|
|
||||||
return fallback
|
|
||||||
}
|
|
||||||
|
|
||||||
// classifyOpsPhase maps an error type (plus message hints) to a coarse
// pipeline phase for the ops dashboard: auth, billing, concurrency, upstream,
// response, scheduling, or internal.
func classifyOpsPhase(errType, message string) string {
	lowered := strings.ToLower(message)
	switch errType {
	case "authentication_error":
		return "auth"
	case "billing_error", "subscription_error":
		return "billing"
	case "rate_limit_error":
		// Local concurrency limits are distinguished from upstream rate limits
		// by keywords in the message.
		if strings.Contains(lowered, "concurrency") || strings.Contains(lowered, "pending") {
			return "concurrency"
		}
		return "upstream"
	case "invalid_request_error":
		return "response"
	case "upstream_error", "overloaded_error":
		return "upstream"
	case "api_error":
		if strings.Contains(lowered, "no available accounts") {
			return "scheduling"
		}
		return "internal"
	}
	return "internal"
}
|
|
||||||
|
|
||||||
// classifyOpsSeverity maps an error type and HTTP status to a severity tier:
// P1 for server errors and rate limiting, P2 for other client errors, and P3
// for expected user-facing failures (validation/auth/billing) or success codes.
func classifyOpsSeverity(errType string, status int) string {
	switch errType {
	case "invalid_request_error", "authentication_error", "billing_error", "subscription_error":
		return "P3"
	}
	switch {
	case status >= 500, status == 429:
		return "P1"
	case status >= 400:
		return "P2"
	default:
		return "P3"
	}
}
|
|
||||||
@@ -1,190 +0,0 @@
|
|||||||
package repository
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"database/sql"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ListErrorLogs queries ops_error_logs with optional filters and pagination.
|
|
||||||
// It returns the list items and the total count of matching rows.
|
|
||||||
func (r *OpsRepository) ListErrorLogs(ctx context.Context, filter *service.ErrorLogFilter) ([]*service.ErrorLog, int64, error) {
|
|
||||||
page := 1
|
|
||||||
pageSize := 20
|
|
||||||
if filter != nil {
|
|
||||||
if filter.Page > 0 {
|
|
||||||
page = filter.Page
|
|
||||||
}
|
|
||||||
if filter.PageSize > 0 {
|
|
||||||
pageSize = filter.PageSize
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if pageSize > 100 {
|
|
||||||
pageSize = 100
|
|
||||||
}
|
|
||||||
offset := (page - 1) * pageSize
|
|
||||||
|
|
||||||
conditions := make([]string, 0)
|
|
||||||
args := make([]any, 0)
|
|
||||||
|
|
||||||
addCondition := func(condition string, values ...any) {
|
|
||||||
conditions = append(conditions, condition)
|
|
||||||
args = append(args, values...)
|
|
||||||
}
|
|
||||||
|
|
||||||
if filter != nil {
|
|
||||||
// 默认查询最近 24 小时
|
|
||||||
if filter.StartTime == nil && filter.EndTime == nil {
|
|
||||||
defaultStart := time.Now().Add(-24 * time.Hour)
|
|
||||||
filter.StartTime = &defaultStart
|
|
||||||
}
|
|
||||||
|
|
||||||
if filter.StartTime != nil {
|
|
||||||
addCondition(fmt.Sprintf("created_at >= $%d", len(args)+1), *filter.StartTime)
|
|
||||||
}
|
|
||||||
if filter.EndTime != nil {
|
|
||||||
addCondition(fmt.Sprintf("created_at <= $%d", len(args)+1), *filter.EndTime)
|
|
||||||
}
|
|
||||||
if filter.ErrorCode != nil {
|
|
||||||
addCondition(fmt.Sprintf("status_code = $%d", len(args)+1), *filter.ErrorCode)
|
|
||||||
}
|
|
||||||
if provider := strings.TrimSpace(filter.Provider); provider != "" {
|
|
||||||
addCondition(fmt.Sprintf("platform = $%d", len(args)+1), provider)
|
|
||||||
}
|
|
||||||
if filter.AccountID != nil {
|
|
||||||
addCondition(fmt.Sprintf("account_id = $%d", len(args)+1), *filter.AccountID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
where := ""
|
|
||||||
if len(conditions) > 0 {
|
|
||||||
where = "WHERE " + strings.Join(conditions, " AND ")
|
|
||||||
}
|
|
||||||
|
|
||||||
countQuery := fmt.Sprintf(`SELECT COUNT(1) FROM ops_error_logs %s`, where)
|
|
||||||
var total int64
|
|
||||||
if err := scanSingleRow(ctx, r.sql, countQuery, args, &total); err != nil {
|
|
||||||
if err == sql.ErrNoRows {
|
|
||||||
total = 0
|
|
||||||
} else {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
listQuery := fmt.Sprintf(`
|
|
||||||
SELECT
|
|
||||||
id,
|
|
||||||
created_at,
|
|
||||||
severity,
|
|
||||||
request_id,
|
|
||||||
account_id,
|
|
||||||
request_path,
|
|
||||||
platform,
|
|
||||||
model,
|
|
||||||
status_code,
|
|
||||||
error_message,
|
|
||||||
duration_ms,
|
|
||||||
retry_count,
|
|
||||||
stream
|
|
||||||
FROM ops_error_logs
|
|
||||||
%s
|
|
||||||
ORDER BY created_at DESC
|
|
||||||
LIMIT $%d OFFSET $%d
|
|
||||||
`, where, len(args)+1, len(args)+2)
|
|
||||||
|
|
||||||
listArgs := append(append([]any{}, args...), pageSize, offset)
|
|
||||||
rows, err := r.sql.QueryContext(ctx, listQuery, listArgs...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
defer func() { _ = rows.Close() }()
|
|
||||||
|
|
||||||
results := make([]*service.ErrorLog, 0)
|
|
||||||
for rows.Next() {
|
|
||||||
var (
|
|
||||||
id int64
|
|
||||||
createdAt time.Time
|
|
||||||
severity sql.NullString
|
|
||||||
requestID sql.NullString
|
|
||||||
accountID sql.NullInt64
|
|
||||||
requestURI sql.NullString
|
|
||||||
platform sql.NullString
|
|
||||||
model sql.NullString
|
|
||||||
statusCode sql.NullInt64
|
|
||||||
message sql.NullString
|
|
||||||
durationMs sql.NullInt64
|
|
||||||
retryCount sql.NullInt64
|
|
||||||
stream sql.NullBool
|
|
||||||
)
|
|
||||||
|
|
||||||
if err := rows.Scan(
|
|
||||||
&id,
|
|
||||||
&createdAt,
|
|
||||||
&severity,
|
|
||||||
&requestID,
|
|
||||||
&accountID,
|
|
||||||
&requestURI,
|
|
||||||
&platform,
|
|
||||||
&model,
|
|
||||||
&statusCode,
|
|
||||||
&message,
|
|
||||||
&durationMs,
|
|
||||||
&retryCount,
|
|
||||||
&stream,
|
|
||||||
); err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
entry := &service.ErrorLog{
|
|
||||||
ID: id,
|
|
||||||
Timestamp: createdAt,
|
|
||||||
Level: levelFromSeverity(severity.String),
|
|
||||||
RequestID: requestID.String,
|
|
||||||
APIPath: requestURI.String,
|
|
||||||
Provider: platform.String,
|
|
||||||
Model: model.String,
|
|
||||||
HTTPCode: int(statusCode.Int64),
|
|
||||||
Stream: stream.Bool,
|
|
||||||
}
|
|
||||||
if accountID.Valid {
|
|
||||||
entry.AccountID = strconv.FormatInt(accountID.Int64, 10)
|
|
||||||
}
|
|
||||||
if message.Valid {
|
|
||||||
entry.ErrorMessage = message.String
|
|
||||||
}
|
|
||||||
if durationMs.Valid {
|
|
||||||
v := int(durationMs.Int64)
|
|
||||||
entry.DurationMs = &v
|
|
||||||
}
|
|
||||||
if retryCount.Valid {
|
|
||||||
v := int(retryCount.Int64)
|
|
||||||
entry.RetryCount = &v
|
|
||||||
}
|
|
||||||
|
|
||||||
results = append(results, entry)
|
|
||||||
}
|
|
||||||
if err := rows.Err(); err != nil {
|
|
||||||
return nil, 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return results, total, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// levelFromSeverity converts an internal severity tier (P0-P3) into a log
// level label for the API response. Unknown severities map to ERROR.
func levelFromSeverity(severity string) string {
	switch strings.ToUpper(strings.TrimSpace(severity)) {
	case "P0", "P1":
		return "CRITICAL"
	case "P2":
		return "ERROR"
	case "P3":
		return "WARN"
	}
	return "ERROR"
}
|
|
||||||
@@ -1,127 +0,0 @@
|
|||||||
package repository
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
|
||||||
"github.com/redis/go-redis/v9"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// opsLatestMetricsKey is the Redis key holding the most recent system metrics snapshot.
	opsLatestMetricsKey = "ops:metrics:latest"

	// opsDashboardOverviewKeyPrefix prefixes per-time-range dashboard overview cache keys.
	opsDashboardOverviewKeyPrefix = "ops:dashboard:overview:"

	// opsLatestMetricsTTL bounds the staleness of the cached metrics snapshot.
	opsLatestMetricsTTL = 10 * time.Second
)
|
|
||||||
|
|
||||||
func (r *OpsRepository) GetCachedLatestSystemMetric(ctx context.Context) (*service.OpsMetrics, error) {
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = context.Background()
|
|
||||||
}
|
|
||||||
if r == nil || r.rdb == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := r.rdb.Get(ctx, opsLatestMetricsKey).Bytes()
|
|
||||||
if errors.Is(err, redis.Nil) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("redis get cached latest system metric: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var metric service.OpsMetrics
|
|
||||||
if err := json.Unmarshal(data, &metric); err != nil {
|
|
||||||
return nil, fmt.Errorf("unmarshal cached latest system metric: %w", err)
|
|
||||||
}
|
|
||||||
return &metric, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *OpsRepository) SetCachedLatestSystemMetric(ctx context.Context, metric *service.OpsMetrics) error {
|
|
||||||
if metric == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = context.Background()
|
|
||||||
}
|
|
||||||
if r == nil || r.rdb == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := json.Marshal(metric)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("marshal cached latest system metric: %w", err)
|
|
||||||
}
|
|
||||||
return r.rdb.Set(ctx, opsLatestMetricsKey, data, opsLatestMetricsTTL).Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *OpsRepository) GetCachedDashboardOverview(ctx context.Context, timeRange string) (*service.DashboardOverviewData, error) {
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = context.Background()
|
|
||||||
}
|
|
||||||
if r == nil || r.rdb == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
rangeKey := strings.TrimSpace(timeRange)
|
|
||||||
if rangeKey == "" {
|
|
||||||
rangeKey = "1h"
|
|
||||||
}
|
|
||||||
|
|
||||||
key := opsDashboardOverviewKeyPrefix + rangeKey
|
|
||||||
data, err := r.rdb.Get(ctx, key).Bytes()
|
|
||||||
if errors.Is(err, redis.Nil) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("redis get cached dashboard overview: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
var overview service.DashboardOverviewData
|
|
||||||
if err := json.Unmarshal(data, &overview); err != nil {
|
|
||||||
return nil, fmt.Errorf("unmarshal cached dashboard overview: %w", err)
|
|
||||||
}
|
|
||||||
return &overview, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *OpsRepository) SetCachedDashboardOverview(ctx context.Context, timeRange string, data *service.DashboardOverviewData, ttl time.Duration) error {
|
|
||||||
if data == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if ttl <= 0 {
|
|
||||||
ttl = 10 * time.Second
|
|
||||||
}
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = context.Background()
|
|
||||||
}
|
|
||||||
if r == nil || r.rdb == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
rangeKey := strings.TrimSpace(timeRange)
|
|
||||||
if rangeKey == "" {
|
|
||||||
rangeKey = "1h"
|
|
||||||
}
|
|
||||||
|
|
||||||
payload, err := json.Marshal(data)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("marshal cached dashboard overview: %w", err)
|
|
||||||
}
|
|
||||||
key := opsDashboardOverviewKeyPrefix + rangeKey
|
|
||||||
return r.rdb.Set(ctx, key, payload, ttl).Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *OpsRepository) PingRedis(ctx context.Context) error {
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = context.Background()
|
|
||||||
}
|
|
||||||
if r == nil || r.rdb == nil {
|
|
||||||
return errors.New("redis client is nil")
|
|
||||||
}
|
|
||||||
return r.rdb.Ping(ctx).Err()
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,55 +0,0 @@
|
|||||||
package middleware
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/Wei-Shaw/sub2api/internal/service"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
	// opsAuthErrorLogWorkerCount is the number of goroutines draining the auth error log queue.
	opsAuthErrorLogWorkerCount = 10
	// opsAuthErrorLogQueueSize bounds the in-memory queue; overflow entries are dropped.
	opsAuthErrorLogQueueSize = 256
	// opsAuthErrorLogTimeout caps how long a single RecordError call may take.
	opsAuthErrorLogTimeout = 2 * time.Second
)
|
|
||||||
|
|
||||||
// opsAuthErrorLogJob pairs an OpsService with the auth-phase error entry it
// should persist asynchronously.
type opsAuthErrorLogJob struct {
	ops   *service.OpsService
	entry *service.OpsErrorLog
}
|
|
||||||
|
|
||||||
var (
	// opsAuthErrorLogOnce guards one-time startup of the background workers.
	opsAuthErrorLogOnce sync.Once
	// opsAuthErrorLogQueue buffers error-log jobs for asynchronous persistence.
	opsAuthErrorLogQueue chan opsAuthErrorLogJob
)
|
|
||||||
|
|
||||||
func startOpsAuthErrorLogWorkers() {
|
|
||||||
opsAuthErrorLogQueue = make(chan opsAuthErrorLogJob, opsAuthErrorLogQueueSize)
|
|
||||||
for i := 0; i < opsAuthErrorLogWorkerCount; i++ {
|
|
||||||
go func() {
|
|
||||||
for job := range opsAuthErrorLogQueue {
|
|
||||||
if job.ops == nil || job.entry == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), opsAuthErrorLogTimeout)
|
|
||||||
_ = job.ops.RecordError(ctx, job.entry)
|
|
||||||
cancel()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func enqueueOpsAuthErrorLog(ops *service.OpsService, entry *service.OpsErrorLog) {
|
|
||||||
if ops == nil || entry == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
opsAuthErrorLogOnce.Do(startOpsAuthErrorLogWorkers)
|
|
||||||
|
|
||||||
select {
|
|
||||||
case opsAuthErrorLogQueue <- opsAuthErrorLogJob{ops: ops, entry: entry}:
|
|
||||||
default:
|
|
||||||
// Queue is full; drop to avoid blocking request handling.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,99 +0,0 @@
|
|||||||
package service
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrorLog represents an ops error log item for list queries.
//
// Field naming matches docs/API-运维监控中心2.0.md (L3 root-cause tracing —
// error log list).
type ErrorLog struct {
	ID        int64     `json:"id"`
	Timestamp time.Time `json:"timestamp"`

	Level        string `json:"level,omitempty"`         // CRITICAL/ERROR/WARN, derived from severity
	RequestID    string `json:"request_id,omitempty"`    // upstream request correlation id
	AccountID    string `json:"account_id,omitempty"`    // upstream account id, stringified
	APIPath      string `json:"api_path,omitempty"`      // request path that produced the error
	Provider     string `json:"provider,omitempty"`      // platform name
	Model        string `json:"model,omitempty"`         // model requested
	HTTPCode     int    `json:"http_code,omitempty"`     // response status code
	ErrorMessage string `json:"error_message,omitempty"` // human-readable error text

	DurationMs *int `json:"duration_ms,omitempty"` // request duration, when recorded
	RetryCount *int `json:"retry_count,omitempty"` // retries performed, when recorded
	Stream     bool `json:"stream,omitempty"`      // whether the request was streaming
}
|
|
||||||
|
|
||||||
// ErrorLogFilter describes optional filters and pagination for listing ops error logs.
type ErrorLogFilter struct {
	StartTime *time.Time // inclusive lower bound on created_at
	EndTime   *time.Time // inclusive upper bound on created_at

	ErrorCode *int   // exact HTTP status code match
	Provider  string // platform name; blank matches all
	AccountID *int64 // upstream account filter

	Page     int // 1-based page number; non-positive means 1
	PageSize int // items per page; non-positive means 20, capped at 100
}
|
|
||||||
|
|
||||||
func (f *ErrorLogFilter) normalize() (page, pageSize int) {
|
|
||||||
page = 1
|
|
||||||
pageSize = 20
|
|
||||||
if f == nil {
|
|
||||||
return page, pageSize
|
|
||||||
}
|
|
||||||
|
|
||||||
if f.Page > 0 {
|
|
||||||
page = f.Page
|
|
||||||
}
|
|
||||||
if f.PageSize > 0 {
|
|
||||||
pageSize = f.PageSize
|
|
||||||
}
|
|
||||||
if pageSize > 100 {
|
|
||||||
pageSize = 100
|
|
||||||
}
|
|
||||||
return page, pageSize
|
|
||||||
}
|
|
||||||
|
|
||||||
// ErrorLogListResponse is the paginated payload returned by GetErrorLogs.
type ErrorLogListResponse struct {
	Errors   []*ErrorLog `json:"errors"`
	Total    int64       `json:"total"`
	Page     int         `json:"page"`
	PageSize int         `json:"page_size"`
}
|
|
||||||
|
|
||||||
func (s *OpsService) GetErrorLogs(ctx context.Context, filter *ErrorLogFilter) (*ErrorLogListResponse, error) {
|
|
||||||
if s == nil || s.repo == nil {
|
|
||||||
return &ErrorLogListResponse{
|
|
||||||
Errors: []*ErrorLog{},
|
|
||||||
Total: 0,
|
|
||||||
Page: 1,
|
|
||||||
PageSize: 20,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
page, pageSize := filter.normalize()
|
|
||||||
if filter == nil {
|
|
||||||
filter = &ErrorLogFilter{}
|
|
||||||
}
|
|
||||||
filter.Page = page
|
|
||||||
filter.PageSize = pageSize
|
|
||||||
|
|
||||||
items, total, err := s.repo.ListErrorLogs(ctx, filter)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if items == nil {
|
|
||||||
items = []*ErrorLog{}
|
|
||||||
}
|
|
||||||
|
|
||||||
return &ErrorLogListResponse{
|
|
||||||
Errors: items,
|
|
||||||
Total: total,
|
|
||||||
Page: page,
|
|
||||||
PageSize: pageSize,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
@@ -1,834 +0,0 @@
|
|||||||
package service
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OpsAlertService periodically evaluates ops alert rules against recent
// system metrics and dispatches email/webhook notifications for breaches.
type OpsAlertService struct {
	opsService   *OpsService
	userService  *UserService
	emailService *EmailService
	httpClient   *http.Client // used for webhook delivery

	interval time.Duration // evaluation cadence

	startOnce sync.Once // guards Start/StartWithContext
	stopOnce  sync.Once // guards Stop
	stopCtx   context.Context // canceled to stop the run loop
	stop      context.CancelFunc
	wg        sync.WaitGroup // tracks the run goroutine
}
|
|
||||||
|
|
||||||
// opsAlertEvalInterval defines how often OpsAlertService evaluates alert rules.
//
// Production uses opsMetricsInterval. Tests may override this variable to keep
// integration tests fast without changing production defaults.
// NewOpsAlertService copies this value into the service's interval at
// construction time.
var opsAlertEvalInterval = opsMetricsInterval
|
|
||||||
|
|
||||||
func NewOpsAlertService(opsService *OpsService, userService *UserService, emailService *EmailService) *OpsAlertService {
|
|
||||||
return &OpsAlertService{
|
|
||||||
opsService: opsService,
|
|
||||||
userService: userService,
|
|
||||||
emailService: emailService,
|
|
||||||
httpClient: &http.Client{Timeout: 10 * time.Second},
|
|
||||||
interval: opsAlertEvalInterval,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start launches the background alert evaluation loop with a background
// parent context.
//
// Stop must be called during shutdown to ensure the goroutine exits.
func (s *OpsAlertService) Start() {
	s.StartWithContext(context.Background())
}
|
|
||||||
|
|
||||||
// StartWithContext is like Start but allows the caller to provide a parent context.
// When the parent context is canceled, the service stops automatically.
// Only the first call has any effect; subsequent calls are no-ops.
func (s *OpsAlertService) StartWithContext(ctx context.Context) {
	if s == nil {
		return
	}
	if ctx == nil {
		ctx = context.Background()
	}

	s.startOnce.Do(func() {
		// Defend against a zero-value service constructed without New.
		if s.interval <= 0 {
			s.interval = opsAlertEvalInterval
		}

		s.stopCtx, s.stop = context.WithCancel(ctx)
		s.wg.Add(1)
		go s.run()
	})
}
|
|
||||||
|
|
||||||
// Stop gracefully stops the background goroutine started by Start/StartWithContext.
// It is safe to call Stop multiple times; it blocks until the run loop exits.
func (s *OpsAlertService) Stop() {
	if s == nil {
		return
	}

	s.stopOnce.Do(func() {
		if s.stop != nil {
			s.stop()
		}
	})
	// Wait for the run goroutine to drain before returning.
	s.wg.Wait()
}
|
|
||||||
|
|
||||||
// run is the background evaluation loop: it evaluates once immediately, then
// on every tick of the configured interval until the stop context is canceled.
func (s *OpsAlertService) run() {
	defer s.wg.Done()

	ticker := time.NewTicker(s.interval)
	defer ticker.Stop()

	// Evaluate immediately so a freshly started service does not wait one
	// full interval before its first pass.
	s.evaluateOnce()
	for {
		select {
		case <-ticker.C:
			s.evaluateOnce()
		case <-s.stopCtx.Done():
			return
		}
	}
}
|
|
||||||
|
|
||||||
// evaluateOnce runs a single evaluation pass bounded by opsAlertEvaluateTimeout.
func (s *OpsAlertService) evaluateOnce() {
	ctx, cancel := context.WithTimeout(s.stopCtx, opsAlertEvaluateTimeout)
	defer cancel()

	s.Evaluate(ctx, time.Now())
}
|
|
||||||
|
|
||||||
// Evaluate runs one pass over all alert rules at time now:
//
//  1. Load all rules and, per metrics window, fetch just enough recent
//     samples to cover the longest sustained requirement for that window.
//  2. For each enabled rule, check that the samples are contiguous and
//     whether every sample in the sustained span breaches the threshold.
//  3. Fire a new event (respecting cooldown and any already-active event)
//     or resolve the active event when the rule no longer breaches.
//
// Errors on individual rules are logged and skipped so one failing rule does
// not block the others.
func (s *OpsAlertService) Evaluate(ctx context.Context, now time.Time) {
	if s == nil || s.opsService == nil {
		return
	}

	rules, err := s.opsService.ListAlertRules(ctx)
	if err != nil {
		log.Printf("[OpsAlert] failed to list rules: %v", err)
		return
	}
	if len(rules) == 0 {
		return
	}

	// For each window size, track the largest sustained-minutes requirement so
	// a single metrics query per window can serve every rule on that window.
	maxSustainedByWindow := make(map[int]int)
	for _, rule := range rules {
		if !rule.Enabled {
			continue
		}
		window := rule.WindowMinutes
		if window <= 0 {
			window = 1
		}
		sustained := rule.SustainedMinutes
		if sustained <= 0 {
			sustained = 1
		}
		if sustained > maxSustainedByWindow[window] {
			maxSustainedByWindow[window] = sustained
		}
	}

	// Fetch metrics once per distinct window size.
	metricsByWindow := make(map[int][]OpsMetrics)
	for window, limit := range maxSustainedByWindow {
		metrics, err := s.opsService.ListRecentSystemMetrics(ctx, window, limit)
		if err != nil {
			log.Printf("[OpsAlert] failed to load metrics window=%dm: %v", window, err)
			continue
		}
		metricsByWindow[window] = metrics
	}

	for _, rule := range rules {
		if !rule.Enabled {
			continue
		}
		window := rule.WindowMinutes
		if window <= 0 {
			window = 1
		}
		sustained := rule.SustainedMinutes
		if sustained <= 0 {
			sustained = 1
		}

		metrics := metricsByWindow[window]
		// Require an unbroken, fresh run of samples before evaluating.
		selected, ok := selectContiguousMetrics(metrics, sustained, now)
		if !ok {
			continue
		}

		breached, latestValue, ok := evaluateRule(rule, selected)
		if !ok {
			continue
		}

		activeEvent, err := s.opsService.GetActiveAlertEvent(ctx, rule.ID)
		if err != nil {
			log.Printf("[OpsAlert] failed to get active event (rule=%d): %v", rule.ID, err)
			continue
		}

		if breached {
			// Already firing: nothing to do.
			if activeEvent != nil {
				continue
			}

			// Respect the per-rule cooldown relative to the last fired event.
			lastEvent, err := s.opsService.GetLatestAlertEvent(ctx, rule.ID)
			if err != nil {
				log.Printf("[OpsAlert] failed to get latest event (rule=%d): %v", rule.ID, err)
				continue
			}
			if lastEvent != nil && rule.CooldownMinutes > 0 {
				cooldown := time.Duration(rule.CooldownMinutes) * time.Minute
				if now.Sub(lastEvent.FiredAt) < cooldown {
					continue
				}
			}

			event := &OpsAlertEvent{
				RuleID:         rule.ID,
				Severity:       rule.Severity,
				Status:         OpsAlertStatusFiring,
				Title:          fmt.Sprintf("%s: %s", rule.Severity, rule.Name),
				Description:    buildAlertDescription(rule, latestValue),
				MetricValue:    latestValue,
				ThresholdValue: rule.Threshold,
				FiredAt:        now,
				CreatedAt:      now,
			}

			if err := s.opsService.CreateAlertEvent(ctx, event); err != nil {
				log.Printf("[OpsAlert] failed to create event (rule=%d): %v", rule.ID, err)
				continue
			}

			// Record which channels actually delivered.
			emailSent, webhookSent := s.dispatchNotifications(ctx, rule, event)
			if emailSent || webhookSent {
				if err := s.opsService.UpdateAlertEventNotifications(ctx, event.ID, emailSent, webhookSent); err != nil {
					log.Printf("[OpsAlert] failed to update notification flags (event=%d): %v", event.ID, err)
				}
			}
		} else if activeEvent != nil {
			// Condition cleared: resolve the currently-firing event.
			resolvedAt := now
			if err := s.opsService.UpdateAlertEventStatus(ctx, activeEvent.ID, OpsAlertStatusResolved, &resolvedAt); err != nil {
				log.Printf("[OpsAlert] failed to resolve event (event=%d): %v", activeEvent.ID, err)
			}
		}
	}
}
|
|
||||||
|
|
||||||
// opsMetricsContinuityTolerance is the allowed jitter when checking that
// consecutive metric samples are spaced one opsMetricsInterval apart.
const opsMetricsContinuityTolerance = 20 * time.Second
|
|
||||||
|
|
||||||
// selectContiguousMetrics picks the newest N metrics and verifies they are continuous.
//
// This prevents a sustained rule from triggering when metrics sampling has gaps
// (e.g. collector downtime) and avoids evaluating "stale" data.
//
// Assumptions:
// - Metrics are ordered by UpdatedAt DESC (newest first).
// - Metrics are expected to be collected at opsMetricsInterval cadence.
func selectContiguousMetrics(metrics []OpsMetrics, needed int, now time.Time) ([]OpsMetrics, bool) {
	if needed <= 0 {
		return nil, false
	}
	if len(metrics) < needed {
		return nil, false
	}
	newest := metrics[0].UpdatedAt
	if newest.IsZero() {
		return nil, false
	}
	// Reject stale data: the newest sample must be at most one interval
	// (plus jitter tolerance) old.
	if now.Sub(newest) > opsMetricsInterval+opsMetricsContinuityTolerance {
		return nil, false
	}

	selected := metrics[:needed]
	for i := 0; i < len(selected)-1; i++ {
		a := selected[i].UpdatedAt
		b := selected[i+1].UpdatedAt
		if a.IsZero() || b.IsZero() {
			return nil, false
		}
		// Adjacent samples must be roughly one collection interval apart.
		gap := a.Sub(b)
		if gap < opsMetricsInterval-opsMetricsContinuityTolerance || gap > opsMetricsInterval+opsMetricsContinuityTolerance {
			return nil, false
		}
	}
	return selected, true
}
|
|
||||||
|
|
||||||
func evaluateRule(rule OpsAlertRule, metrics []OpsMetrics) (bool, float64, bool) {
|
|
||||||
if len(metrics) == 0 {
|
|
||||||
return false, 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
latestValue, ok := metricValue(metrics[0], rule.MetricType)
|
|
||||||
if !ok {
|
|
||||||
return false, 0, false
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, metric := range metrics {
|
|
||||||
value, ok := metricValue(metric, rule.MetricType)
|
|
||||||
if !ok || !compareMetric(value, rule.Operator, rule.Threshold) {
|
|
||||||
return false, latestValue, true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true, latestValue, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func metricValue(metric OpsMetrics, metricType string) (float64, bool) {
|
|
||||||
switch metricType {
|
|
||||||
case OpsMetricSuccessRate:
|
|
||||||
if metric.RequestCount == 0 {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
return metric.SuccessRate, true
|
|
||||||
case OpsMetricErrorRate:
|
|
||||||
if metric.RequestCount == 0 {
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
return metric.ErrorRate, true
|
|
||||||
case OpsMetricP95LatencyMs:
|
|
||||||
return float64(metric.P95LatencyMs), true
|
|
||||||
case OpsMetricP99LatencyMs:
|
|
||||||
return float64(metric.P99LatencyMs), true
|
|
||||||
case OpsMetricHTTP2Errors:
|
|
||||||
return float64(metric.HTTP2Errors), true
|
|
||||||
case OpsMetricCPUUsagePercent:
|
|
||||||
return metric.CPUUsagePercent, true
|
|
||||||
case OpsMetricMemoryUsagePercent:
|
|
||||||
return metric.MemoryUsagePercent, true
|
|
||||||
case OpsMetricQueueDepth:
|
|
||||||
return float64(metric.ConcurrencyQueueDepth), true
|
|
||||||
default:
|
|
||||||
return 0, false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// compareMetric applies a comparison operator (">", ">=", "<", "<=", "==") to
// value versus threshold. Unknown operators evaluate to false.
func compareMetric(value float64, operator string, threshold float64) bool {
	switch operator {
	case ">":
		return value > threshold
	case ">=":
		return value >= threshold
	case "<":
		return value < threshold
	case "<=":
		return value <= threshold
	case "==":
		return value == threshold
	}
	return false
}
|
|
||||||
|
|
||||||
func buildAlertDescription(rule OpsAlertRule, value float64) string {
|
|
||||||
window := rule.WindowMinutes
|
|
||||||
if window <= 0 {
|
|
||||||
window = 1
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("Rule %s triggered: %s %s %.2f (current %.2f) over last %dm",
|
|
||||||
rule.Name,
|
|
||||||
rule.MetricType,
|
|
||||||
rule.Operator,
|
|
||||||
rule.Threshold,
|
|
||||||
value,
|
|
||||||
window,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
// dispatchNotifications delivers the event via the channels the rule enables
// and reports (emailSent, webhookSent). All sends share one bounded
// notification context derived from the service's stop context.
func (s *OpsAlertService) dispatchNotifications(ctx context.Context, rule OpsAlertRule, event *OpsAlertEvent) (bool, bool) {
	emailSent := false
	webhookSent := false

	notifyCtx, cancel := s.notificationContext(ctx)
	defer cancel()

	if rule.NotifyEmail {
		emailSent = s.sendEmailNotification(notifyCtx, rule, event)
	}
	if rule.NotifyWebhook && rule.WebhookURL != "" {
		webhookSent = s.sendWebhookNotification(notifyCtx, rule, event)
	}
	// Fallback channel: if email is enabled but ultimately fails, try webhook even if the
	// webhook toggle is off (as long as a webhook URL is configured).
	if rule.NotifyEmail && !emailSent && !rule.NotifyWebhook && rule.WebhookURL != "" {
		log.Printf("[OpsAlert] email failed; attempting webhook fallback (rule=%d)", rule.ID)
		webhookSent = s.sendWebhookNotification(notifyCtx, rule, event)
	}

	return emailSent, webhookSent
}
|
|
||||||
|
|
||||||
const (
	// opsAlertEvaluateTimeout bounds a single rule-evaluation pass.
	opsAlertEvaluateTimeout = 45 * time.Second
	// opsAlertNotificationTimeout bounds email/webhook delivery for one event.
	opsAlertNotificationTimeout = 30 * time.Second
	// opsAlertEmailMaxRetries is the number of retries after the first email attempt.
	opsAlertEmailMaxRetries = 3
)
|
|
||||||
|
|
||||||
// opsAlertEmailBackoff lists the delays between successive email retry
// attempts (exponential: 1s, 2s, 4s).
var opsAlertEmailBackoff = []time.Duration{
	1 * time.Second,
	2 * time.Second,
	4 * time.Second,
}
|
|
||||||
|
|
||||||
func (s *OpsAlertService) notificationContext(ctx context.Context) (context.Context, context.CancelFunc) {
|
|
||||||
parent := ctx
|
|
||||||
if s != nil && s.stopCtx != nil {
|
|
||||||
parent = s.stopCtx
|
|
||||||
}
|
|
||||||
if parent == nil {
|
|
||||||
parent = context.Background()
|
|
||||||
}
|
|
||||||
return context.WithTimeout(parent, opsAlertNotificationTimeout)
|
|
||||||
}
|
|
||||||
|
|
||||||
// opsAlertSleep is indirected so tests can stub out real sleeping.
var opsAlertSleep = sleepWithContext
|
|
||||||
|
|
||||||
func sleepWithContext(ctx context.Context, d time.Duration) error {
|
|
||||||
if d <= 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if ctx == nil {
|
|
||||||
time.Sleep(d)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
timer := time.NewTimer(d)
|
|
||||||
defer timer.Stop()
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
return ctx.Err()
|
|
||||||
case <-timer.C:
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func retryWithBackoff(
|
|
||||||
ctx context.Context,
|
|
||||||
maxRetries int,
|
|
||||||
backoff []time.Duration,
|
|
||||||
fn func() error,
|
|
||||||
onError func(attempt int, total int, nextDelay time.Duration, err error),
|
|
||||||
) error {
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = context.Background()
|
|
||||||
}
|
|
||||||
if maxRetries < 0 {
|
|
||||||
maxRetries = 0
|
|
||||||
}
|
|
||||||
totalAttempts := maxRetries + 1
|
|
||||||
|
|
||||||
var lastErr error
|
|
||||||
for attempt := 1; attempt <= totalAttempts; attempt++ {
|
|
||||||
if attempt > 1 {
|
|
||||||
backoffIdx := attempt - 2
|
|
||||||
if backoffIdx < len(backoff) {
|
|
||||||
if err := opsAlertSleep(ctx, backoff[backoffIdx]); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := ctx.Err(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := fn(); err != nil {
|
|
||||||
lastErr = err
|
|
||||||
nextDelay := time.Duration(0)
|
|
||||||
if attempt < totalAttempts {
|
|
||||||
nextIdx := attempt - 1
|
|
||||||
if nextIdx < len(backoff) {
|
|
||||||
nextDelay = backoff[nextIdx]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if onError != nil {
|
|
||||||
onError(attempt, totalAttempts, nextDelay, err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return lastErr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *OpsAlertService) sendEmailNotification(ctx context.Context, rule OpsAlertRule, event *OpsAlertEvent) bool {
|
|
||||||
if s.emailService == nil || s.userService == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = context.Background()
|
|
||||||
}
|
|
||||||
|
|
||||||
admin, err := s.userService.GetFirstAdmin(ctx)
|
|
||||||
if err != nil || admin == nil || admin.Email == "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
subject := fmt.Sprintf("[Ops Alert][%s] %s", rule.Severity, rule.Name)
|
|
||||||
body := fmt.Sprintf(
|
|
||||||
"Alert triggered: %s\n\nMetric: %s\nThreshold: %.2f\nCurrent: %.2f\nWindow: %dm\nStatus: %s\nTime: %s",
|
|
||||||
rule.Name,
|
|
||||||
rule.MetricType,
|
|
||||||
rule.Threshold,
|
|
||||||
event.MetricValue,
|
|
||||||
rule.WindowMinutes,
|
|
||||||
event.Status,
|
|
||||||
event.FiredAt.Format(time.RFC3339),
|
|
||||||
)
|
|
||||||
|
|
||||||
config, err := s.emailService.GetSMTPConfig(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("[OpsAlert] email config load failed: %v", err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := retryWithBackoff(
|
|
||||||
ctx,
|
|
||||||
opsAlertEmailMaxRetries,
|
|
||||||
opsAlertEmailBackoff,
|
|
||||||
func() error {
|
|
||||||
return s.emailService.SendEmailWithConfig(config, admin.Email, subject, body)
|
|
||||||
},
|
|
||||||
func(attempt int, total int, nextDelay time.Duration, err error) {
|
|
||||||
if attempt < total {
|
|
||||||
log.Printf("[OpsAlert] email send failed (attempt=%d/%d), retrying in %s: %v", attempt, total, nextDelay, err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Printf("[OpsAlert] email send failed (attempt=%d/%d), giving up: %v", attempt, total, err)
|
|
||||||
},
|
|
||||||
); err != nil {
|
|
||||||
if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
|
|
||||||
log.Printf("[OpsAlert] email send canceled: %v", err)
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *OpsAlertService) sendWebhookNotification(ctx context.Context, rule OpsAlertRule, event *OpsAlertEvent) bool {
|
|
||||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
webhookTarget, err := validateWebhookURL(ctx, rule.WebhookURL)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("[OpsAlert] invalid webhook url (rule=%d): %v", rule.ID, err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
payload := map[string]any{
|
|
||||||
"rule_id": rule.ID,
|
|
||||||
"rule_name": rule.Name,
|
|
||||||
"severity": rule.Severity,
|
|
||||||
"status": event.Status,
|
|
||||||
"metric_type": rule.MetricType,
|
|
||||||
"metric_value": event.MetricValue,
|
|
||||||
"threshold_value": rule.Threshold,
|
|
||||||
"window_minutes": rule.WindowMinutes,
|
|
||||||
"fired_at": event.FiredAt.Format(time.RFC3339),
|
|
||||||
}
|
|
||||||
|
|
||||||
body, err := json.Marshal(payload)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, webhookTarget.URL.String(), bytes.NewReader(body))
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
req.Header.Set("Content-Type", "application/json")
|
|
||||||
|
|
||||||
resp, err := buildWebhookHTTPClient(s.httpClient, webhookTarget).Do(req)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("[OpsAlert] webhook send failed: %v", err)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
defer func() { _ = resp.Body.Close() }()
|
|
||||||
|
|
||||||
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusMultipleChoices {
|
|
||||||
log.Printf("[OpsAlert] webhook returned status %d", resp.StatusCode)
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
const webhookHTTPClientTimeout = 10 * time.Second
|
|
||||||
|
|
||||||
func buildWebhookHTTPClient(base *http.Client, webhookTarget *validatedWebhookTarget) *http.Client {
|
|
||||||
var client http.Client
|
|
||||||
if base != nil {
|
|
||||||
client = *base
|
|
||||||
}
|
|
||||||
if client.Timeout <= 0 {
|
|
||||||
client.Timeout = webhookHTTPClientTimeout
|
|
||||||
}
|
|
||||||
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
|
|
||||||
return http.ErrUseLastResponse
|
|
||||||
}
|
|
||||||
if webhookTarget != nil {
|
|
||||||
client.Transport = buildWebhookTransport(client.Transport, webhookTarget)
|
|
||||||
}
|
|
||||||
return &client
|
|
||||||
}
|
|
||||||
|
|
||||||
var disallowedWebhookIPNets = []net.IPNet{
|
|
||||||
// "this host on this network" / unspecified.
|
|
||||||
mustParseCIDR("0.0.0.0/8"),
|
|
||||||
mustParseCIDR("127.0.0.0/8"), // loopback (includes 127.0.0.1)
|
|
||||||
mustParseCIDR("10.0.0.0/8"), // RFC1918
|
|
||||||
mustParseCIDR("192.168.0.0/16"), // RFC1918
|
|
||||||
mustParseCIDR("172.16.0.0/12"), // RFC1918 (172.16.0.0 - 172.31.255.255)
|
|
||||||
mustParseCIDR("100.64.0.0/10"), // RFC6598 (carrier-grade NAT)
|
|
||||||
mustParseCIDR("169.254.0.0/16"), // IPv4 link-local (includes 169.254.169.254 metadata IP on many clouds)
|
|
||||||
mustParseCIDR("198.18.0.0/15"), // RFC2544 benchmark testing
|
|
||||||
mustParseCIDR("224.0.0.0/4"), // IPv4 multicast
|
|
||||||
mustParseCIDR("240.0.0.0/4"), // IPv4 reserved
|
|
||||||
mustParseCIDR("::/128"), // IPv6 unspecified
|
|
||||||
mustParseCIDR("::1/128"), // IPv6 loopback
|
|
||||||
mustParseCIDR("fc00::/7"), // IPv6 unique local
|
|
||||||
mustParseCIDR("fe80::/10"), // IPv6 link-local
|
|
||||||
mustParseCIDR("ff00::/8"), // IPv6 multicast
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustParseCIDR(cidr string) net.IPNet {
|
|
||||||
_, block, err := net.ParseCIDR(cidr)
|
|
||||||
if err != nil {
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
return *block
|
|
||||||
}
|
|
||||||
|
|
||||||
// lookupIPAddrs resolves host to IP addresses; a package variable so tests
// can substitute a deterministic resolver.
var lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
	return net.DefaultResolver.LookupIPAddr(ctx, host)
}

// validatedWebhookTarget is the outcome of webhook URL validation: the parsed
// URL plus canonical host/port and the DNS answers pinned at validation time,
// so the dialer cannot be redirected by a second (post-check) lookup.
type validatedWebhookTarget struct {
	URL *url.URL

	host      string   // lowercase hostname, trailing dot stripped
	port      string   // effective port ("443" when the URL carries none)
	pinnedIPs []net.IP // addresses the dialer may connect to
}

// webhookBaseDialContext performs the underlying TCP dial for webhook
// deliveries; a package variable so tests can intercept it.
var webhookBaseDialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
	d := net.Dialer{
		Timeout:   5 * time.Second,
		KeepAlive: 30 * time.Second,
	}
	return d.DialContext(ctx, network, addr)
}
|
|
||||||
|
|
||||||
func buildWebhookTransport(base http.RoundTripper, webhookTarget *validatedWebhookTarget) http.RoundTripper {
|
|
||||||
if webhookTarget == nil || webhookTarget.URL == nil {
|
|
||||||
return base
|
|
||||||
}
|
|
||||||
|
|
||||||
var transport *http.Transport
|
|
||||||
switch typed := base.(type) {
|
|
||||||
case *http.Transport:
|
|
||||||
if typed != nil {
|
|
||||||
transport = typed.Clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if transport == nil {
|
|
||||||
if defaultTransport, ok := http.DefaultTransport.(*http.Transport); ok && defaultTransport != nil {
|
|
||||||
transport = defaultTransport.Clone()
|
|
||||||
} else {
|
|
||||||
transport = (&http.Transport{}).Clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
webhookHost := webhookTarget.host
|
|
||||||
webhookPort := webhookTarget.port
|
|
||||||
pinnedIPs := append([]net.IP(nil), webhookTarget.pinnedIPs...)
|
|
||||||
|
|
||||||
transport.Proxy = nil
|
|
||||||
transport.DialTLSContext = nil
|
|
||||||
transport.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
|
|
||||||
host, port, err := net.SplitHostPort(addr)
|
|
||||||
if err != nil || host == "" || port == "" {
|
|
||||||
return nil, fmt.Errorf("webhook dial target is invalid: %q", addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
canonicalHost := strings.TrimSuffix(strings.ToLower(host), ".")
|
|
||||||
if canonicalHost != webhookHost || port != webhookPort {
|
|
||||||
return nil, fmt.Errorf("webhook dial target mismatch: %q", addr)
|
|
||||||
}
|
|
||||||
|
|
||||||
var lastErr error
|
|
||||||
for _, ip := range pinnedIPs {
|
|
||||||
if isDisallowedWebhookIP(ip) {
|
|
||||||
lastErr = fmt.Errorf("webhook target resolves to a disallowed ip")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
dialAddr := net.JoinHostPort(ip.String(), port)
|
|
||||||
conn, err := webhookBaseDialContext(ctx, network, dialAddr)
|
|
||||||
if err == nil {
|
|
||||||
return conn, nil
|
|
||||||
}
|
|
||||||
lastErr = err
|
|
||||||
}
|
|
||||||
if lastErr == nil {
|
|
||||||
lastErr = errors.New("webhook target has no resolved addresses")
|
|
||||||
}
|
|
||||||
return nil, lastErr
|
|
||||||
}
|
|
||||||
|
|
||||||
return transport
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateWebhookURL(ctx context.Context, raw string) (*validatedWebhookTarget, error) {
|
|
||||||
raw = strings.TrimSpace(raw)
|
|
||||||
if raw == "" {
|
|
||||||
return nil, errors.New("webhook url is empty")
|
|
||||||
}
|
|
||||||
// Avoid request smuggling / header injection vectors.
|
|
||||||
if strings.ContainsAny(raw, "\r\n") {
|
|
||||||
return nil, errors.New("webhook url contains invalid characters")
|
|
||||||
}
|
|
||||||
|
|
||||||
parsed, err := url.Parse(raw)
|
|
||||||
if err != nil {
|
|
||||||
return nil, errors.New("webhook url format is invalid")
|
|
||||||
}
|
|
||||||
if !strings.EqualFold(parsed.Scheme, "https") {
|
|
||||||
return nil, errors.New("webhook url scheme must be https")
|
|
||||||
}
|
|
||||||
parsed.Scheme = "https"
|
|
||||||
if parsed.Host == "" || parsed.Hostname() == "" {
|
|
||||||
return nil, errors.New("webhook url must include host")
|
|
||||||
}
|
|
||||||
if parsed.User != nil {
|
|
||||||
return nil, errors.New("webhook url must not include userinfo")
|
|
||||||
}
|
|
||||||
if parsed.Port() != "" {
|
|
||||||
port, err := strconv.Atoi(parsed.Port())
|
|
||||||
if err != nil || port < 1 || port > 65535 {
|
|
||||||
return nil, errors.New("webhook url port is invalid")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
host := strings.TrimSuffix(strings.ToLower(parsed.Hostname()), ".")
|
|
||||||
if host == "localhost" {
|
|
||||||
return nil, errors.New("webhook url host must not be localhost")
|
|
||||||
}
|
|
||||||
|
|
||||||
if ip := net.ParseIP(host); ip != nil {
|
|
||||||
if isDisallowedWebhookIP(ip) {
|
|
||||||
return nil, errors.New("webhook url host resolves to a disallowed ip")
|
|
||||||
}
|
|
||||||
return &validatedWebhookTarget{
|
|
||||||
URL: parsed,
|
|
||||||
host: host,
|
|
||||||
port: portForScheme(parsed),
|
|
||||||
pinnedIPs: []net.IP{ip},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if ctx == nil {
|
|
||||||
ctx = context.Background()
|
|
||||||
}
|
|
||||||
ips, err := lookupIPAddrs(ctx, host)
|
|
||||||
if err != nil || len(ips) == 0 {
|
|
||||||
return nil, errors.New("webhook url host cannot be resolved")
|
|
||||||
}
|
|
||||||
pinned := make([]net.IP, 0, len(ips))
|
|
||||||
for _, addr := range ips {
|
|
||||||
if isDisallowedWebhookIP(addr.IP) {
|
|
||||||
return nil, errors.New("webhook url host resolves to a disallowed ip")
|
|
||||||
}
|
|
||||||
if addr.IP != nil {
|
|
||||||
pinned = append(pinned, addr.IP)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(pinned) == 0 {
|
|
||||||
return nil, errors.New("webhook url host cannot be resolved")
|
|
||||||
}
|
|
||||||
|
|
||||||
return &validatedWebhookTarget{
|
|
||||||
URL: parsed,
|
|
||||||
host: host,
|
|
||||||
port: portForScheme(parsed),
|
|
||||||
pinnedIPs: uniqueResolvedIPs(pinned),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func isDisallowedWebhookIP(ip net.IP) bool {
|
|
||||||
if ip == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if ip4 := ip.To4(); ip4 != nil {
|
|
||||||
ip = ip4
|
|
||||||
} else if ip16 := ip.To16(); ip16 != nil {
|
|
||||||
ip = ip16
|
|
||||||
} else {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Disallow non-public addresses even if they're not explicitly covered by the CIDR list.
|
|
||||||
// This provides defense-in-depth against SSRF targets such as link-local, multicast, and
|
|
||||||
// unspecified addresses, and ensures any "pinned" IP is still blocked at dial time.
|
|
||||||
if ip.IsUnspecified() ||
|
|
||||||
ip.IsLoopback() ||
|
|
||||||
ip.IsMulticast() ||
|
|
||||||
ip.IsLinkLocalUnicast() ||
|
|
||||||
ip.IsLinkLocalMulticast() ||
|
|
||||||
ip.IsPrivate() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, block := range disallowedWebhookIPNets {
|
|
||||||
if block.Contains(ip) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func portForScheme(u *url.URL) string {
|
|
||||||
if u != nil && u.Port() != "" {
|
|
||||||
return u.Port()
|
|
||||||
}
|
|
||||||
return "443"
|
|
||||||
}
|
|
||||||
|
|
||||||
func uniqueResolvedIPs(ips []net.IP) []net.IP {
|
|
||||||
seen := make(map[string]struct{}, len(ips))
|
|
||||||
out := make([]net.IP, 0, len(ips))
|
|
||||||
for _, ip := range ips {
|
|
||||||
if ip == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
key := ip.String()
|
|
||||||
if _, ok := seen[key]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
seen[key] = struct{}{}
|
|
||||||
out = append(out, ip)
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
@@ -1,271 +0,0 @@
|
|||||||
//go:build integration
|
|
||||||
|
|
||||||
package service
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"database/sql"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
// This integration test protects the DI startup contract for OpsAlertService.
|
|
||||||
//
|
|
||||||
// Background:
|
|
||||||
// - OpsMetricsCollector previously called alertService.Start()/Evaluate() directly.
|
|
||||||
// - Those direct calls were removed, so OpsAlertService must now start via DI
|
|
||||||
// (ProvideOpsAlertService in wire.go) and run its own evaluation ticker.
|
|
||||||
//
|
|
||||||
// What we validate here:
|
|
||||||
// 1. When we construct via the Wire provider functions (ProvideOpsAlertService +
|
|
||||||
// ProvideOpsMetricsCollector), OpsAlertService starts automatically.
|
|
||||||
// 2. Its evaluation loop continues to tick even if OpsMetricsCollector is stopped,
|
|
||||||
// proving the alert evaluator is independent.
|
|
||||||
// 3. The evaluation path can trigger alert logic (CreateAlertEvent called).
|
|
||||||
func TestOpsAlertService_StartedViaWireProviders_RunsIndependentTicker(t *testing.T) {
|
|
||||||
oldInterval := opsAlertEvalInterval
|
|
||||||
opsAlertEvalInterval = 25 * time.Millisecond
|
|
||||||
t.Cleanup(func() { opsAlertEvalInterval = oldInterval })
|
|
||||||
|
|
||||||
repo := newFakeOpsRepository()
|
|
||||||
opsService := NewOpsService(repo, nil)
|
|
||||||
|
|
||||||
// Start via the Wire provider function (the production DI path).
|
|
||||||
alertService := ProvideOpsAlertService(opsService, nil, nil)
|
|
||||||
t.Cleanup(alertService.Stop)
|
|
||||||
|
|
||||||
// Construct via ProvideOpsMetricsCollector (wire.go). Stop immediately to ensure
|
|
||||||
// the alert ticker keeps running without the metrics collector.
|
|
||||||
collector := ProvideOpsMetricsCollector(opsService, NewConcurrencyService(nil))
|
|
||||||
collector.Stop()
|
|
||||||
|
|
||||||
// Wait for at least one evaluation (run() calls evaluateOnce immediately).
|
|
||||||
require.Eventually(t, func() bool {
|
|
||||||
return repo.listRulesCalls.Load() >= 1
|
|
||||||
}, 1*time.Second, 5*time.Millisecond)
|
|
||||||
|
|
||||||
// Confirm the evaluation loop keeps ticking after the metrics collector is stopped.
|
|
||||||
callsAfterCollectorStop := repo.listRulesCalls.Load()
|
|
||||||
require.Eventually(t, func() bool {
|
|
||||||
return repo.listRulesCalls.Load() >= callsAfterCollectorStop+2
|
|
||||||
}, 1*time.Second, 5*time.Millisecond)
|
|
||||||
|
|
||||||
// Confirm the evaluation logic actually fires an alert event at least once.
|
|
||||||
select {
|
|
||||||
case <-repo.eventCreatedCh:
|
|
||||||
// ok
|
|
||||||
case <-time.After(2 * time.Second):
|
|
||||||
t.Fatalf("expected OpsAlertService to create an alert event, but none was created (ListAlertRules calls=%d)", repo.listRulesCalls.Load())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newFakeOpsRepository() *fakeOpsRepository {
|
|
||||||
return &fakeOpsRepository{
|
|
||||||
eventCreatedCh: make(chan struct{}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// fakeOpsRepository is a lightweight in-memory stub of OpsRepository for integration tests.
|
|
||||||
// It avoids real DB/Redis usage and provides deterministic responses fast.
|
|
||||||
type fakeOpsRepository struct {
|
|
||||||
listRulesCalls atomic.Int64
|
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
activeEvent *OpsAlertEvent
|
|
||||||
latestEvent *OpsAlertEvent
|
|
||||||
nextEventID int64
|
|
||||||
eventCreatedCh chan struct{}
|
|
||||||
eventOnce sync.Once
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) CreateErrorLog(ctx context.Context, log *OpsErrorLog) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) ListErrorLogsLegacy(ctx context.Context, filters OpsErrorLogFilters) ([]OpsErrorLog, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) ListErrorLogs(ctx context.Context, filter *ErrorLogFilter) ([]*ErrorLog, int64, error) {
|
|
||||||
return nil, 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) GetLatestSystemMetric(ctx context.Context) (*OpsMetrics, error) {
|
|
||||||
return &OpsMetrics{WindowMinutes: 1}, sql.ErrNoRows
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) CreateSystemMetric(ctx context.Context, metric *OpsMetrics) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) GetWindowStats(ctx context.Context, startTime, endTime time.Time) (*OpsWindowStats, error) {
|
|
||||||
return &OpsWindowStats{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) GetProviderStats(ctx context.Context, startTime, endTime time.Time) ([]*ProviderStats, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) GetLatencyHistogram(ctx context.Context, startTime, endTime time.Time) ([]*LatencyHistogramItem, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) GetErrorDistribution(ctx context.Context, startTime, endTime time.Time) ([]*ErrorDistributionItem, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) ListRecentSystemMetrics(ctx context.Context, windowMinutes, limit int) ([]OpsMetrics, error) {
|
|
||||||
if limit <= 0 {
|
|
||||||
limit = 1
|
|
||||||
}
|
|
||||||
now := time.Now()
|
|
||||||
metrics := make([]OpsMetrics, 0, limit)
|
|
||||||
for i := 0; i < limit; i++ {
|
|
||||||
metrics = append(metrics, OpsMetrics{
|
|
||||||
WindowMinutes: windowMinutes,
|
|
||||||
CPUUsagePercent: 99,
|
|
||||||
UpdatedAt: now.Add(-time.Duration(i) * opsMetricsInterval),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return metrics, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) ListSystemMetricsRange(ctx context.Context, windowMinutes int, startTime, endTime time.Time, limit int) ([]OpsMetrics, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) ListAlertRules(ctx context.Context) ([]OpsAlertRule, error) {
|
|
||||||
call := r.listRulesCalls.Add(1)
|
|
||||||
// Delay enabling rules slightly so the test can stop OpsMetricsCollector first,
|
|
||||||
// then observe the alert evaluator ticking independently.
|
|
||||||
if call < 5 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return []OpsAlertRule{
|
|
||||||
{
|
|
||||||
ID: 1,
|
|
||||||
Name: "cpu too high (test)",
|
|
||||||
Enabled: true,
|
|
||||||
MetricType: OpsMetricCPUUsagePercent,
|
|
||||||
Operator: ">",
|
|
||||||
Threshold: 0,
|
|
||||||
WindowMinutes: 1,
|
|
||||||
SustainedMinutes: 1,
|
|
||||||
Severity: "P1",
|
|
||||||
NotifyEmail: false,
|
|
||||||
NotifyWebhook: false,
|
|
||||||
CooldownMinutes: 0,
|
|
||||||
},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) {
|
|
||||||
r.mu.Lock()
|
|
||||||
defer r.mu.Unlock()
|
|
||||||
if r.activeEvent == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if r.activeEvent.RuleID != ruleID {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if r.activeEvent.Status != OpsAlertStatusFiring {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
clone := *r.activeEvent
|
|
||||||
return &clone, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) GetLatestAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) {
|
|
||||||
r.mu.Lock()
|
|
||||||
defer r.mu.Unlock()
|
|
||||||
if r.latestEvent == nil || r.latestEvent.RuleID != ruleID {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
clone := *r.latestEvent
|
|
||||||
return &clone, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) CreateAlertEvent(ctx context.Context, event *OpsAlertEvent) error {
|
|
||||||
if event == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
r.mu.Lock()
|
|
||||||
defer r.mu.Unlock()
|
|
||||||
|
|
||||||
r.nextEventID++
|
|
||||||
event.ID = r.nextEventID
|
|
||||||
|
|
||||||
clone := *event
|
|
||||||
r.latestEvent = &clone
|
|
||||||
if clone.Status == OpsAlertStatusFiring {
|
|
||||||
r.activeEvent = &clone
|
|
||||||
}
|
|
||||||
|
|
||||||
r.eventOnce.Do(func() { close(r.eventCreatedCh) })
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error {
|
|
||||||
r.mu.Lock()
|
|
||||||
defer r.mu.Unlock()
|
|
||||||
if r.activeEvent != nil && r.activeEvent.ID == eventID {
|
|
||||||
r.activeEvent.Status = status
|
|
||||||
r.activeEvent.ResolvedAt = resolvedAt
|
|
||||||
}
|
|
||||||
if r.latestEvent != nil && r.latestEvent.ID == eventID {
|
|
||||||
r.latestEvent.Status = status
|
|
||||||
r.latestEvent.ResolvedAt = resolvedAt
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) UpdateAlertEventNotifications(ctx context.Context, eventID int64, emailSent, webhookSent bool) error {
|
|
||||||
r.mu.Lock()
|
|
||||||
defer r.mu.Unlock()
|
|
||||||
if r.activeEvent != nil && r.activeEvent.ID == eventID {
|
|
||||||
r.activeEvent.EmailSent = emailSent
|
|
||||||
r.activeEvent.WebhookSent = webhookSent
|
|
||||||
}
|
|
||||||
if r.latestEvent != nil && r.latestEvent.ID == eventID {
|
|
||||||
r.latestEvent.EmailSent = emailSent
|
|
||||||
r.latestEvent.WebhookSent = webhookSent
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) CountActiveAlerts(ctx context.Context) (int, error) {
|
|
||||||
r.mu.Lock()
|
|
||||||
defer r.mu.Unlock()
|
|
||||||
if r.activeEvent == nil {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
return 1, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) GetOverviewStats(ctx context.Context, startTime, endTime time.Time) (*OverviewStats, error) {
|
|
||||||
return &OverviewStats{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) GetCachedLatestSystemMetric(ctx context.Context) (*OpsMetrics, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) SetCachedLatestSystemMetric(ctx context.Context, metric *OpsMetrics) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) GetCachedDashboardOverview(ctx context.Context, timeRange string) (*DashboardOverviewData, error) {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) SetCachedDashboardOverview(ctx context.Context, timeRange string, data *DashboardOverviewData, ttl time.Duration) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *fakeOpsRepository) PingRedis(ctx context.Context) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,315 +0,0 @@
|
|||||||
//go:build unit || opsalert_unit
|
|
||||||
|
|
||||||
package service
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"errors"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSelectContiguousMetrics_Contiguous(t *testing.T) {
|
|
||||||
now := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
|
|
||||||
metrics := []OpsMetrics{
|
|
||||||
{UpdatedAt: now},
|
|
||||||
{UpdatedAt: now.Add(-1 * time.Minute)},
|
|
||||||
{UpdatedAt: now.Add(-2 * time.Minute)},
|
|
||||||
}
|
|
||||||
|
|
||||||
selected, ok := selectContiguousMetrics(metrics, 3, now)
|
|
||||||
require.True(t, ok)
|
|
||||||
require.Len(t, selected, 3)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSelectContiguousMetrics_GapFails(t *testing.T) {
|
|
||||||
now := time.Date(2026, 1, 1, 0, 0, 0, 0, time.UTC)
|
|
||||||
metrics := []OpsMetrics{
|
|
||||||
{UpdatedAt: now},
|
|
||||||
// Missing the -1m sample (gap ~=2m).
|
|
||||||
{UpdatedAt: now.Add(-2 * time.Minute)},
|
|
||||||
{UpdatedAt: now.Add(-3 * time.Minute)},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, ok := selectContiguousMetrics(metrics, 3, now)
|
|
||||||
require.False(t, ok)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSelectContiguousMetrics_StaleNewestFails(t *testing.T) {
|
|
||||||
now := time.Date(2026, 1, 1, 0, 10, 0, 0, time.UTC)
|
|
||||||
metrics := []OpsMetrics{
|
|
||||||
{UpdatedAt: now.Add(-10 * time.Minute)},
|
|
||||||
{UpdatedAt: now.Add(-11 * time.Minute)},
|
|
||||||
}
|
|
||||||
|
|
||||||
_, ok := selectContiguousMetrics(metrics, 2, now)
|
|
||||||
require.False(t, ok)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestMetricValue_SuccessRate_NoTrafficIsNoData(t *testing.T) {
|
|
||||||
metric := OpsMetrics{
|
|
||||||
RequestCount: 0,
|
|
||||||
SuccessRate: 0,
|
|
||||||
}
|
|
||||||
value, ok := metricValue(metric, OpsMetricSuccessRate)
|
|
||||||
require.False(t, ok)
|
|
||||||
require.Equal(t, 0.0, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestOpsAlertService_StopWithoutStart_NoPanic(t *testing.T) {
|
|
||||||
s := NewOpsAlertService(nil, nil, nil)
|
|
||||||
require.NotPanics(t, func() { s.Stop() })
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestOpsAlertService_StartStop_Graceful(t *testing.T) {
|
|
||||||
s := NewOpsAlertService(nil, nil, nil)
|
|
||||||
s.interval = 5 * time.Millisecond
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
s.StartWithContext(ctx)
|
|
||||||
|
|
||||||
done := make(chan struct{})
|
|
||||||
go func() {
|
|
||||||
s.Stop()
|
|
||||||
close(done)
|
|
||||||
}()
|
|
||||||
|
|
||||||
select {
|
|
||||||
case <-done:
|
|
||||||
// ok
|
|
||||||
case <-time.After(1 * time.Second):
|
|
||||||
t.Fatal("Stop did not return; background goroutine likely stuck")
|
|
||||||
}
|
|
||||||
|
|
||||||
require.NotPanics(t, func() { s.Stop() })
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestBuildWebhookHTTPClient_DefaultTimeout(t *testing.T) {
|
|
||||||
client := buildWebhookHTTPClient(nil, nil)
|
|
||||||
require.Equal(t, webhookHTTPClientTimeout, client.Timeout)
|
|
||||||
require.NotNil(t, client.CheckRedirect)
|
|
||||||
require.ErrorIs(t, client.CheckRedirect(nil, nil), http.ErrUseLastResponse)
|
|
||||||
|
|
||||||
base := &http.Client{}
|
|
||||||
client = buildWebhookHTTPClient(base, nil)
|
|
||||||
require.Equal(t, webhookHTTPClientTimeout, client.Timeout)
|
|
||||||
require.NotNil(t, client.CheckRedirect)
|
|
||||||
|
|
||||||
base = &http.Client{Timeout: 2 * time.Second}
|
|
||||||
client = buildWebhookHTTPClient(base, nil)
|
|
||||||
require.Equal(t, 2*time.Second, client.Timeout)
|
|
||||||
require.NotNil(t, client.CheckRedirect)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateWebhookURL_RequiresHTTPS(t *testing.T) {
|
|
||||||
oldLookup := lookupIPAddrs
|
|
||||||
t.Cleanup(func() { lookupIPAddrs = oldLookup })
|
|
||||||
lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
|
|
||||||
return []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := validateWebhookURL(context.Background(), "http://example.com/webhook")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateWebhookURL_InvalidFormatRejected(t *testing.T) {
|
|
||||||
_, err := validateWebhookURL(context.Background(), "https://[::1")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateWebhookURL_RejectsUserinfo(t *testing.T) {
|
|
||||||
oldLookup := lookupIPAddrs
|
|
||||||
t.Cleanup(func() { lookupIPAddrs = oldLookup })
|
|
||||||
lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
|
|
||||||
return []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := validateWebhookURL(context.Background(), "https://user:pass@example.com/webhook")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateWebhookURL_RejectsLocalhost(t *testing.T) {
|
|
||||||
_, err := validateWebhookURL(context.Background(), "https://localhost/webhook")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateWebhookURL_RejectsPrivateIPLiteral(t *testing.T) {
|
|
||||||
cases := []string{
|
|
||||||
"https://0.0.0.0/webhook",
|
|
||||||
"https://127.0.0.1/webhook",
|
|
||||||
"https://10.0.0.1/webhook",
|
|
||||||
"https://192.168.1.2/webhook",
|
|
||||||
"https://172.16.0.1/webhook",
|
|
||||||
"https://172.31.255.255/webhook",
|
|
||||||
"https://100.64.0.1/webhook",
|
|
||||||
"https://169.254.169.254/webhook",
|
|
||||||
"https://198.18.0.1/webhook",
|
|
||||||
"https://224.0.0.1/webhook",
|
|
||||||
"https://240.0.0.1/webhook",
|
|
||||||
"https://[::]/webhook",
|
|
||||||
"https://[::1]/webhook",
|
|
||||||
"https://[ff02::1]/webhook",
|
|
||||||
}
|
|
||||||
for _, tc := range cases {
|
|
||||||
t.Run(tc, func(t *testing.T) {
|
|
||||||
_, err := validateWebhookURL(context.Background(), tc)
|
|
||||||
require.Error(t, err)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateWebhookURL_RejectsPrivateIPViaDNS(t *testing.T) {
|
|
||||||
oldLookup := lookupIPAddrs
|
|
||||||
t.Cleanup(func() { lookupIPAddrs = oldLookup })
|
|
||||||
lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
|
|
||||||
require.Equal(t, "internal.example", host)
|
|
||||||
return []net.IPAddr{{IP: net.ParseIP("10.0.0.2")}}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := validateWebhookURL(context.Background(), "https://internal.example/webhook")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateWebhookURL_RejectsLinkLocalIPViaDNS(t *testing.T) {
|
|
||||||
oldLookup := lookupIPAddrs
|
|
||||||
t.Cleanup(func() { lookupIPAddrs = oldLookup })
|
|
||||||
lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
|
|
||||||
require.Equal(t, "metadata.example", host)
|
|
||||||
return []net.IPAddr{{IP: net.ParseIP("169.254.169.254")}}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := validateWebhookURL(context.Background(), "https://metadata.example/webhook")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateWebhookURL_AllowsPublicHostViaDNS(t *testing.T) {
|
|
||||||
oldLookup := lookupIPAddrs
|
|
||||||
t.Cleanup(func() { lookupIPAddrs = oldLookup })
|
|
||||||
lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
|
|
||||||
require.Equal(t, "example.com", host)
|
|
||||||
return []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
target, err := validateWebhookURL(context.Background(), "https://example.com:443/webhook")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, "https", target.URL.Scheme)
|
|
||||||
require.Equal(t, "example.com", target.URL.Hostname())
|
|
||||||
require.Equal(t, "443", target.URL.Port())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestValidateWebhookURL_RejectsInvalidPort(t *testing.T) {
|
|
||||||
oldLookup := lookupIPAddrs
|
|
||||||
t.Cleanup(func() { lookupIPAddrs = oldLookup })
|
|
||||||
lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
|
|
||||||
return []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := validateWebhookURL(context.Background(), "https://example.com:99999/webhook")
|
|
||||||
require.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestWebhookTransport_UsesPinnedIP_NoDNSRebinding(t *testing.T) {
|
|
||||||
oldLookup := lookupIPAddrs
|
|
||||||
oldDial := webhookBaseDialContext
|
|
||||||
t.Cleanup(func() {
|
|
||||||
lookupIPAddrs = oldLookup
|
|
||||||
webhookBaseDialContext = oldDial
|
|
||||||
})
|
|
||||||
|
|
||||||
lookupCalls := 0
|
|
||||||
lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
|
|
||||||
lookupCalls++
|
|
||||||
require.Equal(t, "example.com", host)
|
|
||||||
return []net.IPAddr{{IP: net.ParseIP("93.184.216.34")}}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
target, err := validateWebhookURL(context.Background(), "https://example.com/webhook")
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 1, lookupCalls)
|
|
||||||
|
|
||||||
lookupIPAddrs = func(ctx context.Context, host string) ([]net.IPAddr, error) {
|
|
||||||
lookupCalls++
|
|
||||||
return []net.IPAddr{{IP: net.ParseIP("10.0.0.1")}}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var dialAddrs []string
|
|
||||||
webhookBaseDialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
|
|
||||||
dialAddrs = append(dialAddrs, addr)
|
|
||||||
return nil, errors.New("dial blocked in test")
|
|
||||||
}
|
|
||||||
|
|
||||||
client := buildWebhookHTTPClient(nil, target)
|
|
||||||
transport, ok := client.Transport.(*http.Transport)
|
|
||||||
require.True(t, ok)
|
|
||||||
|
|
||||||
_, err = transport.DialContext(context.Background(), "tcp", "example.com:443")
|
|
||||||
require.Error(t, err)
|
|
||||||
require.Equal(t, []string{"93.184.216.34:443"}, dialAddrs)
|
|
||||||
require.Equal(t, 1, lookupCalls, "dial path must not re-resolve DNS")
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRetryWithBackoff_SucceedsAfterRetries(t *testing.T) {
|
|
||||||
oldSleep := opsAlertSleep
|
|
||||||
t.Cleanup(func() { opsAlertSleep = oldSleep })
|
|
||||||
|
|
||||||
var slept []time.Duration
|
|
||||||
opsAlertSleep = func(ctx context.Context, d time.Duration) error {
|
|
||||||
slept = append(slept, d)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
attempts := 0
|
|
||||||
err := retryWithBackoff(
|
|
||||||
context.Background(),
|
|
||||||
3,
|
|
||||||
[]time.Duration{time.Second, 2 * time.Second, 4 * time.Second},
|
|
||||||
func() error {
|
|
||||||
attempts++
|
|
||||||
if attempts <= 3 {
|
|
||||||
return errors.New("send failed")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
},
|
|
||||||
nil,
|
|
||||||
)
|
|
||||||
require.NoError(t, err)
|
|
||||||
require.Equal(t, 4, attempts)
|
|
||||||
require.Equal(t, []time.Duration{time.Second, 2 * time.Second, 4 * time.Second}, slept)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRetryWithBackoff_ContextCanceledStopsRetries(t *testing.T) {
|
|
||||||
oldSleep := opsAlertSleep
|
|
||||||
t.Cleanup(func() { opsAlertSleep = oldSleep })
|
|
||||||
|
|
||||||
var slept []time.Duration
|
|
||||||
opsAlertSleep = func(ctx context.Context, d time.Duration) error {
|
|
||||||
slept = append(slept, d)
|
|
||||||
return ctx.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
|
||||||
attempts := 0
|
|
||||||
err := retryWithBackoff(
|
|
||||||
ctx,
|
|
||||||
3,
|
|
||||||
[]time.Duration{time.Second, 2 * time.Second, 4 * time.Second},
|
|
||||||
func() error {
|
|
||||||
attempts++
|
|
||||||
return errors.New("send failed")
|
|
||||||
},
|
|
||||||
func(attempt int, total int, nextDelay time.Duration, err error) {
|
|
||||||
if attempt == 1 {
|
|
||||||
cancel()
|
|
||||||
}
|
|
||||||
},
|
|
||||||
)
|
|
||||||
require.ErrorIs(t, err, context.Canceled)
|
|
||||||
require.Equal(t, 1, attempts)
|
|
||||||
require.Equal(t, []time.Duration{time.Second}, slept)
|
|
||||||
}
|
|
||||||
@@ -1,92 +0,0 @@
|
|||||||
package service
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
OpsAlertStatusFiring = "firing"
|
|
||||||
OpsAlertStatusResolved = "resolved"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
OpsMetricSuccessRate = "success_rate"
|
|
||||||
OpsMetricErrorRate = "error_rate"
|
|
||||||
OpsMetricP95LatencyMs = "p95_latency_ms"
|
|
||||||
OpsMetricP99LatencyMs = "p99_latency_ms"
|
|
||||||
OpsMetricHTTP2Errors = "http2_errors"
|
|
||||||
OpsMetricCPUUsagePercent = "cpu_usage_percent"
|
|
||||||
OpsMetricMemoryUsagePercent = "memory_usage_percent"
|
|
||||||
OpsMetricQueueDepth = "concurrency_queue_depth"
|
|
||||||
)
|
|
||||||
|
|
||||||
type OpsAlertRule struct {
|
|
||||||
ID int64 `json:"id"`
|
|
||||||
Name string `json:"name"`
|
|
||||||
Description string `json:"description"`
|
|
||||||
Enabled bool `json:"enabled"`
|
|
||||||
MetricType string `json:"metric_type"`
|
|
||||||
Operator string `json:"operator"`
|
|
||||||
Threshold float64 `json:"threshold"`
|
|
||||||
WindowMinutes int `json:"window_minutes"`
|
|
||||||
SustainedMinutes int `json:"sustained_minutes"`
|
|
||||||
Severity string `json:"severity"`
|
|
||||||
NotifyEmail bool `json:"notify_email"`
|
|
||||||
NotifyWebhook bool `json:"notify_webhook"`
|
|
||||||
WebhookURL string `json:"webhook_url"`
|
|
||||||
CooldownMinutes int `json:"cooldown_minutes"`
|
|
||||||
DimensionFilters map[string]any `json:"dimension_filters,omitempty"`
|
|
||||||
NotifyChannels []string `json:"notify_channels,omitempty"`
|
|
||||||
NotifyConfig map[string]any `json:"notify_config,omitempty"`
|
|
||||||
CreatedAt time.Time `json:"created_at"`
|
|
||||||
UpdatedAt time.Time `json:"updated_at"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type OpsAlertEvent struct {
|
|
||||||
ID int64 `json:"id"`
|
|
||||||
RuleID int64 `json:"rule_id"`
|
|
||||||
Severity string `json:"severity"`
|
|
||||||
Status string `json:"status"`
|
|
||||||
Title string `json:"title"`
|
|
||||||
Description string `json:"description"`
|
|
||||||
MetricValue float64 `json:"metric_value"`
|
|
||||||
ThresholdValue float64 `json:"threshold_value"`
|
|
||||||
FiredAt time.Time `json:"fired_at"`
|
|
||||||
ResolvedAt *time.Time `json:"resolved_at"`
|
|
||||||
EmailSent bool `json:"email_sent"`
|
|
||||||
WebhookSent bool `json:"webhook_sent"`
|
|
||||||
CreatedAt time.Time `json:"created_at"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *OpsService) ListAlertRules(ctx context.Context) ([]OpsAlertRule, error) {
|
|
||||||
return s.repo.ListAlertRules(ctx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *OpsService) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) {
|
|
||||||
return s.repo.GetActiveAlertEvent(ctx, ruleID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *OpsService) GetLatestAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) {
|
|
||||||
return s.repo.GetLatestAlertEvent(ctx, ruleID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *OpsService) CreateAlertEvent(ctx context.Context, event *OpsAlertEvent) error {
|
|
||||||
return s.repo.CreateAlertEvent(ctx, event)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *OpsService) UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error {
|
|
||||||
return s.repo.UpdateAlertEventStatus(ctx, eventID, status, resolvedAt)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *OpsService) UpdateAlertEventNotifications(ctx context.Context, eventID int64, emailSent, webhookSent bool) error {
|
|
||||||
return s.repo.UpdateAlertEventNotifications(ctx, eventID, emailSent, webhookSent)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *OpsService) ListRecentSystemMetrics(ctx context.Context, windowMinutes, limit int) ([]OpsMetrics, error) {
|
|
||||||
return s.repo.ListRecentSystemMetrics(ctx, windowMinutes, limit)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *OpsService) CountActiveAlerts(ctx context.Context) (int, error) {
|
|
||||||
return s.repo.CountActiveAlerts(ctx)
|
|
||||||
}
|
|
||||||
@@ -1,203 +0,0 @@
|
|||||||
package service
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"log"
|
|
||||||
"runtime"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/shirou/gopsutil/v4/cpu"
|
|
||||||
"github.com/shirou/gopsutil/v4/mem"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
opsMetricsInterval = 1 * time.Minute
|
|
||||||
opsMetricsCollectTimeout = 10 * time.Second
|
|
||||||
|
|
||||||
opsMetricsWindowShortMinutes = 1
|
|
||||||
opsMetricsWindowLongMinutes = 5
|
|
||||||
|
|
||||||
bytesPerMB = 1024 * 1024
|
|
||||||
cpuUsageSampleInterval = 0 * time.Second
|
|
||||||
|
|
||||||
percentScale = 100
|
|
||||||
)
|
|
||||||
|
|
||||||
type OpsMetricsCollector struct {
|
|
||||||
opsService *OpsService
|
|
||||||
concurrencyService *ConcurrencyService
|
|
||||||
interval time.Duration
|
|
||||||
lastGCPauseTotal uint64
|
|
||||||
lastGCPauseMu sync.Mutex
|
|
||||||
stopCh chan struct{}
|
|
||||||
startOnce sync.Once
|
|
||||||
stopOnce sync.Once
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewOpsMetricsCollector(opsService *OpsService, concurrencyService *ConcurrencyService) *OpsMetricsCollector {
|
|
||||||
return &OpsMetricsCollector{
|
|
||||||
opsService: opsService,
|
|
||||||
concurrencyService: concurrencyService,
|
|
||||||
interval: opsMetricsInterval,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *OpsMetricsCollector) Start() {
|
|
||||||
if c == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.startOnce.Do(func() {
|
|
||||||
if c.stopCh == nil {
|
|
||||||
c.stopCh = make(chan struct{})
|
|
||||||
}
|
|
||||||
go c.run()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *OpsMetricsCollector) Stop() {
|
|
||||||
if c == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
c.stopOnce.Do(func() {
|
|
||||||
if c.stopCh != nil {
|
|
||||||
close(c.stopCh)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *OpsMetricsCollector) run() {
|
|
||||||
ticker := time.NewTicker(c.interval)
|
|
||||||
defer ticker.Stop()
|
|
||||||
|
|
||||||
c.collectOnce()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
c.collectOnce()
|
|
||||||
case <-c.stopCh:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *OpsMetricsCollector) collectOnce() {
|
|
||||||
if c.opsService == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), opsMetricsCollectTimeout)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
now := time.Now()
|
|
||||||
systemStats := c.collectSystemStats(ctx)
|
|
||||||
queueDepth := c.collectQueueDepth(ctx)
|
|
||||||
activeAlerts := c.collectActiveAlerts(ctx)
|
|
||||||
|
|
||||||
for _, window := range []int{opsMetricsWindowShortMinutes, opsMetricsWindowLongMinutes} {
|
|
||||||
startTime := now.Add(-time.Duration(window) * time.Minute)
|
|
||||||
windowStats, err := c.opsService.GetWindowStats(ctx, startTime, now)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("[OpsMetrics] failed to get window stats (%dm): %v", window, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
successRate, errorRate := computeRates(windowStats.SuccessCount, windowStats.ErrorCount)
|
|
||||||
requestCount := windowStats.SuccessCount + windowStats.ErrorCount
|
|
||||||
metric := &OpsMetrics{
|
|
||||||
WindowMinutes: window,
|
|
||||||
RequestCount: requestCount,
|
|
||||||
SuccessCount: windowStats.SuccessCount,
|
|
||||||
ErrorCount: windowStats.ErrorCount,
|
|
||||||
SuccessRate: successRate,
|
|
||||||
ErrorRate: errorRate,
|
|
||||||
P95LatencyMs: windowStats.P95LatencyMs,
|
|
||||||
P99LatencyMs: windowStats.P99LatencyMs,
|
|
||||||
HTTP2Errors: windowStats.HTTP2Errors,
|
|
||||||
ActiveAlerts: activeAlerts,
|
|
||||||
CPUUsagePercent: systemStats.cpuUsage,
|
|
||||||
MemoryUsedMB: systemStats.memoryUsedMB,
|
|
||||||
MemoryTotalMB: systemStats.memoryTotalMB,
|
|
||||||
MemoryUsagePercent: systemStats.memoryUsagePercent,
|
|
||||||
HeapAllocMB: systemStats.heapAllocMB,
|
|
||||||
GCPauseMs: systemStats.gcPauseMs,
|
|
||||||
ConcurrencyQueueDepth: queueDepth,
|
|
||||||
UpdatedAt: now,
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := c.opsService.RecordMetrics(ctx, metric); err != nil {
|
|
||||||
log.Printf("[OpsMetrics] failed to record metrics (%dm): %v", window, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
func computeRates(successCount, errorCount int64) (float64, float64) {
|
|
||||||
total := successCount + errorCount
|
|
||||||
if total == 0 {
|
|
||||||
// No traffic => no data. Rates are kept at 0 and request_count will be 0.
|
|
||||||
// The UI should render this as N/A instead of "100% success".
|
|
||||||
return 0, 0
|
|
||||||
}
|
|
||||||
successRate := float64(successCount) / float64(total) * percentScale
|
|
||||||
errorRate := float64(errorCount) / float64(total) * percentScale
|
|
||||||
return successRate, errorRate
|
|
||||||
}
|
|
||||||
|
|
||||||
type opsSystemStats struct {
|
|
||||||
cpuUsage float64
|
|
||||||
memoryUsedMB int64
|
|
||||||
memoryTotalMB int64
|
|
||||||
memoryUsagePercent float64
|
|
||||||
heapAllocMB int64
|
|
||||||
gcPauseMs float64
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *OpsMetricsCollector) collectSystemStats(ctx context.Context) opsSystemStats {
|
|
||||||
stats := opsSystemStats{}
|
|
||||||
|
|
||||||
if percents, err := cpu.PercentWithContext(ctx, cpuUsageSampleInterval, false); err == nil && len(percents) > 0 {
|
|
||||||
stats.cpuUsage = percents[0]
|
|
||||||
}
|
|
||||||
|
|
||||||
if vm, err := mem.VirtualMemoryWithContext(ctx); err == nil {
|
|
||||||
stats.memoryUsedMB = int64(vm.Used / bytesPerMB)
|
|
||||||
stats.memoryTotalMB = int64(vm.Total / bytesPerMB)
|
|
||||||
stats.memoryUsagePercent = vm.UsedPercent
|
|
||||||
}
|
|
||||||
|
|
||||||
var memStats runtime.MemStats
|
|
||||||
runtime.ReadMemStats(&memStats)
|
|
||||||
stats.heapAllocMB = int64(memStats.HeapAlloc / bytesPerMB)
|
|
||||||
c.lastGCPauseMu.Lock()
|
|
||||||
if c.lastGCPauseTotal != 0 && memStats.PauseTotalNs >= c.lastGCPauseTotal {
|
|
||||||
stats.gcPauseMs = float64(memStats.PauseTotalNs-c.lastGCPauseTotal) / float64(time.Millisecond)
|
|
||||||
}
|
|
||||||
c.lastGCPauseTotal = memStats.PauseTotalNs
|
|
||||||
c.lastGCPauseMu.Unlock()
|
|
||||||
|
|
||||||
return stats
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *OpsMetricsCollector) collectQueueDepth(ctx context.Context) int {
|
|
||||||
if c.concurrencyService == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
depth, err := c.concurrencyService.GetTotalWaitCount(ctx)
|
|
||||||
if err != nil {
|
|
||||||
log.Printf("[OpsMetrics] failed to get queue depth: %v", err)
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return depth
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *OpsMetricsCollector) collectActiveAlerts(ctx context.Context) int {
|
|
||||||
if c.opsService == nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
count, err := c.opsService.CountActiveAlerts(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return count
|
|
||||||
}
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,48 +0,0 @@
|
|||||||
-- Ops error logs and system metrics
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS ops_error_logs (
|
|
||||||
id BIGSERIAL PRIMARY KEY,
|
|
||||||
request_id VARCHAR(64),
|
|
||||||
user_id BIGINT,
|
|
||||||
api_key_id BIGINT,
|
|
||||||
account_id BIGINT,
|
|
||||||
group_id BIGINT,
|
|
||||||
client_ip INET,
|
|
||||||
error_phase VARCHAR(32) NOT NULL,
|
|
||||||
error_type VARCHAR(64) NOT NULL,
|
|
||||||
severity VARCHAR(4) NOT NULL,
|
|
||||||
status_code INT,
|
|
||||||
platform VARCHAR(32),
|
|
||||||
model VARCHAR(100),
|
|
||||||
request_path VARCHAR(256),
|
|
||||||
stream BOOLEAN NOT NULL DEFAULT FALSE,
|
|
||||||
error_message TEXT,
|
|
||||||
error_body TEXT,
|
|
||||||
provider_error_code VARCHAR(64),
|
|
||||||
provider_error_type VARCHAR(64),
|
|
||||||
is_retryable BOOLEAN NOT NULL DEFAULT FALSE,
|
|
||||||
is_user_actionable BOOLEAN NOT NULL DEFAULT FALSE,
|
|
||||||
retry_count INT NOT NULL DEFAULT 0,
|
|
||||||
completion_status VARCHAR(16),
|
|
||||||
duration_ms INT,
|
|
||||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_created_at ON ops_error_logs (created_at DESC);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_phase ON ops_error_logs (error_phase);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_platform ON ops_error_logs (platform);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_severity ON ops_error_logs (severity);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_error_logs_phase_platform_time ON ops_error_logs (error_phase, platform, created_at DESC);
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS ops_system_metrics (
|
|
||||||
id BIGSERIAL PRIMARY KEY,
|
|
||||||
success_rate DOUBLE PRECISION,
|
|
||||||
error_rate DOUBLE PRECISION,
|
|
||||||
p95_latency_ms INT,
|
|
||||||
p99_latency_ms INT,
|
|
||||||
http2_errors INT,
|
|
||||||
active_alerts INT,
|
|
||||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_created_at ON ops_system_metrics (created_at DESC);
|
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
-- Extend ops_system_metrics with windowed/system stats
|
|
||||||
|
|
||||||
ALTER TABLE ops_system_metrics
|
|
||||||
ADD COLUMN IF NOT EXISTS window_minutes INT NOT NULL DEFAULT 1,
|
|
||||||
ADD COLUMN IF NOT EXISTS cpu_usage_percent DOUBLE PRECISION,
|
|
||||||
ADD COLUMN IF NOT EXISTS memory_used_mb BIGINT,
|
|
||||||
ADD COLUMN IF NOT EXISTS memory_total_mb BIGINT,
|
|
||||||
ADD COLUMN IF NOT EXISTS memory_usage_percent DOUBLE PRECISION,
|
|
||||||
ADD COLUMN IF NOT EXISTS heap_alloc_mb BIGINT,
|
|
||||||
ADD COLUMN IF NOT EXISTS gc_pause_ms DOUBLE PRECISION,
|
|
||||||
ADD COLUMN IF NOT EXISTS concurrency_queue_depth INT;
|
|
||||||
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_window_time
|
|
||||||
ON ops_system_metrics (window_minutes, created_at DESC);
|
|
||||||
@@ -1,42 +0,0 @@
|
|||||||
-- Ops alert rules and events
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS ops_alert_rules (
|
|
||||||
id BIGSERIAL PRIMARY KEY,
|
|
||||||
name VARCHAR(128) NOT NULL,
|
|
||||||
description TEXT,
|
|
||||||
enabled BOOLEAN NOT NULL DEFAULT TRUE,
|
|
||||||
metric_type VARCHAR(64) NOT NULL,
|
|
||||||
operator VARCHAR(8) NOT NULL,
|
|
||||||
threshold DOUBLE PRECISION NOT NULL,
|
|
||||||
window_minutes INT NOT NULL DEFAULT 1,
|
|
||||||
sustained_minutes INT NOT NULL DEFAULT 1,
|
|
||||||
severity VARCHAR(4) NOT NULL DEFAULT 'P1',
|
|
||||||
notify_email BOOLEAN NOT NULL DEFAULT FALSE,
|
|
||||||
notify_webhook BOOLEAN NOT NULL DEFAULT FALSE,
|
|
||||||
webhook_url TEXT,
|
|
||||||
cooldown_minutes INT NOT NULL DEFAULT 10,
|
|
||||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
||||||
updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_alert_rules_enabled ON ops_alert_rules (enabled);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_alert_rules_metric ON ops_alert_rules (metric_type, window_minutes);
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS ops_alert_events (
|
|
||||||
id BIGSERIAL PRIMARY KEY,
|
|
||||||
rule_id BIGINT NOT NULL REFERENCES ops_alert_rules(id) ON DELETE CASCADE,
|
|
||||||
severity VARCHAR(4) NOT NULL,
|
|
||||||
status VARCHAR(16) NOT NULL DEFAULT 'firing',
|
|
||||||
title VARCHAR(200),
|
|
||||||
description TEXT,
|
|
||||||
metric_value DOUBLE PRECISION,
|
|
||||||
threshold_value DOUBLE PRECISION,
|
|
||||||
fired_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
|
|
||||||
resolved_at TIMESTAMPTZ,
|
|
||||||
email_sent BOOLEAN NOT NULL DEFAULT FALSE,
|
|
||||||
webhook_sent BOOLEAN NOT NULL DEFAULT FALSE,
|
|
||||||
created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
|
|
||||||
);
|
|
||||||
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_alert_events_rule_status ON ops_alert_events (rule_id, status);
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_alert_events_fired_at ON ops_alert_events (fired_at DESC);
|
|
||||||
@@ -1,32 +0,0 @@
|
|||||||
-- Seed default ops alert rules (idempotent)
|
|
||||||
|
|
||||||
INSERT INTO ops_alert_rules (
|
|
||||||
name,
|
|
||||||
description,
|
|
||||||
enabled,
|
|
||||||
metric_type,
|
|
||||||
operator,
|
|
||||||
threshold,
|
|
||||||
window_minutes,
|
|
||||||
sustained_minutes,
|
|
||||||
severity,
|
|
||||||
notify_email,
|
|
||||||
notify_webhook,
|
|
||||||
webhook_url,
|
|
||||||
cooldown_minutes
|
|
||||||
)
|
|
||||||
SELECT
|
|
||||||
'Global success rate < 99%',
|
|
||||||
'Trigger when the 1-minute success rate drops below 99% for 2 consecutive minutes.',
|
|
||||||
TRUE,
|
|
||||||
'success_rate',
|
|
||||||
'<',
|
|
||||||
99,
|
|
||||||
1,
|
|
||||||
2,
|
|
||||||
'P1',
|
|
||||||
TRUE,
|
|
||||||
FALSE,
|
|
||||||
NULL,
|
|
||||||
10
|
|
||||||
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules);
|
|
||||||
@@ -1,205 +0,0 @@
|
|||||||
-- Seed additional ops alert rules (idempotent)
|
|
||||||
|
|
||||||
INSERT INTO ops_alert_rules (
|
|
||||||
name,
|
|
||||||
description,
|
|
||||||
enabled,
|
|
||||||
metric_type,
|
|
||||||
operator,
|
|
||||||
threshold,
|
|
||||||
window_minutes,
|
|
||||||
sustained_minutes,
|
|
||||||
severity,
|
|
||||||
notify_email,
|
|
||||||
notify_webhook,
|
|
||||||
webhook_url,
|
|
||||||
cooldown_minutes
|
|
||||||
)
|
|
||||||
SELECT
|
|
||||||
'Global error rate > 1%',
|
|
||||||
'Trigger when the 1-minute error rate exceeds 1% for 2 consecutive minutes.',
|
|
||||||
TRUE,
|
|
||||||
'error_rate',
|
|
||||||
'>',
|
|
||||||
1,
|
|
||||||
1,
|
|
||||||
2,
|
|
||||||
'P1',
|
|
||||||
TRUE,
|
|
||||||
CASE
|
|
||||||
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
|
|
||||||
ELSE TRUE
|
|
||||||
END,
|
|
||||||
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
|
|
||||||
10
|
|
||||||
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'Global error rate > 1%');
|
|
||||||
|
|
||||||
INSERT INTO ops_alert_rules (
|
|
||||||
name,
|
|
||||||
description,
|
|
||||||
enabled,
|
|
||||||
metric_type,
|
|
||||||
operator,
|
|
||||||
threshold,
|
|
||||||
window_minutes,
|
|
||||||
sustained_minutes,
|
|
||||||
severity,
|
|
||||||
notify_email,
|
|
||||||
notify_webhook,
|
|
||||||
webhook_url,
|
|
||||||
cooldown_minutes
|
|
||||||
)
|
|
||||||
SELECT
|
|
||||||
'P99 latency > 2000ms',
|
|
||||||
'Trigger when the 5-minute P99 latency exceeds 2000ms for 2 consecutive samples.',
|
|
||||||
TRUE,
|
|
||||||
'p99_latency_ms',
|
|
||||||
'>',
|
|
||||||
2000,
|
|
||||||
5,
|
|
||||||
2,
|
|
||||||
'P1',
|
|
||||||
TRUE,
|
|
||||||
CASE
|
|
||||||
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
|
|
||||||
ELSE TRUE
|
|
||||||
END,
|
|
||||||
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
|
|
||||||
15
|
|
||||||
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'P99 latency > 2000ms');
|
|
||||||
|
|
||||||
INSERT INTO ops_alert_rules (
|
|
||||||
name,
|
|
||||||
description,
|
|
||||||
enabled,
|
|
||||||
metric_type,
|
|
||||||
operator,
|
|
||||||
threshold,
|
|
||||||
window_minutes,
|
|
||||||
sustained_minutes,
|
|
||||||
severity,
|
|
||||||
notify_email,
|
|
||||||
notify_webhook,
|
|
||||||
webhook_url,
|
|
||||||
cooldown_minutes
|
|
||||||
)
|
|
||||||
SELECT
|
|
||||||
'HTTP/2 errors > 20',
|
|
||||||
'Trigger when HTTP/2 errors exceed 20 in the last minute for 2 consecutive minutes.',
|
|
||||||
TRUE,
|
|
||||||
'http2_errors',
|
|
||||||
'>',
|
|
||||||
20,
|
|
||||||
1,
|
|
||||||
2,
|
|
||||||
'P2',
|
|
||||||
FALSE,
|
|
||||||
CASE
|
|
||||||
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
|
|
||||||
ELSE TRUE
|
|
||||||
END,
|
|
||||||
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
|
|
||||||
10
|
|
||||||
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'HTTP/2 errors > 20');
|
|
||||||
|
|
||||||
INSERT INTO ops_alert_rules (
|
|
||||||
name,
|
|
||||||
description,
|
|
||||||
enabled,
|
|
||||||
metric_type,
|
|
||||||
operator,
|
|
||||||
threshold,
|
|
||||||
window_minutes,
|
|
||||||
sustained_minutes,
|
|
||||||
severity,
|
|
||||||
notify_email,
|
|
||||||
notify_webhook,
|
|
||||||
webhook_url,
|
|
||||||
cooldown_minutes
|
|
||||||
)
|
|
||||||
SELECT
|
|
||||||
'CPU usage > 85%',
|
|
||||||
'Trigger when CPU usage exceeds 85% for 5 consecutive minutes.',
|
|
||||||
TRUE,
|
|
||||||
'cpu_usage_percent',
|
|
||||||
'>',
|
|
||||||
85,
|
|
||||||
1,
|
|
||||||
5,
|
|
||||||
'P2',
|
|
||||||
FALSE,
|
|
||||||
CASE
|
|
||||||
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
|
|
||||||
ELSE TRUE
|
|
||||||
END,
|
|
||||||
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
|
|
||||||
15
|
|
||||||
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'CPU usage > 85%');
|
|
||||||
|
|
||||||
INSERT INTO ops_alert_rules (
|
|
||||||
name,
|
|
||||||
description,
|
|
||||||
enabled,
|
|
||||||
metric_type,
|
|
||||||
operator,
|
|
||||||
threshold,
|
|
||||||
window_minutes,
|
|
||||||
sustained_minutes,
|
|
||||||
severity,
|
|
||||||
notify_email,
|
|
||||||
notify_webhook,
|
|
||||||
webhook_url,
|
|
||||||
cooldown_minutes
|
|
||||||
)
|
|
||||||
SELECT
|
|
||||||
'Memory usage > 90%',
|
|
||||||
'Trigger when memory usage exceeds 90% for 5 consecutive minutes.',
|
|
||||||
TRUE,
|
|
||||||
'memory_usage_percent',
|
|
||||||
'>',
|
|
||||||
90,
|
|
||||||
1,
|
|
||||||
5,
|
|
||||||
'P2',
|
|
||||||
FALSE,
|
|
||||||
CASE
|
|
||||||
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
|
|
||||||
ELSE TRUE
|
|
||||||
END,
|
|
||||||
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
|
|
||||||
15
|
|
||||||
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'Memory usage > 90%');
|
|
||||||
|
|
||||||
INSERT INTO ops_alert_rules (
|
|
||||||
name,
|
|
||||||
description,
|
|
||||||
enabled,
|
|
||||||
metric_type,
|
|
||||||
operator,
|
|
||||||
threshold,
|
|
||||||
window_minutes,
|
|
||||||
sustained_minutes,
|
|
||||||
severity,
|
|
||||||
notify_email,
|
|
||||||
notify_webhook,
|
|
||||||
webhook_url,
|
|
||||||
cooldown_minutes
|
|
||||||
)
|
|
||||||
SELECT
|
|
||||||
'Queue depth > 50',
|
|
||||||
'Trigger when concurrency queue depth exceeds 50 for 2 consecutive minutes.',
|
|
||||||
TRUE,
|
|
||||||
'concurrency_queue_depth',
|
|
||||||
'>',
|
|
||||||
50,
|
|
||||||
1,
|
|
||||||
2,
|
|
||||||
'P2',
|
|
||||||
FALSE,
|
|
||||||
CASE
|
|
||||||
WHEN (SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1) IS NULL THEN FALSE
|
|
||||||
ELSE TRUE
|
|
||||||
END,
|
|
||||||
(SELECT webhook_url FROM ops_alert_rules WHERE webhook_url IS NOT NULL AND webhook_url <> '' LIMIT 1),
|
|
||||||
10
|
|
||||||
WHERE NOT EXISTS (SELECT 1 FROM ops_alert_rules WHERE name = 'Queue depth > 50');
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
-- Enable webhook notifications for rules with webhook_url configured
|
|
||||||
|
|
||||||
UPDATE ops_alert_rules
|
|
||||||
SET notify_webhook = TRUE
|
|
||||||
WHERE webhook_url IS NOT NULL
|
|
||||||
AND webhook_url <> ''
|
|
||||||
AND notify_webhook IS DISTINCT FROM TRUE;
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
-- Add request counts to ops_system_metrics so the UI/alerts can distinguish "no traffic" from "healthy".
|
|
||||||
|
|
||||||
ALTER TABLE ops_system_metrics
|
|
||||||
ADD COLUMN IF NOT EXISTS request_count BIGINT NOT NULL DEFAULT 0,
|
|
||||||
ADD COLUMN IF NOT EXISTS success_count BIGINT NOT NULL DEFAULT 0,
|
|
||||||
ADD COLUMN IF NOT EXISTS error_count BIGINT NOT NULL DEFAULT 0;
|
|
||||||
@@ -1,272 +0,0 @@
|
|||||||
-- 运维监控中心 2.0 - 数据库 Schema 增强
|
|
||||||
-- 创建时间: 2026-01-02
|
|
||||||
-- 说明: 扩展监控指标,支持多维度分析和告警管理
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 1. 扩展 ops_system_metrics 表
|
|
||||||
-- ============================================
|
|
||||||
|
|
||||||
-- 添加 RED 指标列
|
|
||||||
ALTER TABLE ops_system_metrics
|
|
||||||
ADD COLUMN IF NOT EXISTS qps DECIMAL(10,2) DEFAULT 0,
|
|
||||||
ADD COLUMN IF NOT EXISTS tps DECIMAL(10,2) DEFAULT 0,
|
|
||||||
|
|
||||||
-- 错误分类
|
|
||||||
ADD COLUMN IF NOT EXISTS error_4xx_count BIGINT DEFAULT 0,
|
|
||||||
ADD COLUMN IF NOT EXISTS error_5xx_count BIGINT DEFAULT 0,
|
|
||||||
ADD COLUMN IF NOT EXISTS error_timeout_count BIGINT DEFAULT 0,
|
|
||||||
|
|
||||||
-- 延迟指标扩展
|
|
||||||
ADD COLUMN IF NOT EXISTS latency_p50 DECIMAL(10,2),
|
|
||||||
ADD COLUMN IF NOT EXISTS latency_p999 DECIMAL(10,2),
|
|
||||||
ADD COLUMN IF NOT EXISTS latency_avg DECIMAL(10,2),
|
|
||||||
ADD COLUMN IF NOT EXISTS latency_max DECIMAL(10,2),
|
|
||||||
|
|
||||||
-- 上游延迟
|
|
||||||
ADD COLUMN IF NOT EXISTS upstream_latency_avg DECIMAL(10,2),
|
|
||||||
|
|
||||||
-- 资源指标
|
|
||||||
ADD COLUMN IF NOT EXISTS disk_used BIGINT,
|
|
||||||
ADD COLUMN IF NOT EXISTS disk_total BIGINT,
|
|
||||||
ADD COLUMN IF NOT EXISTS disk_iops BIGINT,
|
|
||||||
ADD COLUMN IF NOT EXISTS network_in_bytes BIGINT,
|
|
||||||
ADD COLUMN IF NOT EXISTS network_out_bytes BIGINT,
|
|
||||||
|
|
||||||
-- 饱和度指标
|
|
||||||
ADD COLUMN IF NOT EXISTS goroutine_count INT,
|
|
||||||
ADD COLUMN IF NOT EXISTS db_conn_active INT,
|
|
||||||
ADD COLUMN IF NOT EXISTS db_conn_idle INT,
|
|
||||||
ADD COLUMN IF NOT EXISTS db_conn_waiting INT,
|
|
||||||
|
|
||||||
-- 业务指标
|
|
||||||
ADD COLUMN IF NOT EXISTS token_consumed BIGINT DEFAULT 0,
|
|
||||||
ADD COLUMN IF NOT EXISTS token_rate DECIMAL(10,2) DEFAULT 0,
|
|
||||||
ADD COLUMN IF NOT EXISTS active_subscriptions INT DEFAULT 0,
|
|
||||||
|
|
||||||
-- 维度标签 (支持多维度分析)
|
|
||||||
ADD COLUMN IF NOT EXISTS tags JSONB;
|
|
||||||
|
|
||||||
-- 添加 JSONB 索引以加速标签查询
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_metrics_tags ON ops_system_metrics USING GIN(tags);
|
|
||||||
|
|
||||||
-- 添加注释
|
|
||||||
COMMENT ON COLUMN ops_system_metrics.qps IS '每秒查询数 (Queries Per Second)';
|
|
||||||
COMMENT ON COLUMN ops_system_metrics.tps IS '每秒事务数 (Transactions Per Second)';
|
|
||||||
COMMENT ON COLUMN ops_system_metrics.error_4xx_count IS '客户端错误数量 (4xx)';
|
|
||||||
COMMENT ON COLUMN ops_system_metrics.error_5xx_count IS '服务端错误数量 (5xx)';
|
|
||||||
COMMENT ON COLUMN ops_system_metrics.error_timeout_count IS '超时错误数量';
|
|
||||||
COMMENT ON COLUMN ops_system_metrics.upstream_latency_avg IS '上游 API 平均延迟 (ms)';
|
|
||||||
COMMENT ON COLUMN ops_system_metrics.goroutine_count IS 'Goroutine 数量 (检测泄露)';
|
|
||||||
COMMENT ON COLUMN ops_system_metrics.tags IS '维度标签 (JSON), 如: {"account_id": "123", "api_path": "/v1/chat"}';
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 2. 创建维度统计表
|
|
||||||
-- ============================================
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS ops_dimension_stats (
|
|
||||||
id BIGSERIAL PRIMARY KEY,
|
|
||||||
timestamp TIMESTAMPTZ NOT NULL,
|
|
||||||
|
|
||||||
-- 维度类型: account, api_path, provider, region
|
|
||||||
dimension_type VARCHAR(50) NOT NULL,
|
|
||||||
dimension_value VARCHAR(255) NOT NULL,
|
|
||||||
|
|
||||||
-- 统计指标
|
|
||||||
request_count BIGINT DEFAULT 0,
|
|
||||||
success_count BIGINT DEFAULT 0,
|
|
||||||
error_count BIGINT DEFAULT 0,
|
|
||||||
success_rate DECIMAL(5,2),
|
|
||||||
error_rate DECIMAL(5,2),
|
|
||||||
|
|
||||||
-- 性能指标
|
|
||||||
latency_p50 DECIMAL(10,2),
|
|
||||||
latency_p95 DECIMAL(10,2),
|
|
||||||
latency_p99 DECIMAL(10,2),
|
|
||||||
|
|
||||||
-- 业务指标
|
|
||||||
token_consumed BIGINT DEFAULT 0,
|
|
||||||
cost_usd DECIMAL(10,4) DEFAULT 0,
|
|
||||||
|
|
||||||
created_at TIMESTAMPTZ DEFAULT NOW()
|
|
||||||
);
|
|
||||||
|
|
||||||
-- 创建复合索引以加速维度查询
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_dim_type_value_time
|
|
||||||
ON ops_dimension_stats(dimension_type, dimension_value, timestamp DESC);
|
|
||||||
|
|
||||||
-- 创建单独的时间索引用于范围查询
|
|
||||||
CREATE INDEX IF NOT EXISTS idx_ops_dim_timestamp
|
|
||||||
ON ops_dimension_stats(timestamp DESC);
|
|
||||||
|
|
||||||
-- 添加注释
|
|
||||||
COMMENT ON TABLE ops_dimension_stats IS '多维度统计表,支持按账户/API/Provider等维度下钻分析';
|
|
||||||
COMMENT ON COLUMN ops_dimension_stats.dimension_type IS '维度类型: account(账户), api_path(接口), provider(上游), region(地域)';
|
|
||||||
COMMENT ON COLUMN ops_dimension_stats.dimension_value IS '维度值,如: 账户ID, /v1/chat, openai, us-east-1';
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 3. 创建告警规则表
|
|
||||||
-- ============================================
|
|
||||||
|
|
||||||
ALTER TABLE ops_alert_rules
|
|
||||||
ADD COLUMN IF NOT EXISTS dimension_filters JSONB,
|
|
||||||
ADD COLUMN IF NOT EXISTS notify_channels JSONB,
|
|
||||||
ADD COLUMN IF NOT EXISTS notify_config JSONB,
|
|
||||||
ADD COLUMN IF NOT EXISTS created_by VARCHAR(100),
|
|
||||||
ADD COLUMN IF NOT EXISTS last_triggered_at TIMESTAMPTZ;
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 4. 告警历史表 (使用现有的 ops_alert_events)
|
|
||||||
-- ============================================
|
|
||||||
-- 注意: 后端代码使用 ops_alert_events 表,不创建新表
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 5. 创建数据清理配置表
|
|
||||||
-- ============================================
|
|
||||||
|
|
||||||
CREATE TABLE IF NOT EXISTS ops_data_retention_config (
|
|
||||||
id SERIAL PRIMARY KEY,
|
|
||||||
table_name VARCHAR(100) NOT NULL UNIQUE,
|
|
||||||
retention_days INT NOT NULL, -- 保留天数
|
|
||||||
enabled BOOLEAN DEFAULT true,
|
|
||||||
last_cleanup_at TIMESTAMPTZ,
|
|
||||||
created_at TIMESTAMPTZ DEFAULT NOW(),
|
|
||||||
updated_at TIMESTAMPTZ DEFAULT NOW()
|
|
||||||
);
|
|
||||||
|
|
||||||
-- 插入默认配置
|
|
||||||
INSERT INTO ops_data_retention_config (table_name, retention_days) VALUES
|
|
||||||
('ops_system_metrics', 30), -- 系统指标保留 30 天
|
|
||||||
('ops_dimension_stats', 30), -- 维度统计保留 30 天
|
|
||||||
('ops_error_logs', 30), -- 错误日志保留 30 天
|
|
||||||
('ops_alert_events', 90), -- 告警事件保留 90 天
|
|
||||||
('usage_logs', 90) -- 使用日志保留 90 天
|
|
||||||
ON CONFLICT (table_name) DO NOTHING;
|
|
||||||
|
|
||||||
COMMENT ON TABLE ops_data_retention_config IS '数据保留策略配置表';
|
|
||||||
COMMENT ON COLUMN ops_data_retention_config.retention_days IS '数据保留天数,超过此天数的数据将被自动清理';
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 6. 创建辅助函数
|
|
||||||
-- ============================================
|
|
||||||
|
|
||||||
-- 函数: 计算健康度评分
|
|
||||||
-- 权重: SLA(40%) + 错误率(30%) + 延迟(20%) + 资源(10%)
|
|
||||||
CREATE OR REPLACE FUNCTION calculate_health_score(
|
|
||||||
p_success_rate DECIMAL,
|
|
||||||
p_error_rate DECIMAL,
|
|
||||||
p_latency_p99 DECIMAL,
|
|
||||||
p_cpu_usage DECIMAL
|
|
||||||
) RETURNS INT AS $$
|
|
||||||
DECLARE
|
|
||||||
sla_score INT;
|
|
||||||
error_score INT;
|
|
||||||
latency_score INT;
|
|
||||||
resource_score INT;
|
|
||||||
BEGIN
|
|
||||||
-- SLA 评分 (40分)
|
|
||||||
sla_score := CASE
|
|
||||||
WHEN p_success_rate >= 99.9 THEN 40
|
|
||||||
WHEN p_success_rate >= 99.5 THEN 35
|
|
||||||
WHEN p_success_rate >= 99.0 THEN 30
|
|
||||||
WHEN p_success_rate >= 95.0 THEN 20
|
|
||||||
ELSE 10
|
|
||||||
END;
|
|
||||||
|
|
||||||
-- 错误率评分 (30分)
|
|
||||||
error_score := CASE
|
|
||||||
WHEN p_error_rate <= 0.1 THEN 30
|
|
||||||
WHEN p_error_rate <= 0.5 THEN 25
|
|
||||||
WHEN p_error_rate <= 1.0 THEN 20
|
|
||||||
WHEN p_error_rate <= 5.0 THEN 10
|
|
||||||
ELSE 5
|
|
||||||
END;
|
|
||||||
|
|
||||||
-- 延迟评分 (20分)
|
|
||||||
latency_score := CASE
|
|
||||||
WHEN p_latency_p99 <= 500 THEN 20
|
|
||||||
WHEN p_latency_p99 <= 1000 THEN 15
|
|
||||||
WHEN p_latency_p99 <= 3000 THEN 10
|
|
||||||
WHEN p_latency_p99 <= 5000 THEN 5
|
|
||||||
ELSE 0
|
|
||||||
END;
|
|
||||||
|
|
||||||
-- 资源评分 (10分)
|
|
||||||
resource_score := CASE
|
|
||||||
WHEN p_cpu_usage <= 50 THEN 10
|
|
||||||
WHEN p_cpu_usage <= 70 THEN 7
|
|
||||||
WHEN p_cpu_usage <= 85 THEN 5
|
|
||||||
ELSE 2
|
|
||||||
END;
|
|
||||||
|
|
||||||
RETURN sla_score + error_score + latency_score + resource_score;
|
|
||||||
END;
|
|
||||||
$$ LANGUAGE plpgsql IMMUTABLE;
|
|
||||||
|
|
||||||
COMMENT ON FUNCTION calculate_health_score IS '计算系统健康度评分 (0-100),权重: SLA 40% + 错误率 30% + 延迟 20% + 资源 10%';
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 7. 创建视图: 最新指标快照
|
|
||||||
-- ============================================
|
|
||||||
|
|
||||||
CREATE OR REPLACE VIEW ops_latest_metrics AS
|
|
||||||
SELECT
|
|
||||||
m.*,
|
|
||||||
calculate_health_score(
|
|
||||||
m.success_rate::DECIMAL,
|
|
||||||
m.error_rate::DECIMAL,
|
|
||||||
m.p99_latency_ms::DECIMAL,
|
|
||||||
m.cpu_usage_percent::DECIMAL
|
|
||||||
) AS health_score
|
|
||||||
FROM ops_system_metrics m
|
|
||||||
WHERE m.window_minutes = 1
|
|
||||||
AND m.created_at = (SELECT MAX(created_at) FROM ops_system_metrics WHERE window_minutes = 1)
|
|
||||||
LIMIT 1;
|
|
||||||
|
|
||||||
COMMENT ON VIEW ops_latest_metrics IS '最新的系统指标快照,包含健康度评分';
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 8. 创建视图: 活跃告警列表
|
|
||||||
-- ============================================
|
|
||||||
|
|
||||||
CREATE OR REPLACE VIEW ops_active_alerts AS
|
|
||||||
SELECT
|
|
||||||
e.id,
|
|
||||||
e.rule_id,
|
|
||||||
r.name AS rule_name,
|
|
||||||
r.metric_type,
|
|
||||||
e.fired_at,
|
|
||||||
e.metric_value,
|
|
||||||
e.threshold_value,
|
|
||||||
r.severity,
|
|
||||||
EXTRACT(EPOCH FROM (NOW() - e.fired_at))::INT AS duration_seconds
|
|
||||||
FROM ops_alert_events e
|
|
||||||
JOIN ops_alert_rules r ON e.rule_id = r.id
|
|
||||||
WHERE e.status = 'firing'
|
|
||||||
ORDER BY e.fired_at DESC;
|
|
||||||
|
|
||||||
COMMENT ON VIEW ops_active_alerts IS '当前活跃的告警列表';
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 9. 权限设置 (可选)
|
|
||||||
-- ============================================
|
|
||||||
|
|
||||||
-- 如果有专门的 ops 用户,可以授权
|
|
||||||
-- GRANT SELECT, INSERT, UPDATE ON ops_system_metrics TO ops_user;
|
|
||||||
-- GRANT SELECT, INSERT ON ops_dimension_stats TO ops_user;
|
|
||||||
-- GRANT ALL ON ops_alert_rules TO ops_user;
|
|
||||||
-- GRANT ALL ON ops_alert_events TO ops_user;
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 10. 数据完整性检查
|
|
||||||
-- ============================================
|
|
||||||
|
|
||||||
-- 确保现有数据的兼容性
|
|
||||||
UPDATE ops_system_metrics
|
|
||||||
SET
|
|
||||||
qps = COALESCE(request_count / (window_minutes * 60.0), 0),
|
|
||||||
error_rate = COALESCE((error_count::DECIMAL / NULLIF(request_count, 0)) * 100, 0)
|
|
||||||
WHERE qps = 0 AND request_count > 0;
|
|
||||||
|
|
||||||
-- ============================================
|
|
||||||
-- 完成
|
|
||||||
-- ============================================
|
|
||||||
@@ -1,324 +0,0 @@
|
|||||||
/**
|
|
||||||
* Admin Ops API endpoints
|
|
||||||
* Provides stability metrics and error logs for ops dashboard
|
|
||||||
*/
|
|
||||||
|
|
||||||
import { apiClient } from '../client'
|
|
||||||
|
|
||||||
export type OpsSeverity = 'P0' | 'P1' | 'P2' | 'P3'
|
|
||||||
export type OpsPhase =
|
|
||||||
| 'auth'
|
|
||||||
| 'concurrency'
|
|
||||||
| 'billing'
|
|
||||||
| 'scheduling'
|
|
||||||
| 'network'
|
|
||||||
| 'upstream'
|
|
||||||
| 'response'
|
|
||||||
| 'internal'
|
|
||||||
export type OpsPlatform = 'gemini' | 'openai' | 'anthropic' | 'antigravity'
|
|
||||||
|
|
||||||
export interface OpsMetrics {
|
|
||||||
window_minutes: number
|
|
||||||
request_count: number
|
|
||||||
success_count: number
|
|
||||||
error_count: number
|
|
||||||
success_rate: number
|
|
||||||
error_rate: number
|
|
||||||
p95_latency_ms: number
|
|
||||||
p99_latency_ms: number
|
|
||||||
http2_errors: number
|
|
||||||
active_alerts: number
|
|
||||||
cpu_usage_percent?: number
|
|
||||||
memory_used_mb?: number
|
|
||||||
memory_total_mb?: number
|
|
||||||
memory_usage_percent?: number
|
|
||||||
heap_alloc_mb?: number
|
|
||||||
gc_pause_ms?: number
|
|
||||||
concurrency_queue_depth?: number
|
|
||||||
updated_at?: string
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface OpsErrorLog {
|
|
||||||
id: number
|
|
||||||
created_at: string
|
|
||||||
phase: OpsPhase
|
|
||||||
type: string
|
|
||||||
severity: OpsSeverity
|
|
||||||
status_code: number
|
|
||||||
platform: OpsPlatform
|
|
||||||
model: string
|
|
||||||
latency_ms: number | null
|
|
||||||
request_id: string
|
|
||||||
message: string
|
|
||||||
user_id?: number | null
|
|
||||||
api_key_id?: number | null
|
|
||||||
account_id?: number | null
|
|
||||||
group_id?: number | null
|
|
||||||
client_ip?: string
|
|
||||||
request_path?: string
|
|
||||||
stream?: boolean
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface OpsErrorListParams {
|
|
||||||
start_time?: string
|
|
||||||
end_time?: string
|
|
||||||
platform?: OpsPlatform
|
|
||||||
phase?: OpsPhase
|
|
||||||
severity?: OpsSeverity
|
|
||||||
q?: string
|
|
||||||
/**
|
|
||||||
* Max 500 (legacy endpoint uses a hard cap); use paginated /admin/ops/errors for larger result sets.
|
|
||||||
*/
|
|
||||||
limit?: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface OpsErrorListResponse {
|
|
||||||
items: OpsErrorLog[]
|
|
||||||
total?: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface OpsMetricsHistoryParams {
|
|
||||||
window_minutes?: number
|
|
||||||
minutes?: number
|
|
||||||
start_time?: string
|
|
||||||
end_time?: string
|
|
||||||
limit?: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface OpsMetricsHistoryResponse {
|
|
||||||
items: OpsMetrics[]
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get latest ops metrics snapshot
|
|
||||||
*/
|
|
||||||
export async function getMetrics(): Promise<OpsMetrics> {
|
|
||||||
const { data } = await apiClient.get<OpsMetrics>('/admin/ops/metrics')
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* List metrics history for charts
|
|
||||||
*/
|
|
||||||
export async function listMetricsHistory(params?: OpsMetricsHistoryParams): Promise<OpsMetricsHistoryResponse> {
|
|
||||||
const { data } = await apiClient.get<OpsMetricsHistoryResponse>('/admin/ops/metrics/history', { params })
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* List recent error logs with optional filters
|
|
||||||
*/
|
|
||||||
export async function listErrors(params?: OpsErrorListParams): Promise<OpsErrorListResponse> {
|
|
||||||
const { data } = await apiClient.get<OpsErrorListResponse>('/admin/ops/error-logs', { params })
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface OpsDashboardOverview {
|
|
||||||
timestamp: string
|
|
||||||
health_score: number
|
|
||||||
sla: {
|
|
||||||
current: number
|
|
||||||
threshold: number
|
|
||||||
status: string
|
|
||||||
trend: string
|
|
||||||
change_24h: number
|
|
||||||
}
|
|
||||||
qps: {
|
|
||||||
current: number
|
|
||||||
peak_1h: number
|
|
||||||
avg_1h: number
|
|
||||||
change_vs_yesterday: number
|
|
||||||
}
|
|
||||||
tps: {
|
|
||||||
current: number
|
|
||||||
peak_1h: number
|
|
||||||
avg_1h: number
|
|
||||||
}
|
|
||||||
latency: {
|
|
||||||
p50: number
|
|
||||||
p95: number
|
|
||||||
p99: number
|
|
||||||
p999: number
|
|
||||||
avg: number
|
|
||||||
max: number
|
|
||||||
threshold_p99: number
|
|
||||||
status: string
|
|
||||||
}
|
|
||||||
errors: {
|
|
||||||
total_count: number
|
|
||||||
error_rate: number
|
|
||||||
'4xx_count': number
|
|
||||||
'5xx_count': number
|
|
||||||
timeout_count: number
|
|
||||||
top_error?: {
|
|
||||||
code: string
|
|
||||||
message: string
|
|
||||||
count: number
|
|
||||||
}
|
|
||||||
}
|
|
||||||
resources: {
|
|
||||||
cpu_usage: number
|
|
||||||
memory_usage: number
|
|
||||||
disk_usage: number
|
|
||||||
goroutines: number
|
|
||||||
db_connections: {
|
|
||||||
active: number
|
|
||||||
idle: number
|
|
||||||
waiting: number
|
|
||||||
max: number
|
|
||||||
}
|
|
||||||
}
|
|
||||||
system_status: {
|
|
||||||
redis: string
|
|
||||||
database: string
|
|
||||||
background_jobs: string
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface ProviderHealthData {
|
|
||||||
name: string
|
|
||||||
request_count: number
|
|
||||||
success_rate: number
|
|
||||||
error_rate: number
|
|
||||||
latency_avg: number
|
|
||||||
latency_p99: number
|
|
||||||
status: string
|
|
||||||
errors_by_type: {
|
|
||||||
'4xx': number
|
|
||||||
'5xx': number
|
|
||||||
timeout: number
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface ProviderHealthResponse {
|
|
||||||
providers: ProviderHealthData[]
|
|
||||||
summary: {
|
|
||||||
total_requests: number
|
|
||||||
avg_success_rate: number
|
|
||||||
best_provider: string
|
|
||||||
worst_provider: string
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface LatencyHistogramResponse {
|
|
||||||
buckets: {
|
|
||||||
range: string
|
|
||||||
count: number
|
|
||||||
percentage: number
|
|
||||||
}[]
|
|
||||||
total_requests: number
|
|
||||||
slow_request_threshold: number
|
|
||||||
}
|
|
||||||
|
|
||||||
export interface ErrorDistributionResponse {
|
|
||||||
items: {
|
|
||||||
code: string
|
|
||||||
message: string
|
|
||||||
count: number
|
|
||||||
percentage: number
|
|
||||||
}[]
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get realtime ops dashboard overview
|
|
||||||
*/
|
|
||||||
export async function getDashboardOverview(timeRange = '1h'): Promise<OpsDashboardOverview> {
|
|
||||||
const { data } = await apiClient.get<OpsDashboardOverview>('/admin/ops/dashboard/overview', {
|
|
||||||
params: { time_range: timeRange }
|
|
||||||
})
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get provider health comparison
|
|
||||||
*/
|
|
||||||
export async function getProviderHealth(timeRange = '1h'): Promise<ProviderHealthResponse> {
|
|
||||||
const { data } = await apiClient.get<ProviderHealthResponse>('/admin/ops/dashboard/providers', {
|
|
||||||
params: { time_range: timeRange }
|
|
||||||
})
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get latency histogram
|
|
||||||
*/
|
|
||||||
export async function getLatencyHistogram(timeRange = '1h'): Promise<LatencyHistogramResponse> {
|
|
||||||
const { data } = await apiClient.get<LatencyHistogramResponse>('/admin/ops/dashboard/latency-histogram', {
|
|
||||||
params: { time_range: timeRange }
|
|
||||||
})
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get error distribution
|
|
||||||
*/
|
|
||||||
export async function getErrorDistribution(timeRange = '1h'): Promise<ErrorDistributionResponse> {
|
|
||||||
const { data } = await apiClient.get<ErrorDistributionResponse>('/admin/ops/dashboard/errors/distribution', {
|
|
||||||
params: { time_range: timeRange }
|
|
||||||
})
|
|
||||||
return data
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Subscribe to realtime QPS updates via WebSocket
|
|
||||||
*/
|
|
||||||
export function subscribeQPS(onMessage: (data: any) => void): () => void {
|
|
||||||
let ws: WebSocket | null = null
|
|
||||||
let reconnectAttempts = 0
|
|
||||||
const maxReconnectAttempts = 5
|
|
||||||
let reconnectTimer: ReturnType<typeof setTimeout> | null = null
|
|
||||||
let shouldReconnect = true
|
|
||||||
|
|
||||||
const connect = () => {
|
|
||||||
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:'
|
|
||||||
const host = window.location.host
|
|
||||||
ws = new WebSocket(`${protocol}//${host}/api/v1/admin/ops/ws/qps`)
|
|
||||||
|
|
||||||
ws.onopen = () => {
|
|
||||||
console.log('[OpsWS] Connected')
|
|
||||||
reconnectAttempts = 0
|
|
||||||
}
|
|
||||||
|
|
||||||
ws.onmessage = (e) => {
|
|
||||||
const data = JSON.parse(e.data)
|
|
||||||
onMessage(data)
|
|
||||||
}
|
|
||||||
|
|
||||||
ws.onerror = (error) => {
|
|
||||||
console.error('[OpsWS] Connection error:', error)
|
|
||||||
}
|
|
||||||
|
|
||||||
ws.onclose = () => {
|
|
||||||
console.log('[OpsWS] Connection closed')
|
|
||||||
if (shouldReconnect && reconnectAttempts < maxReconnectAttempts) {
|
|
||||||
const delay = Math.min(1000 * Math.pow(2, reconnectAttempts), 30000)
|
|
||||||
console.log(`[OpsWS] Reconnecting in ${delay}ms...`)
|
|
||||||
reconnectTimer = setTimeout(() => {
|
|
||||||
reconnectAttempts++
|
|
||||||
connect()
|
|
||||||
}, delay)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
connect()
|
|
||||||
|
|
||||||
return () => {
|
|
||||||
shouldReconnect = false
|
|
||||||
if (reconnectTimer) clearTimeout(reconnectTimer)
|
|
||||||
if (ws) ws.close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
export const opsAPI = {
|
|
||||||
getMetrics,
|
|
||||||
listMetricsHistory,
|
|
||||||
listErrors,
|
|
||||||
getDashboardOverview,
|
|
||||||
getProviderHealth,
|
|
||||||
getLatencyHistogram,
|
|
||||||
getErrorDistribution,
|
|
||||||
subscribeQPS
|
|
||||||
}
|
|
||||||
|
|
||||||
export default opsAPI
|
|
||||||
@@ -1,417 +0,0 @@
|
|||||||
<script setup lang="ts">
|
|
||||||
import { ref, computed, onMounted, onUnmounted, watch } from 'vue'
|
|
||||||
import { useI18n } from 'vue-i18n'
|
|
||||||
import { Bar, Doughnut } from 'vue-chartjs'
|
|
||||||
import {
|
|
||||||
Chart as ChartJS,
|
|
||||||
Title,
|
|
||||||
Tooltip,
|
|
||||||
Legend,
|
|
||||||
LineElement,
|
|
||||||
LinearScale,
|
|
||||||
PointElement,
|
|
||||||
CategoryScale,
|
|
||||||
BarElement,
|
|
||||||
ArcElement
|
|
||||||
} from 'chart.js'
|
|
||||||
import { useIntervalFn } from '@vueuse/core'
|
|
||||||
import AppLayout from '@/components/layout/AppLayout.vue'
|
|
||||||
import { opsAPI, type OpsDashboardOverview, type ProviderHealthData, type LatencyHistogramResponse, type ErrorDistributionResponse } from '@/api/admin/ops'
|
|
||||||
import { useAuthStore } from '@/stores/auth'
|
|
||||||
|
|
||||||
ChartJS.register(
|
|
||||||
Title,
|
|
||||||
Tooltip,
|
|
||||||
Legend,
|
|
||||||
LineElement,
|
|
||||||
LinearScale,
|
|
||||||
PointElement,
|
|
||||||
CategoryScale,
|
|
||||||
BarElement,
|
|
||||||
ArcElement
|
|
||||||
)
|
|
||||||
|
|
||||||
const { t } = useI18n()
|
|
||||||
const authStore = useAuthStore()
|
|
||||||
const loading = ref(false)
|
|
||||||
const errorMessage = ref('')
|
|
||||||
const timeRange = ref('1h')
|
|
||||||
const lastUpdated = ref(new Date())
|
|
||||||
|
|
||||||
const overview = ref<OpsDashboardOverview | null>(null)
|
|
||||||
const providers = ref<ProviderHealthData[]>([])
|
|
||||||
const latencyData = ref<LatencyHistogramResponse | null>(null)
|
|
||||||
const errorDistribution = ref<ErrorDistributionResponse | null>(null)
|
|
||||||
|
|
||||||
// WebSocket for real-time QPS
|
|
||||||
const realTimeQPS = ref(0)
|
|
||||||
const realTimeTPS = ref(0)
|
|
||||||
const wsConnected = ref(false)
|
|
||||||
let ws: WebSocket | null = null
|
|
||||||
let reconnectTimer: ReturnType<typeof setTimeout> | null = null
|
|
||||||
|
|
||||||
const connectWS = () => {
|
|
||||||
const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:'
|
|
||||||
const wsBaseUrl = import.meta.env.VITE_WS_BASE_URL || window.location.host
|
|
||||||
const wsURL = new URL(`${protocol}//${wsBaseUrl}/api/v1/admin/ops/ws/qps`)
|
|
||||||
const token = authStore.token || localStorage.getItem('auth_token')
|
|
||||||
if (token) {
|
|
||||||
wsURL.searchParams.set('token', token)
|
|
||||||
}
|
|
||||||
ws = new WebSocket(wsURL.toString())
|
|
||||||
|
|
||||||
ws.onopen = () => {
|
|
||||||
wsConnected.value = true
|
|
||||||
}
|
|
||||||
|
|
||||||
ws.onmessage = (event) => {
|
|
||||||
try {
|
|
||||||
const payload = JSON.parse(event.data)
|
|
||||||
if (payload && typeof payload === 'object' && payload.type === 'qps_update' && payload.data) {
|
|
||||||
realTimeQPS.value = payload.data.qps || 0
|
|
||||||
realTimeTPS.value = payload.data.tps || 0
|
|
||||||
}
|
|
||||||
} catch (e) {
|
|
||||||
console.error('WS parse error', e)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
ws.onclose = () => {
|
|
||||||
wsConnected.value = false
|
|
||||||
if (reconnectTimer) clearTimeout(reconnectTimer)
|
|
||||||
reconnectTimer = setTimeout(connectWS, 5000)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
const fetchData = async () => {
|
|
||||||
loading.value = true
|
|
||||||
errorMessage.value = ''
|
|
||||||
try {
|
|
||||||
const [ov, pr, lt, er] = await Promise.all([
|
|
||||||
opsAPI.getDashboardOverview(timeRange.value),
|
|
||||||
opsAPI.getProviderHealth(timeRange.value),
|
|
||||||
opsAPI.getLatencyHistogram(timeRange.value),
|
|
||||||
opsAPI.getErrorDistribution(timeRange.value)
|
|
||||||
])
|
|
||||||
overview.value = ov
|
|
||||||
providers.value = pr.providers
|
|
||||||
latencyData.value = lt
|
|
||||||
errorDistribution.value = er
|
|
||||||
lastUpdated.value = new Date()
|
|
||||||
} catch (err) {
|
|
||||||
console.error('Failed to fetch ops data', err)
|
|
||||||
errorMessage.value = '数据加载失败,请稍后重试'
|
|
||||||
} finally {
|
|
||||||
loading.value = false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Refresh data every 30 seconds (fallback for L2/L3)
|
|
||||||
useIntervalFn(fetchData, 30000)
|
|
||||||
|
|
||||||
onMounted(() => {
|
|
||||||
fetchData()
|
|
||||||
connectWS()
|
|
||||||
})
|
|
||||||
|
|
||||||
onUnmounted(() => {
|
|
||||||
if (ws) ws.close()
|
|
||||||
if (reconnectTimer) clearTimeout(reconnectTimer)
|
|
||||||
})
|
|
||||||
|
|
||||||
watch(timeRange, () => {
|
|
||||||
fetchData()
|
|
||||||
})
|
|
||||||
|
|
||||||
// Chart Data: Latency Distribution
|
|
||||||
const latencyChartData = computed(() => {
|
|
||||||
if (!latencyData.value) return null
|
|
||||||
return {
|
|
||||||
labels: latencyData.value.buckets.map(b => b.range),
|
|
||||||
datasets: [
|
|
||||||
{
|
|
||||||
label: t('admin.ops.charts.requestCount'),
|
|
||||||
data: latencyData.value.buckets.map(b => b.count),
|
|
||||||
backgroundColor: '#3b82f6',
|
|
||||||
borderRadius: 4
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// Chart Data: Error Distribution
|
|
||||||
const errorChartData = computed(() => {
|
|
||||||
if (!errorDistribution.value) return null
|
|
||||||
return {
|
|
||||||
labels: errorDistribution.value.items.map(i => i.code),
|
|
||||||
datasets: [
|
|
||||||
{
|
|
||||||
data: errorDistribution.value.items.map(i => i.count),
|
|
||||||
backgroundColor: [
|
|
||||||
'#ef4444', '#f59e0b', '#3b82f6', '#10b981', '#8b5cf6', '#ec4899'
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
// Chart Data: Provider SLA
|
|
||||||
const providerChartData = computed(() => {
|
|
||||||
if (!providers.value.length) return null
|
|
||||||
return {
|
|
||||||
labels: providers.value.map(p => p.name),
|
|
||||||
datasets: [
|
|
||||||
{
|
|
||||||
label: 'SLA (%)',
|
|
||||||
data: providers.value.map(p => p.success_rate),
|
|
||||||
backgroundColor: providers.value.map(p => p.success_rate > 99.5 ? '#10b981' : p.success_rate > 98 ? '#f59e0b' : '#ef4444'),
|
|
||||||
borderRadius: 4
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
const chartOptions = {
|
|
||||||
responsive: true,
|
|
||||||
maintainAspectRatio: false,
|
|
||||||
plugins: {
|
|
||||||
legend: {
|
|
||||||
display: false
|
|
||||||
}
|
|
||||||
},
|
|
||||||
scales: {
|
|
||||||
y: {
|
|
||||||
beginAtZero: true,
|
|
||||||
grid: {
|
|
||||||
display: false
|
|
||||||
}
|
|
||||||
},
|
|
||||||
x: {
|
|
||||||
grid: {
|
|
||||||
display: false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Tailwind text/border color classes for the health-score gauge:
// green when score >= 90, yellow when >= 70, red below that.
// Falls back to 0 (red) while the overview has not loaded.
const healthScoreClass = computed(() => {
  const score = overview.value?.health_score || 0
  return score >= 90
    ? 'text-green-500 border-green-500'
    : score >= 70
      ? 'text-yellow-500 border-yellow-500'
      : 'text-red-500 border-red-500'
})
|
|
||||||
|
|
||||||
</script>
|
|
||||||
|
|
||||||
<!--
  Ops dashboard layout. Four tiers:
  1. Header bar: health-score gauge, WebSocket connection status, realtime QPS/TPS,
     time-range selector and manual refresh.
  2. Core metrics grid: SLA, P99 latency, request volume, error counts.
  3. Visual analysis: latency histogram, provider SLA bars, error-type doughnut.
  4. System resources: CPU/memory bars, Redis/DB/goroutine status.
  All metric reads use optional chaining since `overview` is null until first fetch.
-->
<template>
  <AppLayout>
    <div class="space-y-6 pb-12">
      <!-- Error Message -->
      <div v-if="errorMessage" class="rounded-2xl bg-red-50 p-4 text-sm text-red-600 dark:bg-red-900/20 dark:text-red-400">
        {{ errorMessage }}
      </div>

      <!-- L1: Header & Realtime Stats -->
      <div class="flex flex-wrap items-center justify-between gap-4 rounded-2xl bg-white p-6 shadow-sm ring-1 ring-gray-900/5 dark:bg-dark-800 dark:ring-dark-700">
        <div class="flex items-center gap-6">
          <!-- Health Score Gauge -->
          <div class="flex h-20 w-20 flex-col items-center justify-center rounded-full border-4 bg-gray-50 dark:bg-dark-900" :class="healthScoreClass">
            <span class="text-2xl font-black">{{ overview?.health_score || '--' }}</span>
            <span class="text-[10px] font-bold opacity-60">HEALTH</span>
          </div>

          <div>
            <h1 class="text-xl font-black text-gray-900 dark:text-white">运维监控中心 2.0</h1>
            <div class="mt-1 flex items-center gap-3">
              <!-- Live-connection indicator driven by the WebSocket state -->
              <span class="flex items-center gap-1.5">
                <span class="h-2 w-2 rounded-full bg-green-500 animate-pulse" v-if="wsConnected"></span>
                <span class="h-2 w-2 rounded-full bg-red-500" v-else></span>
                <span class="text-xs font-medium text-gray-500">{{ wsConnected ? '实时连接中' : '连接已断开' }}</span>
              </span>
              <span class="text-xs text-gray-400">最后更新: {{ lastUpdated.toLocaleTimeString() }}</span>
            </div>
          </div>
        </div>

        <div class="flex items-center gap-4">
          <!-- Realtime QPS/TPS readouts (hidden below lg breakpoint) -->
          <div class="hidden items-center gap-6 border-r border-gray-100 pr-6 dark:border-dark-700 lg:flex">
            <div class="text-center">
              <div class="text-sm font-black text-gray-900 dark:text-white">{{ realTimeQPS.toFixed(1) }}</div>
              <div class="text-[10px] font-bold text-gray-400 uppercase">实时 QPS</div>
            </div>
            <div class="text-center">
              <div class="text-sm font-black text-gray-900 dark:text-white">{{ (realTimeTPS / 1000).toFixed(1) }}K</div>
              <div class="text-[10px] font-bold text-gray-400 uppercase">实时 TPS</div>
            </div>
          </div>

          <!-- Lookback window for the chart/metric queries -->
          <select v-model="timeRange" class="rounded-lg border-gray-200 bg-gray-50 py-1.5 pl-3 pr-8 text-sm font-medium text-gray-700 focus:border-blue-500 focus:ring-blue-500 dark:border-dark-700 dark:bg-dark-900 dark:text-gray-300">
            <option value="5m">5 分钟</option>
            <option value="30m">30 分钟</option>
            <option value="1h">1 小时</option>
            <option value="24h">24 小时</option>
          </select>

          <!-- Manual refresh; icon spins while a fetch is in flight -->
          <button @click="fetchData" :disabled="loading" class="flex h-9 w-9 items-center justify-center rounded-lg bg-gray-100 text-gray-500 hover:bg-gray-200 dark:bg-dark-700 dark:text-gray-400">
            <svg class="h-5 w-5" :class="{ 'animate-spin': loading }" fill="none" viewBox="0 0 24 24" stroke="currentColor">
              <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M4 4v5h.582m15.356 2A8.001 8.001 0 004.582 9m0 0H9m11 11v-5h-.581m0 0a8.003 8.003 0 01-15.357-2m15.357 2H15" />
            </svg>
          </button>
        </div>
      </div>

      <!-- L1: Core Metrics Grid -->
      <div class="grid grid-cols-1 gap-4 sm:grid-cols-2 lg:grid-cols-4">
        <!-- SLA card -->
        <div class="rounded-2xl bg-white p-5 shadow-sm ring-1 ring-gray-900/5 dark:bg-dark-800 dark:ring-dark-700">
          <div class="flex items-center justify-between">
            <span class="text-xs font-bold text-gray-400 uppercase tracking-wider">服务可用率 (SLA)</span>
            <span class="rounded-full bg-green-50 px-2 py-0.5 text-[10px] font-bold text-green-600 dark:bg-green-900/30">{{ overview?.sla.status }}</span>
          </div>
          <div class="mt-2 flex items-baseline gap-2">
            <span class="text-2xl font-black text-gray-900 dark:text-white">{{ overview?.sla.current.toFixed(2) }}%</span>
            <span class="text-xs font-bold" :class="overview?.sla.change_24h && overview.sla.change_24h >= 0 ? 'text-green-500' : 'text-red-500'">
              {{ overview?.sla.change_24h && overview.sla.change_24h >= 0 ? '+' : '' }}{{ overview?.sla.change_24h }}%
            </span>
          </div>
          <div class="mt-3 h-1 w-full overflow-hidden rounded-full bg-gray-100 dark:bg-dark-700">
            <div class="h-full bg-green-500" :style="{ width: `${overview?.sla.current}%` }"></div>
          </div>
        </div>

        <!-- P99 latency card -->
        <div class="rounded-2xl bg-white p-5 shadow-sm ring-1 ring-gray-900/5 dark:bg-dark-800 dark:ring-dark-700">
          <div class="flex items-center justify-between">
            <span class="text-xs font-bold text-gray-400 uppercase tracking-wider">P99 响应延迟</span>
            <span class="rounded-full bg-blue-50 px-2 py-0.5 text-[10px] font-bold text-blue-600 dark:bg-blue-900/30">Target 1s</span>
          </div>
          <div class="mt-2 flex items-baseline gap-2">
            <span class="text-2xl font-black text-gray-900 dark:text-white">{{ overview?.latency.p99 }}ms</span>
            <span class="text-xs font-bold text-gray-400">Avg: {{ overview?.latency.avg }}ms</span>
          </div>
          <!-- 10-segment bar: one segment lights per 200ms of P99 latency -->
          <div class="mt-3 flex gap-1">
            <div v-for="i in 10" :key="i" class="h-1 flex-1 rounded-full" :class="i <= (overview?.latency.p99 || 0) / 200 ? 'bg-blue-500' : 'bg-gray-100 dark:bg-dark-700'"></div>
          </div>
        </div>

        <!-- Request volume card -->
        <div class="rounded-2xl bg-white p-5 shadow-sm ring-1 ring-gray-900/5 dark:bg-dark-800 dark:ring-dark-700">
          <div class="flex items-center justify-between">
            <span class="text-xs font-bold text-gray-400 uppercase tracking-wider">周期请求总数</span>
            <svg class="h-4 w-4 text-gray-300" fill="none" viewBox="0 0 24 24" stroke="currentColor"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M13 7h8m0 0v8m0-8l-8 8-4-4-6 6" /></svg>
          </div>
          <div class="mt-2 flex items-baseline gap-2">
            <span class="text-2xl font-black text-gray-900 dark:text-white">{{ overview?.qps.avg_1h.toFixed(1) }}</span>
            <span class="text-xs font-bold text-gray-400">req/s</span>
          </div>
          <div class="mt-1 text-[10px] font-bold text-gray-400 uppercase">对比昨日: {{ overview?.qps.change_vs_yesterday }}%</div>
        </div>

        <!-- Error count card -->
        <div class="rounded-2xl bg-white p-5 shadow-sm ring-1 ring-gray-900/5 dark:bg-dark-800 dark:ring-dark-700">
          <div class="flex items-center justify-between">
            <span class="text-xs font-bold text-gray-400 uppercase tracking-wider">周期错误数</span>
            <span class="rounded-full bg-red-50 px-2 py-0.5 text-[10px] font-bold text-red-600 dark:bg-red-900/30">{{ overview?.errors.error_rate.toFixed(2) }}%</span>
          </div>
          <div class="mt-2 flex items-baseline gap-2">
            <span class="text-2xl font-black text-gray-900 dark:text-white">{{ overview?.errors.total_count }}</span>
            <!-- bracket access: key starts with a digit, not a valid identifier -->
            <span class="text-xs font-bold text-red-500">5xx: {{ overview?.errors['5xx_count'] }}</span>
          </div>
          <div class="mt-1 text-[10px] font-bold text-gray-400 uppercase">主要错误码: {{ overview?.errors.top_error?.code || 'N/A' }}</div>
        </div>
      </div>

      <!-- L2: Visual Analysis -->
      <div class="grid grid-cols-1 gap-6 lg:grid-cols-2">
        <!-- Latency Distribution -->
        <div class="rounded-2xl bg-white p-6 shadow-sm ring-1 ring-gray-900/5 dark:bg-dark-800 dark:ring-dark-700">
          <div class="mb-6 flex items-center justify-between">
            <h3 class="text-sm font-black text-gray-900 dark:text-white uppercase tracking-wider">请求延迟分布</h3>
          </div>
          <div class="h-64">
            <Bar v-if="latencyChartData" :data="latencyChartData" :options="chartOptions" />
            <div v-else class="flex h-full items-center justify-center text-gray-400">加载中...</div>
          </div>
        </div>

        <!-- Provider Health -->
        <div class="rounded-2xl bg-white p-6 shadow-sm ring-1 ring-gray-900/5 dark:bg-dark-800 dark:ring-dark-700">
          <div class="mb-6 flex items-center justify-between">
            <h3 class="text-sm font-black text-gray-900 dark:text-white uppercase tracking-wider">上游供应商健康度 (SLA)</h3>
          </div>
          <div class="h-64">
            <Bar v-if="providerChartData" :data="providerChartData" :options="chartOptions" />
            <div v-else class="flex h-full items-center justify-center text-gray-400">加载中...</div>
          </div>
        </div>

        <!-- Error Distribution -->
        <div class="rounded-2xl bg-white p-6 shadow-sm ring-1 ring-gray-900/5 dark:bg-dark-800 dark:ring-dark-700">
          <div class="mb-6 flex items-center justify-between">
            <h3 class="text-sm font-black text-gray-900 dark:text-white uppercase tracking-wider">错误类型分布</h3>
          </div>
          <div class="flex h-64 gap-6">
            <div class="relative w-1/2">
              <Doughnut v-if="errorChartData" :data="errorChartData" :options="{ ...chartOptions, cutout: '70%' }" />
            </div>
            <!-- Legend: top-5 error codes; colors mirror the doughnut palette order -->
            <div class="flex flex-1 flex-col justify-center space-y-3">
              <div v-for="(item, idx) in errorDistribution?.items.slice(0, 5)" :key="item.code" class="flex items-center justify-between">
                <div class="flex items-center gap-2">
                  <div class="h-2 w-2 rounded-full" :style="{ backgroundColor: ['#ef4444', '#f59e0b', '#3b82f6', '#10b981', '#8b5cf6'][idx] }"></div>
                  <span class="text-xs font-bold text-gray-700 dark:text-gray-300">{{ item.code }}</span>
                </div>
                <span class="text-xs font-black text-gray-900 dark:text-white">{{ item.percentage }}%</span>
              </div>
            </div>
          </div>
        </div>

        <!-- System Resources -->
        <div class="rounded-2xl bg-white p-6 shadow-sm ring-1 ring-gray-900/5 dark:bg-dark-800 dark:ring-dark-700">
          <div class="mb-6 flex items-center justify-between">
            <h3 class="text-sm font-black text-gray-900 dark:text-white uppercase tracking-wider">系统运行状态</h3>
          </div>
          <div class="grid grid-cols-2 gap-6">
            <div class="space-y-4">
              <div>
                <div class="mb-1 flex justify-between text-[10px] font-bold text-gray-400 uppercase">CPU 使用率</div>
                <div class="h-2 w-full rounded-full bg-gray-100 dark:bg-dark-700">
                  <div class="h-full rounded-full bg-purple-500" :style="{ width: `${overview?.resources.cpu_usage}%` }"></div>
                </div>
                <div class="mt-1 text-right text-xs font-bold text-gray-900 dark:text-white">{{ overview?.resources.cpu_usage }}%</div>
              </div>
              <div>
                <div class="mb-1 flex justify-between text-[10px] font-bold text-gray-400 uppercase">内存使用率</div>
                <div class="h-2 w-full rounded-full bg-gray-100 dark:bg-dark-700">
                  <div class="h-full rounded-full bg-indigo-500" :style="{ width: `${overview?.resources.memory_usage}%` }"></div>
                </div>
                <div class="mt-1 text-right text-xs font-bold text-gray-900 dark:text-white">{{ overview?.resources.memory_usage }}%</div>
              </div>
            </div>
            <div class="flex flex-col justify-center space-y-4 rounded-xl bg-gray-50 p-4 dark:bg-dark-900">
              <div class="flex items-center justify-between">
                <span class="text-[10px] font-bold text-gray-400 uppercase">Redis 状态</span>
                <span class="text-xs font-bold text-green-500 uppercase">{{ overview?.system_status.redis }}</span>
              </div>
              <div class="flex items-center justify-between">
                <span class="text-[10px] font-bold text-gray-400 uppercase">DB 连接</span>
                <span class="text-xs font-bold text-gray-900 dark:text-white">{{ overview?.resources.db_connections.active }} / {{ overview?.resources.db_connections.max }}</span>
              </div>
              <div class="flex items-center justify-between">
                <span class="text-[10px] font-bold text-gray-400 uppercase">Goroutines</span>
                <span class="text-xs font-bold text-gray-900 dark:text-white">{{ overview?.resources.goroutines }}</span>
              </div>
            </div>
          </div>
        </div>
      </div>
    </div>
  </AppLayout>
</template>
|
|
||||||
|
|
||||||
<style scoped>
/* Custom select styling: suppress the native dropdown arrow and draw
   our own gray chevron via an inline SVG data URI, anchored to the
   right edge so it sits inside the pr-8 padding of the select. */
select {
  appearance: none;
  background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%236b7280' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e");
  background-repeat: no-repeat;
  background-position: right 0.5rem center;
  background-size: 1.5em 1.5em;
}
</style>
|
|
||||||
Reference in New Issue
Block a user