From daf10907e4131097e9108ff1c0f0f28174f71c79 Mon Sep 17 00:00:00 2001
From: yangjianbo
Date: Wed, 14 Jan 2026 15:55:44 +0800
Subject: [PATCH 1/4] fix(auth): fix OAuth token cache invalidation and 401 handling
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Add a token cache invalidation interface and invalidate entries after refresh.
401 handling now supports custom rules and a configurable cooldown.
Add tests covering cache invalidation and 401 handling.
Tests: make test
---
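Note for reviewers: the new cooldown is read from rate_limit.oauth_401_cooldown_minutes
(default 5, see setDefaults below). For reference, a minimal config sketch using only
the keys this patch introduces or touches:

    rate_limit:
      overload_cooldown_minutes: 10    # 529 overload cooldown (minutes)
      oauth_401_cooldown_minutes: 5    # OAuth 401 temp-unschedulable cooldown (minutes)
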
backend/cmd/server/wire_gen.go | 7 +-
backend/internal/config/config.go | 4 +-
.../internal/repository/gemini_token_cache.go | 5 +
.../gemini_token_cache_integration_test.go | 47 +++
.../repository/gemini_token_cache_test.go | 28 ++
.../service/antigravity_token_provider.go | 4 +-
.../internal/service/gemini_token_cache.go | 1 +
.../internal/service/gemini_token_provider.go | 4 +-
backend/internal/service/ratelimit_service.go | 175 ++++++---
.../service/ratelimit_service_401_test.go | 353 +++++++++++++++++
.../service/token_cache_invalidator.go | 35 ++
.../service/token_cache_invalidator_test.go | 97 +++++
.../internal/service/token_cache_key_test.go | 153 ++++++++
.../internal/service/token_refresh_service.go | 23 +-
.../service/token_refresh_service_test.go | 361 ++++++++++++++++++
backend/internal/service/wire.go | 6 +-
config.yaml | 3 +
deploy/.env.example | 11 +
deploy/config.example.yaml | 3 +
19 files changed, 1257 insertions(+), 63 deletions(-)
create mode 100644 backend/internal/repository/gemini_token_cache_integration_test.go
create mode 100644 backend/internal/repository/gemini_token_cache_test.go
create mode 100644 backend/internal/service/ratelimit_service_401_test.go
create mode 100644 backend/internal/service/token_cache_invalidator.go
create mode 100644 backend/internal/service/token_cache_invalidator_test.go
create mode 100644 backend/internal/service/token_cache_key_test.go
create mode 100644 backend/internal/service/token_refresh_service_test.go
diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go
index 6b5ffad4..08e40010 100644
--- a/backend/cmd/server/wire_gen.go
+++ b/backend/cmd/server/wire_gen.go
@@ -98,12 +98,13 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
geminiQuotaService := service.NewGeminiQuotaService(configConfig, settingRepository)
tempUnschedCache := repository.NewTempUnschedCache(redisClient)
timeoutCounterCache := repository.NewTimeoutCounterCache(redisClient)
- rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, settingService)
+ geminiTokenCache := repository.NewGeminiTokenCache(redisClient)
+ tokenCacheInvalidator := service.NewCompositeTokenCacheInvalidator(geminiTokenCache)
+ rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, settingService, tokenCacheInvalidator)
claudeUsageFetcher := repository.NewClaudeUsageFetcher()
antigravityQuotaFetcher := service.NewAntigravityQuotaFetcher(proxyRepository)
usageCache := service.NewUsageCache()
accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache)
- geminiTokenCache := repository.NewGeminiTokenCache(redisClient)
geminiTokenProvider := service.NewGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService)
gatewayCache := repository.NewGatewayCache(redisClient)
antigravityTokenProvider := service.NewAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService)
@@ -166,7 +167,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig)
opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig)
opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig)
- tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, configConfig)
+ tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, tokenCacheInvalidator, configConfig)
accountExpiryService := service.ProvideAccountExpiryService(accountRepository)
v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService)
application := &Application{
diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go
index 944e0f84..87221256 100644
--- a/backend/internal/config/config.go
+++ b/backend/internal/config/config.go
@@ -435,7 +435,8 @@ type DefaultConfig struct {
}
type RateLimitConfig struct {
- OverloadCooldownMinutes int `mapstructure:"overload_cooldown_minutes"` // 529 overload cooldown (minutes)
+ OverloadCooldownMinutes int `mapstructure:"overload_cooldown_minutes"` // 529 overload cooldown (minutes)
+ OAuth401CooldownMinutes int `mapstructure:"oauth_401_cooldown_minutes"` // OAuth 401 temp-unschedulable cooldown (minutes)
}
// APIKeyAuthCacheConfig API Key 认证缓存配置
@@ -709,6 +710,7 @@ func setDefaults() {
// RateLimit
viper.SetDefault("rate_limit.overload_cooldown_minutes", 10)
+ viper.SetDefault("rate_limit.oauth_401_cooldown_minutes", 5)
// Pricing - synced from the price-mirror branch, which maintains sha256 hash files for incremental update checks
viper.SetDefault("pricing.remote_url", "https://raw.githubusercontent.com/Wei-Shaw/claude-relay-service/price-mirror/model_prices_and_context_window.json")
diff --git a/backend/internal/repository/gemini_token_cache.go b/backend/internal/repository/gemini_token_cache.go
index a7270556..82c14def 100644
--- a/backend/internal/repository/gemini_token_cache.go
+++ b/backend/internal/repository/gemini_token_cache.go
@@ -33,6 +33,11 @@ func (c *geminiTokenCache) SetAccessToken(ctx context.Context, cacheKey string,
return c.rdb.Set(ctx, key, token, ttl).Err()
}
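+
+// DeleteAccessToken removes the cached access token for cacheKey; deleting a
+// missing key is not an error.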
+func (c *geminiTokenCache) DeleteAccessToken(ctx context.Context, cacheKey string) error {
+ key := fmt.Sprintf("%s%s", geminiTokenKeyPrefix, cacheKey)
+ return c.rdb.Del(ctx, key).Err()
+}
+
func (c *geminiTokenCache) AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error) {
key := fmt.Sprintf("%s%s", geminiRefreshLockKeyPrefix, cacheKey)
return c.rdb.SetNX(ctx, key, 1, ttl).Result()
diff --git a/backend/internal/repository/gemini_token_cache_integration_test.go b/backend/internal/repository/gemini_token_cache_integration_test.go
new file mode 100644
index 00000000..4fe89865
--- /dev/null
+++ b/backend/internal/repository/gemini_token_cache_integration_test.go
@@ -0,0 +1,47 @@
+//go:build integration
+
+package repository
+
+import (
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/Wei-Shaw/sub2api/internal/service"
+ "github.com/redis/go-redis/v9"
+ "github.com/stretchr/testify/require"
+ "github.com/stretchr/testify/suite"
+)
+
+type GeminiTokenCacheSuite struct {
+ IntegrationRedisSuite
+ cache service.GeminiTokenCache
+}
+
+func (s *GeminiTokenCacheSuite) SetupTest() {
+ s.IntegrationRedisSuite.SetupTest()
+ s.cache = NewGeminiTokenCache(s.rdb)
+}
+
+func (s *GeminiTokenCacheSuite) TestDeleteAccessToken() {
+ cacheKey := "project-123"
+ token := "token-value"
+ require.NoError(s.T(), s.cache.SetAccessToken(s.ctx, cacheKey, token, time.Minute))
+
+ got, err := s.cache.GetAccessToken(s.ctx, cacheKey)
+ require.NoError(s.T(), err)
+ require.Equal(s.T(), token, got)
+
+ require.NoError(s.T(), s.cache.DeleteAccessToken(s.ctx, cacheKey))
+
+ _, err = s.cache.GetAccessToken(s.ctx, cacheKey)
+ require.True(s.T(), errors.Is(err, redis.Nil), "expected redis.Nil after delete")
+}
+
+func (s *GeminiTokenCacheSuite) TestDeleteAccessToken_MissingKey() {
+ require.NoError(s.T(), s.cache.DeleteAccessToken(s.ctx, "missing-key"))
+}
+
+func TestGeminiTokenCacheSuite(t *testing.T) {
+ suite.Run(t, new(GeminiTokenCacheSuite))
+}
diff --git a/backend/internal/repository/gemini_token_cache_test.go b/backend/internal/repository/gemini_token_cache_test.go
new file mode 100644
index 00000000..4fcebfdd
--- /dev/null
+++ b/backend/internal/repository/gemini_token_cache_test.go
@@ -0,0 +1,28 @@
+//go:build unit
+
+package repository
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/redis/go-redis/v9"
+ "github.com/stretchr/testify/require"
+)
+
+func TestGeminiTokenCache_DeleteAccessToken_RedisError(t *testing.T) {
+ rdb := redis.NewClient(&redis.Options{
+ Addr: "127.0.0.1:1",
+ DialTimeout: 50 * time.Millisecond,
+ ReadTimeout: 50 * time.Millisecond,
+ WriteTimeout: 50 * time.Millisecond,
+ })
+ t.Cleanup(func() {
+ _ = rdb.Close()
+ })
+
+ cache := NewGeminiTokenCache(rdb)
+ err := cache.DeleteAccessToken(context.Background(), "broken")
+ require.Error(t, err)
+}
diff --git a/backend/internal/service/antigravity_token_provider.go b/backend/internal/service/antigravity_token_provider.go
index cbd1bef4..c5dc55db 100644
--- a/backend/internal/service/antigravity_token_provider.go
+++ b/backend/internal/service/antigravity_token_provider.go
@@ -45,7 +45,7 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account *
return "", errors.New("not an antigravity oauth account")
}
- cacheKey := antigravityTokenCacheKey(account)
+ cacheKey := AntigravityTokenCacheKey(account)
// 1. Try the cache first
if p.tokenCache != nil {
@@ -121,7 +121,7 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account *
return accessToken, nil
}
-func antigravityTokenCacheKey(account *Account) string {
+func AntigravityTokenCacheKey(account *Account) string {
projectID := strings.TrimSpace(account.GetCredential("project_id"))
if projectID != "" {
return "ag:" + projectID
diff --git a/backend/internal/service/gemini_token_cache.go b/backend/internal/service/gemini_token_cache.go
index d5e64f9a..70f246da 100644
--- a/backend/internal/service/gemini_token_cache.go
+++ b/backend/internal/service/gemini_token_cache.go
@@ -10,6 +10,7 @@ type GeminiTokenCache interface {
// cacheKey should be stable for the token scope; for GeminiCli OAuth we primarily use project_id.
GetAccessToken(ctx context.Context, cacheKey string) (string, error)
SetAccessToken(ctx context.Context, cacheKey string, token string, ttl time.Duration) error
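+ // DeleteAccessToken removes the cached token for cacheKey, e.g. after a refresh or an upstream 401.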
+ DeleteAccessToken(ctx context.Context, cacheKey string) error
AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error)
ReleaseRefreshLock(ctx context.Context, cacheKey string) error
diff --git a/backend/internal/service/gemini_token_provider.go b/backend/internal/service/gemini_token_provider.go
index 0257d19f..a5cacc9a 100644
--- a/backend/internal/service/gemini_token_provider.go
+++ b/backend/internal/service/gemini_token_provider.go
@@ -40,7 +40,7 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou
return "", errors.New("not a gemini oauth account")
}
- cacheKey := geminiTokenCacheKey(account)
+ cacheKey := GeminiTokenCacheKey(account)
// 1) Try cache first.
if p.tokenCache != nil {
@@ -151,7 +151,7 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou
return accessToken, nil
}
-func geminiTokenCacheKey(account *Account) string {
+func GeminiTokenCacheKey(account *Account) string {
projectID := strings.TrimSpace(account.GetCredential("project_id"))
if projectID != "" {
return projectID
diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go
index 62b0c6b8..a9523216 100644
--- a/backend/internal/service/ratelimit_service.go
+++ b/backend/internal/service/ratelimit_service.go
@@ -3,7 +3,7 @@ package service
import (
"context"
"encoding/json"
- "log"
+ "log/slog"
"net/http"
"strconv"
"strings"
@@ -15,15 +15,16 @@ import (
// RateLimitService manages rate-limit and overload state
type RateLimitService struct {
- accountRepo AccountRepository
- usageRepo UsageLogRepository
- cfg *config.Config
- geminiQuotaService *GeminiQuotaService
- tempUnschedCache TempUnschedCache
- timeoutCounterCache TimeoutCounterCache
- settingService *SettingService
- usageCacheMu sync.RWMutex
- usageCache map[int64]*geminiUsageCacheEntry
+ accountRepo AccountRepository
+ usageRepo UsageLogRepository
+ cfg *config.Config
+ geminiQuotaService *GeminiQuotaService
+ tempUnschedCache TempUnschedCache
+ timeoutCounterCache TimeoutCounterCache
+ settingService *SettingService
+ tokenCacheInvalidator TokenCacheInvalidator
+ usageCacheMu sync.RWMutex
+ usageCache map[int64]*geminiUsageCacheEntry
}
type geminiUsageCacheEntry struct {
@@ -56,6 +57,11 @@ func (s *RateLimitService) SetSettingService(settingService *SettingService) {
s.settingService = settingService
}
+// SetTokenCacheInvalidator sets the token cache invalidator (optional dependency)
+func (s *RateLimitService) SetTokenCacheInvalidator(invalidator TokenCacheInvalidator) {
+ s.tokenCacheInvalidator = invalidator
+}
+
// HandleUpstreamError handles upstream error responses and marks account state.
// It returns whether scheduling for this account should stop.
func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Account, statusCode int, headers http.Header, responseBody []byte) (shouldDisable bool) {
@@ -63,11 +69,16 @@ func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Acc
// When custom error codes are enabled and this code is not in the list, do nothing (no scheduling stop, no rate-limit/overload marking)
customErrorCodesEnabled := account.IsCustomErrorCodesEnabled()
if !account.ShouldHandleErrorCode(statusCode) {
- log.Printf("Account %d: error %d skipped (not in custom error codes)", account.ID, statusCode)
+ slog.Info("account_error_code_skipped", "account_id", account.ID, "status_code", statusCode)
return false
}
- tempMatched := s.tryTempUnschedulable(ctx, account, statusCode, responseBody)
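+ // Gemini/Antigravity OAuth 401s get dedicated handling below; only run the
+ // generic temp-unschedulable rule matching for them when the account has
+ // such rules enabled.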
+ isOAuth401 := statusCode == 401 && account.Type == AccountTypeOAuth &&
+ (account.Platform == PlatformAntigravity || account.Platform == PlatformGemini)
+ tempMatched := false
+ if !isOAuth401 || account.IsTempUnschedulableEnabled() {
+ tempMatched = s.tryTempUnschedulable(ctx, account, statusCode, responseBody)
+ }
upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(responseBody))
upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg)
if upstreamMsg != "" {
@@ -76,7 +87,19 @@ func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Acc
switch statusCode {
case 401:
- // Auth failure: stop scheduling and record the error
+ if isOAuth401 {
+ if tempMatched {
+ if s.tokenCacheInvalidator != nil {
+ if err := s.tokenCacheInvalidator.InvalidateToken(ctx, account); err != nil {
+ slog.Warn("oauth_401_invalidate_cache_failed", "account_id", account.ID, "error", err)
+ }
+ }
+ shouldDisable = true
+ } else {
+ shouldDisable = s.handleOAuth401TempUnschedulable(ctx, account, upstreamMsg)
+ }
+ break
+ }
msg := "Authentication failed (401): invalid or expired credentials"
if upstreamMsg != "" {
msg = "Authentication failed (401): " + upstreamMsg
@@ -116,7 +139,7 @@ func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Acc
shouldDisable = true
} else if statusCode >= 500 {
// 未启用自定义错误码时:仅记录5xx错误
- log.Printf("Account %d received upstream error %d", account.ID, statusCode)
+ slog.Warn("account_upstream_error", "account_id", account.ID, "status_code", statusCode)
shouldDisable = false
}
}
@@ -127,6 +150,63 @@ func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Acc
return shouldDisable
}
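+
+// handleOAuth401TempUnschedulable invalidates the cached access token and marks
+// the account temporarily unschedulable instead of disabling it permanently.
+// It returns true when the account was marked.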
+func (s *RateLimitService) handleOAuth401TempUnschedulable(ctx context.Context, account *Account, upstreamMsg string) bool {
+ if account == nil {
+ return false
+ }
+
+ if s.tokenCacheInvalidator != nil {
+ if err := s.tokenCacheInvalidator.InvalidateToken(ctx, account); err != nil {
+ slog.Warn("oauth_401_invalidate_cache_failed", "account_id", account.ID, "error", err)
+ }
+ }
+
+ now := time.Now()
+ until := now.Add(s.oauth401Cooldown())
+ msg := "Authentication failed (401): invalid or expired credentials"
+ if upstreamMsg != "" {
+ msg = "Authentication failed (401): " + upstreamMsg
+ }
+
+ state := &TempUnschedState{
+ UntilUnix: until.Unix(),
+ TriggeredAtUnix: now.Unix(),
+ StatusCode: 401,
+ MatchedKeyword: "oauth_401",
+ RuleIndex: -1, // -1 marks the OAuth 401 special case, not a configured rule
+ ErrorMessage: msg,
+ }
+
+ reason := ""
+ if raw, err := json.Marshal(state); err == nil {
+ reason = string(raw)
+ }
+ if reason == "" {
+ reason = msg
+ }
+
+ if err := s.accountRepo.SetTempUnschedulable(ctx, account.ID, until, reason); err != nil {
+ slog.Warn("oauth_401_set_temp_unschedulable_failed", "account_id", account.ID, "error", err)
+ return false
+ }
+
+ if s.tempUnschedCache != nil {
+ if err := s.tempUnschedCache.SetTempUnsched(ctx, account.ID, state); err != nil {
+ slog.Warn("oauth_401_set_temp_unsched_cache_failed", "account_id", account.ID, "error", err)
+ }
+ }
+
+ slog.Info("oauth_401_temp_unschedulable", "account_id", account.ID, "until", until)
+ return true
+}
+
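+// oauth401Cooldown returns the configured OAuth 401 cooldown, falling back to 5 minutes.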
+func (s *RateLimitService) oauth401Cooldown() time.Duration {
+ if s != nil && s.cfg != nil && s.cfg.RateLimit.OAuth401CooldownMinutes > 0 {
+ return time.Duration(s.cfg.RateLimit.OAuth401CooldownMinutes) * time.Minute
+ }
+ return 5 * time.Minute
+}
+
// PreCheckUsage proactively checks local quota before dispatching a request.
// Returns false when the account should be skipped.
func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, requestedModel string) (bool, error) {
@@ -188,7 +268,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account,
// NOTE:
// - This is a local precheck to reduce upstream 429s.
// - Do NOT mark the account as rate-limited here; rate_limit_reset_at should reflect real upstream 429s.
- log.Printf("[Gemini PreCheck] Account %d reached daily quota (%d/%d), skip until %v", account.ID, used, limit, resetAt)
+ slog.Info("gemini_precheck_daily_quota_reached", "account_id", account.ID, "used", used, "limit", limit, "reset_at", resetAt)
return false, nil
}
}
@@ -231,7 +311,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account,
if used >= limit {
resetAt := start.Add(time.Minute)
// Do not persist "rate limited" status from local precheck. See note above.
- log.Printf("[Gemini PreCheck] Account %d reached minute quota (%d/%d), skip until %v", account.ID, used, limit, resetAt)
+ slog.Info("gemini_precheck_minute_quota_reached", "account_id", account.ID, "used", used, "limit", limit, "reset_at", resetAt)
return false, nil
}
}
@@ -288,20 +368,20 @@ func (s *RateLimitService) GeminiCooldown(ctx context.Context, account *Account)
// handleAuthError handles authentication errors (401/403) and stops account scheduling
func (s *RateLimitService) handleAuthError(ctx context.Context, account *Account, errorMsg string) {
if err := s.accountRepo.SetError(ctx, account.ID, errorMsg); err != nil {
- log.Printf("SetError failed for account %d: %v", account.ID, err)
+ slog.Warn("account_set_error_failed", "account_id", account.ID, "error", err)
return
}
- log.Printf("Account %d disabled due to auth error: %s", account.ID, errorMsg)
+ slog.Warn("account_disabled_auth_error", "account_id", account.ID, "error", errorMsg)
}
// handleCustomErrorCode handles custom error codes and stops account scheduling
func (s *RateLimitService) handleCustomErrorCode(ctx context.Context, account *Account, statusCode int, errorMsg string) {
msg := "Custom error code " + strconv.Itoa(statusCode) + ": " + errorMsg
if err := s.accountRepo.SetError(ctx, account.ID, msg); err != nil {
- log.Printf("SetError failed for account %d: %v", account.ID, err)
+ slog.Warn("account_set_error_failed", "account_id", account.ID, "status_code", statusCode, "error", err)
return
}
- log.Printf("Account %d disabled due to custom error code %d: %s", account.ID, statusCode, errorMsg)
+ slog.Warn("account_disabled_custom_error", "account_id", account.ID, "status_code", statusCode, "error", errorMsg)
}
// handle429 handles 429 rate-limit errors
@@ -313,7 +393,7 @@ func (s *RateLimitService) handle429(ctx context.Context, account *Account, head
// No reset time; fall back to the default 5 minutes
resetAt := time.Now().Add(5 * time.Minute)
if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetAt); err != nil {
- log.Printf("SetRateLimited failed for account %d: %v", account.ID, err)
+ slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err)
}
return
}
@@ -321,10 +401,10 @@ func (s *RateLimitService) handle429(ctx context.Context, account *Account, head
// Parse the Unix timestamp
ts, err := strconv.ParseInt(resetTimestamp, 10, 64)
if err != nil {
- log.Printf("Parse reset timestamp failed: %v", err)
+ slog.Warn("rate_limit_reset_parse_failed", "reset_timestamp", resetTimestamp, "error", err)
resetAt := time.Now().Add(5 * time.Minute)
if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetAt); err != nil {
- log.Printf("SetRateLimited failed for account %d: %v", account.ID, err)
+ slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err)
}
return
}
@@ -333,7 +413,7 @@ func (s *RateLimitService) handle429(ctx context.Context, account *Account, head
// Mark the account as rate limited
if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetAt); err != nil {
- log.Printf("SetRateLimited failed for account %d: %v", account.ID, err)
+ slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err)
return
}
@@ -341,10 +421,10 @@ func (s *RateLimitService) handle429(ctx context.Context, account *Account, head
windowEnd := resetAt
windowStart := resetAt.Add(-5 * time.Hour)
if err := s.accountRepo.UpdateSessionWindow(ctx, account.ID, &windowStart, &windowEnd, "rejected"); err != nil {
- log.Printf("UpdateSessionWindow failed for account %d: %v", account.ID, err)
+ slog.Warn("rate_limit_update_session_window_failed", "account_id", account.ID, "error", err)
}
- log.Printf("Account %d rate limited until %v", account.ID, resetAt)
+ slog.Info("account_rate_limited", "account_id", account.ID, "reset_at", resetAt)
}
// handle529 handles 529 overload errors
@@ -357,11 +437,11 @@ func (s *RateLimitService) handle529(ctx context.Context, account *Account) {
until := time.Now().Add(time.Duration(cooldownMinutes) * time.Minute)
if err := s.accountRepo.SetOverloaded(ctx, account.ID, until); err != nil {
- log.Printf("SetOverloaded failed for account %d: %v", account.ID, err)
+ slog.Warn("overload_set_failed", "account_id", account.ID, "error", err)
return
}
- log.Printf("Account %d overloaded until %v", account.ID, until)
+ slog.Info("account_overloaded", "account_id", account.ID, "until", until)
}
// UpdateSessionWindow updates the 5h window state from a successful response
@@ -384,17 +464,17 @@ func (s *RateLimitService) UpdateSessionWindow(ctx context.Context, account *Acc
end := start.Add(5 * time.Hour)
windowStart = &start
windowEnd = &end
- log.Printf("Account %d: initializing 5h window from %v to %v (status: %s)", account.ID, start, end, status)
+ slog.Info("account_session_window_initialized", "account_id", account.ID, "window_start", start, "window_end", end, "status", status)
}
if err := s.accountRepo.UpdateSessionWindow(ctx, account.ID, windowStart, windowEnd, status); err != nil {
- log.Printf("UpdateSessionWindow failed for account %d: %v", account.ID, err)
+ slog.Warn("session_window_update_failed", "account_id", account.ID, "error", err)
}
// If the status is "allowed" and the account was previously rate limited, the window has reset; clear the rate limit
if status == "allowed" && account.IsRateLimited() {
if err := s.ClearRateLimit(ctx, account.ID); err != nil {
- log.Printf("ClearRateLimit failed for account %d: %v", account.ID, err)
+ slog.Warn("rate_limit_clear_failed", "account_id", account.ID, "error", err)
}
}
}
@@ -413,7 +493,7 @@ func (s *RateLimitService) ClearTempUnschedulable(ctx context.Context, accountID
}
if s.tempUnschedCache != nil {
if err := s.tempUnschedCache.DeleteTempUnsched(ctx, accountID); err != nil {
- log.Printf("DeleteTempUnsched failed for account %d: %v", accountID, err)
+ slog.Warn("temp_unsched_cache_delete_failed", "account_id", accountID, "error", err)
}
}
return nil
@@ -460,7 +540,7 @@ func (s *RateLimitService) GetTempUnschedStatus(ctx context.Context, accountID i
if s.tempUnschedCache != nil {
if err := s.tempUnschedCache.SetTempUnsched(ctx, accountID, state); err != nil {
- log.Printf("SetTempUnsched failed for account %d: %v", accountID, err)
+ slog.Warn("temp_unsched_cache_set_failed", "account_id", accountID, "error", err)
}
}
@@ -563,17 +643,17 @@ func (s *RateLimitService) triggerTempUnschedulable(ctx context.Context, account
}
if err := s.accountRepo.SetTempUnschedulable(ctx, account.ID, until, reason); err != nil {
- log.Printf("SetTempUnschedulable failed for account %d: %v", account.ID, err)
+ slog.Warn("temp_unsched_set_failed", "account_id", account.ID, "error", err)
return false
}
if s.tempUnschedCache != nil {
if err := s.tempUnschedCache.SetTempUnsched(ctx, account.ID, state); err != nil {
- log.Printf("SetTempUnsched cache failed for account %d: %v", account.ID, err)
+ slog.Warn("temp_unsched_cache_set_failed", "account_id", account.ID, "error", err)
}
}
- log.Printf("Account %d temp unschedulable until %v (rule %d, code %d)", account.ID, until, ruleIndex, statusCode)
+ slog.Info("account_temp_unschedulable", "account_id", account.ID, "until", until, "rule_index", ruleIndex, "status_code", statusCode)
return true
}
@@ -597,13 +677,13 @@ func (s *RateLimitService) HandleStreamTimeout(ctx context.Context, account *Acc
// Fetch system settings
if s.settingService == nil {
- log.Printf("[StreamTimeout] settingService not configured, skipping timeout handling for account %d", account.ID)
+ slog.Warn("stream_timeout_setting_service_missing", "account_id", account.ID)
return false
}
settings, err := s.settingService.GetStreamTimeoutSettings(ctx)
if err != nil {
- log.Printf("[StreamTimeout] Failed to get settings: %v", err)
+ slog.Warn("stream_timeout_get_settings_failed", "account_id", account.ID, "error", err)
return false
}
@@ -620,14 +700,13 @@ func (s *RateLimitService) HandleStreamTimeout(ctx context.Context, account *Acc
if s.timeoutCounterCache != nil {
count, err = s.timeoutCounterCache.IncrementTimeoutCount(ctx, account.ID, settings.ThresholdWindowMinutes)
if err != nil {
- log.Printf("[StreamTimeout] Failed to increment timeout count for account %d: %v", account.ID, err)
+ slog.Warn("stream_timeout_increment_count_failed", "account_id", account.ID, "error", err)
// Continue processing with count=1
count = 1
}
}
- log.Printf("[StreamTimeout] Account %d timeout count: %d/%d (window: %d min, model: %s)",
- account.ID, count, settings.ThresholdCount, settings.ThresholdWindowMinutes, model)
+ slog.Info("stream_timeout_count", "account_id", account.ID, "count", count, "threshold", settings.ThresholdCount, "window_minutes", settings.ThresholdWindowMinutes, "model", model)
// Check whether the threshold has been reached
if count < int64(settings.ThresholdCount) {
@@ -668,24 +747,24 @@ func (s *RateLimitService) triggerStreamTimeoutTempUnsched(ctx context.Context,
}
if err := s.accountRepo.SetTempUnschedulable(ctx, account.ID, until, reason); err != nil {
- log.Printf("[StreamTimeout] SetTempUnschedulable failed for account %d: %v", account.ID, err)
+ slog.Warn("stream_timeout_set_temp_unsched_failed", "account_id", account.ID, "error", err)
return false
}
if s.tempUnschedCache != nil {
if err := s.tempUnschedCache.SetTempUnsched(ctx, account.ID, state); err != nil {
- log.Printf("[StreamTimeout] SetTempUnsched cache failed for account %d: %v", account.ID, err)
+ slog.Warn("stream_timeout_set_temp_unsched_cache_failed", "account_id", account.ID, "error", err)
}
}
// Reset the timeout counter
if s.timeoutCounterCache != nil {
if err := s.timeoutCounterCache.ResetTimeoutCount(ctx, account.ID); err != nil {
- log.Printf("[StreamTimeout] ResetTimeoutCount failed for account %d: %v", account.ID, err)
+ slog.Warn("stream_timeout_reset_count_failed", "account_id", account.ID, "error", err)
}
}
- log.Printf("[StreamTimeout] Account %d marked as temp unschedulable until %v (model: %s)", account.ID, until, model)
+ slog.Info("stream_timeout_temp_unschedulable", "account_id", account.ID, "until", until, "model", model)
return true
}
@@ -694,17 +773,17 @@ func (s *RateLimitService) triggerStreamTimeoutError(ctx context.Context, accoun
errorMsg := "Stream data interval timeout (repeated failures) for model: " + model
if err := s.accountRepo.SetError(ctx, account.ID, errorMsg); err != nil {
- log.Printf("[StreamTimeout] SetError failed for account %d: %v", account.ID, err)
+ slog.Warn("stream_timeout_set_error_failed", "account_id", account.ID, "error", err)
return false
}
// Reset the timeout counter
if s.timeoutCounterCache != nil {
if err := s.timeoutCounterCache.ResetTimeoutCount(ctx, account.ID); err != nil {
- log.Printf("[StreamTimeout] ResetTimeoutCount failed for account %d: %v", account.ID, err)
+ slog.Warn("stream_timeout_reset_count_failed", "account_id", account.ID, "error", err)
}
}
- log.Printf("[StreamTimeout] Account %d marked as error (model: %s)", account.ID, model)
+ slog.Warn("stream_timeout_account_error", "account_id", account.ID, "model", model)
return true
}
diff --git a/backend/internal/service/ratelimit_service_401_test.go b/backend/internal/service/ratelimit_service_401_test.go
new file mode 100644
index 00000000..2c43b1cf
--- /dev/null
+++ b/backend/internal/service/ratelimit_service_401_test.go
@@ -0,0 +1,353 @@
+//go:build unit
+
+package service
+
+import (
+ "context"
+ "errors"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/Wei-Shaw/sub2api/internal/config"
+ "github.com/stretchr/testify/require"
+)
+
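+// rateLimitAccountRepoStub records SetTempUnschedulable and SetError calls for assertions.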
+type rateLimitAccountRepoStub struct {
+ mockAccountRepoForGemini
+ tempCalls int
+ tempUntil time.Time
+ tempReason string
+ setErrorCalls int
+}
+
+func (r *rateLimitAccountRepoStub) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
+ r.tempCalls++
+ r.tempUntil = until
+ r.tempReason = reason
+ return nil
+}
+
+func (r *rateLimitAccountRepoStub) SetError(ctx context.Context, id int64, errorMsg string) error {
+ r.setErrorCalls++
+ return nil
+}
+
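+// tokenCacheInvalidatorRecorder records every invalidated account and can return a canned error.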
+type tokenCacheInvalidatorRecorder struct {
+ accounts []*Account
+ err error
+}
+
+func (r *tokenCacheInvalidatorRecorder) InvalidateToken(ctx context.Context, account *Account) error {
+ r.accounts = append(r.accounts, account)
+ return r.err
+}
+
+func TestRateLimitService_HandleUpstreamError_OAuth401TempUnschedulable(t *testing.T) {
+ tests := []struct {
+ name string
+ platform string
+ }{
+ {name: "gemini", platform: PlatformGemini},
+ {name: "antigravity", platform: PlatformAntigravity},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ repo := &rateLimitAccountRepoStub{}
+ invalidator := &tokenCacheInvalidatorRecorder{}
+ service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+ service.SetTokenCacheInvalidator(invalidator)
+ account := &Account{
+ ID: 100,
+ Platform: tt.platform,
+ Type: AccountTypeOAuth,
+ }
+
+ start := time.Now()
+ shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
+
+ require.True(t, shouldDisable)
+ require.Equal(t, 1, repo.tempCalls)
+ require.Equal(t, 0, repo.setErrorCalls)
+ require.Len(t, invalidator.accounts, 1)
+ require.WithinDuration(t, start.Add(5*time.Minute), repo.tempUntil, 10*time.Second)
+ require.NotEmpty(t, repo.tempReason)
+ })
+ }
+}
+
+func TestRateLimitService_HandleUpstreamError_OAuth401InvalidatorError(t *testing.T) {
+ repo := &rateLimitAccountRepoStub{}
+ invalidator := &tokenCacheInvalidatorRecorder{err: errors.New("boom")}
+ service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+ service.SetTokenCacheInvalidator(invalidator)
+ account := &Account{
+ ID: 101,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+
+ shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
+
+ require.True(t, shouldDisable)
+ require.Equal(t, 1, repo.tempCalls)
+ require.Equal(t, 0, repo.setErrorCalls)
+ require.Len(t, invalidator.accounts, 1)
+}
+
+func TestRateLimitService_HandleUpstreamError_OAuth401CustomRule(t *testing.T) {
+ repo := &rateLimitAccountRepoStub{}
+ invalidator := &tokenCacheInvalidatorRecorder{}
+ service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+ service.SetTokenCacheInvalidator(invalidator)
+ account := &Account{
+ ID: 103,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ Credentials: map[string]any{
+ "temp_unschedulable_enabled": true,
+ "temp_unschedulable_rules": []any{
+ map[string]any{
+ "error_code": 401,
+ "keywords": []any{"unauthorized"},
+ "duration_minutes": 30,
+ "description": "custom rule",
+ },
+ },
+ },
+ }
+
+ start := time.Now()
+ shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
+
+ require.True(t, shouldDisable)
+ require.Equal(t, 1, repo.tempCalls)
+ require.Equal(t, 0, repo.setErrorCalls)
+ require.Len(t, invalidator.accounts, 1)
+ require.WithinDuration(t, start.Add(30*time.Minute), repo.tempUntil, 10*time.Second)
+}
+
+func TestRateLimitService_HandleUpstreamError_NonOAuth401(t *testing.T) {
+ repo := &rateLimitAccountRepoStub{}
+ invalidator := &tokenCacheInvalidatorRecorder{}
+ service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+ service.SetTokenCacheInvalidator(invalidator)
+ account := &Account{
+ ID: 102,
+ Platform: PlatformOpenAI,
+ Type: AccountTypeAPIKey,
+ }
+
+ shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
+
+ require.True(t, shouldDisable)
+ require.Equal(t, 0, repo.tempCalls)
+ require.Equal(t, 1, repo.setErrorCalls)
+ require.Empty(t, invalidator.accounts)
+}
+
+// TestRateLimitService_HandleOAuth401_NilAccount covers a nil account.
+func TestRateLimitService_HandleOAuth401_NilAccount(t *testing.T) {
+ repo := &rateLimitAccountRepoStub{}
+ service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+
+ result := service.handleOAuth401TempUnschedulable(context.Background(), nil, "error")
+
+ require.False(t, result)
+ require.Equal(t, 0, repo.tempCalls)
+}
+
+// TestRateLimitService_HandleOAuth401_NilInvalidator covers a nil tokenCacheInvalidator.
+func TestRateLimitService_HandleOAuth401_NilInvalidator(t *testing.T) {
+ repo := &rateLimitAccountRepoStub{}
+ service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+ // tokenCacheInvalidator intentionally not set
+ account := &Account{
+ ID: 200,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+
+ result := service.handleOAuth401TempUnschedulable(context.Background(), account, "error")
+
+ require.True(t, result)
+ require.Equal(t, 1, repo.tempCalls)
+}
+
+// TestRateLimitService_HandleOAuth401_SetTempUnschedulableFailed covers a SetTempUnschedulable failure.
+func TestRateLimitService_HandleOAuth401_SetTempUnschedulableFailed(t *testing.T) {
+ repo := &rateLimitAccountRepoStubWithError{
+ setTempErr: errors.New("db error"),
+ }
+ invalidator := &tokenCacheInvalidatorRecorder{}
+ service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+ service.SetTokenCacheInvalidator(invalidator)
+ account := &Account{
+ ID: 201,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+
+ result := service.handleOAuth401TempUnschedulable(context.Background(), account, "error")
+
+ require.False(t, result) // failure should return false
+ require.Len(t, invalidator.accounts, 1) // but the invalidator is still called
+}
+
+// rateLimitAccountRepoStubWithError is a stub whose SetTempUnschedulable returns a canned error.
+type rateLimitAccountRepoStubWithError struct {
+ mockAccountRepoForGemini
+ setTempErr error
+ setErrorCalls int
+}
+
+func (r *rateLimitAccountRepoStubWithError) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
+ return r.setTempErr
+}
+
+func (r *rateLimitAccountRepoStubWithError) SetError(ctx context.Context, id int64, errorMsg string) error {
+ r.setErrorCalls++
+ return nil
+}
+
+// TestRateLimitService_HandleOAuth401_WithTempUnschedCache covers the case where tempUnschedCache is present.
+func TestRateLimitService_HandleOAuth401_WithTempUnschedCache(t *testing.T) {
+ repo := &rateLimitAccountRepoStub{}
+ invalidator := &tokenCacheInvalidatorRecorder{}
+ tempCache := &tempUnschedCacheStub{}
+ service := NewRateLimitService(repo, nil, &config.Config{}, nil, tempCache)
+ service.SetTokenCacheInvalidator(invalidator)
+ account := &Account{
+ ID: 202,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+
+ result := service.handleOAuth401TempUnschedulable(context.Background(), account, "error")
+
+ require.True(t, result)
+ require.Equal(t, 1, repo.tempCalls)
+ require.Equal(t, 1, tempCache.setCalls)
+}
+
+// TestRateLimitService_HandleOAuth401_TempUnschedCacheError covers a tempUnschedCache set failure.
+func TestRateLimitService_HandleOAuth401_TempUnschedCacheError(t *testing.T) {
+ repo := &rateLimitAccountRepoStub{}
+ invalidator := &tokenCacheInvalidatorRecorder{}
+ tempCache := &tempUnschedCacheStub{setErr: errors.New("cache error")}
+ service := NewRateLimitService(repo, nil, &config.Config{}, nil, tempCache)
+ service.SetTokenCacheInvalidator(invalidator)
+ account := &Account{
+ ID: 203,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+
+ result := service.handleOAuth401TempUnschedulable(context.Background(), account, "error")
+
+ require.True(t, result) // a cache error must not break the main flow
+ require.Equal(t, 1, repo.tempCalls)
+}
+
+// tempUnschedCacheStub is a TempUnschedCache stub for tests.
+type tempUnschedCacheStub struct {
+ setCalls int
+ setErr error
+}
+
+func (c *tempUnschedCacheStub) GetTempUnsched(ctx context.Context, accountID int64) (*TempUnschedState, error) {
+ return nil, nil
+}
+
+func (c *tempUnschedCacheStub) SetTempUnsched(ctx context.Context, accountID int64, state *TempUnschedState) error {
+ c.setCalls++
+ return c.setErr
+}
+
+func (c *tempUnschedCacheStub) DeleteTempUnsched(ctx context.Context, accountID int64) error {
+ return nil
+}
+
+// TestRateLimitService_OAuth401Cooldown covers the oauth401Cooldown helper.
+func TestRateLimitService_OAuth401Cooldown(t *testing.T) {
+ tests := []struct {
+ name string
+ cfg *config.Config
+ expected time.Duration
+ }{
+ {
+ name: "default_when_config_zero",
+ cfg: &config.Config{RateLimit: config.RateLimitConfig{OAuth401CooldownMinutes: 0}},
+ expected: 5 * time.Minute,
+ },
+ {
+ name: "custom_cooldown_10_minutes",
+ cfg: &config.Config{RateLimit: config.RateLimitConfig{OAuth401CooldownMinutes: 10}},
+ expected: 10 * time.Minute,
+ },
+ {
+ name: "custom_cooldown_1_minute",
+ cfg: &config.Config{RateLimit: config.RateLimitConfig{OAuth401CooldownMinutes: 1}},
+ expected: 1 * time.Minute,
+ },
+ {
+ name: "negative_value_uses_default",
+ cfg: &config.Config{RateLimit: config.RateLimitConfig{OAuth401CooldownMinutes: -5}},
+ expected: 5 * time.Minute,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ service := NewRateLimitService(nil, nil, tt.cfg, nil, nil)
+ result := service.oauth401Cooldown()
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
+
+// TestRateLimitService_OAuth401Cooldown_NilConfig covers a nil cfg.
+func TestRateLimitService_OAuth401Cooldown_NilConfig(t *testing.T) {
+ service := &RateLimitService{cfg: nil}
+ result := service.oauth401Cooldown()
+ require.Equal(t, 5*time.Minute, result)
+}
+
+// TestRateLimitService_HandleOAuth401_WithCustomCooldown covers a custom cooldown configuration.
+func TestRateLimitService_HandleOAuth401_WithCustomCooldown(t *testing.T) {
+ repo := &rateLimitAccountRepoStub{}
+ cfg := &config.Config{
+ RateLimit: config.RateLimitConfig{
+ OAuth401CooldownMinutes: 15,
+ },
+ }
+ service := NewRateLimitService(repo, nil, cfg, nil, nil)
+ account := &Account{
+ ID: 204,
+ Platform: PlatformAntigravity,
+ Type: AccountTypeOAuth,
+ }
+
+ start := time.Now()
+ result := service.handleOAuth401TempUnschedulable(context.Background(), account, "error")
+
+ require.True(t, result)
+ require.WithinDuration(t, start.Add(15*time.Minute), repo.tempUntil, 10*time.Second)
+}
+
+// TestRateLimitService_HandleOAuth401_EmptyUpstreamMsg covers an empty upstreamMsg.
+func TestRateLimitService_HandleOAuth401_EmptyUpstreamMsg(t *testing.T) {
+ repo := &rateLimitAccountRepoStub{}
+ service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
+ account := &Account{
+ ID: 205,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+
+ result := service.handleOAuth401TempUnschedulable(context.Background(), account, "")
+
+ require.True(t, result)
+ require.Contains(t, repo.tempReason, "Authentication failed (401)")
+}
diff --git a/backend/internal/service/token_cache_invalidator.go b/backend/internal/service/token_cache_invalidator.go
new file mode 100644
index 00000000..aacdf266
--- /dev/null
+++ b/backend/internal/service/token_cache_invalidator.go
@@ -0,0 +1,35 @@
+package service
+
+import "context"
+
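+// TokenCacheInvalidator removes cached access tokens for an account once they are known to be stale.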
+type TokenCacheInvalidator interface {
+ InvalidateToken(ctx context.Context, account *Account) error
+}
+
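+// CompositeTokenCacheInvalidator routes invalidation to the cache that owns the
+// account's token; Gemini and Antigravity OAuth accounts currently share the
+// Gemini token cache.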
+type CompositeTokenCacheInvalidator struct {
+ geminiCache GeminiTokenCache
+}
+
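+// NewCompositeTokenCacheInvalidator builds an invalidator backed by the shared Gemini token cache.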
+func NewCompositeTokenCacheInvalidator(geminiCache GeminiTokenCache) *CompositeTokenCacheInvalidator {
+ return &CompositeTokenCacheInvalidator{
+ geminiCache: geminiCache,
+ }
+}
+
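+// InvalidateToken deletes the cached access token for Gemini and Antigravity
+// OAuth accounts; all other accounts are a no-op.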
+func (c *CompositeTokenCacheInvalidator) InvalidateToken(ctx context.Context, account *Account) error {
+ if c == nil || c.geminiCache == nil || account == nil {
+ return nil
+ }
+ if account.Type != AccountTypeOAuth {
+ return nil
+ }
+
+ switch account.Platform {
+ case PlatformGemini:
+ return c.geminiCache.DeleteAccessToken(ctx, GeminiTokenCacheKey(account))
+ case PlatformAntigravity:
+ return c.geminiCache.DeleteAccessToken(ctx, AntigravityTokenCacheKey(account))
+ default:
+ return nil
+ }
+}
diff --git a/backend/internal/service/token_cache_invalidator_test.go b/backend/internal/service/token_cache_invalidator_test.go
new file mode 100644
index 00000000..0090ed24
--- /dev/null
+++ b/backend/internal/service/token_cache_invalidator_test.go
@@ -0,0 +1,97 @@
+//go:build unit
+
+package service
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+)
+
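+// geminiTokenCacheStub implements GeminiTokenCache and records the cache keys passed to DeleteAccessToken.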
+type geminiTokenCacheStub struct {
+ deletedKeys []string
+ deleteErr error
+}
+
+func (s *geminiTokenCacheStub) GetAccessToken(ctx context.Context, cacheKey string) (string, error) {
+ return "", nil
+}
+
+func (s *geminiTokenCacheStub) SetAccessToken(ctx context.Context, cacheKey string, token string, ttl time.Duration) error {
+ return nil
+}
+
+func (s *geminiTokenCacheStub) DeleteAccessToken(ctx context.Context, cacheKey string) error {
+ s.deletedKeys = append(s.deletedKeys, cacheKey)
+ return s.deleteErr
+}
+
+func (s *geminiTokenCacheStub) AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error) {
+ return true, nil
+}
+
+func (s *geminiTokenCacheStub) ReleaseRefreshLock(ctx context.Context, cacheKey string) error {
+ return nil
+}
+
+func TestCompositeTokenCacheInvalidator_Gemini(t *testing.T) {
+ cache := &geminiTokenCacheStub{}
+ invalidator := NewCompositeTokenCacheInvalidator(cache)
+ account := &Account{
+ ID: 10,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ Credentials: map[string]any{
+ "project_id": "project-x",
+ },
+ }
+
+ err := invalidator.InvalidateToken(context.Background(), account)
+ require.NoError(t, err)
+ require.Equal(t, []string{"project-x"}, cache.deletedKeys)
+}
+
+func TestCompositeTokenCacheInvalidator_Antigravity(t *testing.T) {
+ cache := &geminiTokenCacheStub{}
+ invalidator := NewCompositeTokenCacheInvalidator(cache)
+ account := &Account{
+ ID: 99,
+ Platform: PlatformAntigravity,
+ Type: AccountTypeOAuth,
+ Credentials: map[string]any{
+ "project_id": "ag-project",
+ },
+ }
+
+ err := invalidator.InvalidateToken(context.Background(), account)
+ require.NoError(t, err)
+ require.Equal(t, []string{"ag:ag-project"}, cache.deletedKeys)
+}
+
+func TestCompositeTokenCacheInvalidator_SkipNonOAuth(t *testing.T) {
+ cache := &geminiTokenCacheStub{}
+ invalidator := NewCompositeTokenCacheInvalidator(cache)
+ account := &Account{
+ ID: 1,
+ Platform: PlatformGemini,
+ Type: AccountTypeAPIKey,
+ }
+
+ err := invalidator.InvalidateToken(context.Background(), account)
+ require.NoError(t, err)
+ require.Empty(t, cache.deletedKeys)
+}
+
+func TestCompositeTokenCacheInvalidator_NilCache(t *testing.T) {
+ invalidator := NewCompositeTokenCacheInvalidator(nil)
+ account := &Account{
+ ID: 2,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+
+ err := invalidator.InvalidateToken(context.Background(), account)
+ require.NoError(t, err)
+}
diff --git a/backend/internal/service/token_cache_key_test.go b/backend/internal/service/token_cache_key_test.go
new file mode 100644
index 00000000..0dc751c6
--- /dev/null
+++ b/backend/internal/service/token_cache_key_test.go
@@ -0,0 +1,153 @@
+//go:build unit
+
+package service
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestGeminiTokenCacheKey(t *testing.T) {
+ tests := []struct {
+ name string
+ account *Account
+ expected string
+ }{
+ {
+ name: "with_project_id",
+ account: &Account{
+ ID: 100,
+ Credentials: map[string]any{
+ "project_id": "my-project-123",
+ },
+ },
+ expected: "my-project-123",
+ },
+ {
+ name: "project_id_with_whitespace",
+ account: &Account{
+ ID: 101,
+ Credentials: map[string]any{
+ "project_id": " project-with-spaces ",
+ },
+ },
+ expected: "project-with-spaces",
+ },
+ {
+ name: "empty_project_id_fallback_to_account_id",
+ account: &Account{
+ ID: 102,
+ Credentials: map[string]any{
+ "project_id": "",
+ },
+ },
+ expected: "account:102",
+ },
+ {
+ name: "whitespace_only_project_id_fallback_to_account_id",
+ account: &Account{
+ ID: 103,
+ Credentials: map[string]any{
+ "project_id": " ",
+ },
+ },
+ expected: "account:103",
+ },
+ {
+ name: "no_project_id_key_fallback_to_account_id",
+ account: &Account{
+ ID: 104,
+ Credentials: map[string]any{},
+ },
+ expected: "account:104",
+ },
+ {
+ name: "nil_credentials_fallback_to_account_id",
+ account: &Account{
+ ID: 105,
+ Credentials: nil,
+ },
+ expected: "account:105",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := GeminiTokenCacheKey(tt.account)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
+
+func TestAntigravityTokenCacheKey(t *testing.T) {
+ tests := []struct {
+ name string
+ account *Account
+ expected string
+ }{
+ {
+ name: "with_project_id",
+ account: &Account{
+ ID: 200,
+ Credentials: map[string]any{
+ "project_id": "ag-project-456",
+ },
+ },
+ expected: "ag:ag-project-456",
+ },
+ {
+ name: "project_id_with_whitespace",
+ account: &Account{
+ ID: 201,
+ Credentials: map[string]any{
+ "project_id": " ag-project-spaces ",
+ },
+ },
+ expected: "ag:ag-project-spaces",
+ },
+ {
+ name: "empty_project_id_fallback_to_account_id",
+ account: &Account{
+ ID: 202,
+ Credentials: map[string]any{
+ "project_id": "",
+ },
+ },
+ expected: "ag:account:202",
+ },
+ {
+ name: "whitespace_only_project_id_fallback_to_account_id",
+ account: &Account{
+ ID: 203,
+ Credentials: map[string]any{
+ "project_id": " ",
+ },
+ },
+ expected: "ag:account:203",
+ },
+ {
+ name: "no_project_id_key_fallback_to_account_id",
+ account: &Account{
+ ID: 204,
+ Credentials: map[string]any{},
+ },
+ expected: "ag:account:204",
+ },
+ {
+ name: "nil_credentials_fallback_to_account_id",
+ account: &Account{
+ ID: 205,
+ Credentials: nil,
+ },
+ expected: "ag:account:205",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := AntigravityTokenCacheKey(tt.account)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
diff --git a/backend/internal/service/token_refresh_service.go b/backend/internal/service/token_refresh_service.go
index 3ed35f04..4d513d07 100644
--- a/backend/internal/service/token_refresh_service.go
+++ b/backend/internal/service/token_refresh_service.go
@@ -14,9 +14,10 @@ import (
// TokenRefreshService automatically refreshes OAuth tokens.
// It periodically checks for and refreshes tokens that are about to expire.
type TokenRefreshService struct {
- accountRepo AccountRepository
- refreshers []TokenRefresher
- cfg *config.TokenRefreshConfig
+ accountRepo AccountRepository
+ refreshers []TokenRefresher
+ cfg *config.TokenRefreshConfig
+ cacheInvalidator TokenCacheInvalidator
stopCh chan struct{}
wg sync.WaitGroup
@@ -29,12 +30,14 @@ func NewTokenRefreshService(
openaiOAuthService *OpenAIOAuthService,
geminiOAuthService *GeminiOAuthService,
antigravityOAuthService *AntigravityOAuthService,
+ cacheInvalidator TokenCacheInvalidator,
cfg *config.Config,
) *TokenRefreshService {
s := &TokenRefreshService{
- accountRepo: accountRepo,
- cfg: &cfg.TokenRefresh,
- stopCh: make(chan struct{}),
+ accountRepo: accountRepo,
+ cfg: &cfg.TokenRefresh,
+ cacheInvalidator: cacheInvalidator,
+ stopCh: make(chan struct{}),
}
// Register platform-specific refreshers
@@ -169,6 +172,14 @@ func (s *TokenRefreshService) refreshWithRetry(ctx context.Context, account *Acc
if err := s.accountRepo.Update(ctx, account); err != nil {
return fmt.Errorf("failed to save credentials: %w", err)
}
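+ // Credentials saved; drop the stale cached access token so the next request
+ // picks up the refreshed one immediately.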
+ if s.cacheInvalidator != nil && account.Type == AccountTypeOAuth &&
+ (account.Platform == PlatformGemini || account.Platform == PlatformAntigravity) {
+ if err := s.cacheInvalidator.InvalidateToken(ctx, account); err != nil {
+ log.Printf("[TokenRefresh] Failed to invalidate token cache for account %d: %v", account.ID, err)
+ } else {
+ log.Printf("[TokenRefresh] Token cache invalidated for account %d", account.ID)
+ }
+ }
return nil
}
diff --git a/backend/internal/service/token_refresh_service_test.go b/backend/internal/service/token_refresh_service_test.go
new file mode 100644
index 00000000..b11a0adc
--- /dev/null
+++ b/backend/internal/service/token_refresh_service_test.go
@@ -0,0 +1,361 @@
+//go:build unit
+
+package service
+
+import (
+ "context"
+ "errors"
+ "testing"
+ "time"
+
+ "github.com/Wei-Shaw/sub2api/internal/config"
+ "github.com/stretchr/testify/require"
+)
+
+type tokenRefreshAccountRepo struct {
+ mockAccountRepoForGemini
+ updateCalls int
+ setErrorCalls int
+ lastAccount *Account
+ updateErr error
+}
+
+func (r *tokenRefreshAccountRepo) Update(ctx context.Context, account *Account) error {
+ r.updateCalls++
+ r.lastAccount = account
+ return r.updateErr
+}
+
+func (r *tokenRefreshAccountRepo) SetError(ctx context.Context, id int64, errorMsg string) error {
+ r.setErrorCalls++
+ return nil
+}
+
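+// tokenCacheInvalidatorStub counts InvalidateToken calls and can return a canned error.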
+type tokenCacheInvalidatorStub struct {
+ calls int
+ err error
+}
+
+func (s *tokenCacheInvalidatorStub) InvalidateToken(ctx context.Context, account *Account) error {
+ s.calls++
+ return s.err
+}
+
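+// tokenRefresherStub always reports refresh needed and returns fixed credentials or a fixed error.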
+type tokenRefresherStub struct {
+ credentials map[string]any
+ err error
+}
+
+func (r *tokenRefresherStub) CanRefresh(account *Account) bool {
+ return true
+}
+
+func (r *tokenRefresherStub) NeedsRefresh(account *Account, refreshWindowDuration time.Duration) bool {
+ return true
+}
+
+func (r *tokenRefresherStub) Refresh(ctx context.Context, account *Account) (map[string]any, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ return r.credentials, nil
+}
+
+func TestTokenRefreshService_RefreshWithRetry_InvalidatesCache(t *testing.T) {
+ repo := &tokenRefreshAccountRepo{}
+ invalidator := &tokenCacheInvalidatorStub{}
+ cfg := &config.Config{
+ TokenRefresh: config.TokenRefreshConfig{
+ MaxRetries: 1,
+ RetryBackoffSeconds: 0,
+ },
+ }
+ service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+ account := &Account{
+ ID: 5,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+ refresher := &tokenRefresherStub{
+ credentials: map[string]any{
+ "access_token": "new-token",
+ },
+ }
+
+ err := service.refreshWithRetry(context.Background(), account, refresher)
+ require.NoError(t, err)
+ require.Equal(t, 1, repo.updateCalls)
+ require.Equal(t, 1, invalidator.calls)
+ require.Equal(t, "new-token", account.GetCredential("access_token"))
+}
+
+func TestTokenRefreshService_RefreshWithRetry_InvalidatorErrorIgnored(t *testing.T) {
+ repo := &tokenRefreshAccountRepo{}
+ invalidator := &tokenCacheInvalidatorStub{err: errors.New("invalidate failed")}
+ cfg := &config.Config{
+ TokenRefresh: config.TokenRefreshConfig{
+ MaxRetries: 1,
+ RetryBackoffSeconds: 0,
+ },
+ }
+ service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+ account := &Account{
+ ID: 6,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+ refresher := &tokenRefresherStub{
+ credentials: map[string]any{
+ "access_token": "token",
+ },
+ }
+
+ err := service.refreshWithRetry(context.Background(), account, refresher)
+ require.NoError(t, err)
+ require.Equal(t, 1, repo.updateCalls)
+ require.Equal(t, 1, invalidator.calls)
+}
+
+func TestTokenRefreshService_RefreshWithRetry_NilInvalidator(t *testing.T) {
+ repo := &tokenRefreshAccountRepo{}
+ cfg := &config.Config{
+ TokenRefresh: config.TokenRefreshConfig{
+ MaxRetries: 1,
+ RetryBackoffSeconds: 0,
+ },
+ }
+ service := NewTokenRefreshService(repo, nil, nil, nil, nil, nil, cfg)
+ account := &Account{
+ ID: 7,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+ refresher := &tokenRefresherStub{
+ credentials: map[string]any{
+ "access_token": "token",
+ },
+ }
+
+ err := service.refreshWithRetry(context.Background(), account, refresher)
+ require.NoError(t, err)
+ require.Equal(t, 1, repo.updateCalls)
+}
+
+// TestTokenRefreshService_RefreshWithRetry_Antigravity covers cache invalidation for the Antigravity platform.
+func TestTokenRefreshService_RefreshWithRetry_Antigravity(t *testing.T) {
+ repo := &tokenRefreshAccountRepo{}
+ invalidator := &tokenCacheInvalidatorStub{}
+ cfg := &config.Config{
+ TokenRefresh: config.TokenRefreshConfig{
+ MaxRetries: 1,
+ RetryBackoffSeconds: 0,
+ },
+ }
+ service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+ account := &Account{
+ ID: 8,
+ Platform: PlatformAntigravity,
+ Type: AccountTypeOAuth,
+ }
+ refresher := &tokenRefresherStub{
+ credentials: map[string]any{
+ "access_token": "ag-token",
+ },
+ }
+
+ err := service.refreshWithRetry(context.Background(), account, refresher)
+ require.NoError(t, err)
+ require.Equal(t, 1, repo.updateCalls)
+ require.Equal(t, 1, invalidator.calls) // Antigravity should also trigger cache invalidation
+}
+
+// TestTokenRefreshService_RefreshWithRetry_NonOAuthAccount verifies non-OAuth accounts do not trigger cache invalidation.
+func TestTokenRefreshService_RefreshWithRetry_NonOAuthAccount(t *testing.T) {
+ repo := &tokenRefreshAccountRepo{}
+ invalidator := &tokenCacheInvalidatorStub{}
+ cfg := &config.Config{
+ TokenRefresh: config.TokenRefreshConfig{
+ MaxRetries: 1,
+ RetryBackoffSeconds: 0,
+ },
+ }
+ service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+ account := &Account{
+ ID: 9,
+ Platform: PlatformGemini,
+ Type: AccountTypeAPIKey, // not OAuth
+ }
+ refresher := &tokenRefresherStub{
+ credentials: map[string]any{
+ "access_token": "token",
+ },
+ }
+
+ err := service.refreshWithRetry(context.Background(), account, refresher)
+ require.NoError(t, err)
+ require.Equal(t, 1, repo.updateCalls)
+ require.Equal(t, 0, invalidator.calls) // non-OAuth accounts must not trigger invalidation
+}
+
+// TestTokenRefreshService_RefreshWithRetry_OtherPlatformOAuth verifies OAuth accounts on other platforms do not trigger cache invalidation.
+func TestTokenRefreshService_RefreshWithRetry_OtherPlatformOAuth(t *testing.T) {
+ repo := &tokenRefreshAccountRepo{}
+ invalidator := &tokenCacheInvalidatorStub{}
+ cfg := &config.Config{
+ TokenRefresh: config.TokenRefreshConfig{
+ MaxRetries: 1,
+ RetryBackoffSeconds: 0,
+ },
+ }
+ service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+ account := &Account{
+ ID: 10,
+ Platform: PlatformOpenAI, // a different platform
+ Type: AccountTypeOAuth,
+ }
+ refresher := &tokenRefresherStub{
+ credentials: map[string]any{
+ "access_token": "token",
+ },
+ }
+
+ err := service.refreshWithRetry(context.Background(), account, refresher)
+ require.NoError(t, err)
+ require.Equal(t, 1, repo.updateCalls)
+ require.Equal(t, 0, invalidator.calls) // other platforms must not trigger invalidation
+}
+
+// TestTokenRefreshService_RefreshWithRetry_UpdateFailed covers a repository update failure.
+func TestTokenRefreshService_RefreshWithRetry_UpdateFailed(t *testing.T) {
+ repo := &tokenRefreshAccountRepo{updateErr: errors.New("update failed")}
+ invalidator := &tokenCacheInvalidatorStub{}
+ cfg := &config.Config{
+ TokenRefresh: config.TokenRefreshConfig{
+ MaxRetries: 1,
+ RetryBackoffSeconds: 0,
+ },
+ }
+ service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+ account := &Account{
+ ID: 11,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+ refresher := &tokenRefresherStub{
+ credentials: map[string]any{
+ "access_token": "token",
+ },
+ }
+
+ err := service.refreshWithRetry(context.Background(), account, refresher)
+ require.Error(t, err)
+ require.Contains(t, err.Error(), "failed to save credentials")
+ require.Equal(t, 1, repo.updateCalls)
+ require.Equal(t, 0, invalidator.calls) // no invalidation when the update fails
+}
+
+// TestTokenRefreshService_RefreshWithRetry_RefreshFailed covers a refresher failure.
+func TestTokenRefreshService_RefreshWithRetry_RefreshFailed(t *testing.T) {
+ repo := &tokenRefreshAccountRepo{}
+ invalidator := &tokenCacheInvalidatorStub{}
+ cfg := &config.Config{
+ TokenRefresh: config.TokenRefreshConfig{
+ MaxRetries: 2,
+ RetryBackoffSeconds: 0,
+ },
+ }
+ service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+ account := &Account{
+ ID: 12,
+ Platform: PlatformGemini,
+ Type: AccountTypeOAuth,
+ }
+ refresher := &tokenRefresherStub{
+ err: errors.New("refresh failed"),
+ }
+
+ err := service.refreshWithRetry(context.Background(), account, refresher)
+ require.Error(t, err)
+ require.Equal(t, 0, repo.updateCalls) // a failed refresh must not update credentials
+ require.Equal(t, 0, invalidator.calls) // a failed refresh must not trigger cache invalidation
+ require.Equal(t, 1, repo.setErrorCalls) // the error status should be set
+}
+
+// TestTokenRefreshService_RefreshWithRetry_AntigravityRefreshFailed verifies that a retryable Antigravity refresh failure does not set the error status
+func TestTokenRefreshService_RefreshWithRetry_AntigravityRefreshFailed(t *testing.T) {
+ repo := &tokenRefreshAccountRepo{}
+ invalidator := &tokenCacheInvalidatorStub{}
+ cfg := &config.Config{
+ TokenRefresh: config.TokenRefreshConfig{
+ MaxRetries: 1,
+ RetryBackoffSeconds: 0,
+ },
+ }
+ service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+ account := &Account{
+ ID: 13,
+ Platform: PlatformAntigravity,
+ Type: AccountTypeOAuth,
+ }
+ refresher := &tokenRefresherStub{
+ err: errors.New("network error"), // 可重试错误
+ }
+
+ err := service.refreshWithRetry(context.Background(), account, refresher)
+ require.Error(t, err)
+ require.Equal(t, 0, repo.updateCalls)
+ require.Equal(t, 0, invalidator.calls)
+ require.Equal(t, 0, repo.setErrorCalls) // retryable Antigravity errors do not set the error status
+}
+
+// TestTokenRefreshService_RefreshWithRetry_AntigravityNonRetryableError covers a non-retryable Antigravity refresh error
+func TestTokenRefreshService_RefreshWithRetry_AntigravityNonRetryableError(t *testing.T) {
+ repo := &tokenRefreshAccountRepo{}
+ invalidator := &tokenCacheInvalidatorStub{}
+ cfg := &config.Config{
+ TokenRefresh: config.TokenRefreshConfig{
+ MaxRetries: 3,
+ RetryBackoffSeconds: 0,
+ },
+ }
+ service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg)
+ account := &Account{
+ ID: 14,
+ Platform: PlatformAntigravity,
+ Type: AccountTypeOAuth,
+ }
+ refresher := &tokenRefresherStub{
+ err: errors.New("invalid_grant: token revoked"), // 不可重试错误
+ }
+
+ err := service.refreshWithRetry(context.Background(), account, refresher)
+ require.Error(t, err)
+ require.Equal(t, 0, repo.updateCalls)
+ require.Equal(t, 0, invalidator.calls)
+ require.Equal(t, 1, repo.setErrorCalls) // non-retryable errors should set the error status
+}
+
+// TestIsNonRetryableRefreshError exercises the non-retryable refresh error detection
+func TestIsNonRetryableRefreshError(t *testing.T) {
+ tests := []struct {
+ name string
+ err error
+ expected bool
+ }{
+ {name: "nil_error", err: nil, expected: false},
+ {name: "network_error", err: errors.New("network timeout"), expected: false},
+ {name: "invalid_grant", err: errors.New("invalid_grant"), expected: true},
+ {name: "invalid_client", err: errors.New("invalid_client"), expected: true},
+ {name: "unauthorized_client", err: errors.New("unauthorized_client"), expected: true},
+ {name: "access_denied", err: errors.New("access_denied"), expected: true},
+ {name: "invalid_grant_with_desc", err: errors.New("Error: invalid_grant - token revoked"), expected: true},
+ {name: "case_insensitive", err: errors.New("INVALID_GRANT"), expected: true},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := isNonRetryableRefreshError(tt.err)
+ require.Equal(t, tt.expected, result)
+ })
+ }
+}
diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go
index 5326bace..05dbb0b0 100644
--- a/backend/internal/service/wire.go
+++ b/backend/internal/service/wire.go
@@ -42,9 +42,10 @@ func ProvideTokenRefreshService(
openaiOAuthService *OpenAIOAuthService,
geminiOAuthService *GeminiOAuthService,
antigravityOAuthService *AntigravityOAuthService,
+ cacheInvalidator TokenCacheInvalidator,
cfg *config.Config,
) *TokenRefreshService {
- svc := NewTokenRefreshService(accountRepo, oauthService, openaiOAuthService, geminiOAuthService, antigravityOAuthService, cfg)
+ svc := NewTokenRefreshService(accountRepo, oauthService, openaiOAuthService, geminiOAuthService, antigravityOAuthService, cacheInvalidator, cfg)
svc.Start()
return svc
}
@@ -108,10 +109,12 @@ func ProvideRateLimitService(
tempUnschedCache TempUnschedCache,
timeoutCounterCache TimeoutCounterCache,
settingService *SettingService,
+ tokenCacheInvalidator TokenCacheInvalidator,
) *RateLimitService {
svc := NewRateLimitService(accountRepo, usageRepo, cfg, geminiQuotaService, tempUnschedCache)
svc.SetTimeoutCounterCache(timeoutCounterCache)
svc.SetSettingService(settingService)
+ svc.SetTokenCacheInvalidator(tokenCacheInvalidator)
return svc
}
@@ -210,6 +213,7 @@ var ProviderSet = wire.NewSet(
NewOpenAIOAuthService,
NewGeminiOAuthService,
NewGeminiQuotaService,
+ NewCompositeTokenCacheInvalidator,
NewAntigravityOAuthService,
NewGeminiTokenProvider,
NewGeminiMessagesCompatService,
diff --git a/config.yaml b/config.yaml
index 424ce9eb..bd399874 100644
--- a/config.yaml
+++ b/config.yaml
@@ -387,6 +387,9 @@ rate_limit:
# Cooldown time (in minutes) when upstream returns 529 (overloaded)
# 上游返回 529(过载)时的冷却时间(分钟)
overload_cooldown_minutes: 10
+ # Cooldown time (in minutes) for the OAuth 401 temporary-unschedulable state
+ # OAuth 401 临时不可调度冷却时间(分钟)
+ oauth_401_cooldown_minutes: 5
# =============================================================================
# Pricing Data Source (Optional)
diff --git a/deploy/.env.example b/deploy/.env.example
index e5cf8b32..3af969ef 100644
--- a/deploy/.env.example
+++ b/deploy/.env.example
@@ -69,6 +69,17 @@ JWT_EXPIRE_HOUR=24
# Leave unset to use default ./config.yaml
#CONFIG_FILE=./config.yaml
+# -----------------------------------------------------------------------------
+# Rate Limiting (Optional)
+# 速率限制(可选)
+# -----------------------------------------------------------------------------
+# Cooldown time (in minutes) when upstream returns 529 (overloaded)
+# 上游返回 529(过载)时的冷却时间(分钟)
+RATE_LIMIT_OVERLOAD_COOLDOWN_MINUTES=10
+# Cooldown time (in minutes) for the OAuth 401 temporary-unschedulable state
+# OAuth 401 临时不可调度冷却时间(分钟)
+RATE_LIMIT_OAUTH_401_COOLDOWN_MINUTES=5
+
# -----------------------------------------------------------------------------
# Gateway Scheduling (Optional)
# 调度缓存与受控回源配置(缓存就绪且命中时不读 DB)
diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml
index ce2439f4..fa8a30c7 100644
--- a/deploy/config.example.yaml
+++ b/deploy/config.example.yaml
@@ -429,6 +429,9 @@ rate_limit:
# Cooldown time (in minutes) when upstream returns 529 (overloaded)
# 上游返回 529(过载)时的冷却时间(分钟)
overload_cooldown_minutes: 10
+ # Cooldown time (in minutes) for the OAuth 401 temporary-unschedulable state
+ # OAuth 401 临时不可调度冷却时间(分钟)
+ oauth_401_cooldown_minutes: 5
# =============================================================================
# Pricing Data Source (Optional)
From a458e684bc3a431d096fd5667e74103108c1f5d6 Mon Sep 17 00:00:00 2001
From: yangjianbo
Date: Thu, 15 Jan 2026 15:06:34 +0800
Subject: [PATCH 2/4] =?UTF-8?q?fix(=E8=AE=A4=E8=AF=81):=20OAuth=20401=20?=
=?UTF-8?q?=E7=9B=B4=E6=8E=A5=E6=A0=87=E8=AE=B0=E9=94=99=E8=AF=AF=E7=8A=B6?=
=?UTF-8?q?=E6=80=81?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
- OAuth 401 now invalidates the token cache and sets the account error status
- Remove the oauth_401_cooldown_minutes config option and its examples
- Update the 401-related unit tests
BREAKING CHANGE: OAuth 401 no longer marks the account temporarily unschedulable; manual recovery is required
---
backend/internal/config/config.go | 4 +-
backend/internal/service/ratelimit_service.go | 76 +----
.../service/ratelimit_service_401_test.go | 280 ++----------------
config.yaml | 3 -
deploy/.env.example | 3 -
deploy/config.example.yaml | 3 -
6 files changed, 31 insertions(+), 338 deletions(-)
diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go
index 87221256..944e0f84 100644
--- a/backend/internal/config/config.go
+++ b/backend/internal/config/config.go
@@ -435,8 +435,7 @@ type DefaultConfig struct {
}
type RateLimitConfig struct {
- OverloadCooldownMinutes int `mapstructure:"overload_cooldown_minutes"` // 529过载冷却时间(分钟)
- OAuth401CooldownMinutes int `mapstructure:"oauth_401_cooldown_minutes"` // OAuth 401 临时不可调度冷却时间(分钟)
+ OverloadCooldownMinutes int `mapstructure:"overload_cooldown_minutes"` // 529过载冷却时间(分钟)
}
// APIKeyAuthCacheConfig API Key 认证缓存配置
@@ -710,7 +709,6 @@ func setDefaults() {
// RateLimit
viper.SetDefault("rate_limit.overload_cooldown_minutes", 10)
- viper.SetDefault("rate_limit.oauth_401_cooldown_minutes", 5)
// Pricing - 从 price-mirror 分支同步,该分支维护了 sha256 哈希文件用于增量更新检查
viper.SetDefault("pricing.remote_url", "https://raw.githubusercontent.com/Wei-Shaw/claude-relay-service/price-mirror/model_prices_and_context_window.json")
diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go
index a9523216..8c4ffb68 100644
--- a/backend/internal/service/ratelimit_service.go
+++ b/backend/internal/service/ratelimit_service.go
@@ -73,10 +73,8 @@ func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Acc
return false
}
- isOAuth401 := statusCode == 401 && account.Type == AccountTypeOAuth &&
- (account.Platform == PlatformAntigravity || account.Platform == PlatformGemini)
tempMatched := false
- if !isOAuth401 || account.IsTempUnschedulableEnabled() {
+ if statusCode != 401 {
tempMatched = s.tryTempUnschedulable(ctx, account, statusCode, responseBody)
}
upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(responseBody))
@@ -87,18 +85,13 @@ func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Acc
switch statusCode {
case 401:
- if isOAuth401 {
- if tempMatched {
- if s.tokenCacheInvalidator != nil {
- if err := s.tokenCacheInvalidator.InvalidateToken(ctx, account); err != nil {
- slog.Warn("oauth_401_invalidate_cache_failed", "account_id", account.ID, "error", err)
- }
+ if account.Type == AccountTypeOAuth &&
+ (account.Platform == PlatformAntigravity || account.Platform == PlatformGemini) {
+ if s.tokenCacheInvalidator != nil {
+ if err := s.tokenCacheInvalidator.InvalidateToken(ctx, account); err != nil {
+ slog.Warn("oauth_401_invalidate_cache_failed", "account_id", account.ID, "error", err)
}
- shouldDisable = true
- } else {
- shouldDisable = s.handleOAuth401TempUnschedulable(ctx, account, upstreamMsg)
}
- break
}
msg := "Authentication failed (401): invalid or expired credentials"
if upstreamMsg != "" {
@@ -150,63 +143,6 @@ func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Acc
return shouldDisable
}
-func (s *RateLimitService) handleOAuth401TempUnschedulable(ctx context.Context, account *Account, upstreamMsg string) bool {
- if account == nil {
- return false
- }
-
- if s.tokenCacheInvalidator != nil {
- if err := s.tokenCacheInvalidator.InvalidateToken(ctx, account); err != nil {
- slog.Warn("oauth_401_invalidate_cache_failed", "account_id", account.ID, "error", err)
- }
- }
-
- now := time.Now()
- until := now.Add(s.oauth401Cooldown())
- msg := "Authentication failed (401): invalid or expired credentials"
- if upstreamMsg != "" {
- msg = "Authentication failed (401): " + upstreamMsg
- }
-
- state := &TempUnschedState{
- UntilUnix: until.Unix(),
- TriggeredAtUnix: now.Unix(),
- StatusCode: 401,
- MatchedKeyword: "oauth_401",
- RuleIndex: -1, // -1 表示非规则触发,而是 OAuth 401 特殊处理
- ErrorMessage: msg,
- }
-
- reason := ""
- if raw, err := json.Marshal(state); err == nil {
- reason = string(raw)
- }
- if reason == "" {
- reason = msg
- }
-
- if err := s.accountRepo.SetTempUnschedulable(ctx, account.ID, until, reason); err != nil {
- slog.Warn("oauth_401_set_temp_unschedulable_failed", "account_id", account.ID, "error", err)
- return false
- }
-
- if s.tempUnschedCache != nil {
- if err := s.tempUnschedCache.SetTempUnsched(ctx, account.ID, state); err != nil {
- slog.Warn("oauth_401_set_temp_unsched_cache_failed", "account_id", account.ID, "error", err)
- }
- }
-
- slog.Info("oauth_401_temp_unschedulable", "account_id", account.ID, "until", until)
- return true
-}
-
-func (s *RateLimitService) oauth401Cooldown() time.Duration {
- if s != nil && s.cfg != nil && s.cfg.RateLimit.OAuth401CooldownMinutes > 0 {
- return time.Duration(s.cfg.RateLimit.OAuth401CooldownMinutes) * time.Minute
- }
- return 5 * time.Minute
-}
-
// PreCheckUsage proactively checks local quota before dispatching a request.
// Returns false when the account should be skipped.
func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, requestedModel string) (bool, error) {
diff --git a/backend/internal/service/ratelimit_service_401_test.go b/backend/internal/service/ratelimit_service_401_test.go
index 2c43b1cf..36357a4b 100644
--- a/backend/internal/service/ratelimit_service_401_test.go
+++ b/backend/internal/service/ratelimit_service_401_test.go
@@ -15,21 +15,19 @@ import (
type rateLimitAccountRepoStub struct {
mockAccountRepoForGemini
- tempCalls int
- tempUntil time.Time
- tempReason string
setErrorCalls int
-}
-
-func (r *rateLimitAccountRepoStub) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
- r.tempCalls++
- r.tempUntil = until
- r.tempReason = reason
- return nil
+ tempCalls int
+ lastErrorMsg string
}
func (r *rateLimitAccountRepoStub) SetError(ctx context.Context, id int64, errorMsg string) error {
r.setErrorCalls++
+ r.lastErrorMsg = errorMsg
+ return nil
+}
+
+func (r *rateLimitAccountRepoStub) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
+ r.tempCalls++
return nil
}
@@ -43,7 +41,7 @@ func (r *tokenCacheInvalidatorRecorder) InvalidateToken(ctx context.Context, acc
return r.err
}
-func TestRateLimitService_HandleUpstreamError_OAuth401TempUnschedulable(t *testing.T) {
+func TestRateLimitService_HandleUpstreamError_OAuth401MarksError(t *testing.T) {
tests := []struct {
name string
platform string
@@ -62,17 +60,26 @@ func TestRateLimitService_HandleUpstreamError_OAuth401TempUnschedulable(t *testi
ID: 100,
Platform: tt.platform,
Type: AccountTypeOAuth,
+ Credentials: map[string]any{
+ "temp_unschedulable_enabled": true,
+ "temp_unschedulable_rules": []any{
+ map[string]any{
+ "error_code": 401,
+ "keywords": []any{"unauthorized"},
+ "duration_minutes": 30,
+ "description": "custom rule",
+ },
+ },
+ },
}
- start := time.Now()
shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
require.True(t, shouldDisable)
- require.Equal(t, 1, repo.tempCalls)
- require.Equal(t, 0, repo.setErrorCalls)
+ require.Equal(t, 1, repo.setErrorCalls)
+ require.Equal(t, 0, repo.tempCalls)
+ require.Contains(t, repo.lastErrorMsg, "Authentication failed (401)")
require.Len(t, invalidator.accounts, 1)
- require.WithinDuration(t, start.Add(5*time.Minute), repo.tempUntil, 10*time.Second)
- require.NotEmpty(t, repo.tempReason)
})
}
}
@@ -91,43 +98,10 @@ func TestRateLimitService_HandleUpstreamError_OAuth401InvalidatorError(t *testin
shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
require.True(t, shouldDisable)
- require.Equal(t, 1, repo.tempCalls)
- require.Equal(t, 0, repo.setErrorCalls)
+ require.Equal(t, 1, repo.setErrorCalls)
require.Len(t, invalidator.accounts, 1)
}
-func TestRateLimitService_HandleUpstreamError_OAuth401CustomRule(t *testing.T) {
- repo := &rateLimitAccountRepoStub{}
- invalidator := &tokenCacheInvalidatorRecorder{}
- service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
- service.SetTokenCacheInvalidator(invalidator)
- account := &Account{
- ID: 103,
- Platform: PlatformGemini,
- Type: AccountTypeOAuth,
- Credentials: map[string]any{
- "temp_unschedulable_enabled": true,
- "temp_unschedulable_rules": []any{
- map[string]any{
- "error_code": 401,
- "keywords": []any{"unauthorized"},
- "duration_minutes": 30,
- "description": "custom rule",
- },
- },
- },
- }
-
- start := time.Now()
- shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
-
- require.True(t, shouldDisable)
- require.Equal(t, 1, repo.tempCalls)
- require.Equal(t, 0, repo.setErrorCalls)
- require.Len(t, invalidator.accounts, 1)
- require.WithinDuration(t, start.Add(30*time.Minute), repo.tempUntil, 10*time.Second)
-}
-
func TestRateLimitService_HandleUpstreamError_NonOAuth401(t *testing.T) {
repo := &rateLimitAccountRepoStub{}
invalidator := &tokenCacheInvalidatorRecorder{}
@@ -142,212 +116,6 @@ func TestRateLimitService_HandleUpstreamError_NonOAuth401(t *testing.T) {
shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized"))
require.True(t, shouldDisable)
- require.Equal(t, 0, repo.tempCalls)
require.Equal(t, 1, repo.setErrorCalls)
require.Empty(t, invalidator.accounts)
}
-
-// TestRateLimitService_HandleOAuth401_NilAccount 测试 account 为 nil 的情况
-func TestRateLimitService_HandleOAuth401_NilAccount(t *testing.T) {
- repo := &rateLimitAccountRepoStub{}
- service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
-
- result := service.handleOAuth401TempUnschedulable(context.Background(), nil, "error")
-
- require.False(t, result)
- require.Equal(t, 0, repo.tempCalls)
-}
-
-// TestRateLimitService_HandleOAuth401_NilInvalidator 测试 tokenCacheInvalidator 为 nil 的情况
-func TestRateLimitService_HandleOAuth401_NilInvalidator(t *testing.T) {
- repo := &rateLimitAccountRepoStub{}
- service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
- // 不设置 tokenCacheInvalidator
- account := &Account{
- ID: 200,
- Platform: PlatformGemini,
- Type: AccountTypeOAuth,
- }
-
- result := service.handleOAuth401TempUnschedulable(context.Background(), account, "error")
-
- require.True(t, result)
- require.Equal(t, 1, repo.tempCalls)
-}
-
-// TestRateLimitService_HandleOAuth401_SetTempUnschedulableFailed 测试 SetTempUnschedulable 失败的情况
-func TestRateLimitService_HandleOAuth401_SetTempUnschedulableFailed(t *testing.T) {
- repo := &rateLimitAccountRepoStubWithError{
- setTempErr: errors.New("db error"),
- }
- invalidator := &tokenCacheInvalidatorRecorder{}
- service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
- service.SetTokenCacheInvalidator(invalidator)
- account := &Account{
- ID: 201,
- Platform: PlatformGemini,
- Type: AccountTypeOAuth,
- }
-
- result := service.handleOAuth401TempUnschedulable(context.Background(), account, "error")
-
- require.False(t, result) // 失败应返回 false
- require.Len(t, invalidator.accounts, 1) // 但 invalidator 仍然被调用
-}
-
-// rateLimitAccountRepoStubWithError 支持返回错误的 stub
-type rateLimitAccountRepoStubWithError struct {
- mockAccountRepoForGemini
- setTempErr error
- setErrorCalls int
-}
-
-func (r *rateLimitAccountRepoStubWithError) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error {
- return r.setTempErr
-}
-
-func (r *rateLimitAccountRepoStubWithError) SetError(ctx context.Context, id int64, errorMsg string) error {
- r.setErrorCalls++
- return nil
-}
-
-// TestRateLimitService_HandleOAuth401_WithTempUnschedCache 测试 tempUnschedCache 存在的情况
-func TestRateLimitService_HandleOAuth401_WithTempUnschedCache(t *testing.T) {
- repo := &rateLimitAccountRepoStub{}
- invalidator := &tokenCacheInvalidatorRecorder{}
- tempCache := &tempUnschedCacheStub{}
- service := NewRateLimitService(repo, nil, &config.Config{}, nil, tempCache)
- service.SetTokenCacheInvalidator(invalidator)
- account := &Account{
- ID: 202,
- Platform: PlatformGemini,
- Type: AccountTypeOAuth,
- }
-
- result := service.handleOAuth401TempUnschedulable(context.Background(), account, "error")
-
- require.True(t, result)
- require.Equal(t, 1, repo.tempCalls)
- require.Equal(t, 1, tempCache.setCalls)
-}
-
-// TestRateLimitService_HandleOAuth401_TempUnschedCacheError 测试 tempUnschedCache 设置失败的情况
-func TestRateLimitService_HandleOAuth401_TempUnschedCacheError(t *testing.T) {
- repo := &rateLimitAccountRepoStub{}
- invalidator := &tokenCacheInvalidatorRecorder{}
- tempCache := &tempUnschedCacheStub{setErr: errors.New("cache error")}
- service := NewRateLimitService(repo, nil, &config.Config{}, nil, tempCache)
- service.SetTokenCacheInvalidator(invalidator)
- account := &Account{
- ID: 203,
- Platform: PlatformGemini,
- Type: AccountTypeOAuth,
- }
-
- result := service.handleOAuth401TempUnschedulable(context.Background(), account, "error")
-
- require.True(t, result) // 缓存错误不影响主流程
- require.Equal(t, 1, repo.tempCalls)
-}
-
-// tempUnschedCacheStub 用于测试的 TempUnschedCache stub
-type tempUnschedCacheStub struct {
- setCalls int
- setErr error
-}
-
-func (c *tempUnschedCacheStub) GetTempUnsched(ctx context.Context, accountID int64) (*TempUnschedState, error) {
- return nil, nil
-}
-
-func (c *tempUnschedCacheStub) SetTempUnsched(ctx context.Context, accountID int64, state *TempUnschedState) error {
- c.setCalls++
- return c.setErr
-}
-
-func (c *tempUnschedCacheStub) DeleteTempUnsched(ctx context.Context, accountID int64) error {
- return nil
-}
-
-// TestRateLimitService_OAuth401Cooldown 测试 oauth401Cooldown 函数
-func TestRateLimitService_OAuth401Cooldown(t *testing.T) {
- tests := []struct {
- name string
- cfg *config.Config
- expected time.Duration
- }{
- {
- name: "default_when_config_zero",
- cfg: &config.Config{RateLimit: config.RateLimitConfig{OAuth401CooldownMinutes: 0}},
- expected: 5 * time.Minute,
- },
- {
- name: "custom_cooldown_10_minutes",
- cfg: &config.Config{RateLimit: config.RateLimitConfig{OAuth401CooldownMinutes: 10}},
- expected: 10 * time.Minute,
- },
- {
- name: "custom_cooldown_1_minute",
- cfg: &config.Config{RateLimit: config.RateLimitConfig{OAuth401CooldownMinutes: 1}},
- expected: 1 * time.Minute,
- },
- {
- name: "negative_value_uses_default",
- cfg: &config.Config{RateLimit: config.RateLimitConfig{OAuth401CooldownMinutes: -5}},
- expected: 5 * time.Minute,
- },
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- service := NewRateLimitService(nil, nil, tt.cfg, nil, nil)
- result := service.oauth401Cooldown()
- require.Equal(t, tt.expected, result)
- })
- }
-}
-
-// TestRateLimitService_OAuth401Cooldown_NilConfig 测试 cfg 为 nil 的情况
-func TestRateLimitService_OAuth401Cooldown_NilConfig(t *testing.T) {
- service := &RateLimitService{cfg: nil}
- result := service.oauth401Cooldown()
- require.Equal(t, 5*time.Minute, result)
-}
-
-// TestRateLimitService_HandleOAuth401_WithCustomCooldown 测试自定义 cooldown 配置
-func TestRateLimitService_HandleOAuth401_WithCustomCooldown(t *testing.T) {
- repo := &rateLimitAccountRepoStub{}
- cfg := &config.Config{
- RateLimit: config.RateLimitConfig{
- OAuth401CooldownMinutes: 15,
- },
- }
- service := NewRateLimitService(repo, nil, cfg, nil, nil)
- account := &Account{
- ID: 204,
- Platform: PlatformAntigravity,
- Type: AccountTypeOAuth,
- }
-
- start := time.Now()
- result := service.handleOAuth401TempUnschedulable(context.Background(), account, "error")
-
- require.True(t, result)
- require.WithinDuration(t, start.Add(15*time.Minute), repo.tempUntil, 10*time.Second)
-}
-
-// TestRateLimitService_HandleOAuth401_EmptyUpstreamMsg 测试 upstreamMsg 为空的情况
-func TestRateLimitService_HandleOAuth401_EmptyUpstreamMsg(t *testing.T) {
- repo := &rateLimitAccountRepoStub{}
- service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil)
- account := &Account{
- ID: 205,
- Platform: PlatformGemini,
- Type: AccountTypeOAuth,
- }
-
- result := service.handleOAuth401TempUnschedulable(context.Background(), account, "")
-
- require.True(t, result)
- require.Contains(t, repo.tempReason, "Authentication failed (401)")
-}
diff --git a/config.yaml b/config.yaml
index bd399874..424ce9eb 100644
--- a/config.yaml
+++ b/config.yaml
@@ -387,9 +387,6 @@ rate_limit:
# Cooldown time (in minutes) when upstream returns 529 (overloaded)
# 上游返回 529(过载)时的冷却时间(分钟)
overload_cooldown_minutes: 10
- # Cooldown time (in minutes) for the OAuth 401 temporary-unschedulable state
- # OAuth 401 临时不可调度冷却时间(分钟)
- oauth_401_cooldown_minutes: 5
# =============================================================================
# Pricing Data Source (Optional)
diff --git a/deploy/.env.example b/deploy/.env.example
index 3af969ef..f21a3c62 100644
--- a/deploy/.env.example
+++ b/deploy/.env.example
@@ -76,9 +76,6 @@ JWT_EXPIRE_HOUR=24
# Cooldown time (in minutes) when upstream returns 529 (overloaded)
# 上游返回 529(过载)时的冷却时间(分钟)
RATE_LIMIT_OVERLOAD_COOLDOWN_MINUTES=10
-# Cooldown time (in minutes) for the OAuth 401 temporary-unschedulable state
-# OAuth 401 临时不可调度冷却时间(分钟)
-RATE_LIMIT_OAUTH_401_COOLDOWN_MINUTES=5
# -----------------------------------------------------------------------------
# Gateway Scheduling (Optional)
diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml
index fa8a30c7..ce2439f4 100644
--- a/deploy/config.example.yaml
+++ b/deploy/config.example.yaml
@@ -429,9 +429,6 @@ rate_limit:
# Cooldown time (in minutes) when upstream returns 529 (overloaded)
# 上游返回 529(过载)时的冷却时间(分钟)
overload_cooldown_minutes: 10
- # Cooldown time (in minutes) for the OAuth 401 temporary-unschedulable state
- # OAuth 401 临时不可调度冷却时间(分钟)
- oauth_401_cooldown_minutes: 5
# =============================================================================
# Pricing Data Source (Optional)
From 90bce60b85b40f09e60deb17b4a358535a510c1b Mon Sep 17 00:00:00 2001
From: yangjianbo
Date: Thu, 15 Jan 2026 15:14:44 +0800
Subject: [PATCH 3/4] feat: merge dev
---
PR_DESCRIPTION.md | 164 +++++
backend/cmd/server/wire_gen.go | 13 +-
backend/ent/account.go | 13 +
backend/ent/account/account.go | 10 +
backend/ent/account/where.go | 45 ++
backend/ent/account_create.go | 85 +++
backend/ent/account_update.go | 54 ++
backend/ent/migrate/schema.go | 44 +-
backend/ent/mutation.go | 198 +++++-
backend/ent/runtime/runtime.go | 26 +-
backend/ent/schema/account.go | 6 +
backend/ent/schema/usage_log.go | 6 +
backend/ent/usagelog.go | 16 +-
backend/ent/usagelog/usagelog.go | 8 +
backend/ent/usagelog/where.go | 55 ++
backend/ent/usagelog_create.go | 98 +++
backend/ent/usagelog_update.go | 72 +++
.../internal/handler/admin/account_handler.go | 19 +
.../handler/admin/dashboard_handler.go | 50 +-
.../handler/admin/ops_alerts_handler.go | 176 +++++-
backend/internal/handler/admin/ops_handler.go | 589 +++++++++++++++++-
.../internal/handler/admin/proxy_handler.go | 32 +-
backend/internal/handler/dto/mappers.go | 22 +-
backend/internal/handler/dto/types.go | 29 +-
backend/internal/handler/ops_error_logger.go | 76 ++-
.../internal/pkg/usagestats/account_stats.go | 12 +-
.../pkg/usagestats/usage_log_types.go | 31 +-
backend/internal/repository/account_repo.go | 16 +
.../repository/dashboard_aggregation_repo.go | 47 +-
backend/internal/repository/ops_repo.go | 553 +++++++++++++---
.../internal/repository/ops_repo_alerts.go | 166 ++++-
.../repository/proxy_latency_cache.go | 74 +++
.../repository/proxy_probe_service.go | 7 +-
backend/internal/repository/proxy_repo.go | 44 +-
...eduler_snapshot_outbox_integration_test.go | 2 +-
backend/internal/repository/usage_log_repo.go | 183 ++++--
.../usage_log_repo_integration_test.go | 96 ++-
backend/internal/repository/wire.go | 1 +
backend/internal/server/api_contract_test.go | 41 +-
backend/internal/server/routes/admin.go | 22 +-
backend/internal/service/account.go | 37 +-
.../account_billing_rate_multiplier_test.go | 27 +
backend/internal/service/account_service.go | 17 +-
.../internal/service/account_usage_service.go | 36 +-
backend/internal/service/admin_service.go | 196 +++++-
.../service/admin_service_bulk_update_test.go | 6 +-
.../service/admin_service_delete_test.go | 24 +-
.../service/antigravity_gateway_service.go | 17 +
backend/internal/service/dashboard_service.go | 8 +-
backend/internal/service/gateway_service.go | 58 +-
.../service/gemini_messages_compat_service.go | 23 +
.../service/openai_gateway_service.go | 53 +-
.../service/ops_alert_evaluator_service.go | 37 +-
backend/internal/service/ops_alert_models.go | 29 +-
backend/internal/service/ops_alerts.go | 72 ++-
backend/internal/service/ops_health_score.go | 41 +-
.../internal/service/ops_health_score_test.go | 43 +-
backend/internal/service/ops_models.go | 65 +-
backend/internal/service/ops_port.go | 18 +-
backend/internal/service/ops_retry.go | 126 +++-
backend/internal/service/ops_service.go | 110 +++-
backend/internal/service/ops_settings.go | 13 +-
.../internal/service/ops_settings_models.go | 13 +-
.../internal/service/ops_upstream_context.go | 41 +-
backend/internal/service/proxy.go | 13 +-
.../internal/service/proxy_latency_cache.go | 18 +
backend/internal/service/proxy_service.go | 2 +
backend/internal/service/ratelimit_service.go | 4 +-
backend/internal/service/usage_log.go | 2 +
.../037_add_account_rate_multiplier.sql | 14 +
backend/migrations/037_ops_alert_silences.sql | 28 +
...results_and_standardize_classification.sql | 111 ++++
frontend/src/api/admin/dashboard.ts | 8 +
frontend/src/api/admin/ops.ts | 210 ++++++-
frontend/src/api/admin/proxies.ts | 26 +-
frontend/src/api/admin/usage.ts | 1 +
.../components/account/AccountStatsModal.vue | 70 ++-
.../account/AccountTodayStatsCell.vue | 13 +-
.../account/BulkEditAccountModal.vue | 41 +-
.../components/account/CreateAccountModal.vue | 11 +-
.../components/account/EditAccountModal.vue | 9 +-
.../components/account/UsageProgressBar.vue | 15 +-
.../admin/account/AccountStatsModal.vue | 60 +-
.../admin/usage/UsageStatsCards.vue | 15 +-
.../src/components/admin/usage/UsageTable.vue | 39 +-
.../components/admin/user/UserCreateModal.vue | 2 +-
frontend/src/components/common/DataTable.vue | 121 +++-
frontend/src/i18n/locales/en.ts | 253 ++++++--
frontend/src/i18n/locales/zh.ts | 256 ++++++--
frontend/src/style.css | 2 +-
frontend/src/types/index.ts | 31 +-
frontend/src/views/admin/AccountsView.vue | 78 ++-
frontend/src/views/admin/ProxiesView.vue | 377 ++++++++++-
frontend/src/views/admin/UsageView.vue | 18 +-
frontend/src/views/admin/UsersView.vue | 279 +++++----
frontend/src/views/admin/ops/OpsDashboard.vue | 39 +-
.../ops/components/OpsAlertEventsCard.vue | 555 +++++++++++++++--
.../ops/components/OpsAlertRulesCard.vue | 18 -
.../ops/components/OpsDashboardHeader.vue | 99 ++-
.../ops/components/OpsDashboardSkeleton.vue | 104 +++-
.../ops/components/OpsErrorDetailModal.vue | 484 +++++---------
.../ops/components/OpsErrorDetailsModal.vue | 162 ++---
.../admin/ops/components/OpsErrorLogTable.vue | 272 ++++----
.../ops/components/OpsRequestDetailsModal.vue | 4 +-
.../ops/components/OpsRuntimeSettingsCard.vue | 51 +-
.../ops/components/OpsSettingsDialog.vue | 86 +--
.../components/OpsThroughputTrendChart.vue | 4 +-
107 files changed, 6705 insertions(+), 1611 deletions(-)
create mode 100644 PR_DESCRIPTION.md
create mode 100644 backend/internal/repository/proxy_latency_cache.go
create mode 100644 backend/internal/service/account_billing_rate_multiplier_test.go
create mode 100644 backend/internal/service/proxy_latency_cache.go
create mode 100644 backend/migrations/037_add_account_rate_multiplier.sql
create mode 100644 backend/migrations/037_ops_alert_silences.sql
create mode 100644 backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql
diff --git a/PR_DESCRIPTION.md b/PR_DESCRIPTION.md
new file mode 100644
index 00000000..b240f45c
--- /dev/null
+++ b/PR_DESCRIPTION.md
@@ -0,0 +1,164 @@
+## Overview
+
+Comprehensively enhances error log management and alert silencing in the Ops monitoring system, and improves the code quality and user experience of the frontend UI components. This update refactors the core service layer and data access layer, improving maintainability and day-to-day operations efficiency.
+
+## Key Changes
+
+### 1. Error Log Query Optimization
+
+**Features:**
+- Added a GetErrorLogByID endpoint for looking up a single error record by ID
+- Improved error log filtering with multi-dimensional criteria (platform, phase, source, owner, etc.)
+- Streamlined query parameter handling and simplified the code structure
+- Strengthened error classification and normalization
+- Added error resolution tracking (resolved field)
+
+**Implementation:**
+- `ops_handler.go` - new single error log query endpoint
+- `ops_repo.go` - optimized data queries and filter construction
+- `ops_models.go` - extended error log data model
+- matching updates to the frontend API client
+
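+A minimal sketch of the filter shape this implies is below; the struct and field names are illustrative assumptions, and the actual definitions live in `ops_models.go`:
+
+```go
+package ops
+
+// ErrorLogFilter is a hypothetical view of the multi-dimensional error log
+// filter described above; zero-valued fields mean "no constraint".
+type ErrorLogFilter struct {
+    Platform string // e.g. "gemini"
+    Phase    string // request|auth|routing|upstream|network|internal
+    Source   string // client_request|upstream_http|gateway
+    OwnerID  int64  // 0 = any owner
+    Resolved *bool  // nil = both resolved and unresolved
+}
+```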
+### 2. Alert Silencing
+
+**Features:**
+- Alerts can be silenced by rule, platform, group, region, and other dimensions
+- Silence duration and a reason can be specified
+- Silence records are auditable, tracking who created them and when
+- Automatic expiry prevents alerts from being silenced forever
+
+**Implementation:**
+- `037_ops_alert_silences.sql` - new alert silences table
+- `ops_alerts.go` - alert silencing logic
+- `ops_alerts_handler.go` - alert silencing API endpoints
+- `OpsAlertEventsCard.vue` - frontend UI for silencing alerts
+
+**Table structure:**
+
+| Field | Type | Description |
+|------|------|------|
+| rule_id | BIGINT | alert rule ID |
+| platform | VARCHAR(64) | platform identifier |
+| group_id | BIGINT | group ID (optional) |
+| region | VARCHAR(64) | region (optional) |
+| until | TIMESTAMPTZ | silence expiry time |
+| reason | TEXT | reason for silencing |
+| created_by | BIGINT | creator user ID |
+
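+For orientation, a hypothetical Go mapping of the table above (the authoritative schema is `037_ops_alert_silences.sql`; naming and optionality here are assumptions):
+
+```go
+package ops
+
+import "time"
+
+// AlertSilence mirrors the silences table sketched above, one field per column.
+type AlertSilence struct {
+    RuleID    int64     // rule_id
+    Platform  string    // platform
+    GroupID   *int64    // group_id, nil when the silence is not group-scoped
+    Region    *string   // region, nil when the silence is not region-scoped
+    Until     time.Time // until: auto-expiry deadline
+    Reason    string    // reason
+    CreatedBy int64     // created_by
+}
+```
+
+A silence is considered active only while `time.Now()` is before `Until`, which is what gives the auto-expiry behavior.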
+### 3. Error Classification Standardization
+
+**Features:**
+- Unified error phase taxonomy (request|auth|routing|upstream|network|internal)
+- Normalized error ownership taxonomy (client|provider|platform)
+- Standardized error source taxonomy (client_request|upstream_http|gateway)
+- Automatic migration of historical data to the new taxonomy
+
+**Implementation:**
+- `038_ops_errors_resolution_retry_results_and_standardize_classification.sql` - classification standardization migration
+- legacy classifications are automatically mapped to the new standard
+- recovered upstream errors (client status code < 400) are automatically resolved
+
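+As a sketch, the taxonomy could be expressed as Go constants like the following (identifier names are assumptions; only the string values come from the description above):
+
+```go
+package ops
+
+// Standardized error phases: where in the request lifecycle the error occurred.
+const (
+    PhaseRequest  = "request"
+    PhaseAuth     = "auth"
+    PhaseRouting  = "routing"
+    PhaseUpstream = "upstream"
+    PhaseNetwork  = "network"
+    PhaseInternal = "internal"
+)
+
+// Error ownership: which party is responsible for the failure.
+const (
+    OwnerClient   = "client"
+    OwnerProvider = "provider"
+    OwnerPlatform = "platform"
+)
+
+// Error sources: where the error was observed.
+const (
+    SourceClientRequest = "client_request"
+    SourceUpstreamHTTP  = "upstream_http"
+    SourceGateway       = "gateway"
+)
+```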
+### 4. Gateway Service Integration
+
+**Features:**
+- Completed Ops integration across the gateway services
+- Unified error logging interface
+- Stronger upstream error tracing
+
+**Services involved:**
+- `antigravity_gateway_service.go` - Antigravity gateway integration
+- `gateway_service.go` - generic gateway integration
+- `gemini_messages_compat_service.go` - Gemini compatibility layer integration
+- `openai_gateway_service.go` - OpenAI gateway integration
+
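+A minimal sketch of what the shared error-logging hook might look like (the interface name and method signature are assumptions; the concrete contract lives in `ops_error_logger.go`):
+
+```go
+package ops
+
+import "context"
+
+// ErrorLogger is a hypothetical unified hook that each gateway service calls
+// when an upstream request fails, so every platform records errors uniformly.
+type ErrorLogger interface {
+    LogUpstreamError(ctx context.Context, platform string, statusCode int, phase string, body []byte) error
+}
+```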
+### 5. Frontend UI Improvements
+
+**Refactoring:**
+- Greatly simplified the error detail modal (from 828 lines down to 450)
+- Cleaned up the error log table component for readability
+- Removed unused i18n translations to cut redundancy
+- Unified component code style and formatting
+- Reworked the skeleton screen to better match the actual dashboard layout
+
+**Layout fixes:**
+- Fixed modal content overflow and scrolling issues
+- Switched tables to flex layout to guarantee correct rendering
+- Improved dashboard header layout and interaction
+- Better responsive behavior
+- Skeleton screen now adapts to fullscreen mode
+
+**Interaction:**
+- Enhanced the alert events card features and presentation
+- Improved error detail display logic
+- Extended the request details modal
+- Polished the runtime settings card
+- Smoother loading animations
+
+### 6. Internationalization
+
+**Copy updates:**
+- Added the missing English translations for error log features
+- Added Chinese and English copy for the alert silencing feature
+- Completed hint texts and error messages
+- Standardized terminology across translations
+
+## File Changes
+
+**Backend (26 files):**
+- `backend/internal/handler/admin/ops_alerts_handler.go` - alert endpoint enhancements
+- `backend/internal/handler/admin/ops_handler.go` - error log endpoint improvements
+- `backend/internal/handler/ops_error_logger.go` - error logger enhancements
+- `backend/internal/repository/ops_repo.go` - data access layer refactor
+- `backend/internal/repository/ops_repo_alerts.go` - alert data access enhancements
+- `backend/internal/service/ops_*.go` - core service layer refactor (10 files)
+- `backend/internal/service/*_gateway_service.go` - gateway integration (4 files)
+- `backend/internal/server/routes/admin.go` - route configuration updates
+- `backend/migrations/*.sql` - database migrations (2 files)
+- test file updates (5 files)
+
+**Frontend (13 files):**
+- `frontend/src/views/admin/ops/OpsDashboard.vue` - dashboard page improvements
+- `frontend/src/views/admin/ops/components/*.vue` - component refactor (10 files)
+- `frontend/src/api/admin/ops.ts` - API client extensions
+- `frontend/src/i18n/locales/*.ts` - i18n texts (2 files)
+
+## Code Statistics
+
+- 44 files changed
+- 3733 insertions
+- 995 deletions
+- net +2738 lines
+
+## Core Improvements
+
+**Maintainability:**
+- Refactored the core service layer with clearer responsibilities
+- Simplified frontend component code and reduced complexity
+- Unified code style and naming conventions
+- Removed redundant code and unused translations
+- Standardized the error classification taxonomy
+
+**Functionality:**
+- Alert silencing to reduce alert noise
+- Faster, more flexible error log queries for operations work
+- Complete gateway Ops integration with unified monitoring
+- Error resolution tracking for easier issue management
+
+**User experience:**
+- Fixed several UI layout issues
+- Smoother interaction flows
+- More complete internationalization
+- Better responsive behavior
+- Improved loading state presentation
+
+## Testing
+
+- ✅ Error log query and filtering
+- ✅ Alert silence creation and automatic expiry
+- ✅ Error classification standardization migration
+- ✅ Gateway service error logging
+- ✅ Frontend component layout and interaction
+- ✅ Skeleton screen fullscreen adaptation
+- ✅ i18n text completeness
+- ✅ API endpoint correctness
+- ✅ Database migrations applied successfully
diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go
index 08e40010..c8304831 100644
--- a/backend/cmd/server/wire_gen.go
+++ b/backend/cmd/server/wire_gen.go
@@ -67,7 +67,6 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
userHandler := handler.NewUserHandler(userService)
apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
usageLogRepository := repository.NewUsageLogRepository(client, db)
- dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db)
usageService := service.NewUsageService(usageLogRepository, userRepository, client, apiKeyAuthCacheInvalidator)
usageHandler := handler.NewUsageHandler(usageService, apiKeyService)
redeemCodeRepository := repository.NewRedeemCodeRepository(client)
@@ -76,15 +75,17 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator)
redeemHandler := handler.NewRedeemHandler(redeemService)
subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService)
+ dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db)
dashboardStatsCache := repository.NewDashboardCache(redisClient, configConfig)
+ dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig)
timingWheelService := service.ProvideTimingWheelService()
dashboardAggregationService := service.ProvideDashboardAggregationService(dashboardAggregationRepository, timingWheelService, configConfig)
- dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig)
dashboardHandler := admin.NewDashboardHandler(dashboardService, dashboardAggregationService)
accountRepository := repository.NewAccountRepository(client, db)
proxyRepository := repository.NewProxyRepository(client, db)
proxyExitInfoProber := repository.NewProxyExitInfoProber(configConfig)
- adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, billingCacheService, proxyExitInfoProber, apiKeyAuthCacheInvalidator)
+ proxyLatencyCache := repository.NewProxyLatencyCache(redisClient)
+ adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator)
adminUserHandler := admin.NewUserHandler(adminService)
groupHandler := admin.NewGroupHandler(adminService)
claudeOAuthClient := repository.NewClaudeOAuthClient()
@@ -113,9 +114,6 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
accountTestService := service.NewAccountTestService(accountRepository, geminiTokenProvider, antigravityGatewayService, httpUpstream, configConfig)
concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig)
concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig)
- schedulerCache := repository.NewSchedulerCache(redisClient)
- schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db)
- schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig)
crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig)
accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService)
oAuthHandler := admin.NewOAuthHandler(oAuthService)
@@ -126,6 +124,9 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) {
adminRedeemHandler := admin.NewRedeemHandler(adminService)
promoHandler := admin.NewPromoHandler(promoService)
opsRepository := repository.NewOpsRepository(db)
+ schedulerCache := repository.NewSchedulerCache(redisClient)
+ schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db)
+ schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig)
pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig)
pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient)
if err != nil {
diff --git a/backend/ent/account.go b/backend/ent/account.go
index e960d324..038aa7e5 100644
--- a/backend/ent/account.go
+++ b/backend/ent/account.go
@@ -43,6 +43,8 @@ type Account struct {
Concurrency int `json:"concurrency,omitempty"`
// Priority holds the value of the "priority" field.
Priority int `json:"priority,omitempty"`
+ // RateMultiplier holds the value of the "rate_multiplier" field.
+ RateMultiplier float64 `json:"rate_multiplier,omitempty"`
// Status holds the value of the "status" field.
Status string `json:"status,omitempty"`
// ErrorMessage holds the value of the "error_message" field.
@@ -135,6 +137,8 @@ func (*Account) scanValues(columns []string) ([]any, error) {
values[i] = new([]byte)
case account.FieldAutoPauseOnExpired, account.FieldSchedulable:
values[i] = new(sql.NullBool)
+ case account.FieldRateMultiplier:
+ values[i] = new(sql.NullFloat64)
case account.FieldID, account.FieldProxyID, account.FieldConcurrency, account.FieldPriority:
values[i] = new(sql.NullInt64)
case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldSessionWindowStatus:
@@ -241,6 +245,12 @@ func (_m *Account) assignValues(columns []string, values []any) error {
} else if value.Valid {
_m.Priority = int(value.Int64)
}
+ case account.FieldRateMultiplier:
+ if value, ok := values[i].(*sql.NullFloat64); !ok {
+ return fmt.Errorf("unexpected type %T for field rate_multiplier", values[i])
+ } else if value.Valid {
+ _m.RateMultiplier = value.Float64
+ }
case account.FieldStatus:
if value, ok := values[i].(*sql.NullString); !ok {
return fmt.Errorf("unexpected type %T for field status", values[i])
@@ -420,6 +430,9 @@ func (_m *Account) String() string {
builder.WriteString("priority=")
builder.WriteString(fmt.Sprintf("%v", _m.Priority))
builder.WriteString(", ")
+ builder.WriteString("rate_multiplier=")
+ builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier))
+ builder.WriteString(", ")
builder.WriteString("status=")
builder.WriteString(_m.Status)
builder.WriteString(", ")
diff --git a/backend/ent/account/account.go b/backend/ent/account/account.go
index 402e16ee..73c0e8c2 100644
--- a/backend/ent/account/account.go
+++ b/backend/ent/account/account.go
@@ -39,6 +39,8 @@ const (
FieldConcurrency = "concurrency"
// FieldPriority holds the string denoting the priority field in the database.
FieldPriority = "priority"
+ // FieldRateMultiplier holds the string denoting the rate_multiplier field in the database.
+ FieldRateMultiplier = "rate_multiplier"
// FieldStatus holds the string denoting the status field in the database.
FieldStatus = "status"
// FieldErrorMessage holds the string denoting the error_message field in the database.
@@ -116,6 +118,7 @@ var Columns = []string{
FieldProxyID,
FieldConcurrency,
FieldPriority,
+ FieldRateMultiplier,
FieldStatus,
FieldErrorMessage,
FieldLastUsedAt,
@@ -174,6 +177,8 @@ var (
DefaultConcurrency int
// DefaultPriority holds the default value on creation for the "priority" field.
DefaultPriority int
+ // DefaultRateMultiplier holds the default value on creation for the "rate_multiplier" field.
+ DefaultRateMultiplier float64
// DefaultStatus holds the default value on creation for the "status" field.
DefaultStatus string
// StatusValidator is a validator for the "status" field. It is called by the builders before save.
@@ -244,6 +249,11 @@ func ByPriority(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldPriority, opts...).ToFunc()
}
+// ByRateMultiplier orders the results by the rate_multiplier field.
+func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc()
+}
+
// ByStatus orders the results by the status field.
func ByStatus(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldStatus, opts...).ToFunc()
diff --git a/backend/ent/account/where.go b/backend/ent/account/where.go
index 6c639fd1..dea1127a 100644
--- a/backend/ent/account/where.go
+++ b/backend/ent/account/where.go
@@ -105,6 +105,11 @@ func Priority(v int) predicate.Account {
return predicate.Account(sql.FieldEQ(FieldPriority, v))
}
+// RateMultiplier applies equality check predicate on the "rate_multiplier" field. It's identical to RateMultiplierEQ.
+func RateMultiplier(v float64) predicate.Account {
+ return predicate.Account(sql.FieldEQ(FieldRateMultiplier, v))
+}
+
// Status applies equality check predicate on the "status" field. It's identical to StatusEQ.
func Status(v string) predicate.Account {
return predicate.Account(sql.FieldEQ(FieldStatus, v))
@@ -675,6 +680,46 @@ func PriorityLTE(v int) predicate.Account {
return predicate.Account(sql.FieldLTE(FieldPriority, v))
}
+// RateMultiplierEQ applies the EQ predicate on the "rate_multiplier" field.
+func RateMultiplierEQ(v float64) predicate.Account {
+ return predicate.Account(sql.FieldEQ(FieldRateMultiplier, v))
+}
+
+// RateMultiplierNEQ applies the NEQ predicate on the "rate_multiplier" field.
+func RateMultiplierNEQ(v float64) predicate.Account {
+ return predicate.Account(sql.FieldNEQ(FieldRateMultiplier, v))
+}
+
+// RateMultiplierIn applies the In predicate on the "rate_multiplier" field.
+func RateMultiplierIn(vs ...float64) predicate.Account {
+ return predicate.Account(sql.FieldIn(FieldRateMultiplier, vs...))
+}
+
+// RateMultiplierNotIn applies the NotIn predicate on the "rate_multiplier" field.
+func RateMultiplierNotIn(vs ...float64) predicate.Account {
+ return predicate.Account(sql.FieldNotIn(FieldRateMultiplier, vs...))
+}
+
+// RateMultiplierGT applies the GT predicate on the "rate_multiplier" field.
+func RateMultiplierGT(v float64) predicate.Account {
+ return predicate.Account(sql.FieldGT(FieldRateMultiplier, v))
+}
+
+// RateMultiplierGTE applies the GTE predicate on the "rate_multiplier" field.
+func RateMultiplierGTE(v float64) predicate.Account {
+ return predicate.Account(sql.FieldGTE(FieldRateMultiplier, v))
+}
+
+// RateMultiplierLT applies the LT predicate on the "rate_multiplier" field.
+func RateMultiplierLT(v float64) predicate.Account {
+ return predicate.Account(sql.FieldLT(FieldRateMultiplier, v))
+}
+
+// RateMultiplierLTE applies the LTE predicate on the "rate_multiplier" field.
+func RateMultiplierLTE(v float64) predicate.Account {
+ return predicate.Account(sql.FieldLTE(FieldRateMultiplier, v))
+}
+
// StatusEQ applies the EQ predicate on the "status" field.
func StatusEQ(v string) predicate.Account {
return predicate.Account(sql.FieldEQ(FieldStatus, v))
diff --git a/backend/ent/account_create.go b/backend/ent/account_create.go
index 0725d43d..42a561cf 100644
--- a/backend/ent/account_create.go
+++ b/backend/ent/account_create.go
@@ -153,6 +153,20 @@ func (_c *AccountCreate) SetNillablePriority(v *int) *AccountCreate {
return _c
}
+// SetRateMultiplier sets the "rate_multiplier" field.
+func (_c *AccountCreate) SetRateMultiplier(v float64) *AccountCreate {
+ _c.mutation.SetRateMultiplier(v)
+ return _c
+}
+
+// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil.
+func (_c *AccountCreate) SetNillableRateMultiplier(v *float64) *AccountCreate {
+ if v != nil {
+ _c.SetRateMultiplier(*v)
+ }
+ return _c
+}
+
// SetStatus sets the "status" field.
func (_c *AccountCreate) SetStatus(v string) *AccountCreate {
_c.mutation.SetStatus(v)
@@ -429,6 +443,10 @@ func (_c *AccountCreate) defaults() error {
v := account.DefaultPriority
_c.mutation.SetPriority(v)
}
+ if _, ok := _c.mutation.RateMultiplier(); !ok {
+ v := account.DefaultRateMultiplier
+ _c.mutation.SetRateMultiplier(v)
+ }
if _, ok := _c.mutation.Status(); !ok {
v := account.DefaultStatus
_c.mutation.SetStatus(v)
@@ -488,6 +506,9 @@ func (_c *AccountCreate) check() error {
if _, ok := _c.mutation.Priority(); !ok {
return &ValidationError{Name: "priority", err: errors.New(`ent: missing required field "Account.priority"`)}
}
+ if _, ok := _c.mutation.RateMultiplier(); !ok {
+ return &ValidationError{Name: "rate_multiplier", err: errors.New(`ent: missing required field "Account.rate_multiplier"`)}
+ }
if _, ok := _c.mutation.Status(); !ok {
return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Account.status"`)}
}
@@ -578,6 +599,10 @@ func (_c *AccountCreate) createSpec() (*Account, *sqlgraph.CreateSpec) {
_spec.SetField(account.FieldPriority, field.TypeInt, value)
_node.Priority = value
}
+ if value, ok := _c.mutation.RateMultiplier(); ok {
+ _spec.SetField(account.FieldRateMultiplier, field.TypeFloat64, value)
+ _node.RateMultiplier = value
+ }
if value, ok := _c.mutation.Status(); ok {
_spec.SetField(account.FieldStatus, field.TypeString, value)
_node.Status = value
@@ -893,6 +918,24 @@ func (u *AccountUpsert) AddPriority(v int) *AccountUpsert {
return u
}
+// SetRateMultiplier sets the "rate_multiplier" field.
+func (u *AccountUpsert) SetRateMultiplier(v float64) *AccountUpsert {
+ u.Set(account.FieldRateMultiplier, v)
+ return u
+}
+
+// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create.
+func (u *AccountUpsert) UpdateRateMultiplier() *AccountUpsert {
+ u.SetExcluded(account.FieldRateMultiplier)
+ return u
+}
+
+// AddRateMultiplier adds v to the "rate_multiplier" field.
+func (u *AccountUpsert) AddRateMultiplier(v float64) *AccountUpsert {
+ u.Add(account.FieldRateMultiplier, v)
+ return u
+}
+
// SetStatus sets the "status" field.
func (u *AccountUpsert) SetStatus(v string) *AccountUpsert {
u.Set(account.FieldStatus, v)
@@ -1325,6 +1368,27 @@ func (u *AccountUpsertOne) UpdatePriority() *AccountUpsertOne {
})
}
+// SetRateMultiplier sets the "rate_multiplier" field.
+func (u *AccountUpsertOne) SetRateMultiplier(v float64) *AccountUpsertOne {
+ return u.Update(func(s *AccountUpsert) {
+ s.SetRateMultiplier(v)
+ })
+}
+
+// AddRateMultiplier adds v to the "rate_multiplier" field.
+func (u *AccountUpsertOne) AddRateMultiplier(v float64) *AccountUpsertOne {
+ return u.Update(func(s *AccountUpsert) {
+ s.AddRateMultiplier(v)
+ })
+}
+
+// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create.
+func (u *AccountUpsertOne) UpdateRateMultiplier() *AccountUpsertOne {
+ return u.Update(func(s *AccountUpsert) {
+ s.UpdateRateMultiplier()
+ })
+}
+
// SetStatus sets the "status" field.
func (u *AccountUpsertOne) SetStatus(v string) *AccountUpsertOne {
return u.Update(func(s *AccountUpsert) {
@@ -1956,6 +2020,27 @@ func (u *AccountUpsertBulk) UpdatePriority() *AccountUpsertBulk {
})
}
+// SetRateMultiplier sets the "rate_multiplier" field.
+func (u *AccountUpsertBulk) SetRateMultiplier(v float64) *AccountUpsertBulk {
+ return u.Update(func(s *AccountUpsert) {
+ s.SetRateMultiplier(v)
+ })
+}
+
+// AddRateMultiplier adds v to the "rate_multiplier" field.
+func (u *AccountUpsertBulk) AddRateMultiplier(v float64) *AccountUpsertBulk {
+ return u.Update(func(s *AccountUpsert) {
+ s.AddRateMultiplier(v)
+ })
+}
+
+// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create.
+func (u *AccountUpsertBulk) UpdateRateMultiplier() *AccountUpsertBulk {
+ return u.Update(func(s *AccountUpsert) {
+ s.UpdateRateMultiplier()
+ })
+}
+
// SetStatus sets the "status" field.
func (u *AccountUpsertBulk) SetStatus(v string) *AccountUpsertBulk {
return u.Update(func(s *AccountUpsert) {
diff --git a/backend/ent/account_update.go b/backend/ent/account_update.go
index dcc3212d..63fab096 100644
--- a/backend/ent/account_update.go
+++ b/backend/ent/account_update.go
@@ -193,6 +193,27 @@ func (_u *AccountUpdate) AddPriority(v int) *AccountUpdate {
return _u
}
+// SetRateMultiplier sets the "rate_multiplier" field.
+func (_u *AccountUpdate) SetRateMultiplier(v float64) *AccountUpdate {
+ _u.mutation.ResetRateMultiplier()
+ _u.mutation.SetRateMultiplier(v)
+ return _u
+}
+
+// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil.
+func (_u *AccountUpdate) SetNillableRateMultiplier(v *float64) *AccountUpdate {
+ if v != nil {
+ _u.SetRateMultiplier(*v)
+ }
+ return _u
+}
+
+// AddRateMultiplier adds value to the "rate_multiplier" field.
+func (_u *AccountUpdate) AddRateMultiplier(v float64) *AccountUpdate {
+ _u.mutation.AddRateMultiplier(v)
+ return _u
+}
+
// SetStatus sets the "status" field.
func (_u *AccountUpdate) SetStatus(v string) *AccountUpdate {
_u.mutation.SetStatus(v)
@@ -629,6 +650,12 @@ func (_u *AccountUpdate) sqlSave(ctx context.Context) (_node int, err error) {
if value, ok := _u.mutation.AddedPriority(); ok {
_spec.AddField(account.FieldPriority, field.TypeInt, value)
}
+ if value, ok := _u.mutation.RateMultiplier(); ok {
+ _spec.SetField(account.FieldRateMultiplier, field.TypeFloat64, value)
+ }
+ if value, ok := _u.mutation.AddedRateMultiplier(); ok {
+ _spec.AddField(account.FieldRateMultiplier, field.TypeFloat64, value)
+ }
if value, ok := _u.mutation.Status(); ok {
_spec.SetField(account.FieldStatus, field.TypeString, value)
}
@@ -1005,6 +1032,27 @@ func (_u *AccountUpdateOne) AddPriority(v int) *AccountUpdateOne {
return _u
}
+// SetRateMultiplier sets the "rate_multiplier" field.
+func (_u *AccountUpdateOne) SetRateMultiplier(v float64) *AccountUpdateOne {
+ _u.mutation.ResetRateMultiplier()
+ _u.mutation.SetRateMultiplier(v)
+ return _u
+}
+
+// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil.
+func (_u *AccountUpdateOne) SetNillableRateMultiplier(v *float64) *AccountUpdateOne {
+ if v != nil {
+ _u.SetRateMultiplier(*v)
+ }
+ return _u
+}
+
+// AddRateMultiplier adds value to the "rate_multiplier" field.
+func (_u *AccountUpdateOne) AddRateMultiplier(v float64) *AccountUpdateOne {
+ _u.mutation.AddRateMultiplier(v)
+ return _u
+}
+
// SetStatus sets the "status" field.
func (_u *AccountUpdateOne) SetStatus(v string) *AccountUpdateOne {
_u.mutation.SetStatus(v)
@@ -1471,6 +1519,12 @@ func (_u *AccountUpdateOne) sqlSave(ctx context.Context) (_node *Account, err er
if value, ok := _u.mutation.AddedPriority(); ok {
_spec.AddField(account.FieldPriority, field.TypeInt, value)
}
+ if value, ok := _u.mutation.RateMultiplier(); ok {
+ _spec.SetField(account.FieldRateMultiplier, field.TypeFloat64, value)
+ }
+ if value, ok := _u.mutation.AddedRateMultiplier(); ok {
+ _spec.AddField(account.FieldRateMultiplier, field.TypeFloat64, value)
+ }
if value, ok := _u.mutation.Status(); ok {
_spec.SetField(account.FieldStatus, field.TypeString, value)
}
diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go
index 41cd8b01..d769f611 100644
--- a/backend/ent/migrate/schema.go
+++ b/backend/ent/migrate/schema.go
@@ -79,6 +79,7 @@ var (
{Name: "extra", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}},
{Name: "concurrency", Type: field.TypeInt, Default: 3},
{Name: "priority", Type: field.TypeInt, Default: 50},
+ {Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}},
{Name: "status", Type: field.TypeString, Size: 20, Default: "active"},
{Name: "error_message", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}},
{Name: "last_used_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}},
@@ -101,7 +102,7 @@ var (
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "accounts_proxies_proxy",
- Columns: []*schema.Column{AccountsColumns[24]},
+ Columns: []*schema.Column{AccountsColumns[25]},
RefColumns: []*schema.Column{ProxiesColumns[0]},
OnDelete: schema.SetNull,
},
@@ -120,12 +121,12 @@ var (
{
Name: "account_status",
Unique: false,
- Columns: []*schema.Column{AccountsColumns[12]},
+ Columns: []*schema.Column{AccountsColumns[13]},
},
{
Name: "account_proxy_id",
Unique: false,
- Columns: []*schema.Column{AccountsColumns[24]},
+ Columns: []*schema.Column{AccountsColumns[25]},
},
{
Name: "account_priority",
@@ -135,27 +136,27 @@ var (
{
Name: "account_last_used_at",
Unique: false,
- Columns: []*schema.Column{AccountsColumns[14]},
+ Columns: []*schema.Column{AccountsColumns[15]},
},
{
Name: "account_schedulable",
Unique: false,
- Columns: []*schema.Column{AccountsColumns[17]},
+ Columns: []*schema.Column{AccountsColumns[18]},
},
{
Name: "account_rate_limited_at",
Unique: false,
- Columns: []*schema.Column{AccountsColumns[18]},
+ Columns: []*schema.Column{AccountsColumns[19]},
},
{
Name: "account_rate_limit_reset_at",
Unique: false,
- Columns: []*schema.Column{AccountsColumns[19]},
+ Columns: []*schema.Column{AccountsColumns[20]},
},
{
Name: "account_overload_until",
Unique: false,
- Columns: []*schema.Column{AccountsColumns[20]},
+ Columns: []*schema.Column{AccountsColumns[21]},
},
{
Name: "account_deleted_at",
@@ -449,6 +450,7 @@ var (
{Name: "total_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
{Name: "actual_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}},
{Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}},
+ {Name: "account_rate_multiplier", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(10,4)"}},
{Name: "billing_type", Type: field.TypeInt8, Default: 0},
{Name: "stream", Type: field.TypeBool, Default: false},
{Name: "duration_ms", Type: field.TypeInt, Nullable: true},
@@ -472,31 +474,31 @@ var (
ForeignKeys: []*schema.ForeignKey{
{
Symbol: "usage_logs_api_keys_usage_logs",
- Columns: []*schema.Column{UsageLogsColumns[25]},
+ Columns: []*schema.Column{UsageLogsColumns[26]},
RefColumns: []*schema.Column{APIKeysColumns[0]},
OnDelete: schema.NoAction,
},
{
Symbol: "usage_logs_accounts_usage_logs",
- Columns: []*schema.Column{UsageLogsColumns[26]},
+ Columns: []*schema.Column{UsageLogsColumns[27]},
RefColumns: []*schema.Column{AccountsColumns[0]},
OnDelete: schema.NoAction,
},
{
Symbol: "usage_logs_groups_usage_logs",
- Columns: []*schema.Column{UsageLogsColumns[27]},
+ Columns: []*schema.Column{UsageLogsColumns[28]},
RefColumns: []*schema.Column{GroupsColumns[0]},
OnDelete: schema.SetNull,
},
{
Symbol: "usage_logs_users_usage_logs",
- Columns: []*schema.Column{UsageLogsColumns[28]},
+ Columns: []*schema.Column{UsageLogsColumns[29]},
RefColumns: []*schema.Column{UsersColumns[0]},
OnDelete: schema.NoAction,
},
{
Symbol: "usage_logs_user_subscriptions_usage_logs",
- Columns: []*schema.Column{UsageLogsColumns[29]},
+ Columns: []*schema.Column{UsageLogsColumns[30]},
RefColumns: []*schema.Column{UserSubscriptionsColumns[0]},
OnDelete: schema.SetNull,
},
@@ -505,32 +507,32 @@ var (
{
Name: "usagelog_user_id",
Unique: false,
- Columns: []*schema.Column{UsageLogsColumns[28]},
+ Columns: []*schema.Column{UsageLogsColumns[29]},
},
{
Name: "usagelog_api_key_id",
Unique: false,
- Columns: []*schema.Column{UsageLogsColumns[25]},
+ Columns: []*schema.Column{UsageLogsColumns[26]},
},
{
Name: "usagelog_account_id",
Unique: false,
- Columns: []*schema.Column{UsageLogsColumns[26]},
+ Columns: []*schema.Column{UsageLogsColumns[27]},
},
{
Name: "usagelog_group_id",
Unique: false,
- Columns: []*schema.Column{UsageLogsColumns[27]},
+ Columns: []*schema.Column{UsageLogsColumns[28]},
},
{
Name: "usagelog_subscription_id",
Unique: false,
- Columns: []*schema.Column{UsageLogsColumns[29]},
+ Columns: []*schema.Column{UsageLogsColumns[30]},
},
{
Name: "usagelog_created_at",
Unique: false,
- Columns: []*schema.Column{UsageLogsColumns[24]},
+ Columns: []*schema.Column{UsageLogsColumns[25]},
},
{
Name: "usagelog_model",
@@ -545,12 +547,12 @@ var (
{
Name: "usagelog_user_id_created_at",
Unique: false,
- Columns: []*schema.Column{UsageLogsColumns[28], UsageLogsColumns[24]},
+ Columns: []*schema.Column{UsageLogsColumns[29], UsageLogsColumns[25]},
},
{
Name: "usagelog_api_key_id_created_at",
Unique: false,
- Columns: []*schema.Column{UsageLogsColumns[25], UsageLogsColumns[24]},
+ Columns: []*schema.Column{UsageLogsColumns[26], UsageLogsColumns[25]},
},
},
}
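
Adding rate_multiplier at AccountsColumns[9] shifts every later index up by one, which is why the FK and index definitions above move from 24 to 25, 12 to 13, and so on; UsageLogsColumns shifts the same way for account_rate_multiplier. A minimal sketch of rolling the change out with ent's auto-migration (client construction and import path are assumed, not part of this patch):

    func migrate(ctx context.Context, client *ent.Client) error {
    	// Schema.Create diffs the live database against the generated
    	// definitions and adds the two new decimal(10,4) columns:
    	// accounts.rate_multiplier backfills to its default (1), while
    	// usage_logs.account_rate_multiplier stays NULL on old rows.
    	return client.Schema.Create(ctx)
    }
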
diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go
index 732abd1c..3509efed 100644
--- a/backend/ent/mutation.go
+++ b/backend/ent/mutation.go
@@ -1187,6 +1187,8 @@ type AccountMutation struct {
addconcurrency *int
priority *int
addpriority *int
+ rate_multiplier *float64
+ addrate_multiplier *float64
status *string
error_message *string
last_used_at *time.Time
@@ -1822,6 +1824,62 @@ func (m *AccountMutation) ResetPriority() {
m.addpriority = nil
}
+// SetRateMultiplier sets the "rate_multiplier" field.
+func (m *AccountMutation) SetRateMultiplier(f float64) {
+ m.rate_multiplier = &f
+ m.addrate_multiplier = nil
+}
+
+// RateMultiplier returns the value of the "rate_multiplier" field in the mutation.
+func (m *AccountMutation) RateMultiplier() (r float64, exists bool) {
+ v := m.rate_multiplier
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldRateMultiplier returns the old "rate_multiplier" field's value of the Account entity.
+// If the Account object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *AccountMutation) OldRateMultiplier(ctx context.Context) (v float64, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldRateMultiplier is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldRateMultiplier requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldRateMultiplier: %w", err)
+ }
+ return oldValue.RateMultiplier, nil
+}
+
+// AddRateMultiplier adds f to the "rate_multiplier" field.
+func (m *AccountMutation) AddRateMultiplier(f float64) {
+ if m.addrate_multiplier != nil {
+ *m.addrate_multiplier += f
+ } else {
+ m.addrate_multiplier = &f
+ }
+}
+
+// AddedRateMultiplier returns the value that was added to the "rate_multiplier" field in this mutation.
+func (m *AccountMutation) AddedRateMultiplier() (r float64, exists bool) {
+ v := m.addrate_multiplier
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// ResetRateMultiplier resets all changes to the "rate_multiplier" field.
+func (m *AccountMutation) ResetRateMultiplier() {
+ m.rate_multiplier = nil
+ m.addrate_multiplier = nil
+}
+
// SetStatus sets the "status" field.
func (m *AccountMutation) SetStatus(s string) {
m.status = &s
@@ -2540,7 +2598,7 @@ func (m *AccountMutation) Type() string {
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *AccountMutation) Fields() []string {
- fields := make([]string, 0, 24)
+ fields := make([]string, 0, 25)
if m.created_at != nil {
fields = append(fields, account.FieldCreatedAt)
}
@@ -2577,6 +2635,9 @@ func (m *AccountMutation) Fields() []string {
if m.priority != nil {
fields = append(fields, account.FieldPriority)
}
+ if m.rate_multiplier != nil {
+ fields = append(fields, account.FieldRateMultiplier)
+ }
if m.status != nil {
fields = append(fields, account.FieldStatus)
}
@@ -2645,6 +2706,8 @@ func (m *AccountMutation) Field(name string) (ent.Value, bool) {
return m.Concurrency()
case account.FieldPriority:
return m.Priority()
+ case account.FieldRateMultiplier:
+ return m.RateMultiplier()
case account.FieldStatus:
return m.Status()
case account.FieldErrorMessage:
@@ -2702,6 +2765,8 @@ func (m *AccountMutation) OldField(ctx context.Context, name string) (ent.Value,
return m.OldConcurrency(ctx)
case account.FieldPriority:
return m.OldPriority(ctx)
+ case account.FieldRateMultiplier:
+ return m.OldRateMultiplier(ctx)
case account.FieldStatus:
return m.OldStatus(ctx)
case account.FieldErrorMessage:
@@ -2819,6 +2884,13 @@ func (m *AccountMutation) SetField(name string, value ent.Value) error {
}
m.SetPriority(v)
return nil
+ case account.FieldRateMultiplier:
+ v, ok := value.(float64)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetRateMultiplier(v)
+ return nil
case account.FieldStatus:
v, ok := value.(string)
if !ok {
@@ -2917,6 +2989,9 @@ func (m *AccountMutation) AddedFields() []string {
if m.addpriority != nil {
fields = append(fields, account.FieldPriority)
}
+ if m.addrate_multiplier != nil {
+ fields = append(fields, account.FieldRateMultiplier)
+ }
return fields
}
@@ -2929,6 +3004,8 @@ func (m *AccountMutation) AddedField(name string) (ent.Value, bool) {
return m.AddedConcurrency()
case account.FieldPriority:
return m.AddedPriority()
+ case account.FieldRateMultiplier:
+ return m.AddedRateMultiplier()
}
return nil, false
}
@@ -2952,6 +3029,13 @@ func (m *AccountMutation) AddField(name string, value ent.Value) error {
}
m.AddPriority(v)
return nil
+ case account.FieldRateMultiplier:
+ v, ok := value.(float64)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.AddRateMultiplier(v)
+ return nil
}
return fmt.Errorf("unknown Account numeric field %s", name)
}
@@ -3090,6 +3174,9 @@ func (m *AccountMutation) ResetField(name string) error {
case account.FieldPriority:
m.ResetPriority()
return nil
+ case account.FieldRateMultiplier:
+ m.ResetRateMultiplier()
+ return nil
case account.FieldStatus:
m.ResetStatus()
return nil
@@ -10190,6 +10277,8 @@ type UsageLogMutation struct {
addactual_cost *float64
rate_multiplier *float64
addrate_multiplier *float64
+ account_rate_multiplier *float64
+ addaccount_rate_multiplier *float64
billing_type *int8
addbilling_type *int8
stream *bool
@@ -11323,6 +11412,76 @@ func (m *UsageLogMutation) ResetRateMultiplier() {
m.addrate_multiplier = nil
}
+// SetAccountRateMultiplier sets the "account_rate_multiplier" field.
+func (m *UsageLogMutation) SetAccountRateMultiplier(f float64) {
+ m.account_rate_multiplier = &f
+ m.addaccount_rate_multiplier = nil
+}
+
+// AccountRateMultiplier returns the value of the "account_rate_multiplier" field in the mutation.
+func (m *UsageLogMutation) AccountRateMultiplier() (r float64, exists bool) {
+ v := m.account_rate_multiplier
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// OldAccountRateMultiplier returns the old "account_rate_multiplier" field's value of the UsageLog entity.
+// If the UsageLog object wasn't provided to the builder, the object is fetched from the database.
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UsageLogMutation) OldAccountRateMultiplier(ctx context.Context) (v *float64, err error) {
+ if !m.op.Is(OpUpdateOne) {
+ return v, errors.New("OldAccountRateMultiplier is only allowed on UpdateOne operations")
+ }
+ if m.id == nil || m.oldValue == nil {
+ return v, errors.New("OldAccountRateMultiplier requires an ID field in the mutation")
+ }
+ oldValue, err := m.oldValue(ctx)
+ if err != nil {
+ return v, fmt.Errorf("querying old value for OldAccountRateMultiplier: %w", err)
+ }
+ return oldValue.AccountRateMultiplier, nil
+}
+
+// AddAccountRateMultiplier adds f to the "account_rate_multiplier" field.
+func (m *UsageLogMutation) AddAccountRateMultiplier(f float64) {
+ if m.addaccount_rate_multiplier != nil {
+ *m.addaccount_rate_multiplier += f
+ } else {
+ m.addaccount_rate_multiplier = &f
+ }
+}
+
+// AddedAccountRateMultiplier returns the value that was added to the "account_rate_multiplier" field in this mutation.
+func (m *UsageLogMutation) AddedAccountRateMultiplier() (r float64, exists bool) {
+ v := m.addaccount_rate_multiplier
+ if v == nil {
+ return
+ }
+ return *v, true
+}
+
+// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field.
+func (m *UsageLogMutation) ClearAccountRateMultiplier() {
+ m.account_rate_multiplier = nil
+ m.addaccount_rate_multiplier = nil
+ m.clearedFields[usagelog.FieldAccountRateMultiplier] = struct{}{}
+}
+
+// AccountRateMultiplierCleared returns if the "account_rate_multiplier" field was cleared in this mutation.
+func (m *UsageLogMutation) AccountRateMultiplierCleared() bool {
+ _, ok := m.clearedFields[usagelog.FieldAccountRateMultiplier]
+ return ok
+}
+
+// ResetAccountRateMultiplier resets all changes to the "account_rate_multiplier" field.
+func (m *UsageLogMutation) ResetAccountRateMultiplier() {
+ m.account_rate_multiplier = nil
+ m.addaccount_rate_multiplier = nil
+ delete(m.clearedFields, usagelog.FieldAccountRateMultiplier)
+}
+
// SetBillingType sets the "billing_type" field.
func (m *UsageLogMutation) SetBillingType(i int8) {
m.billing_type = &i
@@ -11963,7 +12122,7 @@ func (m *UsageLogMutation) Type() string {
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *UsageLogMutation) Fields() []string {
- fields := make([]string, 0, 29)
+ fields := make([]string, 0, 30)
if m.user != nil {
fields = append(fields, usagelog.FieldUserID)
}
@@ -12024,6 +12183,9 @@ func (m *UsageLogMutation) Fields() []string {
if m.rate_multiplier != nil {
fields = append(fields, usagelog.FieldRateMultiplier)
}
+ if m.account_rate_multiplier != nil {
+ fields = append(fields, usagelog.FieldAccountRateMultiplier)
+ }
if m.billing_type != nil {
fields = append(fields, usagelog.FieldBillingType)
}
@@ -12099,6 +12261,8 @@ func (m *UsageLogMutation) Field(name string) (ent.Value, bool) {
return m.ActualCost()
case usagelog.FieldRateMultiplier:
return m.RateMultiplier()
+ case usagelog.FieldAccountRateMultiplier:
+ return m.AccountRateMultiplier()
case usagelog.FieldBillingType:
return m.BillingType()
case usagelog.FieldStream:
@@ -12166,6 +12330,8 @@ func (m *UsageLogMutation) OldField(ctx context.Context, name string) (ent.Value
return m.OldActualCost(ctx)
case usagelog.FieldRateMultiplier:
return m.OldRateMultiplier(ctx)
+ case usagelog.FieldAccountRateMultiplier:
+ return m.OldAccountRateMultiplier(ctx)
case usagelog.FieldBillingType:
return m.OldBillingType(ctx)
case usagelog.FieldStream:
@@ -12333,6 +12499,13 @@ func (m *UsageLogMutation) SetField(name string, value ent.Value) error {
}
m.SetRateMultiplier(v)
return nil
+ case usagelog.FieldAccountRateMultiplier:
+ v, ok := value.(float64)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.SetAccountRateMultiplier(v)
+ return nil
case usagelog.FieldBillingType:
v, ok := value.(int8)
if !ok {
@@ -12443,6 +12616,9 @@ func (m *UsageLogMutation) AddedFields() []string {
if m.addrate_multiplier != nil {
fields = append(fields, usagelog.FieldRateMultiplier)
}
+ if m.addaccount_rate_multiplier != nil {
+ fields = append(fields, usagelog.FieldAccountRateMultiplier)
+ }
if m.addbilling_type != nil {
fields = append(fields, usagelog.FieldBillingType)
}
@@ -12489,6 +12665,8 @@ func (m *UsageLogMutation) AddedField(name string) (ent.Value, bool) {
return m.AddedActualCost()
case usagelog.FieldRateMultiplier:
return m.AddedRateMultiplier()
+ case usagelog.FieldAccountRateMultiplier:
+ return m.AddedAccountRateMultiplier()
case usagelog.FieldBillingType:
return m.AddedBillingType()
case usagelog.FieldDurationMs:
@@ -12597,6 +12775,13 @@ func (m *UsageLogMutation) AddField(name string, value ent.Value) error {
}
m.AddRateMultiplier(v)
return nil
+ case usagelog.FieldAccountRateMultiplier:
+ v, ok := value.(float64)
+ if !ok {
+ return fmt.Errorf("unexpected type %T for field %s", value, name)
+ }
+ m.AddAccountRateMultiplier(v)
+ return nil
case usagelog.FieldBillingType:
v, ok := value.(int8)
if !ok {
@@ -12639,6 +12824,9 @@ func (m *UsageLogMutation) ClearedFields() []string {
if m.FieldCleared(usagelog.FieldSubscriptionID) {
fields = append(fields, usagelog.FieldSubscriptionID)
}
+ if m.FieldCleared(usagelog.FieldAccountRateMultiplier) {
+ fields = append(fields, usagelog.FieldAccountRateMultiplier)
+ }
if m.FieldCleared(usagelog.FieldDurationMs) {
fields = append(fields, usagelog.FieldDurationMs)
}
@@ -12674,6 +12862,9 @@ func (m *UsageLogMutation) ClearField(name string) error {
case usagelog.FieldSubscriptionID:
m.ClearSubscriptionID()
return nil
+ case usagelog.FieldAccountRateMultiplier:
+ m.ClearAccountRateMultiplier()
+ return nil
case usagelog.FieldDurationMs:
m.ClearDurationMs()
return nil
@@ -12757,6 +12948,9 @@ func (m *UsageLogMutation) ResetField(name string) error {
case usagelog.FieldRateMultiplier:
m.ResetRateMultiplier()
return nil
+ case usagelog.FieldAccountRateMultiplier:
+ m.ResetAccountRateMultiplier()
+ return nil
case usagelog.FieldBillingType:
m.ResetBillingType()
return nil
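
These generated accessors are also what runtime hooks observe; a sketch of a mutation-level guard against negative multipliers (hook registration and import paths assumed, not part of this patch):

    hook.On(func(next ent.Mutator) ent.Mutator {
    	return hook.AccountFunc(func(ctx context.Context, m *ent.AccountMutation) (ent.Value, error) {
    		// RateMultiplier returns (value, ok); ok is false when the field is untouched.
    		if v, ok := m.RateMultiplier(); ok && v < 0 {
    			return nil, fmt.Errorf("rate_multiplier must be >= 0, got %v", v)
    		}
    		return next.Mutate(ctx, m)
    	})
    }, ent.OpCreate|ent.OpUpdate|ent.OpUpdateOne)
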
diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go
index ad1aa626..ed13c852 100644
--- a/backend/ent/runtime/runtime.go
+++ b/backend/ent/runtime/runtime.go
@@ -177,22 +177,26 @@ func init() {
accountDescPriority := accountFields[8].Descriptor()
// account.DefaultPriority holds the default value on creation for the priority field.
account.DefaultPriority = accountDescPriority.Default.(int)
+ // accountDescRateMultiplier is the schema descriptor for rate_multiplier field.
+ accountDescRateMultiplier := accountFields[9].Descriptor()
+ // account.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field.
+ account.DefaultRateMultiplier = accountDescRateMultiplier.Default.(float64)
// accountDescStatus is the schema descriptor for status field.
- accountDescStatus := accountFields[9].Descriptor()
+ accountDescStatus := accountFields[10].Descriptor()
// account.DefaultStatus holds the default value on creation for the status field.
account.DefaultStatus = accountDescStatus.Default.(string)
// account.StatusValidator is a validator for the "status" field. It is called by the builders before save.
account.StatusValidator = accountDescStatus.Validators[0].(func(string) error)
// accountDescAutoPauseOnExpired is the schema descriptor for auto_pause_on_expired field.
- accountDescAutoPauseOnExpired := accountFields[13].Descriptor()
+ accountDescAutoPauseOnExpired := accountFields[14].Descriptor()
// account.DefaultAutoPauseOnExpired holds the default value on creation for the auto_pause_on_expired field.
account.DefaultAutoPauseOnExpired = accountDescAutoPauseOnExpired.Default.(bool)
// accountDescSchedulable is the schema descriptor for schedulable field.
- accountDescSchedulable := accountFields[14].Descriptor()
+ accountDescSchedulable := accountFields[15].Descriptor()
// account.DefaultSchedulable holds the default value on creation for the schedulable field.
account.DefaultSchedulable = accountDescSchedulable.Default.(bool)
// accountDescSessionWindowStatus is the schema descriptor for session_window_status field.
- accountDescSessionWindowStatus := accountFields[20].Descriptor()
+ accountDescSessionWindowStatus := accountFields[21].Descriptor()
// account.SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save.
account.SessionWindowStatusValidator = accountDescSessionWindowStatus.Validators[0].(func(string) error)
accountgroupFields := schema.AccountGroup{}.Fields()
@@ -578,31 +582,31 @@ func init() {
// usagelog.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field.
usagelog.DefaultRateMultiplier = usagelogDescRateMultiplier.Default.(float64)
// usagelogDescBillingType is the schema descriptor for billing_type field.
- usagelogDescBillingType := usagelogFields[20].Descriptor()
+ usagelogDescBillingType := usagelogFields[21].Descriptor()
// usagelog.DefaultBillingType holds the default value on creation for the billing_type field.
usagelog.DefaultBillingType = usagelogDescBillingType.Default.(int8)
// usagelogDescStream is the schema descriptor for stream field.
- usagelogDescStream := usagelogFields[21].Descriptor()
+ usagelogDescStream := usagelogFields[22].Descriptor()
// usagelog.DefaultStream holds the default value on creation for the stream field.
usagelog.DefaultStream = usagelogDescStream.Default.(bool)
// usagelogDescUserAgent is the schema descriptor for user_agent field.
- usagelogDescUserAgent := usagelogFields[24].Descriptor()
+ usagelogDescUserAgent := usagelogFields[25].Descriptor()
// usagelog.UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save.
usagelog.UserAgentValidator = usagelogDescUserAgent.Validators[0].(func(string) error)
// usagelogDescIPAddress is the schema descriptor for ip_address field.
- usagelogDescIPAddress := usagelogFields[25].Descriptor()
+ usagelogDescIPAddress := usagelogFields[26].Descriptor()
// usagelog.IPAddressValidator is a validator for the "ip_address" field. It is called by the builders before save.
usagelog.IPAddressValidator = usagelogDescIPAddress.Validators[0].(func(string) error)
// usagelogDescImageCount is the schema descriptor for image_count field.
- usagelogDescImageCount := usagelogFields[26].Descriptor()
+ usagelogDescImageCount := usagelogFields[27].Descriptor()
// usagelog.DefaultImageCount holds the default value on creation for the image_count field.
usagelog.DefaultImageCount = usagelogDescImageCount.Default.(int)
// usagelogDescImageSize is the schema descriptor for image_size field.
- usagelogDescImageSize := usagelogFields[27].Descriptor()
+ usagelogDescImageSize := usagelogFields[28].Descriptor()
// usagelog.ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save.
usagelog.ImageSizeValidator = usagelogDescImageSize.Validators[0].(func(string) error)
// usagelogDescCreatedAt is the schema descriptor for created_at field.
- usagelogDescCreatedAt := usagelogFields[28].Descriptor()
+ usagelogDescCreatedAt := usagelogFields[29].Descriptor()
// usagelog.DefaultCreatedAt holds the default value on creation for the created_at field.
usagelog.DefaultCreatedAt = usagelogDescCreatedAt.Default.(func() time.Time)
userMixin := schema.User{}.Mixin()
diff --git a/backend/ent/schema/account.go b/backend/ent/schema/account.go
index ec192a97..dd79ba96 100644
--- a/backend/ent/schema/account.go
+++ b/backend/ent/schema/account.go
@@ -102,6 +102,12 @@ func (Account) Fields() []ent.Field {
field.Int("priority").
Default(50),
+	// rate_multiplier: account billing multiplier (>= 0; 0 is allowed and means this account bills at zero)
+	// Affects account-dimension billing only; user/API Key charges (group multiplier) are unaffected
+ field.Float("rate_multiplier").
+ SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}).
+ Default(1.0),
+
// status: account status, e.g. "active", "error", "disabled"
field.String("status").
MaxLen(20).
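
A worked sketch of the split described in the comment above; every variable here is illustrative rather than part of the patch:

    // User/API-key charge: group multiplier only.
    actualCost := totalCost * groupRateMultiplier
    // Account-dimension accounting: account multiplier only. 0 is legal and
    // zeroes the account-side figure without touching what the user pays.
    accountCost := totalCost * accountRateMultiplier
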
diff --git a/backend/ent/schema/usage_log.go b/backend/ent/schema/usage_log.go
index 264a4087..fc7c7165 100644
--- a/backend/ent/schema/usage_log.go
+++ b/backend/ent/schema/usage_log.go
@@ -85,6 +85,12 @@ func (UsageLog) Fields() []ent.Field {
Default(1).
SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}),
+	// account_rate_multiplier: snapshot of the account billing multiplier (NULL is treated as 1.0)
+ field.Float("account_rate_multiplier").
+ Optional().
+ Nillable().
+ SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}),
+
// other fields
field.Int8("billing_type").
Default(0),
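
Reading the snapshot back has to honor the NULL-is-1.0 convention; a helper sketch (the function name is hypothetical):

    func effectiveAccountRate(l *ent.UsageLog) float64 {
    	if l.AccountRateMultiplier == nil {
    		// Rows written before this column existed carry no snapshot;
    		// treat them as the neutral multiplier.
    		return 1.0
    	}
    	return *l.AccountRateMultiplier
    }
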
diff --git a/backend/ent/usagelog.go b/backend/ent/usagelog.go
index cd576466..81c466b4 100644
--- a/backend/ent/usagelog.go
+++ b/backend/ent/usagelog.go
@@ -62,6 +62,8 @@ type UsageLog struct {
ActualCost float64 `json:"actual_cost,omitempty"`
// RateMultiplier holds the value of the "rate_multiplier" field.
RateMultiplier float64 `json:"rate_multiplier,omitempty"`
+ // AccountRateMultiplier holds the value of the "account_rate_multiplier" field.
+ AccountRateMultiplier *float64 `json:"account_rate_multiplier,omitempty"`
// BillingType holds the value of the "billing_type" field.
BillingType int8 `json:"billing_type,omitempty"`
// Stream holds the value of the "stream" field.
@@ -165,7 +167,7 @@ func (*UsageLog) scanValues(columns []string) ([]any, error) {
switch columns[i] {
case usagelog.FieldStream:
values[i] = new(sql.NullBool)
- case usagelog.FieldInputCost, usagelog.FieldOutputCost, usagelog.FieldCacheCreationCost, usagelog.FieldCacheReadCost, usagelog.FieldTotalCost, usagelog.FieldActualCost, usagelog.FieldRateMultiplier:
+ case usagelog.FieldInputCost, usagelog.FieldOutputCost, usagelog.FieldCacheCreationCost, usagelog.FieldCacheReadCost, usagelog.FieldTotalCost, usagelog.FieldActualCost, usagelog.FieldRateMultiplier, usagelog.FieldAccountRateMultiplier:
values[i] = new(sql.NullFloat64)
case usagelog.FieldID, usagelog.FieldUserID, usagelog.FieldAPIKeyID, usagelog.FieldAccountID, usagelog.FieldGroupID, usagelog.FieldSubscriptionID, usagelog.FieldInputTokens, usagelog.FieldOutputTokens, usagelog.FieldCacheCreationTokens, usagelog.FieldCacheReadTokens, usagelog.FieldCacheCreation5mTokens, usagelog.FieldCacheCreation1hTokens, usagelog.FieldBillingType, usagelog.FieldDurationMs, usagelog.FieldFirstTokenMs, usagelog.FieldImageCount:
values[i] = new(sql.NullInt64)
@@ -316,6 +318,13 @@ func (_m *UsageLog) assignValues(columns []string, values []any) error {
} else if value.Valid {
_m.RateMultiplier = value.Float64
}
+ case usagelog.FieldAccountRateMultiplier:
+ if value, ok := values[i].(*sql.NullFloat64); !ok {
+ return fmt.Errorf("unexpected type %T for field account_rate_multiplier", values[i])
+ } else if value.Valid {
+ _m.AccountRateMultiplier = new(float64)
+ *_m.AccountRateMultiplier = value.Float64
+ }
case usagelog.FieldBillingType:
if value, ok := values[i].(*sql.NullInt64); !ok {
return fmt.Errorf("unexpected type %T for field billing_type", values[i])
@@ -500,6 +509,11 @@ func (_m *UsageLog) String() string {
builder.WriteString("rate_multiplier=")
builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier))
builder.WriteString(", ")
+ if v := _m.AccountRateMultiplier; v != nil {
+ builder.WriteString("account_rate_multiplier=")
+ builder.WriteString(fmt.Sprintf("%v", *v))
+ }
+ builder.WriteString(", ")
builder.WriteString("billing_type=")
builder.WriteString(fmt.Sprintf("%v", _m.BillingType))
builder.WriteString(", ")
diff --git a/backend/ent/usagelog/usagelog.go b/backend/ent/usagelog/usagelog.go
index c06925c4..980f1e58 100644
--- a/backend/ent/usagelog/usagelog.go
+++ b/backend/ent/usagelog/usagelog.go
@@ -54,6 +54,8 @@ const (
FieldActualCost = "actual_cost"
// FieldRateMultiplier holds the string denoting the rate_multiplier field in the database.
FieldRateMultiplier = "rate_multiplier"
+ // FieldAccountRateMultiplier holds the string denoting the account_rate_multiplier field in the database.
+ FieldAccountRateMultiplier = "account_rate_multiplier"
// FieldBillingType holds the string denoting the billing_type field in the database.
FieldBillingType = "billing_type"
// FieldStream holds the string denoting the stream field in the database.
@@ -144,6 +146,7 @@ var Columns = []string{
FieldTotalCost,
FieldActualCost,
FieldRateMultiplier,
+ FieldAccountRateMultiplier,
FieldBillingType,
FieldStream,
FieldDurationMs,
@@ -320,6 +323,11 @@ func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc()
}
+// ByAccountRateMultiplier orders the results by the account_rate_multiplier field.
+func ByAccountRateMultiplier(opts ...sql.OrderTermOption) OrderOption {
+ return sql.OrderByField(FieldAccountRateMultiplier, opts...).ToFunc()
+}
+
// ByBillingType orders the results by the billing_type field.
func ByBillingType(opts ...sql.OrderTermOption) OrderOption {
return sql.OrderByField(FieldBillingType, opts...).ToFunc()
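
The new order option composes like its siblings; for example (client, ctx, and the dialect/sql import are assumed):

    logs, err := client.UsageLog.Query().
    	Order(usagelog.ByAccountRateMultiplier(sql.OrderDesc())).
    	Limit(10).
    	All(ctx)
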
diff --git a/backend/ent/usagelog/where.go b/backend/ent/usagelog/where.go
index 96b7a19c..28e2ab4c 100644
--- a/backend/ent/usagelog/where.go
+++ b/backend/ent/usagelog/where.go
@@ -155,6 +155,11 @@ func RateMultiplier(v float64) predicate.UsageLog {
return predicate.UsageLog(sql.FieldEQ(FieldRateMultiplier, v))
}
+// AccountRateMultiplier applies equality check predicate on the "account_rate_multiplier" field. It's identical to AccountRateMultiplierEQ.
+func AccountRateMultiplier(v float64) predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldEQ(FieldAccountRateMultiplier, v))
+}
+
// BillingType applies equality check predicate on the "billing_type" field. It's identical to BillingTypeEQ.
func BillingType(v int8) predicate.UsageLog {
return predicate.UsageLog(sql.FieldEQ(FieldBillingType, v))
@@ -970,6 +975,56 @@ func RateMultiplierLTE(v float64) predicate.UsageLog {
return predicate.UsageLog(sql.FieldLTE(FieldRateMultiplier, v))
}
+// AccountRateMultiplierEQ applies the EQ predicate on the "account_rate_multiplier" field.
+func AccountRateMultiplierEQ(v float64) predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldEQ(FieldAccountRateMultiplier, v))
+}
+
+// AccountRateMultiplierNEQ applies the NEQ predicate on the "account_rate_multiplier" field.
+func AccountRateMultiplierNEQ(v float64) predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldNEQ(FieldAccountRateMultiplier, v))
+}
+
+// AccountRateMultiplierIn applies the In predicate on the "account_rate_multiplier" field.
+func AccountRateMultiplierIn(vs ...float64) predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldIn(FieldAccountRateMultiplier, vs...))
+}
+
+// AccountRateMultiplierNotIn applies the NotIn predicate on the "account_rate_multiplier" field.
+func AccountRateMultiplierNotIn(vs ...float64) predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldNotIn(FieldAccountRateMultiplier, vs...))
+}
+
+// AccountRateMultiplierGT applies the GT predicate on the "account_rate_multiplier" field.
+func AccountRateMultiplierGT(v float64) predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldGT(FieldAccountRateMultiplier, v))
+}
+
+// AccountRateMultiplierGTE applies the GTE predicate on the "account_rate_multiplier" field.
+func AccountRateMultiplierGTE(v float64) predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldGTE(FieldAccountRateMultiplier, v))
+}
+
+// AccountRateMultiplierLT applies the LT predicate on the "account_rate_multiplier" field.
+func AccountRateMultiplierLT(v float64) predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldLT(FieldAccountRateMultiplier, v))
+}
+
+// AccountRateMultiplierLTE applies the LTE predicate on the "account_rate_multiplier" field.
+func AccountRateMultiplierLTE(v float64) predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldLTE(FieldAccountRateMultiplier, v))
+}
+
+// AccountRateMultiplierIsNil applies the IsNil predicate on the "account_rate_multiplier" field.
+func AccountRateMultiplierIsNil() predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldIsNull(FieldAccountRateMultiplier))
+}
+
+// AccountRateMultiplierNotNil applies the NotNil predicate on the "account_rate_multiplier" field.
+func AccountRateMultiplierNotNil() predicate.UsageLog {
+ return predicate.UsageLog(sql.FieldNotNull(FieldAccountRateMultiplier))
+}
+
// BillingTypeEQ applies the EQ predicate on the "billing_type" field.
func BillingTypeEQ(v int8) predicate.UsageLog {
return predicate.UsageLog(sql.FieldEQ(FieldBillingType, v))
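
The IsNil/NotNil pair is what separates legacy rows from explicit snapshots, e.g. (client and ctx assumed):

    // Count usage logs that predate the snapshot column.
    legacy, err := client.UsageLog.Query().
    	Where(usagelog.AccountRateMultiplierIsNil()).
    	Count(ctx)
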
diff --git a/backend/ent/usagelog_create.go b/backend/ent/usagelog_create.go
index e63fab05..a17d6507 100644
--- a/backend/ent/usagelog_create.go
+++ b/backend/ent/usagelog_create.go
@@ -267,6 +267,20 @@ func (_c *UsageLogCreate) SetNillableRateMultiplier(v *float64) *UsageLogCreate
return _c
}
+// SetAccountRateMultiplier sets the "account_rate_multiplier" field.
+func (_c *UsageLogCreate) SetAccountRateMultiplier(v float64) *UsageLogCreate {
+ _c.mutation.SetAccountRateMultiplier(v)
+ return _c
+}
+
+// SetNillableAccountRateMultiplier sets the "account_rate_multiplier" field if the given value is not nil.
+func (_c *UsageLogCreate) SetNillableAccountRateMultiplier(v *float64) *UsageLogCreate {
+ if v != nil {
+ _c.SetAccountRateMultiplier(*v)
+ }
+ return _c
+}
+
// SetBillingType sets the "billing_type" field.
func (_c *UsageLogCreate) SetBillingType(v int8) *UsageLogCreate {
_c.mutation.SetBillingType(v)
@@ -712,6 +726,10 @@ func (_c *UsageLogCreate) createSpec() (*UsageLog, *sqlgraph.CreateSpec) {
_spec.SetField(usagelog.FieldRateMultiplier, field.TypeFloat64, value)
_node.RateMultiplier = value
}
+ if value, ok := _c.mutation.AccountRateMultiplier(); ok {
+ _spec.SetField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value)
+ _node.AccountRateMultiplier = &value
+ }
if value, ok := _c.mutation.BillingType(); ok {
_spec.SetField(usagelog.FieldBillingType, field.TypeInt8, value)
_node.BillingType = value
@@ -1215,6 +1233,30 @@ func (u *UsageLogUpsert) AddRateMultiplier(v float64) *UsageLogUpsert {
return u
}
+// SetAccountRateMultiplier sets the "account_rate_multiplier" field.
+func (u *UsageLogUpsert) SetAccountRateMultiplier(v float64) *UsageLogUpsert {
+ u.Set(usagelog.FieldAccountRateMultiplier, v)
+ return u
+}
+
+// UpdateAccountRateMultiplier sets the "account_rate_multiplier" field to the value that was provided on create.
+func (u *UsageLogUpsert) UpdateAccountRateMultiplier() *UsageLogUpsert {
+ u.SetExcluded(usagelog.FieldAccountRateMultiplier)
+ return u
+}
+
+// AddAccountRateMultiplier adds v to the "account_rate_multiplier" field.
+func (u *UsageLogUpsert) AddAccountRateMultiplier(v float64) *UsageLogUpsert {
+ u.Add(usagelog.FieldAccountRateMultiplier, v)
+ return u
+}
+
+// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field.
+func (u *UsageLogUpsert) ClearAccountRateMultiplier() *UsageLogUpsert {
+ u.SetNull(usagelog.FieldAccountRateMultiplier)
+ return u
+}
+
// SetBillingType sets the "billing_type" field.
func (u *UsageLogUpsert) SetBillingType(v int8) *UsageLogUpsert {
u.Set(usagelog.FieldBillingType, v)
@@ -1795,6 +1837,34 @@ func (u *UsageLogUpsertOne) UpdateRateMultiplier() *UsageLogUpsertOne {
})
}
+// SetAccountRateMultiplier sets the "account_rate_multiplier" field.
+func (u *UsageLogUpsertOne) SetAccountRateMultiplier(v float64) *UsageLogUpsertOne {
+ return u.Update(func(s *UsageLogUpsert) {
+ s.SetAccountRateMultiplier(v)
+ })
+}
+
+// AddAccountRateMultiplier adds v to the "account_rate_multiplier" field.
+func (u *UsageLogUpsertOne) AddAccountRateMultiplier(v float64) *UsageLogUpsertOne {
+ return u.Update(func(s *UsageLogUpsert) {
+ s.AddAccountRateMultiplier(v)
+ })
+}
+
+// UpdateAccountRateMultiplier sets the "account_rate_multiplier" field to the value that was provided on create.
+func (u *UsageLogUpsertOne) UpdateAccountRateMultiplier() *UsageLogUpsertOne {
+ return u.Update(func(s *UsageLogUpsert) {
+ s.UpdateAccountRateMultiplier()
+ })
+}
+
+// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field.
+func (u *UsageLogUpsertOne) ClearAccountRateMultiplier() *UsageLogUpsertOne {
+ return u.Update(func(s *UsageLogUpsert) {
+ s.ClearAccountRateMultiplier()
+ })
+}
+
// SetBillingType sets the "billing_type" field.
func (u *UsageLogUpsertOne) SetBillingType(v int8) *UsageLogUpsertOne {
return u.Update(func(s *UsageLogUpsert) {
@@ -2566,6 +2636,34 @@ func (u *UsageLogUpsertBulk) UpdateRateMultiplier() *UsageLogUpsertBulk {
})
}
+// SetAccountRateMultiplier sets the "account_rate_multiplier" field.
+func (u *UsageLogUpsertBulk) SetAccountRateMultiplier(v float64) *UsageLogUpsertBulk {
+ return u.Update(func(s *UsageLogUpsert) {
+ s.SetAccountRateMultiplier(v)
+ })
+}
+
+// AddAccountRateMultiplier adds v to the "account_rate_multiplier" field.
+func (u *UsageLogUpsertBulk) AddAccountRateMultiplier(v float64) *UsageLogUpsertBulk {
+ return u.Update(func(s *UsageLogUpsert) {
+ s.AddAccountRateMultiplier(v)
+ })
+}
+
+// UpdateAccountRateMultiplier sets the "account_rate_multiplier" field to the value that was provided on create.
+func (u *UsageLogUpsertBulk) UpdateAccountRateMultiplier() *UsageLogUpsertBulk {
+ return u.Update(func(s *UsageLogUpsert) {
+ s.UpdateAccountRateMultiplier()
+ })
+}
+
+// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field.
+func (u *UsageLogUpsertBulk) ClearAccountRateMultiplier() *UsageLogUpsertBulk {
+ return u.Update(func(s *UsageLogUpsert) {
+ s.ClearAccountRateMultiplier()
+ })
+}
+
// SetBillingType sets the "billing_type" field.
func (u *UsageLogUpsertBulk) SetBillingType(v int8) *UsageLogUpsertBulk {
return u.Update(func(s *UsageLogUpsert) {
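
A creation sketch with both multipliers side by side; acct, groupRate, client, and ctx are assumptions, and the remaining required setters are elided:

    entry, err := client.UsageLog.Create().
    	SetRateMultiplier(groupRate).                  // group multiplier, drives user charges
    	SetAccountRateMultiplier(acct.RateMultiplier). // snapshot taken at request time
    	Save(ctx)
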
diff --git a/backend/ent/usagelog_update.go b/backend/ent/usagelog_update.go
index ec2acbbb..571a7b3c 100644
--- a/backend/ent/usagelog_update.go
+++ b/backend/ent/usagelog_update.go
@@ -415,6 +415,33 @@ func (_u *UsageLogUpdate) AddRateMultiplier(v float64) *UsageLogUpdate {
return _u
}
+// SetAccountRateMultiplier sets the "account_rate_multiplier" field.
+func (_u *UsageLogUpdate) SetAccountRateMultiplier(v float64) *UsageLogUpdate {
+ _u.mutation.ResetAccountRateMultiplier()
+ _u.mutation.SetAccountRateMultiplier(v)
+ return _u
+}
+
+// SetNillableAccountRateMultiplier sets the "account_rate_multiplier" field if the given value is not nil.
+func (_u *UsageLogUpdate) SetNillableAccountRateMultiplier(v *float64) *UsageLogUpdate {
+ if v != nil {
+ _u.SetAccountRateMultiplier(*v)
+ }
+ return _u
+}
+
+// AddAccountRateMultiplier adds value to the "account_rate_multiplier" field.
+func (_u *UsageLogUpdate) AddAccountRateMultiplier(v float64) *UsageLogUpdate {
+ _u.mutation.AddAccountRateMultiplier(v)
+ return _u
+}
+
+// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field.
+func (_u *UsageLogUpdate) ClearAccountRateMultiplier() *UsageLogUpdate {
+ _u.mutation.ClearAccountRateMultiplier()
+ return _u
+}
+
// SetBillingType sets the "billing_type" field.
func (_u *UsageLogUpdate) SetBillingType(v int8) *UsageLogUpdate {
_u.mutation.ResetBillingType()
@@ -807,6 +834,15 @@ func (_u *UsageLogUpdate) sqlSave(ctx context.Context) (_node int, err error) {
if value, ok := _u.mutation.AddedRateMultiplier(); ok {
_spec.AddField(usagelog.FieldRateMultiplier, field.TypeFloat64, value)
}
+ if value, ok := _u.mutation.AccountRateMultiplier(); ok {
+ _spec.SetField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value)
+ }
+ if value, ok := _u.mutation.AddedAccountRateMultiplier(); ok {
+ _spec.AddField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value)
+ }
+ if _u.mutation.AccountRateMultiplierCleared() {
+ _spec.ClearField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64)
+ }
if value, ok := _u.mutation.BillingType(); ok {
_spec.SetField(usagelog.FieldBillingType, field.TypeInt8, value)
}
@@ -1406,6 +1442,33 @@ func (_u *UsageLogUpdateOne) AddRateMultiplier(v float64) *UsageLogUpdateOne {
return _u
}
+// SetAccountRateMultiplier sets the "account_rate_multiplier" field.
+func (_u *UsageLogUpdateOne) SetAccountRateMultiplier(v float64) *UsageLogUpdateOne {
+ _u.mutation.ResetAccountRateMultiplier()
+ _u.mutation.SetAccountRateMultiplier(v)
+ return _u
+}
+
+// SetNillableAccountRateMultiplier sets the "account_rate_multiplier" field if the given value is not nil.
+func (_u *UsageLogUpdateOne) SetNillableAccountRateMultiplier(v *float64) *UsageLogUpdateOne {
+ if v != nil {
+ _u.SetAccountRateMultiplier(*v)
+ }
+ return _u
+}
+
+// AddAccountRateMultiplier adds value to the "account_rate_multiplier" field.
+func (_u *UsageLogUpdateOne) AddAccountRateMultiplier(v float64) *UsageLogUpdateOne {
+ _u.mutation.AddAccountRateMultiplier(v)
+ return _u
+}
+
+// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field.
+func (_u *UsageLogUpdateOne) ClearAccountRateMultiplier() *UsageLogUpdateOne {
+ _u.mutation.ClearAccountRateMultiplier()
+ return _u
+}
+
// SetBillingType sets the "billing_type" field.
func (_u *UsageLogUpdateOne) SetBillingType(v int8) *UsageLogUpdateOne {
_u.mutation.ResetBillingType()
@@ -1828,6 +1891,15 @@ func (_u *UsageLogUpdateOne) sqlSave(ctx context.Context) (_node *UsageLog, err
if value, ok := _u.mutation.AddedRateMultiplier(); ok {
_spec.AddField(usagelog.FieldRateMultiplier, field.TypeFloat64, value)
}
+ if value, ok := _u.mutation.AccountRateMultiplier(); ok {
+ _spec.SetField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value)
+ }
+ if value, ok := _u.mutation.AddedAccountRateMultiplier(); ok {
+ _spec.AddField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value)
+ }
+ if _u.mutation.AccountRateMultiplierCleared() {
+ _spec.ClearField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64)
+ }
if value, ok := _u.mutation.BillingType(); ok {
_spec.SetField(usagelog.FieldBillingType, field.TypeInt8, value)
}
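
Clear reverts the snapshot to NULL, which readers then treat as 1.0; a one-off fix-up might look like this (id and ctx assumed):

    err := client.UsageLog.UpdateOneID(id).
    	ClearAccountRateMultiplier().
    	Exec(ctx)
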
diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go
index 8a7270e5..92fdf2eb 100644
--- a/backend/internal/handler/admin/account_handler.go
+++ b/backend/internal/handler/admin/account_handler.go
@@ -84,6 +84,7 @@ type CreateAccountRequest struct {
ProxyID *int64 `json:"proxy_id"`
Concurrency int `json:"concurrency"`
Priority int `json:"priority"`
+ RateMultiplier *float64 `json:"rate_multiplier"`
GroupIDs []int64 `json:"group_ids"`
ExpiresAt *int64 `json:"expires_at"`
AutoPauseOnExpired *bool `json:"auto_pause_on_expired"`
@@ -101,6 +102,7 @@ type UpdateAccountRequest struct {
ProxyID *int64 `json:"proxy_id"`
Concurrency *int `json:"concurrency"`
Priority *int `json:"priority"`
+ RateMultiplier *float64 `json:"rate_multiplier"`
Status string `json:"status" binding:"omitempty,oneof=active inactive"`
GroupIDs *[]int64 `json:"group_ids"`
ExpiresAt *int64 `json:"expires_at"`
@@ -115,6 +117,7 @@ type BulkUpdateAccountsRequest struct {
ProxyID *int64 `json:"proxy_id"`
Concurrency *int `json:"concurrency"`
Priority *int `json:"priority"`
+ RateMultiplier *float64 `json:"rate_multiplier"`
Status string `json:"status" binding:"omitempty,oneof=active inactive error"`
Schedulable *bool `json:"schedulable"`
GroupIDs *[]int64 `json:"group_ids"`
@@ -199,6 +202,10 @@ func (h *AccountHandler) Create(c *gin.Context) {
response.BadRequest(c, "Invalid request: "+err.Error())
return
}
+ if req.RateMultiplier != nil && *req.RateMultiplier < 0 {
+ response.BadRequest(c, "rate_multiplier must be >= 0")
+ return
+ }
// Determine whether to skip the mixed-channel check
skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk
@@ -213,6 +220,7 @@ func (h *AccountHandler) Create(c *gin.Context) {
ProxyID: req.ProxyID,
Concurrency: req.Concurrency,
Priority: req.Priority,
+ RateMultiplier: req.RateMultiplier,
GroupIDs: req.GroupIDs,
ExpiresAt: req.ExpiresAt,
AutoPauseOnExpired: req.AutoPauseOnExpired,
@@ -258,6 +266,10 @@ func (h *AccountHandler) Update(c *gin.Context) {
response.BadRequest(c, "Invalid request: "+err.Error())
return
}
+ if req.RateMultiplier != nil && *req.RateMultiplier < 0 {
+ response.BadRequest(c, "rate_multiplier must be >= 0")
+ return
+ }
// Determine whether to skip the mixed-channel check
skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk
@@ -271,6 +283,7 @@ func (h *AccountHandler) Update(c *gin.Context) {
ProxyID: req.ProxyID,
Concurrency: req.Concurrency, // pointer type; nil means not provided
Priority: req.Priority, // pointer type; nil means not provided
+ RateMultiplier: req.RateMultiplier,
Status: req.Status,
GroupIDs: req.GroupIDs,
ExpiresAt: req.ExpiresAt,
@@ -652,6 +665,10 @@ func (h *AccountHandler) BulkUpdate(c *gin.Context) {
response.BadRequest(c, "Invalid request: "+err.Error())
return
}
+ if req.RateMultiplier != nil && *req.RateMultiplier < 0 {
+ response.BadRequest(c, "rate_multiplier must be >= 0")
+ return
+ }
// Determine whether to skip the mixed-channel check
skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk
@@ -660,6 +677,7 @@ func (h *AccountHandler) BulkUpdate(c *gin.Context) {
req.ProxyID != nil ||
req.Concurrency != nil ||
req.Priority != nil ||
+ req.RateMultiplier != nil ||
req.Status != "" ||
req.Schedulable != nil ||
req.GroupIDs != nil ||
@@ -677,6 +695,7 @@ func (h *AccountHandler) BulkUpdate(c *gin.Context) {
ProxyID: req.ProxyID,
Concurrency: req.Concurrency,
Priority: req.Priority,
+ RateMultiplier: req.RateMultiplier,
Status: req.Status,
Schedulable: req.Schedulable,
GroupIDs: req.GroupIDs,
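
The same `>= 0` guard now appears in Create, Update, and BulkUpdate; a small helper (hypothetical, not in this patch) would keep the three call sites in sync:

    func validateRateMultiplier(v *float64) error {
    	if v != nil && *v < 0 {
    		return errors.New("rate_multiplier must be >= 0")
    	}
    	return nil // nil means "not provided", which is always acceptable
    }
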
diff --git a/backend/internal/handler/admin/dashboard_handler.go b/backend/internal/handler/admin/dashboard_handler.go
index 9b675974..3f07403d 100644
--- a/backend/internal/handler/admin/dashboard_handler.go
+++ b/backend/internal/handler/admin/dashboard_handler.go
@@ -186,13 +186,16 @@ func (h *DashboardHandler) GetRealtimeMetrics(c *gin.Context) {
// GetUsageTrend handles getting usage trend data
// GET /api/v1/admin/dashboard/trend
-// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id
+// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, stream
func (h *DashboardHandler) GetUsageTrend(c *gin.Context) {
startTime, endTime := parseTimeRange(c)
granularity := c.DefaultQuery("granularity", "day")
// Parse optional filter params
- var userID, apiKeyID int64
+ var userID, apiKeyID, accountID, groupID int64
+ var model string
+ var stream *bool
+
if userIDStr := c.Query("user_id"); userIDStr != "" {
if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil {
userID = id
@@ -203,8 +206,26 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) {
apiKeyID = id
}
}
+ if accountIDStr := c.Query("account_id"); accountIDStr != "" {
+ if id, err := strconv.ParseInt(accountIDStr, 10, 64); err == nil {
+ accountID = id
+ }
+ }
+ if groupIDStr := c.Query("group_id"); groupIDStr != "" {
+ if id, err := strconv.ParseInt(groupIDStr, 10, 64); err == nil {
+ groupID = id
+ }
+ }
+ if modelStr := c.Query("model"); modelStr != "" {
+ model = modelStr
+ }
+ if streamStr := c.Query("stream"); streamStr != "" {
+ if streamVal, err := strconv.ParseBool(streamStr); err == nil {
+ stream = &streamVal
+ }
+ }
- trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID)
+ trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream)
if err != nil {
response.Error(c, 500, "Failed to get usage trend")
return
@@ -220,12 +241,14 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) {
// GetModelStats handles getting model usage statistics
// GET /api/v1/admin/dashboard/models
-// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id
+// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, stream
func (h *DashboardHandler) GetModelStats(c *gin.Context) {
startTime, endTime := parseTimeRange(c)
// Parse optional filter params
- var userID, apiKeyID int64
+ var userID, apiKeyID, accountID, groupID int64
+ var stream *bool
+
if userIDStr := c.Query("user_id"); userIDStr != "" {
if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil {
userID = id
@@ -236,8 +259,23 @@ func (h *DashboardHandler) GetModelStats(c *gin.Context) {
apiKeyID = id
}
}
+ if accountIDStr := c.Query("account_id"); accountIDStr != "" {
+ if id, err := strconv.ParseInt(accountIDStr, 10, 64); err == nil {
+ accountID = id
+ }
+ }
+ if groupIDStr := c.Query("group_id"); groupIDStr != "" {
+ if id, err := strconv.ParseInt(groupIDStr, 10, 64); err == nil {
+ groupID = id
+ }
+ }
+ if streamStr := c.Query("stream"); streamStr != "" {
+ if streamVal, err := strconv.ParseBool(streamStr); err == nil {
+ stream = &streamVal
+ }
+ }
- stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID)
+ stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, stream)
if err != nil {
response.Error(c, 500, "Failed to get model statistics")
return
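
A caller-side sketch of the widened filter surface; the values are illustrative, unparseable numbers are silently dropped by the handler, and stream follows strconv.ParseBool semantics:

    q := url.Values{}
    q.Set("granularity", "hour")
    q.Set("account_id", "42")
    q.Set("group_id", "7")
    q.Set("model", "claude-sonnet-4") // hypothetical model name
    q.Set("stream", "true")           // ParseBool also accepts 1/t/TRUE etc.
    trendURL := "/api/v1/admin/dashboard/trend?" + q.Encode()
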
diff --git a/backend/internal/handler/admin/ops_alerts_handler.go b/backend/internal/handler/admin/ops_alerts_handler.go
index 1e33ddd5..c9da19c7 100644
--- a/backend/internal/handler/admin/ops_alerts_handler.go
+++ b/backend/internal/handler/admin/ops_alerts_handler.go
@@ -7,8 +7,10 @@ import (
"net/http"
"strconv"
"strings"
+ "time"
"github.com/Wei-Shaw/sub2api/internal/pkg/response"
+ "github.com/Wei-Shaw/sub2api/internal/server/middleware"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/gin-gonic/gin"
"github.com/gin-gonic/gin/binding"
@@ -18,8 +20,6 @@ var validOpsAlertMetricTypes = []string{
"success_rate",
"error_rate",
"upstream_error_rate",
- "p95_latency_ms",
- "p99_latency_ms",
"cpu_usage_percent",
"memory_usage_percent",
"concurrency_queue_depth",
@@ -372,8 +372,135 @@ func (h *OpsHandler) DeleteAlertRule(c *gin.Context) {
response.Success(c, gin.H{"deleted": true})
}
+// GetAlertEvent returns a single ops alert event.
+// GET /api/v1/admin/ops/alert-events/:id
+func (h *OpsHandler) GetAlertEvent(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ id, err := strconv.ParseInt(c.Param("id"), 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid event ID")
+ return
+ }
+
+ ev, err := h.opsService.GetAlertEventByID(c.Request.Context(), id)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+ response.Success(c, ev)
+}
+
+// UpdateAlertEventStatus updates an ops alert event status.
+// PUT /api/v1/admin/ops/alert-events/:id/status
+func (h *OpsHandler) UpdateAlertEventStatus(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ id, err := strconv.ParseInt(c.Param("id"), 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid event ID")
+ return
+ }
+
+ var payload struct {
+ Status string `json:"status"`
+ }
+ if err := c.ShouldBindJSON(&payload); err != nil {
+ response.BadRequest(c, "Invalid request body")
+ return
+ }
+ payload.Status = strings.TrimSpace(payload.Status)
+ if payload.Status == "" {
+ response.BadRequest(c, "Invalid status")
+ return
+ }
+ if payload.Status != service.OpsAlertStatusResolved && payload.Status != service.OpsAlertStatusManualResolved {
+ response.BadRequest(c, "Invalid status")
+ return
+ }
+
+ var resolvedAt *time.Time
+ if payload.Status == service.OpsAlertStatusResolved || payload.Status == service.OpsAlertStatusManualResolved {
+ now := time.Now().UTC()
+ resolvedAt = &now
+ }
+ if err := h.opsService.UpdateAlertEventStatus(c.Request.Context(), id, payload.Status, resolvedAt); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+ response.Success(c, gin.H{"updated": true})
+}
+
+// CreateAlertSilence creates a scoped silence for ops alerts.
+// POST /api/v1/admin/ops/alert-silences
+func (h *OpsHandler) CreateAlertSilence(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ var payload struct {
+ RuleID int64 `json:"rule_id"`
+ Platform string `json:"platform"`
+ GroupID *int64 `json:"group_id"`
+ Region *string `json:"region"`
+ Until string `json:"until"`
+ Reason string `json:"reason"`
+ }
+ if err := c.ShouldBindJSON(&payload); err != nil {
+ response.BadRequest(c, "Invalid request body")
+ return
+ }
+ until, err := time.Parse(time.RFC3339, strings.TrimSpace(payload.Until))
+ if err != nil {
+ response.BadRequest(c, "Invalid until")
+ return
+ }
+
+ createdBy := (*int64)(nil)
+ if subject, ok := middleware.GetAuthSubjectFromContext(c); ok {
+ uid := subject.UserID
+ createdBy = &uid
+ }
+
+ silence := &service.OpsAlertSilence{
+ RuleID: payload.RuleID,
+ Platform: strings.TrimSpace(payload.Platform),
+ GroupID: payload.GroupID,
+ Region: payload.Region,
+ Until: until,
+ Reason: strings.TrimSpace(payload.Reason),
+ CreatedBy: createdBy,
+ }
+
+ created, err := h.opsService.CreateAlertSilence(c.Request.Context(), silence)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+ response.Success(c, created)
+}
+
// ListAlertEvents lists recent ops alert events.
// GET /api/v1/admin/ops/alert-events
func (h *OpsHandler) ListAlertEvents(c *gin.Context) {
if h.opsService == nil {
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
@@ -384,7 +511,7 @@ func (h *OpsHandler) ListAlertEvents(c *gin.Context) {
return
}
- limit := 100
+ limit := 20
if raw := strings.TrimSpace(c.Query("limit")); raw != "" {
n, err := strconv.Atoi(raw)
if err != nil || n <= 0 {
@@ -400,6 +527,49 @@ func (h *OpsHandler) ListAlertEvents(c *gin.Context) {
Severity: strings.TrimSpace(c.Query("severity")),
}
+ if v := strings.TrimSpace(c.Query("email_sent")); v != "" {
+ vv := strings.ToLower(v)
+ switch vv {
+ case "true", "1":
+ b := true
+ filter.EmailSent = &b
+ case "false", "0":
+ b := false
+ filter.EmailSent = &b
+ default:
+ response.BadRequest(c, "Invalid email_sent")
+ return
+ }
+ }
+
+ // Cursor pagination: both params must be provided together.
+ rawTS := strings.TrimSpace(c.Query("before_fired_at"))
+ rawID := strings.TrimSpace(c.Query("before_id"))
+ if (rawTS == "") != (rawID == "") {
+ response.BadRequest(c, "before_fired_at and before_id must be provided together")
+ return
+ }
+ if rawTS != "" {
+ ts, err := time.Parse(time.RFC3339Nano, rawTS)
+ if err != nil {
+ if t2, err2 := time.Parse(time.RFC3339, rawTS); err2 == nil {
+ ts = t2
+ } else {
+ response.BadRequest(c, "Invalid before_fired_at")
+ return
+ }
+ }
+ filter.BeforeFiredAt = &ts
+ }
+ if rawID != "" {
+ id, err := strconv.ParseInt(rawID, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid before_id")
+ return
+ }
+ filter.BeforeID = &id
+ }
+
// Optional global filter support (platform/group/time range).
if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
filter.Platform = platform
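
Because before_fired_at and before_id must travel together, the next page is built from the last row of the current one; a sketch with an assumed event shape (FiredAt time.Time, ID int64):

    last := events[len(events)-1]
    nextURL := fmt.Sprintf(
    	"/api/v1/admin/ops/alert-events?limit=20&before_fired_at=%s&before_id=%d",
    	url.QueryEscape(last.FiredAt.UTC().Format(time.RFC3339Nano)), last.ID,
    )
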
diff --git a/backend/internal/handler/admin/ops_handler.go b/backend/internal/handler/admin/ops_handler.go
index bff7426a..44accc8f 100644
--- a/backend/internal/handler/admin/ops_handler.go
+++ b/backend/internal/handler/admin/ops_handler.go
@@ -19,6 +19,57 @@ type OpsHandler struct {
opsService *service.OpsService
}
+// GetErrorLogByID returns ops error log detail.
+// GET /api/v1/admin/ops/errors/:id
+func (h *OpsHandler) GetErrorLogByID(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ idStr := strings.TrimSpace(c.Param("id"))
+ id, err := strconv.ParseInt(idStr, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid error id")
+ return
+ }
+
+ detail, err := h.opsService.GetErrorLogByID(c.Request.Context(), id)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ response.Success(c, detail)
+}
+
+const (
+ opsListViewErrors = "errors"
+ opsListViewExcluded = "excluded"
+ opsListViewAll = "all"
+)
+
+func parseOpsViewParam(c *gin.Context) string {
+ if c == nil {
+ return ""
+ }
+ v := strings.ToLower(strings.TrimSpace(c.Query("view")))
+ switch v {
+ case "", opsListViewErrors:
+ return opsListViewErrors
+ case opsListViewExcluded:
+ return opsListViewExcluded
+ case opsListViewAll:
+ return opsListViewAll
+ default:
+ return opsListViewErrors
+ }
+}
+
func NewOpsHandler(opsService *service.OpsService) *OpsHandler {
return &OpsHandler{opsService: opsService}
}
@@ -47,16 +98,26 @@ func (h *OpsHandler) GetErrorLogs(c *gin.Context) {
return
}
- filter := &service.OpsErrorLogFilter{
- Page: page,
- PageSize: pageSize,
- }
+ filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize}
+
if !startTime.IsZero() {
filter.StartTime = &startTime
}
if !endTime.IsZero() {
filter.EndTime = &endTime
}
+ filter.View = parseOpsViewParam(c)
+ filter.Phase = strings.TrimSpace(c.Query("phase"))
+ filter.Owner = strings.TrimSpace(c.Query("error_owner"))
+ filter.Source = strings.TrimSpace(c.Query("error_source"))
+ filter.Query = strings.TrimSpace(c.Query("q"))
+ filter.UserQuery = strings.TrimSpace(c.Query("user_query"))
+
+ // Force request errors: client-visible status >= 400.
+ // buildOpsErrorLogsWhere already applies this for non-upstream phase.
+ if strings.EqualFold(strings.TrimSpace(filter.Phase), "upstream") {
+ filter.Phase = ""
+ }
if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
filter.Platform = platform
@@ -77,11 +138,19 @@ func (h *OpsHandler) GetErrorLogs(c *gin.Context) {
}
filter.AccountID = &id
}
- if phase := strings.TrimSpace(c.Query("phase")); phase != "" {
- filter.Phase = phase
- }
- if q := strings.TrimSpace(c.Query("q")); q != "" {
- filter.Query = q
+
+ if v := strings.TrimSpace(c.Query("resolved")); v != "" {
+ switch strings.ToLower(v) {
+ case "1", "true", "yes":
+ b := true
+ filter.Resolved = &b
+ case "0", "false", "no":
+ b := false
+ filter.Resolved = &b
+ default:
+ response.BadRequest(c, "Invalid resolved")
+ return
+ }
}
if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" {
parts := strings.Split(statusCodesStr, ",")
@@ -106,13 +175,120 @@ func (h *OpsHandler) GetErrorLogs(c *gin.Context) {
response.ErrorFrom(c, err)
return
}
-
response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize)
}
-// GetErrorLogByID returns a single error log detail.
-// GET /api/v1/admin/ops/errors/:id
-func (h *OpsHandler) GetErrorLogByID(c *gin.Context) {
+// ListRequestErrors lists client-visible request errors.
+// GET /api/v1/admin/ops/request-errors
+func (h *OpsHandler) ListRequestErrors(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ page, pageSize := response.ParsePagination(c)
+ if pageSize > 500 {
+ pageSize = 500
+ }
+ startTime, endTime, err := parseOpsTimeRange(c, "1h")
+ if err != nil {
+ response.BadRequest(c, err.Error())
+ return
+ }
+
+ filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize}
+ if !startTime.IsZero() {
+ filter.StartTime = &startTime
+ }
+ if !endTime.IsZero() {
+ filter.EndTime = &endTime
+ }
+ filter.View = parseOpsViewParam(c)
+ filter.Phase = strings.TrimSpace(c.Query("phase"))
+ filter.Owner = strings.TrimSpace(c.Query("error_owner"))
+ filter.Source = strings.TrimSpace(c.Query("error_source"))
+ filter.Query = strings.TrimSpace(c.Query("q"))
+ filter.UserQuery = strings.TrimSpace(c.Query("user_query"))
+
+	// Request errors are client-visible errors (status >= 400). buildOpsErrorLogsWhere
+	// applies that scope for every non-upstream phase, so clear an explicit "upstream"
+	// phase filter here to keep the scope enforced.
+ if strings.EqualFold(strings.TrimSpace(filter.Phase), "upstream") {
+ filter.Phase = ""
+ }
+
+ if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
+ filter.Platform = platform
+ }
+ if v := strings.TrimSpace(c.Query("group_id")); v != "" {
+ id, err := strconv.ParseInt(v, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid group_id")
+ return
+ }
+ filter.GroupID = &id
+ }
+ if v := strings.TrimSpace(c.Query("account_id")); v != "" {
+ id, err := strconv.ParseInt(v, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid account_id")
+ return
+ }
+ filter.AccountID = &id
+ }
+
+ if v := strings.TrimSpace(c.Query("resolved")); v != "" {
+ switch strings.ToLower(v) {
+ case "1", "true", "yes":
+ b := true
+ filter.Resolved = &b
+ case "0", "false", "no":
+ b := false
+ filter.Resolved = &b
+ default:
+ response.BadRequest(c, "Invalid resolved")
+ return
+ }
+ }
+ if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" {
+ parts := strings.Split(statusCodesStr, ",")
+ out := make([]int, 0, len(parts))
+ for _, part := range parts {
+ p := strings.TrimSpace(part)
+ if p == "" {
+ continue
+ }
+ n, err := strconv.Atoi(p)
+ if err != nil || n < 0 {
+ response.BadRequest(c, "Invalid status_codes")
+ return
+ }
+ out = append(out, n)
+ }
+ filter.StatusCodes = out
+ }
+
+ result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+ response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize)
+}
+
+// GetRequestError returns request error detail.
+// GET /api/v1/admin/ops/request-errors/:id
+func (h *OpsHandler) GetRequestError(c *gin.Context) {
+	// Same storage as error logs; delegate to the existing detail handler.
+ h.GetErrorLogByID(c)
+}
+
+// ListRequestErrorUpstreamErrors lists upstream error logs correlated to a request error.
+// GET /api/v1/admin/ops/request-errors/:id/upstream-errors
+func (h *OpsHandler) ListRequestErrorUpstreamErrors(c *gin.Context) {
if h.opsService == nil {
response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
return
@@ -129,15 +305,306 @@ func (h *OpsHandler) GetErrorLogByID(c *gin.Context) {
return
}
+ // Load request error to get correlation keys.
detail, err := h.opsService.GetErrorLogByID(c.Request.Context(), id)
if err != nil {
response.ErrorFrom(c, err)
return
}
- response.Success(c, detail)
+ // Correlate by request_id/client_request_id.
+ requestID := strings.TrimSpace(detail.RequestID)
+ clientRequestID := strings.TrimSpace(detail.ClientRequestID)
+ if requestID == "" && clientRequestID == "" {
+ response.Paginated(c, []*service.OpsErrorLog{}, 0, 1, 10)
+ return
+ }
+
+ page, pageSize := response.ParsePagination(c)
+ if pageSize > 500 {
+ pageSize = 500
+ }
+
+ // Keep correlation window wide enough so linked upstream errors
+ // are discoverable even when UI defaults to 1h elsewhere.
+ startTime, endTime, err := parseOpsTimeRange(c, "30d")
+ if err != nil {
+ response.BadRequest(c, err.Error())
+ return
+ }
+
+ filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize}
+ if !startTime.IsZero() {
+ filter.StartTime = &startTime
+ }
+ if !endTime.IsZero() {
+ filter.EndTime = &endTime
+ }
+ filter.View = "all"
+ filter.Phase = "upstream"
+ filter.Owner = "provider"
+ filter.Source = strings.TrimSpace(c.Query("error_source"))
+ filter.Query = strings.TrimSpace(c.Query("q"))
+
+ if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
+ filter.Platform = platform
+ }
+
+ // Prefer exact match on request_id; if missing, fall back to client_request_id.
+ if requestID != "" {
+ filter.RequestID = requestID
+ } else {
+ filter.ClientRequestID = clientRequestID
+ }
+
+ result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ // If client asks for details, expand each upstream error log to include upstream response fields.
+ includeDetail := strings.TrimSpace(c.Query("include_detail"))
+ if includeDetail == "1" || strings.EqualFold(includeDetail, "true") || strings.EqualFold(includeDetail, "yes") {
+ details := make([]*service.OpsErrorLogDetail, 0, len(result.Errors))
+ for _, item := range result.Errors {
+ if item == nil {
+ continue
+ }
+ d, err := h.opsService.GetErrorLogByID(c.Request.Context(), item.ID)
+ if err != nil || d == nil {
+ continue
+ }
+ details = append(details, d)
+ }
+ response.Paginated(c, details, int64(result.Total), result.Page, result.PageSize)
+ return
+ }
+
+ response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize)
}
+// RetryRequestErrorClient retries the client request based on stored request body.
+// POST /api/v1/admin/ops/request-errors/:id/retry-client
+func (h *OpsHandler) RetryRequestErrorClient(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ subject, ok := middleware.GetAuthSubjectFromContext(c)
+ if !ok || subject.UserID <= 0 {
+ response.Error(c, http.StatusUnauthorized, "Unauthorized")
+ return
+ }
+
+ idStr := strings.TrimSpace(c.Param("id"))
+ id, err := strconv.ParseInt(idStr, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid error id")
+ return
+ }
+
+ result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, service.OpsRetryModeClient, nil)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+ response.Success(c, result)
+}
+
+// RetryRequestErrorUpstreamEvent retries a specific upstream attempt using captured upstream_request_body.
+// POST /api/v1/admin/ops/request-errors/:id/upstream-errors/:idx/retry
+func (h *OpsHandler) RetryRequestErrorUpstreamEvent(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ subject, ok := middleware.GetAuthSubjectFromContext(c)
+ if !ok || subject.UserID <= 0 {
+ response.Error(c, http.StatusUnauthorized, "Unauthorized")
+ return
+ }
+
+ idStr := strings.TrimSpace(c.Param("id"))
+ id, err := strconv.ParseInt(idStr, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid error id")
+ return
+ }
+
+ idxStr := strings.TrimSpace(c.Param("idx"))
+ idx, err := strconv.Atoi(idxStr)
+ if err != nil || idx < 0 {
+ response.BadRequest(c, "Invalid upstream idx")
+ return
+ }
+
+ result, err := h.opsService.RetryUpstreamEvent(c.Request.Context(), subject.UserID, id, idx)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+ response.Success(c, result)
+}
+
+// ResolveRequestError toggles resolved status.
+// PUT /api/v1/admin/ops/request-errors/:id/resolve
+func (h *OpsHandler) ResolveRequestError(c *gin.Context) {
+ h.UpdateErrorResolution(c)
+}
+
+// ListUpstreamErrors lists independent upstream errors.
+// GET /api/v1/admin/ops/upstream-errors
+func (h *OpsHandler) ListUpstreamErrors(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ page, pageSize := response.ParsePagination(c)
+ if pageSize > 500 {
+ pageSize = 500
+ }
+ startTime, endTime, err := parseOpsTimeRange(c, "1h")
+ if err != nil {
+ response.BadRequest(c, err.Error())
+ return
+ }
+
+ filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize}
+ if !startTime.IsZero() {
+ filter.StartTime = &startTime
+ }
+ if !endTime.IsZero() {
+ filter.EndTime = &endTime
+ }
+
+ filter.View = parseOpsViewParam(c)
+ filter.Phase = "upstream"
+ filter.Owner = "provider"
+ filter.Source = strings.TrimSpace(c.Query("error_source"))
+ filter.Query = strings.TrimSpace(c.Query("q"))
+
+ if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
+ filter.Platform = platform
+ }
+ if v := strings.TrimSpace(c.Query("group_id")); v != "" {
+ id, err := strconv.ParseInt(v, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid group_id")
+ return
+ }
+ filter.GroupID = &id
+ }
+ if v := strings.TrimSpace(c.Query("account_id")); v != "" {
+ id, err := strconv.ParseInt(v, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid account_id")
+ return
+ }
+ filter.AccountID = &id
+ }
+
+ if v := strings.TrimSpace(c.Query("resolved")); v != "" {
+ switch strings.ToLower(v) {
+ case "1", "true", "yes":
+ b := true
+ filter.Resolved = &b
+ case "0", "false", "no":
+ b := false
+ filter.Resolved = &b
+ default:
+ response.BadRequest(c, "Invalid resolved")
+ return
+ }
+ }
+ if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" {
+ parts := strings.Split(statusCodesStr, ",")
+ out := make([]int, 0, len(parts))
+ for _, part := range parts {
+ p := strings.TrimSpace(part)
+ if p == "" {
+ continue
+ }
+ n, err := strconv.Atoi(p)
+ if err != nil || n < 0 {
+ response.BadRequest(c, "Invalid status_codes")
+ return
+ }
+ out = append(out, n)
+ }
+ filter.StatusCodes = out
+ }
+
+ result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+ response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize)
+}
+
+// GetUpstreamError returns upstream error detail.
+// GET /api/v1/admin/ops/upstream-errors/:id
+func (h *OpsHandler) GetUpstreamError(c *gin.Context) {
+ h.GetErrorLogByID(c)
+}
+
+// RetryUpstreamError retries upstream error using the original account_id.
+// POST /api/v1/admin/ops/upstream-errors/:id/retry
+func (h *OpsHandler) RetryUpstreamError(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ subject, ok := middleware.GetAuthSubjectFromContext(c)
+ if !ok || subject.UserID <= 0 {
+ response.Error(c, http.StatusUnauthorized, "Unauthorized")
+ return
+ }
+
+ idStr := strings.TrimSpace(c.Param("id"))
+ id, err := strconv.ParseInt(idStr, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid error id")
+ return
+ }
+
+ result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, service.OpsRetryModeUpstream, nil)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+ response.Success(c, result)
+}
+
+// ResolveUpstreamError toggles resolved status.
+// PUT /api/v1/admin/ops/upstream-errors/:id/resolve
+func (h *OpsHandler) ResolveUpstreamError(c *gin.Context) {
+ h.UpdateErrorResolution(c)
+}
+
+// ==================== Existing endpoints ====================
+
// ListRequestDetails returns a request-level list (success + error) for drill-down.
// GET /api/v1/admin/ops/requests
func (h *OpsHandler) ListRequestDetails(c *gin.Context) {
@@ -242,6 +709,11 @@ func (h *OpsHandler) ListRequestDetails(c *gin.Context) {
type opsRetryRequest struct {
Mode string `json:"mode"`
PinnedAccountID *int64 `json:"pinned_account_id"`
+ Force bool `json:"force"`
+}
+
+type opsResolveRequest struct {
+ Resolved bool `json:"resolved"`
}
// RetryErrorRequest retries a failed request using stored request_body.
@@ -278,6 +750,16 @@ func (h *OpsHandler) RetryErrorRequest(c *gin.Context) {
req.Mode = service.OpsRetryModeClient
}
+	// The Force flag is currently a UI-level acknowledgement; the server may still enforce safety constraints.
+ _ = req.Force
+
+ // Legacy endpoint safety: only allow retrying the client request here.
+ // Upstream retries must go through the split endpoints.
+ if strings.EqualFold(strings.TrimSpace(req.Mode), service.OpsRetryModeUpstream) {
+ response.BadRequest(c, "upstream retry is not supported on this endpoint")
+ return
+ }
+
result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, req.Mode, req.PinnedAccountID)
if err != nil {
response.ErrorFrom(c, err)
@@ -287,6 +769,81 @@ func (h *OpsHandler) RetryErrorRequest(c *gin.Context) {
response.Success(c, result)
}
+// ListRetryAttempts lists retry attempts for an error log.
+// GET /api/v1/admin/ops/errors/:id/retries
+func (h *OpsHandler) ListRetryAttempts(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ idStr := strings.TrimSpace(c.Param("id"))
+ id, err := strconv.ParseInt(idStr, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid error id")
+ return
+ }
+
+ limit := 50
+ if v := strings.TrimSpace(c.Query("limit")); v != "" {
+ n, err := strconv.Atoi(v)
+ if err != nil || n <= 0 {
+ response.BadRequest(c, "Invalid limit")
+ return
+ }
+ limit = n
+ }
+
+ items, err := h.opsService.ListRetryAttemptsByErrorID(c.Request.Context(), id, limit)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+ response.Success(c, items)
+}
+
+// UpdateErrorResolution allows manual resolve/unresolve.
+// PUT /api/v1/admin/ops/errors/:id/resolve
+func (h *OpsHandler) UpdateErrorResolution(c *gin.Context) {
+ if h.opsService == nil {
+ response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
+ return
+ }
+ if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ subject, ok := middleware.GetAuthSubjectFromContext(c)
+ if !ok || subject.UserID <= 0 {
+ response.Error(c, http.StatusUnauthorized, "Unauthorized")
+ return
+ }
+
+ idStr := strings.TrimSpace(c.Param("id"))
+ id, err := strconv.ParseInt(idStr, 10, 64)
+ if err != nil || id <= 0 {
+ response.BadRequest(c, "Invalid error id")
+ return
+ }
+
+ var req opsResolveRequest
+ if err := c.ShouldBindJSON(&req); err != nil {
+ response.BadRequest(c, "Invalid request: "+err.Error())
+ return
+ }
+ uid := subject.UserID
+ if err := h.opsService.UpdateErrorResolution(c.Request.Context(), id, req.Resolved, &uid, nil); err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+ response.Success(c, gin.H{"ok": true})
+}
+
func parseOpsTimeRange(c *gin.Context, defaultRange string) (time.Time, time.Time, error) {
startStr := strings.TrimSpace(c.Query("start_time"))
endStr := strings.TrimSpace(c.Query("end_time"))
@@ -358,6 +915,10 @@ func parseOpsDuration(v string) (time.Duration, bool) {
return 6 * time.Hour, true
case "24h":
return 24 * time.Hour, true
+ case "7d":
+ return 7 * 24 * time.Hour, true
+ case "30d":
+ return 30 * 24 * time.Hour, true
default:
return 0, false
}
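
A minimal client sketch for exercising the new split list endpoint; the route and
the view/resolved/status_codes query parameters come from the handler code above,
while the base URL, bearer-token auth header, and token value are placeholders:

    package main

    import (
        "fmt"
        "net/http"
        "net/url"
    )

    func main() {
        // Hypothetical deployment values; substitute your own.
        base := "http://localhost:8080"
        token := "ADMIN_TOKEN"

        // Unresolved 5xx request errors. view=errors is the default and hides
        // business-limited and upstream 429/529 noise.
        q := url.Values{}
        q.Set("view", "errors")
        q.Set("resolved", "false")
        q.Set("status_codes", "500,502,503")

        req, err := http.NewRequest(http.MethodGet,
            base+"/api/v1/admin/ops/request-errors?"+q.Encode(), nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("Authorization", "Bearer "+token)

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.Status)
    }
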
diff --git a/backend/internal/handler/admin/proxy_handler.go b/backend/internal/handler/admin/proxy_handler.go
index 437e9300..a6758f69 100644
--- a/backend/internal/handler/admin/proxy_handler.go
+++ b/backend/internal/handler/admin/proxy_handler.go
@@ -196,6 +196,28 @@ func (h *ProxyHandler) Delete(c *gin.Context) {
response.Success(c, gin.H{"message": "Proxy deleted successfully"})
}
+// BatchDelete handles batch deletion of proxies
+// POST /api/v1/admin/proxies/batch-delete
+func (h *ProxyHandler) BatchDelete(c *gin.Context) {
+ type BatchDeleteRequest struct {
+ IDs []int64 `json:"ids" binding:"required,min=1"`
+ }
+
+ var req BatchDeleteRequest
+ if err := c.ShouldBindJSON(&req); err != nil {
+ response.BadRequest(c, "Invalid request: "+err.Error())
+ return
+ }
+
+ result, err := h.adminService.BatchDeleteProxies(c.Request.Context(), req.IDs)
+ if err != nil {
+ response.ErrorFrom(c, err)
+ return
+ }
+
+ response.Success(c, result)
+}
+
// Test handles testing proxy connectivity
// POST /api/v1/admin/proxies/:id/test
func (h *ProxyHandler) Test(c *gin.Context) {
@@ -243,19 +265,17 @@ func (h *ProxyHandler) GetProxyAccounts(c *gin.Context) {
return
}
- page, pageSize := response.ParsePagination(c)
-
- accounts, total, err := h.adminService.GetProxyAccounts(c.Request.Context(), proxyID, page, pageSize)
+ accounts, err := h.adminService.GetProxyAccounts(c.Request.Context(), proxyID)
if err != nil {
response.ErrorFrom(c, err)
return
}
- out := make([]dto.Account, 0, len(accounts))
+ out := make([]dto.ProxyAccountSummary, 0, len(accounts))
for i := range accounts {
- out = append(out, *dto.AccountFromService(&accounts[i]))
+ out = append(out, *dto.ProxyAccountSummaryFromService(&accounts[i]))
}
- response.Paginated(c, out, total, page, pageSize)
+ response.Success(c, out)
}
// BatchCreateProxyItem represents a single proxy in batch create request
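
A sketch of calling the new batch-delete endpoint; the route and the {"ids": [...]}
payload shape are taken from BatchDelete above, while the host and token are
placeholders:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "net/http"
    )

    func main() {
        payload, err := json.Marshal(map[string][]int64{"ids": {101, 102, 103}})
        if err != nil {
            panic(err)
        }
        req, err := http.NewRequest(http.MethodPost,
            "http://localhost:8080/api/v1/admin/proxies/batch-delete",
            bytes.NewReader(payload))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Content-Type", "application/json")
        req.Header.Set("Authorization", "Bearer ADMIN_TOKEN") // placeholder

        resp, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.Status)
    }
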
diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go
index 6ffaedea..4980fd66 100644
--- a/backend/internal/handler/dto/mappers.go
+++ b/backend/internal/handler/dto/mappers.go
@@ -125,6 +125,7 @@ func AccountFromServiceShallow(a *service.Account) *Account {
ProxyID: a.ProxyID,
Concurrency: a.Concurrency,
Priority: a.Priority,
+ RateMultiplier: a.BillingRateMultiplier(),
Status: a.Status,
ErrorMessage: a.ErrorMessage,
LastUsedAt: a.LastUsedAt,
@@ -212,8 +213,24 @@ func ProxyWithAccountCountFromService(p *service.ProxyWithAccountCount) *ProxyWi
return nil
}
return &ProxyWithAccountCount{
- Proxy: *ProxyFromService(&p.Proxy),
- AccountCount: p.AccountCount,
+ Proxy: *ProxyFromService(&p.Proxy),
+ AccountCount: p.AccountCount,
+ LatencyMs: p.LatencyMs,
+ LatencyStatus: p.LatencyStatus,
+ LatencyMessage: p.LatencyMessage,
+ }
+}
+
+func ProxyAccountSummaryFromService(a *service.ProxyAccountSummary) *ProxyAccountSummary {
+ if a == nil {
+ return nil
+ }
+ return &ProxyAccountSummary{
+ ID: a.ID,
+ Name: a.Name,
+ Platform: a.Platform,
+ Type: a.Type,
+ Notes: a.Notes,
}
}
@@ -279,6 +296,7 @@ func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary, inclu
TotalCost: l.TotalCost,
ActualCost: l.ActualCost,
RateMultiplier: l.RateMultiplier,
+ AccountRateMultiplier: l.AccountRateMultiplier,
BillingType: l.BillingType,
Stream: l.Stream,
DurationMs: l.DurationMs,
diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go
index a9b010b9..0da21233 100644
--- a/backend/internal/handler/dto/types.go
+++ b/backend/internal/handler/dto/types.go
@@ -76,6 +76,7 @@ type Account struct {
ProxyID *int64 `json:"proxy_id"`
Concurrency int `json:"concurrency"`
Priority int `json:"priority"`
+ RateMultiplier float64 `json:"rate_multiplier"`
Status string `json:"status"`
ErrorMessage string `json:"error_message"`
LastUsedAt *time.Time `json:"last_used_at"`
@@ -129,7 +130,18 @@ type Proxy struct {
type ProxyWithAccountCount struct {
Proxy
- AccountCount int64 `json:"account_count"`
+ AccountCount int64 `json:"account_count"`
+ LatencyMs *int64 `json:"latency_ms,omitempty"`
+ LatencyStatus string `json:"latency_status,omitempty"`
+ LatencyMessage string `json:"latency_message,omitempty"`
+}
+
+type ProxyAccountSummary struct {
+ ID int64 `json:"id"`
+ Name string `json:"name"`
+ Platform string `json:"platform"`
+ Type string `json:"type"`
+ Notes *string `json:"notes,omitempty"`
}
type RedeemCode struct {
@@ -169,13 +181,14 @@ type UsageLog struct {
CacheCreation5mTokens int `json:"cache_creation_5m_tokens"`
CacheCreation1hTokens int `json:"cache_creation_1h_tokens"`
- InputCost float64 `json:"input_cost"`
- OutputCost float64 `json:"output_cost"`
- CacheCreationCost float64 `json:"cache_creation_cost"`
- CacheReadCost float64 `json:"cache_read_cost"`
- TotalCost float64 `json:"total_cost"`
- ActualCost float64 `json:"actual_cost"`
- RateMultiplier float64 `json:"rate_multiplier"`
+ InputCost float64 `json:"input_cost"`
+ OutputCost float64 `json:"output_cost"`
+ CacheCreationCost float64 `json:"cache_creation_cost"`
+ CacheReadCost float64 `json:"cache_read_cost"`
+ TotalCost float64 `json:"total_cost"`
+ ActualCost float64 `json:"actual_cost"`
+ RateMultiplier float64 `json:"rate_multiplier"`
+ AccountRateMultiplier *float64 `json:"account_rate_multiplier"`
BillingType int8 `json:"billing_type"`
Stream bool `json:"stream"`
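
One serialization detail worth pinning down: AccountRateMultiplier is a pointer
without omitempty, so legacy rows render an explicit null, while the pointer Notes
field uses omitempty and disappears when nil. A self-contained sketch with local
stand-in types mirroring the tags above:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type usageLog struct {
        RateMultiplier        float64  `json:"rate_multiplier"`
        AccountRateMultiplier *float64 `json:"account_rate_multiplier"`
    }

    type proxyAccountSummary struct {
        ID    int64   `json:"id"`
        Notes *string `json:"notes,omitempty"`
    }

    func main() {
        b, _ := json.Marshal(usageLog{RateMultiplier: 1})
        fmt.Println(string(b)) // {"rate_multiplier":1,"account_rate_multiplier":null}

        b, _ = json.Marshal(proxyAccountSummary{ID: 7})
        fmt.Println(string(b)) // {"id":7} (nil Notes omitted)
    }
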
diff --git a/backend/internal/handler/ops_error_logger.go b/backend/internal/handler/ops_error_logger.go
index 13bd9d94..f62e6b3e 100644
--- a/backend/internal/handler/ops_error_logger.go
+++ b/backend/internal/handler/ops_error_logger.go
@@ -544,6 +544,11 @@ func OpsErrorLoggerMiddleware(ops *service.OpsService) gin.HandlerFunc {
body := w.buf.Bytes()
parsed := parseOpsErrorResponse(body)
+ // Skip logging if the error should be filtered based on settings
+ if shouldSkipOpsErrorLog(c.Request.Context(), ops, parsed.Message, string(body), c.Request.URL.Path) {
+ return
+ }
+
apiKey, _ := middleware2.GetAPIKeyFromContext(c)
clientRequestID, _ := c.Request.Context().Value(ctxkey.ClientRequestID).(string)
@@ -832,28 +837,30 @@ func normalizeOpsErrorType(errType string, code string) string {
func classifyOpsPhase(errType, message, code string) string {
msg := strings.ToLower(message)
+ // Standardized phases: request|auth|routing|upstream|network|internal
+ // Map billing/concurrency/response => request; scheduling => routing.
switch strings.TrimSpace(code) {
case "INSUFFICIENT_BALANCE", "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID":
- return "billing"
+ return "request"
}
switch errType {
case "authentication_error":
return "auth"
case "billing_error", "subscription_error":
- return "billing"
+ return "request"
case "rate_limit_error":
if strings.Contains(msg, "concurrency") || strings.Contains(msg, "pending") || strings.Contains(msg, "queue") {
- return "concurrency"
+ return "request"
}
return "upstream"
case "invalid_request_error":
- return "response"
+ return "request"
case "upstream_error", "overloaded_error":
return "upstream"
case "api_error":
if strings.Contains(msg, "no available accounts") {
- return "scheduling"
+ return "routing"
}
return "internal"
default:
@@ -914,34 +921,38 @@ func classifyOpsIsBusinessLimited(errType, phase, code string, status int, messa
}
func classifyOpsErrorOwner(phase string, message string) string {
+ // Standardized owners: client|provider|platform
switch phase {
case "upstream", "network":
return "provider"
- case "billing", "concurrency", "auth", "response":
+ case "request", "auth":
return "client"
+ case "routing", "internal":
+ return "platform"
default:
if strings.Contains(strings.ToLower(message), "upstream") {
return "provider"
}
- return "sub2api"
+ return "platform"
}
}
func classifyOpsErrorSource(phase string, message string) string {
+ // Standardized sources: client_request|upstream_http|gateway
switch phase {
case "upstream":
return "upstream_http"
case "network":
- return "upstream_network"
- case "billing":
- return "billing"
- case "concurrency":
- return "concurrency"
+ return "gateway"
+ case "request", "auth":
+ return "client_request"
+ case "routing", "internal":
+ return "gateway"
default:
if strings.Contains(strings.ToLower(message), "upstream") {
return "upstream_http"
}
- return "internal"
+ return "gateway"
}
}
@@ -963,3 +974,42 @@ func truncateString(s string, max int) string {
func strconvItoa(v int) string {
return strconv.Itoa(v)
}
+
+// shouldSkipOpsErrorLog determines if an error should be skipped from logging based on settings.
+// Returns true for errors that should be filtered according to OpsAdvancedSettings.
+func shouldSkipOpsErrorLog(ctx context.Context, ops *service.OpsService, message, body, requestPath string) bool {
+ if ops == nil {
+ return false
+ }
+
+ // Get advanced settings to check filter configuration
+ settings, err := ops.GetOpsAdvancedSettings(ctx)
+ if err != nil || settings == nil {
+ // If we can't get settings, don't skip (fail open)
+ return false
+ }
+
+ msgLower := strings.ToLower(message)
+ bodyLower := strings.ToLower(body)
+
+ // Check if count_tokens errors should be ignored
+ if settings.IgnoreCountTokensErrors && strings.Contains(requestPath, "/count_tokens") {
+ return true
+ }
+
+ // Check if context canceled errors should be ignored (client disconnects)
+ if settings.IgnoreContextCanceled {
+ if strings.Contains(msgLower, "context canceled") || strings.Contains(bodyLower, "context canceled") {
+ return true
+ }
+ }
+
+ // Check if "no available accounts" errors should be ignored
+ if settings.IgnoreNoAvailableAccounts {
+ if strings.Contains(msgLower, "no available accounts") || strings.Contains(bodyLower, "no available accounts") {
+ return true
+ }
+ }
+
+ return false
+}
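
The standardized owner/source taxonomy above is easy to regress silently; a
table-test sketch that pins the mapping (same handler package as the classifiers;
the message argument is left empty because these phases do not consult it):

    package handler

    import "testing"

    func TestOpsErrorTaxonomy(t *testing.T) {
        cases := []struct{ phase, owner, source string }{
            {"upstream", "provider", "upstream_http"},
            {"network", "provider", "gateway"},
            {"request", "client", "client_request"},
            {"auth", "client", "client_request"},
            {"routing", "platform", "gateway"},
            {"internal", "platform", "gateway"},
        }
        for _, c := range cases {
            if got := classifyOpsErrorOwner(c.phase, ""); got != c.owner {
                t.Errorf("owner(%s) = %s, want %s", c.phase, got, c.owner)
            }
            if got := classifyOpsErrorSource(c.phase, ""); got != c.source {
                t.Errorf("source(%s) = %s, want %s", c.phase, got, c.source)
            }
        }
    }
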
diff --git a/backend/internal/pkg/usagestats/account_stats.go b/backend/internal/pkg/usagestats/account_stats.go
index ed77dd27..9ac49625 100644
--- a/backend/internal/pkg/usagestats/account_stats.go
+++ b/backend/internal/pkg/usagestats/account_stats.go
@@ -1,8 +1,14 @@
package usagestats
// AccountStats holds per-account usage statistics.
+//
+// cost: account-scoped cost (total_cost * account_rate_multiplier)
+// standard_cost: standard cost (total_cost, without any multiplier)
+// user_cost: user/API key-scoped cost (actual_cost, affected by the group rate multiplier)
type AccountStats struct {
- Requests int64 `json:"requests"`
- Tokens int64 `json:"tokens"`
- Cost float64 `json:"cost"`
+ Requests int64 `json:"requests"`
+ Tokens int64 `json:"tokens"`
+ Cost float64 `json:"cost"`
+ StandardCost float64 `json:"standard_cost"`
+ UserCost float64 `json:"user_cost"`
}
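
To make the three cost views concrete, a small sketch of how they relate for a
single usage-log row; the fallback to 1.0 for a nil account multiplier is an
illustrative assumption, not something this patch specifies:

    package usagestats

    func costViews(totalCost, actualCost float64, accountRateMultiplier *float64) (cost, standardCost, userCost float64) {
        m := 1.0 // assumed fallback when the account multiplier is nil
        if accountRateMultiplier != nil {
            m = *accountRateMultiplier
        }
        cost = totalCost * m     // account-scoped cost
        standardCost = totalCost // standard cost, no multiplier
        userCost = actualCost    // user-scoped; group multiplier already applied
        return
    }
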
diff --git a/backend/internal/pkg/usagestats/usage_log_types.go b/backend/internal/pkg/usagestats/usage_log_types.go
index 3952785b..2f6c7fe0 100644
--- a/backend/internal/pkg/usagestats/usage_log_types.go
+++ b/backend/internal/pkg/usagestats/usage_log_types.go
@@ -147,14 +147,15 @@ type UsageLogFilters struct {
// UsageStats represents usage statistics
type UsageStats struct {
- TotalRequests int64 `json:"total_requests"`
- TotalInputTokens int64 `json:"total_input_tokens"`
- TotalOutputTokens int64 `json:"total_output_tokens"`
- TotalCacheTokens int64 `json:"total_cache_tokens"`
- TotalTokens int64 `json:"total_tokens"`
- TotalCost float64 `json:"total_cost"`
- TotalActualCost float64 `json:"total_actual_cost"`
- AverageDurationMs float64 `json:"average_duration_ms"`
+ TotalRequests int64 `json:"total_requests"`
+ TotalInputTokens int64 `json:"total_input_tokens"`
+ TotalOutputTokens int64 `json:"total_output_tokens"`
+ TotalCacheTokens int64 `json:"total_cache_tokens"`
+ TotalTokens int64 `json:"total_tokens"`
+ TotalCost float64 `json:"total_cost"`
+ TotalActualCost float64 `json:"total_actual_cost"`
+ TotalAccountCost *float64 `json:"total_account_cost,omitempty"`
+ AverageDurationMs float64 `json:"average_duration_ms"`
}
// BatchUserUsageStats represents usage stats for a single user
@@ -177,25 +178,29 @@ type AccountUsageHistory struct {
Label string `json:"label"`
Requests int64 `json:"requests"`
Tokens int64 `json:"tokens"`
- Cost float64 `json:"cost"`
- ActualCost float64 `json:"actual_cost"`
+	Cost       float64 `json:"cost"`        // standard cost (total_cost)
+	ActualCost float64 `json:"actual_cost"` // account-scoped cost (total_cost * account_rate_multiplier)
+	UserCost   float64 `json:"user_cost"`   // user-scoped cost (actual_cost, affected by group rate multiplier)
}
// AccountUsageSummary represents summary statistics for an account
type AccountUsageSummary struct {
Days int `json:"days"`
ActualDaysUsed int `json:"actual_days_used"`
- TotalCost float64 `json:"total_cost"`
+	TotalCost     float64 `json:"total_cost"`      // account-scoped cost
+	TotalUserCost float64 `json:"total_user_cost"` // user-scoped cost
TotalStandardCost float64 `json:"total_standard_cost"`
TotalRequests int64 `json:"total_requests"`
TotalTokens int64 `json:"total_tokens"`
- AvgDailyCost float64 `json:"avg_daily_cost"`
+	AvgDailyCost     float64 `json:"avg_daily_cost"` // account-scoped daily average
+ AvgDailyUserCost float64 `json:"avg_daily_user_cost"`
AvgDailyRequests float64 `json:"avg_daily_requests"`
AvgDailyTokens float64 `json:"avg_daily_tokens"`
AvgDurationMs float64 `json:"avg_duration_ms"`
Today *struct {
Date string `json:"date"`
Cost float64 `json:"cost"`
+ UserCost float64 `json:"user_cost"`
Requests int64 `json:"requests"`
Tokens int64 `json:"tokens"`
} `json:"today"`
@@ -203,6 +208,7 @@ type AccountUsageSummary struct {
Date string `json:"date"`
Label string `json:"label"`
Cost float64 `json:"cost"`
+ UserCost float64 `json:"user_cost"`
Requests int64 `json:"requests"`
} `json:"highest_cost_day"`
HighestRequestDay *struct {
@@ -210,6 +216,7 @@ type AccountUsageSummary struct {
Label string `json:"label"`
Requests int64 `json:"requests"`
Cost float64 `json:"cost"`
+ UserCost float64 `json:"user_cost"`
} `json:"highest_request_day"`
}
diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go
index aaa89f21..ba8751df 100644
--- a/backend/internal/repository/account_repo.go
+++ b/backend/internal/repository/account_repo.go
@@ -80,6 +80,10 @@ func (r *accountRepository) Create(ctx context.Context, account *service.Account
SetSchedulable(account.Schedulable).
SetAutoPauseOnExpired(account.AutoPauseOnExpired)
+ if account.RateMultiplier != nil {
+ builder.SetRateMultiplier(*account.RateMultiplier)
+ }
+
if account.ProxyID != nil {
builder.SetProxyID(*account.ProxyID)
}
@@ -291,6 +295,10 @@ func (r *accountRepository) Update(ctx context.Context, account *service.Account
SetSchedulable(account.Schedulable).
SetAutoPauseOnExpired(account.AutoPauseOnExpired)
+ if account.RateMultiplier != nil {
+ builder.SetRateMultiplier(*account.RateMultiplier)
+ }
+
if account.ProxyID != nil {
builder.SetProxyID(*account.ProxyID)
} else {
@@ -999,6 +1007,11 @@ func (r *accountRepository) BulkUpdate(ctx context.Context, ids []int64, updates
args = append(args, *updates.Priority)
idx++
}
+ if updates.RateMultiplier != nil {
+ setClauses = append(setClauses, "rate_multiplier = $"+itoa(idx))
+ args = append(args, *updates.RateMultiplier)
+ idx++
+ }
if updates.Status != nil {
setClauses = append(setClauses, "status = $"+itoa(idx))
args = append(args, *updates.Status)
@@ -1347,6 +1360,8 @@ func accountEntityToService(m *dbent.Account) *service.Account {
return nil
}
+ rateMultiplier := m.RateMultiplier
+
return &service.Account{
ID: m.ID,
Name: m.Name,
@@ -1358,6 +1373,7 @@ func accountEntityToService(m *dbent.Account) *service.Account {
ProxyID: m.ProxyID,
Concurrency: m.Concurrency,
Priority: m.Priority,
+ RateMultiplier: &rateMultiplier,
Status: m.Status,
ErrorMessage: derefString(m.ErrorMessage),
LastUsedAt: m.LastUsedAt,
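
The BulkUpdate change reuses the existing append-one-clause-per-set-pointer
pattern; a distilled, standalone sketch of that pattern (field names illustrative):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // buildSet appends a SET clause and a positional arg only for non-nil fields,
    // so placeholder numbering always matches the args slice.
    func buildSet(priority *int, rateMultiplier *float64) (string, []any) {
        setClauses := make([]string, 0, 2)
        args := make([]any, 0, 2)
        idx := 1
        if priority != nil {
            setClauses = append(setClauses, "priority = $"+strconv.Itoa(idx))
            args = append(args, *priority)
            idx++
        }
        if rateMultiplier != nil {
            setClauses = append(setClauses, "rate_multiplier = $"+strconv.Itoa(idx))
            args = append(args, *rateMultiplier)
            idx++
        }
        return strings.Join(setClauses, ", "), args
    }

    func main() {
        m := 1.5
        set, args := buildSet(nil, &m)
        fmt.Println(set, args) // rate_multiplier = $1 [1.5]
    }
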
diff --git a/backend/internal/repository/dashboard_aggregation_repo.go b/backend/internal/repository/dashboard_aggregation_repo.go
index d238e320..3543e061 100644
--- a/backend/internal/repository/dashboard_aggregation_repo.go
+++ b/backend/internal/repository/dashboard_aggregation_repo.go
@@ -8,6 +8,7 @@ import (
"strings"
"time"
+ "github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/lib/pq"
)
@@ -41,21 +42,22 @@ func isPostgresDriver(db *sql.DB) bool {
}
func (r *dashboardAggregationRepository) AggregateRange(ctx context.Context, start, end time.Time) error {
- startUTC := start.UTC()
- endUTC := end.UTC()
- if !endUTC.After(startUTC) {
+ loc := timezone.Location()
+ startLocal := start.In(loc)
+ endLocal := end.In(loc)
+ if !endLocal.After(startLocal) {
return nil
}
- hourStart := startUTC.Truncate(time.Hour)
- hourEnd := endUTC.Truncate(time.Hour)
- if endUTC.After(hourEnd) {
+ hourStart := startLocal.Truncate(time.Hour)
+ hourEnd := endLocal.Truncate(time.Hour)
+ if endLocal.After(hourEnd) {
hourEnd = hourEnd.Add(time.Hour)
}
- dayStart := truncateToDayUTC(startUTC)
- dayEnd := truncateToDayUTC(endUTC)
- if endUTC.After(dayEnd) {
+ dayStart := truncateToDay(startLocal)
+ dayEnd := truncateToDay(endLocal)
+ if endLocal.After(dayEnd) {
dayEnd = dayEnd.Add(24 * time.Hour)
}
@@ -146,38 +148,41 @@ func (r *dashboardAggregationRepository) EnsureUsageLogsPartitions(ctx context.C
}
func (r *dashboardAggregationRepository) insertHourlyActiveUsers(ctx context.Context, start, end time.Time) error {
+ tzName := timezone.Name()
query := `
INSERT INTO usage_dashboard_hourly_users (bucket_start, user_id)
SELECT DISTINCT
- date_trunc('hour', created_at AT TIME ZONE 'UTC') AT TIME ZONE 'UTC' AS bucket_start,
+ date_trunc('hour', created_at AT TIME ZONE $3) AT TIME ZONE $3 AS bucket_start,
user_id
FROM usage_logs
WHERE created_at >= $1 AND created_at < $2
ON CONFLICT DO NOTHING
`
- _, err := r.sql.ExecContext(ctx, query, start.UTC(), end.UTC())
+ _, err := r.sql.ExecContext(ctx, query, start, end, tzName)
return err
}
func (r *dashboardAggregationRepository) insertDailyActiveUsers(ctx context.Context, start, end time.Time) error {
+ tzName := timezone.Name()
query := `
INSERT INTO usage_dashboard_daily_users (bucket_date, user_id)
SELECT DISTINCT
- (bucket_start AT TIME ZONE 'UTC')::date AS bucket_date,
+ (bucket_start AT TIME ZONE $3)::date AS bucket_date,
user_id
FROM usage_dashboard_hourly_users
WHERE bucket_start >= $1 AND bucket_start < $2
ON CONFLICT DO NOTHING
`
- _, err := r.sql.ExecContext(ctx, query, start.UTC(), end.UTC())
+ _, err := r.sql.ExecContext(ctx, query, start, end, tzName)
return err
}
func (r *dashboardAggregationRepository) upsertHourlyAggregates(ctx context.Context, start, end time.Time) error {
+ tzName := timezone.Name()
query := `
WITH hourly AS (
SELECT
- date_trunc('hour', created_at AT TIME ZONE 'UTC') AT TIME ZONE 'UTC' AS bucket_start,
+ date_trunc('hour', created_at AT TIME ZONE $3) AT TIME ZONE $3 AS bucket_start,
COUNT(*) AS total_requests,
COALESCE(SUM(input_tokens), 0) AS input_tokens,
COALESCE(SUM(output_tokens), 0) AS output_tokens,
@@ -236,15 +241,16 @@ func (r *dashboardAggregationRepository) upsertHourlyAggregates(ctx context.Cont
active_users = EXCLUDED.active_users,
computed_at = EXCLUDED.computed_at
`
- _, err := r.sql.ExecContext(ctx, query, start.UTC(), end.UTC())
+ _, err := r.sql.ExecContext(ctx, query, start, end, tzName)
return err
}
func (r *dashboardAggregationRepository) upsertDailyAggregates(ctx context.Context, start, end time.Time) error {
+ tzName := timezone.Name()
query := `
WITH daily AS (
SELECT
- (bucket_start AT TIME ZONE 'UTC')::date AS bucket_date,
+ (bucket_start AT TIME ZONE $5)::date AS bucket_date,
COALESCE(SUM(total_requests), 0) AS total_requests,
COALESCE(SUM(input_tokens), 0) AS input_tokens,
COALESCE(SUM(output_tokens), 0) AS output_tokens,
@@ -255,7 +261,7 @@ func (r *dashboardAggregationRepository) upsertDailyAggregates(ctx context.Conte
COALESCE(SUM(total_duration_ms), 0) AS total_duration_ms
FROM usage_dashboard_hourly
WHERE bucket_start >= $1 AND bucket_start < $2
- GROUP BY (bucket_start AT TIME ZONE 'UTC')::date
+ GROUP BY (bucket_start AT TIME ZONE $5)::date
),
user_counts AS (
SELECT bucket_date, COUNT(*) AS active_users
@@ -303,7 +309,7 @@ func (r *dashboardAggregationRepository) upsertDailyAggregates(ctx context.Conte
active_users = EXCLUDED.active_users,
computed_at = EXCLUDED.computed_at
`
- _, err := r.sql.ExecContext(ctx, query, start.UTC(), end.UTC(), start.UTC(), end.UTC())
+ _, err := r.sql.ExecContext(ctx, query, start, end, start, end, tzName)
return err
}
@@ -376,9 +382,8 @@ func (r *dashboardAggregationRepository) createUsageLogsPartition(ctx context.Co
return err
}
-func truncateToDayUTC(t time.Time) time.Time {
- t = t.UTC()
- return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
+func truncateToDay(t time.Time) time.Time {
+ return timezone.StartOfDay(t)
}
func truncateToMonthUTC(t time.Time) time.Time {
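
The double AT TIME ZONE in the SQL above first converts the timestamptz to
wall-clock time in the configured zone, truncates there, then converts back to an
instant. A Go sketch of the equivalent bucketing; "Asia/Shanghai" is illustrative,
the repository reads the zone from the timezone package:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        loc, err := time.LoadLocation("Asia/Shanghai")
        if err != nil {
            panic(err)
        }
        // 17:30 UTC on Jan 13 is 01:30 on Jan 14 in Asia/Shanghai (+08:00).
        t := time.Date(2026, 1, 13, 17, 30, 0, 0, time.UTC)
        local := t.In(loc)

        hourBucket := local.Truncate(time.Hour) // local hour boundary
        dayBucket := time.Date(local.Year(), local.Month(), local.Day(), 0, 0, 0, 0, loc)
        fmt.Println(hourBucket) // 2026-01-14 01:00:00 +0800 CST
        fmt.Println(dayBucket)  // 2026-01-14 00:00:00 +0800 CST
    }
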
diff --git a/backend/internal/repository/ops_repo.go b/backend/internal/repository/ops_repo.go
index f9cb6b4d..613c5bd5 100644
--- a/backend/internal/repository/ops_repo.go
+++ b/backend/internal/repository/ops_repo.go
@@ -55,7 +55,6 @@ INSERT INTO ops_error_logs (
upstream_error_message,
upstream_error_detail,
upstream_errors,
- duration_ms,
time_to_first_token_ms,
request_body,
request_body_truncated,
@@ -65,7 +64,7 @@ INSERT INTO ops_error_logs (
retry_count,
created_at
) VALUES (
- $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$30,$31,$32,$33,$34,$35
+ $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$30,$31,$32,$33,$34
) RETURNING id`
var id int64
@@ -98,7 +97,6 @@ INSERT INTO ops_error_logs (
opsNullString(input.UpstreamErrorMessage),
opsNullString(input.UpstreamErrorDetail),
opsNullString(input.UpstreamErrorsJSON),
- opsNullInt(input.DurationMs),
opsNullInt64(input.TimeToFirstTokenMs),
opsNullString(input.RequestBodyJSON),
input.RequestBodyTruncated,
@@ -135,7 +133,7 @@ func (r *opsRepository) ListErrorLogs(ctx context.Context, filter *service.OpsEr
}
where, args := buildOpsErrorLogsWhere(filter)
- countSQL := "SELECT COUNT(*) FROM ops_error_logs " + where
+	// Join users so the optional u.email (user_query) clause also binds for COUNT.
+	countSQL := "SELECT COUNT(*) FROM ops_error_logs e LEFT JOIN users u ON e.user_id = u.id " + where
var total int
if err := r.db.QueryRowContext(ctx, countSQL, args...).Scan(&total); err != nil {
@@ -146,28 +144,43 @@ func (r *opsRepository) ListErrorLogs(ctx context.Context, filter *service.OpsEr
argsWithLimit := append(args, pageSize, offset)
selectSQL := `
SELECT
- id,
- created_at,
- error_phase,
- error_type,
- severity,
- COALESCE(upstream_status_code, status_code, 0),
- COALESCE(platform, ''),
- COALESCE(model, ''),
- duration_ms,
- COALESCE(client_request_id, ''),
- COALESCE(request_id, ''),
- COALESCE(error_message, ''),
- user_id,
- api_key_id,
- account_id,
- group_id,
- CASE WHEN client_ip IS NULL THEN NULL ELSE client_ip::text END,
- COALESCE(request_path, ''),
- stream
-FROM ops_error_logs
+ e.id,
+ e.created_at,
+ e.error_phase,
+ e.error_type,
+ COALESCE(e.error_owner, ''),
+ COALESCE(e.error_source, ''),
+ e.severity,
+ COALESCE(e.upstream_status_code, e.status_code, 0),
+ COALESCE(e.platform, ''),
+ COALESCE(e.model, ''),
+ COALESCE(e.is_retryable, false),
+ COALESCE(e.retry_count, 0),
+ COALESCE(e.resolved, false),
+ e.resolved_at,
+ e.resolved_by_user_id,
+ COALESCE(u2.email, ''),
+ e.resolved_retry_id,
+ COALESCE(e.client_request_id, ''),
+ COALESCE(e.request_id, ''),
+ COALESCE(e.error_message, ''),
+ e.user_id,
+ COALESCE(u.email, ''),
+ e.api_key_id,
+ e.account_id,
+ COALESCE(a.name, ''),
+ e.group_id,
+ COALESCE(g.name, ''),
+ CASE WHEN e.client_ip IS NULL THEN NULL ELSE e.client_ip::text END,
+ COALESCE(e.request_path, ''),
+ e.stream
+FROM ops_error_logs e
+LEFT JOIN accounts a ON e.account_id = a.id
+LEFT JOIN groups g ON e.group_id = g.id
+LEFT JOIN users u ON e.user_id = u.id
+LEFT JOIN users u2 ON e.resolved_by_user_id = u2.id
` + where + `
-ORDER BY created_at DESC
+ORDER BY e.created_at DESC
LIMIT $` + itoa(len(args)+1) + ` OFFSET $` + itoa(len(args)+2)
rows, err := r.db.QueryContext(ctx, selectSQL, argsWithLimit...)
@@ -179,39 +192,65 @@ LIMIT $` + itoa(len(args)+1) + ` OFFSET $` + itoa(len(args)+2)
out := make([]*service.OpsErrorLog, 0, pageSize)
for rows.Next() {
var item service.OpsErrorLog
- var latency sql.NullInt64
var statusCode sql.NullInt64
var clientIP sql.NullString
var userID sql.NullInt64
var apiKeyID sql.NullInt64
var accountID sql.NullInt64
+ var accountName string
var groupID sql.NullInt64
+ var groupName string
+ var userEmail string
+ var resolvedAt sql.NullTime
+ var resolvedBy sql.NullInt64
+ var resolvedByName string
+ var resolvedRetryID sql.NullInt64
if err := rows.Scan(
&item.ID,
&item.CreatedAt,
&item.Phase,
&item.Type,
+ &item.Owner,
+ &item.Source,
&item.Severity,
&statusCode,
&item.Platform,
&item.Model,
- &latency,
+ &item.IsRetryable,
+ &item.RetryCount,
+ &item.Resolved,
+ &resolvedAt,
+ &resolvedBy,
+ &resolvedByName,
+ &resolvedRetryID,
&item.ClientRequestID,
&item.RequestID,
&item.Message,
&userID,
+ &userEmail,
&apiKeyID,
&accountID,
+ &accountName,
&groupID,
+ &groupName,
&clientIP,
&item.RequestPath,
&item.Stream,
); err != nil {
return nil, err
}
- if latency.Valid {
- v := int(latency.Int64)
- item.LatencyMs = &v
+ if resolvedAt.Valid {
+ t := resolvedAt.Time
+ item.ResolvedAt = &t
+ }
+ if resolvedBy.Valid {
+ v := resolvedBy.Int64
+ item.ResolvedByUserID = &v
+ }
+ item.ResolvedByUserName = resolvedByName
+ if resolvedRetryID.Valid {
+ v := resolvedRetryID.Int64
+ item.ResolvedRetryID = &v
}
item.StatusCode = int(statusCode.Int64)
if clientIP.Valid {
@@ -222,6 +261,7 @@ LIMIT $` + itoa(len(args)+1) + ` OFFSET $` + itoa(len(args)+2)
v := userID.Int64
item.UserID = &v
}
+ item.UserEmail = userEmail
if apiKeyID.Valid {
v := apiKeyID.Int64
item.APIKeyID = &v
@@ -230,10 +270,12 @@ LIMIT $` + itoa(len(args)+1) + ` OFFSET $` + itoa(len(args)+2)
v := accountID.Int64
item.AccountID = &v
}
+ item.AccountName = accountName
if groupID.Valid {
v := groupID.Int64
item.GroupID = &v
}
+ item.GroupName = groupName
out = append(out, &item)
}
if err := rows.Err(); err != nil {
@@ -258,49 +300,64 @@ func (r *opsRepository) GetErrorLogByID(ctx context.Context, id int64) (*service
q := `
SELECT
- id,
- created_at,
- error_phase,
- error_type,
- severity,
- COALESCE(upstream_status_code, status_code, 0),
- COALESCE(platform, ''),
- COALESCE(model, ''),
- duration_ms,
- COALESCE(client_request_id, ''),
- COALESCE(request_id, ''),
- COALESCE(error_message, ''),
- COALESCE(error_body, ''),
- upstream_status_code,
- COALESCE(upstream_error_message, ''),
- COALESCE(upstream_error_detail, ''),
- COALESCE(upstream_errors::text, ''),
- is_business_limited,
- user_id,
- api_key_id,
- account_id,
- group_id,
- CASE WHEN client_ip IS NULL THEN NULL ELSE client_ip::text END,
- COALESCE(request_path, ''),
- stream,
- COALESCE(user_agent, ''),
- auth_latency_ms,
- routing_latency_ms,
- upstream_latency_ms,
- response_latency_ms,
- time_to_first_token_ms,
- COALESCE(request_body::text, ''),
- request_body_truncated,
- request_body_bytes,
- COALESCE(request_headers::text, '')
-FROM ops_error_logs
-WHERE id = $1
+ e.id,
+ e.created_at,
+ e.error_phase,
+ e.error_type,
+ COALESCE(e.error_owner, ''),
+ COALESCE(e.error_source, ''),
+ e.severity,
+ COALESCE(e.upstream_status_code, e.status_code, 0),
+ COALESCE(e.platform, ''),
+ COALESCE(e.model, ''),
+ COALESCE(e.is_retryable, false),
+ COALESCE(e.retry_count, 0),
+ COALESCE(e.resolved, false),
+ e.resolved_at,
+ e.resolved_by_user_id,
+ e.resolved_retry_id,
+ COALESCE(e.client_request_id, ''),
+ COALESCE(e.request_id, ''),
+ COALESCE(e.error_message, ''),
+ COALESCE(e.error_body, ''),
+ e.upstream_status_code,
+ COALESCE(e.upstream_error_message, ''),
+ COALESCE(e.upstream_error_detail, ''),
+ COALESCE(e.upstream_errors::text, ''),
+ e.is_business_limited,
+ e.user_id,
+ COALESCE(u.email, ''),
+ e.api_key_id,
+ e.account_id,
+ COALESCE(a.name, ''),
+ e.group_id,
+ COALESCE(g.name, ''),
+ CASE WHEN e.client_ip IS NULL THEN NULL ELSE e.client_ip::text END,
+ COALESCE(e.request_path, ''),
+ e.stream,
+ COALESCE(e.user_agent, ''),
+ e.auth_latency_ms,
+ e.routing_latency_ms,
+ e.upstream_latency_ms,
+ e.response_latency_ms,
+ e.time_to_first_token_ms,
+ COALESCE(e.request_body::text, ''),
+ e.request_body_truncated,
+ e.request_body_bytes,
+ COALESCE(e.request_headers::text, '')
+FROM ops_error_logs e
+LEFT JOIN users u ON e.user_id = u.id
+LEFT JOIN accounts a ON e.account_id = a.id
+LEFT JOIN groups g ON e.group_id = g.id
+WHERE e.id = $1
LIMIT 1`
var out service.OpsErrorLogDetail
- var latency sql.NullInt64
var statusCode sql.NullInt64
var upstreamStatusCode sql.NullInt64
+ var resolvedAt sql.NullTime
+ var resolvedBy sql.NullInt64
+ var resolvedRetryID sql.NullInt64
var clientIP sql.NullString
var userID sql.NullInt64
var apiKeyID sql.NullInt64
@@ -318,11 +375,18 @@ LIMIT 1`
&out.CreatedAt,
&out.Phase,
&out.Type,
+ &out.Owner,
+ &out.Source,
&out.Severity,
&statusCode,
&out.Platform,
&out.Model,
- &latency,
+ &out.IsRetryable,
+ &out.RetryCount,
+ &out.Resolved,
+ &resolvedAt,
+ &resolvedBy,
+ &resolvedRetryID,
&out.ClientRequestID,
&out.RequestID,
&out.Message,
@@ -333,9 +397,12 @@ LIMIT 1`
&out.UpstreamErrors,
&out.IsBusinessLimited,
&userID,
+ &out.UserEmail,
&apiKeyID,
&accountID,
+ &out.AccountName,
&groupID,
+ &out.GroupName,
&clientIP,
&out.RequestPath,
&out.Stream,
@@ -355,9 +422,17 @@ LIMIT 1`
}
out.StatusCode = int(statusCode.Int64)
- if latency.Valid {
- v := int(latency.Int64)
- out.LatencyMs = &v
+ if resolvedAt.Valid {
+ t := resolvedAt.Time
+ out.ResolvedAt = &t
+ }
+ if resolvedBy.Valid {
+ v := resolvedBy.Int64
+ out.ResolvedByUserID = &v
+ }
+ if resolvedRetryID.Valid {
+ v := resolvedRetryID.Int64
+ out.ResolvedRetryID = &v
}
if clientIP.Valid {
s := clientIP.String
@@ -487,9 +562,15 @@ SET
status = $2,
finished_at = $3,
duration_ms = $4,
- result_request_id = $5,
- result_error_id = $6,
- error_message = $7
+ success = $5,
+ http_status_code = $6,
+ upstream_request_id = $7,
+ used_account_id = $8,
+ response_preview = $9,
+ response_truncated = $10,
+ result_request_id = $11,
+ result_error_id = $12,
+ error_message = $13
WHERE id = $1`
_, err := r.db.ExecContext(
@@ -499,8 +580,14 @@ WHERE id = $1`
strings.TrimSpace(input.Status),
nullTime(input.FinishedAt),
input.DurationMs,
+ nullBool(input.Success),
+ nullInt(input.HTTPStatusCode),
+ opsNullString(input.UpstreamRequestID),
+ nullInt64(input.UsedAccountID),
+ opsNullString(input.ResponsePreview),
+ nullBool(input.ResponseTruncated),
opsNullString(input.ResultRequestID),
- opsNullInt64(input.ResultErrorID),
+ nullInt64(input.ResultErrorID),
opsNullString(input.ErrorMessage),
)
return err
@@ -526,6 +613,12 @@ SELECT
started_at,
finished_at,
duration_ms,
+ success,
+ http_status_code,
+ upstream_request_id,
+ used_account_id,
+ response_preview,
+ response_truncated,
result_request_id,
result_error_id,
error_message
@@ -540,6 +633,12 @@ LIMIT 1`
var startedAt sql.NullTime
var finishedAt sql.NullTime
var durationMs sql.NullInt64
+ var success sql.NullBool
+ var httpStatusCode sql.NullInt64
+ var upstreamRequestID sql.NullString
+ var usedAccountID sql.NullInt64
+ var responsePreview sql.NullString
+ var responseTruncated sql.NullBool
var resultRequestID sql.NullString
var resultErrorID sql.NullInt64
var errorMessage sql.NullString
@@ -555,6 +654,12 @@ LIMIT 1`
&startedAt,
&finishedAt,
&durationMs,
+ &success,
+ &httpStatusCode,
+ &upstreamRequestID,
+ &usedAccountID,
+ &responsePreview,
+ &responseTruncated,
&resultRequestID,
&resultErrorID,
&errorMessage,
@@ -579,6 +684,30 @@ LIMIT 1`
v := durationMs.Int64
out.DurationMs = &v
}
+ if success.Valid {
+ v := success.Bool
+ out.Success = &v
+ }
+ if httpStatusCode.Valid {
+ v := int(httpStatusCode.Int64)
+ out.HTTPStatusCode = &v
+ }
+ if upstreamRequestID.Valid {
+ s := upstreamRequestID.String
+ out.UpstreamRequestID = &s
+ }
+ if usedAccountID.Valid {
+ v := usedAccountID.Int64
+ out.UsedAccountID = &v
+ }
+ if responsePreview.Valid {
+ s := responsePreview.String
+ out.ResponsePreview = &s
+ }
+ if responseTruncated.Valid {
+ v := responseTruncated.Bool
+ out.ResponseTruncated = &v
+ }
if resultRequestID.Valid {
s := resultRequestID.String
out.ResultRequestID = &s
@@ -602,30 +731,234 @@ func nullTime(t time.Time) sql.NullTime {
return sql.NullTime{Time: t, Valid: true}
}
+func nullBool(v *bool) sql.NullBool {
+ if v == nil {
+ return sql.NullBool{}
+ }
+ return sql.NullBool{Bool: *v, Valid: true}
+}
+
+func (r *opsRepository) ListRetryAttemptsByErrorID(ctx context.Context, sourceErrorID int64, limit int) ([]*service.OpsRetryAttempt, error) {
+ if r == nil || r.db == nil {
+ return nil, fmt.Errorf("nil ops repository")
+ }
+ if sourceErrorID <= 0 {
+ return nil, fmt.Errorf("invalid source_error_id")
+ }
+ if limit <= 0 {
+ limit = 50
+ }
+ if limit > 200 {
+ limit = 200
+ }
+
+ q := `
+SELECT
+ r.id,
+ r.created_at,
+ COALESCE(r.requested_by_user_id, 0),
+ r.source_error_id,
+ COALESCE(r.mode, ''),
+ r.pinned_account_id,
+ COALESCE(pa.name, ''),
+ COALESCE(r.status, ''),
+ r.started_at,
+ r.finished_at,
+ r.duration_ms,
+ r.success,
+ r.http_status_code,
+ r.upstream_request_id,
+ r.used_account_id,
+ COALESCE(ua.name, ''),
+ r.response_preview,
+ r.response_truncated,
+ r.result_request_id,
+ r.result_error_id,
+ r.error_message
+FROM ops_retry_attempts r
+LEFT JOIN accounts pa ON r.pinned_account_id = pa.id
+LEFT JOIN accounts ua ON r.used_account_id = ua.id
+WHERE r.source_error_id = $1
+ORDER BY r.created_at DESC
+LIMIT $2`
+
+ rows, err := r.db.QueryContext(ctx, q, sourceErrorID, limit)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = rows.Close() }()
+
+ out := make([]*service.OpsRetryAttempt, 0, 16)
+ for rows.Next() {
+ var item service.OpsRetryAttempt
+ var pinnedAccountID sql.NullInt64
+ var pinnedAccountName string
+ var requestedBy sql.NullInt64
+ var startedAt sql.NullTime
+ var finishedAt sql.NullTime
+ var durationMs sql.NullInt64
+ var success sql.NullBool
+ var httpStatusCode sql.NullInt64
+ var upstreamRequestID sql.NullString
+ var usedAccountID sql.NullInt64
+ var usedAccountName string
+ var responsePreview sql.NullString
+ var responseTruncated sql.NullBool
+ var resultRequestID sql.NullString
+ var resultErrorID sql.NullInt64
+ var errorMessage sql.NullString
+
+ if err := rows.Scan(
+ &item.ID,
+ &item.CreatedAt,
+ &requestedBy,
+ &item.SourceErrorID,
+ &item.Mode,
+ &pinnedAccountID,
+ &pinnedAccountName,
+ &item.Status,
+ &startedAt,
+ &finishedAt,
+ &durationMs,
+ &success,
+ &httpStatusCode,
+ &upstreamRequestID,
+ &usedAccountID,
+ &usedAccountName,
+ &responsePreview,
+ &responseTruncated,
+ &resultRequestID,
+ &resultErrorID,
+ &errorMessage,
+ ); err != nil {
+ return nil, err
+ }
+
+ item.RequestedByUserID = requestedBy.Int64
+ if pinnedAccountID.Valid {
+ v := pinnedAccountID.Int64
+ item.PinnedAccountID = &v
+ }
+ item.PinnedAccountName = pinnedAccountName
+ if startedAt.Valid {
+ t := startedAt.Time
+ item.StartedAt = &t
+ }
+ if finishedAt.Valid {
+ t := finishedAt.Time
+ item.FinishedAt = &t
+ }
+ if durationMs.Valid {
+ v := durationMs.Int64
+ item.DurationMs = &v
+ }
+ if success.Valid {
+ v := success.Bool
+ item.Success = &v
+ }
+ if httpStatusCode.Valid {
+ v := int(httpStatusCode.Int64)
+ item.HTTPStatusCode = &v
+ }
+ if upstreamRequestID.Valid {
+ item.UpstreamRequestID = &upstreamRequestID.String
+ }
+ if usedAccountID.Valid {
+ v := usedAccountID.Int64
+ item.UsedAccountID = &v
+ }
+ item.UsedAccountName = usedAccountName
+ if responsePreview.Valid {
+ item.ResponsePreview = &responsePreview.String
+ }
+ if responseTruncated.Valid {
+ v := responseTruncated.Bool
+ item.ResponseTruncated = &v
+ }
+ if resultRequestID.Valid {
+ item.ResultRequestID = &resultRequestID.String
+ }
+ if resultErrorID.Valid {
+ v := resultErrorID.Int64
+ item.ResultErrorID = &v
+ }
+ if errorMessage.Valid {
+ item.ErrorMessage = &errorMessage.String
+ }
+ out = append(out, &item)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
+func (r *opsRepository) UpdateErrorResolution(ctx context.Context, errorID int64, resolved bool, resolvedByUserID *int64, resolvedRetryID *int64, resolvedAt *time.Time) error {
+ if r == nil || r.db == nil {
+ return fmt.Errorf("nil ops repository")
+ }
+ if errorID <= 0 {
+ return fmt.Errorf("invalid error id")
+ }
+
+ q := `
+UPDATE ops_error_logs
+SET
+ resolved = $2,
+ resolved_at = $3,
+ resolved_by_user_id = $4,
+ resolved_retry_id = $5
+WHERE id = $1`
+
+ at := sql.NullTime{}
+ if resolvedAt != nil && !resolvedAt.IsZero() {
+ at = sql.NullTime{Time: resolvedAt.UTC(), Valid: true}
+ } else if resolved {
+ now := time.Now().UTC()
+ at = sql.NullTime{Time: now, Valid: true}
+ }
+
+ _, err := r.db.ExecContext(
+ ctx,
+ q,
+ errorID,
+ resolved,
+ at,
+ nullInt64(resolvedByUserID),
+ nullInt64(resolvedRetryID),
+ )
+ return err
+}
+
func buildOpsErrorLogsWhere(filter *service.OpsErrorLogFilter) (string, []any) {
- clauses := make([]string, 0, 8)
- args := make([]any, 0, 8)
+ clauses := make([]string, 0, 12)
+ args := make([]any, 0, 12)
clauses = append(clauses, "1=1")
phaseFilter := ""
if filter != nil {
phaseFilter = strings.TrimSpace(strings.ToLower(filter.Phase))
}
- // ops_error_logs primarily stores client-visible error requests (status>=400),
+ // ops_error_logs stores client-visible error requests (status>=400),
// but we also persist "recovered" upstream errors (status<400) for upstream health visibility.
- // By default, keep list endpoints scoped to client errors unless explicitly filtering upstream phase.
+ // If Resolved is not specified, do not filter by resolved state (backward-compatible).
+	var resolvedFilter *bool
+ if filter != nil {
+ resolvedFilter = filter.Resolved
+ }
+ // Keep list endpoints scoped to client errors unless explicitly filtering upstream phase.
if phaseFilter != "upstream" {
clauses = append(clauses, "COALESCE(status_code, 0) >= 400")
}
if filter.StartTime != nil && !filter.StartTime.IsZero() {
args = append(args, filter.StartTime.UTC())
- clauses = append(clauses, "created_at >= $"+itoa(len(args)))
+ clauses = append(clauses, "e.created_at >= $"+itoa(len(args)))
}
if filter.EndTime != nil && !filter.EndTime.IsZero() {
args = append(args, filter.EndTime.UTC())
// Keep time-window semantics consistent with other ops queries: [start, end)
- clauses = append(clauses, "created_at < $"+itoa(len(args)))
+ clauses = append(clauses, "e.created_at < $"+itoa(len(args)))
}
if p := strings.TrimSpace(filter.Platform); p != "" {
args = append(args, p)
@@ -643,10 +976,59 @@ func buildOpsErrorLogsWhere(filter *service.OpsErrorLogFilter) (string, []any) {
args = append(args, phase)
clauses = append(clauses, "error_phase = $"+itoa(len(args)))
}
+ if filter != nil {
+ if owner := strings.TrimSpace(strings.ToLower(filter.Owner)); owner != "" {
+ args = append(args, owner)
+ clauses = append(clauses, "LOWER(COALESCE(error_owner,'')) = $"+itoa(len(args)))
+ }
+ if source := strings.TrimSpace(strings.ToLower(filter.Source)); source != "" {
+ args = append(args, source)
+ clauses = append(clauses, "LOWER(COALESCE(error_source,'')) = $"+itoa(len(args)))
+ }
+ }
+ if resolvedFilter != nil {
+ args = append(args, *resolvedFilter)
+ clauses = append(clauses, "COALESCE(resolved,false) = $"+itoa(len(args)))
+ }
+
+ // View filter: errors vs excluded vs all.
+ // Excluded = upstream 429/529 and business-limited (quota/concurrency/billing) errors.
+ view := ""
+ if filter != nil {
+ view = strings.ToLower(strings.TrimSpace(filter.View))
+ }
+ switch view {
+ case "", "errors":
+ clauses = append(clauses, "COALESCE(is_business_limited,false) = false")
+ clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)")
+ case "excluded":
+ clauses = append(clauses, "(COALESCE(is_business_limited,false) = true OR COALESCE(upstream_status_code, status_code, 0) IN (429, 529))")
+ case "all":
+ // no-op
+ default:
+ // treat unknown as default 'errors'
+ clauses = append(clauses, "COALESCE(is_business_limited,false) = false")
+ clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)")
+ }
if len(filter.StatusCodes) > 0 {
args = append(args, pq.Array(filter.StatusCodes))
clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) = ANY($"+itoa(len(args))+")")
+ } else if filter.StatusCodesOther {
+ // "Other" means: status codes not in the common list.
+ known := []int{400, 401, 403, 404, 409, 422, 429, 500, 502, 503, 504, 529}
+ args = append(args, pq.Array(known))
+ clauses = append(clauses, "NOT (COALESCE(upstream_status_code, status_code, 0) = ANY($"+itoa(len(args))+"))")
}
+ // Exact correlation keys (preferred for request↔upstream linkage).
+ if rid := strings.TrimSpace(filter.RequestID); rid != "" {
+ args = append(args, rid)
+ clauses = append(clauses, "COALESCE(request_id,'') = $"+itoa(len(args)))
+ }
+ if crid := strings.TrimSpace(filter.ClientRequestID); crid != "" {
+ args = append(args, crid)
+ clauses = append(clauses, "COALESCE(client_request_id,'') = $"+itoa(len(args)))
+ }
+
if q := strings.TrimSpace(filter.Query); q != "" {
like := "%" + q + "%"
args = append(args, like)
@@ -654,6 +1036,13 @@ func buildOpsErrorLogsWhere(filter *service.OpsErrorLogFilter) (string, []any) {
clauses = append(clauses, "(request_id ILIKE $"+n+" OR client_request_id ILIKE $"+n+" OR error_message ILIKE $"+n+")")
}
+ if userQuery := strings.TrimSpace(filter.UserQuery); userQuery != "" {
+ like := "%" + userQuery + "%"
+ args = append(args, like)
+ n := itoa(len(args))
+ clauses = append(clauses, "u.email ILIKE $"+n)
+ }
+
return "WHERE " + strings.Join(clauses, " AND "), args
}
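
buildOpsErrorLogsWhere keeps clauses and args in lockstep by appending the
argument first and then deriving the placeholder from len(args); a distilled
sketch of that invariant (helper and column names illustrative):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        clauses := []string{"1=1"}
        args := []any{}

        // addEq appends the arg before formatting the placeholder, so the
        // clause index can never drift from the argument position.
        addEq := func(col string, v any) {
            args = append(args, v)
            clauses = append(clauses, col+" = $"+strconv.Itoa(len(args)))
        }

        addEq("e.error_phase", "upstream")
        addEq("LOWER(COALESCE(e.error_owner,''))", "provider")

        fmt.Println("WHERE "+strings.Join(clauses, " AND "), args)
        // WHERE 1=1 AND e.error_phase = $1 AND LOWER(COALESCE(e.error_owner,'')) = $2 [upstream provider]
    }
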
diff --git a/backend/internal/repository/ops_repo_alerts.go b/backend/internal/repository/ops_repo_alerts.go
index f601c363..bd98b7e4 100644
--- a/backend/internal/repository/ops_repo_alerts.go
+++ b/backend/internal/repository/ops_repo_alerts.go
@@ -354,7 +354,7 @@ SELECT
created_at
FROM ops_alert_events
` + where + `
-ORDER BY fired_at DESC
+ORDER BY fired_at DESC, id DESC
LIMIT ` + limitArg
rows, err := r.db.QueryContext(ctx, q, args...)
@@ -413,6 +413,43 @@ LIMIT ` + limitArg
return out, nil
}
+func (r *opsRepository) GetAlertEventByID(ctx context.Context, eventID int64) (*service.OpsAlertEvent, error) {
+ if r == nil || r.db == nil {
+ return nil, fmt.Errorf("nil ops repository")
+ }
+ if eventID <= 0 {
+ return nil, fmt.Errorf("invalid event id")
+ }
+
+ q := `
+SELECT
+ id,
+ COALESCE(rule_id, 0),
+ COALESCE(severity, ''),
+ COALESCE(status, ''),
+ COALESCE(title, ''),
+ COALESCE(description, ''),
+ metric_value,
+ threshold_value,
+ dimensions,
+ fired_at,
+ resolved_at,
+ email_sent,
+ created_at
+FROM ops_alert_events
+WHERE id = $1`
+
+ row := r.db.QueryRowContext(ctx, q, eventID)
+ ev, err := scanOpsAlertEvent(row)
+ if err != nil {
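+ // A missing row is reported as (nil, nil) rather than an error.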
+ if err == sql.ErrNoRows {
+ return nil, nil
+ }
+ return nil, err
+ }
+ return ev, nil
+}
+
func (r *opsRepository) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*service.OpsAlertEvent, error) {
if r == nil || r.db == nil {
return nil, fmt.Errorf("nil ops repository")
@@ -591,6 +628,121 @@ type opsAlertEventRow interface {
Scan(dest ...any) error
}
+func (r *opsRepository) CreateAlertSilence(ctx context.Context, input *service.OpsAlertSilence) (*service.OpsAlertSilence, error) {
+ if r == nil || r.db == nil {
+ return nil, fmt.Errorf("nil ops repository")
+ }
+ if input == nil {
+ return nil, fmt.Errorf("nil input")
+ }
+ if input.RuleID <= 0 {
+ return nil, fmt.Errorf("invalid rule_id")
+ }
+ platform := strings.TrimSpace(input.Platform)
+ if platform == "" {
+ return nil, fmt.Errorf("invalid platform")
+ }
+ if input.Until.IsZero() {
+ return nil, fmt.Errorf("invalid until")
+ }
+
+ q := `
+INSERT INTO ops_alert_silences (
+ rule_id,
+ platform,
+ group_id,
+ region,
+ until,
+ reason,
+ created_by,
+ created_at
+) VALUES (
+ $1,$2,$3,$4,$5,$6,$7,NOW()
+)
+RETURNING id, rule_id, platform, group_id, region, until, COALESCE(reason,''), created_by, created_at`
+
+ row := r.db.QueryRowContext(
+ ctx,
+ q,
+ input.RuleID,
+ platform,
+ opsNullInt64(input.GroupID),
+ opsNullString(input.Region),
+ input.Until,
+ opsNullString(input.Reason),
+ opsNullInt64(input.CreatedBy),
+ )
+
+ var out service.OpsAlertSilence
+ var groupID sql.NullInt64
+ var region sql.NullString
+ var createdBy sql.NullInt64
+ if err := row.Scan(
+ &out.ID,
+ &out.RuleID,
+ &out.Platform,
+ &groupID,
+ ®ion,
+ &out.Until,
+ &out.Reason,
+ &createdBy,
+ &out.CreatedAt,
+ ); err != nil {
+ return nil, err
+ }
+ if groupID.Valid {
+ v := groupID.Int64
+ out.GroupID = &v
+ }
+ if region.Valid {
+ v := strings.TrimSpace(region.String)
+ if v != "" {
+ out.Region = &v
+ }
+ }
+ if createdBy.Valid {
+ v := createdBy.Int64
+ out.CreatedBy = &v
+ }
+ return &out, nil
+}
+
+func (r *opsRepository) IsAlertSilenced(ctx context.Context, ruleID int64, platform string, groupID *int64, region *string, now time.Time) (bool, error) {
+ if r == nil || r.db == nil {
+ return false, fmt.Errorf("nil ops repository")
+ }
+ if ruleID <= 0 {
+ return false, fmt.Errorf("invalid rule id")
+ }
+ platform = strings.TrimSpace(platform)
+ if platform == "" {
+ return false, nil
+ }
+ if now.IsZero() {
+ now = time.Now().UTC()
+ }
+
+ q := `
+SELECT 1
+FROM ops_alert_silences
+WHERE rule_id = $1
+ AND platform = $2
+ AND (group_id IS NOT DISTINCT FROM $3)
+ AND (region IS NOT DISTINCT FROM $4)
+ AND until > $5
+LIMIT 1`
+
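+ // Note: IS NOT DISTINCT FROM treats NULL as equal to NULL, so a silence
+ // stored with NULL group/region matches only lookups that also pass nil,
+ // and a group- or region-scoped silence never leaks into broader checks.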
+ var dummy int
+ err := r.db.QueryRowContext(ctx, q, ruleID, platform, opsNullInt64(groupID), opsNullString(region), now).Scan(&dummy)
+ if err != nil {
+ if err == sql.ErrNoRows {
+ return false, nil
+ }
+ return false, err
+ }
+ return true, nil
+}
+
func scanOpsAlertEvent(row opsAlertEventRow) (*service.OpsAlertEvent, error) {
var ev service.OpsAlertEvent
var metricValue sql.NullFloat64
@@ -652,6 +804,10 @@ func buildOpsAlertEventsWhere(filter *service.OpsAlertEventFilter) (string, []any) {
args = append(args, severity)
clauses = append(clauses, "severity = $"+itoa(len(args)))
}
+ if filter.EmailSent != nil {
+ args = append(args, *filter.EmailSent)
+ clauses = append(clauses, "email_sent = $"+itoa(len(args)))
+ }
if filter.StartTime != nil && !filter.StartTime.IsZero() {
args = append(args, *filter.StartTime)
clauses = append(clauses, "fired_at >= $"+itoa(len(args)))
@@ -661,6 +817,14 @@ func buildOpsAlertEventsWhere(filter *service.OpsAlertEventFilter) (string, []any) {
clauses = append(clauses, "fired_at < $"+itoa(len(args)))
}
+ // Cursor pagination (descending by fired_at, then id)
+ if filter.BeforeFiredAt != nil && !filter.BeforeFiredAt.IsZero() && filter.BeforeID != nil && *filter.BeforeID > 0 {
+ args = append(args, *filter.BeforeFiredAt)
+ tsArg := "$" + itoa(len(args))
+ args = append(args, *filter.BeforeID)
+ idArg := "$" + itoa(len(args))
+ clauses = append(clauses, fmt.Sprintf("(fired_at < %s OR (fired_at = %s AND id < %s))", tsArg, tsArg, idArg))
+ }
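+ // Example (illustrative): a cursor of (fired_at=T, id=42) returns rows
+ // strictly older than T, plus rows at exactly T with id < 42, matching
+ // the ORDER BY fired_at DESC, id DESC scan order without an OFFSET.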
// Dimensions are stored in JSONB. We filter best-effort without requiring GIN indexes.
if platform := strings.TrimSpace(filter.Platform); platform != "" {
args = append(args, platform)
diff --git a/backend/internal/repository/proxy_latency_cache.go b/backend/internal/repository/proxy_latency_cache.go
new file mode 100644
index 00000000..4458b5e1
--- /dev/null
+++ b/backend/internal/repository/proxy_latency_cache.go
@@ -0,0 +1,74 @@
+package repository
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/Wei-Shaw/sub2api/internal/service"
+ "github.com/redis/go-redis/v9"
+)
+
+const proxyLatencyKeyPrefix = "proxy:latency:"
+
+func proxyLatencyKey(proxyID int64) string {
+ return fmt.Sprintf("%s%d", proxyLatencyKeyPrefix, proxyID)
+}
+
+type proxyLatencyCache struct {
+ rdb *redis.Client
+}
+
+func NewProxyLatencyCache(rdb *redis.Client) service.ProxyLatencyCache {
+ return &proxyLatencyCache{rdb: rdb}
+}
+
+func (c *proxyLatencyCache) GetProxyLatencies(ctx context.Context, proxyIDs []int64) (map[int64]*service.ProxyLatencyInfo, error) {
+ results := make(map[int64]*service.ProxyLatencyInfo)
+ if len(proxyIDs) == 0 {
+ return results, nil
+ }
+
+ keys := make([]string, 0, len(proxyIDs))
+ for _, id := range proxyIDs {
+ keys = append(keys, proxyLatencyKey(id))
+ }
+
+ values, err := c.rdb.MGet(ctx, keys...).Result()
+ if err != nil {
+ return results, err
+ }
+
+ for i, raw := range values {
+ if raw == nil {
+ continue
+ }
+ var payload []byte
+ switch v := raw.(type) {
+ case string:
+ payload = []byte(v)
+ case []byte:
+ payload = v
+ default:
+ continue
+ }
+ var info service.ProxyLatencyInfo
+ if err := json.Unmarshal(payload, &info); err != nil {
+ continue
+ }
+ results[proxyIDs[i]] = &info
+ }
+
+ return results, nil
+}
+
+func (c *proxyLatencyCache) SetProxyLatency(ctx context.Context, proxyID int64, info *service.ProxyLatencyInfo) error {
+ if info == nil {
+ return nil
+ }
+ payload, err := json.Marshal(info)
+ if err != nil {
+ return err
+ }
+ return c.rdb.Set(ctx, proxyLatencyKey(proxyID), payload, 0).Err()
+}
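+
+// Usage sketch (illustrative; rdb, ctx, and the latency value are assumptions):
+//
+//	cache := NewProxyLatencyCache(rdb)
+//	ms := int64(120)
+//	_ = cache.SetProxyLatency(ctx, 7, &service.ProxyLatencyInfo{Success: true, LatencyMs: &ms})
+//	infos, _ := cache.GetProxyLatencies(ctx, []int64{7}) // infos[7].LatencyMs == &ms
+//
+// Entries are written without a TTL; callers overwrite them on each probe.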
diff --git a/backend/internal/repository/proxy_probe_service.go b/backend/internal/repository/proxy_probe_service.go
index 5c42e4d1..ad7b6e1c 100644
--- a/backend/internal/repository/proxy_probe_service.go
+++ b/backend/internal/repository/proxy_probe_service.go
@@ -34,7 +34,10 @@ func NewProxyExitInfoProber(cfg *config.Config) service.ProxyExitInfoProber {
}
}
-const defaultIPInfoURL = "https://ipinfo.io/json"
+const (
+ defaultIPInfoURL = "https://ipinfo.io/json"
+ defaultProxyProbeTimeout = 30 * time.Second
+)
type proxyProbeService struct {
ipInfoURL string
@@ -46,7 +49,7 @@ type proxyProbeService struct {
func (s *proxyProbeService) ProbeProxy(ctx context.Context, proxyURL string) (*service.ProxyExitInfo, int64, error) {
client, err := httpclient.GetClient(httpclient.Options{
ProxyURL: proxyURL,
- Timeout: 15 * time.Second,
+ Timeout: defaultProxyProbeTimeout,
InsecureSkipVerify: s.insecureSkipVerify,
ProxyStrict: true,
ValidateResolvedIP: s.validateResolvedIP,
diff --git a/backend/internal/repository/proxy_repo.go b/backend/internal/repository/proxy_repo.go
index 622b0aeb..36965c05 100644
--- a/backend/internal/repository/proxy_repo.go
+++ b/backend/internal/repository/proxy_repo.go
@@ -219,12 +219,54 @@ func (r *proxyRepository) ExistsByHostPortAuth(ctx context.Context, host string,
// CountAccountsByProxyID returns the number of accounts using a specific proxy
func (r *proxyRepository) CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error) {
var count int64
- if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM accounts WHERE proxy_id = $1", []any{proxyID}, &count); err != nil {
+ if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM accounts WHERE proxy_id = $1 AND deleted_at IS NULL", []any{proxyID}, &count); err != nil {
return 0, err
}
return count, nil
}
+func (r *proxyRepository) ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]service.ProxyAccountSummary, error) {
+ rows, err := r.sql.QueryContext(ctx, `
+ SELECT id, name, platform, type, notes
+ FROM accounts
+ WHERE proxy_id = $1 AND deleted_at IS NULL
+ ORDER BY id DESC
+ `, proxyID)
+ if err != nil {
+ return nil, err
+ }
+ defer func() { _ = rows.Close() }()
+
+ out := make([]service.ProxyAccountSummary, 0)
+ for rows.Next() {
+ var (
+ id int64
+ name string
+ platform string
+ accType string
+ notes sql.NullString
+ )
+ if err := rows.Scan(&id, &name, &platform, &accType, &notes); err != nil {
+ return nil, err
+ }
+ var notesPtr *string
+ if notes.Valid {
+ notesPtr = ¬es.String
+ }
+ out = append(out, service.ProxyAccountSummary{
+ ID: id,
+ Name: name,
+ Platform: platform,
+ Type: accType,
+ Notes: notesPtr,
+ })
+ }
+ if err := rows.Err(); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
+
// GetAccountCountsForProxies returns a map of proxy ID to account count for all proxies
func (r *proxyRepository) GetAccountCountsForProxies(ctx context.Context) (counts map[int64]int64, err error) {
rows, err := r.sql.QueryContext(ctx, "SELECT proxy_id, COUNT(*) AS count FROM accounts WHERE proxy_id IS NOT NULL AND deleted_at IS NULL GROUP BY proxy_id")
diff --git a/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go b/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go
index dede6014..e442a125 100644
--- a/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go
+++ b/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go
@@ -27,7 +27,7 @@ func TestSchedulerSnapshotOutboxReplay(t *testing.T) {
RunMode: config.RunModeStandard,
Gateway: config.GatewayConfig{
Scheduling: config.GatewaySchedulingConfig{
- OutboxPollIntervalSeconds: 1,
+ OutboxPollIntervalSeconds: 1,
FullRebuildIntervalSeconds: 0,
DbFallbackEnabled: true,
},
diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go
index e483f89f..4a2aaade 100644
--- a/backend/internal/repository/usage_log_repo.go
+++ b/backend/internal/repository/usage_log_repo.go
@@ -22,7 +22,7 @@ import (
"github.com/lib/pq"
)
-const usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, billing_type, stream, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, created_at"
+const usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, account_rate_multiplier, billing_type, stream, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, created_at"
type usageLogRepository struct {
client *dbent.Client
@@ -105,6 +105,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog)
total_cost,
actual_cost,
rate_multiplier,
+ account_rate_multiplier,
billing_type,
stream,
duration_ms,
@@ -120,7 +121,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog)
$8, $9, $10, $11,
$12, $13,
$14, $15, $16, $17, $18, $19,
- $20, $21, $22, $23, $24, $25, $26, $27, $28, $29
+ $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30
)
ON CONFLICT (request_id, api_key_id) DO NOTHING
RETURNING id, created_at
@@ -160,6 +161,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog)
log.TotalCost,
log.ActualCost,
rateMultiplier,
+ log.AccountRateMultiplier,
log.BillingType,
log.Stream,
duration,
@@ -270,13 +272,13 @@ type DashboardStats = usagestats.DashboardStats
func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardStats, error) {
stats := &DashboardStats{}
- now := time.Now().UTC()
- todayUTC := truncateToDayUTC(now)
+ now := timezone.Now()
+ todayStart := timezone.Today()
- if err := r.fillDashboardEntityStats(ctx, stats, todayUTC, now); err != nil {
+ if err := r.fillDashboardEntityStats(ctx, stats, todayStart, now); err != nil {
return nil, err
}
- if err := r.fillDashboardUsageStatsAggregated(ctx, stats, todayUTC, now); err != nil {
+ if err := r.fillDashboardUsageStatsAggregated(ctx, stats, todayStart, now); err != nil {
return nil, err
}
@@ -298,13 +300,13 @@ func (r *usageLogRepository) GetDashboardStatsWithRange(ctx context.Context, sta
}
stats := &DashboardStats{}
- now := time.Now().UTC()
- todayUTC := truncateToDayUTC(now)
+ now := timezone.Now()
+ todayStart := timezone.Today()
- if err := r.fillDashboardEntityStats(ctx, stats, todayUTC, now); err != nil {
+ if err := r.fillDashboardEntityStats(ctx, stats, todayStart, now); err != nil {
return nil, err
}
- if err := r.fillDashboardUsageStatsFromUsageLogs(ctx, stats, startUTC, endUTC, todayUTC, now); err != nil {
+ if err := r.fillDashboardUsageStatsFromUsageLogs(ctx, stats, startUTC, endUTC, todayStart, now); err != nil {
return nil, err
}
@@ -455,7 +457,7 @@ func (r *usageLogRepository) fillDashboardUsageStatsAggregated(ctx context.Conte
FROM usage_dashboard_hourly
WHERE bucket_start = $1
`
- hourStart := now.UTC().Truncate(time.Hour)
+ hourStart := now.In(timezone.Location()).Truncate(time.Hour)
if err := scanSingleRow(ctx, r.sql, hourlyActiveQuery, []any{hourStart}, &stats.HourlyActiveUsers); err != nil {
if err != sql.ErrNoRows {
return err
@@ -835,7 +837,9 @@ func (r *usageLogRepository) GetAccountTodayStats(ctx context.Context, accountID
SELECT
COUNT(*) as requests,
COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
- COALESCE(SUM(actual_cost), 0) as cost
+ COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as cost,
+ COALESCE(SUM(total_cost), 0) as standard_cost,
+ COALESCE(SUM(actual_cost), 0) as user_cost
FROM usage_logs
WHERE account_id = $1 AND created_at >= $2
`
@@ -849,6 +853,8 @@ func (r *usageLogRepository) GetAccountTodayStats(ctx context.Context, accountID
&stats.Requests,
&stats.Tokens,
&stats.Cost,
+ &stats.StandardCost,
+ &stats.UserCost,
); err != nil {
return nil, err
}
@@ -861,7 +867,9 @@ func (r *usageLogRepository) GetAccountWindowStats(ctx context.Context, accountI
SELECT
COUNT(*) as requests,
COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
- COALESCE(SUM(actual_cost), 0) as cost
+ COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as cost,
+ COALESCE(SUM(total_cost), 0) as standard_cost,
+ COALESCE(SUM(actual_cost), 0) as user_cost
FROM usage_logs
WHERE account_id = $1 AND created_at >= $2
`
@@ -875,6 +883,8 @@ func (r *usageLogRepository) GetAccountWindowStats(ctx context.Context, accountI
&stats.Requests,
&stats.Tokens,
&stats.Cost,
+ &stats.StandardCost,
+ &stats.UserCost,
); err != nil {
return nil, err
}
@@ -1400,8 +1410,8 @@ func (r *usageLogRepository) GetBatchAPIKeyUsageStats(ctx context.Context, apiKe
return result, nil
}
-// GetUsageTrendWithFilters returns usage trend data with optional user/api_key filters
-func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID int64) (results []TrendDataPoint, err error) {
+// GetUsageTrendWithFilters returns usage trend data with optional filters
+func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) (results []TrendDataPoint, err error) {
dateFormat := "YYYY-MM-DD"
if granularity == "hour" {
dateFormat = "YYYY-MM-DD HH24:00"
@@ -1430,6 +1440,22 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start
query += fmt.Sprintf(" AND api_key_id = $%d", len(args)+1)
args = append(args, apiKeyID)
}
+ if accountID > 0 {
+ query += fmt.Sprintf(" AND account_id = $%d", len(args)+1)
+ args = append(args, accountID)
+ }
+ if groupID > 0 {
+ query += fmt.Sprintf(" AND group_id = $%d", len(args)+1)
+ args = append(args, groupID)
+ }
+ if model != "" {
+ query += fmt.Sprintf(" AND model = $%d", len(args)+1)
+ args = append(args, model)
+ }
+ if stream != nil {
+ query += fmt.Sprintf(" AND stream = $%d", len(args)+1)
+ args = append(args, *stream)
+ }
query += " GROUP BY date ORDER BY date ASC"
rows, err := r.sql.QueryContext(ctx, query, args...)
@@ -1452,9 +1478,15 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start
return results, nil
}
-// GetModelStatsWithFilters returns model statistics with optional user/api_key filters
-func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID int64) (results []ModelStat, err error) {
- query := `
+// GetModelStatsWithFilters returns model statistics with optional filters
+func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) (results []ModelStat, err error) {
+ actualCostExpr := "COALESCE(SUM(actual_cost), 0) as actual_cost"
+ // When aggregating by account_id only, the actual cost uses the account rate multiplier (total_cost * account_rate_multiplier).
+ if accountID > 0 && userID == 0 && apiKeyID == 0 {
+ actualCostExpr = "COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as actual_cost"
+ }
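+ // Illustrative: a row with total_cost=2.0 and account_rate_multiplier=0.5
+ // reports actual_cost=1.0 in account-scoped stats.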
+
+ query := fmt.Sprintf(`
SELECT
model,
COUNT(*) as requests,
@@ -1462,10 +1494,10 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start
COALESCE(SUM(output_tokens), 0) as output_tokens,
COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens,
COALESCE(SUM(total_cost), 0) as cost,
- COALESCE(SUM(actual_cost), 0) as actual_cost
+ %s
FROM usage_logs
WHERE created_at >= $1 AND created_at < $2
- `
+ `, actualCostExpr)
args := []any{startTime, endTime}
if userID > 0 {
@@ -1480,6 +1512,14 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start
query += fmt.Sprintf(" AND account_id = $%d", len(args)+1)
args = append(args, accountID)
}
+ if groupID > 0 {
+ query += fmt.Sprintf(" AND group_id = $%d", len(args)+1)
+ args = append(args, groupID)
+ }
+ if stream != nil {
+ query += fmt.Sprintf(" AND stream = $%d", len(args)+1)
+ args = append(args, *stream)
+ }
query += " GROUP BY model ORDER BY total_tokens DESC"
rows, err := r.sql.QueryContext(ctx, query, args...)
@@ -1587,12 +1627,14 @@ func (r *usageLogRepository) GetStatsWithFilters(ctx context.Context, filters Us
COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens,
COALESCE(SUM(total_cost), 0) as total_cost,
COALESCE(SUM(actual_cost), 0) as total_actual_cost,
+ COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as total_account_cost,
COALESCE(AVG(duration_ms), 0) as avg_duration_ms
FROM usage_logs
%s
`, buildWhere(conditions))
stats := &UsageStats{}
+ var totalAccountCost float64
if err := scanSingleRow(
ctx,
r.sql,
@@ -1604,10 +1646,14 @@ func (r *usageLogRepository) GetStatsWithFilters(ctx context.Context, filters Us
&stats.TotalCacheTokens,
&stats.TotalCost,
&stats.TotalActualCost,
+ &totalAccountCost,
&stats.AverageDurationMs,
); err != nil {
return nil, err
}
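+ // TotalAccountCost is populated only for account-scoped queries; it stays
+ // nil otherwise, leaving unfiltered responses unchanged.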
+ if filters.AccountID > 0 {
+ stats.TotalAccountCost = &totalAccountCost
+ }
stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens
return stats, nil
}
@@ -1634,7 +1680,8 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
COUNT(*) as requests,
COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens,
COALESCE(SUM(total_cost), 0) as cost,
- COALESCE(SUM(actual_cost), 0) as actual_cost
+ COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as actual_cost,
+ COALESCE(SUM(actual_cost), 0) as user_cost
FROM usage_logs
WHERE account_id = $1 AND created_at >= $2 AND created_at < $3
GROUP BY date
@@ -1661,7 +1708,8 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
var tokens int64
var cost float64
var actualCost float64
- if err = rows.Scan(&date, &requests, &tokens, &cost, &actualCost); err != nil {
+ var userCost float64
+ if err = rows.Scan(&date, &requests, &tokens, &cost, &actualCost, &userCost); err != nil {
return nil, err
}
t, _ := time.Parse("2006-01-02", date)
@@ -1672,19 +1720,21 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
Tokens: tokens,
Cost: cost,
ActualCost: actualCost,
+ UserCost: userCost,
})
}
if err = rows.Err(); err != nil {
return nil, err
}
- var totalActualCost, totalStandardCost float64
+ var totalAccountCost, totalUserCost, totalStandardCost float64
var totalRequests, totalTokens int64
var highestCostDay, highestRequestDay *AccountUsageHistory
for i := range history {
h := &history[i]
- totalActualCost += h.ActualCost
+ totalAccountCost += h.ActualCost
+ totalUserCost += h.UserCost
totalStandardCost += h.Cost
totalRequests += h.Requests
totalTokens += h.Tokens
@@ -1711,11 +1761,13 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
summary := AccountUsageSummary{
Days: daysCount,
ActualDaysUsed: actualDaysUsed,
- TotalCost: totalActualCost,
+ TotalCost: totalAccountCost,
+ TotalUserCost: totalUserCost,
TotalStandardCost: totalStandardCost,
TotalRequests: totalRequests,
TotalTokens: totalTokens,
- AvgDailyCost: totalActualCost / float64(actualDaysUsed),
+ AvgDailyCost: totalAccountCost / float64(actualDaysUsed),
+ AvgDailyUserCost: totalUserCost / float64(actualDaysUsed),
AvgDailyRequests: float64(totalRequests) / float64(actualDaysUsed),
AvgDailyTokens: float64(totalTokens) / float64(actualDaysUsed),
AvgDurationMs: avgDuration,
@@ -1727,11 +1779,13 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
summary.Today = &struct {
Date string `json:"date"`
Cost float64 `json:"cost"`
+ UserCost float64 `json:"user_cost"`
Requests int64 `json:"requests"`
Tokens int64 `json:"tokens"`
}{
Date: history[i].Date,
Cost: history[i].ActualCost,
+ UserCost: history[i].UserCost,
Requests: history[i].Requests,
Tokens: history[i].Tokens,
}
@@ -1744,11 +1798,13 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
Date string `json:"date"`
Label string `json:"label"`
Cost float64 `json:"cost"`
+ UserCost float64 `json:"user_cost"`
Requests int64 `json:"requests"`
}{
Date: highestCostDay.Date,
Label: highestCostDay.Label,
Cost: highestCostDay.ActualCost,
+ UserCost: highestCostDay.UserCost,
Requests: highestCostDay.Requests,
}
}
@@ -1759,15 +1815,17 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID
Label string `json:"label"`
Requests int64 `json:"requests"`
Cost float64 `json:"cost"`
+ UserCost float64 `json:"user_cost"`
}{
Date: highestRequestDay.Date,
Label: highestRequestDay.Label,
Requests: highestRequestDay.Requests,
Cost: highestRequestDay.ActualCost,
+ UserCost: highestRequestDay.UserCost,
}
}
- models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID)
+ models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil)
if err != nil {
models = []ModelStat{}
}
@@ -1994,36 +2052,37 @@ func (r *usageLogRepository) loadSubscriptions(ctx context.Context, ids []int64)
func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, error) {
var (
- id int64
- userID int64
- apiKeyID int64
- accountID int64
- requestID sql.NullString
- model string
- groupID sql.NullInt64
- subscriptionID sql.NullInt64
- inputTokens int
- outputTokens int
- cacheCreationTokens int
- cacheReadTokens int
- cacheCreation5m int
- cacheCreation1h int
- inputCost float64
- outputCost float64
- cacheCreationCost float64
- cacheReadCost float64
- totalCost float64
- actualCost float64
- rateMultiplier float64
- billingType int16
- stream bool
- durationMs sql.NullInt64
- firstTokenMs sql.NullInt64
- userAgent sql.NullString
- ipAddress sql.NullString
- imageCount int
- imageSize sql.NullString
- createdAt time.Time
+ id int64
+ userID int64
+ apiKeyID int64
+ accountID int64
+ requestID sql.NullString
+ model string
+ groupID sql.NullInt64
+ subscriptionID sql.NullInt64
+ inputTokens int
+ outputTokens int
+ cacheCreationTokens int
+ cacheReadTokens int
+ cacheCreation5m int
+ cacheCreation1h int
+ inputCost float64
+ outputCost float64
+ cacheCreationCost float64
+ cacheReadCost float64
+ totalCost float64
+ actualCost float64
+ rateMultiplier float64
+ accountRateMultiplier sql.NullFloat64
+ billingType int16
+ stream bool
+ durationMs sql.NullInt64
+ firstTokenMs sql.NullInt64
+ userAgent sql.NullString
+ ipAddress sql.NullString
+ imageCount int
+ imageSize sql.NullString
+ createdAt time.Time
)
if err := scanner.Scan(
@@ -2048,6 +2107,7 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, error) {
&totalCost,
&actualCost,
&rateMultiplier,
+ &accountRateMultiplier,
&billingType,
&stream,
&durationMs,
@@ -2080,6 +2140,7 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, error) {
TotalCost: totalCost,
ActualCost: actualCost,
RateMultiplier: rateMultiplier,
+ AccountRateMultiplier: nullFloat64Ptr(accountRateMultiplier),
BillingType: int8(billingType),
Stream: stream,
ImageCount: imageCount,
@@ -2186,6 +2247,14 @@ func nullInt(v *int) sql.NullInt64 {
return sql.NullInt64{Int64: int64(*v), Valid: true}
}
+func nullFloat64Ptr(v sql.NullFloat64) *float64 {
+ if !v.Valid {
+ return nil
+ }
+ out := v.Float64
+ return &out
+}
+
func nullString(v *string) sql.NullString {
if v == nil || *v == "" {
return sql.NullString{}
diff --git a/backend/internal/repository/usage_log_repo_integration_test.go b/backend/internal/repository/usage_log_repo_integration_test.go
index 3f90e49e..7174be18 100644
--- a/backend/internal/repository/usage_log_repo_integration_test.go
+++ b/backend/internal/repository/usage_log_repo_integration_test.go
@@ -11,6 +11,7 @@ import (
dbent "github.com/Wei-Shaw/sub2api/ent"
"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+ "github.com/Wei-Shaw/sub2api/internal/pkg/timezone"
"github.com/Wei-Shaw/sub2api/internal/pkg/usagestats"
"github.com/Wei-Shaw/sub2api/internal/service"
"github.com/stretchr/testify/suite"
@@ -36,6 +37,12 @@ func TestUsageLogRepoSuite(t *testing.T) {
suite.Run(t, new(UsageLogRepoSuite))
}
+// truncateToDayUTC truncates a time to the UTC day boundary (test helper).
+func truncateToDayUTC(t time.Time) time.Time {
+ t = t.UTC()
+ return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)
+}
+
func (s *UsageLogRepoSuite) createUsageLog(user *service.User, apiKey *service.APIKey, account *service.Account, inputTokens, outputTokens int, cost float64, createdAt time.Time) *service.UsageLog {
log := &service.UsageLog{
UserID: user.ID,
@@ -95,6 +102,34 @@ func (s *UsageLogRepoSuite) TestGetByID_NotFound() {
s.Require().Error(err, "expected error for non-existent ID")
}
+func (s *UsageLogRepoSuite) TestGetByID_ReturnsAccountRateMultiplier() {
+ user := mustCreateUser(s.T(), s.client, &service.User{Email: "getbyid-mult@test.com"})
+ apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-getbyid-mult", Name: "k"})
+ account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-getbyid-mult"})
+
+ m := 0.5
+ log := &service.UsageLog{
+ UserID: user.ID,
+ APIKeyID: apiKey.ID,
+ AccountID: account.ID,
+ RequestID: uuid.New().String(),
+ Model: "claude-3",
+ InputTokens: 10,
+ OutputTokens: 20,
+ TotalCost: 1.0,
+ ActualCost: 2.0,
+ AccountRateMultiplier: &m,
+ CreatedAt: timezone.Today().Add(2 * time.Hour),
+ }
+ _, err := s.repo.Create(s.ctx, log)
+ s.Require().NoError(err)
+
+ got, err := s.repo.GetByID(s.ctx, log.ID)
+ s.Require().NoError(err)
+ s.Require().NotNil(got.AccountRateMultiplier)
+ s.Require().InEpsilon(0.5, *got.AccountRateMultiplier, 0.0001)
+}
+
// --- Delete ---
func (s *UsageLogRepoSuite) TestDelete() {
@@ -403,12 +438,49 @@ func (s *UsageLogRepoSuite) TestGetAccountTodayStats() {
apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-acctoday", Name: "k"})
account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-today"})
- s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now())
+ createdAt := timezone.Today().Add(1 * time.Hour)
+
+ m1 := 1.5
+ m2 := 0.0
+ _, err := s.repo.Create(s.ctx, &service.UsageLog{
+ UserID: user.ID,
+ APIKeyID: apiKey.ID,
+ AccountID: account.ID,
+ RequestID: uuid.New().String(),
+ Model: "claude-3",
+ InputTokens: 10,
+ OutputTokens: 20,
+ TotalCost: 1.0,
+ ActualCost: 2.0,
+ AccountRateMultiplier: &m1,
+ CreatedAt: createdAt,
+ })
+ s.Require().NoError(err)
+ _, err = s.repo.Create(s.ctx, &service.UsageLog{
+ UserID: user.ID,
+ APIKeyID: apiKey.ID,
+ AccountID: account.ID,
+ RequestID: uuid.New().String(),
+ Model: "claude-3",
+ InputTokens: 5,
+ OutputTokens: 5,
+ TotalCost: 0.5,
+ ActualCost: 1.0,
+ AccountRateMultiplier: &m2,
+ CreatedAt: createdAt,
+ })
+ s.Require().NoError(err)
stats, err := s.repo.GetAccountTodayStats(s.ctx, account.ID)
s.Require().NoError(err, "GetAccountTodayStats")
- s.Require().Equal(int64(1), stats.Requests)
- s.Require().Equal(int64(30), stats.Tokens)
+ s.Require().Equal(int64(2), stats.Requests)
+ s.Require().Equal(int64(40), stats.Tokens)
+ // account cost = SUM(total_cost * account_rate_multiplier)
+ s.Require().InEpsilon(1.5, stats.Cost, 0.0001)
+ // standard cost = SUM(total_cost)
+ s.Require().InEpsilon(1.5, stats.StandardCost, 0.0001)
+ // user cost = SUM(actual_cost)
+ s.Require().InEpsilon(3.0, stats.UserCost, 0.0001)
}
func (s *UsageLogRepoSuite) TestDashboardAggregationConsistency() {
@@ -416,8 +488,8 @@ func (s *UsageLogRepoSuite) TestDashboardAggregationConsistency() {
// Use fixed time offsets so hour1 and hour2 fall on the same day and both lie in the past.
// Pick 02:00 and 03:00 of that day as test points (based on now's date).
dayStart := truncateToDayUTC(now)
- hour1 := dayStart.Add(2 * time.Hour) // 02:00 that day
- hour2 := dayStart.Add(3 * time.Hour) // 03:00 that day
+ hour1 := dayStart.Add(2 * time.Hour) // 02:00 that day
+ hour2 := dayStart.Add(3 * time.Hour) // 03:00 that day
// If the current time is earlier than hour2, use yesterday's date instead.
if now.Before(hour2.Add(time.Hour)) {
dayStart = dayStart.Add(-24 * time.Hour)
@@ -872,17 +944,17 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters() {
endTime := base.Add(48 * time.Hour)
// Test with user filter
- trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0)
+ trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil)
s.Require().NoError(err, "GetUsageTrendWithFilters user filter")
s.Require().Len(trend, 2)
// Test with apiKey filter
- trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID)
+ trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil)
s.Require().NoError(err, "GetUsageTrendWithFilters apiKey filter")
s.Require().Len(trend, 2)
// Test with both filters
- trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID)
+ trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID, 0, 0, "", nil)
s.Require().NoError(err, "GetUsageTrendWithFilters both filters")
s.Require().Len(trend, 2)
}
@@ -899,7 +971,7 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters_HourlyGranularity() {
startTime := base.Add(-1 * time.Hour)
endTime := base.Add(3 * time.Hour)
- trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0)
+ trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil)
s.Require().NoError(err, "GetUsageTrendWithFilters hourly")
s.Require().Len(trend, 2)
}
@@ -945,17 +1017,17 @@ func (s *UsageLogRepoSuite) TestGetModelStatsWithFilters() {
endTime := base.Add(2 * time.Hour)
// Test with user filter
- stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0)
+ stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil)
s.Require().NoError(err, "GetModelStatsWithFilters user filter")
s.Require().Len(stats, 2)
// Test with apiKey filter
- stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0)
+ stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil)
s.Require().NoError(err, "GetModelStatsWithFilters apiKey filter")
s.Require().Len(stats, 2)
// Test with account filter
- stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID)
+ stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil)
s.Require().NoError(err, "GetModelStatsWithFilters account filter")
s.Require().Len(stats, 2)
}
diff --git a/backend/internal/repository/wire.go b/backend/internal/repository/wire.go
index 45a8f182..91ef9413 100644
--- a/backend/internal/repository/wire.go
+++ b/backend/internal/repository/wire.go
@@ -69,6 +69,7 @@ var ProviderSet = wire.NewSet(
NewGeminiTokenCache,
NewSchedulerCache,
NewSchedulerOutboxRepository,
+ NewProxyLatencyCache,
// HTTP service ports (DI Strategy A: return interface directly)
NewTurnstileVerifier,
diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go
index d96732bd..7d2b789f 100644
--- a/backend/internal/server/api_contract_test.go
+++ b/backend/internal/server/api_contract_test.go
@@ -239,9 +239,10 @@ func TestAPIContracts(t *testing.T) {
"cache_creation_cost": 0,
"cache_read_cost": 0,
"total_cost": 0.5,
- "actual_cost": 0.5,
- "rate_multiplier": 1,
- "billing_type": 0,
+ "actual_cost": 0.5,
+ "rate_multiplier": 1,
+ "account_rate_multiplier": null,
+ "billing_type": 0,
"stream": true,
"duration_ms": 100,
"first_token_ms": 50,
@@ -262,11 +263,11 @@ func TestAPIContracts(t *testing.T) {
name: "GET /api/v1/admin/settings",
setup: func(t *testing.T, deps *contractDeps) {
t.Helper()
- deps.settingRepo.SetAll(map[string]string{
- service.SettingKeyRegistrationEnabled: "true",
- service.SettingKeyEmailVerifyEnabled: "false",
+ deps.settingRepo.SetAll(map[string]string{
+ service.SettingKeyRegistrationEnabled: "true",
+ service.SettingKeyEmailVerifyEnabled: "false",
- service.SettingKeySMTPHost: "smtp.example.com",
+ service.SettingKeySMTPHost: "smtp.example.com",
service.SettingKeySMTPPort: "587",
service.SettingKeySMTPUsername: "user",
service.SettingKeySMTPPassword: "secret",
@@ -285,15 +286,15 @@ func TestAPIContracts(t *testing.T) {
service.SettingKeyContactInfo: "support",
service.SettingKeyDocURL: "https://docs.example.com",
- service.SettingKeyDefaultConcurrency: "5",
- service.SettingKeyDefaultBalance: "1.25",
+ service.SettingKeyDefaultConcurrency: "5",
+ service.SettingKeyDefaultBalance: "1.25",
- service.SettingKeyOpsMonitoringEnabled: "false",
- service.SettingKeyOpsRealtimeMonitoringEnabled: "true",
- service.SettingKeyOpsQueryModeDefault: "auto",
- service.SettingKeyOpsMetricsIntervalSeconds: "60",
- })
- },
+ service.SettingKeyOpsMonitoringEnabled: "false",
+ service.SettingKeyOpsRealtimeMonitoringEnabled: "true",
+ service.SettingKeyOpsQueryModeDefault: "auto",
+ service.SettingKeyOpsMetricsIntervalSeconds: "60",
+ })
+ },
method: http.MethodGet,
path: "/api/v1/admin/settings",
wantStatus: http.StatusOK,
@@ -435,7 +436,7 @@ func newContractDeps(t *testing.T) *contractDeps {
settingRepo := newStubSettingRepo()
settingService := service.NewSettingService(settingRepo, cfg)
- adminService := service.NewAdminService(userRepo, groupRepo, &accountRepo, proxyRepo, apiKeyRepo, redeemRepo, nil, nil, nil)
+ adminService := service.NewAdminService(userRepo, groupRepo, &accountRepo, proxyRepo, apiKeyRepo, redeemRepo, nil, nil, nil, nil)
authHandler := handler.NewAuthHandler(cfg, nil, userService, settingService, nil)
apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService)
usageHandler := handler.NewUsageHandler(usageService, apiKeyService)
@@ -858,6 +859,10 @@ func (stubProxyRepo) CountAccountsByProxyID(ctx context.Context, proxyID int64)
return 0, errors.New("not implemented")
}
+func (stubProxyRepo) ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]service.ProxyAccountSummary, error) {
+ return nil, errors.New("not implemented")
+}
+
type stubRedeemCodeRepo struct{}
func (stubRedeemCodeRepo) Create(ctx context.Context, code *service.RedeemCode) error {
@@ -1229,11 +1234,11 @@ func (r *stubUsageLogRepo) GetDashboardStats(ctx context.Context) (*usagestats.D
return nil, errors.New("not implemented")
}
-func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID int64) ([]usagestats.TrendDataPoint, error) {
+func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) {
return nil, errors.New("not implemented")
}
-func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID int64) ([]usagestats.ModelStat, error) {
+func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) {
return nil, errors.New("not implemented")
}
diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go
index 9bb019bb..ff05b32a 100644
--- a/backend/internal/server/routes/admin.go
+++ b/backend/internal/server/routes/admin.go
@@ -81,6 +81,9 @@ func registerOpsRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
ops.PUT("/alert-rules/:id", h.Admin.Ops.UpdateAlertRule)
ops.DELETE("/alert-rules/:id", h.Admin.Ops.DeleteAlertRule)
ops.GET("/alert-events", h.Admin.Ops.ListAlertEvents)
+ ops.GET("/alert-events/:id", h.Admin.Ops.GetAlertEvent)
+ ops.PUT("/alert-events/:id/status", h.Admin.Ops.UpdateAlertEventStatus)
+ ops.POST("/alert-silences", h.Admin.Ops.CreateAlertSilence)
// Email notification config (DB-backed)
ops.GET("/email-notification/config", h.Admin.Ops.GetEmailNotificationConfig)
@@ -110,10 +113,26 @@ func registerOpsRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
ws.GET("/qps", h.Admin.Ops.QPSWSHandler)
}
- // Error logs (MVP-1)
+ // Error logs (legacy)
ops.GET("/errors", h.Admin.Ops.GetErrorLogs)
ops.GET("/errors/:id", h.Admin.Ops.GetErrorLogByID)
+ ops.GET("/errors/:id/retries", h.Admin.Ops.ListRetryAttempts)
ops.POST("/errors/:id/retry", h.Admin.Ops.RetryErrorRequest)
+ ops.PUT("/errors/:id/resolve", h.Admin.Ops.UpdateErrorResolution)
+
+ // Request errors (client-visible failures)
+ ops.GET("/request-errors", h.Admin.Ops.ListRequestErrors)
+ ops.GET("/request-errors/:id", h.Admin.Ops.GetRequestError)
+ ops.GET("/request-errors/:id/upstream-errors", h.Admin.Ops.ListRequestErrorUpstreamErrors)
+ ops.POST("/request-errors/:id/retry-client", h.Admin.Ops.RetryRequestErrorClient)
+ ops.POST("/request-errors/:id/upstream-errors/:idx/retry", h.Admin.Ops.RetryRequestErrorUpstreamEvent)
+ ops.PUT("/request-errors/:id/resolve", h.Admin.Ops.ResolveRequestError)
+
+ // Upstream errors (independent upstream failures)
+ ops.GET("/upstream-errors", h.Admin.Ops.ListUpstreamErrors)
+ ops.GET("/upstream-errors/:id", h.Admin.Ops.GetUpstreamError)
+ ops.POST("/upstream-errors/:id/retry", h.Admin.Ops.RetryUpstreamError)
+ ops.PUT("/upstream-errors/:id/resolve", h.Admin.Ops.ResolveUpstreamError)
// Request drilldown (success + error)
ops.GET("/requests", h.Admin.Ops.ListRequestDetails)
@@ -250,6 +269,7 @@ func registerProxyRoutes(admin *gin.RouterGroup, h *handler.Handlers) {
proxies.POST("/:id/test", h.Admin.Proxy.Test)
proxies.GET("/:id/stats", h.Admin.Proxy.GetStats)
proxies.GET("/:id/accounts", h.Admin.Proxy.GetProxyAccounts)
+ proxies.POST("/batch-delete", h.Admin.Proxy.BatchDelete)
proxies.POST("/batch", h.Admin.Proxy.BatchCreate)
}
}
diff --git a/backend/internal/service/account.go b/backend/internal/service/account.go
index cfce9bfa..0d7a9cf9 100644
--- a/backend/internal/service/account.go
+++ b/backend/internal/service/account.go
@@ -9,16 +9,19 @@ import (
)
type Account struct {
- ID int64
- Name string
- Notes *string
- Platform string
- Type string
- Credentials map[string]any
- Extra map[string]any
- ProxyID *int64
- Concurrency int
- Priority int
+ ID int64
+ Name string
+ Notes *string
+ Platform string
+ Type string
+ Credentials map[string]any
+ Extra map[string]any
+ ProxyID *int64
+ Concurrency int
+ Priority int
+ // RateMultiplier is the account billing rate multiplier (>= 0; 0 means this account bills at zero).
+ // A pointer keeps compatibility with older scheduler cache entries (Redis) that lack the field: nil is treated as 1.0.
+ RateMultiplier *float64
Status string
ErrorMessage string
LastUsedAt *time.Time
@@ -57,6 +60,20 @@ func (a *Account) IsActive() bool {
return a.Status == StatusActive
}
+// BillingRateMultiplier returns the account billing rate multiplier.
+// - nil means unconfigured or missing from an old cache entry; treated as 1.0
+// - 0 is allowed and means the account bills at zero
+// - negative values are invalid data and fall back to 1.0 for safety
+func (a *Account) BillingRateMultiplier() float64 {
+ if a == nil || a.RateMultiplier == nil {
+ return 1.0
+ }
+ if *a.RateMultiplier < 0 {
+ return 1.0
+ }
+ return *a.RateMultiplier
+}
+
func (a *Account) IsSchedulable() bool {
if !a.IsActive() || !a.Schedulable {
return false
diff --git a/backend/internal/service/account_billing_rate_multiplier_test.go b/backend/internal/service/account_billing_rate_multiplier_test.go
new file mode 100644
index 00000000..731cfa7a
--- /dev/null
+++ b/backend/internal/service/account_billing_rate_multiplier_test.go
@@ -0,0 +1,27 @@
+package service
+
+import (
+ "encoding/json"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestAccount_BillingRateMultiplier_DefaultsToOneWhenNil(t *testing.T) {
+ var a Account
+ require.NoError(t, json.Unmarshal([]byte(`{"id":1,"name":"acc","status":"active"}`), &a))
+ require.Nil(t, a.RateMultiplier)
+ require.Equal(t, 1.0, a.BillingRateMultiplier())
+}
+
+func TestAccount_BillingRateMultiplier_AllowsZero(t *testing.T) {
+ v := 0.0
+ a := Account{RateMultiplier: &v}
+ require.Equal(t, 0.0, a.BillingRateMultiplier())
+}
+
+func TestAccount_BillingRateMultiplier_NegativeFallsBackToOne(t *testing.T) {
+ v := -1.0
+ a := Account{RateMultiplier: &v}
+ require.Equal(t, 1.0, a.BillingRateMultiplier())
+}
diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go
index 2f138b81..2badc760 100644
--- a/backend/internal/service/account_service.go
+++ b/backend/internal/service/account_service.go
@@ -63,14 +63,15 @@ type AccountRepository interface {
// AccountBulkUpdate describes the fields that can be updated in a bulk operation.
// Nil pointers mean "do not change".
type AccountBulkUpdate struct {
- Name *string
- ProxyID *int64
- Concurrency *int
- Priority *int
- Status *string
- Schedulable *bool
- Credentials map[string]any
- Extra map[string]any
+ Name *string
+ ProxyID *int64
+ Concurrency *int
+ Priority *int
+ RateMultiplier *float64
+ Status *string
+ Schedulable *bool
+ Credentials map[string]any
+ Extra map[string]any
}
// CreateAccountRequest is the request payload for creating an account
diff --git a/backend/internal/service/account_usage_service.go b/backend/internal/service/account_usage_service.go
index f1ee43d2..d9ed5609 100644
--- a/backend/internal/service/account_usage_service.go
+++ b/backend/internal/service/account_usage_service.go
@@ -32,8 +32,8 @@ type UsageLogRepository interface {
// Admin dashboard stats
GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error)
- GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID int64) ([]usagestats.TrendDataPoint, error)
- GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID int64) ([]usagestats.ModelStat, error)
+ GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error)
+ GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error)
GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error)
GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.UserUsageTrendPoint, error)
GetBatchUserUsageStats(ctx context.Context, userIDs []int64) (map[int64]*usagestats.BatchUserUsageStats, error)
@@ -96,10 +96,16 @@ func NewUsageCache() *UsageCache {
}
// WindowStats holds usage statistics for a time window.
+//
+// cost: account-level cost (total_cost * account_rate_multiplier)
+// standard_cost: standard cost (total_cost, with no multiplier applied)
+// user_cost: user/API key cost (actual_cost, affected by the group rate multiplier)
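+//
+// Worked example (illustrative): total_cost=1.0 on an account with rate
+// multiplier 1.5 under a 2x group multiplier yields Cost=1.5,
+// StandardCost=1.0, UserCost=2.0.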
type WindowStats struct {
- Requests int64 `json:"requests"`
- Tokens int64 `json:"tokens"`
- Cost float64 `json:"cost"`
+ Requests int64 `json:"requests"`
+ Tokens int64 `json:"tokens"`
+ Cost float64 `json:"cost"`
+ StandardCost float64 `json:"standard_cost"`
+ UserCost float64 `json:"user_cost"`
}
// UsageProgress tracks usage progress
@@ -266,7 +272,7 @@ func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Accou
}
dayStart := geminiDailyWindowStart(now)
- stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, account.ID)
+ stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, account.ID, 0, nil)
if err != nil {
return nil, fmt.Errorf("get gemini usage stats failed: %w", err)
}
@@ -288,7 +294,7 @@ func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Accou
// Minute window (RPM) - fixed-window approximation: current minute [truncate(now), truncate(now)+1m)
minuteStart := now.Truncate(time.Minute)
minuteResetAt := minuteStart.Add(time.Minute)
- minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID)
+ minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID, 0, nil)
if err != nil {
return nil, fmt.Errorf("get gemini minute usage stats failed: %w", err)
}
@@ -377,9 +383,11 @@ func (s *AccountUsageService) addWindowStats(ctx context.Context, account *Accou
}
windowStats = &WindowStats{
- Requests: stats.Requests,
- Tokens: stats.Tokens,
- Cost: stats.Cost,
+ Requests: stats.Requests,
+ Tokens: stats.Tokens,
+ Cost: stats.Cost,
+ StandardCost: stats.StandardCost,
+ UserCost: stats.UserCost,
}
// Cache the window stats for 1 minute
@@ -403,9 +411,11 @@ func (s *AccountUsageService) GetTodayStats(ctx context.Context, accountID int64
}
return &WindowStats{
- Requests: stats.Requests,
- Tokens: stats.Tokens,
- Cost: stats.Cost,
+ Requests: stats.Requests,
+ Tokens: stats.Tokens,
+ Cost: stats.Cost,
+ StandardCost: stats.StandardCost,
+ UserCost: stats.UserCost,
}, nil
}
diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go
index 1874c5c1..04b367e8 100644
--- a/backend/internal/service/admin_service.go
+++ b/backend/internal/service/admin_service.go
@@ -54,7 +54,8 @@ type AdminService interface {
CreateProxy(ctx context.Context, input *CreateProxyInput) (*Proxy, error)
UpdateProxy(ctx context.Context, id int64, input *UpdateProxyInput) (*Proxy, error)
DeleteProxy(ctx context.Context, id int64) error
- GetProxyAccounts(ctx context.Context, proxyID int64, page, pageSize int) ([]Account, int64, error)
+ BatchDeleteProxies(ctx context.Context, ids []int64) (*ProxyBatchDeleteResult, error)
+ GetProxyAccounts(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error)
CheckProxyExists(ctx context.Context, host string, port int, username, password string) (bool, error)
TestProxy(ctx context.Context, id int64) (*ProxyTestResult, error)
@@ -136,6 +137,7 @@ type CreateAccountInput struct {
ProxyID *int64
Concurrency int
Priority int
+ RateMultiplier *float64 // account billing rate multiplier (>= 0; 0 allowed)
GroupIDs []int64
ExpiresAt *int64
AutoPauseOnExpired *bool
@@ -151,8 +153,9 @@ type UpdateAccountInput struct {
Credentials map[string]any
Extra map[string]any
ProxyID *int64
- Concurrency *int // pointer distinguishes "not provided" from "set to 0"
- Priority *int // pointer distinguishes "not provided" from "set to 0"
+ Concurrency *int // pointer distinguishes "not provided" from "set to 0"
+ Priority *int // pointer distinguishes "not provided" from "set to 0"
+ RateMultiplier *float64 // account billing rate multiplier (>= 0; 0 allowed)
Status string
GroupIDs *[]int64
ExpiresAt *int64
@@ -162,16 +165,17 @@ type UpdateAccountInput struct {
// BulkUpdateAccountsInput describes the payload for bulk updating accounts.
type BulkUpdateAccountsInput struct {
- AccountIDs []int64
- Name string
- ProxyID *int64
- Concurrency *int
- Priority *int
- Status string
- Schedulable *bool
- GroupIDs *[]int64
- Credentials map[string]any
- Extra map[string]any
+ AccountIDs []int64
+ Name string
+ ProxyID *int64
+ Concurrency *int
+ Priority *int
+ RateMultiplier *float64 // account billing rate multiplier (>= 0; 0 allowed)
+ Status string
+ Schedulable *bool
+ GroupIDs *[]int64
+ Credentials map[string]any
+ Extra map[string]any
// SkipMixedChannelCheck skips the mixed channel risk check when binding groups.
// This should only be set when the caller has explicitly confirmed the risk.
SkipMixedChannelCheck bool
@@ -220,6 +224,16 @@ type GenerateRedeemCodesInput struct {
ValidityDays int // subscription type only: validity period in days
}
+type ProxyBatchDeleteResult struct {
+ DeletedIDs []int64 `json:"deleted_ids"`
+ Skipped []ProxyBatchDeleteSkipped `json:"skipped"`
+}
+
+type ProxyBatchDeleteSkipped struct {
+ ID int64 `json:"id"`
+ Reason string `json:"reason"`
+}
+
// ProxyTestResult represents the result of testing a proxy
type ProxyTestResult struct {
Success bool `json:"success"`
@@ -254,6 +268,7 @@ type adminServiceImpl struct {
redeemCodeRepo RedeemCodeRepository
billingCacheService *BillingCacheService
proxyProber ProxyExitInfoProber
+ proxyLatencyCache ProxyLatencyCache
authCacheInvalidator APIKeyAuthCacheInvalidator
}
@@ -267,6 +282,7 @@ func NewAdminService(
redeemCodeRepo RedeemCodeRepository,
billingCacheService *BillingCacheService,
proxyProber ProxyExitInfoProber,
+ proxyLatencyCache ProxyLatencyCache,
authCacheInvalidator APIKeyAuthCacheInvalidator,
) AdminService {
return &adminServiceImpl{
@@ -278,6 +294,7 @@ func NewAdminService(
redeemCodeRepo: redeemCodeRepo,
billingCacheService: billingCacheService,
proxyProber: proxyProber,
+ proxyLatencyCache: proxyLatencyCache,
authCacheInvalidator: authCacheInvalidator,
}
}
@@ -817,6 +834,12 @@ func (s *adminServiceImpl) CreateAccount(ctx context.Context, input *CreateAccou
} else {
account.AutoPauseOnExpired = true
}
+ if input.RateMultiplier != nil {
+ if *input.RateMultiplier < 0 {
+ return nil, errors.New("rate_multiplier must be >= 0")
+ }
+ account.RateMultiplier = input.RateMultiplier
+ }
if err := s.accountRepo.Create(ctx, account); err != nil {
return nil, err
}
@@ -869,6 +892,12 @@ func (s *adminServiceImpl) UpdateAccount(ctx context.Context, id int64, input *U
if input.Priority != nil {
account.Priority = *input.Priority
}
+ if input.RateMultiplier != nil {
+ if *input.RateMultiplier < 0 {
+ return nil, errors.New("rate_multiplier must be >= 0")
+ }
+ account.RateMultiplier = input.RateMultiplier
+ }
if input.Status != "" {
account.Status = input.Status
}
@@ -942,6 +971,12 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp
}
}
+ if input.RateMultiplier != nil {
+ if *input.RateMultiplier < 0 {
+ return nil, errors.New("rate_multiplier must be >= 0")
+ }
+ }
+
// Prepare bulk updates for columns and JSONB fields.
repoUpdates := AccountBulkUpdate{
Credentials: input.Credentials,
@@ -959,6 +994,9 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp
if input.Priority != nil {
repoUpdates.Priority = input.Priority
}
+ if input.RateMultiplier != nil {
+ repoUpdates.RateMultiplier = input.RateMultiplier
+ }
if input.Status != "" {
repoUpdates.Status = &input.Status
}
@@ -1069,6 +1107,7 @@ func (s *adminServiceImpl) ListProxiesWithAccountCount(ctx context.Context, page
if err != nil {
return nil, 0, err
}
+ s.attachProxyLatency(ctx, proxies)
return proxies, result.Total, nil
}
@@ -1077,7 +1116,12 @@ func (s *adminServiceImpl) GetAllProxies(ctx context.Context) ([]Proxy, error) {
}
func (s *adminServiceImpl) GetAllProxiesWithAccountCount(ctx context.Context) ([]ProxyWithAccountCount, error) {
- return s.proxyRepo.ListActiveWithAccountCount(ctx)
+ proxies, err := s.proxyRepo.ListActiveWithAccountCount(ctx)
+ if err != nil {
+ return nil, err
+ }
+ s.attachProxyLatency(ctx, proxies)
+ return proxies, nil
}
func (s *adminServiceImpl) GetProxy(ctx context.Context, id int64) (*Proxy, error) {
@@ -1097,6 +1141,8 @@ func (s *adminServiceImpl) CreateProxy(ctx context.Context, input *CreateProxyIn
if err := s.proxyRepo.Create(ctx, proxy); err != nil {
return nil, err
}
+ // Probe latency asynchronously so creation isn't blocked by network timeout.
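+ // context.Background() keeps the probe alive after the request context ends.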
+ go s.probeProxyLatency(context.Background(), proxy)
return proxy, nil
}
@@ -1135,12 +1181,53 @@ func (s *adminServiceImpl) UpdateProxy(ctx context.Context, id int64, input *Upd
}
func (s *adminServiceImpl) DeleteProxy(ctx context.Context, id int64) error {
+ count, err := s.proxyRepo.CountAccountsByProxyID(ctx, id)
+ if err != nil {
+ return err
+ }
+ if count > 0 {
+ return ErrProxyInUse
+ }
return s.proxyRepo.Delete(ctx, id)
}
-func (s *adminServiceImpl) GetProxyAccounts(ctx context.Context, proxyID int64, page, pageSize int) ([]Account, int64, error) {
- // Return mock data for now - would need a dedicated repository method
- return []Account{}, 0, nil
+func (s *adminServiceImpl) BatchDeleteProxies(ctx context.Context, ids []int64) (*ProxyBatchDeleteResult, error) {
+ result := &ProxyBatchDeleteResult{}
+ if len(ids) == 0 {
+ return result, nil
+ }
+
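+ // Best-effort semantics: proxies that are still in use or fail a check are
+ // recorded in Skipped with a reason instead of aborting the whole batch.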
+ for _, id := range ids {
+ count, err := s.proxyRepo.CountAccountsByProxyID(ctx, id)
+ if err != nil {
+ result.Skipped = append(result.Skipped, ProxyBatchDeleteSkipped{
+ ID: id,
+ Reason: err.Error(),
+ })
+ continue
+ }
+ if count > 0 {
+ result.Skipped = append(result.Skipped, ProxyBatchDeleteSkipped{
+ ID: id,
+ Reason: ErrProxyInUse.Error(),
+ })
+ continue
+ }
+ if err := s.proxyRepo.Delete(ctx, id); err != nil {
+ result.Skipped = append(result.Skipped, ProxyBatchDeleteSkipped{
+ ID: id,
+ Reason: err.Error(),
+ })
+ continue
+ }
+ result.DeletedIDs = append(result.DeletedIDs, id)
+ }
+
+ return result, nil
+}
+
+func (s *adminServiceImpl) GetProxyAccounts(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error) {
+ return s.proxyRepo.ListAccountSummariesByProxyID(ctx, proxyID)
}
func (s *adminServiceImpl) CheckProxyExists(ctx context.Context, host string, port int, username, password string) (bool, error) {
@@ -1240,12 +1327,24 @@ func (s *adminServiceImpl) TestProxy(ctx context.Context, id int64) (*ProxyTestR
proxyURL := proxy.URL()
exitInfo, latencyMs, err := s.proxyProber.ProbeProxy(ctx, proxyURL)
if err != nil {
+ s.saveProxyLatency(ctx, id, &ProxyLatencyInfo{
+ Success: false,
+ Message: err.Error(),
+ UpdatedAt: time.Now(),
+ })
return &ProxyTestResult{
Success: false,
Message: err.Error(),
}, nil
}
+ latency := latencyMs
+ s.saveProxyLatency(ctx, id, &ProxyLatencyInfo{
+ Success: true,
+ LatencyMs: &latency,
+ Message: "Proxy is accessible",
+ UpdatedAt: time.Now(),
+ })
return &ProxyTestResult{
Success: true,
Message: "Proxy is accessible",
@@ -1257,6 +1356,29 @@ func (s *adminServiceImpl) TestProxy(ctx context.Context, id int64) (*ProxyTestR
}, nil
}
+func (s *adminServiceImpl) probeProxyLatency(ctx context.Context, proxy *Proxy) {
+ if s.proxyProber == nil || proxy == nil {
+ return
+ }
+ _, latencyMs, err := s.proxyProber.ProbeProxy(ctx, proxy.URL())
+ if err != nil {
+ s.saveProxyLatency(ctx, proxy.ID, &ProxyLatencyInfo{
+ Success: false,
+ Message: err.Error(),
+ UpdatedAt: time.Now(),
+ })
+ return
+ }
+
+ latency := latencyMs
+ s.saveProxyLatency(ctx, proxy.ID, &ProxyLatencyInfo{
+ Success: true,
+ LatencyMs: &latency,
+ Message: "Proxy is accessible",
+ UpdatedAt: time.Now(),
+ })
+}
+
// checkMixedChannelRisk checks whether the groups mix channels (Antigravity + Anthropic).
// If they are mixed, it returns an error so the user is prompted to confirm.
func (s *adminServiceImpl) checkMixedChannelRisk(ctx context.Context, currentAccountID int64, currentAccountPlatform string, groupIDs []int64) error {
@@ -1306,6 +1428,46 @@ func (s *adminServiceImpl) checkMixedChannelRisk(ctx context.Context, currentAcc
return nil
}
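+// attachProxyLatency decorates the listed proxies with cached probe results;
+// proxies without a cached entry are left untouched.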
+func (s *adminServiceImpl) attachProxyLatency(ctx context.Context, proxies []ProxyWithAccountCount) {
+ if s.proxyLatencyCache == nil || len(proxies) == 0 {
+ return
+ }
+
+ ids := make([]int64, 0, len(proxies))
+ for i := range proxies {
+ ids = append(ids, proxies[i].ID)
+ }
+
+ latencies, err := s.proxyLatencyCache.GetProxyLatencies(ctx, ids)
+ if err != nil {
+ log.Printf("Warning: load proxy latency cache failed: %v", err)
+ return
+ }
+
+ for i := range proxies {
+ info := latencies[proxies[i].ID]
+ if info == nil {
+ continue
+ }
+ if info.Success {
+ proxies[i].LatencyStatus = "success"
+ proxies[i].LatencyMs = info.LatencyMs
+ } else {
+ proxies[i].LatencyStatus = "failed"
+ }
+ proxies[i].LatencyMessage = info.Message
+ }
+}
+
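+// saveProxyLatency writes a probe result to the latency cache; failures are
+// logged as warnings and otherwise ignored.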
+func (s *adminServiceImpl) saveProxyLatency(ctx context.Context, proxyID int64, info *ProxyLatencyInfo) {
+ if s.proxyLatencyCache == nil || info == nil {
+ return
+ }
+ if err := s.proxyLatencyCache.SetProxyLatency(ctx, proxyID, info); err != nil {
+ log.Printf("Warning: store proxy latency cache failed: %v", err)
+ }
+}
+
// getAccountPlatform derives the platform identifier used for the mixed-channel check from the account's platform field
func getAccountPlatform(accountPlatform string) string {
switch strings.ToLower(strings.TrimSpace(accountPlatform)) {
diff --git a/backend/internal/service/admin_service_bulk_update_test.go b/backend/internal/service/admin_service_bulk_update_test.go
index ef621213..662b95fb 100644
--- a/backend/internal/service/admin_service_bulk_update_test.go
+++ b/backend/internal/service/admin_service_bulk_update_test.go
@@ -12,9 +12,9 @@ import (
type accountRepoStubForBulkUpdate struct {
accountRepoStub
- bulkUpdateErr error
- bulkUpdateIDs []int64
- bindGroupErrByID map[int64]error
+ bulkUpdateErr error
+ bulkUpdateIDs []int64
+ bindGroupErrByID map[int64]error
}
func (s *accountRepoStubForBulkUpdate) BulkUpdate(_ context.Context, ids []int64, _ AccountBulkUpdate) (int64, error) {
diff --git a/backend/internal/service/admin_service_delete_test.go b/backend/internal/service/admin_service_delete_test.go
index 31639472..afa433af 100644
--- a/backend/internal/service/admin_service_delete_test.go
+++ b/backend/internal/service/admin_service_delete_test.go
@@ -153,8 +153,10 @@ func (s *groupRepoStub) DeleteAccountGroupsByGroupID(ctx context.Context, groupI
}
type proxyRepoStub struct {
- deleteErr error
- deletedIDs []int64
+ deleteErr error
+ countErr error
+ accountCount int64
+ deletedIDs []int64
}
func (s *proxyRepoStub) Create(ctx context.Context, proxy *Proxy) error {
@@ -199,7 +201,14 @@ func (s *proxyRepoStub) ExistsByHostPortAuth(ctx context.Context, host string, p
}
func (s *proxyRepoStub) CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error) {
- panic("unexpected CountAccountsByProxyID call")
+ if s.countErr != nil {
+ return 0, s.countErr
+ }
+ return s.accountCount, nil
+}
+
+func (s *proxyRepoStub) ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error) {
+ panic("unexpected ListAccountSummariesByProxyID call")
}
type redeemRepoStub struct {
@@ -409,6 +418,15 @@ func TestAdminService_DeleteProxy_Idempotent(t *testing.T) {
require.Equal(t, []int64{404}, repo.deletedIDs)
}
+func TestAdminService_DeleteProxy_InUse(t *testing.T) {
+ repo := &proxyRepoStub{accountCount: 2}
+ svc := &adminServiceImpl{proxyRepo: repo}
+
+ err := svc.DeleteProxy(context.Background(), 77)
+ require.ErrorIs(t, err, ErrProxyInUse)
+ require.Empty(t, repo.deletedIDs)
+}
+
func TestAdminService_DeleteProxy_Error(t *testing.T) {
deleteErr := errors.New("delete failed")
repo := &proxyRepoStub{deleteErr: deleteErr}
diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go
index 60567434..7f3e97a2 100644
--- a/backend/internal/service/antigravity_gateway_service.go
+++ b/backend/internal/service/antigravity_gateway_service.go
@@ -564,6 +564,10 @@ urlFallbackLoop:
}
upstreamReq, err := antigravity.NewAPIRequestWithURL(ctx, baseURL, action, accessToken, geminiBody)
+ // Capture upstream request body for ops retry of this attempt.
+ if c != nil {
+ c.Set(OpsUpstreamRequestBodyKey, string(geminiBody))
+ }
if err != nil {
return nil, err
}
@@ -574,6 +578,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: 0,
Kind: "request_error",
Message: safeErr,
@@ -615,6 +620,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "retry",
@@ -645,6 +651,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "retry",
@@ -697,6 +704,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "signature_error",
@@ -740,6 +748,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: 0,
Kind: "signature_retry_request_error",
Message: sanitizeUpstreamErrorMessage(retryErr.Error()),
@@ -770,6 +779,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: retryResp.StatusCode,
UpstreamRequestID: retryResp.Header.Get("x-request-id"),
Kind: kind,
@@ -817,6 +827,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "failover",
@@ -1371,6 +1382,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: 0,
Kind: "request_error",
Message: safeErr,
@@ -1412,6 +1424,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "retry",
@@ -1442,6 +1455,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "retry",
@@ -1543,6 +1557,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: requestID,
Kind: "failover",
@@ -1559,6 +1574,7 @@ urlFallbackLoop:
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: requestID,
Kind: "http_error",
@@ -2039,6 +2055,7 @@ func (s *AntigravityGatewayService) writeMappedClaudeError(c *gin.Context, accou
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: upstreamStatus,
UpstreamRequestID: upstreamRequestID,
Kind: "http_error",
diff --git a/backend/internal/service/dashboard_service.go b/backend/internal/service/dashboard_service.go
index 9bc56c54..a9811919 100644
--- a/backend/internal/service/dashboard_service.go
+++ b/backend/internal/service/dashboard_service.go
@@ -124,16 +124,16 @@ func (s *DashboardService) GetDashboardStats(ctx context.Context) (*usagestats.D
return stats, nil
}
-func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID int64) ([]usagestats.TrendDataPoint, error) {
- trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID)
+func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) {
+ trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream)
if err != nil {
return nil, fmt.Errorf("get usage trend with filters: %w", err)
}
return trend, nil
}
-func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID int64) ([]usagestats.ModelStat, error) {
- stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, 0)
+func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) {
+ stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, accountID, groupID, stream)
if err != nil {
return nil, fmt.Errorf("get model stats with filters: %w", err)
}
diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go
index d5eb0e52..b552f030 100644
--- a/backend/internal/service/gateway_service.go
+++ b/backend/internal/service/gateway_service.go
@@ -1466,6 +1466,9 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
for attempt := 1; attempt <= maxRetryAttempts; attempt++ {
// Build the upstream request (it must be rebuilt on every retry because the request body has to be re-read)
upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel)
+ // Capture upstream request body for ops retry of this attempt.
+ c.Set(OpsUpstreamRequestBodyKey, string(body))
+
if err != nil {
return nil, err
}
@@ -1482,6 +1485,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: 0,
Kind: "request_error",
Message: safeErr,
@@ -1506,6 +1510,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "signature_error",
@@ -1557,6 +1562,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: retryResp.StatusCode,
UpstreamRequestID: retryResp.Header.Get("x-request-id"),
Kind: "signature_retry_thinking",
@@ -1585,6 +1591,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: 0,
Kind: "signature_retry_tools_request_error",
Message: sanitizeUpstreamErrorMessage(retryErr2.Error()),
@@ -1643,6 +1650,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "retry",
@@ -1691,6 +1699,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "retry_exhausted_failover",
@@ -1757,6 +1766,7 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "failover_on_400",
@@ -2634,30 +2644,32 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu
if result.ImageSize != "" {
imageSize = &result.ImageSize
}
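+ // Record the account-level rate multiplier alongside the request multiplier
+ // so billing audits can reconstruct the effective rate for this request.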
+ accountRateMultiplier := account.BillingRateMultiplier()
usageLog := &UsageLog{
- UserID: user.ID,
- APIKeyID: apiKey.ID,
- AccountID: account.ID,
- RequestID: result.RequestID,
- Model: result.Model,
- InputTokens: result.Usage.InputTokens,
- OutputTokens: result.Usage.OutputTokens,
- CacheCreationTokens: result.Usage.CacheCreationInputTokens,
- CacheReadTokens: result.Usage.CacheReadInputTokens,
- InputCost: cost.InputCost,
- OutputCost: cost.OutputCost,
- CacheCreationCost: cost.CacheCreationCost,
- CacheReadCost: cost.CacheReadCost,
- TotalCost: cost.TotalCost,
- ActualCost: cost.ActualCost,
- RateMultiplier: multiplier,
- BillingType: billingType,
- Stream: result.Stream,
- DurationMs: &durationMs,
- FirstTokenMs: result.FirstTokenMs,
- ImageCount: result.ImageCount,
- ImageSize: imageSize,
- CreatedAt: time.Now(),
+ UserID: user.ID,
+ APIKeyID: apiKey.ID,
+ AccountID: account.ID,
+ RequestID: result.RequestID,
+ Model: result.Model,
+ InputTokens: result.Usage.InputTokens,
+ OutputTokens: result.Usage.OutputTokens,
+ CacheCreationTokens: result.Usage.CacheCreationInputTokens,
+ CacheReadTokens: result.Usage.CacheReadInputTokens,
+ InputCost: cost.InputCost,
+ OutputCost: cost.OutputCost,
+ CacheCreationCost: cost.CacheCreationCost,
+ CacheReadCost: cost.CacheReadCost,
+ TotalCost: cost.TotalCost,
+ ActualCost: cost.ActualCost,
+ RateMultiplier: multiplier,
+ AccountRateMultiplier: &accountRateMultiplier,
+ BillingType: billingType,
+ Stream: result.Stream,
+ DurationMs: &durationMs,
+ FirstTokenMs: result.FirstTokenMs,
+ ImageCount: result.ImageCount,
+ ImageSize: imageSize,
+ CreatedAt: time.Now(),
}
// Attach the UserAgent
diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go
index 190e6afc..75de90f2 100644
--- a/backend/internal/service/gemini_messages_compat_service.go
+++ b/backend/internal/service/gemini_messages_compat_service.go
@@ -545,12 +545,19 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex
}
requestIDHeader = idHeader
+ // Capture upstream request body for ops retry of this attempt.
+ if c != nil {
+ // In this code path `body` is already the JSON sent to upstream.
+ c.Set(OpsUpstreamRequestBodyKey, string(body))
+ }
+
resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency)
if err != nil {
safeErr := sanitizeUpstreamErrorMessage(err.Error())
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: 0,
Kind: "request_error",
Message: safeErr,
@@ -588,6 +595,7 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: upstreamReqID,
Kind: "signature_error",
@@ -662,6 +670,7 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: upstreamReqID,
Kind: "retry",
@@ -711,6 +720,7 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: upstreamReqID,
Kind: "failover",
@@ -737,6 +747,7 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: upstreamReqID,
Kind: "failover",
@@ -972,12 +983,19 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin.
}
requestIDHeader = idHeader
+ // Capture upstream request body for ops retry of this attempt.
+ if c != nil {
+ // In this code path `body` is already the JSON sent to upstream.
+ c.Set(OpsUpstreamRequestBodyKey, string(body))
+ }
+
resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency)
if err != nil {
safeErr := sanitizeUpstreamErrorMessage(err.Error())
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: 0,
Kind: "request_error",
Message: safeErr,
@@ -1036,6 +1054,7 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin.
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: upstreamReqID,
Kind: "retry",
@@ -1120,6 +1139,7 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin.
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: requestID,
Kind: "failover",
@@ -1143,6 +1163,7 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin.
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: requestID,
Kind: "failover",
@@ -1168,6 +1189,7 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin.
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: requestID,
Kind: "http_error",
@@ -1300,6 +1322,7 @@ func (s *GeminiMessagesCompatService) writeGeminiMappedError(c *gin.Context, acc
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: upstreamStatus,
UpstreamRequestID: upstreamRequestID,
Kind: "http_error",
diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go
index 04a90fdd..cfba6460 100644
--- a/backend/internal/service/openai_gateway_service.go
+++ b/backend/internal/service/openai_gateway_service.go
@@ -664,6 +664,11 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
proxyURL = account.Proxy.URL()
}
+ // Capture upstream request body for ops retry of this attempt.
+ if c != nil {
+ c.Set(OpsUpstreamRequestBodyKey, string(body))
+ }
+
// Send request
resp, err := s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency)
if err != nil {
@@ -673,6 +678,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: 0,
Kind: "request_error",
Message: safeErr,
@@ -707,6 +713,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "failover",
@@ -864,6 +871,7 @@ func (s *OpenAIGatewayService) handleErrorResponse(ctx context.Context, resp *ht
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: "http_error",
@@ -894,6 +902,7 @@ func (s *OpenAIGatewayService) handleErrorResponse(ctx context.Context, resp *ht
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{
Platform: account.Platform,
AccountID: account.ID,
+ AccountName: account.Name,
UpstreamStatusCode: resp.StatusCode,
UpstreamRequestID: resp.Header.Get("x-request-id"),
Kind: kind,
@@ -1443,28 +1452,30 @@ func (s *OpenAIGatewayService) RecordUsage(ctx context.Context, input *OpenAIRec
// Create usage log
durationMs := int(result.Duration.Milliseconds())
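+ // Record the account-level rate multiplier alongside the request multiplier
+ // so billing audits can reconstruct the effective rate for this request.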
+ accountRateMultiplier := account.BillingRateMultiplier()
usageLog := &UsageLog{
- UserID: user.ID,
- APIKeyID: apiKey.ID,
- AccountID: account.ID,
- RequestID: result.RequestID,
- Model: result.Model,
- InputTokens: actualInputTokens,
- OutputTokens: result.Usage.OutputTokens,
- CacheCreationTokens: result.Usage.CacheCreationInputTokens,
- CacheReadTokens: result.Usage.CacheReadInputTokens,
- InputCost: cost.InputCost,
- OutputCost: cost.OutputCost,
- CacheCreationCost: cost.CacheCreationCost,
- CacheReadCost: cost.CacheReadCost,
- TotalCost: cost.TotalCost,
- ActualCost: cost.ActualCost,
- RateMultiplier: multiplier,
- BillingType: billingType,
- Stream: result.Stream,
- DurationMs: &durationMs,
- FirstTokenMs: result.FirstTokenMs,
- CreatedAt: time.Now(),
+ UserID: user.ID,
+ APIKeyID: apiKey.ID,
+ AccountID: account.ID,
+ RequestID: result.RequestID,
+ Model: result.Model,
+ InputTokens: actualInputTokens,
+ OutputTokens: result.Usage.OutputTokens,
+ CacheCreationTokens: result.Usage.CacheCreationInputTokens,
+ CacheReadTokens: result.Usage.CacheReadInputTokens,
+ InputCost: cost.InputCost,
+ OutputCost: cost.OutputCost,
+ CacheCreationCost: cost.CacheCreationCost,
+ CacheReadCost: cost.CacheReadCost,
+ TotalCost: cost.TotalCost,
+ ActualCost: cost.ActualCost,
+ RateMultiplier: multiplier,
+ AccountRateMultiplier: &accountRateMultiplier,
+ BillingType: billingType,
+ Stream: result.Stream,
+ DurationMs: &durationMs,
+ FirstTokenMs: result.FirstTokenMs,
+ CreatedAt: time.Now(),
}
// Attach the UserAgent
diff --git a/backend/internal/service/ops_alert_evaluator_service.go b/backend/internal/service/ops_alert_evaluator_service.go
index f376c246..2b619f4d 100644
--- a/backend/internal/service/ops_alert_evaluator_service.go
+++ b/backend/internal/service/ops_alert_evaluator_service.go
@@ -206,7 +206,7 @@ func (s *OpsAlertEvaluatorService) evaluateOnce(interval time.Duration) {
continue
}
- scopePlatform, scopeGroupID := parseOpsAlertRuleScope(rule.Filters)
+ scopePlatform, scopeGroupID, scopeRegion := parseOpsAlertRuleScope(rule.Filters)
windowMinutes := rule.WindowMinutes
if windowMinutes <= 0 {
@@ -236,6 +236,17 @@ func (s *OpsAlertEvaluatorService) evaluateOnce(interval time.Duration) {
continue
}
+ // Scoped silencing: if a matching silence exists, skip creating a firing event.
+ if s.opsService != nil {
+ platform := strings.TrimSpace(scopePlatform)
+ region := scopeRegion
+ if platform != "" {
+ if ok, err := s.opsService.IsAlertSilenced(ctx, rule.ID, platform, scopeGroupID, region, now); err == nil && ok {
+ continue
+ }
+ }
+ }
+
latestEvent, err := s.opsRepo.GetLatestAlertEvent(ctx, rule.ID)
if err != nil {
log.Printf("[OpsAlertEvaluator] get latest event failed (rule=%d): %v", rule.ID, err)
@@ -359,9 +370,9 @@ func requiredSustainedBreaches(sustainedMinutes int, interval time.Duration) int
return required
}
-func parseOpsAlertRuleScope(filters map[string]any) (platform string, groupID *int64) {
+func parseOpsAlertRuleScope(filters map[string]any) (platform string, groupID *int64, region *string) {
if filters == nil {
- return "", nil
+ return "", nil, nil
}
if v, ok := filters["platform"]; ok {
if s, ok := v.(string); ok {
@@ -392,7 +403,15 @@ func parseOpsAlertRuleScope(filters map[string]any) (platform string, groupID *i
}
}
}
- return platform, groupID
+ if v, ok := filters["region"]; ok {
+ if s, ok := v.(string); ok {
+ vv := strings.TrimSpace(s)
+ if vv != "" {
+ region = &vv
+ }
+ }
+ }
+ return platform, groupID, region
}
func (s *OpsAlertEvaluatorService) computeRuleMetric(
@@ -504,16 +523,6 @@ func (s *OpsAlertEvaluatorService) computeRuleMetric(
return 0, false
}
return overview.UpstreamErrorRate * 100, true
- case "p95_latency_ms":
- if overview.Duration.P95 == nil {
- return 0, false
- }
- return float64(*overview.Duration.P95), true
- case "p99_latency_ms":
- if overview.Duration.P99 == nil {
- return 0, false
- }
- return float64(*overview.Duration.P99), true
default:
return 0, false
}
diff --git a/backend/internal/service/ops_alert_models.go b/backend/internal/service/ops_alert_models.go
index 0acf13ab..a0caa990 100644
--- a/backend/internal/service/ops_alert_models.go
+++ b/backend/internal/service/ops_alert_models.go
@@ -8,8 +8,9 @@ import "time"
// with the existing ops dashboard frontend (backup style).
const (
- OpsAlertStatusFiring = "firing"
- OpsAlertStatusResolved = "resolved"
+ OpsAlertStatusFiring = "firing"
+ OpsAlertStatusResolved = "resolved"
+ OpsAlertStatusManualResolved = "manual_resolved"
)
type OpsAlertRule struct {
@@ -58,12 +59,32 @@ type OpsAlertEvent struct {
CreatedAt time.Time `json:"created_at"`
}
+type OpsAlertSilence struct {
+ ID int64 `json:"id"`
+
+ RuleID int64 `json:"rule_id"`
+ Platform string `json:"platform"`
+ GroupID *int64 `json:"group_id,omitempty"`
+ Region *string `json:"region,omitempty"`
+
+ Until time.Time `json:"until"`
+ Reason string `json:"reason"`
+
+ CreatedBy *int64 `json:"created_by,omitempty"`
+ CreatedAt time.Time `json:"created_at"`
+}
+
type OpsAlertEventFilter struct {
Limit int
+ // Cursor pagination (descending by fired_at, then id).
+ BeforeFiredAt *time.Time
+ BeforeID *int64
+
// Optional filters.
- Status string
- Severity string
+ Status string
+ Severity string
+ EmailSent *bool
StartTime *time.Time
EndTime *time.Time
diff --git a/backend/internal/service/ops_alerts.go b/backend/internal/service/ops_alerts.go
index b6c3d1c3..b4c09824 100644
--- a/backend/internal/service/ops_alerts.go
+++ b/backend/internal/service/ops_alerts.go
@@ -88,6 +88,29 @@ func (s *OpsService) ListAlertEvents(ctx context.Context, filter *OpsAlertEventF
return s.opsRepo.ListAlertEvents(ctx, filter)
}
+func (s *OpsService) GetAlertEventByID(ctx context.Context, eventID int64) (*OpsAlertEvent, error) {
+ if err := s.RequireMonitoringEnabled(ctx); err != nil {
+ return nil, err
+ }
+ if s.opsRepo == nil {
+ return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
+ }
+ if eventID <= 0 {
+ return nil, infraerrors.BadRequest("INVALID_EVENT_ID", "invalid event id")
+ }
+ ev, err := s.opsRepo.GetAlertEventByID(ctx, eventID)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return nil, infraerrors.NotFound("OPS_ALERT_EVENT_NOT_FOUND", "alert event not found")
+ }
+ return nil, err
+ }
+ if ev == nil {
+ return nil, infraerrors.NotFound("OPS_ALERT_EVENT_NOT_FOUND", "alert event not found")
+ }
+ return ev, nil
+}
+
func (s *OpsService) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) {
if err := s.RequireMonitoringEnabled(ctx); err != nil {
return nil, err
@@ -101,6 +124,49 @@ func (s *OpsService) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*Op
return s.opsRepo.GetActiveAlertEvent(ctx, ruleID)
}
+func (s *OpsService) CreateAlertSilence(ctx context.Context, input *OpsAlertSilence) (*OpsAlertSilence, error) {
+ if err := s.RequireMonitoringEnabled(ctx); err != nil {
+ return nil, err
+ }
+ if s.opsRepo == nil {
+ return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
+ }
+ if input == nil {
+ return nil, infraerrors.BadRequest("INVALID_SILENCE", "invalid silence")
+ }
+ if input.RuleID <= 0 {
+ return nil, infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id")
+ }
+ if strings.TrimSpace(input.Platform) == "" {
+ return nil, infraerrors.BadRequest("INVALID_PLATFORM", "invalid platform")
+ }
+ if input.Until.IsZero() {
+ return nil, infraerrors.BadRequest("INVALID_UNTIL", "invalid until")
+ }
+
+ created, err := s.opsRepo.CreateAlertSilence(ctx, input)
+ if err != nil {
+ return nil, err
+ }
+ return created, nil
+}
+
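+// IsAlertSilenced reports whether an active silence covers the given rule and
+// platform scope; a blank platform never matches and is not an error.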
+func (s *OpsService) IsAlertSilenced(ctx context.Context, ruleID int64, platform string, groupID *int64, region *string, now time.Time) (bool, error) {
+ if err := s.RequireMonitoringEnabled(ctx); err != nil {
+ return false, err
+ }
+ if s.opsRepo == nil {
+ return false, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
+ }
+ if ruleID <= 0 {
+ return false, infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id")
+ }
+ if strings.TrimSpace(platform) == "" {
+ return false, nil
+ }
+ return s.opsRepo.IsAlertSilenced(ctx, ruleID, platform, groupID, region, now)
+}
+
func (s *OpsService) GetLatestAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) {
if err := s.RequireMonitoringEnabled(ctx); err != nil {
return nil, err
@@ -142,7 +208,11 @@ func (s *OpsService) UpdateAlertEventStatus(ctx context.Context, eventID int64,
if eventID <= 0 {
return infraerrors.BadRequest("INVALID_EVENT_ID", "invalid event id")
}
- if strings.TrimSpace(status) == "" {
+ status = strings.TrimSpace(status)
+ if status == "" {
+ return infraerrors.BadRequest("INVALID_STATUS", "invalid status")
+ }
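+ // Only resolution statuses (resolved / manual_resolved) may be written through this API.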
+ if status != OpsAlertStatusResolved && status != OpsAlertStatusManualResolved {
return infraerrors.BadRequest("INVALID_STATUS", "invalid status")
}
return s.opsRepo.UpdateAlertEventStatus(ctx, eventID, status, resolvedAt)
diff --git a/backend/internal/service/ops_health_score.go b/backend/internal/service/ops_health_score.go
index feb0d843..5efae870 100644
--- a/backend/internal/service/ops_health_score.go
+++ b/backend/internal/service/ops_health_score.go
@@ -32,49 +32,38 @@ func computeDashboardHealthScore(now time.Time, overview *OpsDashboardOverview)
}
// computeBusinessHealth calculates business health score (0-100)
-// Components: SLA (50%) + Error Rate (30%) + Latency (20%)
+// Components: Error Rate (50%) + TTFT (50%)
func computeBusinessHealth(overview *OpsDashboardOverview) float64 {
- // SLA score: 99.5% → 100, 95% → 0 (linear)
- slaScore := 100.0
- slaPct := clampFloat64(overview.SLA*100, 0, 100)
- if slaPct < 99.5 {
- if slaPct >= 95 {
- slaScore = (slaPct - 95) / 4.5 * 100
- } else {
- slaScore = 0
- }
- }
-
- // Error rate score: 0.5% → 100, 5% → 0 (linear)
+ // Error rate score: 1% → 100, 10% → 0 (linear)
// Combines request errors and upstream errors
errorScore := 100.0
errorPct := clampFloat64(overview.ErrorRate*100, 0, 100)
upstreamPct := clampFloat64(overview.UpstreamErrorRate*100, 0, 100)
combinedErrorPct := math.Max(errorPct, upstreamPct) // Use worst case
- if combinedErrorPct > 0.5 {
- if combinedErrorPct <= 5 {
- errorScore = (5 - combinedErrorPct) / 4.5 * 100
+ if combinedErrorPct > 1.0 {
+ if combinedErrorPct <= 10.0 {
+ errorScore = (10.0 - combinedErrorPct) / 9.0 * 100
} else {
errorScore = 0
}
}
- // Latency score: 1s → 100, 10s → 0 (linear)
- // Uses P99 of duration (TTFT is less critical for overall health)
- latencyScore := 100.0
- if overview.Duration.P99 != nil {
- p99 := float64(*overview.Duration.P99)
+ // TTFT score: 1s → 100, 3s → 0 (linear)
+ // Time to first token is critical for user experience
+ ttftScore := 100.0
+ if overview.TTFT.P99 != nil {
+ p99 := float64(*overview.TTFT.P99)
if p99 > 1000 {
- if p99 <= 10000 {
- latencyScore = (10000 - p99) / 9000 * 100
+ if p99 <= 3000 {
+ ttftScore = (3000 - p99) / 2000 * 100
} else {
- latencyScore = 0
+ ttftScore = 0
}
}
}
- // Weighted combination
- return slaScore*0.5 + errorScore*0.3 + latencyScore*0.2
+ // Weighted combination: 50% error rate + 50% TTFT
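+ // e.g. 5% combined errors → errorScore = (10-5)/9*100 ≈ 55.6; TTFT p99 = 500ms → ttftScore = 100; health ≈ 77.8.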
+ return errorScore*0.5 + ttftScore*0.5
}
// computeInfraHealth calculates infrastructure health score (0-100)
diff --git a/backend/internal/service/ops_health_score_test.go b/backend/internal/service/ops_health_score_test.go
index 849ba146..25bfb43d 100644
--- a/backend/internal/service/ops_health_score_test.go
+++ b/backend/internal/service/ops_health_score_test.go
@@ -127,8 +127,8 @@ func TestComputeDashboardHealthScore_Comprehensive(t *testing.T) {
MemoryUsagePercent: float64Ptr(75),
},
},
- wantMin: 60,
- wantMax: 85,
+ wantMin: 96,
+ wantMax: 97,
},
{
name: "DB failure",
@@ -203,8 +203,8 @@ func TestComputeDashboardHealthScore_Comprehensive(t *testing.T) {
MemoryUsagePercent: float64Ptr(30),
},
},
- wantMin: 25,
- wantMax: 50,
+ wantMin: 84,
+ wantMax: 85,
},
{
name: "combined failures - business healthy + infra degraded",
@@ -277,30 +277,41 @@ func TestComputeBusinessHealth(t *testing.T) {
UpstreamErrorRate: 0,
Duration: OpsPercentiles{P99: intPtr(500)},
},
- wantMin: 50,
- wantMax: 60,
+ wantMin: 100,
+ wantMax: 100,
},
{
- name: "error rate boundary 0.5%",
+ name: "error rate boundary 1%",
overview: &OpsDashboardOverview{
- SLA: 0.995,
- ErrorRate: 0.005,
+ SLA: 0.99,
+ ErrorRate: 0.01,
UpstreamErrorRate: 0,
Duration: OpsPercentiles{P99: intPtr(500)},
},
- wantMin: 95,
+ wantMin: 100,
wantMax: 100,
},
{
- name: "latency boundary 1000ms",
+ name: "error rate 5%",
overview: &OpsDashboardOverview{
- SLA: 0.995,
+ SLA: 0.95,
+ ErrorRate: 0.05,
+ UpstreamErrorRate: 0,
+ Duration: OpsPercentiles{P99: intPtr(500)},
+ },
+ wantMin: 77,
+ wantMax: 78,
+ },
+ {
+ name: "TTFT boundary 2s",
+ overview: &OpsDashboardOverview{
+ SLA: 0.99,
ErrorRate: 0,
UpstreamErrorRate: 0,
- Duration: OpsPercentiles{P99: intPtr(1000)},
+ TTFT: OpsPercentiles{P99: intPtr(2000)},
},
- wantMin: 95,
- wantMax: 100,
+ wantMin: 75,
+ wantMax: 75,
},
{
name: "upstream error dominates",
@@ -310,7 +321,7 @@ func TestComputeBusinessHealth(t *testing.T) {
UpstreamErrorRate: 0.03,
Duration: OpsPercentiles{P99: intPtr(500)},
},
- wantMin: 75,
+ wantMin: 88,
wantMax: 90,
},
}
diff --git a/backend/internal/service/ops_models.go b/backend/internal/service/ops_models.go
index 996267fd..347cd52b 100644
--- a/backend/internal/service/ops_models.go
+++ b/backend/internal/service/ops_models.go
@@ -6,24 +6,43 @@ type OpsErrorLog struct {
ID int64 `json:"id"`
CreatedAt time.Time `json:"created_at"`
- Phase string `json:"phase"`
- Type string `json:"type"`
+ // Standardized classification
+ // - phase: request|auth|routing|upstream|network|internal
+ // - owner: client|provider|platform
+ // - source: client_request|upstream_http|gateway
+ Phase string `json:"phase"`
+ Type string `json:"type"`
+
+ Owner string `json:"error_owner"`
+ Source string `json:"error_source"`
+
Severity string `json:"severity"`
StatusCode int `json:"status_code"`
Platform string `json:"platform"`
Model string `json:"model"`
- LatencyMs *int `json:"latency_ms"`
+ IsRetryable bool `json:"is_retryable"`
+ RetryCount int `json:"retry_count"`
+
+ Resolved bool `json:"resolved"`
+ ResolvedAt *time.Time `json:"resolved_at"`
+ ResolvedByUserID *int64 `json:"resolved_by_user_id"`
+ ResolvedByUserName string `json:"resolved_by_user_name"`
+ ResolvedRetryID *int64 `json:"resolved_retry_id"`
+ ResolvedStatusRaw string `json:"-"`
ClientRequestID string `json:"client_request_id"`
RequestID string `json:"request_id"`
Message string `json:"message"`
- UserID *int64 `json:"user_id"`
- APIKeyID *int64 `json:"api_key_id"`
- AccountID *int64 `json:"account_id"`
- GroupID *int64 `json:"group_id"`
+ UserID *int64 `json:"user_id"`
+ UserEmail string `json:"user_email"`
+ APIKeyID *int64 `json:"api_key_id"`
+ AccountID *int64 `json:"account_id"`
+ AccountName string `json:"account_name"`
+ GroupID *int64 `json:"group_id"`
+ GroupName string `json:"group_name"`
ClientIP *string `json:"client_ip"`
RequestPath string `json:"request_path"`
@@ -67,9 +86,24 @@ type OpsErrorLogFilter struct {
GroupID *int64
AccountID *int64
- StatusCodes []int
- Phase string
- Query string
+ StatusCodes []int
+ StatusCodesOther bool
+ Phase string
+ Owner string
+ Source string
+ Resolved *bool
+ Query string
+ UserQuery string // Search by user email
+
+ // Optional correlation keys for exact matching.
+ RequestID string
+ ClientRequestID string
+
+ // View controls error categorization for list endpoints.
+ // - errors: show actionable errors (exclude business-limited / 429 / 529)
+ // - excluded: only show excluded errors
+ // - all: show everything
+ View string
Page int
PageSize int
@@ -90,12 +124,23 @@ type OpsRetryAttempt struct {
SourceErrorID int64 `json:"source_error_id"`
Mode string `json:"mode"`
PinnedAccountID *int64 `json:"pinned_account_id"`
+ PinnedAccountName string `json:"pinned_account_name"`
Status string `json:"status"`
StartedAt *time.Time `json:"started_at"`
FinishedAt *time.Time `json:"finished_at"`
DurationMs *int64 `json:"duration_ms"`
+ // Persisted execution results (best-effort)
+ Success *bool `json:"success"`
+ HTTPStatusCode *int `json:"http_status_code"`
+ UpstreamRequestID *string `json:"upstream_request_id"`
+ UsedAccountID *int64 `json:"used_account_id"`
+ UsedAccountName string `json:"used_account_name"`
+ ResponsePreview *string `json:"response_preview"`
+ ResponseTruncated *bool `json:"response_truncated"`
+
+ // Optional correlation
ResultRequestID *string `json:"result_request_id"`
ResultErrorID *int64 `json:"result_error_id"`
diff --git a/backend/internal/service/ops_port.go b/backend/internal/service/ops_port.go
index 4df21c37..cdeea241 100644
--- a/backend/internal/service/ops_port.go
+++ b/backend/internal/service/ops_port.go
@@ -14,6 +14,8 @@ type OpsRepository interface {
InsertRetryAttempt(ctx context.Context, input *OpsInsertRetryAttemptInput) (int64, error)
UpdateRetryAttempt(ctx context.Context, input *OpsUpdateRetryAttemptInput) error
GetLatestRetryAttemptForError(ctx context.Context, sourceErrorID int64) (*OpsRetryAttempt, error)
+ ListRetryAttemptsByErrorID(ctx context.Context, sourceErrorID int64, limit int) ([]*OpsRetryAttempt, error)
+ UpdateErrorResolution(ctx context.Context, errorID int64, resolved bool, resolvedByUserID *int64, resolvedRetryID *int64, resolvedAt *time.Time) error
// Lightweight window stats (for realtime WS / quick sampling).
GetWindowStats(ctx context.Context, filter *OpsDashboardFilter) (*OpsWindowStats, error)
@@ -39,12 +41,17 @@ type OpsRepository interface {
DeleteAlertRule(ctx context.Context, id int64) error
ListAlertEvents(ctx context.Context, filter *OpsAlertEventFilter) ([]*OpsAlertEvent, error)
+ GetAlertEventByID(ctx context.Context, eventID int64) (*OpsAlertEvent, error)
GetActiveAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error)
GetLatestAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error)
CreateAlertEvent(ctx context.Context, event *OpsAlertEvent) (*OpsAlertEvent, error)
UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error
UpdateAlertEventEmailSent(ctx context.Context, eventID int64, emailSent bool) error
+ // Alert silences
+ CreateAlertSilence(ctx context.Context, input *OpsAlertSilence) (*OpsAlertSilence, error)
+ IsAlertSilenced(ctx context.Context, ruleID int64, platform string, groupID *int64, region *string, now time.Time) (bool, error)
+
// Pre-aggregation (hourly/daily) used for long-window dashboard performance.
UpsertHourlyMetrics(ctx context.Context, startTime, endTime time.Time) error
UpsertDailyMetrics(ctx context.Context, startTime, endTime time.Time) error
@@ -91,7 +98,6 @@ type OpsInsertErrorLogInput struct {
// It is set by OpsService.RecordError before persisting.
UpstreamErrorsJSON *string
- DurationMs *int
TimeToFirstTokenMs *int64
RequestBodyJSON *string // sanitized json string (not raw bytes)
@@ -124,7 +130,15 @@ type OpsUpdateRetryAttemptInput struct {
FinishedAt time.Time
DurationMs int64
- // Optional correlation
+ // Persisted execution results (best-effort)
+ Success *bool
+ HTTPStatusCode *int
+ UpstreamRequestID *string
+ UsedAccountID *int64
+ ResponsePreview *string
+ ResponseTruncated *bool
+
+ // Optional correlation (legacy fields kept)
ResultRequestID *string
ResultErrorID *int64
diff --git a/backend/internal/service/ops_retry.go b/backend/internal/service/ops_retry.go
index 747aa3b8..25c10af6 100644
--- a/backend/internal/service/ops_retry.go
+++ b/backend/internal/service/ops_retry.go
@@ -108,6 +108,10 @@ func (w *limitedResponseWriter) truncated() bool {
return w.totalWritten > int64(w.limit)
}
+const (
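+ // OpsRetryModeUpstreamEvent replays a single upstream attempt recorded in
+ // upstream_errors (see RetryUpstreamEvent).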
+ OpsRetryModeUpstreamEvent = "upstream_event"
+)
+
func (s *OpsService) RetryError(ctx context.Context, requestedByUserID int64, errorID int64, mode string, pinnedAccountID *int64) (*OpsRetryResult, error) {
if err := s.RequireMonitoringEnabled(ctx); err != nil {
return nil, err
@@ -123,6 +127,81 @@ func (s *OpsService) RetryError(ctx context.Context, requestedByUserID int64, er
return nil, infraerrors.BadRequest("OPS_RETRY_INVALID_MODE", "mode must be client or upstream")
}
+ errorLog, err := s.GetErrorLogByID(ctx, errorID)
+ if err != nil {
+ return nil, err
+ }
+ if errorLog == nil {
+ return nil, infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found")
+ }
+ if strings.TrimSpace(errorLog.RequestBody) == "" {
+ return nil, infraerrors.BadRequest("OPS_RETRY_NO_REQUEST_BODY", "No request body found to retry")
+ }
+
+ var pinned *int64
+ if mode == OpsRetryModeUpstream {
+ if pinnedAccountID != nil && *pinnedAccountID > 0 {
+ pinned = pinnedAccountID
+ } else if errorLog.AccountID != nil && *errorLog.AccountID > 0 {
+ pinned = errorLog.AccountID
+ } else {
+ return nil, infraerrors.BadRequest("OPS_RETRY_PINNED_ACCOUNT_REQUIRED", "pinned_account_id is required for upstream retry")
+ }
+ }
+
+ return s.retryWithErrorLog(ctx, requestedByUserID, errorID, mode, mode, pinned, errorLog)
+}
+
+// RetryUpstreamEvent retries a specific upstream attempt captured inside ops_error_logs.upstream_errors.
+// idx is 0-based. It always pins the original event account_id.
+func (s *OpsService) RetryUpstreamEvent(ctx context.Context, requestedByUserID int64, errorID int64, idx int) (*OpsRetryResult, error) {
+ if err := s.RequireMonitoringEnabled(ctx); err != nil {
+ return nil, err
+ }
+ if s.opsRepo == nil {
+ return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
+ }
+ if idx < 0 {
+ return nil, infraerrors.BadRequest("OPS_RETRY_INVALID_UPSTREAM_IDX", "invalid upstream idx")
+ }
+
+ errorLog, err := s.GetErrorLogByID(ctx, errorID)
+ if err != nil {
+ return nil, err
+ }
+ if errorLog == nil {
+ return nil, infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found")
+ }
+
+ events, err := ParseOpsUpstreamErrors(errorLog.UpstreamErrors)
+ if err != nil {
+ return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_EVENTS_INVALID", "invalid upstream_errors")
+ }
+ if idx >= len(events) {
+ return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_IDX_OOB", "upstream idx out of range")
+ }
+ ev := events[idx]
+ if ev == nil {
+ return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_EVENT_MISSING", "upstream event missing")
+ }
+ if ev.AccountID <= 0 {
+ return nil, infraerrors.BadRequest("OPS_RETRY_PINNED_ACCOUNT_REQUIRED", "account_id is required for upstream retry")
+ }
+
+ upstreamBody := strings.TrimSpace(ev.UpstreamRequestBody)
+ if upstreamBody == "" {
+ return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_NO_REQUEST_BODY", "No upstream request body found to retry")
+ }
+
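+ // Shallow-copy the error log and swap in this attempt's upstream body so the
+ // shared retry path replays exactly what was sent upstream.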
+ override := *errorLog
+ override.RequestBody = upstreamBody
+ pinned := ev.AccountID
+
+ // Persist as upstream_event, execute as upstream pinned retry.
+ return s.retryWithErrorLog(ctx, requestedByUserID, errorID, OpsRetryModeUpstreamEvent, OpsRetryModeUpstream, &pinned, &override)
+}
+
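+// retryWithErrorLog persists the attempt under mode but executes it under
+// execMode, so an upstream_event retry reuses the plain upstream execution path.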
+func (s *OpsService) retryWithErrorLog(ctx context.Context, requestedByUserID int64, errorID int64, mode string, execMode string, pinnedAccountID *int64, errorLog *OpsErrorLogDetail) (*OpsRetryResult, error) {
latest, err := s.opsRepo.GetLatestRetryAttemptForError(ctx, errorID)
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, infraerrors.InternalServer("OPS_RETRY_LOAD_LATEST_FAILED", "Failed to check retry status").WithCause(err)
@@ -144,22 +223,18 @@ func (s *OpsService) RetryError(ctx context.Context, requestedByUserID int64, er
}
}
- errorLog, err := s.GetErrorLogByID(ctx, errorID)
- if err != nil {
- return nil, err
- }
- if strings.TrimSpace(errorLog.RequestBody) == "" {
+ if errorLog == nil || strings.TrimSpace(errorLog.RequestBody) == "" {
return nil, infraerrors.BadRequest("OPS_RETRY_NO_REQUEST_BODY", "No request body found to retry")
}
var pinned *int64
- if mode == OpsRetryModeUpstream {
+ if execMode == OpsRetryModeUpstream {
if pinnedAccountID != nil && *pinnedAccountID > 0 {
pinned = pinnedAccountID
} else if errorLog.AccountID != nil && *errorLog.AccountID > 0 {
pinned = errorLog.AccountID
} else {
- return nil, infraerrors.BadRequest("OPS_RETRY_PINNED_ACCOUNT_REQUIRED", "pinned_account_id is required for upstream retry")
+ return nil, infraerrors.BadRequest("OPS_RETRY_PINNED_ACCOUNT_REQUIRED", "account_id is required for upstream retry")
}
}
@@ -196,7 +271,7 @@ func (s *OpsService) RetryError(ctx context.Context, requestedByUserID int64, er
execCtx, cancel := context.WithTimeout(ctx, opsRetryTimeout)
defer cancel()
- execRes := s.executeRetry(execCtx, errorLog, mode, pinned)
+ execRes := s.executeRetry(execCtx, errorLog, execMode, pinned)
finishedAt := time.Now()
result.FinishedAt = finishedAt
@@ -220,27 +295,40 @@ func (s *OpsService) RetryError(ctx context.Context, requestedByUserID int64, er
msg := result.ErrorMessage
updateErrMsg = &msg
}
+ // Keep legacy result_request_id empty; use upstream_request_id instead.
var resultRequestID *string
- if strings.TrimSpace(result.UpstreamRequestID) != "" {
- v := result.UpstreamRequestID
- resultRequestID = &v
- }
finalStatus := result.Status
if strings.TrimSpace(finalStatus) == "" {
finalStatus = opsRetryStatusFailed
}
+ success := strings.EqualFold(finalStatus, opsRetryStatusSucceeded)
+ httpStatus := result.HTTPStatusCode
+ upstreamReqID := result.UpstreamRequestID
+ usedAccountID := result.UsedAccountID
+ preview := result.ResponsePreview
+ truncated := result.ResponseTruncated
+
if err := s.opsRepo.UpdateRetryAttempt(updateCtx, &OpsUpdateRetryAttemptInput{
- ID: attemptID,
- Status: finalStatus,
- FinishedAt: finishedAt,
- DurationMs: result.DurationMs,
- ResultRequestID: resultRequestID,
- ErrorMessage: updateErrMsg,
+ ID: attemptID,
+ Status: finalStatus,
+ FinishedAt: finishedAt,
+ DurationMs: result.DurationMs,
+ Success: &success,
+ HTTPStatusCode: &httpStatus,
+ UpstreamRequestID: &upstreamReqID,
+ UsedAccountID: usedAccountID,
+ ResponsePreview: &preview,
+ ResponseTruncated: &truncated,
+ ResultRequestID: resultRequestID,
+ ErrorMessage: updateErrMsg,
}); err != nil {
- // Best-effort: retry itself already executed; do not fail the API response.
log.Printf("[Ops] UpdateRetryAttempt failed: %v", err)
+ } else if success {
+ if err := s.opsRepo.UpdateErrorResolution(updateCtx, errorID, true, &requestedByUserID, &attemptID, &finishedAt); err != nil {
+ log.Printf("[Ops] UpdateErrorResolution failed: %v", err)
+ }
}
return result, nil
diff --git a/backend/internal/service/ops_service.go b/backend/internal/service/ops_service.go
index 426d46f1..abb8ae12 100644
--- a/backend/internal/service/ops_service.go
+++ b/backend/internal/service/ops_service.go
@@ -208,6 +208,25 @@ func (s *OpsService) RecordError(ctx context.Context, entry *OpsInsertErrorLogIn
out.Detail = ""
}
+ out.UpstreamRequestBody = strings.TrimSpace(out.UpstreamRequestBody)
+ if out.UpstreamRequestBody != "" {
+ // Reuse the same sanitization/trimming strategy as request body storage.
+ // Keep it small so it is safe to persist in ops_error_logs JSON.
+ sanitized, truncated, _ := sanitizeAndTrimRequestBody([]byte(out.UpstreamRequestBody), 10*1024)
+ if sanitized != "" {
+ out.UpstreamRequestBody = sanitized
+ if truncated {
+ out.Kind = strings.TrimSpace(out.Kind)
+ if out.Kind == "" {
+ out.Kind = "upstream"
+ }
+ out.Kind = out.Kind + ":request_body_truncated"
+ }
+ } else {
+ out.UpstreamRequestBody = ""
+ }
+ }
+
// Drop fully-empty events (can happen if only status code was known).
if out.UpstreamStatusCode == 0 && out.Message == "" && out.Detail == "" {
continue
@@ -236,7 +255,13 @@ func (s *OpsService) GetErrorLogs(ctx context.Context, filter *OpsErrorLogFilter
if s.opsRepo == nil {
return &OpsErrorLogList{Errors: []*OpsErrorLog{}, Total: 0, Page: 1, PageSize: 20}, nil
}
- return s.opsRepo.ListErrorLogs(ctx, filter)
+ result, err := s.opsRepo.ListErrorLogs(ctx, filter)
+ if err != nil {
+ log.Printf("[Ops] GetErrorLogs failed: %v", err)
+ return nil, err
+ }
+
+ return result, nil
}
func (s *OpsService) GetErrorLogByID(ctx context.Context, id int64) (*OpsErrorLogDetail, error) {
@@ -256,6 +281,46 @@ func (s *OpsService) GetErrorLogByID(ctx context.Context, id int64) (*OpsErrorLo
return detail, nil
}
+func (s *OpsService) ListRetryAttemptsByErrorID(ctx context.Context, errorID int64, limit int) ([]*OpsRetryAttempt, error) {
+ if err := s.RequireMonitoringEnabled(ctx); err != nil {
+ return nil, err
+ }
+ if s.opsRepo == nil {
+ return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
+ }
+ if errorID <= 0 {
+ return nil, infraerrors.BadRequest("OPS_ERROR_INVALID_ID", "invalid error id")
+ }
+ items, err := s.opsRepo.ListRetryAttemptsByErrorID(ctx, errorID, limit)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return []*OpsRetryAttempt{}, nil
+ }
+ return nil, infraerrors.InternalServer("OPS_RETRY_LIST_FAILED", "Failed to list retry attempts").WithCause(err)
+ }
+ return items, nil
+}
+
+func (s *OpsService) UpdateErrorResolution(ctx context.Context, errorID int64, resolved bool, resolvedByUserID *int64, resolvedRetryID *int64) error {
+ if err := s.RequireMonitoringEnabled(ctx); err != nil {
+ return err
+ }
+ if s.opsRepo == nil {
+ return infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
+ }
+ if errorID <= 0 {
+ return infraerrors.BadRequest("OPS_ERROR_INVALID_ID", "invalid error id")
+ }
+ // Best-effort: verify the error log exists before updating its resolution
+ if _, err := s.opsRepo.GetErrorLogByID(ctx, errorID); err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ return infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found")
+ }
+ return infraerrors.InternalServer("OPS_ERROR_LOAD_FAILED", "Failed to load ops error log").WithCause(err)
+ }
+ return s.opsRepo.UpdateErrorResolution(ctx, errorID, resolved, resolvedByUserID, resolvedRetryID, nil)
+}
+
func sanitizeAndTrimRequestBody(raw []byte, maxBytes int) (jsonString string, truncated bool, bytesLen int) {
bytesLen = len(raw)
if len(raw) == 0 {
@@ -296,14 +361,34 @@ func sanitizeAndTrimRequestBody(raw []byte, maxBytes int) (jsonString string, tr
}
}
- // Last resort: store a minimal placeholder (still valid JSON).
- placeholder := map[string]any{
- "request_body_truncated": true,
+ // Last resort: keep JSON shape but drop big fields.
+ // This keeps downstream code that expects certain top-level keys from crashing.
+ if root, ok := decoded.(map[string]any); ok {
+ placeholder := shallowCopyMap(root)
+ placeholder["request_body_truncated"] = true
+
+ // Replace potentially huge arrays/strings, but keep the keys present.
+ for _, k := range []string{"messages", "contents", "input", "prompt"} {
+ if _, exists := placeholder[k]; exists {
+ placeholder[k] = []any{}
+ }
+ }
+ for _, k := range []string{"text"} {
+ if _, exists := placeholder[k]; exists {
+ placeholder[k] = ""
+ }
+ }
+
+ encoded4, err4 := json.Marshal(placeholder)
+ if err4 == nil && len(encoded4) <= maxBytes {
+ return string(encoded4), true, bytesLen
+ }
}
- if model := extractString(decoded, "model"); model != "" {
- placeholder["model"] = model
- }
- encoded4, err4 := json.Marshal(placeholder)
+
+ // Final fallback: minimal valid JSON.
+ encoded4, err4 := json.Marshal(map[string]any{"request_body_truncated": true})
if err4 != nil {
return "", true, bytesLen
}
@@ -526,12 +611,3 @@ func sanitizeErrorBodyForStorage(raw string, maxBytes int) (sanitized string, tr
}
return raw, false
}
-
-func extractString(v any, key string) string {
- root, ok := v.(map[string]any)
- if !ok {
- return ""
- }
- s, _ := root[key].(string)
- return strings.TrimSpace(s)
-}
diff --git a/backend/internal/service/ops_settings.go b/backend/internal/service/ops_settings.go
index 53c78fed..a6a4a0d7 100644
--- a/backend/internal/service/ops_settings.go
+++ b/backend/internal/service/ops_settings.go
@@ -368,9 +368,11 @@ func defaultOpsAdvancedSettings() *OpsAdvancedSettings {
Aggregation: OpsAggregationSettings{
AggregationEnabled: false,
},
- IgnoreCountTokensErrors: false,
- AutoRefreshEnabled: false,
- AutoRefreshIntervalSec: 30,
+ IgnoreCountTokensErrors: false,
+ IgnoreContextCanceled: true, // Default to true - client disconnects are not errors
+ IgnoreNoAvailableAccounts: false, // Default to false - this is a real routing issue
+ AutoRefreshEnabled: false,
+ AutoRefreshIntervalSec: 30,
}
}
@@ -482,13 +484,11 @@ const SettingKeyOpsMetricThresholds = "ops_metric_thresholds"
func defaultOpsMetricThresholds() *OpsMetricThresholds {
slaMin := 99.5
- latencyMax := 2000.0
ttftMax := 500.0
reqErrMax := 5.0
upstreamErrMax := 5.0
return &OpsMetricThresholds{
SLAPercentMin: &slaMin,
- LatencyP99MsMax: &latencyMax,
TTFTp99MsMax: &ttftMax,
RequestErrorRatePercentMax: &reqErrMax,
UpstreamErrorRatePercentMax: &upstreamErrMax,
@@ -538,9 +538,6 @@ func (s *OpsService) UpdateMetricThresholds(ctx context.Context, cfg *OpsMetricT
if cfg.SLAPercentMin != nil && (*cfg.SLAPercentMin < 0 || *cfg.SLAPercentMin > 100) {
return nil, errors.New("sla_percent_min must be between 0 and 100")
}
- if cfg.LatencyP99MsMax != nil && *cfg.LatencyP99MsMax < 0 {
- return nil, errors.New("latency_p99_ms_max must be >= 0")
- }
if cfg.TTFTp99MsMax != nil && *cfg.TTFTp99MsMax < 0 {
return nil, errors.New("ttft_p99_ms_max must be >= 0")
}
diff --git a/backend/internal/service/ops_settings_models.go b/backend/internal/service/ops_settings_models.go
index 229488a1..df06f578 100644
--- a/backend/internal/service/ops_settings_models.go
+++ b/backend/internal/service/ops_settings_models.go
@@ -63,7 +63,6 @@ type OpsAlertSilencingSettings struct {
type OpsMetricThresholds struct {
SLAPercentMin *float64 `json:"sla_percent_min,omitempty"` // turns red when SLA falls below this value
- LatencyP99MsMax *float64 `json:"latency_p99_ms_max,omitempty"` // turns red when latency P99 exceeds this value
TTFTp99MsMax *float64 `json:"ttft_p99_ms_max,omitempty"` // turns red when TTFT P99 exceeds this value
RequestErrorRatePercentMax *float64 `json:"request_error_rate_percent_max,omitempty"` // turns red when the request error rate exceeds this value
UpstreamErrorRatePercentMax *float64 `json:"upstream_error_rate_percent_max,omitempty"` // turns red when the upstream error rate exceeds this value
@@ -79,11 +78,13 @@ type OpsAlertRuntimeSettings struct {
// OpsAdvancedSettings stores advanced ops configuration (data retention, aggregation).
type OpsAdvancedSettings struct {
- DataRetention OpsDataRetentionSettings `json:"data_retention"`
- Aggregation OpsAggregationSettings `json:"aggregation"`
- IgnoreCountTokensErrors bool `json:"ignore_count_tokens_errors"`
- AutoRefreshEnabled bool `json:"auto_refresh_enabled"`
- AutoRefreshIntervalSec int `json:"auto_refresh_interval_seconds"`
+ DataRetention OpsDataRetentionSettings `json:"data_retention"`
+ Aggregation OpsAggregationSettings `json:"aggregation"`
+ IgnoreCountTokensErrors bool `json:"ignore_count_tokens_errors"`
+ IgnoreContextCanceled bool `json:"ignore_context_canceled"`
+ IgnoreNoAvailableAccounts bool `json:"ignore_no_available_accounts"`
+ AutoRefreshEnabled bool `json:"auto_refresh_enabled"`
+ AutoRefreshIntervalSec int `json:"auto_refresh_interval_seconds"`
}
type OpsDataRetentionSettings struct {
diff --git a/backend/internal/service/ops_upstream_context.go b/backend/internal/service/ops_upstream_context.go
index 615ae6a1..96bcc9fe 100644
--- a/backend/internal/service/ops_upstream_context.go
+++ b/backend/internal/service/ops_upstream_context.go
@@ -15,6 +15,11 @@ const (
OpsUpstreamErrorMessageKey = "ops_upstream_error_message"
OpsUpstreamErrorDetailKey = "ops_upstream_error_detail"
OpsUpstreamErrorsKey = "ops_upstream_errors"
+
+ // Best-effort capture of the current upstream request body so ops can
+ // retry the specific upstream attempt (not just the client request).
+ // This value is sanitized+trimmed before being persisted.
+ OpsUpstreamRequestBodyKey = "ops_upstream_request_body"
)
func setOpsUpstreamError(c *gin.Context, upstreamStatusCode int, upstreamMessage, upstreamDetail string) {
@@ -38,13 +43,21 @@ type OpsUpstreamErrorEvent struct {
AtUnixMs int64 `json:"at_unix_ms,omitempty"`
// Context
- Platform string `json:"platform,omitempty"`
- AccountID int64 `json:"account_id,omitempty"`
+ Platform string `json:"platform,omitempty"`
+ AccountID int64 `json:"account_id,omitempty"`
+ AccountName string `json:"account_name,omitempty"`
// Outcome
UpstreamStatusCode int `json:"upstream_status_code,omitempty"`
UpstreamRequestID string `json:"upstream_request_id,omitempty"`
+ // Best-effort upstream request capture (sanitized+trimmed).
+ // Required for retrying a specific upstream attempt.
+ UpstreamRequestBody string `json:"upstream_request_body,omitempty"`
+
+ // Best-effort upstream response capture (sanitized+trimmed).
+ UpstreamResponseBody string `json:"upstream_response_body,omitempty"`
+
// Kind: http_error | request_error | retry_exhausted | failover
Kind string `json:"kind,omitempty"`
@@ -61,6 +74,8 @@ func appendOpsUpstreamError(c *gin.Context, ev OpsUpstreamErrorEvent) {
}
ev.Platform = strings.TrimSpace(ev.Platform)
ev.UpstreamRequestID = strings.TrimSpace(ev.UpstreamRequestID)
+ ev.UpstreamRequestBody = strings.TrimSpace(ev.UpstreamRequestBody)
+ ev.UpstreamResponseBody = strings.TrimSpace(ev.UpstreamResponseBody)
ev.Kind = strings.TrimSpace(ev.Kind)
ev.Message = strings.TrimSpace(ev.Message)
ev.Detail = strings.TrimSpace(ev.Detail)
@@ -68,6 +83,16 @@ func appendOpsUpstreamError(c *gin.Context, ev OpsUpstreamErrorEvent) {
ev.Message = sanitizeUpstreamErrorMessage(ev.Message)
}
+ // If the caller didn't explicitly pass upstream request body but the gateway
+ // stored it on the context, attach it so ops can retry this specific attempt.
+ if ev.UpstreamRequestBody == "" {
+ if v, ok := c.Get(OpsUpstreamRequestBodyKey); ok {
+ if s, ok := v.(string); ok {
+ ev.UpstreamRequestBody = strings.TrimSpace(s)
+ }
+ }
+ }
+
var existing []*OpsUpstreamErrorEvent
if v, ok := c.Get(OpsUpstreamErrorsKey); ok {
if arr, ok := v.([]*OpsUpstreamErrorEvent); ok {
@@ -92,3 +117,15 @@ func marshalOpsUpstreamErrors(events []*OpsUpstreamErrorEvent) *string {
s := string(raw)
return &s
}
+
+func ParseOpsUpstreamErrors(raw string) ([]*OpsUpstreamErrorEvent, error) {
+ raw = strings.TrimSpace(raw)
+ if raw == "" {
+ return []*OpsUpstreamErrorEvent{}, nil
+ }
+ var out []*OpsUpstreamErrorEvent
+ if err := json.Unmarshal([]byte(raw), &out); err != nil {
+ return nil, err
+ }
+ return out, nil
+}
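
marshalOpsUpstreamErrors persists the per-request event slice as one JSON array, and the new ParseOpsUpstreamErrors re-hydrates it for consumers such as the retry endpoints. A minimal round-trip sketch, using a trimmed-down event struct and a hypothetical parseEvents that mirrors the parser above (field names follow the JSON tags in this diff):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Trimmed mirror of OpsUpstreamErrorEvent; only a few fields are shown.
type upstreamErrorEvent struct {
	Platform            string `json:"platform,omitempty"`
	AccountID           int64  `json:"account_id,omitempty"`
	UpstreamStatusCode  int    `json:"upstream_status_code,omitempty"`
	UpstreamRequestBody string `json:"upstream_request_body,omitempty"`
	Kind                string `json:"kind,omitempty"`
}

// parseEvents mirrors ParseOpsUpstreamErrors: empty input yields an empty
// slice; anything else must be a valid JSON array of events.
func parseEvents(raw string) ([]*upstreamErrorEvent, error) {
	raw = strings.TrimSpace(raw)
	if raw == "" {
		return []*upstreamErrorEvent{}, nil
	}
	var out []*upstreamErrorEvent
	if err := json.Unmarshal([]byte(raw), &out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	events := []*upstreamErrorEvent{{
		Platform:            "gemini",
		AccountID:           42,
		UpstreamStatusCode:  401,
		Kind:                "http_error",
		UpstreamRequestBody: `{"model":"gemini-pro"}`,
	}}
	raw, _ := json.Marshal(events) // what marshalOpsUpstreamErrors would persist
	parsed, err := parseEvents(string(raw))
	fmt.Println(len(parsed), parsed[0].UpstreamStatusCode, err) // 1 401 <nil>
}
```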
diff --git a/backend/internal/service/proxy.go b/backend/internal/service/proxy.go
index 768e2a0a..9cb31808 100644
--- a/backend/internal/service/proxy.go
+++ b/backend/internal/service/proxy.go
@@ -31,5 +31,16 @@ func (p *Proxy) URL() string {
type ProxyWithAccountCount struct {
Proxy
- AccountCount int64
+ AccountCount int64
+ LatencyMs *int64
+ LatencyStatus string
+ LatencyMessage string
+}
+
+type ProxyAccountSummary struct {
+ ID int64
+ Name string
+ Platform string
+ Type string
+ Notes *string
}
diff --git a/backend/internal/service/proxy_latency_cache.go b/backend/internal/service/proxy_latency_cache.go
new file mode 100644
index 00000000..2901df0b
--- /dev/null
+++ b/backend/internal/service/proxy_latency_cache.go
@@ -0,0 +1,18 @@
+package service
+
+import (
+ "context"
+ "time"
+)
+
+type ProxyLatencyInfo struct {
+ Success bool `json:"success"`
+ LatencyMs *int64 `json:"latency_ms,omitempty"`
+ Message string `json:"message,omitempty"`
+ UpdatedAt time.Time `json:"updated_at"`
+}
+
+type ProxyLatencyCache interface {
+ GetProxyLatencies(ctx context.Context, proxyIDs []int64) (map[int64]*ProxyLatencyInfo, error)
+ SetProxyLatency(ctx context.Context, proxyID int64, info *ProxyLatencyInfo) error
+}
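
ProxyLatencyCache is declared here as an interface only; the Redis-backed implementation is assumed to live in the repository layer, like the other caches wired in this patch. A minimal in-memory sketch of the contract, e.g. for tests:

```go
package service

import (
	"context"
	"sync"
	"time"
)

// memoryProxyLatencyCache is an in-memory sketch of ProxyLatencyCache,
// intended for tests; the production store is presumably Redis.
type memoryProxyLatencyCache struct {
	mu   sync.RWMutex
	data map[int64]*ProxyLatencyInfo
}

func newMemoryProxyLatencyCache() *memoryProxyLatencyCache {
	return &memoryProxyLatencyCache{data: make(map[int64]*ProxyLatencyInfo)}
}

// GetProxyLatencies returns entries only for IDs that have been recorded.
func (c *memoryProxyLatencyCache) GetProxyLatencies(ctx context.Context, proxyIDs []int64) (map[int64]*ProxyLatencyInfo, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	out := make(map[int64]*ProxyLatencyInfo, len(proxyIDs))
	for _, id := range proxyIDs {
		if info, ok := c.data[id]; ok {
			out[id] = info
		}
	}
	return out, nil
}

// SetProxyLatency stores the probe result, stamping UpdatedAt if unset.
func (c *memoryProxyLatencyCache) SetProxyLatency(ctx context.Context, proxyID int64, info *ProxyLatencyInfo) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if info != nil && info.UpdatedAt.IsZero() {
		info.UpdatedAt = time.Now()
	}
	c.data[proxyID] = info
	return nil
}
```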
diff --git a/backend/internal/service/proxy_service.go b/backend/internal/service/proxy_service.go
index 58408d04..a5d897f6 100644
--- a/backend/internal/service/proxy_service.go
+++ b/backend/internal/service/proxy_service.go
@@ -10,6 +10,7 @@ import (
var (
ErrProxyNotFound = infraerrors.NotFound("PROXY_NOT_FOUND", "proxy not found")
+ ErrProxyInUse = infraerrors.Conflict("PROXY_IN_USE", "proxy is in use by accounts")
)
type ProxyRepository interface {
@@ -26,6 +27,7 @@ type ProxyRepository interface {
ExistsByHostPortAuth(ctx context.Context, host string, port int, username, password string) (bool, error)
CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error)
+ ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error)
}
// CreateProxyRequest is the create-proxy request payload
diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go
index 8c4ffb68..ca479486 100644
--- a/backend/internal/service/ratelimit_service.go
+++ b/backend/internal/service/ratelimit_service.go
@@ -179,7 +179,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account,
start := geminiDailyWindowStart(now)
totals, ok := s.getGeminiUsageTotals(account.ID, start, now)
if !ok {
- stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID)
+ stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil)
if err != nil {
return true, err
}
@@ -226,7 +226,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account,
if limit > 0 {
start := now.Truncate(time.Minute)
- stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID)
+ stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil)
if err != nil {
return true, err
}
diff --git a/backend/internal/service/usage_log.go b/backend/internal/service/usage_log.go
index 62d7fae0..3b0e934f 100644
--- a/backend/internal/service/usage_log.go
+++ b/backend/internal/service/usage_log.go
@@ -33,6 +33,8 @@ type UsageLog struct {
TotalCost float64
ActualCost float64
RateMultiplier float64
+ // AccountRateMultiplier snapshots the account billing rate multiplier (nil means historical data, treated as 1.0)
+ AccountRateMultiplier *float64
BillingType int8
Stream bool
diff --git a/backend/migrations/037_add_account_rate_multiplier.sql b/backend/migrations/037_add_account_rate_multiplier.sql
new file mode 100644
index 00000000..06f5b090
--- /dev/null
+++ b/backend/migrations/037_add_account_rate_multiplier.sql
@@ -0,0 +1,14 @@
+-- Add account billing rate multiplier and per-usage snapshot.
+--
+-- accounts.rate_multiplier: account billing rate multiplier (>=0; 0 is allowed and means the account bills at 0).
+-- usage_logs.account_rate_multiplier: per-usage-log snapshot of the account multiplier, used so that
+-- "multiplier changes only affect subsequent requests", and to support weighted stats across
+-- same-day segments with different multipliers.
+--
+-- Note: usage_logs.account_rate_multiplier is neither backfilled nor declared NOT NULL.
+-- NULL rows (historical data) are treated as 1.0 in statistics (COALESCE).
+
+ALTER TABLE IF EXISTS accounts
+ ADD COLUMN IF NOT EXISTS rate_multiplier DECIMAL(10,4) NOT NULL DEFAULT 1.0;
+
+ALTER TABLE IF EXISTS usage_logs
+ ADD COLUMN IF NOT EXISTS account_rate_multiplier DECIMAL(10,4);
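
Per the note above, the account-billed figure for a usage row is total_cost × COALESCE(account_rate_multiplier, 1.0). A small Go sketch of that rule (accountBilledCost is illustrative, not a function from this patch):

```go
package main

import "fmt"

// accountBilledCost mirrors the COALESCE rule from the migration comment:
// a NULL (nil) account_rate_multiplier on historical rows bills as 1.0.
func accountBilledCost(totalCost float64, multiplier *float64) float64 {
	m := 1.0
	if multiplier != nil {
		m = *multiplier
	}
	return totalCost * m
}

func main() {
	half, zero := 0.5, 0.0
	fmt.Println(accountBilledCost(0.10, nil))   // 0.1  (historical row, treated as 1.0)
	fmt.Println(accountBilledCost(0.10, &half)) // 0.05 (snapshotted 0.5x)
	fmt.Println(accountBilledCost(0.10, &zero)) // 0    (0 means the account bills at 0)
}
```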
diff --git a/backend/migrations/037_ops_alert_silences.sql b/backend/migrations/037_ops_alert_silences.sql
new file mode 100644
index 00000000..95b61a09
--- /dev/null
+++ b/backend/migrations/037_ops_alert_silences.sql
@@ -0,0 +1,28 @@
+-- +goose Up
+-- +goose StatementBegin
+-- Ops alert silences: scoped (rule_id + platform + group_id + region)
+
+CREATE TABLE IF NOT EXISTS ops_alert_silences (
+ id BIGSERIAL PRIMARY KEY,
+
+ rule_id BIGINT NOT NULL,
+ platform VARCHAR(64) NOT NULL,
+ group_id BIGINT,
+ region VARCHAR(64),
+
+ until TIMESTAMPTZ NOT NULL,
+ reason TEXT,
+
+ created_by BIGINT,
+ created_at TIMESTAMPTZ NOT NULL DEFAULT NOW()
+);
+
+CREATE INDEX IF NOT EXISTS idx_ops_alert_silences_lookup
+ ON ops_alert_silences (rule_id, platform, group_id, region, until);
+
+-- +goose StatementEnd
+
+-- +goose Down
+-- +goose StatementBegin
+DROP TABLE IF EXISTS ops_alert_silences;
+-- +goose StatementEnd
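
A silence row is scoped by (rule_id, platform, group_id, region) and is active while until lies in the future, which is exactly what the lookup index covers. A hedged sketch of the lookup the alert evaluator presumably runs (the actual service-layer query is not part of this diff):

```go
package service

import (
	"context"
	"database/sql"
)

// isSilenced sketches a scoped lookup against ops_alert_silences. Note that
// IS NOT DISTINCT FROM matches NULL exactly: a silence stored without a
// group/region only matches lookups that also pass nil for those dimensions.
func isSilenced(ctx context.Context, db *sql.DB, ruleID int64, platform string, groupID *int64, region *string) (bool, error) {
	const q = `
SELECT EXISTS (
    SELECT 1 FROM ops_alert_silences
    WHERE rule_id = $1
      AND platform = $2
      AND group_id IS NOT DISTINCT FROM $3
      AND region IS NOT DISTINCT FROM $4
      AND until > NOW()
)`
	var silenced bool
	err := db.QueryRowContext(ctx, q, ruleID, platform, groupID, region).Scan(&silenced)
	return silenced, err
}
```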
diff --git a/backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql b/backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql
new file mode 100644
index 00000000..adaacf1c
--- /dev/null
+++ b/backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql
@@ -0,0 +1,111 @@
+-- Add resolution tracking to ops_error_logs, persist retry results, and standardize error classification enums.
+--
+-- This migration is intentionally idempotent.
+
+SET LOCAL lock_timeout = '5s';
+SET LOCAL statement_timeout = '10min';
+
+-- ============================================
+-- 1) ops_error_logs: resolution fields
+-- ============================================
+
+ALTER TABLE ops_error_logs
+ ADD COLUMN IF NOT EXISTS resolved BOOLEAN NOT NULL DEFAULT false;
+
+ALTER TABLE ops_error_logs
+ ADD COLUMN IF NOT EXISTS resolved_at TIMESTAMPTZ;
+
+ALTER TABLE ops_error_logs
+ ADD COLUMN IF NOT EXISTS resolved_by_user_id BIGINT;
+
+ALTER TABLE ops_error_logs
+ ADD COLUMN IF NOT EXISTS resolved_retry_id BIGINT;
+
+CREATE INDEX IF NOT EXISTS idx_ops_error_logs_resolved_time
+ ON ops_error_logs (resolved, created_at DESC);
+
+CREATE INDEX IF NOT EXISTS idx_ops_error_logs_unresolved_time
+ ON ops_error_logs (created_at DESC)
+ WHERE resolved = false;
+
+-- ============================================
+-- 2) ops_retry_attempts: persist execution results
+-- ============================================
+
+ALTER TABLE ops_retry_attempts
+ ADD COLUMN IF NOT EXISTS success BOOLEAN;
+
+ALTER TABLE ops_retry_attempts
+ ADD COLUMN IF NOT EXISTS http_status_code INT;
+
+ALTER TABLE ops_retry_attempts
+ ADD COLUMN IF NOT EXISTS upstream_request_id VARCHAR(128);
+
+ALTER TABLE ops_retry_attempts
+ ADD COLUMN IF NOT EXISTS used_account_id BIGINT;
+
+ALTER TABLE ops_retry_attempts
+ ADD COLUMN IF NOT EXISTS response_preview TEXT;
+
+ALTER TABLE ops_retry_attempts
+ ADD COLUMN IF NOT EXISTS response_truncated BOOLEAN NOT NULL DEFAULT false;
+
+CREATE INDEX IF NOT EXISTS idx_ops_retry_attempts_success_time
+ ON ops_retry_attempts (success, created_at DESC);
+
+-- Backfill best-effort fields for existing rows.
+UPDATE ops_retry_attempts
+SET success = (LOWER(COALESCE(status, '')) = 'succeeded')
+WHERE success IS NULL;
+
+UPDATE ops_retry_attempts
+SET upstream_request_id = result_request_id
+WHERE upstream_request_id IS NULL AND result_request_id IS NOT NULL;
+
+-- ============================================
+-- 3) Standardize classification enums in ops_error_logs
+--
+-- New enums:
+-- error_phase: request|auth|routing|upstream|network|internal
+-- error_owner: client|provider|platform
+-- error_source: client_request|upstream_http|gateway
+-- ============================================
+
+-- Owner: legacy sub2api => platform.
+UPDATE ops_error_logs
+SET error_owner = 'platform'
+WHERE LOWER(COALESCE(error_owner, '')) = 'sub2api';
+
+-- Owner: normalize empty/null to platform (best-effort).
+UPDATE ops_error_logs
+SET error_owner = 'platform'
+WHERE COALESCE(TRIM(error_owner), '') = '';
+
+-- Phase: map legacy phases.
+UPDATE ops_error_logs
+SET error_phase = CASE
+ WHEN COALESCE(TRIM(error_phase), '') = '' THEN 'internal'
+ WHEN LOWER(error_phase) IN ('billing', 'concurrency', 'response') THEN 'request'
+ WHEN LOWER(error_phase) IN ('scheduling') THEN 'routing'
+ WHEN LOWER(error_phase) IN ('request', 'auth', 'routing', 'upstream', 'network', 'internal') THEN LOWER(error_phase)
+ ELSE 'internal'
+END;
+
+-- Source: map legacy sources.
+UPDATE ops_error_logs
+SET error_source = CASE
+ WHEN COALESCE(TRIM(error_source), '') = '' THEN 'gateway'
+ WHEN LOWER(error_source) IN ('billing', 'concurrency') THEN 'client_request'
+ WHEN LOWER(error_source) IN ('upstream_http') THEN 'upstream_http'
+ WHEN LOWER(error_source) IN ('upstream_network') THEN 'gateway'
+ WHEN LOWER(error_source) IN ('internal') THEN 'gateway'
+ WHEN LOWER(error_source) IN ('client_request', 'upstream_http', 'gateway') THEN LOWER(error_source)
+ ELSE 'gateway'
+END;
+
+-- Auto-resolve recovered upstream errors (client status < 400).
+UPDATE ops_error_logs
+SET
+ resolved = true,
+ resolved_at = COALESCE(resolved_at, created_at)
+WHERE resolved = false AND COALESCE(status_code, 0) > 0 AND COALESCE(status_code, 0) < 400;
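
The CASE expressions collapse legacy values into the standardized enums named in the header comment. For reference, a Go mirror of the error_phase mapping, which new writes would presumably follow as well:

```go
package main

import (
	"fmt"
	"strings"
)

// normalizePhase mirrors the error_phase CASE mapping in the migration:
// legacy values collapse into request|auth|routing|upstream|network|internal.
func normalizePhase(phase string) string {
	switch p := strings.ToLower(strings.TrimSpace(phase)); p {
	case "":
		return "internal"
	case "billing", "concurrency", "response":
		return "request"
	case "scheduling":
		return "routing"
	case "request", "auth", "routing", "upstream", "network", "internal":
		return p
	default:
		return "internal"
	}
}

func main() {
	fmt.Println(normalizePhase("Scheduling")) // routing
	fmt.Println(normalizePhase("billing"))    // request
	fmt.Println(normalizePhase(""))           // internal
}
```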
diff --git a/frontend/src/api/admin/dashboard.ts b/frontend/src/api/admin/dashboard.ts
index 83e56c0e..9b338788 100644
--- a/frontend/src/api/admin/dashboard.ts
+++ b/frontend/src/api/admin/dashboard.ts
@@ -46,6 +46,10 @@ export interface TrendParams {
granularity?: 'day' | 'hour'
user_id?: number
api_key_id?: number
+ model?: string
+ account_id?: number
+ group_id?: number
+ stream?: boolean
}
export interface TrendResponse {
@@ -70,6 +74,10 @@ export interface ModelStatsParams {
end_date?: string
user_id?: number
api_key_id?: number
+ model?: string
+ account_id?: number
+ group_id?: number
+ stream?: boolean
}
export interface ModelStatsResponse {
diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts
index ce0ab58d..63b12cfb 100644
--- a/frontend/src/api/admin/ops.ts
+++ b/frontend/src/api/admin/ops.ts
@@ -17,6 +17,47 @@ export interface OpsRequestOptions {
export interface OpsRetryRequest {
mode: OpsRetryMode
pinned_account_id?: number
+ force?: boolean
+}
+
+export interface OpsRetryAttempt {
+ id: number
+ created_at: string
+ requested_by_user_id: number
+ source_error_id: number
+ mode: string
+ pinned_account_id?: number | null
+ pinned_account_name?: string
+
+ status: string
+ started_at?: string | null
+ finished_at?: string | null
+ duration_ms?: number | null
+
+ success?: boolean | null
+ http_status_code?: number | null
+ upstream_request_id?: string | null
+ used_account_id?: number | null
+ used_account_name?: string
+ response_preview?: string | null
+ response_truncated?: boolean | null
+
+ result_request_id?: string | null
+ result_error_id?: number | null
+ error_message?: string | null
+}
+
+export type OpsUpstreamErrorEvent = {
+ at_unix_ms?: number
+ platform?: string
+ account_id?: number
+ account_name?: string
+ upstream_status_code?: number
+ upstream_request_id?: string
+ upstream_request_body?: string
+ kind?: string
+ message?: string
+ detail?: string
}
export interface OpsRetryResult {
@@ -626,8 +667,6 @@ export type MetricType =
| 'success_rate'
| 'error_rate'
| 'upstream_error_rate'
- | 'p95_latency_ms'
- | 'p99_latency_ms'
| 'cpu_usage_percent'
| 'memory_usage_percent'
| 'concurrency_queue_depth'
@@ -663,7 +702,7 @@ export interface AlertEvent {
id: number
rule_id: number
severity: OpsSeverity | string
- status: 'firing' | 'resolved' | string
+ status: 'firing' | 'resolved' | 'manual_resolved' | string
title?: string
description?: string
metric_value?: number
@@ -701,10 +740,9 @@ export interface EmailNotificationConfig {
}
export interface OpsMetricThresholds {
- sla_percent_min?: number | null // SLA turns red below this value
- latency_p99_ms_max?: number | null // latency P99 turns red above this value
- ttft_p99_ms_max?: number | null // TTFT P99 turns red above this value
- request_error_rate_percent_max?: number | null // request error rate turns red above this value
+ sla_percent_min?: number | null // SLA turns red below this value
+ ttft_p99_ms_max?: number | null // TTFT P99 turns red above this value
+ request_error_rate_percent_max?: number | null // request error rate turns red above this value
upstream_error_rate_percent_max?: number | null // upstream error rate turns red above this value
}
@@ -735,6 +773,8 @@ export interface OpsAdvancedSettings {
data_retention: OpsDataRetentionSettings
aggregation: OpsAggregationSettings
ignore_count_tokens_errors: boolean
+ ignore_context_canceled: boolean
+ ignore_no_available_accounts: boolean
auto_refresh_enabled: boolean
auto_refresh_interval_seconds: number
}
@@ -754,21 +794,37 @@ export interface OpsAggregationSettings {
export interface OpsErrorLog {
id: number
created_at: string
+
+ // Standardized classification
phase: OpsPhase
type: string
+ error_owner: 'client' | 'provider' | 'platform' | string
+ error_source: 'client_request' | 'upstream_http' | 'gateway' | string
+
severity: OpsSeverity
status_code: number
platform: string
model: string
- latency_ms?: number | null
+
+ is_retryable: boolean
+ retry_count: number
+
+ resolved: boolean
+ resolved_at?: string | null
+ resolved_by_user_id?: number | null
+ resolved_retry_id?: number | null
+
client_request_id: string
request_id: string
message: string
user_id?: number | null
+ user_email: string
api_key_id?: number | null
account_id?: number | null
+ account_name: string
group_id?: number | null
+ group_name: string
client_ip?: string | null
request_path?: string
@@ -890,7 +946,9 @@ export async function getErrorDistribution(
return data
}
-export async function listErrorLogs(params: {
+export type OpsErrorListView = 'errors' | 'excluded' | 'all'
+
+export type OpsErrorListQueryParams = {
page?: number
page_size?: number
time_range?: string
@@ -899,10 +957,20 @@ export async function listErrorLogs(params: {
platform?: string
group_id?: number | null
account_id?: number | null
+
phase?: string
+ error_owner?: string
+ error_source?: string
+ resolved?: string
+ view?: OpsErrorListView
+
q?: string
status_codes?: string
-}): Promise {
+ status_codes_other?: string
+}
+
+// Legacy unified endpoints
+export async function listErrorLogs(params: OpsErrorListQueryParams): Promise {
const { data } = await apiClient.get('/admin/ops/errors', { params })
return data
}
@@ -917,6 +985,70 @@ export async function retryErrorRequest(id: number, req: OpsRetryRequest): Promi
return data
}
+export async function listRetryAttempts(errorId: number, limit = 50): Promise<OpsRetryAttempt[]> {
+ const { data } = await apiClient.get(`/admin/ops/errors/${errorId}/retries`, { params: { limit } })
+ return data
+}
+
+export async function updateErrorResolved(errorId: number, resolved: boolean): Promise {
+ await apiClient.put(`/admin/ops/errors/${errorId}/resolve`, { resolved })
+}
+
+// New split endpoints
+export async function listRequestErrors(params: OpsErrorListQueryParams): Promise {
+ const { data } = await apiClient.get('/admin/ops/request-errors', { params })
+ return data
+}
+
+export async function listUpstreamErrors(params: OpsErrorListQueryParams): Promise {
+ const { data } = await apiClient.get('/admin/ops/upstream-errors', { params })
+ return data
+}
+
+export async function getRequestErrorDetail(id: number): Promise {
+ const { data } = await apiClient.get(`/admin/ops/request-errors/${id}`)
+ return data
+}
+
+export async function getUpstreamErrorDetail(id: number): Promise {
+ const { data } = await apiClient.get(`/admin/ops/upstream-errors/${id}`)
+ return data
+}
+
+export async function retryRequestErrorClient(id: number): Promise<OpsRetryResult> {
+ const { data } = await apiClient.post(`/admin/ops/request-errors/${id}/retry-client`, {})
+ return data
+}
+
+export async function retryRequestErrorUpstreamEvent(id: number, idx: number): Promise<OpsRetryResult> {
+ const { data } = await apiClient.post(`/admin/ops/request-errors/${id}/upstream-errors/${idx}/retry`, {})
+ return data
+}
+
+export async function retryUpstreamError(id: number): Promise<OpsRetryResult> {
+ const { data } = await apiClient.post(`/admin/ops/upstream-errors/${id}/retry`, {})
+ return data
+}
+
+export async function updateRequestErrorResolved(errorId: number, resolved: boolean): Promise {
+ await apiClient.put(`/admin/ops/request-errors/${errorId}/resolve`, { resolved })
+}
+
+export async function updateUpstreamErrorResolved(errorId: number, resolved: boolean): Promise {
+ await apiClient.put(`/admin/ops/upstream-errors/${errorId}/resolve`, { resolved })
+}
+
+export async function listRequestErrorUpstreamErrors(
+ id: number,
+ params: OpsErrorListQueryParams = {},
+ options: { include_detail?: boolean } = {}
+): Promise<PaginatedResponse<OpsUpstreamErrorEvent>> {
+ const query: Record<string, unknown> = { ...params }
+ if (options.include_detail) query.include_detail = '1'
+ const { data } = await apiClient.get<PaginatedResponse<OpsUpstreamErrorEvent>>(`/admin/ops/request-errors/${id}/upstream-errors`, { params: query })
+ return data
+}
+
export async function listRequestDetails(params: OpsRequestDetailsParams): Promise {
const { data } = await apiClient.get('/admin/ops/requests', { params })
return data
@@ -942,11 +1074,45 @@ export async function deleteAlertRule(id: number): Promise {
await apiClient.delete(`/admin/ops/alert-rules/${id}`)
}
-export async function listAlertEvents(limit = 100): Promise<AlertEvent[]> {
- const { data } = await apiClient.get('/admin/ops/alert-events', { params: { limit } })
+export interface AlertEventsQuery {
+ limit?: number
+ status?: string
+ severity?: string
+ email_sent?: boolean
+ time_range?: string
+ start_time?: string
+ end_time?: string
+ before_fired_at?: string
+ before_id?: number
+ platform?: string
+ group_id?: number
+}
+
+export async function listAlertEvents(params: AlertEventsQuery = {}): Promise<AlertEvent[]> {
+ const { data } = await apiClient.get('/admin/ops/alert-events', { params })
return data
}
+export async function getAlertEvent(id: number): Promise<AlertEvent> {
+ const { data } = await apiClient.get(`/admin/ops/alert-events/${id}`)
+ return data
+}
+
+export async function updateAlertEventStatus(id: number, status: 'resolved' | 'manual_resolved'): Promise {
+ await apiClient.put(`/admin/ops/alert-events/${id}/status`, { status })
+}
+
+export async function createAlertSilence(payload: {
+ rule_id: number
+ platform: string
+ group_id?: number | null
+ region?: string | null
+ until: string
+ reason?: string
+}): Promise {
+ await apiClient.post('/admin/ops/alert-silences', payload)
+}
+
// Email notification config
export async function getEmailNotificationConfig(): Promise {
const { data } = await apiClient.get('/admin/ops/email-notification/config')
@@ -1001,15 +1167,35 @@ export const opsAPI = {
getAccountAvailabilityStats,
getRealtimeTrafficSummary,
subscribeQPS,
+
+ // Legacy unified endpoints
listErrorLogs,
getErrorLogDetail,
retryErrorRequest,
+ listRetryAttempts,
+ updateErrorResolved,
+
+ // New split endpoints
+ listRequestErrors,
+ listUpstreamErrors,
+ getRequestErrorDetail,
+ getUpstreamErrorDetail,
+ retryRequestErrorClient,
+ retryRequestErrorUpstreamEvent,
+ retryUpstreamError,
+ updateRequestErrorResolved,
+ updateUpstreamErrorResolved,
+ listRequestErrorUpstreamErrors,
+
listRequestDetails,
listAlertRules,
createAlertRule,
updateAlertRule,
deleteAlertRule,
listAlertEvents,
+ getAlertEvent,
+ updateAlertEventStatus,
+ createAlertSilence,
getEmailNotificationConfig,
updateEmailNotificationConfig,
getAlertRuntimeSettings,
diff --git a/frontend/src/api/admin/proxies.ts b/frontend/src/api/admin/proxies.ts
index fe20a205..fc526f7a 100644
--- a/frontend/src/api/admin/proxies.ts
+++ b/frontend/src/api/admin/proxies.ts
@@ -4,7 +4,13 @@
*/
import { apiClient } from '../client'
-import type { Proxy, CreateProxyRequest, UpdateProxyRequest, PaginatedResponse } from '@/types'
+import type {
+ Proxy,
+ ProxyAccountSummary,
+ CreateProxyRequest,
+ UpdateProxyRequest,
+ PaginatedResponse
+} from '@/types'
/**
* List all proxies with pagination
@@ -160,8 +166,8 @@ export async function getStats(id: number): Promise<{
* @param id - Proxy ID
* @returns List of accounts using the proxy
*/
-export async function getProxyAccounts(id: number): Promise> {
- const { data } = await apiClient.get>(`/admin/proxies/${id}/accounts`)
+export async function getProxyAccounts(id: number): Promise<ProxyAccountSummary[]> {
+ const { data } = await apiClient.get(`/admin/proxies/${id}/accounts`)
return data
}
@@ -189,6 +195,17 @@ export async function batchCreate(
return data
}
+export async function batchDelete(ids: number[]): Promise<{
+ deleted_ids: number[]
+ skipped: Array<{ id: number; reason: string }>
+}> {
+ const { data } = await apiClient.post<{
+ deleted_ids: number[]
+ skipped: Array<{ id: number; reason: string }>
+ }>('/admin/proxies/batch-delete', { ids })
+ return data
+}
+
export const proxiesAPI = {
list,
getAll,
@@ -201,7 +218,8 @@ export const proxiesAPI = {
testProxy,
getStats,
getProxyAccounts,
- batchCreate
+ batchCreate,
+ batchDelete
}
export default proxiesAPI
diff --git a/frontend/src/api/admin/usage.ts b/frontend/src/api/admin/usage.ts
index ca76234b..dd85fc24 100644
--- a/frontend/src/api/admin/usage.ts
+++ b/frontend/src/api/admin/usage.ts
@@ -16,6 +16,7 @@ export interface AdminUsageStatsResponse {
total_tokens: number
total_cost: number
total_actual_cost: number
+ total_account_cost?: number
average_duration_ms: number
}
diff --git a/frontend/src/components/account/AccountStatsModal.vue b/frontend/src/components/account/AccountStatsModal.vue
index 92016699..7968fa8d 100644
--- a/frontend/src/components/account/AccountStatsModal.vue
+++ b/frontend/src/components/account/AccountStatsModal.vue
@@ -73,11 +73,12 @@
{{ t('admin.accounts.stats.accumulatedCost') }}
- ({{ t('admin.accounts.stats.standardCost') }}: ${{
+
+ ({{ t('usage.userBilled') }}: ${{ formatCost(stats.summary.total_user_cost) }} ·
+ {{ t('admin.accounts.stats.standardCost') }}: ${{
formatCost(stats.summary.total_standard_cost)
- }})
+ }})
+
@@ -121,12 +122,15 @@
${{ formatCost(stats.summary.avg_daily_cost) }}
-
+
{{
t('admin.accounts.stats.basedOnActualDays', {
days: stats.summary.actual_days_used
})
}}
+
+ ({{ t('usage.userBilled') }}: ${{ formatCost(stats.summary.avg_daily_user_cost) }})
+
@@ -189,13 +193,17 @@
- {{
- t('admin.accounts.stats.cost')
- }}
+ {{ t('usage.accountBilled') }}
${{ formatCost(stats.summary.today?.cost || 0) }}
+
+ {{ t('usage.userBilled') }}
+ ${{ formatCost(stats.summary.today?.user_cost || 0) }}
+
{{
t('admin.accounts.stats.requests')
@@ -240,13 +248,17 @@
}}
- {{
- t('admin.accounts.stats.cost')
- }}
+ {{ t('usage.accountBilled') }}
${{ formatCost(stats.summary.highest_cost_day?.cost || 0) }}
+
+ {{ t('usage.userBilled') }}
+ ${{ formatCost(stats.summary.highest_cost_day?.user_cost || 0) }}
+
{{
t('admin.accounts.stats.requests')
@@ -291,13 +303,17 @@
}}
- {{
- t('admin.accounts.stats.cost')
- }}
+ {{ t('usage.accountBilled') }}
${{ formatCost(stats.summary.highest_request_day?.cost || 0) }}
+
+ {{ t('usage.userBilled') }}
+ ${{ formatCost(stats.summary.highest_request_day?.user_cost || 0) }}
+
@@ -397,13 +413,17 @@
}}
- {{
- t('admin.accounts.stats.todayCost')
- }}
+ {{ t('usage.accountBilled') }}
${{ formatCost(stats.summary.today?.cost || 0) }}
+
+ {{ t('usage.userBilled') }}
+ ${{ formatCost(stats.summary.today?.user_cost || 0) }}
+
@@ -517,14 +537,24 @@ const trendChartData = computed(() => {
labels: stats.value.history.map((h) => h.label),
datasets: [
{
- label: t('admin.accounts.stats.cost') + ' (USD)',
- data: stats.value.history.map((h) => h.cost),
+ label: t('usage.accountBilled') + ' (USD)',
+ data: stats.value.history.map((h) => h.actual_cost),
borderColor: '#3b82f6',
backgroundColor: 'rgba(59, 130, 246, 0.1)',
fill: true,
tension: 0.3,
yAxisID: 'y'
},
+ {
+ label: t('usage.userBilled') + ' (USD)',
+ data: stats.value.history.map((h) => h.user_cost),
+ borderColor: '#10b981',
+ backgroundColor: 'rgba(16, 185, 129, 0.08)',
+ fill: false,
+ tension: 0.3,
+ borderDash: [5, 5],
+ yAxisID: 'y'
+ },
{
label: t('admin.accounts.stats.requests'),
data: stats.value.history.map((h) => h.requests),
@@ -602,7 +632,7 @@ const lineChartOptions = computed(() => ({
},
title: {
display: true,
- text: t('admin.accounts.stats.cost') + ' (USD)',
+ text: t('usage.accountBilled') + ' (USD)',
color: '#3b82f6',
font: {
size: 11
diff --git a/frontend/src/components/account/AccountTodayStatsCell.vue b/frontend/src/components/account/AccountTodayStatsCell.vue
index b8bbc618..a920f314 100644
--- a/frontend/src/components/account/AccountTodayStatsCell.vue
+++ b/frontend/src/components/account/AccountTodayStatsCell.vue
@@ -32,15 +32,20 @@
formatTokens(stats.tokens)
}}
-
+
- {{ t('admin.accounts.stats.cost') }}:
+ {{ t('usage.accountBilled') }}:
{{
formatCurrency(stats.cost)
}}
+
+
+ {{ t('usage.userBilled') }}:
+ {{
+ formatCurrency(stats.user_cost)
+ }}
+
diff --git a/frontend/src/components/account/BulkEditAccountModal.vue b/frontend/src/components/account/BulkEditAccountModal.vue
index 9ccf6130..fb776e96 100644
--- a/frontend/src/components/account/BulkEditAccountModal.vue
+++ b/frontend/src/components/account/BulkEditAccountModal.vue
@@ -459,7 +459,7 @@
-
+
+
+
+
+
+
+
+
{{ t('admin.accounts.billingRateMultiplierHint') }}
+
@@ -655,6 +685,7 @@ const enableInterceptWarmup = ref(false)
const enableProxy = ref(false)
const enableConcurrency = ref(false)
const enablePriority = ref(false)
+const enableRateMultiplier = ref(false)
const enableStatus = ref(false)
const enableGroups = ref(false)
@@ -670,6 +701,7 @@ const interceptWarmupRequests = ref(false)
const proxyId = ref
(null)
const concurrency = ref(1)
const priority = ref(1)
+const rateMultiplier = ref(1)
const status = ref<'active' | 'inactive'>('active')
const groupIds = ref([])
@@ -863,6 +895,10 @@ const buildUpdatePayload = (): Record | null => {
updates.priority = priority.value
}
+ if (enableRateMultiplier.value) {
+ updates.rate_multiplier = rateMultiplier.value
+ }
+
if (enableStatus.value) {
updates.status = status.value
}
@@ -923,6 +959,7 @@ const handleSubmit = async () => {
enableProxy.value ||
enableConcurrency.value ||
enablePriority.value ||
+ enableRateMultiplier.value ||
enableStatus.value ||
enableGroups.value
@@ -977,6 +1014,7 @@ watch(
enableProxy.value = false
enableConcurrency.value = false
enablePriority.value = false
+ enableRateMultiplier.value = false
enableStatus.value = false
enableGroups.value = false
@@ -991,6 +1029,7 @@ watch(
proxyId.value = null
concurrency.value = 1
priority.value = 1
+ rateMultiplier.value = 1
status.value = 'active'
groupIds.value = []
}
diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue
index a56a987f..c81de00e 100644
--- a/frontend/src/components/account/CreateAccountModal.vue
+++ b/frontend/src/components/account/CreateAccountModal.vue
@@ -1196,7 +1196,7 @@
-
+
+
+
+
+
{{ t('admin.accounts.billingRateMultiplierHint') }}
+
@@ -1832,6 +1837,7 @@ const form = reactive({
proxy_id: null as number | null,
concurrency: 10,
priority: 1,
+ rate_multiplier: 1,
group_ids: [] as number[],
expires_at: null as number | null
})
@@ -2119,6 +2125,7 @@ const resetForm = () => {
form.proxy_id = null
form.concurrency = 10
form.priority = 1
+ form.rate_multiplier = 1
form.group_ids = []
form.expires_at = null
accountCategory.value = 'oauth-based'
@@ -2272,6 +2279,7 @@ const createAccountAndFinish = async (
proxy_id: form.proxy_id,
concurrency: form.concurrency,
priority: form.priority,
+ rate_multiplier: form.rate_multiplier,
group_ids: form.group_ids,
expires_at: form.expires_at,
auto_pause_on_expired: autoPauseOnExpired.value
@@ -2490,6 +2498,7 @@ const handleCookieAuth = async (sessionKey: string) => {
proxy_id: form.proxy_id,
concurrency: form.concurrency,
priority: form.priority,
+ rate_multiplier: form.rate_multiplier,
group_ids: form.group_ids,
expires_at: form.expires_at,
auto_pause_on_expired: autoPauseOnExpired.value
diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue
index 7cb740bd..00cd9b24 100644
--- a/frontend/src/components/account/EditAccountModal.vue
+++ b/frontend/src/components/account/EditAccountModal.vue
@@ -549,7 +549,7 @@
-
+
@@ -564,6 +564,11 @@
data-tour="account-form-priority"
/>
+
+
+
+
{{ t('admin.accounts.billingRateMultiplierHint') }}
+
@@ -807,6 +812,7 @@ const form = reactive({
proxy_id: null as number | null,
concurrency: 1,
priority: 1,
+ rate_multiplier: 1,
status: 'active' as 'active' | 'inactive',
group_ids: [] as number[],
expires_at: null as number | null
@@ -834,6 +840,7 @@ watch(
form.proxy_id = newAccount.proxy_id
form.concurrency = newAccount.concurrency
form.priority = newAccount.priority
+ form.rate_multiplier = newAccount.rate_multiplier ?? 1
form.status = newAccount.status as 'active' | 'inactive'
form.group_ids = newAccount.group_ids || []
form.expires_at = newAccount.expires_at ?? null
diff --git a/frontend/src/components/account/UsageProgressBar.vue b/frontend/src/components/account/UsageProgressBar.vue
index 1b3561ef..93844295 100644
--- a/frontend/src/components/account/UsageProgressBar.vue
+++ b/frontend/src/components/account/UsageProgressBar.vue
@@ -15,7 +15,13 @@
{{ formatTokens }}
- ${{ formatCost }}
+ A ${{ formatAccountCost }}
+
+ U ${{ formatUserCost }}
+
@@ -149,8 +155,13 @@ const formatTokens = computed(() => {
return t.toString()
})
-const formatCost = computed(() => {
+const formatAccountCost = computed(() => {
if (!props.windowStats) return '0.00'
return props.windowStats.cost.toFixed(2)
})
+
+const formatUserCost = computed(() => {
+ if (!props.windowStats || props.windowStats.user_cost == null) return '0.00'
+ return props.windowStats.user_cost.toFixed(2)
+})
diff --git a/frontend/src/components/admin/account/AccountStatsModal.vue b/frontend/src/components/admin/account/AccountStatsModal.vue
index 138f5811..72a71d36 100644
--- a/frontend/src/components/admin/account/AccountStatsModal.vue
+++ b/frontend/src/components/admin/account/AccountStatsModal.vue
@@ -61,11 +61,12 @@
{{ t('admin.accounts.stats.accumulatedCost') }}
- ({{ t('admin.accounts.stats.standardCost') }}: ${{
+
+ ({{ t('usage.userBilled') }}: ${{ formatCost(stats.summary.total_user_cost) }} ·
+ {{ t('admin.accounts.stats.standardCost') }}: ${{
formatCost(stats.summary.total_standard_cost)
- }})
+ }})
+
@@ -108,12 +109,15 @@
${{ formatCost(stats.summary.avg_daily_cost) }}
-
+
{{
t('admin.accounts.stats.basedOnActualDays', {
days: stats.summary.actual_days_used
})
}}
+
+ ({{ t('usage.userBilled') }}: ${{ formatCost(stats.summary.avg_daily_user_cost) }})
+
@@ -164,13 +168,17 @@
- {{
- t('admin.accounts.stats.cost')
- }}
+ {{ t('usage.accountBilled') }}
${{ formatCost(stats.summary.today?.cost || 0) }}
+
+ {{ t('usage.userBilled') }}
+ ${{ formatCost(stats.summary.today?.user_cost || 0) }}
+
{{
t('admin.accounts.stats.requests')
@@ -210,13 +218,17 @@
}}
- {{
- t('admin.accounts.stats.cost')
- }}
+ {{ t('usage.accountBilled') }}
${{ formatCost(stats.summary.highest_cost_day?.cost || 0) }}
+
+ {{ t('usage.userBilled') }}
+ ${{ formatCost(stats.summary.highest_cost_day?.user_cost || 0) }}
+
{{
t('admin.accounts.stats.requests')
@@ -260,13 +272,17 @@
}}
- {{
- t('admin.accounts.stats.cost')
- }}
+ {{ t('usage.accountBilled') }}
${{ formatCost(stats.summary.highest_request_day?.cost || 0) }}
+
+ {{ t('usage.userBilled') }}
+ ${{ formatCost(stats.summary.highest_request_day?.user_cost || 0) }}
+
@@ -485,14 +501,24 @@ const trendChartData = computed(() => {
labels: stats.value.history.map((h) => h.label),
datasets: [
{
- label: t('admin.accounts.stats.cost') + ' (USD)',
- data: stats.value.history.map((h) => h.cost),
+ label: t('usage.accountBilled') + ' (USD)',
+ data: stats.value.history.map((h) => h.actual_cost),
borderColor: '#3b82f6',
backgroundColor: 'rgba(59, 130, 246, 0.1)',
fill: true,
tension: 0.3,
yAxisID: 'y'
},
+ {
+ label: t('usage.userBilled') + ' (USD)',
+ data: stats.value.history.map((h) => h.user_cost),
+ borderColor: '#10b981',
+ backgroundColor: 'rgba(16, 185, 129, 0.08)',
+ fill: false,
+ tension: 0.3,
+ borderDash: [5, 5],
+ yAxisID: 'y'
+ },
{
label: t('admin.accounts.stats.requests'),
data: stats.value.history.map((h) => h.requests),
@@ -570,7 +596,7 @@ const lineChartOptions = computed(() => ({
},
title: {
display: true,
- text: t('admin.accounts.stats.cost') + ' (USD)',
+ text: t('usage.accountBilled') + ' (USD)',
color: '#3b82f6',
font: {
size: 11
diff --git a/frontend/src/components/admin/usage/UsageStatsCards.vue b/frontend/src/components/admin/usage/UsageStatsCards.vue
index 16ce6619..cd962a09 100644
--- a/frontend/src/components/admin/usage/UsageStatsCards.vue
+++ b/frontend/src/components/admin/usage/UsageStatsCards.vue
@@ -27,9 +27,18 @@
{{ t('usage.totalCost') }}
-
${{ (stats?.total_actual_cost || 0).toFixed(4) }}
-
- {{ t('usage.standardCost') }}: ${{ (stats?.total_cost || 0).toFixed(4) }}
+
+ ${{ ((stats?.total_account_cost ?? stats?.total_actual_cost) || 0).toFixed(4) }}
+
+
+ {{ t('usage.userBilled') }}:
+ ${{ (stats?.total_actual_cost || 0).toFixed(4) }}
+ · {{ t('usage.standardCost') }}:
+ ${{ (stats?.total_cost || 0).toFixed(4) }}
+
+
+ {{ t('usage.standardCost') }}:
+ ${{ (stats?.total_cost || 0).toFixed(4) }}
diff --git a/frontend/src/components/admin/usage/UsageTable.vue b/frontend/src/components/admin/usage/UsageTable.vue
index a66e4b7b..d2260c59 100644
--- a/frontend/src/components/admin/usage/UsageTable.vue
+++ b/frontend/src/components/admin/usage/UsageTable.vue
@@ -81,18 +81,23 @@
-
-
${{ row.actual_cost?.toFixed(6) || '0.000000' }}
-
-
-
-
+
+
+
${{ row.actual_cost?.toFixed(6) || '0.000000' }}
+
+
+
+ A ${{ (row.total_cost * (row.account_rate_multiplier ?? 1)).toFixed(6) }}
+
@@ -202,14 +207,24 @@
{{ t('usage.rate') }}
{{ (tooltipData?.rate_multiplier || 1).toFixed(2) }}x
+
+ {{ t('usage.accountMultiplier') }}
+ {{ (tooltipData?.account_rate_multiplier ?? 1).toFixed(2) }}x
+
{{ t('usage.original') }}
${{ tooltipData?.total_cost?.toFixed(6) || '0.000000' }}
-
-
{{ t('usage.billed') }}
+
+ {{ t('usage.userBilled') }}
${{ tooltipData?.actual_cost?.toFixed(6) || '0.000000' }}
+
+ {{ t('usage.accountBilled') }}
+
+ ${{ (((tooltipData?.total_cost || 0) * (tooltipData?.account_rate_multiplier ?? 1)) || 0).toFixed(6) }}
+
+
diff --git a/frontend/src/components/admin/user/UserCreateModal.vue b/frontend/src/components/admin/user/UserCreateModal.vue
index f2ab1e02..0e44d81e 100644
--- a/frontend/src/components/admin/user/UserCreateModal.vue
+++ b/frontend/src/components/admin/user/UserCreateModal.vue
@@ -25,7 +25,7 @@
-
+
diff --git a/frontend/src/components/common/DataTable.vue b/frontend/src/components/common/DataTable.vue
index dc492d36..eab337ac 100644
--- a/frontend/src/components/common/DataTable.vue
+++ b/frontend/src/components/common/DataTable.vue
@@ -1,7 +1,68 @@
+
+
+
+
+
+
+
+
+
+
+
+ {{ t('empty.noData') }}
+
+
+
+
+
+
+
+
+
+
+
+ {{ column.label }}
+
+
+
+ {{ column.formatter ? column.formatter(row[column.key], row) : row[column.key] }}
+
+
+
+
+
+
+
+
+
+
+
-
-
{{ column.label }}
-
-
-
-
-
+
+
+
{{ column.label }}
+
+
+
+
+
+
@@ -277,7 +345,10 @@ const sortedData = computed(() => {
})
})
-// Check whether the first column is the select (checkbox) column
+const hasActionsColumn = computed(() => {
+ return props.columns.some(column => column.key === 'actions')
+})
+
const hasSelectColumn = computed(() => {
return props.columns.length > 0 && props.columns[0].key === 'select'
})
diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts
index 3c4fbcc1..09ebe8e1 100644
--- a/frontend/src/i18n/locales/en.ts
+++ b/frontend/src/i18n/locales/en.ts
@@ -129,6 +129,8 @@ export default {
all: 'All',
none: 'None',
noData: 'No data',
+ expand: 'Expand',
+ collapse: 'Collapse',
success: 'Success',
error: 'Error',
critical: 'Critical',
@@ -150,12 +152,13 @@ export default {
invalidEmail: 'Please enter a valid email address',
optional: 'optional',
selectOption: 'Select an option',
- searchPlaceholder: 'Search...',
- noOptionsFound: 'No options found',
- noGroupsAvailable: 'No groups available',
- unknownError: 'Unknown error occurred',
- saving: 'Saving...',
- selectedCount: '({count} selected)', refresh: 'Refresh',
+ searchPlaceholder: 'Search...',
+ noOptionsFound: 'No options found',
+ noGroupsAvailable: 'No groups available',
+ unknownError: 'Unknown error occurred',
+ saving: 'Saving...',
+ selectedCount: '({count} selected)',
+ refresh: 'Refresh',
settings: 'Settings',
notAvailable: 'N/A',
now: 'Now',
@@ -429,6 +432,9 @@ export default {
totalCost: 'Total Cost',
standardCost: 'Standard',
actualCost: 'Actual',
+ userBilled: 'User billed',
+ accountBilled: 'Account billed',
+ accountMultiplier: 'Account rate',
avgDuration: 'Avg Duration',
inSelectedRange: 'in selected range',
perRequest: 'per request',
@@ -1059,6 +1065,7 @@ export default {
concurrencyStatus: 'Concurrency',
notes: 'Notes',
priority: 'Priority',
+ billingRateMultiplier: 'Billing Rate',
weight: 'Weight',
status: 'Status',
schedulable: 'Schedulable',
@@ -1226,6 +1233,8 @@ export default {
concurrency: 'Concurrency',
priority: 'Priority',
priorityHint: 'Lower value accounts are used first',
+ billingRateMultiplier: 'Billing Rate Multiplier',
+ billingRateMultiplierHint: '>=0, 0 means free. Affects account billing only',
expiresAt: 'Expires At',
expiresAtHint: 'Leave empty for no expiration',
higherPriorityFirst: 'Lower value means higher priority',
@@ -1627,11 +1636,29 @@ export default {
address: 'Address',
status: 'Status',
accounts: 'Accounts',
+ latency: 'Latency',
actions: 'Actions'
},
testConnection: 'Test Connection',
batchTest: 'Test All Proxies',
testFailed: 'Failed',
+ latencyFailed: 'Connection failed',
+ batchTestEmpty: 'No proxies available for testing',
+ batchTestDone: 'Batch test completed for {count} proxies',
+ batchTestFailed: 'Batch test failed',
+ batchDeleteAction: 'Delete',
+ batchDelete: 'Batch delete',
+ batchDeleteConfirm: 'Delete {count} selected proxies? In-use ones will be skipped.',
+ batchDeleteDone: 'Deleted {deleted} proxies, skipped {skipped}',
+ batchDeleteSkipped: 'Skipped {skipped} proxies',
+ batchDeleteFailed: 'Batch delete failed',
+ deleteBlockedInUse: 'This proxy is in use and cannot be deleted',
+ accountsTitle: 'Accounts using this IP',
+ accountsEmpty: 'No accounts are using this proxy',
+ accountsFailed: 'Failed to load accounts list',
+ accountName: 'Account',
+ accountPlatform: 'Platform',
+ accountNotes: 'Notes',
name: 'Name',
protocol: 'Protocol',
host: 'Host',
@@ -1858,10 +1885,8 @@ export default {
noSystemMetrics: 'No system metrics collected yet.',
collectedAt: 'Collected at:',
window: 'window',
- cpu: 'CPU',
memory: 'Memory',
db: 'DB',
- redis: 'Redis',
goroutines: 'Goroutines',
jobs: 'Jobs',
jobsHelp: 'Click “Details” to view job heartbeats and recent errors',
@@ -1887,7 +1912,7 @@ export default {
totalRequests: 'Total Requests',
avgQps: 'Avg QPS',
avgTps: 'Avg TPS',
- avgLatency: 'Avg Latency',
+ avgLatency: 'Avg Request Duration',
avgTtft: 'Avg TTFT',
exceptions: 'Exceptions',
requestErrors: 'Request Errors',
@@ -1899,7 +1924,7 @@ export default {
errors: 'Errors',
errorRate: 'error_rate:',
upstreamRate: 'upstream_rate:',
- latencyDuration: 'Latency (duration_ms)',
+ latencyDuration: 'Request Duration (ms)',
ttftLabel: 'TTFT (first_token_ms)',
p50: 'p50:',
p90: 'p90:',
@@ -1907,7 +1932,6 @@ export default {
p99: 'p99:',
avg: 'avg:',
max: 'max:',
- qps: 'QPS',
requests: 'Requests',
requestsTitle: 'Requests',
upstream: 'Upstream',
@@ -1919,7 +1943,7 @@ export default {
failedToLoadData: 'Failed to load ops data.',
failedToLoadOverview: 'Failed to load overview',
failedToLoadThroughputTrend: 'Failed to load throughput trend',
- failedToLoadLatencyHistogram: 'Failed to load latency histogram',
+ failedToLoadLatencyHistogram: 'Failed to load request duration histogram',
failedToLoadErrorTrend: 'Failed to load error trend',
failedToLoadErrorDistribution: 'Failed to load error distribution',
failedToLoadErrorDetail: 'Failed to load error detail',
@@ -1927,7 +1951,7 @@ export default {
tpsK: 'TPS (K)',
top: 'Top:',
throughputTrend: 'Throughput Trend',
- latencyHistogram: 'Latency Histogram',
+ latencyHistogram: 'Request Duration Histogram',
errorTrend: 'Error Trend',
errorDistribution: 'Error Distribution',
// Health Score & Diagnosis
@@ -1942,7 +1966,9 @@ export default {
'30m': 'Last 30 minutes',
'1h': 'Last 1 hour',
'6h': 'Last 6 hours',
- '24h': 'Last 24 hours'
+ '24h': 'Last 24 hours',
+ '7d': 'Last 7 days',
+ '30d': 'Last 30 days'
},
fullscreen: {
enter: 'Enter Fullscreen'
@@ -1971,14 +1997,7 @@ export default {
memoryHigh: 'Memory usage elevated ({usage}%)',
memoryHighImpact: 'Memory pressure is high, needs attention',
memoryHighAction: 'Monitor memory trends, check for memory leaks',
- // Latency diagnostics
- latencyCritical: 'Response latency critically high ({latency}ms)',
- latencyCriticalImpact: 'User experience extremely poor, many requests timing out',
- latencyCriticalAction: 'Check slow queries, database indexes, network latency, and upstream services',
- latencyHigh: 'Response latency elevated ({latency}ms)',
- latencyHighImpact: 'User experience degraded, needs optimization',
- latencyHighAction: 'Analyze slow request logs, optimize database queries and business logic',
- ttftHigh: 'Time to first byte elevated ({ttft}ms)',
+ ttftHigh: 'Time to first token elevated ({ttft}ms)',
ttftHighImpact: 'User perceived latency increased',
ttftHighAction: 'Optimize request processing flow, reduce pre-processing time',
// Error rate diagnostics
@@ -2014,27 +2033,106 @@ export default {
// Error Log
errorLog: {
timeId: 'Time / ID',
+ commonErrors: {
+ contextDeadlineExceeded: 'context deadline exceeded',
+ connectionRefused: 'connection refused',
+ rateLimit: 'rate limit'
+ },
+ time: 'Time',
+ type: 'Type',
context: 'Context',
+ platform: 'Platform',
+ model: 'Model',
+ group: 'Group',
+ user: 'User',
+ userId: 'User ID',
+ account: 'Account',
+ accountId: 'Account ID',
status: 'Status',
message: 'Message',
- latency: 'Latency',
+ latency: 'Request Duration',
action: 'Action',
noErrors: 'No errors in this window.',
grp: 'GRP:',
acc: 'ACC:',
details: 'Details',
- phase: 'Phase'
+ phase: 'Phase',
+ id: 'ID:',
+ typeUpstream: 'Upstream',
+ typeRequest: 'Request',
+ typeAuth: 'Auth',
+ typeRouting: 'Routing',
+ typeInternal: 'Internal'
},
// Error Details Modal
errorDetails: {
upstreamErrors: 'Upstream Errors',
requestErrors: 'Request Errors',
+ unresolved: 'Unresolved',
+ resolved: 'Resolved',
+ viewErrors: 'Errors',
+ viewExcluded: 'Excluded',
+ statusCodeOther: 'Other',
+ owner: {
+ provider: 'Provider',
+ client: 'Client',
+ platform: 'Platform'
+ },
+ phase: {
+ request: 'Request',
+ auth: 'Auth',
+ routing: 'Routing',
+ upstream: 'Upstream',
+ network: 'Network',
+ internal: 'Internal'
+ },
total: 'Total:',
searchPlaceholder: 'Search request_id / client_request_id / message',
- accountIdPlaceholder: 'account_id'
},
// Error Detail Modal
errorDetail: {
+ title: 'Error Detail',
+ titleWithId: 'Error #{id}',
+ noErrorSelected: 'No error selected.',
+ resolution: 'Resolved:',
+ pinnedToOriginalAccountId: 'Pinned to original account_id',
+ missingUpstreamRequestBody: 'Missing upstream request body',
+ failedToLoadRetryHistory: 'Failed to load retry history',
+ failedToUpdateResolvedStatus: 'Failed to update resolved status',
+ unsupportedRetryMode: 'Unsupported retry mode',
+ classificationKeys: {
+ phase: 'Phase',
+ owner: 'Owner',
+ source: 'Source',
+ retryable: 'Retryable',
+ resolvedAt: 'Resolved At',
+ resolvedBy: 'Resolved By',
+ resolvedRetryId: 'Resolved Retry',
+ retryCount: 'Retry Count'
+ },
+ source: {
+ upstream_http: 'Upstream HTTP'
+ },
+ upstreamKeys: {
+ status: 'Status',
+ message: 'Message',
+ detail: 'Detail',
+ upstreamErrors: 'Upstream Errors'
+ },
+ upstreamEvent: {
+ account: 'Account',
+ status: 'Status',
+ requestId: 'Request ID'
+ },
+ responsePreview: {
+ expand: 'Response (click to expand)',
+ collapse: 'Response (click to collapse)'
+ },
+ retryMeta: {
+ used: 'Used',
+ success: 'Success',
+ pinned: 'Pinned'
+ },
loading: 'Loading…',
requestId: 'Request ID',
time: 'Time',
@@ -2044,8 +2142,10 @@ export default {
basicInfo: 'Basic Info',
platform: 'Platform',
model: 'Model',
- latency: 'Latency',
- ttft: 'TTFT',
+ group: 'Group',
+ user: 'User',
+ account: 'Account',
+ latency: 'Request Duration',
businessLimited: 'Business Limited',
requestPath: 'Request Path',
timings: 'Timings',
@@ -2053,6 +2153,8 @@ export default {
routing: 'Routing',
upstream: 'Upstream',
response: 'Response',
+ classification: 'Classification',
+ notRetryable: 'Not recommended to retry',
retry: 'Retry',
retryClient: 'Retry (Client)',
retryUpstream: 'Retry (Upstream pinned)',
@@ -2064,7 +2166,6 @@ export default {
confirmRetry: 'Confirm Retry',
retrySuccess: 'Retry succeeded',
retryFailed: 'Retry failed',
- na: 'N/A',
retryHint: 'Retry will resend the request with the same parameters',
retryClientHint: 'Use client retry (no account pinning)',
retryUpstreamHint: 'Use upstream pinned retry (pin to the error account)',
@@ -2072,8 +2173,33 @@ export default {
retryNote1: 'Retry will use the same request body and parameters',
retryNote2: 'If the original request failed due to account issues, pinned retry may still fail',
retryNote3: 'Client retry will reselect an account',
+ retryNote4: 'You can force retry for non-retryable errors, but it is not recommended',
confirmRetryMessage: 'Confirm retry this request?',
- confirmRetryHint: 'Will resend with the same request parameters'
+ confirmRetryHint: 'Will resend with the same request parameters',
+ forceRetry: 'I understand and want to force retry',
+ forceRetryHint: 'This error usually cannot be fixed by retry; check to proceed',
+ forceRetryNeedAck: 'Please check to force retry',
+ markResolved: 'Mark resolved',
+ markUnresolved: 'Mark unresolved',
+ viewRetries: 'Retry history',
+ retryHistory: 'Retry History',
+ tabOverview: 'Overview',
+ tabRetries: 'Retries',
+ tabRequest: 'Request',
+ tabResponse: 'Response',
+ responseBody: 'Response',
+ compareA: 'Compare A',
+ compareB: 'Compare B',
+ retrySummary: 'Retry Summary',
+ responseHintSucceeded: 'Showing succeeded retry response_preview (#{id})',
+ responseHintFallback: 'No succeeded retry found; showing stored error_body',
+ suggestion: 'Suggestion',
+ suggestUpstreamResolved: '✓ Upstream error resolved by retry; no action needed',
+ suggestUpstream: 'Upstream instability: check account status, consider switching accounts, or retry',
+ suggestRequest: 'Client request error: ask customer to fix request parameters',
+ suggestAuth: 'Auth failed: verify API key/credentials',
+ suggestPlatform: 'Platform error: prioritize investigation and fix',
+ suggestGeneric: 'See details for more context'
},
requestDetails: {
title: 'Request Details',
@@ -2109,13 +2235,46 @@ export default {
loading: 'Loading...',
empty: 'No alert events',
loadFailed: 'Failed to load alert events',
+ status: {
+ firing: 'FIRING',
+ resolved: 'RESOLVED',
+ manualResolved: 'MANUAL RESOLVED'
+ },
+ detail: {
+ title: 'Alert Detail',
+ loading: 'Loading detail...',
+ empty: 'No detail',
+ loadFailed: 'Failed to load alert detail',
+ manualResolve: 'Mark as Resolved',
+ manualResolvedSuccess: 'Marked as manually resolved',
+ manualResolvedFailed: 'Failed to mark as manually resolved',
+ silence: 'Ignore Alert',
+ silenceSuccess: 'Alert silenced',
+ silenceFailed: 'Failed to silence alert',
+ viewRule: 'View Rule',
+ viewLogs: 'View Logs',
+ firedAt: 'Fired At',
+ resolvedAt: 'Resolved At',
+ ruleId: 'Rule ID',
+ dimensions: 'Dimensions',
+ historyTitle: 'History',
+ historyHint: 'Recent events with same rule + dimensions',
+ historyLoading: 'Loading history...',
+ historyEmpty: 'No history'
+ },
table: {
time: 'Time',
status: 'Status',
severity: 'Severity',
+ platform: 'Platform',
+ ruleId: 'Rule ID',
title: 'Title',
+ duration: 'Duration',
metric: 'Metric / Threshold',
- email: 'Email Sent'
+ dimensions: 'Dimensions',
+ email: 'Email Sent',
+ emailSent: 'Sent',
+ emailIgnored: 'Ignored'
}
},
alertRules: {
@@ -2229,7 +2388,6 @@ export default {
title: 'Alert Silencing (Maintenance Mode)',
enabled: 'Enable silencing',
globalUntil: 'Silence until (RFC3339)',
- untilPlaceholder: '2026-01-05T00:00:00Z',
untilHint: 'Leave empty to only toggle silencing without an expiry (not recommended).',
reason: 'Reason',
reasonPlaceholder: 'e.g., planned maintenance',
@@ -2269,7 +2427,11 @@ export default {
lockKeyRequired: 'Distributed lock key is required when lock is enabled',
lockKeyPrefix: 'Distributed lock key must start with "{prefix}"',
lockKeyHint: 'Recommended: start with "{prefix}" to avoid conflicts',
- lockTtlRange: 'Distributed lock TTL must be between 1 and 86400 seconds'
+ lockTtlRange: 'Distributed lock TTL must be between 1 and 86400 seconds',
+ slaMinPercentRange: 'SLA minimum percentage must be between 0 and 100',
+ ttftP99MaxRange: 'TTFT P99 maximum must be a number ≥ 0',
+ requestErrorRateMaxRange: 'Request error rate maximum must be between 0 and 100',
+ upstreamErrorRateMaxRange: 'Upstream error rate maximum must be between 0 and 100'
}
},
email: {
@@ -2334,8 +2496,6 @@ export default {
metricThresholdsHint: 'Configure alert thresholds for metrics, values exceeding thresholds will be displayed in red',
slaMinPercent: 'SLA Minimum Percentage',
slaMinPercentHint: 'SLA below this value will be displayed in red (default: 99.5%)',
- latencyP99MaxMs: 'Latency P99 Maximum (ms)',
- latencyP99MaxMsHint: 'Latency P99 above this value will be displayed in red (default: 2000ms)',
ttftP99MaxMs: 'TTFT P99 Maximum (ms)',
ttftP99MaxMsHint: 'TTFT P99 above this value will be displayed in red (default: 500ms)',
requestErrorRateMaxPercent: 'Request Error Rate Maximum (%)',
@@ -2354,9 +2514,28 @@ export default {
aggregation: 'Pre-aggregation Tasks',
enableAggregation: 'Enable Pre-aggregation',
aggregationHint: 'Pre-aggregation improves query performance for long time windows',
+ errorFiltering: 'Error Filtering',
+ ignoreCountTokensErrors: 'Ignore count_tokens errors',
+ ignoreCountTokensErrorsHint: 'When enabled, errors from count_tokens requests will not be written to the error log.',
+ ignoreContextCanceled: 'Ignore client disconnect errors',
+ ignoreContextCanceledHint: 'When enabled, client disconnect (context canceled) errors will not be written to the error log.',
+ ignoreNoAvailableAccounts: 'Ignore no available accounts errors',
+ ignoreNoAvailableAccountsHint: 'When enabled, "No available accounts" errors will not be written to the error log (not recommended; usually a config issue).',
+ autoRefresh: 'Auto Refresh',
+ enableAutoRefresh: 'Enable auto refresh',
+ enableAutoRefreshHint: 'Automatically refresh dashboard data at a fixed interval.',
+ refreshInterval: 'Refresh Interval',
+ refreshInterval15s: '15 seconds',
+ refreshInterval30s: '30 seconds',
+ refreshInterval60s: '60 seconds',
+ autoRefreshCountdown: 'Auto refresh: {seconds}s',
validation: {
title: 'Please fix the following issues',
- retentionDaysRange: 'Retention days must be between 1-365 days'
+ retentionDaysRange: 'Retention days must be between 1-365 days',
+ slaMinPercentRange: 'SLA minimum percentage must be between 0 and 100',
+ ttftP99MaxRange: 'TTFT P99 maximum must be a number ≥ 0',
+ requestErrorRateMaxRange: 'Request error rate maximum must be between 0 and 100',
+ upstreamErrorRateMaxRange: 'Upstream error rate maximum must be between 0 and 100'
}
},
concurrency: {
@@ -2394,7 +2573,7 @@ export default {
tooltips: {
totalRequests: 'Total number of requests (including both successful and failed requests) in the selected time window.',
throughputTrend: 'Requests/QPS + Tokens/TPS in the selected window.',
- latencyHistogram: 'Latency distribution (duration_ms) for successful requests.',
+ latencyHistogram: 'Request duration distribution (ms) for successful requests.',
errorTrend: 'Error counts over time (SLA scope excludes business limits; upstream excludes 429/529).',
errorDistribution: 'Error distribution by status code.',
goroutines:
@@ -2409,7 +2588,7 @@ export default {
sla: 'Service Level Agreement success rate, excluding business limits (e.g., insufficient balance, quota exceeded).',
errors: 'Error statistics, including total errors, error rate, and upstream error rate.',
upstreamErrors: 'Upstream error statistics, excluding rate limit errors (429/529).',
- latency: 'Request latency statistics, including p50, p90, p95, p99 percentiles.',
+ latency: 'Request duration statistics, including p50, p90, p95, p99 percentiles.',
ttft: 'Time To First Token, measuring the speed of first byte return in streaming responses.',
health: 'System health score (0-100), considering SLA, error rate, and resource usage.'
},
diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts
index 70d2fb35..f01c1e4b 100644
--- a/frontend/src/i18n/locales/zh.ts
+++ b/frontend/src/i18n/locales/zh.ts
@@ -126,6 +126,8 @@ export default {
all: '全部',
none: '无',
noData: '暂无数据',
+ expand: '展开',
+ collapse: '收起',
success: '成功',
error: '错误',
critical: '严重',
@@ -426,6 +428,9 @@ export default {
totalCost: '总消费',
standardCost: '标准',
actualCost: '实际',
+ userBilled: '用户扣费',
+ accountBilled: '账号计费',
+ accountMultiplier: '账号倍率',
avgDuration: '平均耗时',
inSelectedRange: '所选范围内',
perRequest: '每次请求',
@@ -1109,6 +1114,7 @@ export default {
concurrencyStatus: '并发',
notes: '备注',
priority: '优先级',
+ billingRateMultiplier: '账号倍率',
weight: '权重',
status: '状态',
schedulable: '调度',
@@ -1360,6 +1366,8 @@ export default {
concurrency: '并发数',
priority: '优先级',
priorityHint: '优先级越小的账号优先使用',
+ billingRateMultiplier: '账号计费倍率',
+ billingRateMultiplierHint: '取值需 >= 0;0 表示该账号计费为 0,仅影响账号计费口径',
expiresAt: '过期时间',
expiresAtHint: '留空表示不过期',
higherPriorityFirst: '数值越小优先级越高',
@@ -1713,6 +1721,7 @@ export default {
address: '地址',
status: '状态',
accounts: '账号数',
+ latency: '延迟',
actions: '操作',
nameLabel: '名称',
namePlaceholder: '请输入代理名称',
@@ -1749,11 +1758,32 @@ export default {
enterProxyName: '请输入代理名称',
optionalAuth: '可选认证信息',
leaveEmptyToKeep: '留空保持不变',
+ form: {
+ hostPlaceholder: '请输入主机地址',
+ portPlaceholder: '请输入端口'
+ },
noProxiesYet: '暂无代理',
createFirstProxy: '添加您的第一个代理以开始使用。',
testConnection: '测试连接',
batchTest: '批量测试',
testFailed: '失败',
+ latencyFailed: '连接失败',
+ batchTestEmpty: '暂无可测试的代理',
+ batchTestDone: '批量测试完成,共测试 {count} 个代理',
+ batchTestFailed: '批量测试失败',
+ batchDeleteAction: '删除',
+ batchDelete: '批量删除',
+ batchDeleteConfirm: '确定删除选中的 {count} 个代理吗?已被账号使用的将自动跳过。',
+ batchDeleteDone: '已删除 {deleted} 个代理,跳过 {skipped} 个',
+ batchDeleteSkipped: '已跳过 {skipped} 个代理',
+ batchDeleteFailed: '批量删除失败',
+ deleteBlockedInUse: '该代理已有账号使用,无法删除',
+ accountsTitle: '使用该代理的账号',
+ accountsEmpty: '暂无账号使用此代理',
+ accountsFailed: '获取账号列表失败',
+ accountName: '账号名称',
+ accountPlatform: '所属平台',
+ accountNotes: '备注',
// Batch import
standardAdd: '标准添加',
batchAdd: '快捷添加',
@@ -2003,10 +2033,8 @@ export default {
noSystemMetrics: '尚未收集系统指标。',
collectedAt: '采集时间:',
window: '窗口',
- cpu: 'CPU',
memory: '内存',
db: '数据库',
- redis: 'Redis',
goroutines: '协程',
jobs: '后台任务',
jobsHelp: '点击“明细”查看任务心跳与报错信息',
@@ -2032,7 +2060,7 @@ export default {
totalRequests: '总请求',
avgQps: '平均 QPS',
avgTps: '平均 TPS',
- avgLatency: '平均延迟',
+ avgLatency: '平均请求时长',
avgTtft: '平均首字延迟',
exceptions: '异常数',
requestErrors: '请求错误',
@@ -2044,7 +2072,7 @@ export default {
errors: '错误',
errorRate: '错误率:',
upstreamRate: '上游错误率:',
- latencyDuration: '延迟(毫秒)',
+ latencyDuration: '请求时长(毫秒)',
ttftLabel: '首字延迟(毫秒)',
p50: 'p50',
p90: 'p90',
@@ -2052,7 +2080,6 @@ export default {
p99: 'p99',
avg: 'avg',
max: 'max',
- qps: 'QPS',
requests: '请求数',
requestsTitle: '请求',
upstream: '上游',
@@ -2064,7 +2091,7 @@ export default {
failedToLoadData: '加载运维数据失败',
failedToLoadOverview: '加载概览数据失败',
failedToLoadThroughputTrend: '加载吞吐趋势失败',
- failedToLoadLatencyHistogram: '加载延迟分布失败',
+ failedToLoadLatencyHistogram: '加载请求时长分布失败',
failedToLoadErrorTrend: '加载错误趋势失败',
failedToLoadErrorDistribution: '加载错误分布失败',
failedToLoadErrorDetail: '加载错误详情失败',
@@ -2072,7 +2099,7 @@ export default {
tpsK: 'TPS(千)',
top: '最高:',
throughputTrend: '吞吐趋势',
- latencyHistogram: '延迟分布',
+ latencyHistogram: '请求时长分布',
errorTrend: '错误趋势',
errorDistribution: '错误分布',
// Health Score & Diagnosis
@@ -2087,7 +2114,9 @@ export default {
'30m': '近30分钟',
'1h': '近1小时',
'6h': '近6小时',
- '24h': '近24小时'
+ '24h': '近24小时',
+ '7d': '近7天',
+ '30d': '近30天'
},
fullscreen: {
enter: '进入全屏'
@@ -2116,15 +2145,8 @@ export default {
memoryHigh: '内存使用率偏高 ({usage}%)',
memoryHighImpact: '内存压力较大,需要关注',
memoryHighAction: '监控内存趋势,检查是否有内存泄漏',
- // Latency diagnostics
- latencyCritical: '响应延迟严重过高 ({latency}ms)',
- latencyCriticalImpact: '用户体验极差,大量请求超时',
- latencyCriticalAction: '检查慢查询、数据库索引、网络延迟和上游服务',
- latencyHigh: '响应延迟偏高 ({latency}ms)',
- latencyHighImpact: '用户体验下降,需要优化',
- latencyHighAction: '分析慢请求日志,优化数据库查询和业务逻辑',
ttftHigh: '首字节时间偏高 ({ttft}ms)',
- ttftHighImpact: '用户感知延迟增加',
+ ttftHighImpact: '用户感知时长增加',
ttftHighAction: '优化请求处理流程,减少前置逻辑耗时',
// Error rate diagnostics
upstreamCritical: '上游错误率严重偏高 ({rate}%)',
@@ -2142,13 +2164,13 @@ export default {
// SLA diagnostics
slaCritical: 'SLA 严重低于目标 ({sla}%)',
slaCriticalImpact: '用户体验严重受损',
- slaCriticalAction: '紧急排查错误和延迟问题,考虑限流保护',
+ slaCriticalAction: '紧急排查错误原因,必要时采取限流保护',
slaLow: 'SLA 低于目标 ({sla}%)',
slaLowImpact: '需要关注服务质量',
slaLowAction: '分析SLA下降原因,优化系统性能',
// Health score diagnostics
healthCritical: '综合健康评分过低 ({score})',
- healthCriticalImpact: '多个指标可能同时异常,建议优先排查错误与延迟',
+ healthCriticalImpact: '多个指标可能同时异常,建议优先排查错误与资源使用情况',
healthCriticalAction: '全面检查系统状态,优先处理critical级别问题',
healthLow: '综合健康评分偏低 ({score})',
healthLowImpact: '可能存在轻度波动,建议关注 SLA 与错误率',
@@ -2159,27 +2181,106 @@ export default {
// Error Log
errorLog: {
timeId: '时间 / ID',
+ commonErrors: {
+ contextDeadlineExceeded: '请求超时',
+ connectionRefused: '连接被拒绝',
+ rateLimit: '触发限流'
+ },
+ time: '时间',
+ type: '类型',
context: '上下文',
+ platform: '平台',
+ model: '模型',
+ group: '分组',
+ user: '用户',
+ userId: '用户 ID',
+ account: '账号',
+ accountId: '账号 ID',
status: '状态码',
- message: '消息',
- latency: '延迟',
+ message: '响应内容',
+ latency: '请求时长',
action: '操作',
noErrors: '该窗口内暂无错误。',
grp: 'GRP:',
acc: 'ACC:',
details: '详情',
- phase: '阶段'
+ phase: '阶段',
+ id: 'ID:',
+ typeUpstream: '上游',
+ typeRequest: '请求',
+ typeAuth: '认证',
+ typeRouting: '路由',
+ typeInternal: '内部'
},
// Error Details Modal
errorDetails: {
upstreamErrors: '上游错误',
requestErrors: '请求错误',
+ unresolved: '未解决',
+ resolved: '已解决',
+ viewErrors: '错误',
+ viewExcluded: '排除项',
+ statusCodeOther: '其他',
+ owner: {
+ provider: '服务商',
+ client: '客户端',
+ platform: '平台'
+ },
+ phase: {
+ request: '请求',
+ auth: '认证',
+ routing: '路由',
+ upstream: '上游',
+ network: '网络',
+ internal: '内部'
+ },
total: '总计:',
searchPlaceholder: '搜索 request_id / client_request_id / message',
- accountIdPlaceholder: 'account_id'
},
// Error Detail Modal
errorDetail: {
+ title: '错误详情',
+ titleWithId: '错误 #{id}',
+ noErrorSelected: '未选择错误。',
+ resolution: '已解决:',
+ pinnedToOriginalAccountId: '固定到原 account_id',
+ missingUpstreamRequestBody: '缺少上游请求体',
+ failedToLoadRetryHistory: '加载重试历史失败',
+ failedToUpdateResolvedStatus: '更新解决状态失败',
+ unsupportedRetryMode: '不支持的重试模式',
+ classificationKeys: {
+ phase: '阶段',
+ owner: '归属方',
+ source: '来源',
+ retryable: '可重试',
+ resolvedAt: '解决时间',
+ resolvedBy: '解决人',
+ resolvedRetryId: '解决重试ID',
+ retryCount: '重试次数'
+ },
+ source: {
+ upstream_http: '上游 HTTP'
+ },
+ upstreamKeys: {
+ status: '状态码',
+ message: '消息',
+ detail: '详情',
+ upstreamErrors: '上游错误列表'
+ },
+ upstreamEvent: {
+ account: '账号',
+ status: '状态码',
+ requestId: '请求ID'
+ },
+ responsePreview: {
+ expand: '响应内容(点击展开)',
+ collapse: '响应内容(点击收起)'
+ },
+ retryMeta: {
+ used: '使用账号',
+ success: '成功',
+ pinned: '固定账号'
+ },
loading: '加载中…',
requestId: '请求 ID',
time: '时间',
@@ -2189,8 +2290,10 @@ export default {
basicInfo: '基本信息',
platform: '平台',
model: '模型',
- latency: '延迟',
- ttft: 'TTFT',
+ group: '分组',
+ user: '用户',
+ account: '账号',
+ latency: '请求时长',
businessLimited: '业务限制',
requestPath: '请求路径',
timings: '时序信息',
@@ -2198,6 +2301,8 @@ export default {
routing: '路由',
upstream: '上游',
response: '响应',
+ classification: '错误分类',
+ notRetryable: '此错误不建议重试',
retry: '重试',
retryClient: '重试(客户端)',
retryUpstream: '重试(上游固定)',
@@ -2209,7 +2314,6 @@ export default {
confirmRetry: '确认重试',
retrySuccess: '重试成功',
retryFailed: '重试失败',
- na: 'N/A',
retryHint: '重试将使用相同的请求参数重新发送请求',
retryClientHint: '使用客户端重试(不固定账号)',
retryUpstreamHint: '使用上游固定重试(固定到错误的账号)',
@@ -2217,8 +2321,33 @@ export default {
retryNote1: '重试会使用相同的请求体和参数',
retryNote2: '如果原请求失败是因为账号问题,固定重试可能仍会失败',
retryNote3: '客户端重试会重新选择账号',
+ retryNote4: '对不可重试的错误可以强制重试,但不推荐',
confirmRetryMessage: '确认要重试该请求吗?',
- confirmRetryHint: '将使用相同的请求参数重新发送'
+ confirmRetryHint: '将使用相同的请求参数重新发送',
+ forceRetry: '我已确认并理解强制重试风险',
+ forceRetryHint: '此错误类型通常不可通过重试解决;如仍需重试请勾选确认',
+ forceRetryNeedAck: '请先勾选确认再强制重试',
+ markResolved: '标记已解决',
+ markUnresolved: '标记未解决',
+ viewRetries: '重试历史',
+ retryHistory: '重试历史',
+ tabOverview: '概览',
+ tabRetries: '重试历史',
+ tabRequest: '请求详情',
+ tabResponse: '响应详情',
+ responseBody: '响应详情',
+ compareA: '对比 A',
+ compareB: '对比 B',
+ retrySummary: '重试摘要',
+ responseHintSucceeded: '展示重试成功的 response_preview(#{id})',
+ responseHintFallback: '没有成功的重试结果,展示存储的 error_body',
+ suggestion: '处理建议',
+ suggestUpstreamResolved: '✓ 上游错误已通过重试解决,无需人工介入',
+ suggestUpstream: '⚠️ 上游服务不稳定,建议:检查上游账号状态 / 考虑切换账号 / 再次重试',
+ suggestRequest: '⚠️ 客户端请求错误,建议:联系客户修正请求参数 / 手动标记已解决',
+ suggestAuth: '⚠️ 认证失败,建议:检查 API Key 是否有效 / 联系客户更新凭证',
+ suggestPlatform: '🚨 平台错误,建议立即排查修复',
+ suggestGeneric: '查看详情了解更多信息'
},
requestDetails: {
title: '请求明细',
@@ -2254,13 +2383,46 @@ export default {
loading: '加载中...',
empty: '暂无告警事件',
loadFailed: '加载告警事件失败',
+ status: {
+ firing: '告警中',
+ resolved: '已恢复',
+ manualResolved: '手动已解决'
+ },
+ detail: {
+ title: '告警详情',
+ loading: '加载详情中...',
+ empty: '暂无详情',
+ loadFailed: '加载告警详情失败',
+ manualResolve: '标记为已解决',
+ manualResolvedSuccess: '已标记为手动解决',
+ manualResolvedFailed: '标记为手动解决失败',
+ silence: '忽略此告警',
+ silenceSuccess: '已静默该告警',
+ silenceFailed: '静默失败',
+ viewRule: '查看规则',
+ viewLogs: '查看相关日志',
+ firedAt: '触发时间',
+ resolvedAt: '解决时间',
+ ruleId: '规则 ID',
+ dimensions: '维度信息',
+ historyTitle: '历史记录',
+ historyHint: '同一规则 + 相同维度的最近事件',
+ historyLoading: '加载历史中...',
+ historyEmpty: '暂无历史记录'
+ },
table: {
time: '时间',
status: '状态',
severity: '级别',
+ platform: '平台',
+ ruleId: '规则ID',
title: '标题',
+ duration: '持续时间',
metric: '指标 / 阈值',
- email: '邮件已发送'
+ dimensions: '维度',
+ email: '邮件已发送',
+ emailSent: '已发送',
+ emailIgnored: '已忽略'
}
},
alertRules: {
@@ -2288,8 +2450,8 @@ export default {
successRate: '成功率 (%)',
errorRate: '错误率 (%)',
upstreamErrorRate: '上游错误率 (%)',
- p95: 'P95 延迟 (ms)',
- p99: 'P99 延迟 (ms)',
+ p95: 'P95 请求时长 (ms)',
+ p99: 'P99 请求时长 (ms)',
cpu: 'CPU 使用率 (%)',
memory: '内存使用率 (%)',
queueDepth: '并发排队深度',
@@ -2374,7 +2536,6 @@ export default {
title: '告警静默(维护模式)',
enabled: '启用静默',
globalUntil: '静默截止时间(RFC3339)',
- untilPlaceholder: '2026-01-05T00:00:00Z',
untilHint: '建议填写截止时间,避免忘记关闭静默。',
reason: '原因',
reasonPlaceholder: '例如:计划维护',
@@ -2414,7 +2575,11 @@ export default {
lockKeyRequired: '启用分布式锁时必须填写 Lock Key',
lockKeyPrefix: '分布式锁 Key 必须以「{prefix}」开头',
lockKeyHint: '建议以「{prefix}」开头以避免冲突',
- lockTtlRange: '分布式锁 TTL 必须在 1 到 86400 秒之间'
+ lockTtlRange: '分布式锁 TTL 必须在 1 到 86400 秒之间',
+ slaMinPercentRange: 'SLA 最低值必须在 0-100 之间',
+ ttftP99MaxRange: 'TTFT P99 最大值必须大于或等于 0',
+ requestErrorRateMaxRange: '请求错误率最大值必须在 0-100 之间',
+ upstreamErrorRateMaxRange: '上游错误率最大值必须在 0-100 之间'
}
},
email: {
@@ -2479,8 +2644,6 @@ export default {
metricThresholdsHint: '配置各项指标的告警阈值,超出阈值时将以红色显示',
slaMinPercent: 'SLA最低百分比',
slaMinPercentHint: 'SLA低于此值时显示为红色(默认:99.5%)',
- latencyP99MaxMs: '延迟P99最大值(毫秒)',
- latencyP99MaxMsHint: '延迟P99高于此值时显示为红色(默认:2000ms)',
ttftP99MaxMs: 'TTFT P99最大值(毫秒)',
ttftP99MaxMsHint: 'TTFT P99高于此值时显示为红色(默认:500ms)',
requestErrorRateMaxPercent: '请求错误率最大值(%)',
@@ -2499,9 +2662,28 @@ export default {
aggregation: '预聚合任务',
enableAggregation: '启用预聚合任务',
aggregationHint: '预聚合可提升长时间窗口查询性能',
+ errorFiltering: '错误过滤',
+ ignoreCountTokensErrors: '忽略 count_tokens 错误',
+ ignoreCountTokensErrorsHint: '启用后,count_tokens 请求的错误将不会写入错误日志。',
+ ignoreContextCanceled: '忽略客户端断连错误',
+ ignoreContextCanceledHint: '启用后,客户端主动断开连接(context canceled)的错误将不会写入错误日志。',
+ ignoreNoAvailableAccounts: '忽略无可用账号错误',
+ ignoreNoAvailableAccountsHint: '启用后,“No available accounts” 错误将不会写入错误日志(不推荐,这通常是配置问题)。',
+ autoRefresh: '自动刷新',
+ enableAutoRefresh: '启用自动刷新',
+ enableAutoRefreshHint: '自动刷新仪表板数据,启用后会定期拉取最新数据。',
+ refreshInterval: '刷新间隔',
+ refreshInterval15s: '15 秒',
+ refreshInterval30s: '30 秒',
+ refreshInterval60s: '60 秒',
+ autoRefreshCountdown: '自动刷新:{seconds}s',
validation: {
title: '请先修正以下问题',
- retentionDaysRange: '保留天数必须在1-365天之间'
+ retentionDaysRange: '保留天数必须在1-365天之间',
+ slaMinPercentRange: 'SLA最低百分比必须在0-100之间',
+ ttftP99MaxRange: 'TTFT P99最大值必须大于等于0',
+ requestErrorRateMaxRange: '请求错误率最大值必须在0-100之间',
+ upstreamErrorRateMaxRange: '上游错误率最大值必须在0-100之间'
}
},
concurrency: {
@@ -2539,12 +2721,12 @@ export default {
tooltips: {
totalRequests: '当前时间窗口内的总请求数和Token消耗量。',
throughputTrend: '当前窗口内的请求/QPS 与 token/TPS 趋势。',
- latencyHistogram: '成功请求的延迟分布(毫秒)。',
+ latencyHistogram: '成功请求的请求时长分布(毫秒)。',
errorTrend: '错误趋势(SLA 口径排除业务限制;上游错误率排除 429/529)。',
errorDistribution: '按状态码统计的错误分布。',
upstreamErrors: '上游服务返回的错误,包括API提供商的错误响应(排除429/529限流错误)。',
goroutines:
- 'Go 运行时的协程数量(轻量级线程)。没有绝对“安全值”,建议以历史基线为准。经验参考:<2000 常见;2000-8000 需关注;>8000 且伴随队列/延迟上升时,优先排查阻塞/泄漏。',
+ 'Go 运行时的协程数量(轻量级线程)。没有绝对“安全值”,建议以历史基线为准。经验参考:<2000 常见;2000-8000 需关注;>8000 且伴随队列上升时,优先排查阻塞/泄漏。',
cpu: 'CPU 使用率,显示系统处理器的负载情况。',
memory: '内存使用率,包括已使用和总可用内存。',
db: '数据库连接池状态,包括活跃连接、空闲连接和等待连接数。',
@@ -2554,7 +2736,7 @@ export default {
tokens: '当前时间窗口内处理的总Token数量。',
sla: '服务等级协议达成率,排除业务限制(如余额不足、配额超限)的成功请求占比。',
errors: '错误统计,包括总错误数、错误率和上游错误率。',
- latency: '请求延迟统计,包括 p50、p90、p95、p99 等百分位数。',
+ latency: '请求时长统计,包括 p50、p90、p95、p99 等百分位数。',
ttft: '首Token延迟(Time To First Token),衡量流式响应的首字节返回速度。',
health: '系统健康评分(0-100),综合考虑 SLA、错误率和资源使用情况。'
},
diff --git a/frontend/src/style.css b/frontend/src/style.css
index b3b492b9..81a40e0c 100644
--- a/frontend/src/style.css
+++ b/frontend/src/style.css
@@ -345,7 +345,7 @@
.modal-overlay {
@apply fixed inset-0 z-50;
@apply bg-black/50 backdrop-blur-sm;
- @apply flex items-center justify-center p-4;
+ @apply flex items-center justify-center p-2 sm:p-4;
}
.modal-content {
diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts
index 5eb74596..dabc260d 100644
--- a/frontend/src/types/index.ts
+++ b/frontend/src/types/index.ts
@@ -364,10 +364,21 @@ export interface Proxy {
password?: string | null
status: 'active' | 'inactive'
account_count?: number // Number of accounts using this proxy
+ latency_ms?: number
+ latency_status?: 'success' | 'failed'
+ latency_message?: string
created_at: string
updated_at: string
}
+export interface ProxyAccountSummary {
+ id: number
+ name: string
+ platform: AccountPlatform
+ type: AccountType
+ notes?: string | null
+}
+
// Gemini credentials structure for OAuth and API Key authentication
export interface GeminiCredentials {
// API Key authentication
@@ -428,6 +439,7 @@ export interface Account {
concurrency: number
current_concurrency?: number // Real-time concurrency count from Redis
priority: number
+ rate_multiplier?: number // Account billing multiplier (>=0, 0 means free)
status: 'active' | 'inactive' | 'error'
error_message: string | null
last_used_at: string | null
@@ -457,7 +469,9 @@ export interface Account {
export interface WindowStats {
requests: number
tokens: number
- cost: number
+ cost: number // Account cost (account multiplier)
+ standard_cost?: number
+ user_cost?: number
}
export interface UsageProgress {
@@ -522,6 +536,7 @@ export interface CreateAccountRequest {
proxy_id?: number | null
concurrency?: number
priority?: number
+ rate_multiplier?: number // Account billing multiplier (>=0, 0 means free)
group_ids?: number[]
expires_at?: number | null
auto_pause_on_expired?: boolean
@@ -537,6 +552,7 @@ export interface UpdateAccountRequest {
proxy_id?: number | null
concurrency?: number
priority?: number
+ rate_multiplier?: number // Account billing multiplier (>=0, 0 means free)
schedulable?: boolean
status?: 'active' | 'inactive'
group_ids?: number[]
@@ -593,6 +609,7 @@ export interface UsageLog {
total_cost: number
actual_cost: number
rate_multiplier: number
+ account_rate_multiplier?: number | null
stream: boolean
duration_ms: number
@@ -852,23 +869,27 @@ export interface AccountUsageHistory {
requests: number
tokens: number
cost: number
- actual_cost: number
+ actual_cost: number // Account cost (account multiplier)
+ user_cost: number // User/API key billed cost (group multiplier)
}
export interface AccountUsageSummary {
days: number
actual_days_used: number
- total_cost: number
+ total_cost: number // Account cost (account multiplier)
+ total_user_cost: number
total_standard_cost: number
total_requests: number
total_tokens: number
- avg_daily_cost: number
+ avg_daily_cost: number // Account cost
+ avg_daily_user_cost: number
avg_daily_requests: number
avg_daily_tokens: number
avg_duration_ms: number
today: {
date: string
cost: number
+ user_cost: number
requests: number
tokens: number
} | null
@@ -876,6 +897,7 @@ export interface AccountUsageSummary {
date: string
label: string
cost: number
+ user_cost: number
requests: number
} | null
highest_request_day: {
@@ -883,6 +905,7 @@ export interface AccountUsageSummary {
label: string
requests: number
cost: number
+ user_cost: number
} | null
}
diff --git a/frontend/src/views/admin/AccountsView.vue b/frontend/src/views/admin/AccountsView.vue
index 8a5268ca..f465c001 100644
--- a/frontend/src/views/admin/AccountsView.vue
+++ b/frontend/src/views/admin/AccountsView.vue
@@ -61,6 +61,11 @@
+
+
+ {{ (row.rate_multiplier ?? 1).toFixed(2) }}x
+
+
{{ value }}
@@ -120,7 +125,7 @@
diff --git a/frontend/src/views/admin/ProxiesView.vue b/frontend/src/views/admin/ProxiesView.vue
index 22d778d9..9d876972 100644
--- a/frontend/src/views/admin/ProxiesView.vue
+++ b/frontend/src/views/admin/ProxiesView.vue
@@ -51,6 +51,24 @@
>
+
+
@@ -1181,7 +1156,7 @@ function handleToolbarRefresh() {
-
+
{{ t('admin.ops.requestsTitle') }}
@@ -1217,10 +1192,10 @@ function handleToolbarRefresh() {
-
+
- SLA
+ {{ t('admin.ops.sla') }}
@@ -1247,8 +1222,8 @@ function handleToolbarRefresh() {
-
-
+
+
{{ t('admin.ops.latencyDuration') }}
@@ -1264,42 +1239,42 @@ function handleToolbarRefresh() {
-
+
{{ durationP99Ms ?? '-' }}
ms (P99)
- P95:
- {{ durationP95Ms ?? '-' }}
+ {{ t('admin.ops.p95') }}
+ {{ durationP95Ms ?? '-' }}
ms
- P90:
- {{ durationP90Ms ?? '-' }}
+ {{ t('admin.ops.p90') }}
+ {{ durationP90Ms ?? '-' }}
ms
- P50:
- {{ durationP50Ms ?? '-' }}
+ {{ t('admin.ops.p50') }}
+ {{ durationP50Ms ?? '-' }}
ms
Avg:
- {{ durationAvgMs ?? '-' }}
+ {{ durationAvgMs ?? '-' }}
ms
Max:
- {{ durationMaxMs ?? '-' }}
+ {{ durationMaxMs ?? '-' }}
ms
-
-
+
+
TTFT
@@ -1309,48 +1284,48 @@ function handleToolbarRefresh() {
v-if="!props.fullscreen"
class="text-[10px] font-bold text-blue-500 hover:underline"
type="button"
- @click="openDetails({ title: 'TTFT', sort: 'duration_desc' })"
+ @click="openDetails({ title: t('admin.ops.ttftLabel'), sort: 'duration_desc' })"
>
{{ t('admin.ops.requestDetails.details') }}
-
+
{{ ttftP99Ms ?? '-' }}
ms (P99)
- P95:
- {{ ttftP95Ms ?? '-' }}
+ {{ t('admin.ops.p95') }}
+ {{ ttftP95Ms ?? '-' }}
ms
- P90:
- {{ ttftP90Ms ?? '-' }}
+ {{ t('admin.ops.p90') }}
+ {{ ttftP90Ms ?? '-' }}
ms
- P50:
- {{ ttftP50Ms ?? '-' }}
+ {{ t('admin.ops.p50') }}
+ {{ ttftP50Ms ?? '-' }}
ms
Avg:
- {{ ttftAvgMs ?? '-' }}
+ {{ ttftAvgMs ?? '-' }}
ms
Max:
- {{ ttftMaxMs ?? '-' }}
+ {{ ttftMaxMs ?? '-' }}
ms
-
-
+
+
{{ t('admin.ops.requestErrors') }}
@@ -1376,7 +1351,7 @@ function handleToolbarRefresh() {
-
+
{{ t('admin.ops.upstreamErrors') }}
@@ -1423,7 +1398,7 @@ function handleToolbarRefresh() {
-
MEM
+
{{ t('admin.ops.mem') }}
@@ -1441,7 +1416,7 @@ function handleToolbarRefresh() {
-
DB
+
{{ t('admin.ops.db') }}
diff --git a/frontend/src/views/admin/ops/components/OpsDashboardSkeleton.vue b/frontend/src/views/admin/ops/components/OpsDashboardSkeleton.vue
index 5bbadd03..cffdd8a1 100644
--- a/frontend/src/views/admin/ops/components/OpsDashboardSkeleton.vue
+++ b/frontend/src/views/admin/ops/components/OpsDashboardSkeleton.vue
@@ -1,50 +1,96 @@
+
+
-
-
-
+
+
+
-
-
-
-
-
-
-
+
+
+
+
+
+
+
diff --git a/frontend/src/views/admin/ops/components/OpsErrorDetailModal.vue b/frontend/src/views/admin/ops/components/OpsErrorDetailModal.vue
index 0726bacd..81fe982c 100644
--- a/frontend/src/views/admin/ops/components/OpsErrorDetailModal.vue
+++ b/frontend/src/views/admin/ops/components/OpsErrorDetailModal.vue
@@ -12,12 +12,12 @@
-
-
+
+
{{ t('admin.ops.errorDetail.requestId') }}
- {{ detail.request_id || detail.client_request_id || '—' }}
+ {{ requestId || '—' }}
@@ -29,277 +29,149 @@
-
{{ t('admin.ops.errorDetail.phase') }}
-
- {{ detail.phase || '—' }}
+
+ {{ isUpstreamError(detail) ? t('admin.ops.errorDetail.account') : t('admin.ops.errorDetail.user') }}
-
- {{ detail.type || '—' }}
+
+
+ {{ detail.account_name || (detail.account_id != null ? String(detail.account_id) : '—') }}
+
+
+ {{ detail.user_email || (detail.user_id != null ? String(detail.user_id) : '—') }}
+
+
+
+
+
+
{{ t('admin.ops.errorDetail.platform') }}
+
+ {{ detail.platform || '—' }}
+
+
+
+
+
{{ t('admin.ops.errorDetail.group') }}
+
+ {{ detail.group_name || (detail.group_id != null ? String(detail.group_id) : '—') }}
+
+
+
+
+
{{ t('admin.ops.errorDetail.model') }}
+
+ {{ detail.model || '—' }}
{{ t('admin.ops.errorDetail.status') }}
-
+
{{ detail.status_code }}
-
- {{ detail.severity }}
-
+
+
+
+
+
{{ t('admin.ops.errorDetail.message') }}
+
+ {{ detail.message || '—' }}
-
+
-
{{ t('admin.ops.errorDetail.message') }}
-
- {{ detail.message || '—' }}
-
+
{{ t('admin.ops.errorDetail.responseBody') }}
+
{{ prettyJSON(primaryResponseBody || '') }}
-
-
-
{{ t('admin.ops.errorDetail.basicInfo') }}
-
-
-
{{ t('admin.ops.errorDetail.platform') }}
-
{{ detail.platform || '—' }}
-
-
-
{{ t('admin.ops.errorDetail.model') }}
-
{{ detail.model || '—' }}
-
-
-
{{ t('admin.ops.errorDetail.latency') }}
-
- {{ detail.latency_ms != null ? `${detail.latency_ms}ms` : '—' }}
-
-
-
-
{{ t('admin.ops.errorDetail.ttft') }}
-
- {{ detail.time_to_first_token_ms != null ? `${detail.time_to_first_token_ms}ms` : '—' }}
-
-
-
-
{{ t('admin.ops.errorDetail.businessLimited') }}
-
- {{ detail.is_business_limited ? 'true' : 'false' }}
-
-
-
-
{{ t('admin.ops.errorDetail.requestPath') }}
-
- {{ detail.request_path || '—' }}
-
-
-
-
-
-
-
-
{{ t('admin.ops.errorDetail.timings') }}
-
-
-
{{ t('admin.ops.errorDetail.auth') }}
-
- {{ detail.auth_latency_ms != null ? `${detail.auth_latency_ms}ms` : '—' }}
-
-
-
-
{{ t('admin.ops.errorDetail.routing') }}
-
- {{ detail.routing_latency_ms != null ? `${detail.routing_latency_ms}ms` : '—' }}
-
-
-
-
{{ t('admin.ops.errorDetail.upstream') }}
-
- {{ detail.upstream_latency_ms != null ? `${detail.upstream_latency_ms}ms` : '—' }}
-
-
-
-
{{ t('admin.ops.errorDetail.response') }}
-
- {{ detail.response_latency_ms != null ? `${detail.response_latency_ms}ms` : '—' }}
-
-
-
-
-
-
-
-
-
-
{{ t('admin.ops.errorDetail.retry') }}
-
- {{ t('admin.ops.errorDetail.retryNote1') }}
-
-
-
-
- {{ t('admin.ops.errorDetail.retryClient') }}
-
-
- {{ t('admin.ops.errorDetail.retryUpstream') }}
-
-
+
+
+
+
{{ t('admin.ops.errorDetails.upstreamErrors') }}
+
{{ t('common.loading') }}
-
-
-
-
-
- {{ t('admin.ops.errorDetail.retryNote2') }}
-
-
-
-
-
{{ t('admin.ops.errorDetail.retryNotes') }}
-
- - {{ t('admin.ops.errorDetail.retryNote3') }}
- - {{ t('admin.ops.errorDetail.retryNote4') }}
-
-
-
-
-
-
-
-
-
- {{ t('admin.ops.errorDetails.upstreamErrors') }}
-
-
-
-
-
status
-
- {{ detail.upstream_status_code != null ? detail.upstream_status_code : '—' }}
-
-
-
-
message
-
- {{ detail.upstream_error_message || '—' }}
-
-
+
+ {{ t('common.noData') }}
-
-
detail
-
{{ prettyJSON(detail.upstream_error_detail) }}
-
-
-
-
upstream_errors
-
-
-
-
-
- #{{ idx + 1 }} {{ ev.kind }}
-
-
- {{ ev.at_unix_ms ? formatDateTime(new Date(ev.at_unix_ms)) : '' }}
-
-
-
-
-
account_id: {{ ev.account_id ?? '—' }}
-
status: {{ ev.upstream_status_code ?? '—' }}
-
- request_id: {{ ev.upstream_request_id || '—' }}
-
-
-
-
- {{ ev.message }}
-
-
-
{{ prettyJSON(ev.detail) }}
-
-
-
-
{{ prettyJSON(detail.upstream_errors) }}
-
-
-
-
-
-
-
{{ t('admin.ops.errorDetail.requestBody') }}
+
- {{ t('admin.ops.errorDetail.trimmed') }}
+
+
+ #{{ idx + 1 }}
+ {{ ev.type }}
+
+
+
+ {{ ev.status_code ?? '—' }}
+
+
+
+
+ {{
+ expandedUpstreamDetailIds.has(ev.id)
+ ? t('admin.ops.errorDetail.responsePreview.collapse')
+ : t('admin.ops.errorDetail.responsePreview.expand')
+ }}
+
+
+
+
+
+
+
+ {{ t('admin.ops.errorDetail.upstreamEvent.status') }}:
+ {{ ev.status_code ?? '—' }}
+
+
+ {{ t('admin.ops.errorDetail.upstreamEvent.requestId') }}:
+ {{ ev.request_id || ev.client_request_id || '—' }}
+
+
+
+
{{ ev.message }}
+
+
{{ prettyJSON(getUpstreamResponsePreview(ev)) }}
-
{{ prettyJSON(detail.request_body) }}
-
-
-
-
-
{{ t('admin.ops.errorDetail.errorBody') }}
-
{{ prettyJSON(detail.error_body) }}
-
-
diff --git a/frontend/src/views/admin/ops/components/OpsErrorDetailsModal.vue b/frontend/src/views/admin/ops/components/OpsErrorDetailsModal.vue
index 0280f158..e9e44010 100644
--- a/frontend/src/views/admin/ops/components/OpsErrorDetailsModal.vue
+++ b/frontend/src/views/admin/ops/components/OpsErrorDetailsModal.vue
@@ -22,23 +22,19 @@ const emit = defineEmits<{
const { t } = useI18n()
+
const loading = ref(false)
const rows = ref<OpsErrorLog[]>([])
const total = ref(0)
const page = ref(1)
-const pageSize = ref(20)
+const pageSize = ref(10)
const q = ref('')
-const statusCode = ref<number | null>(null)
+const statusCode = ref<number | 'other' | null>(null)
const phase = ref('')
-const accountIdInput = ref('')
+const errorOwner = ref('')
+const viewMode = ref<'errors' | 'excluded' | 'all'>('errors')
-const accountId = computed(() => {
- const raw = String(accountIdInput.value || '').trim()
- if (!raw) return null
- const n = Number.parseInt(raw, 10)
- return Number.isFinite(n) && n > 0 ? n : null
-})
const modalTitle = computed(() => {
return props.errorType === 'upstream' ? t('admin.ops.errorDetails.upstreamErrors') : t('admin.ops.errorDetails.requestErrors')
@@ -48,20 +44,38 @@ const statusCodeSelectOptions = computed(() => {
const codes = [400, 401, 403, 404, 409, 422, 429, 500, 502, 503, 504, 529]
return [
{ value: null, label: t('common.all') },
- ...codes.map((c) => ({ value: c, label: String(c) }))
+ ...codes.map((c) => ({ value: c, label: String(c) })),
+ { value: 'other', label: t('admin.ops.errorDetails.statusCodeOther') || 'Other' }
+ ]
+})
+
+const ownerSelectOptions = computed(() => {
+ return [
+ { value: '', label: t('common.all') },
+ { value: 'provider', label: t('admin.ops.errorDetails.owner.provider') || 'provider' },
+ { value: 'client', label: t('admin.ops.errorDetails.owner.client') || 'client' },
+ { value: 'platform', label: t('admin.ops.errorDetails.owner.platform') || 'platform' }
+ ]
+})
+
+
+const viewModeSelectOptions = computed(() => {
+ return [
+ { value: 'errors', label: t('admin.ops.errorDetails.viewErrors') || 'errors' },
+ { value: 'excluded', label: t('admin.ops.errorDetails.viewExcluded') || 'excluded' },
+ { value: 'all', label: t('common.all') }
]
})
const phaseSelectOptions = computed(() => {
const options = [
{ value: '', label: t('common.all') },
- { value: 'upstream', label: 'upstream' },
- { value: 'network', label: 'network' },
- { value: 'routing', label: 'routing' },
- { value: 'auth', label: 'auth' },
- { value: 'billing', label: 'billing' },
- { value: 'concurrency', label: 'concurrency' },
- { value: 'internal', label: 'internal' }
+ { value: 'request', label: t('admin.ops.errorDetails.phase.request') || 'request' },
+ { value: 'auth', label: t('admin.ops.errorDetails.phase.auth') || 'auth' },
+ { value: 'routing', label: t('admin.ops.errorDetails.phase.routing') || 'routing' },
+ { value: 'upstream', label: t('admin.ops.errorDetails.phase.upstream') || 'upstream' },
+ { value: 'network', label: t('admin.ops.errorDetails.phase.network') || 'network' },
+ { value: 'internal', label: t('admin.ops.errorDetails.phase.internal') || 'internal' }
]
return options
})
@@ -78,7 +92,8 @@ async function fetchErrorLogs() {
const params: Record<string, string | number> = {
page: page.value,
page_size: pageSize.value,
- time_range: props.timeRange
+ time_range: props.timeRange,
+ view: viewMode.value
}
const platform = String(props.platform || '').trim()
@@ -86,13 +101,19 @@ async function fetchErrorLogs() {
if (typeof props.groupId === 'number' && props.groupId > 0) params.group_id = props.groupId
if (q.value.trim()) params.q = q.value.trim()
- if (typeof statusCode.value === 'number') params.status_codes = String(statusCode.value)
- if (typeof accountId.value === 'number') params.account_id = accountId.value
+ if (statusCode.value === 'other') params.status_codes_other = '1'
+ else if (typeof statusCode.value === 'number') params.status_codes = String(statusCode.value)
const phaseVal = String(phase.value || '').trim()
if (phaseVal) params.phase = phaseVal
- const res = await opsAPI.listErrorLogs(params)
+ const ownerVal = String(errorOwner.value || '').trim()
+ if (ownerVal) params.error_owner = ownerVal
+
+
+ const res = props.errorType === 'upstream'
+ ? await opsAPI.listUpstreamErrors(params)
+ : await opsAPI.listRequestErrors(params)
rows.value = res.items || []
total.value = res.total || 0
} catch (err) {
@@ -104,21 +125,23 @@ async function fetchErrorLogs() {
}
}
-function resetFilters() {
- q.value = ''
- statusCode.value = null
- phase.value = props.errorType === 'upstream' ? 'upstream' : ''
- accountIdInput.value = ''
- page.value = 1
- fetchErrorLogs()
-}
+function resetFilters() {
+  q.value = ''
+  statusCode.value = null
+  phase.value = props.errorType === 'upstream' ? 'upstream' : ''
+  errorOwner.value = ''
+  viewMode.value = 'errors'
+  page.value = 1
+  fetchErrorLogs()
+}
+
watch(
() => props.show,
(open) => {
if (!open) return
page.value = 1
- pageSize.value = 20
+ pageSize.value = 10
resetFilters()
}
)
@@ -154,16 +177,7 @@ watch(
)
watch(
- () => [statusCode.value, phase.value] as const,
- () => {
- if (!props.show) return
- page.value = 1
- fetchErrorLogs()
- }
-)
-
-watch(
- () => accountId.value,
+ () => [statusCode.value, phase.value, errorOwner.value, viewMode.value] as const,
() => {
if (!props.show) return
page.value = 1
@@ -177,12 +191,12 @@ watch(
-
-
+
+
-
-
-
+
+
-
-
+
+
-
-
+
+
-
-
+
+
+
+
+
+
+
+
{{ t('common.reset') }}
@@ -231,18 +245,26 @@ watch(
{{ t('admin.ops.errorDetails.total') }} {{ total }}
-
+
+
+
+
diff --git a/frontend/src/views/admin/ops/components/OpsErrorLogTable.vue b/frontend/src/views/admin/ops/components/OpsErrorLogTable.vue
index 416bdba9..28868552 100644
--- a/frontend/src/views/admin/ops/components/OpsErrorLogTable.vue
+++ b/frontend/src/views/admin/ops/components/OpsErrorLogTable.vue
@@ -1,55 +1,48 @@
-
+
+
+
-
-
-
+
+
+
- |
- {{ t('admin.ops.errorLog.timeId') }}
+ |
+ {{ t('admin.ops.errorLog.time') }}
|
-
- {{ t('admin.ops.errorLog.context') }}
+ |
+ {{ t('admin.ops.errorLog.type') }}
|
-
+ |
+ {{ t('admin.ops.errorLog.platform') }}
+ |
+
+ {{ t('admin.ops.errorLog.model') }}
+ |
+
+ {{ t('admin.ops.errorLog.group') }}
+ |
+
+ {{ t('admin.ops.errorLog.user') }}
+ |
+
{{ t('admin.ops.errorLog.status') }}
|
-
+ |
{{ t('admin.ops.errorLog.message') }}
|
-
- {{ t('admin.ops.errorLog.latency') }}
- |
-
+ |
{{ t('admin.ops.errorLog.action') }}
|
-
- |
+ |
+ |
{{ t('admin.ops.errorLog.noErrors') }}
|
@@ -57,59 +50,83 @@
-
-
-
-
+
+
+
+
{{ formatDateTime(log.created_at).split(' ')[1] }}
-
- {{ (log.request_id || log.client_request_id || '').substring(0, 12) }}
-
-
+
|
-
-
-
-
- {{ log.platform || '-' }}
-
-
+
+
+
+ {{ getTypeBadge(log).label }}
+
+ |
+
+
+
+
+ {{ log.platform || '-' }}
+
+ |
+
+
+
+
+
{{ log.model }}
-
- {{ t('admin.ops.errorLog.grp') }} {{ log.group_id }}
- {{ t('admin.ops.errorLog.acc') }} {{ log.account_id }}
-
+ -
|
-
-
-
+
+
+
+
+ {{ log.group_name || '-' }}
+
+
+ -
+ |
+
+
+
+
+
+
+ {{ log.account_name || '-' }}
+
+
+ -
+
+
+
+
+ {{ log.user_email || '-' }}
+
+
+ -
+
+ |
+
+
+
+
@@ -117,61 +134,47 @@
{{ log.severity }}
|
-
-
-
-
+
+
+
+
{{ formatSmartMessage(log.message) || '-' }}
-
-
-
- {{ log.phase }}
-
-
-
- {{ log.client_ip }}
-
-
-
- |
-
-
-
-
-
- {{ log.latency_ms != null ? Math.round(log.latency_ms) + 'ms' : '--' }}
-
|
-
-
- {{ t('admin.ops.errorLog.details') }}
-
+ |
+
+
+ {{ t('admin.ops.errorLog.details') }}
+
+
|
| | | |
-
+
+
@@ -184,6 +187,36 @@ import { getSeverityClass, formatDateTime } from '../utils/opsFormatters'
const { t } = useI18n()
+function isUpstreamRow(log: OpsErrorLog): boolean {
+ const phase = String(log.phase || '').toLowerCase()
+ const owner = String(log.error_owner || '').toLowerCase()
+ return phase === 'upstream' && owner === 'provider'
+}
+
+function getTypeBadge(log: OpsErrorLog): { label: string; className: string } {
+ const phase = String(log.phase || '').toLowerCase()
+ const owner = String(log.error_owner || '').toLowerCase()
+
+ if (isUpstreamRow(log)) {
+ return { label: t('admin.ops.errorLog.typeUpstream'), className: 'bg-red-50 text-red-700 ring-red-600/20 dark:bg-red-900/30 dark:text-red-400 dark:ring-red-500/30' }
+ }
+ if (phase === 'request' && owner === 'client') {
+ return { label: t('admin.ops.errorLog.typeRequest'), className: 'bg-amber-50 text-amber-700 ring-amber-600/20 dark:bg-amber-900/30 dark:text-amber-400 dark:ring-amber-500/30' }
+ }
+ if (phase === 'auth' && owner === 'client') {
+ return { label: t('admin.ops.errorLog.typeAuth'), className: 'bg-blue-50 text-blue-700 ring-blue-600/20 dark:bg-blue-900/30 dark:text-blue-400 dark:ring-blue-500/30' }
+ }
+ if (phase === 'routing' && owner === 'platform') {
+ return { label: t('admin.ops.errorLog.typeRouting'), className: 'bg-purple-50 text-purple-700 ring-purple-600/20 dark:bg-purple-900/30 dark:text-purple-400 dark:ring-purple-500/30' }
+ }
+ if (phase === 'internal' && owner === 'platform') {
+ return { label: t('admin.ops.errorLog.typeInternal'), className: 'bg-gray-100 text-gray-800 ring-gray-600/20 dark:bg-dark-700 dark:text-gray-200 dark:ring-dark-500/40' }
+ }
+
+ const fallback = phase || owner || t('common.unknown')
+ return { label: fallback, className: 'bg-gray-50 text-gray-700 ring-gray-600/10 dark:bg-dark-900 dark:text-gray-300 dark:ring-dark-700' }
+}
+
interface Props {
rows: OpsErrorLog[]
total: number
@@ -208,14 +241,6 @@ function getStatusClass(code: number): string {
return 'bg-gray-50 text-gray-700 ring-gray-600/20 dark:bg-gray-900/30 dark:text-gray-400 dark:ring-gray-500/30'
}
-function getLatencyClass(latency: number | null): string {
- if (!latency) return 'text-gray-400'
- if (latency > 10000) return 'text-red-600 font-black'
- if (latency > 5000) return 'text-red-500 font-bold'
- if (latency > 2000) return 'text-orange-500 font-medium'
- return 'text-gray-600 dark:text-gray-400'
-}
-
function formatSmartMessage(msg: string): string {
if (!msg) return ''
@@ -231,10 +256,11 @@ function formatSmartMessage(msg: string): string {
}
}
- if (msg.includes('context deadline exceeded')) return 'context deadline exceeded'
- if (msg.includes('connection refused')) return 'connection refused'
- if (msg.toLowerCase().includes('rate limit')) return 'rate limit'
+ if (msg.includes('context deadline exceeded')) return t('admin.ops.errorLog.commonErrors.contextDeadlineExceeded')
+ if (msg.includes('connection refused')) return t('admin.ops.errorLog.commonErrors.connectionRefused')
+ if (msg.toLowerCase().includes('rate limit')) return t('admin.ops.errorLog.commonErrors.rateLimit')
return msg.length > 200 ? msg.substring(0, 200) + '...' : msg
+
}
-
+
\ No newline at end of file
diff --git a/frontend/src/views/admin/ops/components/OpsRequestDetailsModal.vue b/frontend/src/views/admin/ops/components/OpsRequestDetailsModal.vue
index d3edd745..3a70b4f2 100644
--- a/frontend/src/views/admin/ops/components/OpsRequestDetailsModal.vue
+++ b/frontend/src/views/admin/ops/components/OpsRequestDetailsModal.vue
@@ -38,7 +38,7 @@ const loading = ref(false)
const items = ref([])
const total = ref(0)
const page = ref(1)
-const pageSize = ref(20)
+const pageSize = ref(10)
const close = () => emit('update:modelValue', false)
@@ -95,7 +95,7 @@ watch(
(open) => {
if (open) {
page.value = 1
- pageSize.value = 20
+ pageSize.value = 10
fetchData()
}
}
diff --git a/frontend/src/views/admin/ops/components/OpsRuntimeSettingsCard.vue b/frontend/src/views/admin/ops/components/OpsRuntimeSettingsCard.vue
index 1dcab4b3..82c19f4f 100644
--- a/frontend/src/views/admin/ops/components/OpsRuntimeSettingsCard.vue
+++ b/frontend/src/views/admin/ops/components/OpsRuntimeSettingsCard.vue
@@ -50,27 +50,22 @@ function validateRuntimeSettings(settings: OpsAlertRuntimeSettings): ValidationR
if (thresholds) {
if (thresholds.sla_percent_min != null) {
if (!Number.isFinite(thresholds.sla_percent_min) || thresholds.sla_percent_min < 0 || thresholds.sla_percent_min > 100) {
- errors.push('SLA 最低值必须在 0-100 之间')
- }
- }
- if (thresholds.latency_p99_ms_max != null) {
- if (!Number.isFinite(thresholds.latency_p99_ms_max) || thresholds.latency_p99_ms_max < 0) {
- errors.push('延迟 P99 最大值必须大于或等于 0')
+ errors.push(t('admin.ops.runtime.validation.slaMinPercentRange'))
}
}
if (thresholds.ttft_p99_ms_max != null) {
if (!Number.isFinite(thresholds.ttft_p99_ms_max) || thresholds.ttft_p99_ms_max < 0) {
- errors.push('TTFT P99 最大值必须大于或等于 0')
+ errors.push(t('admin.ops.runtime.validation.ttftP99MaxRange'))
}
}
if (thresholds.request_error_rate_percent_max != null) {
if (!Number.isFinite(thresholds.request_error_rate_percent_max) || thresholds.request_error_rate_percent_max < 0 || thresholds.request_error_rate_percent_max > 100) {
- errors.push('请求错误率最大值必须在 0-100 之间')
+ errors.push(t('admin.ops.runtime.validation.requestErrorRateMaxRange'))
}
}
if (thresholds.upstream_error_rate_percent_max != null) {
if (!Number.isFinite(thresholds.upstream_error_rate_percent_max) || thresholds.upstream_error_rate_percent_max < 0 || thresholds.upstream_error_rate_percent_max > 100) {
- errors.push('上游错误率最大值必须在 0-100 之间')
+ errors.push(t('admin.ops.runtime.validation.upstreamErrorRateMaxRange'))
}
}
}
@@ -163,7 +158,6 @@ function openAlertEditor() {
if (!draftAlert.value.thresholds) {
draftAlert.value.thresholds = {
sla_percent_min: 99.5,
- latency_p99_ms_max: 2000,
ttft_p99_ms_max: 500,
request_error_rate_percent_max: 5,
upstream_error_rate_percent_max: 5
@@ -335,12 +329,12 @@ onMounted(() => {
-
指标阈值配置
-
配置各项指标的告警阈值。超出阈值的指标将在看板上以红色显示。
+
{{ t('admin.ops.runtime.metricThresholds') }}
+
{{ t('admin.ops.runtime.metricThresholdsHint') }}
-
SLA 最低值 (%)
+
{{ t('admin.ops.runtime.slaMinPercent') }}
{
class="input"
placeholder="99.5"
/>
-
SLA 低于此值时将显示为红色
+
{{ t('admin.ops.runtime.slaMinPercentHint') }}
-
-
延迟 P99 最大值 (ms)
-
-
延迟 P99 高于此值时将显示为红色
-
+
-
TTFT P99 最大值 (ms)
+
{{ t('admin.ops.runtime.ttftP99MaxMs') }}
{
class="input"
placeholder="500"
/>
-
TTFT P99 高于此值时将显示为红色
+
{{ t('admin.ops.runtime.ttftP99MaxMsHint') }}
-
请求错误率最大值 (%)
+
{{ t('admin.ops.runtime.requestErrorRateMaxPercent') }}
{
class="input"
placeholder="5"
/>
-
请求错误率高于此值时将显示为红色
+
{{ t('admin.ops.runtime.requestErrorRateMaxPercentHint') }}
-
上游错误率最大值 (%)
+
{{ t('admin.ops.runtime.upstreamErrorRateMaxPercent') }}
{
class="input"
placeholder="5"
/>
-
上游错误率高于此值时将显示为红色
+
{{ t('admin.ops.runtime.upstreamErrorRateMaxPercentHint') }}
@@ -424,7 +407,7 @@ onMounted(() => {
v-model="draftAlert.silencing.global_until_rfc3339"
type="text"
class="input font-mono text-sm"
- :placeholder="t('admin.ops.runtime.silencing.untilPlaceholder')"
+ placeholder="2026-01-05T00:00:00Z"
/>
{{ t('admin.ops.runtime.silencing.untilHint') }}
@@ -496,7 +479,7 @@ onMounted(() => {
v-model="(entry as any).until_rfc3339"
type="text"
class="input font-mono text-sm"
- :placeholder="t('admin.ops.runtime.silencing.untilPlaceholder')"
+ placeholder="2026-01-05T00:00:00Z"
/>
diff --git a/frontend/src/views/admin/ops/components/OpsSettingsDialog.vue b/frontend/src/views/admin/ops/components/OpsSettingsDialog.vue
index 1f64f253..53ab6683 100644
--- a/frontend/src/views/admin/ops/components/OpsSettingsDialog.vue
+++ b/frontend/src/views/admin/ops/components/OpsSettingsDialog.vue
@@ -32,7 +32,6 @@ const advancedSettings = ref(null)
// 指标阈值配置
const metricThresholds = ref({
sla_percent_min: 99.5,
- latency_p99_ms_max: 2000,
ttft_p99_ms_max: 500,
request_error_rate_percent_max: 5,
upstream_error_rate_percent_max: 5
@@ -53,13 +52,12 @@ async function loadAllSettings() {
advancedSettings.value = advanced
// 如果后端返回了阈值,使用后端的值;否则保持默认值
if (thresholds && Object.keys(thresholds).length > 0) {
- metricThresholds.value = {
- sla_percent_min: thresholds.sla_percent_min ?? 99.5,
- latency_p99_ms_max: thresholds.latency_p99_ms_max ?? 2000,
- ttft_p99_ms_max: thresholds.ttft_p99_ms_max ?? 500,
- request_error_rate_percent_max: thresholds.request_error_rate_percent_max ?? 5,
- upstream_error_rate_percent_max: thresholds.upstream_error_rate_percent_max ?? 5
- }
+ metricThresholds.value = {
+ sla_percent_min: thresholds.sla_percent_min ?? 99.5,
+ ttft_p99_ms_max: thresholds.ttft_p99_ms_max ?? 500,
+ request_error_rate_percent_max: thresholds.request_error_rate_percent_max ?? 5,
+ upstream_error_rate_percent_max: thresholds.upstream_error_rate_percent_max ?? 5
+ }
}
} catch (err: any) {
console.error('[OpsSettingsDialog] Failed to load settings', err)
@@ -159,19 +157,16 @@ const validation = computed(() => {
// 验证指标阈值
if (metricThresholds.value.sla_percent_min != null && (metricThresholds.value.sla_percent_min < 0 || metricThresholds.value.sla_percent_min > 100)) {
- errors.push('SLA最低百分比必须在0-100之间')
- }
- if (metricThresholds.value.latency_p99_ms_max != null && metricThresholds.value.latency_p99_ms_max < 0) {
- errors.push('延迟P99最大值必须大于等于0')
+ errors.push(t('admin.ops.settings.validation.slaMinPercentRange'))
}
if (metricThresholds.value.ttft_p99_ms_max != null && metricThresholds.value.ttft_p99_ms_max < 0) {
- errors.push('TTFT P99最大值必须大于等于0')
+ errors.push(t('admin.ops.settings.validation.ttftP99MaxRange'))
}
if (metricThresholds.value.request_error_rate_percent_max != null && (metricThresholds.value.request_error_rate_percent_max < 0 || metricThresholds.value.request_error_rate_percent_max > 100)) {
- errors.push('请求错误率最大值必须在0-100之间')
+ errors.push(t('admin.ops.settings.validation.requestErrorRateMaxRange'))
}
if (metricThresholds.value.upstream_error_rate_percent_max != null && (metricThresholds.value.upstream_error_rate_percent_max < 0 || metricThresholds.value.upstream_error_rate_percent_max > 100)) {
- errors.push('上游错误率最大值必须在0-100之间')
+ errors.push(t('admin.ops.settings.validation.upstreamErrorRateMaxRange'))
}
return { valid: errors.length === 0, errors }
@@ -362,17 +357,6 @@ async function saveAllSettings() {
{{ t('admin.ops.settings.slaMinPercentHint') }}
-
-
-
-
{{ t('admin.ops.settings.latencyP99MaxMsHint') }}
-
@@ -488,43 +472,63 @@ async function saveAllSettings() {
-
+
-
错误过滤
+
{{ t('admin.ops.settings.errorFiltering') }}
-
+
- 启用后,count_tokens 请求的错误将不计入运维监控的统计和告警中(但仍会存储在数据库中)
+ {{ t('admin.ops.settings.ignoreCountTokensErrorsHint') }}
-
-
-
-
-
自动刷新
-
+
- 自动刷新仪表板数据,启用后会定期拉取最新数据
+ {{ t('admin.ops.settings.ignoreContextCanceledHint') }}
+
+
+
+
+
+
+
+
+
+ {{ t('admin.ops.settings.ignoreNoAvailableAccountsHint') }}
+
+
+
+
+
+
+
+
+
{{ t('admin.ops.settings.autoRefresh') }}
+
+
+
+
+
+ {{ t('admin.ops.settings.enableAutoRefreshHint') }}
-
+
diff --git a/frontend/src/views/admin/ops/components/OpsThroughputTrendChart.vue b/frontend/src/views/admin/ops/components/OpsThroughputTrendChart.vue
index 10c958b4..5707cc3b 100644
--- a/frontend/src/views/admin/ops/components/OpsThroughputTrendChart.vue
+++ b/frontend/src/views/admin/ops/components/OpsThroughputTrendChart.vue
@@ -61,7 +61,7 @@ const chartData = computed(() => {
labels: props.points.map((p) => formatHistoryLabel(p.bucket_start, props.timeRange)),
datasets: [
{
- label: t('admin.ops.qps'),
+ label: 'QPS',
data: props.points.map((p) => p.qps ?? 0),
borderColor: colors.value.blue,
backgroundColor: colors.value.blueAlpha,
@@ -183,7 +183,7 @@ function downloadChart() {
- {{ t('admin.ops.qps') }}
+ QPS
{{ t('admin.ops.tpsK') }}
Date: Thu, 15 Jan 2026 15:57:47 +0800
Subject: [PATCH 4/4] =?UTF-8?q?fix(=E8=B4=A6=E5=8F=B7=E7=AE=A1=E7=90=86):?=
=?UTF-8?q?=20=E7=A7=BB=E9=99=A4=E8=B0=83=E5=BA=A6=E5=88=87=E6=8D=A2?=
=?UTF-8?q?=E5=90=8E=E7=9A=84=E5=86=97=E4=BD=99=E5=88=97=E8=A1=A8=E5=88=B7?=
=?UTF-8?q?=E6=96=B0?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
After toggling an account's schedulable state, updateSchedulableInList has
already applied the partial in-place update, so calling load() to refresh the
entire list is unnecessary. This change removes redundant API requests and
avoids UI flicker.
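
For context, the partial update relied on here is roughly the following
sketch (hypothetical shape; the `accounts` ref and field names are
assumptions, not the exact code in AccountsView.vue):

    // Sketch only: assumes an `accounts` ref (ref<Account[]>) holding the
    // currently loaded rows; patching them in place removes the need for
    // a full load() round-trip.
    function updateSchedulableInList(ids: number[], schedulable: boolean) {
      const idSet = new Set(ids)
      accounts.value = accounts.value.map((a) =>
        idSet.has(a.id) ? { ...a, schedulable } : a
      )
    }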
Co-Authored-By: Claude Opus 4.5
---
frontend/src/views/admin/AccountsView.vue | 6 ------
1 file changed, 6 deletions(-)
diff --git a/frontend/src/views/admin/AccountsView.vue b/frontend/src/views/admin/AccountsView.vue
index f465c001..cf484303 100644
--- a/frontend/src/views/admin/AccountsView.vue
+++ b/frontend/src/views/admin/AccountsView.vue
@@ -357,9 +357,6 @@ const handleBulkToggleSchedulable = async (schedulable: boolean) => {
} else {
selIds.value = hasIds ? [] : accountIds
}
- load().catch((error) => {
- console.error('Failed to refresh accounts:', error)
- })
} catch (error) {
console.error('Failed to bulk toggle schedulable:', error)
appStore.showError(t('common.error'))
@@ -383,9 +380,6 @@ const handleToggleSchedulable = async (a: Account) => {
try {
const updated = await adminAPI.accounts.setSchedulable(a.id, nextSchedulable)
updateSchedulableInList([a.id], updated?.schedulable ?? nextSchedulable)
- load().catch((error) => {
- console.error('Failed to refresh accounts:', error)
- })
} catch (error) {
console.error('Failed to toggle schedulable:', error)
appStore.showError(t('admin.accounts.failedToToggleSchedulable'))