diff --git a/.gitignore b/.gitignore index 93ae19f3..48172982 100644 --- a/.gitignore +++ b/.gitignore @@ -14,6 +14,9 @@ backend/server backend/sub2api backend/main +# Go 测试二进制 +*.test + # 测试覆盖率 *.out coverage.html @@ -80,6 +83,8 @@ temp/ *.log *.bak .cache/ +.dev/ +.serena/ # =================== # 构建产物 @@ -123,6 +128,5 @@ backend/cmd/server/server deploy/docker-compose.override.yml .gocache/ vite.config.js -!docs/ docs/* -!docs/dependency-security.md +.serena/ \ No newline at end of file diff --git a/PR_DESCRIPTION.md b/PR_DESCRIPTION.md new file mode 100644 index 00000000..b240f45c --- /dev/null +++ b/PR_DESCRIPTION.md @@ -0,0 +1,164 @@ +## 概述 + +全面增强运维监控系统(Ops)的错误日志管理和告警静默功能,优化前端 UI 组件代码质量和用户体验。本次更新重构了核心服务层和数据访问层,提升系统可维护性和运维效率。 + +## 主要改动 + +### 1. 错误日志查询优化 + +**功能特性:** +- 新增 GetErrorLogByID 接口,支持按 ID 精确查询错误详情 +- 优化错误日志过滤逻辑,支持多维度筛选(平台、阶段、来源、所有者等) +- 改进查询参数处理,简化代码结构 +- 增强错误分类和标准化处理 +- 支持错误解决状态追踪(resolved 字段) + +**技术实现:** +- `ops_handler.go` - 新增单条错误日志查询接口 +- `ops_repo.go` - 优化数据查询和过滤条件构建 +- `ops_models.go` - 扩展错误日志数据模型 +- 前端 API 接口同步更新 + +### 2. 告警静默功能 + +**功能特性:** +- 支持按规则、平台、分组、区域等维度静默告警 +- 可设置静默时长和原因说明 +- 静默记录可追溯,记录创建人和创建时间 +- 自动过期机制,避免永久静默 + +**技术实现:** +- `037_ops_alert_silences.sql` - 新增告警静默表 +- `ops_alerts.go` - 告警静默逻辑实现 +- `ops_alerts_handler.go` - 告警静默 API 接口 +- `OpsAlertEventsCard.vue` - 前端告警静默操作界面 + +**数据库结构:** + +| 字段 | 类型 | 说明 | +|------|------|------| +| rule_id | BIGINT | 告警规则 ID | +| platform | VARCHAR(64) | 平台标识 | +| group_id | BIGINT | 分组 ID(可选) | +| region | VARCHAR(64) | 区域(可选) | +| until | TIMESTAMPTZ | 静默截止时间 | +| reason | TEXT | 静默原因 | +| created_by | BIGINT | 创建人 ID | + +### 3. 错误分类标准化 + +**功能特性:** +- 统一错误阶段分类(request|auth|routing|upstream|network|internal) +- 规范错误归属分类(client|provider|platform) +- 标准化错误来源分类(client_request|upstream_http|gateway) +- 自动迁移历史数据到新分类体系 + +**技术实现:** +- `038_ops_errors_resolution_retry_results_and_standardize_classification.sql` - 分类标准化迁移 +- 自动映射历史遗留分类到新标准 +- 自动解决已恢复的上游错误(客户端状态码 < 400) + +### 4. 
Gateway 服务集成 + +**功能特性:** +- 完善各 Gateway 服务的 Ops 集成 +- 统一错误日志记录接口 +- 增强上游错误追踪能力 + +**涉及服务:** +- `antigravity_gateway_service.go` - Antigravity 网关集成 +- `gateway_service.go` - 通用网关集成 +- `gemini_messages_compat_service.go` - Gemini 兼容层集成 +- `openai_gateway_service.go` - OpenAI 网关集成 + +### 5. 前端 UI 优化 + +**代码重构:** +- 大幅简化错误详情模态框代码(从 828 行优化到 450 行) +- 优化错误日志表格组件,提升可读性 +- 清理未使用的 i18n 翻译,减少冗余 +- 统一组件代码风格和格式 +- 优化骨架屏组件,更好匹配实际看板布局 + +**布局改进:** +- 修复模态框内容溢出和滚动问题 +- 优化表格布局,使用 flex 布局确保正确显示 +- 改进看板头部布局和交互 +- 提升响应式体验 +- 骨架屏支持全屏模式适配 + +**交互优化:** +- 优化告警事件卡片功能和展示 +- 改进错误详情展示逻辑 +- 增强请求详情模态框 +- 完善运行时设置卡片 +- 改进加载动画效果 + +### 6. 国际化完善 + +**文案补充:** +- 补充错误日志相关的英文翻译 +- 添加告警静默功能的中英文文案 +- 完善提示文本和错误信息 +- 统一术语翻译标准 + +## 文件变更 + +**后端(26 个文件):** +- `backend/internal/handler/admin/ops_alerts_handler.go` - 告警接口增强 +- `backend/internal/handler/admin/ops_handler.go` - 错误日志接口优化 +- `backend/internal/handler/ops_error_logger.go` - 错误记录器增强 +- `backend/internal/repository/ops_repo.go` - 数据访问层重构 +- `backend/internal/repository/ops_repo_alerts.go` - 告警数据访问增强 +- `backend/internal/service/ops_*.go` - 核心服务层重构(10 个文件) +- `backend/internal/service/*_gateway_service.go` - Gateway 集成(4 个文件) +- `backend/internal/server/routes/admin.go` - 路由配置更新 +- `backend/migrations/*.sql` - 数据库迁移(2 个文件) +- 测试文件更新(5 个文件) + +**前端(13 个文件):** +- `frontend/src/views/admin/ops/OpsDashboard.vue` - 看板主页优化 +- `frontend/src/views/admin/ops/components/*.vue` - 组件重构(10 个文件) +- `frontend/src/api/admin/ops.ts` - API 接口扩展 +- `frontend/src/i18n/locales/*.ts` - 国际化文本(2 个文件) + +## 代码统计 + +- 44 个文件修改 +- 3733 行新增 +- 995 行删除 +- 净增加 2738 行 + +## 核心改进 + +**可维护性提升:** +- 重构核心服务层,职责更清晰 +- 简化前端组件代码,降低复杂度 +- 统一代码风格和命名规范 +- 清理冗余代码和未使用的翻译 +- 标准化错误分类体系 + +**功能完善:** +- 告警静默功能,减少告警噪音 +- 错误日志查询优化,提升运维效率 +- Gateway 服务集成完善,统一监控能力 +- 错误解决状态追踪,便于问题管理 + +**用户体验优化:** +- 修复多个 UI 布局问题 +- 优化交互流程 +- 完善国际化支持 +- 提升响应式体验 +- 改进加载状态展示 + +## 测试验证 + +- ✅ 错误日志查询和过滤功能 +- ✅ 告警静默创建和自动过期 +- ✅ 错误分类标准化迁移 +- ✅ Gateway 服务错误日志记录 +- ✅ 前端组件布局和交互 +- ✅ 骨架屏全屏模式适配 +- ✅ 国际化文本完整性 +- ✅ API 
接口功能正确性 +- ✅ 数据库迁移执行成功 diff --git a/README_CN.md b/README_CN.md index b8a818b3..41d399d5 100644 --- a/README_CN.md +++ b/README_CN.md @@ -57,6 +57,13 @@ Sub2API 是一个 AI API 网关平台,用于分发和管理 AI 产品订阅( --- +## OpenAI Responses 兼容注意事项 + +- 当请求包含 `function_call_output` 时,需要携带 `previous_response_id`,或在 `input` 中包含带 `call_id` 的 `tool_call`/`function_call`,或带非空 `id` 且与 `function_call_output.call_id` 匹配的 `item_reference`。 +- 若依赖上游历史记录,网关会强制 `store=true` 并需要复用 `previous_response_id`,以避免出现 “No tool call found for function call output” 错误。 + +--- + ## 部署方式 ### 方式一:脚本安装(推荐) diff --git a/backend/.dockerignore b/backend/.dockerignore new file mode 100644 index 00000000..c1c2a854 --- /dev/null +++ b/backend/.dockerignore @@ -0,0 +1,2 @@ +.cache/ +.DS_Store diff --git a/backend/.golangci.yml b/backend/.golangci.yml index 52072b16..3ec692a8 100644 --- a/backend/.golangci.yml +++ b/backend/.golangci.yml @@ -18,6 +18,12 @@ linters: list-mode: original files: - "**/internal/service/**" + - "!**/internal/service/ops_aggregation_service.go" + - "!**/internal/service/ops_alert_evaluator_service.go" + - "!**/internal/service/ops_cleanup_service.go" + - "!**/internal/service/ops_metrics_collector.go" + - "!**/internal/service/ops_scheduled_report_service.go" + - "!**/internal/service/wire.go" deny: - pkg: github.com/Wei-Shaw/sub2api/internal/repository desc: "service must not import repository" diff --git a/backend/cmd/jwtgen/main.go b/backend/cmd/jwtgen/main.go index 1b7f4aa4..c461198b 100644 --- a/backend/cmd/jwtgen/main.go +++ b/backend/cmd/jwtgen/main.go @@ -33,7 +33,7 @@ func main() { }() userRepo := repository.NewUserRepository(client, sqlDB) - authService := service.NewAuthService(userRepo, cfg, nil, nil, nil, nil) + authService := service.NewAuthService(userRepo, cfg, nil, nil, nil, nil, nil) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() diff --git a/backend/cmd/server/wire.go b/backend/cmd/server/wire.go index 9447de45..0a5f9744 100644 --- 
a/backend/cmd/server/wire.go +++ b/backend/cmd/server/wire.go @@ -62,6 +62,12 @@ func provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo { func provideCleanup( entClient *ent.Client, rdb *redis.Client, + opsMetricsCollector *service.OpsMetricsCollector, + opsAggregation *service.OpsAggregationService, + opsAlertEvaluator *service.OpsAlertEvaluatorService, + opsCleanup *service.OpsCleanupService, + opsScheduledReport *service.OpsScheduledReportService, + schedulerSnapshot *service.SchedulerSnapshotService, tokenRefresh *service.TokenRefreshService, accountExpiry *service.AccountExpiryService, pricing *service.PricingService, @@ -81,6 +87,42 @@ func provideCleanup( name string fn func() error }{ + {"OpsScheduledReportService", func() error { + if opsScheduledReport != nil { + opsScheduledReport.Stop() + } + return nil + }}, + {"OpsCleanupService", func() error { + if opsCleanup != nil { + opsCleanup.Stop() + } + return nil + }}, + {"OpsAlertEvaluatorService", func() error { + if opsAlertEvaluator != nil { + opsAlertEvaluator.Stop() + } + return nil + }}, + {"OpsAggregationService", func() error { + if opsAggregation != nil { + opsAggregation.Stop() + } + return nil + }}, + {"OpsMetricsCollector", func() error { + if opsMetricsCollector != nil { + opsMetricsCollector.Stop() + } + return nil + }}, + {"SchedulerSnapshotService", func() error { + if schedulerSnapshot != nil { + schedulerSnapshot.Stop() + } + return nil + }}, {"TokenRefreshService", func() error { tokenRefresh.Stop() return nil diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 85bed3f3..31e47332 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -51,33 +51,44 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { turnstileVerifier := repository.NewTurnstileVerifier() turnstileService := service.NewTurnstileService(settingService, turnstileVerifier) emailQueueService := 
service.ProvideEmailQueueService(emailService) - authService := service.NewAuthService(userRepository, configConfig, settingService, emailService, turnstileService, emailQueueService) - userService := service.NewUserService(userRepository) - authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService) - userHandler := handler.NewUserHandler(userService) + promoCodeRepository := repository.NewPromoCodeRepository(client) + billingCache := repository.NewBillingCache(redisClient) + userSubscriptionRepository := repository.NewUserSubscriptionRepository(client) + billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, configConfig) apiKeyRepository := repository.NewAPIKeyRepository(client) groupRepository := repository.NewGroupRepository(client, db) - userSubscriptionRepository := repository.NewUserSubscriptionRepository(client) apiKeyCache := repository.NewAPIKeyCache(redisClient) apiKeyService := service.NewAPIKeyService(apiKeyRepository, userRepository, groupRepository, userSubscriptionRepository, apiKeyCache, configConfig) + apiKeyAuthCacheInvalidator := service.ProvideAPIKeyAuthCacheInvalidator(apiKeyService) + promoService := service.NewPromoService(promoCodeRepository, userRepository, billingCacheService, client, apiKeyAuthCacheInvalidator) + authService := service.NewAuthService(userRepository, configConfig, settingService, emailService, turnstileService, emailQueueService, promoService) + userService := service.NewUserService(userRepository, apiKeyAuthCacheInvalidator) + authHandler := handler.NewAuthHandler(configConfig, authService, userService, settingService, promoService) + userHandler := handler.NewUserHandler(userService) apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService) usageLogRepository := repository.NewUsageLogRepository(client, db) - usageService := service.NewUsageService(usageLogRepository, userRepository, client) + usageService := 
service.NewUsageService(usageLogRepository, userRepository, client, apiKeyAuthCacheInvalidator) usageHandler := handler.NewUsageHandler(usageService, apiKeyService) redeemCodeRepository := repository.NewRedeemCodeRepository(client) - billingCache := repository.NewBillingCache(redisClient) - billingCacheService := service.NewBillingCacheService(billingCache, userRepository, userSubscriptionRepository, configConfig) subscriptionService := service.NewSubscriptionService(groupRepository, userSubscriptionRepository, billingCacheService) redeemCache := repository.NewRedeemCache(redisClient) - redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client) + redeemService := service.NewRedeemService(redeemCodeRepository, userRepository, subscriptionService, redeemCache, billingCacheService, client, apiKeyAuthCacheInvalidator) redeemHandler := handler.NewRedeemHandler(redeemService) subscriptionHandler := handler.NewSubscriptionHandler(subscriptionService) - dashboardService := service.NewDashboardService(usageLogRepository) - dashboardHandler := admin.NewDashboardHandler(dashboardService) + dashboardAggregationRepository := repository.NewDashboardAggregationRepository(db) + dashboardStatsCache := repository.NewDashboardCache(redisClient, configConfig) + dashboardService := service.NewDashboardService(usageLogRepository, dashboardAggregationRepository, dashboardStatsCache, configConfig) + timingWheelService, err := service.ProvideTimingWheelService() + if err != nil { + return nil, err + } + dashboardAggregationService := service.ProvideDashboardAggregationService(dashboardAggregationRepository, timingWheelService, configConfig) + dashboardHandler := admin.NewDashboardHandler(dashboardService, dashboardAggregationService) accountRepository := repository.NewAccountRepository(client, db) proxyRepository := repository.NewProxyRepository(client, db) proxyExitInfoProber := 
repository.NewProxyExitInfoProber(configConfig) - adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, billingCacheService, proxyExitInfoProber) + proxyLatencyCache := repository.NewProxyLatencyCache(redisClient) + adminService := service.NewAdminService(userRepository, groupRepository, accountRepository, proxyRepository, apiKeyRepository, redeemCodeRepository, billingCacheService, proxyExitInfoProber, proxyLatencyCache, apiKeyAuthCacheInvalidator) adminUserHandler := admin.NewUserHandler(adminService) groupHandler := admin.NewGroupHandler(adminService) claudeOAuthClient := repository.NewClaudeOAuthClient() @@ -90,12 +101,14 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { antigravityOAuthService := service.NewAntigravityOAuthService(proxyRepository) geminiQuotaService := service.NewGeminiQuotaService(configConfig, settingRepository) tempUnschedCache := repository.NewTempUnschedCache(redisClient) - rateLimitService := service.NewRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache) + timeoutCounterCache := repository.NewTimeoutCounterCache(redisClient) + geminiTokenCache := repository.NewGeminiTokenCache(redisClient) + compositeTokenCacheInvalidator := service.NewCompositeTokenCacheInvalidator(geminiTokenCache) + rateLimitService := service.ProvideRateLimitService(accountRepository, usageLogRepository, configConfig, geminiQuotaService, tempUnschedCache, timeoutCounterCache, settingService, compositeTokenCacheInvalidator) claudeUsageFetcher := repository.NewClaudeUsageFetcher() antigravityQuotaFetcher := service.NewAntigravityQuotaFetcher(proxyRepository) usageCache := service.NewUsageCache() accountUsageService := service.NewAccountUsageService(accountRepository, usageLogRepository, claudeUsageFetcher, geminiQuotaService, antigravityQuotaFetcher, usageCache) - geminiTokenCache := 
repository.NewGeminiTokenCache(redisClient) geminiTokenProvider := service.NewGeminiTokenProvider(accountRepository, geminiTokenCache, geminiOAuthService) gatewayCache := repository.NewGatewayCache(redisClient) antigravityTokenProvider := service.NewAntigravityTokenProvider(accountRepository, geminiTokenCache, antigravityOAuthService) @@ -105,14 +118,36 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { concurrencyCache := repository.ProvideConcurrencyCache(redisClient, configConfig) concurrencyService := service.ProvideConcurrencyService(concurrencyCache, accountRepository, configConfig) crsSyncService := service.NewCRSSyncService(accountRepository, proxyRepository, oAuthService, openAIOAuthService, geminiOAuthService, configConfig) - accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService) + sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig) + accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache) oAuthHandler := admin.NewOAuthHandler(oAuthService) openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService) geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService) antigravityOAuthHandler := admin.NewAntigravityOAuthHandler(antigravityOAuthService) proxyHandler := admin.NewProxyHandler(adminService) adminRedeemHandler := admin.NewRedeemHandler(adminService) - settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService) + promoHandler := admin.NewPromoHandler(promoService) + opsRepository := repository.NewOpsRepository(db) + schedulerCache := 
repository.NewSchedulerCache(redisClient) + schedulerOutboxRepository := repository.NewSchedulerOutboxRepository(db) + schedulerSnapshotService := service.ProvideSchedulerSnapshotService(schedulerCache, schedulerOutboxRepository, accountRepository, groupRepository, configConfig) + pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig) + pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient) + if err != nil { + return nil, err + } + billingService := service.NewBillingService(configConfig, pricingService) + identityCache := repository.NewIdentityCache(redisClient) + identityService := service.NewIdentityService(identityCache) + deferredService := service.ProvideDeferredService(accountRepository, timingWheelService) + claudeTokenProvider := service.NewClaudeTokenProvider(accountRepository, geminiTokenCache, oAuthService) + gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, identityService, httpUpstream, deferredService, claudeTokenProvider, sessionLimitCache) + openAITokenProvider := service.NewOpenAITokenProvider(accountRepository, geminiTokenCache, openAIOAuthService) + openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, schedulerSnapshotService, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService, openAITokenProvider) + geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, schedulerSnapshotService, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig) + opsService := service.NewOpsService(opsRepository, settingRepository, 
configConfig, accountRepository, concurrencyService, gatewayService, openAIGatewayService, geminiMessagesCompatService, antigravityGatewayService) + settingHandler := admin.NewSettingHandler(settingService, emailService, turnstileService, opsService) + opsHandler := admin.NewOpsHandler(opsService) updateCache := repository.NewUpdateCache(redisClient) gitHubReleaseClient := repository.ProvideGitHubReleaseClient(configConfig) serviceBuildInfo := provideServiceBuildInfo(buildInfo) @@ -124,32 +159,24 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { userAttributeValueRepository := repository.NewUserAttributeValueRepository(client) userAttributeService := service.NewUserAttributeService(userAttributeDefinitionRepository, userAttributeValueRepository) userAttributeHandler := admin.NewUserAttributeHandler(userAttributeService) - adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, settingHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler) - pricingRemoteClient := repository.ProvidePricingRemoteClient(configConfig) - pricingService, err := service.ProvidePricingService(configConfig, pricingRemoteClient) - if err != nil { - return nil, err - } - billingService := service.NewBillingService(configConfig, pricingService) - identityCache := repository.NewIdentityCache(redisClient) - identityService := service.NewIdentityService(identityCache) - timingWheelService := service.ProvideTimingWheelService() - deferredService := service.ProvideDeferredService(accountRepository, timingWheelService) - gatewayService := service.NewGatewayService(accountRepository, groupRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, concurrencyService, billingService, rateLimitService, billingCacheService, 
identityService, httpUpstream, deferredService) - geminiMessagesCompatService := service.NewGeminiMessagesCompatService(accountRepository, groupRepository, gatewayCache, geminiTokenProvider, rateLimitService, httpUpstream, antigravityGatewayService, configConfig) + adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler) gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, configConfig) - openAIGatewayService := service.NewOpenAIGatewayService(accountRepository, usageLogRepository, userRepository, userSubscriptionRepository, gatewayCache, configConfig, concurrencyService, billingService, rateLimitService, billingCacheService, httpUpstream, deferredService) openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, configConfig) handlerSettingHandler := handler.ProvideSettingHandler(settingService, buildInfo) handlers := handler.ProvideHandlers(authHandler, userHandler, apiKeyHandler, usageHandler, redeemHandler, subscriptionHandler, adminHandlers, gatewayHandler, openAIGatewayHandler, handlerSettingHandler) jwtAuthMiddleware := middleware.NewJWTAuthMiddleware(authService, userService) adminAuthMiddleware := middleware.NewAdminAuthMiddleware(authService, userService, settingService) apiKeyAuthMiddleware := middleware.NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, configConfig) - engine := server.ProvideRouter(configConfig, handlers, jwtAuthMiddleware, adminAuthMiddleware, apiKeyAuthMiddleware, apiKeyService, subscriptionService) + engine := server.ProvideRouter(configConfig, handlers, 
jwtAuthMiddleware, adminAuthMiddleware, apiKeyAuthMiddleware, apiKeyService, subscriptionService, opsService, settingService, redisClient) httpServer := server.ProvideHTTPServer(configConfig, engine) - tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, configConfig) + opsMetricsCollector := service.ProvideOpsMetricsCollector(opsRepository, settingRepository, accountRepository, concurrencyService, db, redisClient, configConfig) + opsAggregationService := service.ProvideOpsAggregationService(opsRepository, settingRepository, db, redisClient, configConfig) + opsAlertEvaluatorService := service.ProvideOpsAlertEvaluatorService(opsService, opsRepository, emailService, redisClient, configConfig) + opsCleanupService := service.ProvideOpsCleanupService(opsRepository, db, redisClient, configConfig) + opsScheduledReportService := service.ProvideOpsScheduledReportService(opsService, userService, emailService, redisClient, configConfig) + tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, configConfig) accountExpiryService := service.ProvideAccountExpiryService(accountRepository) - v := provideCleanup(client, redisClient, tokenRefreshService, accountExpiryService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) + v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, pricingService, emailQueueService, billingCacheService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) application := &Application{ Server: httpServer, Cleanup: v, @@ -174,6 +201,12 @@ func 
provideServiceBuildInfo(buildInfo handler.BuildInfo) service.BuildInfo { func provideCleanup( entClient *ent.Client, rdb *redis.Client, + opsMetricsCollector *service.OpsMetricsCollector, + opsAggregation *service.OpsAggregationService, + opsAlertEvaluator *service.OpsAlertEvaluatorService, + opsCleanup *service.OpsCleanupService, + opsScheduledReport *service.OpsScheduledReportService, + schedulerSnapshot *service.SchedulerSnapshotService, tokenRefresh *service.TokenRefreshService, accountExpiry *service.AccountExpiryService, pricing *service.PricingService, @@ -192,6 +225,42 @@ func provideCleanup( name string fn func() error }{ + {"OpsScheduledReportService", func() error { + if opsScheduledReport != nil { + opsScheduledReport.Stop() + } + return nil + }}, + {"OpsCleanupService", func() error { + if opsCleanup != nil { + opsCleanup.Stop() + } + return nil + }}, + {"OpsAlertEvaluatorService", func() error { + if opsAlertEvaluator != nil { + opsAlertEvaluator.Stop() + } + return nil + }}, + {"OpsAggregationService", func() error { + if opsAggregation != nil { + opsAggregation.Stop() + } + return nil + }}, + {"OpsMetricsCollector", func() error { + if opsMetricsCollector != nil { + opsMetricsCollector.Stop() + } + return nil + }}, + {"SchedulerSnapshotService", func() error { + if schedulerSnapshot != nil { + schedulerSnapshot.Stop() + } + return nil + }}, {"TokenRefreshService", func() error { tokenRefresh.Stop() return nil diff --git a/backend/ent/account.go b/backend/ent/account.go index e960d324..038aa7e5 100644 --- a/backend/ent/account.go +++ b/backend/ent/account.go @@ -43,6 +43,8 @@ type Account struct { Concurrency int `json:"concurrency,omitempty"` // Priority holds the value of the "priority" field. Priority int `json:"priority,omitempty"` + // RateMultiplier holds the value of the "rate_multiplier" field. + RateMultiplier float64 `json:"rate_multiplier,omitempty"` // Status holds the value of the "status" field. 
Status string `json:"status,omitempty"` // ErrorMessage holds the value of the "error_message" field. @@ -135,6 +137,8 @@ func (*Account) scanValues(columns []string) ([]any, error) { values[i] = new([]byte) case account.FieldAutoPauseOnExpired, account.FieldSchedulable: values[i] = new(sql.NullBool) + case account.FieldRateMultiplier: + values[i] = new(sql.NullFloat64) case account.FieldID, account.FieldProxyID, account.FieldConcurrency, account.FieldPriority: values[i] = new(sql.NullInt64) case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldSessionWindowStatus: @@ -241,6 +245,12 @@ func (_m *Account) assignValues(columns []string, values []any) error { } else if value.Valid { _m.Priority = int(value.Int64) } + case account.FieldRateMultiplier: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field rate_multiplier", values[i]) + } else if value.Valid { + _m.RateMultiplier = value.Float64 + } case account.FieldStatus: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field status", values[i]) @@ -420,6 +430,9 @@ func (_m *Account) String() string { builder.WriteString("priority=") builder.WriteString(fmt.Sprintf("%v", _m.Priority)) builder.WriteString(", ") + builder.WriteString("rate_multiplier=") + builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier)) + builder.WriteString(", ") builder.WriteString("status=") builder.WriteString(_m.Status) builder.WriteString(", ") diff --git a/backend/ent/account/account.go b/backend/ent/account/account.go index 402e16ee..73c0e8c2 100644 --- a/backend/ent/account/account.go +++ b/backend/ent/account/account.go @@ -39,6 +39,8 @@ const ( FieldConcurrency = "concurrency" // FieldPriority holds the string denoting the priority field in the database. 
FieldPriority = "priority" + // FieldRateMultiplier holds the string denoting the rate_multiplier field in the database. + FieldRateMultiplier = "rate_multiplier" // FieldStatus holds the string denoting the status field in the database. FieldStatus = "status" // FieldErrorMessage holds the string denoting the error_message field in the database. @@ -116,6 +118,7 @@ var Columns = []string{ FieldProxyID, FieldConcurrency, FieldPriority, + FieldRateMultiplier, FieldStatus, FieldErrorMessage, FieldLastUsedAt, @@ -174,6 +177,8 @@ var ( DefaultConcurrency int // DefaultPriority holds the default value on creation for the "priority" field. DefaultPriority int + // DefaultRateMultiplier holds the default value on creation for the "rate_multiplier" field. + DefaultRateMultiplier float64 // DefaultStatus holds the default value on creation for the "status" field. DefaultStatus string // StatusValidator is a validator for the "status" field. It is called by the builders before save. @@ -244,6 +249,11 @@ func ByPriority(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldPriority, opts...).ToFunc() } +// ByRateMultiplier orders the results by the rate_multiplier field. +func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc() +} + // ByStatus orders the results by the status field. func ByStatus(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldStatus, opts...).ToFunc() diff --git a/backend/ent/account/where.go b/backend/ent/account/where.go index 6c639fd1..dea1127a 100644 --- a/backend/ent/account/where.go +++ b/backend/ent/account/where.go @@ -105,6 +105,11 @@ func Priority(v int) predicate.Account { return predicate.Account(sql.FieldEQ(FieldPriority, v)) } +// RateMultiplier applies equality check predicate on the "rate_multiplier" field. It's identical to RateMultiplierEQ. 
+func RateMultiplier(v float64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateMultiplier, v)) +} + // Status applies equality check predicate on the "status" field. It's identical to StatusEQ. func Status(v string) predicate.Account { return predicate.Account(sql.FieldEQ(FieldStatus, v)) @@ -675,6 +680,46 @@ func PriorityLTE(v int) predicate.Account { return predicate.Account(sql.FieldLTE(FieldPriority, v)) } +// RateMultiplierEQ applies the EQ predicate on the "rate_multiplier" field. +func RateMultiplierEQ(v float64) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldRateMultiplier, v)) +} + +// RateMultiplierNEQ applies the NEQ predicate on the "rate_multiplier" field. +func RateMultiplierNEQ(v float64) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldRateMultiplier, v)) +} + +// RateMultiplierIn applies the In predicate on the "rate_multiplier" field. +func RateMultiplierIn(vs ...float64) predicate.Account { + return predicate.Account(sql.FieldIn(FieldRateMultiplier, vs...)) +} + +// RateMultiplierNotIn applies the NotIn predicate on the "rate_multiplier" field. +func RateMultiplierNotIn(vs ...float64) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldRateMultiplier, vs...)) +} + +// RateMultiplierGT applies the GT predicate on the "rate_multiplier" field. +func RateMultiplierGT(v float64) predicate.Account { + return predicate.Account(sql.FieldGT(FieldRateMultiplier, v)) +} + +// RateMultiplierGTE applies the GTE predicate on the "rate_multiplier" field. +func RateMultiplierGTE(v float64) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldRateMultiplier, v)) +} + +// RateMultiplierLT applies the LT predicate on the "rate_multiplier" field. +func RateMultiplierLT(v float64) predicate.Account { + return predicate.Account(sql.FieldLT(FieldRateMultiplier, v)) +} + +// RateMultiplierLTE applies the LTE predicate on the "rate_multiplier" field. 
+func RateMultiplierLTE(v float64) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldRateMultiplier, v)) +} + // StatusEQ applies the EQ predicate on the "status" field. func StatusEQ(v string) predicate.Account { return predicate.Account(sql.FieldEQ(FieldStatus, v)) diff --git a/backend/ent/account_create.go b/backend/ent/account_create.go index 0725d43d..42a561cf 100644 --- a/backend/ent/account_create.go +++ b/backend/ent/account_create.go @@ -153,6 +153,20 @@ func (_c *AccountCreate) SetNillablePriority(v *int) *AccountCreate { return _c } +// SetRateMultiplier sets the "rate_multiplier" field. +func (_c *AccountCreate) SetRateMultiplier(v float64) *AccountCreate { + _c.mutation.SetRateMultiplier(v) + return _c +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_c *AccountCreate) SetNillableRateMultiplier(v *float64) *AccountCreate { + if v != nil { + _c.SetRateMultiplier(*v) + } + return _c +} + // SetStatus sets the "status" field. 
func (_c *AccountCreate) SetStatus(v string) *AccountCreate { _c.mutation.SetStatus(v) @@ -429,6 +443,10 @@ func (_c *AccountCreate) defaults() error { v := account.DefaultPriority _c.mutation.SetPriority(v) } + if _, ok := _c.mutation.RateMultiplier(); !ok { + v := account.DefaultRateMultiplier + _c.mutation.SetRateMultiplier(v) + } if _, ok := _c.mutation.Status(); !ok { v := account.DefaultStatus _c.mutation.SetStatus(v) @@ -488,6 +506,9 @@ func (_c *AccountCreate) check() error { if _, ok := _c.mutation.Priority(); !ok { return &ValidationError{Name: "priority", err: errors.New(`ent: missing required field "Account.priority"`)} } + if _, ok := _c.mutation.RateMultiplier(); !ok { + return &ValidationError{Name: "rate_multiplier", err: errors.New(`ent: missing required field "Account.rate_multiplier"`)} + } if _, ok := _c.mutation.Status(); !ok { return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Account.status"`)} } @@ -578,6 +599,10 @@ func (_c *AccountCreate) createSpec() (*Account, *sqlgraph.CreateSpec) { _spec.SetField(account.FieldPriority, field.TypeInt, value) _node.Priority = value } + if value, ok := _c.mutation.RateMultiplier(); ok { + _spec.SetField(account.FieldRateMultiplier, field.TypeFloat64, value) + _node.RateMultiplier = value + } if value, ok := _c.mutation.Status(); ok { _spec.SetField(account.FieldStatus, field.TypeString, value) _node.Status = value @@ -893,6 +918,24 @@ func (u *AccountUpsert) AddPriority(v int) *AccountUpsert { return u } +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *AccountUpsert) SetRateMultiplier(v float64) *AccountUpsert { + u.Set(account.FieldRateMultiplier, v) + return u +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *AccountUpsert) UpdateRateMultiplier() *AccountUpsert { + u.SetExcluded(account.FieldRateMultiplier) + return u +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. 
+func (u *AccountUpsert) AddRateMultiplier(v float64) *AccountUpsert { + u.Add(account.FieldRateMultiplier, v) + return u +} + // SetStatus sets the "status" field. func (u *AccountUpsert) SetStatus(v string) *AccountUpsert { u.Set(account.FieldStatus, v) @@ -1325,6 +1368,27 @@ func (u *AccountUpsertOne) UpdatePriority() *AccountUpsertOne { }) } +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *AccountUpsertOne) SetRateMultiplier(v float64) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetRateMultiplier(v) + }) +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *AccountUpsertOne) AddRateMultiplier(v float64) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.AddRateMultiplier(v) + }) +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateRateMultiplier() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateRateMultiplier() + }) +} + // SetStatus sets the "status" field. func (u *AccountUpsertOne) SetStatus(v string) *AccountUpsertOne { return u.Update(func(s *AccountUpsert) { @@ -1956,6 +2020,27 @@ func (u *AccountUpsertBulk) UpdatePriority() *AccountUpsertBulk { }) } +// SetRateMultiplier sets the "rate_multiplier" field. +func (u *AccountUpsertBulk) SetRateMultiplier(v float64) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetRateMultiplier(v) + }) +} + +// AddRateMultiplier adds v to the "rate_multiplier" field. +func (u *AccountUpsertBulk) AddRateMultiplier(v float64) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.AddRateMultiplier(v) + }) +} + +// UpdateRateMultiplier sets the "rate_multiplier" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateRateMultiplier() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateRateMultiplier() + }) +} + // SetStatus sets the "status" field. 
func (u *AccountUpsertBulk) SetStatus(v string) *AccountUpsertBulk { return u.Update(func(s *AccountUpsert) { diff --git a/backend/ent/account_query.go b/backend/ent/account_query.go index 3e363ecd..1761fa63 100644 --- a/backend/ent/account_query.go +++ b/backend/ent/account_query.go @@ -9,6 +9,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -31,6 +32,7 @@ type AccountQuery struct { withProxy *ProxyQuery withUsageLogs *UsageLogQuery withAccountGroups *AccountGroupQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -495,6 +497,9 @@ func (_q *AccountQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Acco node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -690,6 +695,9 @@ func (_q *AccountQuery) loadAccountGroups(ctx context.Context, query *AccountGro func (_q *AccountQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -755,6 +763,9 @@ func (_q *AccountQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -772,6 +783,32 @@ func (_q *AccountQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. 
+func (_q *AccountQuery) ForUpdate(opts ...sql.LockOption) *AccountQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *AccountQuery) ForShare(opts ...sql.LockOption) *AccountQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // AccountGroupBy is the group-by builder for Account entities. type AccountGroupBy struct { selector diff --git a/backend/ent/account_update.go b/backend/ent/account_update.go index dcc3212d..63fab096 100644 --- a/backend/ent/account_update.go +++ b/backend/ent/account_update.go @@ -193,6 +193,27 @@ func (_u *AccountUpdate) AddPriority(v int) *AccountUpdate { return _u } +// SetRateMultiplier sets the "rate_multiplier" field. +func (_u *AccountUpdate) SetRateMultiplier(v float64) *AccountUpdate { + _u.mutation.ResetRateMultiplier() + _u.mutation.SetRateMultiplier(v) + return _u +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableRateMultiplier(v *float64) *AccountUpdate { + if v != nil { + _u.SetRateMultiplier(*v) + } + return _u +} + +// AddRateMultiplier adds value to the "rate_multiplier" field. +func (_u *AccountUpdate) AddRateMultiplier(v float64) *AccountUpdate { + _u.mutation.AddRateMultiplier(v) + return _u +} + // SetStatus sets the "status" field. 
func (_u *AccountUpdate) SetStatus(v string) *AccountUpdate { _u.mutation.SetStatus(v) @@ -629,6 +650,12 @@ func (_u *AccountUpdate) sqlSave(ctx context.Context) (_node int, err error) { if value, ok := _u.mutation.AddedPriority(); ok { _spec.AddField(account.FieldPriority, field.TypeInt, value) } + if value, ok := _u.mutation.RateMultiplier(); ok { + _spec.SetField(account.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedRateMultiplier(); ok { + _spec.AddField(account.FieldRateMultiplier, field.TypeFloat64, value) + } if value, ok := _u.mutation.Status(); ok { _spec.SetField(account.FieldStatus, field.TypeString, value) } @@ -1005,6 +1032,27 @@ func (_u *AccountUpdateOne) AddPriority(v int) *AccountUpdateOne { return _u } +// SetRateMultiplier sets the "rate_multiplier" field. +func (_u *AccountUpdateOne) SetRateMultiplier(v float64) *AccountUpdateOne { + _u.mutation.ResetRateMultiplier() + _u.mutation.SetRateMultiplier(v) + return _u +} + +// SetNillableRateMultiplier sets the "rate_multiplier" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableRateMultiplier(v *float64) *AccountUpdateOne { + if v != nil { + _u.SetRateMultiplier(*v) + } + return _u +} + +// AddRateMultiplier adds value to the "rate_multiplier" field. +func (_u *AccountUpdateOne) AddRateMultiplier(v float64) *AccountUpdateOne { + _u.mutation.AddRateMultiplier(v) + return _u +} + // SetStatus sets the "status" field. 
func (_u *AccountUpdateOne) SetStatus(v string) *AccountUpdateOne { _u.mutation.SetStatus(v) @@ -1471,6 +1519,12 @@ func (_u *AccountUpdateOne) sqlSave(ctx context.Context) (_node *Account, err er if value, ok := _u.mutation.AddedPriority(); ok { _spec.AddField(account.FieldPriority, field.TypeInt, value) } + if value, ok := _u.mutation.RateMultiplier(); ok { + _spec.SetField(account.FieldRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedRateMultiplier(); ok { + _spec.AddField(account.FieldRateMultiplier, field.TypeFloat64, value) + } if value, ok := _u.mutation.Status(); ok { _spec.SetField(account.FieldStatus, field.TypeString, value) } diff --git a/backend/ent/accountgroup_query.go b/backend/ent/accountgroup_query.go index 98e1c3f6..d0a4f58d 100644 --- a/backend/ent/accountgroup_query.go +++ b/backend/ent/accountgroup_query.go @@ -8,6 +8,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "github.com/Wei-Shaw/sub2api/ent/account" @@ -25,6 +26,7 @@ type AccountGroupQuery struct { predicates []predicate.AccountGroup withAccount *AccountQuery withGroup *GroupQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -347,6 +349,9 @@ func (_q *AccountGroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([] node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -432,6 +437,9 @@ func (_q *AccountGroupQuery) loadGroup(ctx context.Context, query *GroupQuery, n func (_q *AccountGroupQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Unique = false _spec.Node.Columns = nil return sqlgraph.CountNodes(ctx, _q.driver, _spec) @@ -495,6 +503,9 @@ func (_q *AccountGroupQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -512,6 +523,32 @@ func (_q *AccountGroupQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *AccountGroupQuery) ForUpdate(opts ...sql.LockOption) *AccountGroupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (_q *AccountGroupQuery) ForShare(opts ...sql.LockOption) *AccountGroupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // AccountGroupGroupBy is the group-by builder for AccountGroup entities. type AccountGroupGroupBy struct { selector diff --git a/backend/ent/apikey.go b/backend/ent/apikey.go index fe3ad0cf..95586017 100644 --- a/backend/ent/apikey.go +++ b/backend/ent/apikey.go @@ -3,6 +3,7 @@ package ent import ( + "encoding/json" "fmt" "strings" "time" @@ -35,6 +36,10 @@ type APIKey struct { GroupID *int64 `json:"group_id,omitempty"` // Status holds the value of the "status" field. Status string `json:"status,omitempty"` + // Allowed IPs/CIDRs, e.g. ["192.168.1.100", "10.0.0.0/8"] + IPWhitelist []string `json:"ip_whitelist,omitempty"` + // Blocked IPs/CIDRs + IPBlacklist []string `json:"ip_blacklist,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the APIKeyQuery when eager-loading is set. 
Edges APIKeyEdges `json:"edges"` @@ -90,6 +95,8 @@ func (*APIKey) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { + case apikey.FieldIPWhitelist, apikey.FieldIPBlacklist: + values[i] = new([]byte) case apikey.FieldID, apikey.FieldUserID, apikey.FieldGroupID: values[i] = new(sql.NullInt64) case apikey.FieldKey, apikey.FieldName, apikey.FieldStatus: @@ -167,6 +174,22 @@ func (_m *APIKey) assignValues(columns []string, values []any) error { } else if value.Valid { _m.Status = value.String } + case apikey.FieldIPWhitelist: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field ip_whitelist", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.IPWhitelist); err != nil { + return fmt.Errorf("unmarshal field ip_whitelist: %w", err) + } + } + case apikey.FieldIPBlacklist: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field ip_blacklist", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.IPBlacklist); err != nil { + return fmt.Errorf("unmarshal field ip_blacklist: %w", err) + } + } default: _m.selectValues.Set(columns[i], values[i]) } @@ -245,6 +268,12 @@ func (_m *APIKey) String() string { builder.WriteString(", ") builder.WriteString("status=") builder.WriteString(_m.Status) + builder.WriteString(", ") + builder.WriteString("ip_whitelist=") + builder.WriteString(fmt.Sprintf("%v", _m.IPWhitelist)) + builder.WriteString(", ") + builder.WriteString("ip_blacklist=") + builder.WriteString(fmt.Sprintf("%v", _m.IPBlacklist)) builder.WriteByte(')') return builder.String() } diff --git a/backend/ent/apikey/apikey.go b/backend/ent/apikey/apikey.go index 91f7d620..564cddb1 100644 --- a/backend/ent/apikey/apikey.go +++ b/backend/ent/apikey/apikey.go @@ -31,6 +31,10 @@ const ( FieldGroupID = "group_id" // FieldStatus holds the string 
denoting the status field in the database. FieldStatus = "status" + // FieldIPWhitelist holds the string denoting the ip_whitelist field in the database. + FieldIPWhitelist = "ip_whitelist" + // FieldIPBlacklist holds the string denoting the ip_blacklist field in the database. + FieldIPBlacklist = "ip_blacklist" // EdgeUser holds the string denoting the user edge name in mutations. EdgeUser = "user" // EdgeGroup holds the string denoting the group edge name in mutations. @@ -73,6 +77,8 @@ var Columns = []string{ FieldName, FieldGroupID, FieldStatus, + FieldIPWhitelist, + FieldIPBlacklist, } // ValidColumn reports if the column name is valid (part of the table columns). diff --git a/backend/ent/apikey/where.go b/backend/ent/apikey/where.go index 5e739006..5152867f 100644 --- a/backend/ent/apikey/where.go +++ b/backend/ent/apikey/where.go @@ -470,6 +470,26 @@ func StatusContainsFold(v string) predicate.APIKey { return predicate.APIKey(sql.FieldContainsFold(FieldStatus, v)) } +// IPWhitelistIsNil applies the IsNil predicate on the "ip_whitelist" field. +func IPWhitelistIsNil() predicate.APIKey { + return predicate.APIKey(sql.FieldIsNull(FieldIPWhitelist)) +} + +// IPWhitelistNotNil applies the NotNil predicate on the "ip_whitelist" field. +func IPWhitelistNotNil() predicate.APIKey { + return predicate.APIKey(sql.FieldNotNull(FieldIPWhitelist)) +} + +// IPBlacklistIsNil applies the IsNil predicate on the "ip_blacklist" field. +func IPBlacklistIsNil() predicate.APIKey { + return predicate.APIKey(sql.FieldIsNull(FieldIPBlacklist)) +} + +// IPBlacklistNotNil applies the NotNil predicate on the "ip_blacklist" field. +func IPBlacklistNotNil() predicate.APIKey { + return predicate.APIKey(sql.FieldNotNull(FieldIPBlacklist)) +} + // HasUser applies the HasEdge predicate on the "user" edge. 
func HasUser() predicate.APIKey { return predicate.APIKey(func(s *sql.Selector) { diff --git a/backend/ent/apikey_create.go b/backend/ent/apikey_create.go index 2098872c..d5363be5 100644 --- a/backend/ent/apikey_create.go +++ b/backend/ent/apikey_create.go @@ -113,6 +113,18 @@ func (_c *APIKeyCreate) SetNillableStatus(v *string) *APIKeyCreate { return _c } +// SetIPWhitelist sets the "ip_whitelist" field. +func (_c *APIKeyCreate) SetIPWhitelist(v []string) *APIKeyCreate { + _c.mutation.SetIPWhitelist(v) + return _c +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (_c *APIKeyCreate) SetIPBlacklist(v []string) *APIKeyCreate { + _c.mutation.SetIPBlacklist(v) + return _c +} + // SetUser sets the "user" edge to the User entity. func (_c *APIKeyCreate) SetUser(v *User) *APIKeyCreate { return _c.SetUserID(v.ID) @@ -285,6 +297,14 @@ func (_c *APIKeyCreate) createSpec() (*APIKey, *sqlgraph.CreateSpec) { _spec.SetField(apikey.FieldStatus, field.TypeString, value) _node.Status = value } + if value, ok := _c.mutation.IPWhitelist(); ok { + _spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value) + _node.IPWhitelist = value + } + if value, ok := _c.mutation.IPBlacklist(); ok { + _spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value) + _node.IPBlacklist = value + } if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -483,6 +503,42 @@ func (u *APIKeyUpsert) UpdateStatus() *APIKeyUpsert { return u } +// SetIPWhitelist sets the "ip_whitelist" field. +func (u *APIKeyUpsert) SetIPWhitelist(v []string) *APIKeyUpsert { + u.Set(apikey.FieldIPWhitelist, v) + return u +} + +// UpdateIPWhitelist sets the "ip_whitelist" field to the value that was provided on create. +func (u *APIKeyUpsert) UpdateIPWhitelist() *APIKeyUpsert { + u.SetExcluded(apikey.FieldIPWhitelist) + return u +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. 
+func (u *APIKeyUpsert) ClearIPWhitelist() *APIKeyUpsert { + u.SetNull(apikey.FieldIPWhitelist) + return u +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (u *APIKeyUpsert) SetIPBlacklist(v []string) *APIKeyUpsert { + u.Set(apikey.FieldIPBlacklist, v) + return u +} + +// UpdateIPBlacklist sets the "ip_blacklist" field to the value that was provided on create. +func (u *APIKeyUpsert) UpdateIPBlacklist() *APIKeyUpsert { + u.SetExcluded(apikey.FieldIPBlacklist) + return u +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (u *APIKeyUpsert) ClearIPBlacklist() *APIKeyUpsert { + u.SetNull(apikey.FieldIPBlacklist) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create. // Using this option is equivalent to using: // @@ -640,6 +696,48 @@ func (u *APIKeyUpsertOne) UpdateStatus() *APIKeyUpsertOne { }) } +// SetIPWhitelist sets the "ip_whitelist" field. +func (u *APIKeyUpsertOne) SetIPWhitelist(v []string) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetIPWhitelist(v) + }) +} + +// UpdateIPWhitelist sets the "ip_whitelist" field to the value that was provided on create. +func (u *APIKeyUpsertOne) UpdateIPWhitelist() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateIPWhitelist() + }) +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (u *APIKeyUpsertOne) ClearIPWhitelist() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.ClearIPWhitelist() + }) +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (u *APIKeyUpsertOne) SetIPBlacklist(v []string) *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.SetIPBlacklist(v) + }) +} + +// UpdateIPBlacklist sets the "ip_blacklist" field to the value that was provided on create. 
+func (u *APIKeyUpsertOne) UpdateIPBlacklist() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateIPBlacklist() + }) +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (u *APIKeyUpsertOne) ClearIPBlacklist() *APIKeyUpsertOne { + return u.Update(func(s *APIKeyUpsert) { + s.ClearIPBlacklist() + }) +} + // Exec executes the query. func (u *APIKeyUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -963,6 +1061,48 @@ func (u *APIKeyUpsertBulk) UpdateStatus() *APIKeyUpsertBulk { }) } +// SetIPWhitelist sets the "ip_whitelist" field. +func (u *APIKeyUpsertBulk) SetIPWhitelist(v []string) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetIPWhitelist(v) + }) +} + +// UpdateIPWhitelist sets the "ip_whitelist" field to the value that was provided on create. +func (u *APIKeyUpsertBulk) UpdateIPWhitelist() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateIPWhitelist() + }) +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (u *APIKeyUpsertBulk) ClearIPWhitelist() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.ClearIPWhitelist() + }) +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (u *APIKeyUpsertBulk) SetIPBlacklist(v []string) *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.SetIPBlacklist(v) + }) +} + +// UpdateIPBlacklist sets the "ip_blacklist" field to the value that was provided on create. +func (u *APIKeyUpsertBulk) UpdateIPBlacklist() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.UpdateIPBlacklist() + }) +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (u *APIKeyUpsertBulk) ClearIPBlacklist() *APIKeyUpsertBulk { + return u.Update(func(s *APIKeyUpsert) { + s.ClearIPBlacklist() + }) +} + // Exec executes the query. 
func (u *APIKeyUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/backend/ent/apikey_query.go b/backend/ent/apikey_query.go index 6e5c0f5e..9eee4077 100644 --- a/backend/ent/apikey_query.go +++ b/backend/ent/apikey_query.go @@ -9,6 +9,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -29,6 +30,7 @@ type APIKeyQuery struct { withUser *UserQuery withGroup *GroupQuery withUsageLogs *UsageLogQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -458,6 +460,9 @@ func (_q *APIKeyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*APIKe node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -583,6 +588,9 @@ func (_q *APIKeyQuery) loadUsageLogs(ctx context.Context, query *UsageLogQuery, func (_q *APIKeyQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -651,6 +659,9 @@ func (_q *APIKeyQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -668,6 +679,32 @@ func (_q *APIKeyQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. 
+func (_q *APIKeyQuery) ForUpdate(opts ...sql.LockOption) *APIKeyQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *APIKeyQuery) ForShare(opts ...sql.LockOption) *APIKeyQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // APIKeyGroupBy is the group-by builder for APIKey entities. type APIKeyGroupBy struct { selector diff --git a/backend/ent/apikey_update.go b/backend/ent/apikey_update.go index 4a16369b..9ae332a8 100644 --- a/backend/ent/apikey_update.go +++ b/backend/ent/apikey_update.go @@ -10,6 +10,7 @@ import ( "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" "entgo.io/ent/schema/field" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" @@ -133,6 +134,42 @@ func (_u *APIKeyUpdate) SetNillableStatus(v *string) *APIKeyUpdate { return _u } +// SetIPWhitelist sets the "ip_whitelist" field. +func (_u *APIKeyUpdate) SetIPWhitelist(v []string) *APIKeyUpdate { + _u.mutation.SetIPWhitelist(v) + return _u +} + +// AppendIPWhitelist appends value to the "ip_whitelist" field. +func (_u *APIKeyUpdate) AppendIPWhitelist(v []string) *APIKeyUpdate { + _u.mutation.AppendIPWhitelist(v) + return _u +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (_u *APIKeyUpdate) ClearIPWhitelist() *APIKeyUpdate { + _u.mutation.ClearIPWhitelist() + return _u +} + +// SetIPBlacklist sets the "ip_blacklist" field. 
+func (_u *APIKeyUpdate) SetIPBlacklist(v []string) *APIKeyUpdate { + _u.mutation.SetIPBlacklist(v) + return _u +} + +// AppendIPBlacklist appends value to the "ip_blacklist" field. +func (_u *APIKeyUpdate) AppendIPBlacklist(v []string) *APIKeyUpdate { + _u.mutation.AppendIPBlacklist(v) + return _u +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (_u *APIKeyUpdate) ClearIPBlacklist() *APIKeyUpdate { + _u.mutation.ClearIPBlacklist() + return _u +} + // SetUser sets the "user" edge to the User entity. func (_u *APIKeyUpdate) SetUser(v *User) *APIKeyUpdate { return _u.SetUserID(v.ID) @@ -291,6 +328,28 @@ func (_u *APIKeyUpdate) sqlSave(ctx context.Context) (_node int, err error) { if value, ok := _u.mutation.Status(); ok { _spec.SetField(apikey.FieldStatus, field.TypeString, value) } + if value, ok := _u.mutation.IPWhitelist(); ok { + _spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedIPWhitelist(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, apikey.FieldIPWhitelist, value) + }) + } + if _u.mutation.IPWhitelistCleared() { + _spec.ClearField(apikey.FieldIPWhitelist, field.TypeJSON) + } + if value, ok := _u.mutation.IPBlacklist(); ok { + _spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedIPBlacklist(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, apikey.FieldIPBlacklist, value) + }) + } + if _u.mutation.IPBlacklistCleared() { + _spec.ClearField(apikey.FieldIPBlacklist, field.TypeJSON) + } if _u.mutation.UserCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, @@ -516,6 +575,42 @@ func (_u *APIKeyUpdateOne) SetNillableStatus(v *string) *APIKeyUpdateOne { return _u } +// SetIPWhitelist sets the "ip_whitelist" field. 
+func (_u *APIKeyUpdateOne) SetIPWhitelist(v []string) *APIKeyUpdateOne { + _u.mutation.SetIPWhitelist(v) + return _u +} + +// AppendIPWhitelist appends value to the "ip_whitelist" field. +func (_u *APIKeyUpdateOne) AppendIPWhitelist(v []string) *APIKeyUpdateOne { + _u.mutation.AppendIPWhitelist(v) + return _u +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (_u *APIKeyUpdateOne) ClearIPWhitelist() *APIKeyUpdateOne { + _u.mutation.ClearIPWhitelist() + return _u +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (_u *APIKeyUpdateOne) SetIPBlacklist(v []string) *APIKeyUpdateOne { + _u.mutation.SetIPBlacklist(v) + return _u +} + +// AppendIPBlacklist appends value to the "ip_blacklist" field. +func (_u *APIKeyUpdateOne) AppendIPBlacklist(v []string) *APIKeyUpdateOne { + _u.mutation.AppendIPBlacklist(v) + return _u +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (_u *APIKeyUpdateOne) ClearIPBlacklist() *APIKeyUpdateOne { + _u.mutation.ClearIPBlacklist() + return _u +} + // SetUser sets the "user" edge to the User entity. 
func (_u *APIKeyUpdateOne) SetUser(v *User) *APIKeyUpdateOne { return _u.SetUserID(v.ID) @@ -704,6 +799,28 @@ func (_u *APIKeyUpdateOne) sqlSave(ctx context.Context) (_node *APIKey, err erro if value, ok := _u.mutation.Status(); ok { _spec.SetField(apikey.FieldStatus, field.TypeString, value) } + if value, ok := _u.mutation.IPWhitelist(); ok { + _spec.SetField(apikey.FieldIPWhitelist, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedIPWhitelist(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, apikey.FieldIPWhitelist, value) + }) + } + if _u.mutation.IPWhitelistCleared() { + _spec.ClearField(apikey.FieldIPWhitelist, field.TypeJSON) + } + if value, ok := _u.mutation.IPBlacklist(); ok { + _spec.SetField(apikey.FieldIPBlacklist, field.TypeJSON, value) + } + if value, ok := _u.mutation.AppendedIPBlacklist(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, apikey.FieldIPBlacklist, value) + }) + } + if _u.mutation.IPBlacklistCleared() { + _spec.ClearField(apikey.FieldIPBlacklist, field.TypeJSON) + } if _u.mutation.UserCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.M2O, diff --git a/backend/ent/client.go b/backend/ent/client.go index 4084dac2..35cf644f 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -19,6 +19,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/accountgroup" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" @@ -45,6 +47,10 @@ type Client struct { AccountGroup *AccountGroupClient // Group is the client for interacting with the Group builders. Group *GroupClient + // PromoCode is the client for interacting with the PromoCode builders. 
+ PromoCode *PromoCodeClient + // PromoCodeUsage is the client for interacting with the PromoCodeUsage builders. + PromoCodeUsage *PromoCodeUsageClient // Proxy is the client for interacting with the Proxy builders. Proxy *ProxyClient // RedeemCode is the client for interacting with the RedeemCode builders. @@ -78,6 +84,8 @@ func (c *Client) init() { c.Account = NewAccountClient(c.config) c.AccountGroup = NewAccountGroupClient(c.config) c.Group = NewGroupClient(c.config) + c.PromoCode = NewPromoCodeClient(c.config) + c.PromoCodeUsage = NewPromoCodeUsageClient(c.config) c.Proxy = NewProxyClient(c.config) c.RedeemCode = NewRedeemCodeClient(c.config) c.Setting = NewSettingClient(c.config) @@ -183,6 +191,8 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { Account: NewAccountClient(cfg), AccountGroup: NewAccountGroupClient(cfg), Group: NewGroupClient(cfg), + PromoCode: NewPromoCodeClient(cfg), + PromoCodeUsage: NewPromoCodeUsageClient(cfg), Proxy: NewProxyClient(cfg), RedeemCode: NewRedeemCodeClient(cfg), Setting: NewSettingClient(cfg), @@ -215,6 +225,8 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) Account: NewAccountClient(cfg), AccountGroup: NewAccountGroupClient(cfg), Group: NewGroupClient(cfg), + PromoCode: NewPromoCodeClient(cfg), + PromoCodeUsage: NewPromoCodeUsageClient(cfg), Proxy: NewProxyClient(cfg), RedeemCode: NewRedeemCodeClient(cfg), Setting: NewSettingClient(cfg), @@ -253,9 +265,9 @@ func (c *Client) Close() error { // In order to add hooks to a specific client, call: `client.Node.Use(...)`. 
func (c *Client) Use(hooks ...Hook) { for _, n := range []interface{ Use(...Hook) }{ - c.APIKey, c.Account, c.AccountGroup, c.Group, c.Proxy, c.RedeemCode, c.Setting, - c.UsageLog, c.User, c.UserAllowedGroup, c.UserAttributeDefinition, - c.UserAttributeValue, c.UserSubscription, + c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage, + c.Proxy, c.RedeemCode, c.Setting, c.UsageLog, c.User, c.UserAllowedGroup, + c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Use(hooks...) } @@ -265,9 +277,9 @@ func (c *Client) Use(hooks ...Hook) { // In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ - c.APIKey, c.Account, c.AccountGroup, c.Group, c.Proxy, c.RedeemCode, c.Setting, - c.UsageLog, c.User, c.UserAllowedGroup, c.UserAttributeDefinition, - c.UserAttributeValue, c.UserSubscription, + c.APIKey, c.Account, c.AccountGroup, c.Group, c.PromoCode, c.PromoCodeUsage, + c.Proxy, c.RedeemCode, c.Setting, c.UsageLog, c.User, c.UserAllowedGroup, + c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Intercept(interceptors...) } @@ -284,6 +296,10 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.AccountGroup.mutate(ctx, m) case *GroupMutation: return c.Group.mutate(ctx, m) + case *PromoCodeMutation: + return c.PromoCode.mutate(ctx, m) + case *PromoCodeUsageMutation: + return c.PromoCodeUsage.mutate(ctx, m) case *ProxyMutation: return c.Proxy.mutate(ctx, m) case *RedeemCodeMutation: @@ -1068,6 +1084,320 @@ func (c *GroupClient) mutate(ctx context.Context, m *GroupMutation) (Value, erro } } +// PromoCodeClient is a client for the PromoCode schema. +type PromoCodeClient struct { + config +} + +// NewPromoCodeClient returns a client for the PromoCode from the given config. 
+func NewPromoCodeClient(c config) *PromoCodeClient { + return &PromoCodeClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `promocode.Hooks(f(g(h())))`. +func (c *PromoCodeClient) Use(hooks ...Hook) { + c.hooks.PromoCode = append(c.hooks.PromoCode, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `promocode.Intercept(f(g(h())))`. +func (c *PromoCodeClient) Intercept(interceptors ...Interceptor) { + c.inters.PromoCode = append(c.inters.PromoCode, interceptors...) +} + +// Create returns a builder for creating a PromoCode entity. +func (c *PromoCodeClient) Create() *PromoCodeCreate { + mutation := newPromoCodeMutation(c.config, OpCreate) + return &PromoCodeCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of PromoCode entities. +func (c *PromoCodeClient) CreateBulk(builders ...*PromoCodeCreate) *PromoCodeCreateBulk { + return &PromoCodeCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *PromoCodeClient) MapCreateBulk(slice any, setFunc func(*PromoCodeCreate, int)) *PromoCodeCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &PromoCodeCreateBulk{err: fmt.Errorf("calling to PromoCodeClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*PromoCodeCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &PromoCodeCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for PromoCode. 
+func (c *PromoCodeClient) Update() *PromoCodeUpdate { + mutation := newPromoCodeMutation(c.config, OpUpdate) + return &PromoCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *PromoCodeClient) UpdateOne(_m *PromoCode) *PromoCodeUpdateOne { + mutation := newPromoCodeMutation(c.config, OpUpdateOne, withPromoCode(_m)) + return &PromoCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *PromoCodeClient) UpdateOneID(id int64) *PromoCodeUpdateOne { + mutation := newPromoCodeMutation(c.config, OpUpdateOne, withPromoCodeID(id)) + return &PromoCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for PromoCode. +func (c *PromoCodeClient) Delete() *PromoCodeDelete { + mutation := newPromoCodeMutation(c.config, OpDelete) + return &PromoCodeDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *PromoCodeClient) DeleteOne(_m *PromoCode) *PromoCodeDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *PromoCodeClient) DeleteOneID(id int64) *PromoCodeDeleteOne { + builder := c.Delete().Where(promocode.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &PromoCodeDeleteOne{builder} +} + +// Query returns a query builder for PromoCode. +func (c *PromoCodeClient) Query() *PromoCodeQuery { + return &PromoCodeQuery{ + config: c.config, + ctx: &QueryContext{Type: TypePromoCode}, + inters: c.Interceptors(), + } +} + +// Get returns a PromoCode entity by its id. +func (c *PromoCodeClient) Get(ctx context.Context, id int64) (*PromoCode, error) { + return c.Query().Where(promocode.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *PromoCodeClient) GetX(ctx context.Context, id int64) *PromoCode { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryUsageRecords queries the usage_records edge of a PromoCode. +func (c *PromoCodeClient) QueryUsageRecords(_m *PromoCode) *PromoCodeUsageQuery { + query := (&PromoCodeUsageClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(promocode.Table, promocode.FieldID, id), + sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, promocode.UsageRecordsTable, promocode.UsageRecordsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *PromoCodeClient) Hooks() []Hook { + return c.hooks.PromoCode +} + +// Interceptors returns the client interceptors. +func (c *PromoCodeClient) Interceptors() []Interceptor { + return c.inters.PromoCode +} + +func (c *PromoCodeClient) mutate(ctx context.Context, m *PromoCodeMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&PromoCodeCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&PromoCodeUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&PromoCodeUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&PromoCodeDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown PromoCode mutation op: %q", m.Op()) + } +} + +// PromoCodeUsageClient is a client for the PromoCodeUsage schema. +type PromoCodeUsageClient struct { + config +} + +// NewPromoCodeUsageClient returns a client for the PromoCodeUsage from the given config. 
+func NewPromoCodeUsageClient(c config) *PromoCodeUsageClient { + return &PromoCodeUsageClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `promocodeusage.Hooks(f(g(h())))`. +func (c *PromoCodeUsageClient) Use(hooks ...Hook) { + c.hooks.PromoCodeUsage = append(c.hooks.PromoCodeUsage, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `promocodeusage.Intercept(f(g(h())))`. +func (c *PromoCodeUsageClient) Intercept(interceptors ...Interceptor) { + c.inters.PromoCodeUsage = append(c.inters.PromoCodeUsage, interceptors...) +} + +// Create returns a builder for creating a PromoCodeUsage entity. +func (c *PromoCodeUsageClient) Create() *PromoCodeUsageCreate { + mutation := newPromoCodeUsageMutation(c.config, OpCreate) + return &PromoCodeUsageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of PromoCodeUsage entities. +func (c *PromoCodeUsageClient) CreateBulk(builders ...*PromoCodeUsageCreate) *PromoCodeUsageCreateBulk { + return &PromoCodeUsageCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *PromoCodeUsageClient) MapCreateBulk(slice any, setFunc func(*PromoCodeUsageCreate, int)) *PromoCodeUsageCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &PromoCodeUsageCreateBulk{err: fmt.Errorf("calling to PromoCodeUsageClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*PromoCodeUsageCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &PromoCodeUsageCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for PromoCodeUsage. +func (c *PromoCodeUsageClient) Update() *PromoCodeUsageUpdate { + mutation := newPromoCodeUsageMutation(c.config, OpUpdate) + return &PromoCodeUsageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *PromoCodeUsageClient) UpdateOne(_m *PromoCodeUsage) *PromoCodeUsageUpdateOne { + mutation := newPromoCodeUsageMutation(c.config, OpUpdateOne, withPromoCodeUsage(_m)) + return &PromoCodeUsageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *PromoCodeUsageClient) UpdateOneID(id int64) *PromoCodeUsageUpdateOne { + mutation := newPromoCodeUsageMutation(c.config, OpUpdateOne, withPromoCodeUsageID(id)) + return &PromoCodeUsageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for PromoCodeUsage. +func (c *PromoCodeUsageClient) Delete() *PromoCodeUsageDelete { + mutation := newPromoCodeUsageMutation(c.config, OpDelete) + return &PromoCodeUsageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *PromoCodeUsageClient) DeleteOne(_m *PromoCodeUsage) *PromoCodeUsageDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *PromoCodeUsageClient) DeleteOneID(id int64) *PromoCodeUsageDeleteOne { + builder := c.Delete().Where(promocodeusage.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &PromoCodeUsageDeleteOne{builder} +} + +// Query returns a query builder for PromoCodeUsage. +func (c *PromoCodeUsageClient) Query() *PromoCodeUsageQuery { + return &PromoCodeUsageQuery{ + config: c.config, + ctx: &QueryContext{Type: TypePromoCodeUsage}, + inters: c.Interceptors(), + } +} + +// Get returns a PromoCodeUsage entity by its id. +func (c *PromoCodeUsageClient) Get(ctx context.Context, id int64) (*PromoCodeUsage, error) { + return c.Query().Where(promocodeusage.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *PromoCodeUsageClient) GetX(ctx context.Context, id int64) *PromoCodeUsage { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryPromoCode queries the promo_code edge of a PromoCodeUsage. +func (c *PromoCodeUsageClient) QueryPromoCode(_m *PromoCodeUsage) *PromoCodeQuery { + query := (&PromoCodeClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, id), + sqlgraph.To(promocode.Table, promocode.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.PromoCodeTable, promocodeusage.PromoCodeColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryUser queries the user edge of a PromoCodeUsage. 
+func (c *PromoCodeUsageClient) QueryUser(_m *PromoCodeUsage) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.UserTable, promocodeusage.UserColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *PromoCodeUsageClient) Hooks() []Hook { + return c.hooks.PromoCodeUsage +} + +// Interceptors returns the client interceptors. +func (c *PromoCodeUsageClient) Interceptors() []Interceptor { + return c.inters.PromoCodeUsage +} + +func (c *PromoCodeUsageClient) mutate(ctx context.Context, m *PromoCodeUsageMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&PromoCodeUsageCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&PromoCodeUsageUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&PromoCodeUsageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&PromoCodeUsageDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown PromoCodeUsage mutation op: %q", m.Op()) + } +} + // ProxyClient is a client for the Proxy schema. type ProxyClient struct { config @@ -1950,6 +2280,22 @@ func (c *UserClient) QueryAttributeValues(_m *User) *UserAttributeValueQuery { return query } +// QueryPromoCodeUsages queries the promo_code_usages edge of a User. 
+func (c *UserClient) QueryPromoCodeUsages(_m *User) *PromoCodeUsageQuery { + query := (&PromoCodeUsageClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.PromoCodeUsagesTable, user.PromoCodeUsagesColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + // QueryUserAllowedGroups queries the user_allowed_groups edge of a User. func (c *UserClient) QueryUserAllowedGroups(_m *User) *UserAllowedGroupQuery { query := (&UserAllowedGroupClient{config: c.config}).Query() @@ -2627,14 +2973,14 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription // hooks and interceptors per client, for fast access. type ( hooks struct { - APIKey, Account, AccountGroup, Group, Proxy, RedeemCode, Setting, UsageLog, - User, UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, - UserSubscription []ent.Hook + APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy, + RedeemCode, Setting, UsageLog, User, UserAllowedGroup, UserAttributeDefinition, + UserAttributeValue, UserSubscription []ent.Hook } inters struct { - APIKey, Account, AccountGroup, Group, Proxy, RedeemCode, Setting, UsageLog, - User, UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, - UserSubscription []ent.Interceptor + APIKey, Account, AccountGroup, Group, PromoCode, PromoCodeUsage, Proxy, + RedeemCode, Setting, UsageLog, User, UserAllowedGroup, UserAttributeDefinition, + UserAttributeValue, UserSubscription []ent.Interceptor } ) diff --git a/backend/ent/ent.go b/backend/ent/ent.go index 670ea0b2..410375a7 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -16,6 +16,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/accountgroup" 
"github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" @@ -89,6 +91,8 @@ func checkColumn(t, c string) error { account.Table: account.ValidColumn, accountgroup.Table: accountgroup.ValidColumn, group.Table: group.ValidColumn, + promocode.Table: promocode.ValidColumn, + promocodeusage.Table: promocodeusage.ValidColumn, proxy.Table: proxy.ValidColumn, redeemcode.Table: redeemcode.ValidColumn, setting.Table: setting.ValidColumn, diff --git a/backend/ent/generate.go b/backend/ent/generate.go index 22ab4a78..59843cec 100644 --- a/backend/ent/generate.go +++ b/backend/ent/generate.go @@ -2,4 +2,5 @@ package ent // 启用 sql/execquery 以生成 ExecContext/QueryContext 的透传接口,便于事务内执行原生 SQL。 -//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/upsert,intercept,sql/execquery --idtype int64 ./schema +// 启用 sql/lock 以支持 FOR UPDATE 行锁。 +//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature sql/upsert,intercept,sql/execquery,sql/lock --idtype int64 ./schema diff --git a/backend/ent/group.go b/backend/ent/group.go index 4a31442a..0d0c0538 100644 --- a/backend/ent/group.go +++ b/backend/ent/group.go @@ -3,6 +3,7 @@ package ent import ( + "encoding/json" "fmt" "strings" "time" @@ -55,6 +56,10 @@ type Group struct { ClaudeCodeOnly bool `json:"claude_code_only,omitempty"` // 非 Claude Code 请求降级使用的分组 ID FallbackGroupID *int64 `json:"fallback_group_id,omitempty"` + // 模型路由配置:模型模式 -> 优先账号ID列表 + ModelRouting map[string][]int64 `json:"model_routing,omitempty"` + // 是否启用模型路由配置 + ModelRoutingEnabled bool `json:"model_routing_enabled,omitempty"` // Edges holds the relations/edges for other nodes in the graph. // The values are being populated by the GroupQuery when eager-loading is set. 
Edges GroupEdges `json:"edges"` @@ -161,7 +166,9 @@ func (*Group) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case group.FieldIsExclusive, group.FieldClaudeCodeOnly: + case group.FieldModelRouting: + values[i] = new([]byte) + case group.FieldIsExclusive, group.FieldClaudeCodeOnly, group.FieldModelRoutingEnabled: values[i] = new(sql.NullBool) case group.FieldRateMultiplier, group.FieldDailyLimitUsd, group.FieldWeeklyLimitUsd, group.FieldMonthlyLimitUsd, group.FieldImagePrice1k, group.FieldImagePrice2k, group.FieldImagePrice4k: values[i] = new(sql.NullFloat64) @@ -315,6 +322,20 @@ func (_m *Group) assignValues(columns []string, values []any) error { _m.FallbackGroupID = new(int64) *_m.FallbackGroupID = value.Int64 } + case group.FieldModelRouting: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field model_routing", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &_m.ModelRouting); err != nil { + return fmt.Errorf("unmarshal field model_routing: %w", err) + } + } + case group.FieldModelRoutingEnabled: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field model_routing_enabled", values[i]) + } else if value.Valid { + _m.ModelRoutingEnabled = value.Bool + } default: _m.selectValues.Set(columns[i], values[i]) } @@ -465,6 +486,12 @@ func (_m *Group) String() string { builder.WriteString("fallback_group_id=") builder.WriteString(fmt.Sprintf("%v", *v)) } + builder.WriteString(", ") + builder.WriteString("model_routing=") + builder.WriteString(fmt.Sprintf("%v", _m.ModelRouting)) + builder.WriteString(", ") + builder.WriteString("model_routing_enabled=") + builder.WriteString(fmt.Sprintf("%v", _m.ModelRoutingEnabled)) builder.WriteByte(')') return builder.String() } diff --git a/backend/ent/group/group.go b/backend/ent/group/group.go index c4317f00..d66d3edc 
100644 --- a/backend/ent/group/group.go +++ b/backend/ent/group/group.go @@ -53,6 +53,10 @@ const ( FieldClaudeCodeOnly = "claude_code_only" // FieldFallbackGroupID holds the string denoting the fallback_group_id field in the database. FieldFallbackGroupID = "fallback_group_id" + // FieldModelRouting holds the string denoting the model_routing field in the database. + FieldModelRouting = "model_routing" + // FieldModelRoutingEnabled holds the string denoting the model_routing_enabled field in the database. + FieldModelRoutingEnabled = "model_routing_enabled" // EdgeAPIKeys holds the string denoting the api_keys edge name in mutations. EdgeAPIKeys = "api_keys" // EdgeRedeemCodes holds the string denoting the redeem_codes edge name in mutations. @@ -147,6 +151,8 @@ var Columns = []string{ FieldImagePrice4k, FieldClaudeCodeOnly, FieldFallbackGroupID, + FieldModelRouting, + FieldModelRoutingEnabled, } var ( @@ -204,6 +210,8 @@ var ( DefaultDefaultValidityDays int // DefaultClaudeCodeOnly holds the default value on creation for the "claude_code_only" field. DefaultClaudeCodeOnly bool + // DefaultModelRoutingEnabled holds the default value on creation for the "model_routing_enabled" field. + DefaultModelRoutingEnabled bool ) // OrderOption defines the ordering options for the Group queries. @@ -309,6 +317,11 @@ func ByFallbackGroupID(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldFallbackGroupID, opts...).ToFunc() } +// ByModelRoutingEnabled orders the results by the model_routing_enabled field. +func ByModelRoutingEnabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldModelRoutingEnabled, opts...).ToFunc() +} + // ByAPIKeysCount orders the results by api_keys count. 
func ByAPIKeysCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { diff --git a/backend/ent/group/where.go b/backend/ent/group/where.go index fb2f942f..6ce9e4c6 100644 --- a/backend/ent/group/where.go +++ b/backend/ent/group/where.go @@ -150,6 +150,11 @@ func FallbackGroupID(v int64) predicate.Group { return predicate.Group(sql.FieldEQ(FieldFallbackGroupID, v)) } +// ModelRoutingEnabled applies equality check predicate on the "model_routing_enabled" field. It's identical to ModelRoutingEnabledEQ. +func ModelRoutingEnabled(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldModelRoutingEnabled, v)) +} + // CreatedAtEQ applies the EQ predicate on the "created_at" field. func CreatedAtEQ(v time.Time) predicate.Group { return predicate.Group(sql.FieldEQ(FieldCreatedAt, v)) @@ -1065,6 +1070,26 @@ func FallbackGroupIDNotNil() predicate.Group { return predicate.Group(sql.FieldNotNull(FieldFallbackGroupID)) } +// ModelRoutingIsNil applies the IsNil predicate on the "model_routing" field. +func ModelRoutingIsNil() predicate.Group { + return predicate.Group(sql.FieldIsNull(FieldModelRouting)) +} + +// ModelRoutingNotNil applies the NotNil predicate on the "model_routing" field. +func ModelRoutingNotNil() predicate.Group { + return predicate.Group(sql.FieldNotNull(FieldModelRouting)) +} + +// ModelRoutingEnabledEQ applies the EQ predicate on the "model_routing_enabled" field. +func ModelRoutingEnabledEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldEQ(FieldModelRoutingEnabled, v)) +} + +// ModelRoutingEnabledNEQ applies the NEQ predicate on the "model_routing_enabled" field. +func ModelRoutingEnabledNEQ(v bool) predicate.Group { + return predicate.Group(sql.FieldNEQ(FieldModelRoutingEnabled, v)) +} + // HasAPIKeys applies the HasEdge predicate on the "api_keys" edge. 
func HasAPIKeys() predicate.Group { return predicate.Group(func(s *sql.Selector) { diff --git a/backend/ent/group_create.go b/backend/ent/group_create.go index 59229402..0f251e0b 100644 --- a/backend/ent/group_create.go +++ b/backend/ent/group_create.go @@ -286,6 +286,26 @@ func (_c *GroupCreate) SetNillableFallbackGroupID(v *int64) *GroupCreate { return _c } +// SetModelRouting sets the "model_routing" field. +func (_c *GroupCreate) SetModelRouting(v map[string][]int64) *GroupCreate { + _c.mutation.SetModelRouting(v) + return _c +} + +// SetModelRoutingEnabled sets the "model_routing_enabled" field. +func (_c *GroupCreate) SetModelRoutingEnabled(v bool) *GroupCreate { + _c.mutation.SetModelRoutingEnabled(v) + return _c +} + +// SetNillableModelRoutingEnabled sets the "model_routing_enabled" field if the given value is not nil. +func (_c *GroupCreate) SetNillableModelRoutingEnabled(v *bool) *GroupCreate { + if v != nil { + _c.SetModelRoutingEnabled(*v) + } + return _c +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_c *GroupCreate) AddAPIKeyIDs(ids ...int64) *GroupCreate { _c.mutation.AddAPIKeyIDs(ids...) 
@@ -455,6 +475,10 @@ func (_c *GroupCreate) defaults() error { v := group.DefaultClaudeCodeOnly _c.mutation.SetClaudeCodeOnly(v) } + if _, ok := _c.mutation.ModelRoutingEnabled(); !ok { + v := group.DefaultModelRoutingEnabled + _c.mutation.SetModelRoutingEnabled(v) + } return nil } @@ -510,6 +534,9 @@ func (_c *GroupCreate) check() error { if _, ok := _c.mutation.ClaudeCodeOnly(); !ok { return &ValidationError{Name: "claude_code_only", err: errors.New(`ent: missing required field "Group.claude_code_only"`)} } + if _, ok := _c.mutation.ModelRoutingEnabled(); !ok { + return &ValidationError{Name: "model_routing_enabled", err: errors.New(`ent: missing required field "Group.model_routing_enabled"`)} + } return nil } @@ -613,6 +640,14 @@ func (_c *GroupCreate) createSpec() (*Group, *sqlgraph.CreateSpec) { _spec.SetField(group.FieldFallbackGroupID, field.TypeInt64, value) _node.FallbackGroupID = &value } + if value, ok := _c.mutation.ModelRouting(); ok { + _spec.SetField(group.FieldModelRouting, field.TypeJSON, value) + _node.ModelRouting = value + } + if value, ok := _c.mutation.ModelRoutingEnabled(); ok { + _spec.SetField(group.FieldModelRoutingEnabled, field.TypeBool, value) + _node.ModelRoutingEnabled = value + } if nodes := _c.mutation.APIKeysIDs(); len(nodes) > 0 { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -1093,6 +1128,36 @@ func (u *GroupUpsert) ClearFallbackGroupID() *GroupUpsert { return u } +// SetModelRouting sets the "model_routing" field. +func (u *GroupUpsert) SetModelRouting(v map[string][]int64) *GroupUpsert { + u.Set(group.FieldModelRouting, v) + return u +} + +// UpdateModelRouting sets the "model_routing" field to the value that was provided on create. +func (u *GroupUpsert) UpdateModelRouting() *GroupUpsert { + u.SetExcluded(group.FieldModelRouting) + return u +} + +// ClearModelRouting clears the value of the "model_routing" field. 
+func (u *GroupUpsert) ClearModelRouting() *GroupUpsert { + u.SetNull(group.FieldModelRouting) + return u +} + +// SetModelRoutingEnabled sets the "model_routing_enabled" field. +func (u *GroupUpsert) SetModelRoutingEnabled(v bool) *GroupUpsert { + u.Set(group.FieldModelRoutingEnabled, v) + return u +} + +// UpdateModelRoutingEnabled sets the "model_routing_enabled" field to the value that was provided on create. +func (u *GroupUpsert) UpdateModelRoutingEnabled() *GroupUpsert { + u.SetExcluded(group.FieldModelRoutingEnabled) + return u +} + // UpdateNewValues updates the mutable fields using the new values that were set on create. // Using this option is equivalent to using: // @@ -1516,6 +1581,41 @@ func (u *GroupUpsertOne) ClearFallbackGroupID() *GroupUpsertOne { }) } +// SetModelRouting sets the "model_routing" field. +func (u *GroupUpsertOne) SetModelRouting(v map[string][]int64) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetModelRouting(v) + }) +} + +// UpdateModelRouting sets the "model_routing" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateModelRouting() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateModelRouting() + }) +} + +// ClearModelRouting clears the value of the "model_routing" field. +func (u *GroupUpsertOne) ClearModelRouting() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.ClearModelRouting() + }) +} + +// SetModelRoutingEnabled sets the "model_routing_enabled" field. +func (u *GroupUpsertOne) SetModelRoutingEnabled(v bool) *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.SetModelRoutingEnabled(v) + }) +} + +// UpdateModelRoutingEnabled sets the "model_routing_enabled" field to the value that was provided on create. +func (u *GroupUpsertOne) UpdateModelRoutingEnabled() *GroupUpsertOne { + return u.Update(func(s *GroupUpsert) { + s.UpdateModelRoutingEnabled() + }) +} + // Exec executes the query. 
func (u *GroupUpsertOne) Exec(ctx context.Context) error { if len(u.create.conflict) == 0 { @@ -2105,6 +2205,41 @@ func (u *GroupUpsertBulk) ClearFallbackGroupID() *GroupUpsertBulk { }) } +// SetModelRouting sets the "model_routing" field. +func (u *GroupUpsertBulk) SetModelRouting(v map[string][]int64) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetModelRouting(v) + }) +} + +// UpdateModelRouting sets the "model_routing" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateModelRouting() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateModelRouting() + }) +} + +// ClearModelRouting clears the value of the "model_routing" field. +func (u *GroupUpsertBulk) ClearModelRouting() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.ClearModelRouting() + }) +} + +// SetModelRoutingEnabled sets the "model_routing_enabled" field. +func (u *GroupUpsertBulk) SetModelRoutingEnabled(v bool) *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.SetModelRoutingEnabled(v) + }) +} + +// UpdateModelRoutingEnabled sets the "model_routing_enabled" field to the value that was provided on create. +func (u *GroupUpsertBulk) UpdateModelRoutingEnabled() *GroupUpsertBulk { + return u.Update(func(s *GroupUpsert) { + s.UpdateModelRoutingEnabled() + }) +} + // Exec executes the query. func (u *GroupUpsertBulk) Exec(ctx context.Context) error { if u.create.err != nil { diff --git a/backend/ent/group_query.go b/backend/ent/group_query.go index 3cc976cb..d4cc4f8d 100644 --- a/backend/ent/group_query.go +++ b/backend/ent/group_query.go @@ -9,6 +9,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -39,6 +40,7 @@ type GroupQuery struct { withAllowedUsers *UserQuery withAccountGroups *AccountGroupQuery withUserAllowedGroups *UserAllowedGroupQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. 
traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -643,6 +645,9 @@ func (_q *GroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Group, node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -1025,6 +1030,9 @@ func (_q *GroupQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllo func (_q *GroupQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -1087,6 +1095,9 @@ func (_q *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -1104,6 +1115,32 @@ func (_q *GroupQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *GroupQuery) ForUpdate(opts ...sql.LockOption) *GroupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (_q *GroupQuery) ForShare(opts ...sql.LockOption) *GroupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // GroupGroupBy is the group-by builder for Group entities. type GroupGroupBy struct { selector diff --git a/backend/ent/group_update.go b/backend/ent/group_update.go index 1a6f15ec..c3cc2708 100644 --- a/backend/ent/group_update.go +++ b/backend/ent/group_update.go @@ -395,6 +395,32 @@ func (_u *GroupUpdate) ClearFallbackGroupID() *GroupUpdate { return _u } +// SetModelRouting sets the "model_routing" field. +func (_u *GroupUpdate) SetModelRouting(v map[string][]int64) *GroupUpdate { + _u.mutation.SetModelRouting(v) + return _u +} + +// ClearModelRouting clears the value of the "model_routing" field. +func (_u *GroupUpdate) ClearModelRouting() *GroupUpdate { + _u.mutation.ClearModelRouting() + return _u +} + +// SetModelRoutingEnabled sets the "model_routing_enabled" field. +func (_u *GroupUpdate) SetModelRoutingEnabled(v bool) *GroupUpdate { + _u.mutation.SetModelRoutingEnabled(v) + return _u +} + +// SetNillableModelRoutingEnabled sets the "model_routing_enabled" field if the given value is not nil. +func (_u *GroupUpdate) SetNillableModelRoutingEnabled(v *bool) *GroupUpdate { + if v != nil { + _u.SetModelRoutingEnabled(*v) + } + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *GroupUpdate) AddAPIKeyIDs(ids ...int64) *GroupUpdate { _u.mutation.AddAPIKeyIDs(ids...) 
@@ -803,6 +829,15 @@ func (_u *GroupUpdate) sqlSave(ctx context.Context) (_node int, err error) { if _u.mutation.FallbackGroupIDCleared() { _spec.ClearField(group.FieldFallbackGroupID, field.TypeInt64) } + if value, ok := _u.mutation.ModelRouting(); ok { + _spec.SetField(group.FieldModelRouting, field.TypeJSON, value) + } + if _u.mutation.ModelRoutingCleared() { + _spec.ClearField(group.FieldModelRouting, field.TypeJSON) + } + if value, ok := _u.mutation.ModelRoutingEnabled(); ok { + _spec.SetField(group.FieldModelRoutingEnabled, field.TypeBool, value) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, @@ -1478,6 +1513,32 @@ func (_u *GroupUpdateOne) ClearFallbackGroupID() *GroupUpdateOne { return _u } +// SetModelRouting sets the "model_routing" field. +func (_u *GroupUpdateOne) SetModelRouting(v map[string][]int64) *GroupUpdateOne { + _u.mutation.SetModelRouting(v) + return _u +} + +// ClearModelRouting clears the value of the "model_routing" field. +func (_u *GroupUpdateOne) ClearModelRouting() *GroupUpdateOne { + _u.mutation.ClearModelRouting() + return _u +} + +// SetModelRoutingEnabled sets the "model_routing_enabled" field. +func (_u *GroupUpdateOne) SetModelRoutingEnabled(v bool) *GroupUpdateOne { + _u.mutation.SetModelRoutingEnabled(v) + return _u +} + +// SetNillableModelRoutingEnabled sets the "model_routing_enabled" field if the given value is not nil. +func (_u *GroupUpdateOne) SetNillableModelRoutingEnabled(v *bool) *GroupUpdateOne { + if v != nil { + _u.SetModelRoutingEnabled(*v) + } + return _u +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by IDs. func (_u *GroupUpdateOne) AddAPIKeyIDs(ids ...int64) *GroupUpdateOne { _u.mutation.AddAPIKeyIDs(ids...) 
@@ -1916,6 +1977,15 @@ func (_u *GroupUpdateOne) sqlSave(ctx context.Context) (_node *Group, err error) if _u.mutation.FallbackGroupIDCleared() { _spec.ClearField(group.FieldFallbackGroupID, field.TypeInt64) } + if value, ok := _u.mutation.ModelRouting(); ok { + _spec.SetField(group.FieldModelRouting, field.TypeJSON, value) + } + if _u.mutation.ModelRoutingCleared() { + _spec.ClearField(group.FieldModelRouting, field.TypeJSON) + } + if value, ok := _u.mutation.ModelRoutingEnabled(); ok { + _spec.SetField(group.FieldModelRoutingEnabled, field.TypeBool, value) + } if _u.mutation.APIKeysCleared() { edge := &sqlgraph.EdgeSpec{ Rel: sqlgraph.O2M, diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index e82b00f9..532b0d2c 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -57,6 +57,30 @@ func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m) } +// The PromoCodeFunc type is an adapter to allow the use of ordinary +// function as PromoCode mutator. +type PromoCodeFunc func(context.Context, *ent.PromoCodeMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f PromoCodeFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PromoCodeMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.PromoCodeMutation", m) +} + +// The PromoCodeUsageFunc type is an adapter to allow the use of ordinary +// function as PromoCodeUsage mutator. +type PromoCodeUsageFunc func(context.Context, *ent.PromoCodeUsageMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f PromoCodeUsageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.PromoCodeUsageMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.PromoCodeUsageMutation", m) +} + // The ProxyFunc type is an adapter to allow the use of ordinary // function as Proxy mutator. type ProxyFunc func(context.Context, *ent.ProxyMutation) (ent.Value, error) diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go index 6add6fed..765d39b4 100644 --- a/backend/ent/intercept/intercept.go +++ b/backend/ent/intercept/intercept.go @@ -13,6 +13,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" @@ -188,6 +190,60 @@ func (f TraverseGroup) Traverse(ctx context.Context, q ent.Query) error { return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q) } +// The PromoCodeFunc type is an adapter to allow the use of ordinary function as a Querier. +type PromoCodeFunc func(context.Context, *ent.PromoCodeQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f PromoCodeFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.PromoCodeQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeQuery", q) +} + +// The TraversePromoCode type is an adapter to allow the use of ordinary function as Traverser. +type TraversePromoCode func(context.Context, *ent.PromoCodeQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraversePromoCode) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraversePromoCode) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.PromoCodeQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. 
expect *ent.PromoCodeQuery", q) +} + +// The PromoCodeUsageFunc type is an adapter to allow the use of ordinary function as a Querier. +type PromoCodeUsageFunc func(context.Context, *ent.PromoCodeUsageQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f PromoCodeUsageFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.PromoCodeUsageQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeUsageQuery", q) +} + +// The TraversePromoCodeUsage type is an adapter to allow the use of ordinary function as Traverser. +type TraversePromoCodeUsage func(context.Context, *ent.PromoCodeUsageQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraversePromoCodeUsage) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraversePromoCodeUsage) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.PromoCodeUsageQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.PromoCodeUsageQuery", q) +} + // The ProxyFunc type is an adapter to allow the use of ordinary function as a Querier. 
type ProxyFunc func(context.Context, *ent.ProxyQuery) (ent.Value, error) @@ -442,6 +498,10 @@ func NewQuery(q ent.Query) (Query, error) { return &query[*ent.AccountGroupQuery, predicate.AccountGroup, accountgroup.OrderOption]{typ: ent.TypeAccountGroup, tq: q}, nil case *ent.GroupQuery: return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil + case *ent.PromoCodeQuery: + return &query[*ent.PromoCodeQuery, predicate.PromoCode, promocode.OrderOption]{typ: ent.TypePromoCode, tq: q}, nil + case *ent.PromoCodeUsageQuery: + return &query[*ent.PromoCodeUsageQuery, predicate.PromoCodeUsage, promocodeusage.OrderOption]{typ: ent.TypePromoCodeUsage, tq: q}, nil case *ent.ProxyQuery: return &query[*ent.ProxyQuery, predicate.Proxy, proxy.OrderOption]{typ: ent.TypeProxy, tq: q}, nil case *ent.RedeemCodeQuery: diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index 13081e31..b377804f 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -18,6 +18,8 @@ var ( {Name: "key", Type: field.TypeString, Unique: true, Size: 128}, {Name: "name", Type: field.TypeString, Size: 100}, {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "ip_whitelist", Type: field.TypeJSON, Nullable: true}, + {Name: "ip_blacklist", Type: field.TypeJSON, Nullable: true}, {Name: "group_id", Type: field.TypeInt64, Nullable: true}, {Name: "user_id", Type: field.TypeInt64}, } @@ -29,13 +31,13 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "api_keys_groups_api_keys", - Columns: []*schema.Column{APIKeysColumns[7]}, + Columns: []*schema.Column{APIKeysColumns[9]}, RefColumns: []*schema.Column{GroupsColumns[0]}, OnDelete: schema.SetNull, }, { Symbol: "api_keys_users_api_keys", - Columns: []*schema.Column{APIKeysColumns[8]}, + Columns: []*schema.Column{APIKeysColumns[10]}, RefColumns: []*schema.Column{UsersColumns[0]}, OnDelete: schema.NoAction, }, @@ -44,12 +46,12 @@ var ( { Name: 
"apikey_user_id", Unique: false, - Columns: []*schema.Column{APIKeysColumns[8]}, + Columns: []*schema.Column{APIKeysColumns[10]}, }, { Name: "apikey_group_id", Unique: false, - Columns: []*schema.Column{APIKeysColumns[7]}, + Columns: []*schema.Column{APIKeysColumns[9]}, }, { Name: "apikey_status", @@ -77,6 +79,7 @@ var ( {Name: "extra", Type: field.TypeJSON, SchemaType: map[string]string{"postgres": "jsonb"}}, {Name: "concurrency", Type: field.TypeInt, Default: 3}, {Name: "priority", Type: field.TypeInt, Default: 50}, + {Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}}, {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, {Name: "error_message", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, {Name: "last_used_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, @@ -99,7 +102,7 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "accounts_proxies_proxy", - Columns: []*schema.Column{AccountsColumns[24]}, + Columns: []*schema.Column{AccountsColumns[25]}, RefColumns: []*schema.Column{ProxiesColumns[0]}, OnDelete: schema.SetNull, }, @@ -118,12 +121,12 @@ var ( { Name: "account_status", Unique: false, - Columns: []*schema.Column{AccountsColumns[12]}, + Columns: []*schema.Column{AccountsColumns[13]}, }, { Name: "account_proxy_id", Unique: false, - Columns: []*schema.Column{AccountsColumns[24]}, + Columns: []*schema.Column{AccountsColumns[25]}, }, { Name: "account_priority", @@ -133,27 +136,27 @@ var ( { Name: "account_last_used_at", Unique: false, - Columns: []*schema.Column{AccountsColumns[14]}, + Columns: []*schema.Column{AccountsColumns[15]}, }, { Name: "account_schedulable", Unique: false, - Columns: []*schema.Column{AccountsColumns[17]}, + Columns: []*schema.Column{AccountsColumns[18]}, }, { Name: "account_rate_limited_at", Unique: false, - Columns: 
[]*schema.Column{AccountsColumns[18]}, + Columns: []*schema.Column{AccountsColumns[19]}, }, { Name: "account_rate_limit_reset_at", Unique: false, - Columns: []*schema.Column{AccountsColumns[19]}, + Columns: []*schema.Column{AccountsColumns[20]}, }, { Name: "account_overload_until", Unique: false, - Columns: []*schema.Column{AccountsColumns[20]}, + Columns: []*schema.Column{AccountsColumns[21]}, }, { Name: "account_deleted_at", @@ -223,6 +226,8 @@ var ( {Name: "image_price_4k", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, {Name: "claude_code_only", Type: field.TypeBool, Default: false}, {Name: "fallback_group_id", Type: field.TypeInt64, Nullable: true}, + {Name: "model_routing", Type: field.TypeJSON, Nullable: true, SchemaType: map[string]string{"postgres": "jsonb"}}, + {Name: "model_routing_enabled", Type: field.TypeBool, Default: false}, } // GroupsTable holds the schema information for the "groups" table. GroupsTable = &schema.Table{ @@ -257,6 +262,82 @@ var ( }, }, } + // PromoCodesColumns holds the columns for the "promo_codes" table. 
+ PromoCodesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "code", Type: field.TypeString, Unique: true, Size: 32}, + {Name: "bonus_amount", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "max_uses", Type: field.TypeInt, Default: 0}, + {Name: "used_count", Type: field.TypeInt, Default: 0}, + {Name: "status", Type: field.TypeString, Size: 20, Default: "active"}, + {Name: "expires_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "notes", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + } + // PromoCodesTable holds the schema information for the "promo_codes" table. + PromoCodesTable = &schema.Table{ + Name: "promo_codes", + Columns: PromoCodesColumns, + PrimaryKey: []*schema.Column{PromoCodesColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "promocode_status", + Unique: false, + Columns: []*schema.Column{PromoCodesColumns[5]}, + }, + { + Name: "promocode_expires_at", + Unique: false, + Columns: []*schema.Column{PromoCodesColumns[6]}, + }, + }, + } + // PromoCodeUsagesColumns holds the columns for the "promo_code_usages" table. + PromoCodeUsagesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "bonus_amount", Type: field.TypeFloat64, SchemaType: map[string]string{"postgres": "decimal(20,8)"}}, + {Name: "used_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "promo_code_id", Type: field.TypeInt64}, + {Name: "user_id", Type: field.TypeInt64}, + } + // PromoCodeUsagesTable holds the schema information for the "promo_code_usages" table. 
+ PromoCodeUsagesTable = &schema.Table{ + Name: "promo_code_usages", + Columns: PromoCodeUsagesColumns, + PrimaryKey: []*schema.Column{PromoCodeUsagesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "promo_code_usages_promo_codes_usage_records", + Columns: []*schema.Column{PromoCodeUsagesColumns[3]}, + RefColumns: []*schema.Column{PromoCodesColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "promo_code_usages_users_promo_code_usages", + Columns: []*schema.Column{PromoCodeUsagesColumns[4]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "promocodeusage_promo_code_id", + Unique: false, + Columns: []*schema.Column{PromoCodeUsagesColumns[3]}, + }, + { + Name: "promocodeusage_user_id", + Unique: false, + Columns: []*schema.Column{PromoCodeUsagesColumns[4]}, + }, + { + Name: "promocodeusage_promo_code_id_user_id", + Unique: true, + Columns: []*schema.Column{PromoCodeUsagesColumns[3], PromoCodeUsagesColumns[4]}, + }, + }, + } // ProxiesColumns holds the columns for the "proxies" table. 
ProxiesColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt64, Increment: true}, @@ -371,11 +452,13 @@ var ( {Name: "total_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, {Name: "actual_cost", Type: field.TypeFloat64, Default: 0, SchemaType: map[string]string{"postgres": "decimal(20,10)"}}, {Name: "rate_multiplier", Type: field.TypeFloat64, Default: 1, SchemaType: map[string]string{"postgres": "decimal(10,4)"}}, + {Name: "account_rate_multiplier", Type: field.TypeFloat64, Nullable: true, SchemaType: map[string]string{"postgres": "decimal(10,4)"}}, {Name: "billing_type", Type: field.TypeInt8, Default: 0}, {Name: "stream", Type: field.TypeBool, Default: false}, {Name: "duration_ms", Type: field.TypeInt, Nullable: true}, {Name: "first_token_ms", Type: field.TypeInt, Nullable: true}, {Name: "user_agent", Type: field.TypeString, Nullable: true, Size: 512}, + {Name: "ip_address", Type: field.TypeString, Nullable: true, Size: 45}, {Name: "image_count", Type: field.TypeInt, Default: 0}, {Name: "image_size", Type: field.TypeString, Nullable: true, Size: 10}, {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, @@ -393,31 +476,31 @@ var ( ForeignKeys: []*schema.ForeignKey{ { Symbol: "usage_logs_api_keys_usage_logs", - Columns: []*schema.Column{UsageLogsColumns[24]}, + Columns: []*schema.Column{UsageLogsColumns[26]}, RefColumns: []*schema.Column{APIKeysColumns[0]}, OnDelete: schema.NoAction, }, { Symbol: "usage_logs_accounts_usage_logs", - Columns: []*schema.Column{UsageLogsColumns[25]}, + Columns: []*schema.Column{UsageLogsColumns[27]}, RefColumns: []*schema.Column{AccountsColumns[0]}, OnDelete: schema.NoAction, }, { Symbol: "usage_logs_groups_usage_logs", - Columns: []*schema.Column{UsageLogsColumns[26]}, + Columns: []*schema.Column{UsageLogsColumns[28]}, RefColumns: []*schema.Column{GroupsColumns[0]}, OnDelete: schema.SetNull, }, { Symbol: 
"usage_logs_users_usage_logs", - Columns: []*schema.Column{UsageLogsColumns[27]}, + Columns: []*schema.Column{UsageLogsColumns[29]}, RefColumns: []*schema.Column{UsersColumns[0]}, OnDelete: schema.NoAction, }, { Symbol: "usage_logs_user_subscriptions_usage_logs", - Columns: []*schema.Column{UsageLogsColumns[28]}, + Columns: []*schema.Column{UsageLogsColumns[30]}, RefColumns: []*schema.Column{UserSubscriptionsColumns[0]}, OnDelete: schema.SetNull, }, @@ -426,32 +509,32 @@ var ( { Name: "usagelog_user_id", Unique: false, - Columns: []*schema.Column{UsageLogsColumns[27]}, + Columns: []*schema.Column{UsageLogsColumns[29]}, }, { Name: "usagelog_api_key_id", Unique: false, - Columns: []*schema.Column{UsageLogsColumns[24]}, + Columns: []*schema.Column{UsageLogsColumns[26]}, }, { Name: "usagelog_account_id", Unique: false, - Columns: []*schema.Column{UsageLogsColumns[25]}, + Columns: []*schema.Column{UsageLogsColumns[27]}, }, { Name: "usagelog_group_id", Unique: false, - Columns: []*schema.Column{UsageLogsColumns[26]}, + Columns: []*schema.Column{UsageLogsColumns[28]}, }, { Name: "usagelog_subscription_id", Unique: false, - Columns: []*schema.Column{UsageLogsColumns[28]}, + Columns: []*schema.Column{UsageLogsColumns[30]}, }, { Name: "usagelog_created_at", Unique: false, - Columns: []*schema.Column{UsageLogsColumns[23]}, + Columns: []*schema.Column{UsageLogsColumns[25]}, }, { Name: "usagelog_model", @@ -466,12 +549,12 @@ var ( { Name: "usagelog_user_id_created_at", Unique: false, - Columns: []*schema.Column{UsageLogsColumns[27], UsageLogsColumns[23]}, + Columns: []*schema.Column{UsageLogsColumns[29], UsageLogsColumns[25]}, }, { Name: "usagelog_api_key_id_created_at", Unique: false, - Columns: []*schema.Column{UsageLogsColumns[24], UsageLogsColumns[23]}, + Columns: []*schema.Column{UsageLogsColumns[26], UsageLogsColumns[25]}, }, }, } @@ -717,6 +800,8 @@ var ( AccountsTable, AccountGroupsTable, GroupsTable, + PromoCodesTable, + PromoCodeUsagesTable, ProxiesTable, 
RedeemCodesTable, SettingsTable, @@ -747,6 +832,14 @@ func init() { GroupsTable.Annotation = &entsql.Annotation{ Table: "groups", } + PromoCodesTable.Annotation = &entsql.Annotation{ + Table: "promo_codes", + } + PromoCodeUsagesTable.ForeignKeys[0].RefTable = PromoCodesTable + PromoCodeUsagesTable.ForeignKeys[1].RefTable = UsersTable + PromoCodeUsagesTable.Annotation = &entsql.Annotation{ + Table: "promo_code_usages", + } ProxiesTable.Annotation = &entsql.Annotation{ Table: "proxies", } diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 4e01e12b..cd2fe8e0 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -16,6 +16,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/setting" @@ -40,6 +42,8 @@ const ( TypeAccount = "Account" TypeAccountGroup = "AccountGroup" TypeGroup = "Group" + TypePromoCode = "PromoCode" + TypePromoCodeUsage = "PromoCodeUsage" TypeProxy = "Proxy" TypeRedeemCode = "RedeemCode" TypeSetting = "Setting" @@ -54,26 +58,30 @@ const ( // APIKeyMutation represents an operation that mutates the APIKey nodes in the graph. 
type APIKeyMutation struct { config - op Op - typ string - id *int64 - created_at *time.Time - updated_at *time.Time - deleted_at *time.Time - key *string - name *string - status *string - clearedFields map[string]struct{} - user *int64 - cleareduser bool - group *int64 - clearedgroup bool - usage_logs map[int64]struct{} - removedusage_logs map[int64]struct{} - clearedusage_logs bool - done bool - oldValue func(context.Context) (*APIKey, error) - predicates []predicate.APIKey + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + key *string + name *string + status *string + ip_whitelist *[]string + appendip_whitelist []string + ip_blacklist *[]string + appendip_blacklist []string + clearedFields map[string]struct{} + user *int64 + cleareduser bool + group *int64 + clearedgroup bool + usage_logs map[int64]struct{} + removedusage_logs map[int64]struct{} + clearedusage_logs bool + done bool + oldValue func(context.Context) (*APIKey, error) + predicates []predicate.APIKey } var _ ent.Mutation = (*APIKeyMutation)(nil) @@ -488,6 +496,136 @@ func (m *APIKeyMutation) ResetStatus() { m.status = nil } +// SetIPWhitelist sets the "ip_whitelist" field. +func (m *APIKeyMutation) SetIPWhitelist(s []string) { + m.ip_whitelist = &s + m.appendip_whitelist = nil +} + +// IPWhitelist returns the value of the "ip_whitelist" field in the mutation. +func (m *APIKeyMutation) IPWhitelist() (r []string, exists bool) { + v := m.ip_whitelist + if v == nil { + return + } + return *v, true +} + +// OldIPWhitelist returns the old "ip_whitelist" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *APIKeyMutation) OldIPWhitelist(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIPWhitelist is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIPWhitelist requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIPWhitelist: %w", err) + } + return oldValue.IPWhitelist, nil +} + +// AppendIPWhitelist adds s to the "ip_whitelist" field. +func (m *APIKeyMutation) AppendIPWhitelist(s []string) { + m.appendip_whitelist = append(m.appendip_whitelist, s...) +} + +// AppendedIPWhitelist returns the list of values that were appended to the "ip_whitelist" field in this mutation. +func (m *APIKeyMutation) AppendedIPWhitelist() ([]string, bool) { + if len(m.appendip_whitelist) == 0 { + return nil, false + } + return m.appendip_whitelist, true +} + +// ClearIPWhitelist clears the value of the "ip_whitelist" field. +func (m *APIKeyMutation) ClearIPWhitelist() { + m.ip_whitelist = nil + m.appendip_whitelist = nil + m.clearedFields[apikey.FieldIPWhitelist] = struct{}{} +} + +// IPWhitelistCleared returns if the "ip_whitelist" field was cleared in this mutation. +func (m *APIKeyMutation) IPWhitelistCleared() bool { + _, ok := m.clearedFields[apikey.FieldIPWhitelist] + return ok +} + +// ResetIPWhitelist resets all changes to the "ip_whitelist" field. +func (m *APIKeyMutation) ResetIPWhitelist() { + m.ip_whitelist = nil + m.appendip_whitelist = nil + delete(m.clearedFields, apikey.FieldIPWhitelist) +} + +// SetIPBlacklist sets the "ip_blacklist" field. +func (m *APIKeyMutation) SetIPBlacklist(s []string) { + m.ip_blacklist = &s + m.appendip_blacklist = nil +} + +// IPBlacklist returns the value of the "ip_blacklist" field in the mutation. 
+func (m *APIKeyMutation) IPBlacklist() (r []string, exists bool) { + v := m.ip_blacklist + if v == nil { + return + } + return *v, true +} + +// OldIPBlacklist returns the old "ip_blacklist" field's value of the APIKey entity. +// If the APIKey object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *APIKeyMutation) OldIPBlacklist(ctx context.Context) (v []string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIPBlacklist is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIPBlacklist requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIPBlacklist: %w", err) + } + return oldValue.IPBlacklist, nil +} + +// AppendIPBlacklist adds s to the "ip_blacklist" field. +func (m *APIKeyMutation) AppendIPBlacklist(s []string) { + m.appendip_blacklist = append(m.appendip_blacklist, s...) +} + +// AppendedIPBlacklist returns the list of values that were appended to the "ip_blacklist" field in this mutation. +func (m *APIKeyMutation) AppendedIPBlacklist() ([]string, bool) { + if len(m.appendip_blacklist) == 0 { + return nil, false + } + return m.appendip_blacklist, true +} + +// ClearIPBlacklist clears the value of the "ip_blacklist" field. +func (m *APIKeyMutation) ClearIPBlacklist() { + m.ip_blacklist = nil + m.appendip_blacklist = nil + m.clearedFields[apikey.FieldIPBlacklist] = struct{}{} +} + +// IPBlacklistCleared returns if the "ip_blacklist" field was cleared in this mutation. +func (m *APIKeyMutation) IPBlacklistCleared() bool { + _, ok := m.clearedFields[apikey.FieldIPBlacklist] + return ok +} + +// ResetIPBlacklist resets all changes to the "ip_blacklist" field. 
+func (m *APIKeyMutation) ResetIPBlacklist() { + m.ip_blacklist = nil + m.appendip_blacklist = nil + delete(m.clearedFields, apikey.FieldIPBlacklist) +} + // ClearUser clears the "user" edge to the User entity. func (m *APIKeyMutation) ClearUser() { m.cleareduser = true @@ -630,7 +768,7 @@ func (m *APIKeyMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *APIKeyMutation) Fields() []string { - fields := make([]string, 0, 8) + fields := make([]string, 0, 10) if m.created_at != nil { fields = append(fields, apikey.FieldCreatedAt) } @@ -655,6 +793,12 @@ func (m *APIKeyMutation) Fields() []string { if m.status != nil { fields = append(fields, apikey.FieldStatus) } + if m.ip_whitelist != nil { + fields = append(fields, apikey.FieldIPWhitelist) + } + if m.ip_blacklist != nil { + fields = append(fields, apikey.FieldIPBlacklist) + } return fields } @@ -679,6 +823,10 @@ func (m *APIKeyMutation) Field(name string) (ent.Value, bool) { return m.GroupID() case apikey.FieldStatus: return m.Status() + case apikey.FieldIPWhitelist: + return m.IPWhitelist() + case apikey.FieldIPBlacklist: + return m.IPBlacklist() } return nil, false } @@ -704,6 +852,10 @@ func (m *APIKeyMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldGroupID(ctx) case apikey.FieldStatus: return m.OldStatus(ctx) + case apikey.FieldIPWhitelist: + return m.OldIPWhitelist(ctx) + case apikey.FieldIPBlacklist: + return m.OldIPBlacklist(ctx) } return nil, fmt.Errorf("unknown APIKey field %s", name) } @@ -769,6 +921,20 @@ func (m *APIKeyMutation) SetField(name string, value ent.Value) error { } m.SetStatus(v) return nil + case apikey.FieldIPWhitelist: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIPWhitelist(v) + return nil + case apikey.FieldIPBlacklist: + v, ok := value.([]string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", 
value, name) + } + m.SetIPBlacklist(v) + return nil } return fmt.Errorf("unknown APIKey field %s", name) } @@ -808,6 +974,12 @@ func (m *APIKeyMutation) ClearedFields() []string { if m.FieldCleared(apikey.FieldGroupID) { fields = append(fields, apikey.FieldGroupID) } + if m.FieldCleared(apikey.FieldIPWhitelist) { + fields = append(fields, apikey.FieldIPWhitelist) + } + if m.FieldCleared(apikey.FieldIPBlacklist) { + fields = append(fields, apikey.FieldIPBlacklist) + } return fields } @@ -828,6 +1000,12 @@ func (m *APIKeyMutation) ClearField(name string) error { case apikey.FieldGroupID: m.ClearGroupID() return nil + case apikey.FieldIPWhitelist: + m.ClearIPWhitelist() + return nil + case apikey.FieldIPBlacklist: + m.ClearIPBlacklist() + return nil } return fmt.Errorf("unknown APIKey nullable field %s", name) } @@ -860,6 +1038,12 @@ func (m *APIKeyMutation) ResetField(name string) error { case apikey.FieldStatus: m.ResetStatus() return nil + case apikey.FieldIPWhitelist: + m.ResetIPWhitelist() + return nil + case apikey.FieldIPBlacklist: + m.ResetIPBlacklist() + return nil } return fmt.Errorf("unknown APIKey field %s", name) } @@ -1003,6 +1187,8 @@ type AccountMutation struct { addconcurrency *int priority *int addpriority *int + rate_multiplier *float64 + addrate_multiplier *float64 status *string error_message *string last_used_at *time.Time @@ -1638,6 +1824,62 @@ func (m *AccountMutation) ResetPriority() { m.addpriority = nil } +// SetRateMultiplier sets the "rate_multiplier" field. +func (m *AccountMutation) SetRateMultiplier(f float64) { + m.rate_multiplier = &f + m.addrate_multiplier = nil +} + +// RateMultiplier returns the value of the "rate_multiplier" field in the mutation. +func (m *AccountMutation) RateMultiplier() (r float64, exists bool) { + v := m.rate_multiplier + if v == nil { + return + } + return *v, true +} + +// OldRateMultiplier returns the old "rate_multiplier" field's value of the Account entity. 
+// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldRateMultiplier(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRateMultiplier is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRateMultiplier requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRateMultiplier: %w", err) + } + return oldValue.RateMultiplier, nil +} + +// AddRateMultiplier adds f to the "rate_multiplier" field. +func (m *AccountMutation) AddRateMultiplier(f float64) { + if m.addrate_multiplier != nil { + *m.addrate_multiplier += f + } else { + m.addrate_multiplier = &f + } +} + +// AddedRateMultiplier returns the value that was added to the "rate_multiplier" field in this mutation. +func (m *AccountMutation) AddedRateMultiplier() (r float64, exists bool) { + v := m.addrate_multiplier + if v == nil { + return + } + return *v, true +} + +// ResetRateMultiplier resets all changes to the "rate_multiplier" field. +func (m *AccountMutation) ResetRateMultiplier() { + m.rate_multiplier = nil + m.addrate_multiplier = nil +} + // SetStatus sets the "status" field. func (m *AccountMutation) SetStatus(s string) { m.status = &s @@ -2356,7 +2598,7 @@ func (m *AccountMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *AccountMutation) Fields() []string { - fields := make([]string, 0, 24) + fields := make([]string, 0, 25) if m.created_at != nil { fields = append(fields, account.FieldCreatedAt) } @@ -2393,6 +2635,9 @@ func (m *AccountMutation) Fields() []string { if m.priority != nil { fields = append(fields, account.FieldPriority) } + if m.rate_multiplier != nil { + fields = append(fields, account.FieldRateMultiplier) + } if m.status != nil { fields = append(fields, account.FieldStatus) } @@ -2461,6 +2706,8 @@ func (m *AccountMutation) Field(name string) (ent.Value, bool) { return m.Concurrency() case account.FieldPriority: return m.Priority() + case account.FieldRateMultiplier: + return m.RateMultiplier() case account.FieldStatus: return m.Status() case account.FieldErrorMessage: @@ -2518,6 +2765,8 @@ func (m *AccountMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldConcurrency(ctx) case account.FieldPriority: return m.OldPriority(ctx) + case account.FieldRateMultiplier: + return m.OldRateMultiplier(ctx) case account.FieldStatus: return m.OldStatus(ctx) case account.FieldErrorMessage: @@ -2635,6 +2884,13 @@ func (m *AccountMutation) SetField(name string, value ent.Value) error { } m.SetPriority(v) return nil + case account.FieldRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRateMultiplier(v) + return nil case account.FieldStatus: v, ok := value.(string) if !ok { @@ -2733,6 +2989,9 @@ func (m *AccountMutation) AddedFields() []string { if m.addpriority != nil { fields = append(fields, account.FieldPriority) } + if m.addrate_multiplier != nil { + fields = append(fields, account.FieldRateMultiplier) + } return fields } @@ -2745,6 +3004,8 @@ func (m *AccountMutation) AddedField(name string) (ent.Value, bool) { return m.AddedConcurrency() case account.FieldPriority: return m.AddedPriority() + case account.FieldRateMultiplier: + return m.AddedRateMultiplier() } 
return nil, false } @@ -2768,6 +3029,13 @@ func (m *AccountMutation) AddField(name string, value ent.Value) error { } m.AddPriority(v) return nil + case account.FieldRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddRateMultiplier(v) + return nil } return fmt.Errorf("unknown Account numeric field %s", name) } @@ -2906,6 +3174,9 @@ func (m *AccountMutation) ResetField(name string) error { case account.FieldPriority: m.ResetPriority() return nil + case account.FieldRateMultiplier: + m.ResetRateMultiplier() + return nil case account.FieldStatus: m.ResetStatus() return nil @@ -3593,6 +3864,8 @@ type GroupMutation struct { claude_code_only *bool fallback_group_id *int64 addfallback_group_id *int64 + model_routing *map[string][]int64 + model_routing_enabled *bool clearedFields map[string]struct{} api_keys map[int64]struct{} removedapi_keys map[int64]struct{} @@ -4703,6 +4976,91 @@ func (m *GroupMutation) ResetFallbackGroupID() { delete(m.clearedFields, group.FieldFallbackGroupID) } +// SetModelRouting sets the "model_routing" field. +func (m *GroupMutation) SetModelRouting(value map[string][]int64) { + m.model_routing = &value +} + +// ModelRouting returns the value of the "model_routing" field in the mutation. +func (m *GroupMutation) ModelRouting() (r map[string][]int64, exists bool) { + v := m.model_routing + if v == nil { + return + } + return *v, true +} + +// OldModelRouting returns the old "model_routing" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *GroupMutation) OldModelRouting(ctx context.Context) (v map[string][]int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldModelRouting is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldModelRouting requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldModelRouting: %w", err) + } + return oldValue.ModelRouting, nil +} + +// ClearModelRouting clears the value of the "model_routing" field. +func (m *GroupMutation) ClearModelRouting() { + m.model_routing = nil + m.clearedFields[group.FieldModelRouting] = struct{}{} +} + +// ModelRoutingCleared returns if the "model_routing" field was cleared in this mutation. +func (m *GroupMutation) ModelRoutingCleared() bool { + _, ok := m.clearedFields[group.FieldModelRouting] + return ok +} + +// ResetModelRouting resets all changes to the "model_routing" field. +func (m *GroupMutation) ResetModelRouting() { + m.model_routing = nil + delete(m.clearedFields, group.FieldModelRouting) +} + +// SetModelRoutingEnabled sets the "model_routing_enabled" field. +func (m *GroupMutation) SetModelRoutingEnabled(b bool) { + m.model_routing_enabled = &b +} + +// ModelRoutingEnabled returns the value of the "model_routing_enabled" field in the mutation. +func (m *GroupMutation) ModelRoutingEnabled() (r bool, exists bool) { + v := m.model_routing_enabled + if v == nil { + return + } + return *v, true +} + +// OldModelRoutingEnabled returns the old "model_routing_enabled" field's value of the Group entity. +// If the Group object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *GroupMutation) OldModelRoutingEnabled(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldModelRoutingEnabled is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldModelRoutingEnabled requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldModelRoutingEnabled: %w", err) + } + return oldValue.ModelRoutingEnabled, nil +} + +// ResetModelRoutingEnabled resets all changes to the "model_routing_enabled" field. +func (m *GroupMutation) ResetModelRoutingEnabled() { + m.model_routing_enabled = nil +} + // AddAPIKeyIDs adds the "api_keys" edge to the APIKey entity by ids. func (m *GroupMutation) AddAPIKeyIDs(ids ...int64) { if m.api_keys == nil { @@ -5061,7 +5419,7 @@ func (m *GroupMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *GroupMutation) Fields() []string { - fields := make([]string, 0, 19) + fields := make([]string, 0, 21) if m.created_at != nil { fields = append(fields, group.FieldCreatedAt) } @@ -5119,6 +5477,12 @@ func (m *GroupMutation) Fields() []string { if m.fallback_group_id != nil { fields = append(fields, group.FieldFallbackGroupID) } + if m.model_routing != nil { + fields = append(fields, group.FieldModelRouting) + } + if m.model_routing_enabled != nil { + fields = append(fields, group.FieldModelRoutingEnabled) + } return fields } @@ -5165,6 +5529,10 @@ func (m *GroupMutation) Field(name string) (ent.Value, bool) { return m.ClaudeCodeOnly() case group.FieldFallbackGroupID: return m.FallbackGroupID() + case group.FieldModelRouting: + return m.ModelRouting() + case group.FieldModelRoutingEnabled: + return m.ModelRoutingEnabled() } return nil, false } @@ -5212,6 +5580,10 @@ func (m *GroupMutation) OldField(ctx context.Context, name string) (ent.Value, e return m.OldClaudeCodeOnly(ctx) case group.FieldFallbackGroupID: return m.OldFallbackGroupID(ctx) + case group.FieldModelRouting: + return m.OldModelRouting(ctx) + case group.FieldModelRoutingEnabled: + return m.OldModelRoutingEnabled(ctx) } return nil, fmt.Errorf("unknown Group field %s", name) } @@ -5354,6 +5726,20 @@ func (m *GroupMutation) SetField(name string, value ent.Value) error { } m.SetFallbackGroupID(v) return nil + case group.FieldModelRouting: + v, ok := value.(map[string][]int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetModelRouting(v) + return nil + case group.FieldModelRoutingEnabled: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetModelRoutingEnabled(v) + return nil } return fmt.Errorf("unknown Group field %s", name) } @@ -5522,6 +5908,9 @@ func (m *GroupMutation) ClearedFields() []string { if m.FieldCleared(group.FieldFallbackGroupID) { fields = append(fields, 
group.FieldFallbackGroupID) } + if m.FieldCleared(group.FieldModelRouting) { + fields = append(fields, group.FieldModelRouting) + } return fields } @@ -5563,6 +5952,9 @@ func (m *GroupMutation) ClearField(name string) error { case group.FieldFallbackGroupID: m.ClearFallbackGroupID() return nil + case group.FieldModelRouting: + m.ClearModelRouting() + return nil } return fmt.Errorf("unknown Group nullable field %s", name) } @@ -5628,6 +6020,12 @@ func (m *GroupMutation) ResetField(name string) error { case group.FieldFallbackGroupID: m.ResetFallbackGroupID() return nil + case group.FieldModelRouting: + m.ResetModelRouting() + return nil + case group.FieldModelRoutingEnabled: + m.ResetModelRoutingEnabled() + return nil } return fmt.Errorf("unknown Group field %s", name) } @@ -5846,6 +6244,1624 @@ func (m *GroupMutation) ResetEdge(name string) error { return fmt.Errorf("unknown Group edge %s", name) } +// PromoCodeMutation represents an operation that mutates the PromoCode nodes in the graph. +type PromoCodeMutation struct { + config + op Op + typ string + id *int64 + code *string + bonus_amount *float64 + addbonus_amount *float64 + max_uses *int + addmax_uses *int + used_count *int + addused_count *int + status *string + expires_at *time.Time + notes *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + usage_records map[int64]struct{} + removedusage_records map[int64]struct{} + clearedusage_records bool + done bool + oldValue func(context.Context) (*PromoCode, error) + predicates []predicate.PromoCode +} + +var _ ent.Mutation = (*PromoCodeMutation)(nil) + +// promocodeOption allows management of the mutation configuration using functional options. +type promocodeOption func(*PromoCodeMutation) + +// newPromoCodeMutation creates new mutation for the PromoCode entity. 
+func newPromoCodeMutation(c config, op Op, opts ...promocodeOption) *PromoCodeMutation { + m := &PromoCodeMutation{ + config: c, + op: op, + typ: TypePromoCode, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withPromoCodeID sets the ID field of the mutation. +func withPromoCodeID(id int64) promocodeOption { + return func(m *PromoCodeMutation) { + var ( + err error + once sync.Once + value *PromoCode + ) + m.oldValue = func(ctx context.Context) (*PromoCode, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().PromoCode.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withPromoCode sets the old PromoCode of the mutation. +func withPromoCode(node *PromoCode) promocodeOption { + return func(m *PromoCodeMutation) { + m.oldValue = func(context.Context) (*PromoCode, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m PromoCodeMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m PromoCodeMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. 
+func (m *PromoCodeMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *PromoCodeMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().PromoCode.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCode sets the "code" field. +func (m *PromoCodeMutation) SetCode(s string) { + m.code = &s +} + +// Code returns the value of the "code" field in the mutation. +func (m *PromoCodeMutation) Code() (r string, exists bool) { + v := m.code + if v == nil { + return + } + return *v, true +} + +// OldCode returns the old "code" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldCode(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCode is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCode requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCode: %w", err) + } + return oldValue.Code, nil +} + +// ResetCode resets all changes to the "code" field. 
+func (m *PromoCodeMutation) ResetCode() { + m.code = nil +} + +// SetBonusAmount sets the "bonus_amount" field. +func (m *PromoCodeMutation) SetBonusAmount(f float64) { + m.bonus_amount = &f + m.addbonus_amount = nil +} + +// BonusAmount returns the value of the "bonus_amount" field in the mutation. +func (m *PromoCodeMutation) BonusAmount() (r float64, exists bool) { + v := m.bonus_amount + if v == nil { + return + } + return *v, true +} + +// OldBonusAmount returns the old "bonus_amount" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldBonusAmount(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBonusAmount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBonusAmount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBonusAmount: %w", err) + } + return oldValue.BonusAmount, nil +} + +// AddBonusAmount adds f to the "bonus_amount" field. +func (m *PromoCodeMutation) AddBonusAmount(f float64) { + if m.addbonus_amount != nil { + *m.addbonus_amount += f + } else { + m.addbonus_amount = &f + } +} + +// AddedBonusAmount returns the value that was added to the "bonus_amount" field in this mutation. +func (m *PromoCodeMutation) AddedBonusAmount() (r float64, exists bool) { + v := m.addbonus_amount + if v == nil { + return + } + return *v, true +} + +// ResetBonusAmount resets all changes to the "bonus_amount" field. +func (m *PromoCodeMutation) ResetBonusAmount() { + m.bonus_amount = nil + m.addbonus_amount = nil +} + +// SetMaxUses sets the "max_uses" field. 
+func (m *PromoCodeMutation) SetMaxUses(i int) { + m.max_uses = &i + m.addmax_uses = nil +} + +// MaxUses returns the value of the "max_uses" field in the mutation. +func (m *PromoCodeMutation) MaxUses() (r int, exists bool) { + v := m.max_uses + if v == nil { + return + } + return *v, true +} + +// OldMaxUses returns the old "max_uses" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldMaxUses(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMaxUses is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMaxUses requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMaxUses: %w", err) + } + return oldValue.MaxUses, nil +} + +// AddMaxUses adds i to the "max_uses" field. +func (m *PromoCodeMutation) AddMaxUses(i int) { + if m.addmax_uses != nil { + *m.addmax_uses += i + } else { + m.addmax_uses = &i + } +} + +// AddedMaxUses returns the value that was added to the "max_uses" field in this mutation. +func (m *PromoCodeMutation) AddedMaxUses() (r int, exists bool) { + v := m.addmax_uses + if v == nil { + return + } + return *v, true +} + +// ResetMaxUses resets all changes to the "max_uses" field. +func (m *PromoCodeMutation) ResetMaxUses() { + m.max_uses = nil + m.addmax_uses = nil +} + +// SetUsedCount sets the "used_count" field. +func (m *PromoCodeMutation) SetUsedCount(i int) { + m.used_count = &i + m.addused_count = nil +} + +// UsedCount returns the value of the "used_count" field in the mutation. 
+func (m *PromoCodeMutation) UsedCount() (r int, exists bool) { + v := m.used_count + if v == nil { + return + } + return *v, true +} + +// OldUsedCount returns the old "used_count" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldUsedCount(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsedCount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsedCount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsedCount: %w", err) + } + return oldValue.UsedCount, nil +} + +// AddUsedCount adds i to the "used_count" field. +func (m *PromoCodeMutation) AddUsedCount(i int) { + if m.addused_count != nil { + *m.addused_count += i + } else { + m.addused_count = &i + } +} + +// AddedUsedCount returns the value that was added to the "used_count" field in this mutation. +func (m *PromoCodeMutation) AddedUsedCount() (r int, exists bool) { + v := m.addused_count + if v == nil { + return + } + return *v, true +} + +// ResetUsedCount resets all changes to the "used_count" field. +func (m *PromoCodeMutation) ResetUsedCount() { + m.used_count = nil + m.addused_count = nil +} + +// SetStatus sets the "status" field. +func (m *PromoCodeMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *PromoCodeMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. 
+// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *PromoCodeMutation) ResetStatus() { + m.status = nil +} + +// SetExpiresAt sets the "expires_at" field. +func (m *PromoCodeMutation) SetExpiresAt(t time.Time) { + m.expires_at = &t +} + +// ExpiresAt returns the value of the "expires_at" field in the mutation. +func (m *PromoCodeMutation) ExpiresAt() (r time.Time, exists bool) { + v := m.expires_at + if v == nil { + return + } + return *v, true +} + +// OldExpiresAt returns the old "expires_at" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldExpiresAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiresAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err) + } + return oldValue.ExpiresAt, nil +} + +// ClearExpiresAt clears the value of the "expires_at" field. 
+func (m *PromoCodeMutation) ClearExpiresAt() { + m.expires_at = nil + m.clearedFields[promocode.FieldExpiresAt] = struct{}{} +} + +// ExpiresAtCleared returns if the "expires_at" field was cleared in this mutation. +func (m *PromoCodeMutation) ExpiresAtCleared() bool { + _, ok := m.clearedFields[promocode.FieldExpiresAt] + return ok +} + +// ResetExpiresAt resets all changes to the "expires_at" field. +func (m *PromoCodeMutation) ResetExpiresAt() { + m.expires_at = nil + delete(m.clearedFields, promocode.FieldExpiresAt) +} + +// SetNotes sets the "notes" field. +func (m *PromoCodeMutation) SetNotes(s string) { + m.notes = &s +} + +// Notes returns the value of the "notes" field in the mutation. +func (m *PromoCodeMutation) Notes() (r string, exists bool) { + v := m.notes + if v == nil { + return + } + return *v, true +} + +// OldNotes returns the old "notes" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldNotes(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldNotes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldNotes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldNotes: %w", err) + } + return oldValue.Notes, nil +} + +// ClearNotes clears the value of the "notes" field. +func (m *PromoCodeMutation) ClearNotes() { + m.notes = nil + m.clearedFields[promocode.FieldNotes] = struct{}{} +} + +// NotesCleared returns if the "notes" field was cleared in this mutation. +func (m *PromoCodeMutation) NotesCleared() bool { + _, ok := m.clearedFields[promocode.FieldNotes] + return ok +} + +// ResetNotes resets all changes to the "notes" field. 
+func (m *PromoCodeMutation) ResetNotes() { + m.notes = nil + delete(m.clearedFields, promocode.FieldNotes) +} + +// SetCreatedAt sets the "created_at" field. +func (m *PromoCodeMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *PromoCodeMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *PromoCodeMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *PromoCodeMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *PromoCodeMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the PromoCode entity. +// If the PromoCode object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *PromoCodeMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *PromoCodeMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by ids. +func (m *PromoCodeMutation) AddUsageRecordIDs(ids ...int64) { + if m.usage_records == nil { + m.usage_records = make(map[int64]struct{}) + } + for i := range ids { + m.usage_records[ids[i]] = struct{}{} + } +} + +// ClearUsageRecords clears the "usage_records" edge to the PromoCodeUsage entity. +func (m *PromoCodeMutation) ClearUsageRecords() { + m.clearedusage_records = true +} + +// UsageRecordsCleared reports if the "usage_records" edge to the PromoCodeUsage entity was cleared. +func (m *PromoCodeMutation) UsageRecordsCleared() bool { + return m.clearedusage_records +} + +// RemoveUsageRecordIDs removes the "usage_records" edge to the PromoCodeUsage entity by IDs. +func (m *PromoCodeMutation) RemoveUsageRecordIDs(ids ...int64) { + if m.removedusage_records == nil { + m.removedusage_records = make(map[int64]struct{}) + } + for i := range ids { + delete(m.usage_records, ids[i]) + m.removedusage_records[ids[i]] = struct{}{} + } +} + +// RemovedUsageRecords returns the removed IDs of the "usage_records" edge to the PromoCodeUsage entity. +func (m *PromoCodeMutation) RemovedUsageRecordsIDs() (ids []int64) { + for id := range m.removedusage_records { + ids = append(ids, id) + } + return +} + +// UsageRecordsIDs returns the "usage_records" edge IDs in the mutation. 
+func (m *PromoCodeMutation) UsageRecordsIDs() (ids []int64) { + for id := range m.usage_records { + ids = append(ids, id) + } + return +} + +// ResetUsageRecords resets all changes to the "usage_records" edge. +func (m *PromoCodeMutation) ResetUsageRecords() { + m.usage_records = nil + m.clearedusage_records = false + m.removedusage_records = nil +} + +// Where appends a list predicates to the PromoCodeMutation builder. +func (m *PromoCodeMutation) Where(ps ...predicate.PromoCode) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the PromoCodeMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *PromoCodeMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.PromoCode, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *PromoCodeMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *PromoCodeMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (PromoCode). +func (m *PromoCodeMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *PromoCodeMutation) Fields() []string { + fields := make([]string, 0, 9) + if m.code != nil { + fields = append(fields, promocode.FieldCode) + } + if m.bonus_amount != nil { + fields = append(fields, promocode.FieldBonusAmount) + } + if m.max_uses != nil { + fields = append(fields, promocode.FieldMaxUses) + } + if m.used_count != nil { + fields = append(fields, promocode.FieldUsedCount) + } + if m.status != nil { + fields = append(fields, promocode.FieldStatus) + } + if m.expires_at != nil { + fields = append(fields, promocode.FieldExpiresAt) + } + if m.notes != nil { + fields = append(fields, promocode.FieldNotes) + } + if m.created_at != nil { + fields = append(fields, promocode.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, promocode.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *PromoCodeMutation) Field(name string) (ent.Value, bool) { + switch name { + case promocode.FieldCode: + return m.Code() + case promocode.FieldBonusAmount: + return m.BonusAmount() + case promocode.FieldMaxUses: + return m.MaxUses() + case promocode.FieldUsedCount: + return m.UsedCount() + case promocode.FieldStatus: + return m.Status() + case promocode.FieldExpiresAt: + return m.ExpiresAt() + case promocode.FieldNotes: + return m.Notes() + case promocode.FieldCreatedAt: + return m.CreatedAt() + case promocode.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *PromoCodeMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case promocode.FieldCode: + return m.OldCode(ctx) + case promocode.FieldBonusAmount: + return m.OldBonusAmount(ctx) + case promocode.FieldMaxUses: + return m.OldMaxUses(ctx) + case promocode.FieldUsedCount: + return m.OldUsedCount(ctx) + case promocode.FieldStatus: + return m.OldStatus(ctx) + case promocode.FieldExpiresAt: + return m.OldExpiresAt(ctx) + case promocode.FieldNotes: + return m.OldNotes(ctx) + case promocode.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case promocode.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown PromoCode field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PromoCodeMutation) SetField(name string, value ent.Value) error { + switch name { + case promocode.FieldCode: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCode(v) + return nil + case promocode.FieldBonusAmount: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBonusAmount(v) + return nil + case promocode.FieldMaxUses: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMaxUses(v) + return nil + case promocode.FieldUsedCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsedCount(v) + return nil + case promocode.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case promocode.FieldExpiresAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiresAt(v) + 
return nil + case promocode.FieldNotes: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetNotes(v) + return nil + case promocode.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case promocode.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown PromoCode field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PromoCodeMutation) AddedFields() []string { + var fields []string + if m.addbonus_amount != nil { + fields = append(fields, promocode.FieldBonusAmount) + } + if m.addmax_uses != nil { + fields = append(fields, promocode.FieldMaxUses) + } + if m.addused_count != nil { + fields = append(fields, promocode.FieldUsedCount) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *PromoCodeMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case promocode.FieldBonusAmount: + return m.AddedBonusAmount() + case promocode.FieldMaxUses: + return m.AddedMaxUses() + case promocode.FieldUsedCount: + return m.AddedUsedCount() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *PromoCodeMutation) AddField(name string, value ent.Value) error { + switch name { + case promocode.FieldBonusAmount: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddBonusAmount(v) + return nil + case promocode.FieldMaxUses: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddMaxUses(v) + return nil + case promocode.FieldUsedCount: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddUsedCount(v) + return nil + } + return fmt.Errorf("unknown PromoCode numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *PromoCodeMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(promocode.FieldExpiresAt) { + fields = append(fields, promocode.FieldExpiresAt) + } + if m.FieldCleared(promocode.FieldNotes) { + fields = append(fields, promocode.FieldNotes) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PromoCodeMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *PromoCodeMutation) ClearField(name string) error { + switch name { + case promocode.FieldExpiresAt: + m.ClearExpiresAt() + return nil + case promocode.FieldNotes: + m.ClearNotes() + return nil + } + return fmt.Errorf("unknown PromoCode nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *PromoCodeMutation) ResetField(name string) error { + switch name { + case promocode.FieldCode: + m.ResetCode() + return nil + case promocode.FieldBonusAmount: + m.ResetBonusAmount() + return nil + case promocode.FieldMaxUses: + m.ResetMaxUses() + return nil + case promocode.FieldUsedCount: + m.ResetUsedCount() + return nil + case promocode.FieldStatus: + m.ResetStatus() + return nil + case promocode.FieldExpiresAt: + m.ResetExpiresAt() + return nil + case promocode.FieldNotes: + m.ResetNotes() + return nil + case promocode.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case promocode.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown PromoCode field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *PromoCodeMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.usage_records != nil { + edges = append(edges, promocode.EdgeUsageRecords) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *PromoCodeMutation) AddedIDs(name string) []ent.Value { + switch name { + case promocode.EdgeUsageRecords: + ids := make([]ent.Value, 0, len(m.usage_records)) + for id := range m.usage_records { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PromoCodeMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedusage_records != nil { + edges = append(edges, promocode.EdgeUsageRecords) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. 
+func (m *PromoCodeMutation) RemovedIDs(name string) []ent.Value { + switch name { + case promocode.EdgeUsageRecords: + ids := make([]ent.Value, 0, len(m.removedusage_records)) + for id := range m.removedusage_records { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *PromoCodeMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedusage_records { + edges = append(edges, promocode.EdgeUsageRecords) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *PromoCodeMutation) EdgeCleared(name string) bool { + switch name { + case promocode.EdgeUsageRecords: + return m.clearedusage_records + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *PromoCodeMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown PromoCode unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PromoCodeMutation) ResetEdge(name string) error { + switch name { + case promocode.EdgeUsageRecords: + m.ResetUsageRecords() + return nil + } + return fmt.Errorf("unknown PromoCode edge %s", name) +} + +// PromoCodeUsageMutation represents an operation that mutates the PromoCodeUsage nodes in the graph. 
+type PromoCodeUsageMutation struct { + config + op Op + typ string + id *int64 + bonus_amount *float64 + addbonus_amount *float64 + used_at *time.Time + clearedFields map[string]struct{} + promo_code *int64 + clearedpromo_code bool + user *int64 + cleareduser bool + done bool + oldValue func(context.Context) (*PromoCodeUsage, error) + predicates []predicate.PromoCodeUsage +} + +var _ ent.Mutation = (*PromoCodeUsageMutation)(nil) + +// promocodeusageOption allows management of the mutation configuration using functional options. +type promocodeusageOption func(*PromoCodeUsageMutation) + +// newPromoCodeUsageMutation creates new mutation for the PromoCodeUsage entity. +func newPromoCodeUsageMutation(c config, op Op, opts ...promocodeusageOption) *PromoCodeUsageMutation { + m := &PromoCodeUsageMutation{ + config: c, + op: op, + typ: TypePromoCodeUsage, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withPromoCodeUsageID sets the ID field of the mutation. +func withPromoCodeUsageID(id int64) promocodeusageOption { + return func(m *PromoCodeUsageMutation) { + var ( + err error + once sync.Once + value *PromoCodeUsage + ) + m.oldValue = func(ctx context.Context) (*PromoCodeUsage, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().PromoCodeUsage.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withPromoCodeUsage sets the old PromoCodeUsage of the mutation. +func withPromoCodeUsage(node *PromoCodeUsage) promocodeusageOption { + return func(m *PromoCodeUsageMutation) { + m.oldValue = func(context.Context) (*PromoCodeUsage, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. 
+func (m PromoCodeUsageMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m PromoCodeUsageMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *PromoCodeUsageMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *PromoCodeUsageMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().PromoCodeUsage.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (m *PromoCodeUsageMutation) SetPromoCodeID(i int64) { + m.promo_code = &i +} + +// PromoCodeID returns the value of the "promo_code_id" field in the mutation. +func (m *PromoCodeUsageMutation) PromoCodeID() (r int64, exists bool) { + v := m.promo_code + if v == nil { + return + } + return *v, true +} + +// OldPromoCodeID returns the old "promo_code_id" field's value of the PromoCodeUsage entity. 
+// If the PromoCodeUsage object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeUsageMutation) OldPromoCodeID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPromoCodeID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPromoCodeID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPromoCodeID: %w", err) + } + return oldValue.PromoCodeID, nil +} + +// ResetPromoCodeID resets all changes to the "promo_code_id" field. +func (m *PromoCodeUsageMutation) ResetPromoCodeID() { + m.promo_code = nil +} + +// SetUserID sets the "user_id" field. +func (m *PromoCodeUsageMutation) SetUserID(i int64) { + m.user = &i +} + +// UserID returns the value of the "user_id" field in the mutation. +func (m *PromoCodeUsageMutation) UserID() (r int64, exists bool) { + v := m.user + if v == nil { + return + } + return *v, true +} + +// OldUserID returns the old "user_id" field's value of the PromoCodeUsage entity. +// If the PromoCodeUsage object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeUsageMutation) OldUserID(ctx context.Context) (v int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUserID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUserID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUserID: %w", err) + } + return oldValue.UserID, nil +} + +// ResetUserID resets all changes to the "user_id" field. 
+func (m *PromoCodeUsageMutation) ResetUserID() { + m.user = nil +} + +// SetBonusAmount sets the "bonus_amount" field. +func (m *PromoCodeUsageMutation) SetBonusAmount(f float64) { + m.bonus_amount = &f + m.addbonus_amount = nil +} + +// BonusAmount returns the value of the "bonus_amount" field in the mutation. +func (m *PromoCodeUsageMutation) BonusAmount() (r float64, exists bool) { + v := m.bonus_amount + if v == nil { + return + } + return *v, true +} + +// OldBonusAmount returns the old "bonus_amount" field's value of the PromoCodeUsage entity. +// If the PromoCodeUsage object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeUsageMutation) OldBonusAmount(ctx context.Context) (v float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBonusAmount is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBonusAmount requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBonusAmount: %w", err) + } + return oldValue.BonusAmount, nil +} + +// AddBonusAmount adds f to the "bonus_amount" field. +func (m *PromoCodeUsageMutation) AddBonusAmount(f float64) { + if m.addbonus_amount != nil { + *m.addbonus_amount += f + } else { + m.addbonus_amount = &f + } +} + +// AddedBonusAmount returns the value that was added to the "bonus_amount" field in this mutation. +func (m *PromoCodeUsageMutation) AddedBonusAmount() (r float64, exists bool) { + v := m.addbonus_amount + if v == nil { + return + } + return *v, true +} + +// ResetBonusAmount resets all changes to the "bonus_amount" field. +func (m *PromoCodeUsageMutation) ResetBonusAmount() { + m.bonus_amount = nil + m.addbonus_amount = nil +} + +// SetUsedAt sets the "used_at" field. 
+func (m *PromoCodeUsageMutation) SetUsedAt(t time.Time) { + m.used_at = &t +} + +// UsedAt returns the value of the "used_at" field in the mutation. +func (m *PromoCodeUsageMutation) UsedAt() (r time.Time, exists bool) { + v := m.used_at + if v == nil { + return + } + return *v, true +} + +// OldUsedAt returns the old "used_at" field's value of the PromoCodeUsage entity. +// If the PromoCodeUsage object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *PromoCodeUsageMutation) OldUsedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsedAt: %w", err) + } + return oldValue.UsedAt, nil +} + +// ResetUsedAt resets all changes to the "used_at" field. +func (m *PromoCodeUsageMutation) ResetUsedAt() { + m.used_at = nil +} + +// ClearPromoCode clears the "promo_code" edge to the PromoCode entity. +func (m *PromoCodeUsageMutation) ClearPromoCode() { + m.clearedpromo_code = true + m.clearedFields[promocodeusage.FieldPromoCodeID] = struct{}{} +} + +// PromoCodeCleared reports if the "promo_code" edge to the PromoCode entity was cleared. +func (m *PromoCodeUsageMutation) PromoCodeCleared() bool { + return m.clearedpromo_code +} + +// PromoCodeIDs returns the "promo_code" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// PromoCodeID instead. It exists only for internal usage by the builders. 
+func (m *PromoCodeUsageMutation) PromoCodeIDs() (ids []int64) { + if id := m.promo_code; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetPromoCode resets all changes to the "promo_code" edge. +func (m *PromoCodeUsageMutation) ResetPromoCode() { + m.promo_code = nil + m.clearedpromo_code = false +} + +// ClearUser clears the "user" edge to the User entity. +func (m *PromoCodeUsageMutation) ClearUser() { + m.cleareduser = true + m.clearedFields[promocodeusage.FieldUserID] = struct{}{} +} + +// UserCleared reports if the "user" edge to the User entity was cleared. +func (m *PromoCodeUsageMutation) UserCleared() bool { + return m.cleareduser +} + +// UserIDs returns the "user" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// UserID instead. It exists only for internal usage by the builders. +func (m *PromoCodeUsageMutation) UserIDs() (ids []int64) { + if id := m.user; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetUser resets all changes to the "user" edge. +func (m *PromoCodeUsageMutation) ResetUser() { + m.user = nil + m.cleareduser = false +} + +// Where appends a list predicates to the PromoCodeUsageMutation builder. +func (m *PromoCodeUsageMutation) Where(ps ...predicate.PromoCodeUsage) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the PromoCodeUsageMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *PromoCodeUsageMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.PromoCodeUsage, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *PromoCodeUsageMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. 
+func (m *PromoCodeUsageMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (PromoCodeUsage). +func (m *PromoCodeUsageMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *PromoCodeUsageMutation) Fields() []string { + fields := make([]string, 0, 4) + if m.promo_code != nil { + fields = append(fields, promocodeusage.FieldPromoCodeID) + } + if m.user != nil { + fields = append(fields, promocodeusage.FieldUserID) + } + if m.bonus_amount != nil { + fields = append(fields, promocodeusage.FieldBonusAmount) + } + if m.used_at != nil { + fields = append(fields, promocodeusage.FieldUsedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *PromoCodeUsageMutation) Field(name string) (ent.Value, bool) { + switch name { + case promocodeusage.FieldPromoCodeID: + return m.PromoCodeID() + case promocodeusage.FieldUserID: + return m.UserID() + case promocodeusage.FieldBonusAmount: + return m.BonusAmount() + case promocodeusage.FieldUsedAt: + return m.UsedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *PromoCodeUsageMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case promocodeusage.FieldPromoCodeID: + return m.OldPromoCodeID(ctx) + case promocodeusage.FieldUserID: + return m.OldUserID(ctx) + case promocodeusage.FieldBonusAmount: + return m.OldBonusAmount(ctx) + case promocodeusage.FieldUsedAt: + return m.OldUsedAt(ctx) + } + return nil, fmt.Errorf("unknown PromoCodeUsage field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PromoCodeUsageMutation) SetField(name string, value ent.Value) error { + switch name { + case promocodeusage.FieldPromoCodeID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPromoCodeID(v) + return nil + case promocodeusage.FieldUserID: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUserID(v) + return nil + case promocodeusage.FieldBonusAmount: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBonusAmount(v) + return nil + case promocodeusage.FieldUsedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsedAt(v) + return nil + } + return fmt.Errorf("unknown PromoCodeUsage field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *PromoCodeUsageMutation) AddedFields() []string { + var fields []string + if m.addbonus_amount != nil { + fields = append(fields, promocodeusage.FieldBonusAmount) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. 
The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *PromoCodeUsageMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case promocodeusage.FieldBonusAmount: + return m.AddedBonusAmount() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *PromoCodeUsageMutation) AddField(name string, value ent.Value) error { + switch name { + case promocodeusage.FieldBonusAmount: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddBonusAmount(v) + return nil + } + return fmt.Errorf("unknown PromoCodeUsage numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *PromoCodeUsageMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *PromoCodeUsageMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *PromoCodeUsageMutation) ClearField(name string) error { + return fmt.Errorf("unknown PromoCodeUsage nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *PromoCodeUsageMutation) ResetField(name string) error { + switch name { + case promocodeusage.FieldPromoCodeID: + m.ResetPromoCodeID() + return nil + case promocodeusage.FieldUserID: + m.ResetUserID() + return nil + case promocodeusage.FieldBonusAmount: + m.ResetBonusAmount() + return nil + case promocodeusage.FieldUsedAt: + m.ResetUsedAt() + return nil + } + return fmt.Errorf("unknown PromoCodeUsage field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *PromoCodeUsageMutation) AddedEdges() []string { + edges := make([]string, 0, 2) + if m.promo_code != nil { + edges = append(edges, promocodeusage.EdgePromoCode) + } + if m.user != nil { + edges = append(edges, promocodeusage.EdgeUser) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *PromoCodeUsageMutation) AddedIDs(name string) []ent.Value { + switch name { + case promocodeusage.EdgePromoCode: + if id := m.promo_code; id != nil { + return []ent.Value{*id} + } + case promocodeusage.EdgeUser: + if id := m.user; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *PromoCodeUsageMutation) RemovedEdges() []string { + edges := make([]string, 0, 2) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *PromoCodeUsageMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *PromoCodeUsageMutation) ClearedEdges() []string { + edges := make([]string, 0, 2) + if m.clearedpromo_code { + edges = append(edges, promocodeusage.EdgePromoCode) + } + if m.cleareduser { + edges = append(edges, promocodeusage.EdgeUser) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *PromoCodeUsageMutation) EdgeCleared(name string) bool { + switch name { + case promocodeusage.EdgePromoCode: + return m.clearedpromo_code + case promocodeusage.EdgeUser: + return m.cleareduser + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *PromoCodeUsageMutation) ClearEdge(name string) error { + switch name { + case promocodeusage.EdgePromoCode: + m.ClearPromoCode() + return nil + case promocodeusage.EdgeUser: + m.ClearUser() + return nil + } + return fmt.Errorf("unknown PromoCodeUsage unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *PromoCodeUsageMutation) ResetEdge(name string) error { + switch name { + case promocodeusage.EdgePromoCode: + m.ResetPromoCode() + return nil + case promocodeusage.EdgeUser: + m.ResetUser() + return nil + } + return fmt.Errorf("unknown PromoCodeUsage edge %s", name) +} + // ProxyMutation represents an operation that mutates the Proxy nodes in the graph. 
type ProxyMutation struct { config @@ -8388,6 +10404,8 @@ type UsageLogMutation struct { addactual_cost *float64 rate_multiplier *float64 addrate_multiplier *float64 + account_rate_multiplier *float64 + addaccount_rate_multiplier *float64 billing_type *int8 addbilling_type *int8 stream *bool @@ -8396,6 +10414,7 @@ type UsageLogMutation struct { first_token_ms *int addfirst_token_ms *int user_agent *string + ip_address *string image_count *int addimage_count *int image_size *string @@ -9520,6 +11539,76 @@ func (m *UsageLogMutation) ResetRateMultiplier() { m.addrate_multiplier = nil } +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (m *UsageLogMutation) SetAccountRateMultiplier(f float64) { + m.account_rate_multiplier = &f + m.addaccount_rate_multiplier = nil +} + +// AccountRateMultiplier returns the value of the "account_rate_multiplier" field in the mutation. +func (m *UsageLogMutation) AccountRateMultiplier() (r float64, exists bool) { + v := m.account_rate_multiplier + if v == nil { + return + } + return *v, true +} + +// OldAccountRateMultiplier returns the old "account_rate_multiplier" field's value of the UsageLog entity. +// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldAccountRateMultiplier(ctx context.Context) (v *float64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAccountRateMultiplier is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAccountRateMultiplier requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAccountRateMultiplier: %w", err) + } + return oldValue.AccountRateMultiplier, nil +} + +// AddAccountRateMultiplier adds f to the "account_rate_multiplier" field. 
+func (m *UsageLogMutation) AddAccountRateMultiplier(f float64) { + if m.addaccount_rate_multiplier != nil { + *m.addaccount_rate_multiplier += f + } else { + m.addaccount_rate_multiplier = &f + } +} + +// AddedAccountRateMultiplier returns the value that was added to the "account_rate_multiplier" field in this mutation. +func (m *UsageLogMutation) AddedAccountRateMultiplier() (r float64, exists bool) { + v := m.addaccount_rate_multiplier + if v == nil { + return + } + return *v, true +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (m *UsageLogMutation) ClearAccountRateMultiplier() { + m.account_rate_multiplier = nil + m.addaccount_rate_multiplier = nil + m.clearedFields[usagelog.FieldAccountRateMultiplier] = struct{}{} +} + +// AccountRateMultiplierCleared returns if the "account_rate_multiplier" field was cleared in this mutation. +func (m *UsageLogMutation) AccountRateMultiplierCleared() bool { + _, ok := m.clearedFields[usagelog.FieldAccountRateMultiplier] + return ok +} + +// ResetAccountRateMultiplier resets all changes to the "account_rate_multiplier" field. +func (m *UsageLogMutation) ResetAccountRateMultiplier() { + m.account_rate_multiplier = nil + m.addaccount_rate_multiplier = nil + delete(m.clearedFields, usagelog.FieldAccountRateMultiplier) +} + // SetBillingType sets the "billing_type" field. func (m *UsageLogMutation) SetBillingType(i int8) { m.billing_type = &i @@ -9801,6 +11890,55 @@ func (m *UsageLogMutation) ResetUserAgent() { delete(m.clearedFields, usagelog.FieldUserAgent) } +// SetIPAddress sets the "ip_address" field. +func (m *UsageLogMutation) SetIPAddress(s string) { + m.ip_address = &s +} + +// IPAddress returns the value of the "ip_address" field in the mutation. +func (m *UsageLogMutation) IPAddress() (r string, exists bool) { + v := m.ip_address + if v == nil { + return + } + return *v, true +} + +// OldIPAddress returns the old "ip_address" field's value of the UsageLog entity. 
+// If the UsageLog object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UsageLogMutation) OldIPAddress(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIPAddress is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIPAddress requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIPAddress: %w", err) + } + return oldValue.IPAddress, nil +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (m *UsageLogMutation) ClearIPAddress() { + m.ip_address = nil + m.clearedFields[usagelog.FieldIPAddress] = struct{}{} +} + +// IPAddressCleared returns if the "ip_address" field was cleared in this mutation. +func (m *UsageLogMutation) IPAddressCleared() bool { + _, ok := m.clearedFields[usagelog.FieldIPAddress] + return ok +} + +// ResetIPAddress resets all changes to the "ip_address" field. +func (m *UsageLogMutation) ResetIPAddress() { + m.ip_address = nil + delete(m.clearedFields, usagelog.FieldIPAddress) +} + // SetImageCount sets the "image_count" field. func (m *UsageLogMutation) SetImageCount(i int) { m.image_count = &i @@ -10111,7 +12249,7 @@ func (m *UsageLogMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *UsageLogMutation) Fields() []string { - fields := make([]string, 0, 28) + fields := make([]string, 0, 30) if m.user != nil { fields = append(fields, usagelog.FieldUserID) } @@ -10172,6 +12310,9 @@ func (m *UsageLogMutation) Fields() []string { if m.rate_multiplier != nil { fields = append(fields, usagelog.FieldRateMultiplier) } + if m.account_rate_multiplier != nil { + fields = append(fields, usagelog.FieldAccountRateMultiplier) + } if m.billing_type != nil { fields = append(fields, usagelog.FieldBillingType) } @@ -10187,6 +12328,9 @@ func (m *UsageLogMutation) Fields() []string { if m.user_agent != nil { fields = append(fields, usagelog.FieldUserAgent) } + if m.ip_address != nil { + fields = append(fields, usagelog.FieldIPAddress) + } if m.image_count != nil { fields = append(fields, usagelog.FieldImageCount) } @@ -10244,6 +12388,8 @@ func (m *UsageLogMutation) Field(name string) (ent.Value, bool) { return m.ActualCost() case usagelog.FieldRateMultiplier: return m.RateMultiplier() + case usagelog.FieldAccountRateMultiplier: + return m.AccountRateMultiplier() case usagelog.FieldBillingType: return m.BillingType() case usagelog.FieldStream: @@ -10254,6 +12400,8 @@ func (m *UsageLogMutation) Field(name string) (ent.Value, bool) { return m.FirstTokenMs() case usagelog.FieldUserAgent: return m.UserAgent() + case usagelog.FieldIPAddress: + return m.IPAddress() case usagelog.FieldImageCount: return m.ImageCount() case usagelog.FieldImageSize: @@ -10309,6 +12457,8 @@ func (m *UsageLogMutation) OldField(ctx context.Context, name string) (ent.Value return m.OldActualCost(ctx) case usagelog.FieldRateMultiplier: return m.OldRateMultiplier(ctx) + case usagelog.FieldAccountRateMultiplier: + return m.OldAccountRateMultiplier(ctx) case usagelog.FieldBillingType: return m.OldBillingType(ctx) case usagelog.FieldStream: @@ -10319,6 +12469,8 @@ func (m *UsageLogMutation) OldField(ctx context.Context, name string) (ent.Value return m.OldFirstTokenMs(ctx) case 
usagelog.FieldUserAgent: return m.OldUserAgent(ctx) + case usagelog.FieldIPAddress: + return m.OldIPAddress(ctx) case usagelog.FieldImageCount: return m.OldImageCount(ctx) case usagelog.FieldImageSize: @@ -10474,6 +12626,13 @@ func (m *UsageLogMutation) SetField(name string, value ent.Value) error { } m.SetRateMultiplier(v) return nil + case usagelog.FieldAccountRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccountRateMultiplier(v) + return nil case usagelog.FieldBillingType: v, ok := value.(int8) if !ok { @@ -10509,6 +12668,13 @@ func (m *UsageLogMutation) SetField(name string, value ent.Value) error { } m.SetUserAgent(v) return nil + case usagelog.FieldIPAddress: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIPAddress(v) + return nil case usagelog.FieldImageCount: v, ok := value.(int) if !ok { @@ -10577,6 +12743,9 @@ func (m *UsageLogMutation) AddedFields() []string { if m.addrate_multiplier != nil { fields = append(fields, usagelog.FieldRateMultiplier) } + if m.addaccount_rate_multiplier != nil { + fields = append(fields, usagelog.FieldAccountRateMultiplier) + } if m.addbilling_type != nil { fields = append(fields, usagelog.FieldBillingType) } @@ -10623,6 +12792,8 @@ func (m *UsageLogMutation) AddedField(name string) (ent.Value, bool) { return m.AddedActualCost() case usagelog.FieldRateMultiplier: return m.AddedRateMultiplier() + case usagelog.FieldAccountRateMultiplier: + return m.AddedAccountRateMultiplier() case usagelog.FieldBillingType: return m.AddedBillingType() case usagelog.FieldDurationMs: @@ -10731,6 +12902,13 @@ func (m *UsageLogMutation) AddField(name string, value ent.Value) error { } m.AddRateMultiplier(v) return nil + case usagelog.FieldAccountRateMultiplier: + v, ok := value.(float64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + 
m.AddAccountRateMultiplier(v) + return nil case usagelog.FieldBillingType: v, ok := value.(int8) if !ok { @@ -10773,6 +12951,9 @@ func (m *UsageLogMutation) ClearedFields() []string { if m.FieldCleared(usagelog.FieldSubscriptionID) { fields = append(fields, usagelog.FieldSubscriptionID) } + if m.FieldCleared(usagelog.FieldAccountRateMultiplier) { + fields = append(fields, usagelog.FieldAccountRateMultiplier) + } if m.FieldCleared(usagelog.FieldDurationMs) { fields = append(fields, usagelog.FieldDurationMs) } @@ -10782,6 +12963,9 @@ func (m *UsageLogMutation) ClearedFields() []string { if m.FieldCleared(usagelog.FieldUserAgent) { fields = append(fields, usagelog.FieldUserAgent) } + if m.FieldCleared(usagelog.FieldIPAddress) { + fields = append(fields, usagelog.FieldIPAddress) + } if m.FieldCleared(usagelog.FieldImageSize) { fields = append(fields, usagelog.FieldImageSize) } @@ -10805,6 +12989,9 @@ func (m *UsageLogMutation) ClearField(name string) error { case usagelog.FieldSubscriptionID: m.ClearSubscriptionID() return nil + case usagelog.FieldAccountRateMultiplier: + m.ClearAccountRateMultiplier() + return nil case usagelog.FieldDurationMs: m.ClearDurationMs() return nil @@ -10814,6 +13001,9 @@ func (m *UsageLogMutation) ClearField(name string) error { case usagelog.FieldUserAgent: m.ClearUserAgent() return nil + case usagelog.FieldIPAddress: + m.ClearIPAddress() + return nil case usagelog.FieldImageSize: m.ClearImageSize() return nil @@ -10885,6 +13075,9 @@ func (m *UsageLogMutation) ResetField(name string) error { case usagelog.FieldRateMultiplier: m.ResetRateMultiplier() return nil + case usagelog.FieldAccountRateMultiplier: + m.ResetAccountRateMultiplier() + return nil case usagelog.FieldBillingType: m.ResetBillingType() return nil @@ -10900,6 +13093,9 @@ func (m *UsageLogMutation) ResetField(name string) error { case usagelog.FieldUserAgent: m.ResetUserAgent() return nil + case usagelog.FieldIPAddress: + m.ResetIPAddress() + return nil case 
usagelog.FieldImageCount: m.ResetImageCount() return nil @@ -11100,6 +13296,9 @@ type UserMutation struct { attribute_values map[int64]struct{} removedattribute_values map[int64]struct{} clearedattribute_values bool + promo_code_usages map[int64]struct{} + removedpromo_code_usages map[int64]struct{} + clearedpromo_code_usages bool done bool oldValue func(context.Context) (*User, error) predicates []predicate.User @@ -12030,6 +14229,60 @@ func (m *UserMutation) ResetAttributeValues() { m.removedattribute_values = nil } +// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by ids. +func (m *UserMutation) AddPromoCodeUsageIDs(ids ...int64) { + if m.promo_code_usages == nil { + m.promo_code_usages = make(map[int64]struct{}) + } + for i := range ids { + m.promo_code_usages[ids[i]] = struct{}{} + } +} + +// ClearPromoCodeUsages clears the "promo_code_usages" edge to the PromoCodeUsage entity. +func (m *UserMutation) ClearPromoCodeUsages() { + m.clearedpromo_code_usages = true +} + +// PromoCodeUsagesCleared reports if the "promo_code_usages" edge to the PromoCodeUsage entity was cleared. +func (m *UserMutation) PromoCodeUsagesCleared() bool { + return m.clearedpromo_code_usages +} + +// RemovePromoCodeUsageIDs removes the "promo_code_usages" edge to the PromoCodeUsage entity by IDs. +func (m *UserMutation) RemovePromoCodeUsageIDs(ids ...int64) { + if m.removedpromo_code_usages == nil { + m.removedpromo_code_usages = make(map[int64]struct{}) + } + for i := range ids { + delete(m.promo_code_usages, ids[i]) + m.removedpromo_code_usages[ids[i]] = struct{}{} + } +} + +// RemovedPromoCodeUsages returns the removed IDs of the "promo_code_usages" edge to the PromoCodeUsage entity. +func (m *UserMutation) RemovedPromoCodeUsagesIDs() (ids []int64) { + for id := range m.removedpromo_code_usages { + ids = append(ids, id) + } + return +} + +// PromoCodeUsagesIDs returns the "promo_code_usages" edge IDs in the mutation. 
+func (m *UserMutation) PromoCodeUsagesIDs() (ids []int64) { + for id := range m.promo_code_usages { + ids = append(ids, id) + } + return +} + +// ResetPromoCodeUsages resets all changes to the "promo_code_usages" edge. +func (m *UserMutation) ResetPromoCodeUsages() { + m.promo_code_usages = nil + m.clearedpromo_code_usages = false + m.removedpromo_code_usages = nil +} + // Where appends a list predicates to the UserMutation builder. func (m *UserMutation) Where(ps ...predicate.User) { m.predicates = append(m.predicates, ps...) @@ -12369,7 +14622,7 @@ func (m *UserMutation) ResetField(name string) error { // AddedEdges returns all edge names that were set/added in this mutation. func (m *UserMutation) AddedEdges() []string { - edges := make([]string, 0, 7) + edges := make([]string, 0, 8) if m.api_keys != nil { edges = append(edges, user.EdgeAPIKeys) } @@ -12391,6 +14644,9 @@ func (m *UserMutation) AddedEdges() []string { if m.attribute_values != nil { edges = append(edges, user.EdgeAttributeValues) } + if m.promo_code_usages != nil { + edges = append(edges, user.EdgePromoCodeUsages) + } return edges } @@ -12440,13 +14696,19 @@ func (m *UserMutation) AddedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case user.EdgePromoCodeUsages: + ids := make([]ent.Value, 0, len(m.promo_code_usages)) + for id := range m.promo_code_usages { + ids = append(ids, id) + } + return ids } return nil } // RemovedEdges returns all edge names that were removed in this mutation. 
func (m *UserMutation) RemovedEdges() []string { - edges := make([]string, 0, 7) + edges := make([]string, 0, 8) if m.removedapi_keys != nil { edges = append(edges, user.EdgeAPIKeys) } @@ -12468,6 +14730,9 @@ func (m *UserMutation) RemovedEdges() []string { if m.removedattribute_values != nil { edges = append(edges, user.EdgeAttributeValues) } + if m.removedpromo_code_usages != nil { + edges = append(edges, user.EdgePromoCodeUsages) + } return edges } @@ -12517,13 +14782,19 @@ func (m *UserMutation) RemovedIDs(name string) []ent.Value { ids = append(ids, id) } return ids + case user.EdgePromoCodeUsages: + ids := make([]ent.Value, 0, len(m.removedpromo_code_usages)) + for id := range m.removedpromo_code_usages { + ids = append(ids, id) + } + return ids } return nil } // ClearedEdges returns all edge names that were cleared in this mutation. func (m *UserMutation) ClearedEdges() []string { - edges := make([]string, 0, 7) + edges := make([]string, 0, 8) if m.clearedapi_keys { edges = append(edges, user.EdgeAPIKeys) } @@ -12545,6 +14816,9 @@ func (m *UserMutation) ClearedEdges() []string { if m.clearedattribute_values { edges = append(edges, user.EdgeAttributeValues) } + if m.clearedpromo_code_usages { + edges = append(edges, user.EdgePromoCodeUsages) + } return edges } @@ -12566,6 +14840,8 @@ func (m *UserMutation) EdgeCleared(name string) bool { return m.clearedusage_logs case user.EdgeAttributeValues: return m.clearedattribute_values + case user.EdgePromoCodeUsages: + return m.clearedpromo_code_usages } return false } @@ -12603,6 +14879,9 @@ func (m *UserMutation) ResetEdge(name string) error { case user.EdgeAttributeValues: m.ResetAttributeValues() return nil + case user.EdgePromoCodeUsages: + m.ResetPromoCodeUsages() + return nil } return fmt.Errorf("unknown User edge %s", name) } diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index 87c56902..7a443c5d 100644 --- a/backend/ent/predicate/predicate.go +++ 
b/backend/ent/predicate/predicate.go @@ -18,6 +18,12 @@ type AccountGroup func(*sql.Selector) // Group is the predicate function for group builders. type Group func(*sql.Selector) +// PromoCode is the predicate function for promocode builders. +type PromoCode func(*sql.Selector) + +// PromoCodeUsage is the predicate function for promocodeusage builders. +type PromoCodeUsage func(*sql.Selector) + // Proxy is the predicate function for proxy builders. type Proxy func(*sql.Selector) diff --git a/backend/ent/promocode.go b/backend/ent/promocode.go new file mode 100644 index 00000000..1123bbd6 --- /dev/null +++ b/backend/ent/promocode.go @@ -0,0 +1,228 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/promocode" +) + +// PromoCode is the model entity for the PromoCode schema. +type PromoCode struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // 优惠码 + Code string `json:"code,omitempty"` + // 赠送余额金额 + BonusAmount float64 `json:"bonus_amount,omitempty"` + // 最大使用次数,0表示无限制 + MaxUses int `json:"max_uses,omitempty"` + // 已使用次数 + UsedCount int `json:"used_count,omitempty"` + // 状态: active, disabled + Status string `json:"status,omitempty"` + // 过期时间,null表示永不过期 + ExpiresAt *time.Time `json:"expires_at,omitempty"` + // 备注 + Notes *string `json:"notes,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the PromoCodeQuery when eager-loading is set. + Edges PromoCodeEdges `json:"edges"` + selectValues sql.SelectValues +} + +// PromoCodeEdges holds the relations/edges for other nodes in the graph. 
+type PromoCodeEdges struct { + // UsageRecords holds the value of the usage_records edge. + UsageRecords []*PromoCodeUsage `json:"usage_records,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// UsageRecordsOrErr returns the UsageRecords value or an error if the edge +// was not loaded in eager-loading. +func (e PromoCodeEdges) UsageRecordsOrErr() ([]*PromoCodeUsage, error) { + if e.loadedTypes[0] { + return e.UsageRecords, nil + } + return nil, &NotLoadedError{edge: "usage_records"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*PromoCode) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case promocode.FieldBonusAmount: + values[i] = new(sql.NullFloat64) + case promocode.FieldID, promocode.FieldMaxUses, promocode.FieldUsedCount: + values[i] = new(sql.NullInt64) + case promocode.FieldCode, promocode.FieldStatus, promocode.FieldNotes: + values[i] = new(sql.NullString) + case promocode.FieldExpiresAt, promocode.FieldCreatedAt, promocode.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the PromoCode fields. 
+func (_m *PromoCode) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case promocode.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case promocode.FieldCode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field code", values[i]) + } else if value.Valid { + _m.Code = value.String + } + case promocode.FieldBonusAmount: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field bonus_amount", values[i]) + } else if value.Valid { + _m.BonusAmount = value.Float64 + } + case promocode.FieldMaxUses: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field max_uses", values[i]) + } else if value.Valid { + _m.MaxUses = int(value.Int64) + } + case promocode.FieldUsedCount: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field used_count", values[i]) + } else if value.Valid { + _m.UsedCount = int(value.Int64) + } + case promocode.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case promocode.FieldExpiresAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expires_at", values[i]) + } else if value.Valid { + _m.ExpiresAt = new(time.Time) + *_m.ExpiresAt = value.Time + } + case promocode.FieldNotes: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field notes", values[i]) + } else if value.Valid { + _m.Notes = new(string) + *_m.Notes = value.String + } + case 
promocode.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case promocode.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the PromoCode. +// This includes values selected through modifiers, order, etc. +func (_m *PromoCode) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryUsageRecords queries the "usage_records" edge of the PromoCode entity. +func (_m *PromoCode) QueryUsageRecords() *PromoCodeUsageQuery { + return NewPromoCodeClient(_m.config).QueryUsageRecords(_m) +} + +// Update returns a builder for updating this PromoCode. +// Note that you need to call PromoCode.Unwrap() before calling this method if this PromoCode +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *PromoCode) Update() *PromoCodeUpdateOne { + return NewPromoCodeClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the PromoCode entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *PromoCode) Unwrap() *PromoCode { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: PromoCode is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *PromoCode) String() string { + var builder strings.Builder + builder.WriteString("PromoCode(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("code=") + builder.WriteString(_m.Code) + builder.WriteString(", ") + builder.WriteString("bonus_amount=") + builder.WriteString(fmt.Sprintf("%v", _m.BonusAmount)) + builder.WriteString(", ") + builder.WriteString("max_uses=") + builder.WriteString(fmt.Sprintf("%v", _m.MaxUses)) + builder.WriteString(", ") + builder.WriteString("used_count=") + builder.WriteString(fmt.Sprintf("%v", _m.UsedCount)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + if v := _m.ExpiresAt; v != nil { + builder.WriteString("expires_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.Notes; v != nil { + builder.WriteString("notes=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// PromoCodes is a parsable slice of PromoCode. +type PromoCodes []*PromoCode diff --git a/backend/ent/promocode/promocode.go b/backend/ent/promocode/promocode.go new file mode 100644 index 00000000..ba91658f --- /dev/null +++ b/backend/ent/promocode/promocode.go @@ -0,0 +1,165 @@ +// Code generated by ent, DO NOT EDIT. + +package promocode + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the promocode type in the database. + Label = "promo_code" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCode holds the string denoting the code field in the database. 
+ FieldCode = "code" + // FieldBonusAmount holds the string denoting the bonus_amount field in the database. + FieldBonusAmount = "bonus_amount" + // FieldMaxUses holds the string denoting the max_uses field in the database. + FieldMaxUses = "max_uses" + // FieldUsedCount holds the string denoting the used_count field in the database. + FieldUsedCount = "used_count" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldExpiresAt holds the string denoting the expires_at field in the database. + FieldExpiresAt = "expires_at" + // FieldNotes holds the string denoting the notes field in the database. + FieldNotes = "notes" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // EdgeUsageRecords holds the string denoting the usage_records edge name in mutations. + EdgeUsageRecords = "usage_records" + // Table holds the table name of the promocode in the database. + Table = "promo_codes" + // UsageRecordsTable is the table that holds the usage_records relation/edge. + UsageRecordsTable = "promo_code_usages" + // UsageRecordsInverseTable is the table name for the PromoCodeUsage entity. + // It exists in this package in order to avoid circular dependency with the "promocodeusage" package. + UsageRecordsInverseTable = "promo_code_usages" + // UsageRecordsColumn is the table column denoting the usage_records relation/edge. + UsageRecordsColumn = "promo_code_id" +) + +// Columns holds all SQL columns for promocode fields. +var Columns = []string{ + FieldID, + FieldCode, + FieldBonusAmount, + FieldMaxUses, + FieldUsedCount, + FieldStatus, + FieldExpiresAt, + FieldNotes, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). 
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // CodeValidator is a validator for the "code" field. It is called by the builders before save. + CodeValidator func(string) error + // DefaultBonusAmount holds the default value on creation for the "bonus_amount" field. + DefaultBonusAmount float64 + // DefaultMaxUses holds the default value on creation for the "max_uses" field. + DefaultMaxUses int + // DefaultUsedCount holds the default value on creation for the "used_count" field. + DefaultUsedCount int + // DefaultStatus holds the default value on creation for the "status" field. + DefaultStatus string + // StatusValidator is a validator for the "status" field. It is called by the builders before save. + StatusValidator func(string) error + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the PromoCode queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCode orders the results by the code field. +func ByCode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCode, opts...).ToFunc() +} + +// ByBonusAmount orders the results by the bonus_amount field. +func ByBonusAmount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBonusAmount, opts...).ToFunc() +} + +// ByMaxUses orders the results by the max_uses field. 
+func ByMaxUses(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMaxUses, opts...).ToFunc() +} + +// ByUsedCount orders the results by the used_count field. +func ByUsedCount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsedCount, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByExpiresAt orders the results by the expires_at field. +func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiresAt, opts...).ToFunc() +} + +// ByNotes orders the results by the notes field. +func ByNotes(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldNotes, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByUsageRecordsCount orders the results by usage_records count. +func ByUsageRecordsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newUsageRecordsStep(), opts...) + } +} + +// ByUsageRecords orders the results by usage_records terms. +func ByUsageRecords(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUsageRecordsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} +func newUsageRecordsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UsageRecordsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsageRecordsTable, UsageRecordsColumn), + ) +} diff --git a/backend/ent/promocode/where.go b/backend/ent/promocode/where.go new file mode 100644 index 00000000..84b6460a --- /dev/null +++ b/backend/ent/promocode/where.go @@ -0,0 +1,594 @@ +// Code generated by ent, DO NOT EDIT. + +package promocode + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id int64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLTE(FieldID, id)) +} + +// Code applies equality check predicate on the "code" field. It's identical to CodeEQ. +func Code(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldCode, v)) +} + +// BonusAmount applies equality check predicate on the "bonus_amount" field. It's identical to BonusAmountEQ. +func BonusAmount(v float64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldBonusAmount, v)) +} + +// MaxUses applies equality check predicate on the "max_uses" field. It's identical to MaxUsesEQ. +func MaxUses(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldMaxUses, v)) +} + +// UsedCount applies equality check predicate on the "used_count" field. It's identical to UsedCountEQ. +func UsedCount(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldUsedCount, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldStatus, v)) +} + +// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ. +func ExpiresAt(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldExpiresAt, v)) +} + +// Notes applies equality check predicate on the "notes" field. It's identical to NotesEQ. +func Notes(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldNotes, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. 
+func UpdatedAt(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// CodeEQ applies the EQ predicate on the "code" field. +func CodeEQ(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldCode, v)) +} + +// CodeNEQ applies the NEQ predicate on the "code" field. +func CodeNEQ(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNEQ(FieldCode, v)) +} + +// CodeIn applies the In predicate on the "code" field. +func CodeIn(vs ...string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldIn(FieldCode, vs...)) +} + +// CodeNotIn applies the NotIn predicate on the "code" field. +func CodeNotIn(vs ...string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotIn(FieldCode, vs...)) +} + +// CodeGT applies the GT predicate on the "code" field. +func CodeGT(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGT(FieldCode, v)) +} + +// CodeGTE applies the GTE predicate on the "code" field. +func CodeGTE(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGTE(FieldCode, v)) +} + +// CodeLT applies the LT predicate on the "code" field. +func CodeLT(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLT(FieldCode, v)) +} + +// CodeLTE applies the LTE predicate on the "code" field. +func CodeLTE(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLTE(FieldCode, v)) +} + +// CodeContains applies the Contains predicate on the "code" field. +func CodeContains(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldContains(FieldCode, v)) +} + +// CodeHasPrefix applies the HasPrefix predicate on the "code" field. +func CodeHasPrefix(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldHasPrefix(FieldCode, v)) +} + +// CodeHasSuffix applies the HasSuffix predicate on the "code" field. 
+func CodeHasSuffix(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldHasSuffix(FieldCode, v)) +} + +// CodeEqualFold applies the EqualFold predicate on the "code" field. +func CodeEqualFold(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEqualFold(FieldCode, v)) +} + +// CodeContainsFold applies the ContainsFold predicate on the "code" field. +func CodeContainsFold(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldContainsFold(FieldCode, v)) +} + +// BonusAmountEQ applies the EQ predicate on the "bonus_amount" field. +func BonusAmountEQ(v float64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldBonusAmount, v)) +} + +// BonusAmountNEQ applies the NEQ predicate on the "bonus_amount" field. +func BonusAmountNEQ(v float64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNEQ(FieldBonusAmount, v)) +} + +// BonusAmountIn applies the In predicate on the "bonus_amount" field. +func BonusAmountIn(vs ...float64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldIn(FieldBonusAmount, vs...)) +} + +// BonusAmountNotIn applies the NotIn predicate on the "bonus_amount" field. +func BonusAmountNotIn(vs ...float64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotIn(FieldBonusAmount, vs...)) +} + +// BonusAmountGT applies the GT predicate on the "bonus_amount" field. +func BonusAmountGT(v float64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGT(FieldBonusAmount, v)) +} + +// BonusAmountGTE applies the GTE predicate on the "bonus_amount" field. +func BonusAmountGTE(v float64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGTE(FieldBonusAmount, v)) +} + +// BonusAmountLT applies the LT predicate on the "bonus_amount" field. +func BonusAmountLT(v float64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLT(FieldBonusAmount, v)) +} + +// BonusAmountLTE applies the LTE predicate on the "bonus_amount" field. 
+func BonusAmountLTE(v float64) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLTE(FieldBonusAmount, v)) +} + +// MaxUsesEQ applies the EQ predicate on the "max_uses" field. +func MaxUsesEQ(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldMaxUses, v)) +} + +// MaxUsesNEQ applies the NEQ predicate on the "max_uses" field. +func MaxUsesNEQ(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNEQ(FieldMaxUses, v)) +} + +// MaxUsesIn applies the In predicate on the "max_uses" field. +func MaxUsesIn(vs ...int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldIn(FieldMaxUses, vs...)) +} + +// MaxUsesNotIn applies the NotIn predicate on the "max_uses" field. +func MaxUsesNotIn(vs ...int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotIn(FieldMaxUses, vs...)) +} + +// MaxUsesGT applies the GT predicate on the "max_uses" field. +func MaxUsesGT(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGT(FieldMaxUses, v)) +} + +// MaxUsesGTE applies the GTE predicate on the "max_uses" field. +func MaxUsesGTE(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGTE(FieldMaxUses, v)) +} + +// MaxUsesLT applies the LT predicate on the "max_uses" field. +func MaxUsesLT(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLT(FieldMaxUses, v)) +} + +// MaxUsesLTE applies the LTE predicate on the "max_uses" field. +func MaxUsesLTE(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLTE(FieldMaxUses, v)) +} + +// UsedCountEQ applies the EQ predicate on the "used_count" field. +func UsedCountEQ(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldUsedCount, v)) +} + +// UsedCountNEQ applies the NEQ predicate on the "used_count" field. +func UsedCountNEQ(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNEQ(FieldUsedCount, v)) +} + +// UsedCountIn applies the In predicate on the "used_count" field. 
+func UsedCountIn(vs ...int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldIn(FieldUsedCount, vs...)) +} + +// UsedCountNotIn applies the NotIn predicate on the "used_count" field. +func UsedCountNotIn(vs ...int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotIn(FieldUsedCount, vs...)) +} + +// UsedCountGT applies the GT predicate on the "used_count" field. +func UsedCountGT(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGT(FieldUsedCount, v)) +} + +// UsedCountGTE applies the GTE predicate on the "used_count" field. +func UsedCountGTE(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGTE(FieldUsedCount, v)) +} + +// UsedCountLT applies the LT predicate on the "used_count" field. +func UsedCountLT(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLT(FieldUsedCount, v)) +} + +// UsedCountLTE applies the LTE predicate on the "used_count" field. +func UsedCountLTE(v int) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLTE(FieldUsedCount, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. 
+func StatusGT(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. +func StatusGTE(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldContainsFold(FieldStatus, v)) +} + +// ExpiresAtEQ applies the EQ predicate on the "expires_at" field. +func ExpiresAtEQ(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldExpiresAt, v)) +} + +// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field. 
+func ExpiresAtNEQ(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNEQ(FieldExpiresAt, v)) +} + +// ExpiresAtIn applies the In predicate on the "expires_at" field. +func ExpiresAtIn(vs ...time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field. +func ExpiresAtNotIn(vs ...time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtGT applies the GT predicate on the "expires_at" field. +func ExpiresAtGT(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGT(FieldExpiresAt, v)) +} + +// ExpiresAtGTE applies the GTE predicate on the "expires_at" field. +func ExpiresAtGTE(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGTE(FieldExpiresAt, v)) +} + +// ExpiresAtLT applies the LT predicate on the "expires_at" field. +func ExpiresAtLT(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLT(FieldExpiresAt, v)) +} + +// ExpiresAtLTE applies the LTE predicate on the "expires_at" field. +func ExpiresAtLTE(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLTE(FieldExpiresAt, v)) +} + +// ExpiresAtIsNil applies the IsNil predicate on the "expires_at" field. +func ExpiresAtIsNil() predicate.PromoCode { + return predicate.PromoCode(sql.FieldIsNull(FieldExpiresAt)) +} + +// ExpiresAtNotNil applies the NotNil predicate on the "expires_at" field. +func ExpiresAtNotNil() predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotNull(FieldExpiresAt)) +} + +// NotesEQ applies the EQ predicate on the "notes" field. +func NotesEQ(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldNotes, v)) +} + +// NotesNEQ applies the NEQ predicate on the "notes" field. 
+func NotesNEQ(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNEQ(FieldNotes, v)) +} + +// NotesIn applies the In predicate on the "notes" field. +func NotesIn(vs ...string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldIn(FieldNotes, vs...)) +} + +// NotesNotIn applies the NotIn predicate on the "notes" field. +func NotesNotIn(vs ...string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotIn(FieldNotes, vs...)) +} + +// NotesGT applies the GT predicate on the "notes" field. +func NotesGT(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGT(FieldNotes, v)) +} + +// NotesGTE applies the GTE predicate on the "notes" field. +func NotesGTE(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGTE(FieldNotes, v)) +} + +// NotesLT applies the LT predicate on the "notes" field. +func NotesLT(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLT(FieldNotes, v)) +} + +// NotesLTE applies the LTE predicate on the "notes" field. +func NotesLTE(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLTE(FieldNotes, v)) +} + +// NotesContains applies the Contains predicate on the "notes" field. +func NotesContains(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldContains(FieldNotes, v)) +} + +// NotesHasPrefix applies the HasPrefix predicate on the "notes" field. +func NotesHasPrefix(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldHasPrefix(FieldNotes, v)) +} + +// NotesHasSuffix applies the HasSuffix predicate on the "notes" field. +func NotesHasSuffix(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldHasSuffix(FieldNotes, v)) +} + +// NotesIsNil applies the IsNil predicate on the "notes" field. +func NotesIsNil() predicate.PromoCode { + return predicate.PromoCode(sql.FieldIsNull(FieldNotes)) +} + +// NotesNotNil applies the NotNil predicate on the "notes" field. 
+func NotesNotNil() predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotNull(FieldNotes)) +} + +// NotesEqualFold applies the EqualFold predicate on the "notes" field. +func NotesEqualFold(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEqualFold(FieldNotes, v)) +} + +// NotesContainsFold applies the ContainsFold predicate on the "notes" field. +func NotesContainsFold(v string) predicate.PromoCode { + return predicate.PromoCode(sql.FieldContainsFold(FieldNotes, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. 
+func CreatedAtLTE(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.PromoCode { + return predicate.PromoCode(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// HasUsageRecords applies the HasEdge predicate on the "usage_records" edge. 
+func HasUsageRecords() predicate.PromoCode { + return predicate.PromoCode(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, UsageRecordsTable, UsageRecordsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUsageRecordsWith applies the HasEdge predicate on the "usage_records" edge with a given conditions (other predicates). +func HasUsageRecordsWith(preds ...predicate.PromoCodeUsage) predicate.PromoCode { + return predicate.PromoCode(func(s *sql.Selector) { + step := newUsageRecordsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.PromoCode) predicate.PromoCode { + return predicate.PromoCode(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.PromoCode) predicate.PromoCode { + return predicate.PromoCode(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.PromoCode) predicate.PromoCode { + return predicate.PromoCode(sql.NotPredicates(p)) +} diff --git a/backend/ent/promocode_create.go b/backend/ent/promocode_create.go new file mode 100644 index 00000000..4fd2c39c --- /dev/null +++ b/backend/ent/promocode_create.go @@ -0,0 +1,1081 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" +) + +// PromoCodeCreate is the builder for creating a PromoCode entity. +type PromoCodeCreate struct { + config + mutation *PromoCodeMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCode sets the "code" field. 
+func (_c *PromoCodeCreate) SetCode(v string) *PromoCodeCreate { + _c.mutation.SetCode(v) + return _c +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_c *PromoCodeCreate) SetBonusAmount(v float64) *PromoCodeCreate { + _c.mutation.SetBonusAmount(v) + return _c +} + +// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil. +func (_c *PromoCodeCreate) SetNillableBonusAmount(v *float64) *PromoCodeCreate { + if v != nil { + _c.SetBonusAmount(*v) + } + return _c +} + +// SetMaxUses sets the "max_uses" field. +func (_c *PromoCodeCreate) SetMaxUses(v int) *PromoCodeCreate { + _c.mutation.SetMaxUses(v) + return _c +} + +// SetNillableMaxUses sets the "max_uses" field if the given value is not nil. +func (_c *PromoCodeCreate) SetNillableMaxUses(v *int) *PromoCodeCreate { + if v != nil { + _c.SetMaxUses(*v) + } + return _c +} + +// SetUsedCount sets the "used_count" field. +func (_c *PromoCodeCreate) SetUsedCount(v int) *PromoCodeCreate { + _c.mutation.SetUsedCount(v) + return _c +} + +// SetNillableUsedCount sets the "used_count" field if the given value is not nil. +func (_c *PromoCodeCreate) SetNillableUsedCount(v *int) *PromoCodeCreate { + if v != nil { + _c.SetUsedCount(*v) + } + return _c +} + +// SetStatus sets the "status" field. +func (_c *PromoCodeCreate) SetStatus(v string) *PromoCodeCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *PromoCodeCreate) SetNillableStatus(v *string) *PromoCodeCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetExpiresAt sets the "expires_at" field. +func (_c *PromoCodeCreate) SetExpiresAt(v time.Time) *PromoCodeCreate { + _c.mutation.SetExpiresAt(v) + return _c +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. 
+func (_c *PromoCodeCreate) SetNillableExpiresAt(v *time.Time) *PromoCodeCreate { + if v != nil { + _c.SetExpiresAt(*v) + } + return _c +} + +// SetNotes sets the "notes" field. +func (_c *PromoCodeCreate) SetNotes(v string) *PromoCodeCreate { + _c.mutation.SetNotes(v) + return _c +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_c *PromoCodeCreate) SetNillableNotes(v *string) *PromoCodeCreate { + if v != nil { + _c.SetNotes(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *PromoCodeCreate) SetCreatedAt(v time.Time) *PromoCodeCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *PromoCodeCreate) SetNillableCreatedAt(v *time.Time) *PromoCodeCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *PromoCodeCreate) SetUpdatedAt(v time.Time) *PromoCodeCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *PromoCodeCreate) SetNillableUpdatedAt(v *time.Time) *PromoCodeCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by IDs. +func (_c *PromoCodeCreate) AddUsageRecordIDs(ids ...int64) *PromoCodeCreate { + _c.mutation.AddUsageRecordIDs(ids...) + return _c +} + +// AddUsageRecords adds the "usage_records" edges to the PromoCodeUsage entity. +func (_c *PromoCodeCreate) AddUsageRecords(v ...*PromoCodeUsage) *PromoCodeCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddUsageRecordIDs(ids...) +} + +// Mutation returns the PromoCodeMutation object of the builder. +func (_c *PromoCodeCreate) Mutation() *PromoCodeMutation { + return _c.mutation +} + +// Save creates the PromoCode in the database. 
+func (_c *PromoCodeCreate) Save(ctx context.Context) (*PromoCode, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *PromoCodeCreate) SaveX(ctx context.Context) *PromoCode { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *PromoCodeCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *PromoCodeCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *PromoCodeCreate) defaults() { + if _, ok := _c.mutation.BonusAmount(); !ok { + v := promocode.DefaultBonusAmount + _c.mutation.SetBonusAmount(v) + } + if _, ok := _c.mutation.MaxUses(); !ok { + v := promocode.DefaultMaxUses + _c.mutation.SetMaxUses(v) + } + if _, ok := _c.mutation.UsedCount(); !ok { + v := promocode.DefaultUsedCount + _c.mutation.SetUsedCount(v) + } + if _, ok := _c.mutation.Status(); !ok { + v := promocode.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := promocode.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := promocode.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *PromoCodeCreate) check() error { + if _, ok := _c.mutation.Code(); !ok { + return &ValidationError{Name: "code", err: errors.New(`ent: missing required field "PromoCode.code"`)} + } + if v, ok := _c.mutation.Code(); ok { + if err := promocode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "PromoCode.code": %w`, err)} + } + } + if _, ok := _c.mutation.BonusAmount(); !ok { + return &ValidationError{Name: "bonus_amount", err: errors.New(`ent: missing required field "PromoCode.bonus_amount"`)} + } + if _, ok := _c.mutation.MaxUses(); !ok { + return &ValidationError{Name: "max_uses", err: errors.New(`ent: missing required field "PromoCode.max_uses"`)} + } + if _, ok := _c.mutation.UsedCount(); !ok { + return &ValidationError{Name: "used_count", err: errors.New(`ent: missing required field "PromoCode.used_count"`)} + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "PromoCode.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := promocode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "PromoCode.status": %w`, err)} + } + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "PromoCode.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "PromoCode.updated_at"`)} + } + return nil +} + +func (_c *PromoCodeCreate) sqlSave(ctx context.Context) (*PromoCode, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } 
+ id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *PromoCodeCreate) createSpec() (*PromoCode, *sqlgraph.CreateSpec) { + var ( + _node = &PromoCode{config: _c.config} + _spec = sqlgraph.NewCreateSpec(promocode.Table, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.Code(); ok { + _spec.SetField(promocode.FieldCode, field.TypeString, value) + _node.Code = value + } + if value, ok := _c.mutation.BonusAmount(); ok { + _spec.SetField(promocode.FieldBonusAmount, field.TypeFloat64, value) + _node.BonusAmount = value + } + if value, ok := _c.mutation.MaxUses(); ok { + _spec.SetField(promocode.FieldMaxUses, field.TypeInt, value) + _node.MaxUses = value + } + if value, ok := _c.mutation.UsedCount(); ok { + _spec.SetField(promocode.FieldUsedCount, field.TypeInt, value) + _node.UsedCount = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(promocode.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.ExpiresAt(); ok { + _spec.SetField(promocode.FieldExpiresAt, field.TypeTime, value) + _node.ExpiresAt = &value + } + if value, ok := _c.mutation.Notes(); ok { + _spec.SetField(promocode.FieldNotes, field.TypeString, value) + _node.Notes = &value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(promocode.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(promocode.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if nodes := _c.mutation.UsageRecordsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: promocode.UsageRecordsTable, + Columns: []string{promocode.UsageRecordsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, 
field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.PromoCode.Create(). +// SetCode(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.PromoCodeUpsert) { +// SetCode(v+v). +// }). +// Exec(ctx) +func (_c *PromoCodeCreate) OnConflict(opts ...sql.ConflictOption) *PromoCodeUpsertOne { + _c.conflict = opts + return &PromoCodeUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *PromoCodeCreate) OnConflictColumns(columns ...string) *PromoCodeUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &PromoCodeUpsertOne{ + create: _c, + } +} + +type ( + // PromoCodeUpsertOne is the builder for "upsert"-ing + // one PromoCode node. + PromoCodeUpsertOne struct { + create *PromoCodeCreate + } + + // PromoCodeUpsert is the "OnConflict" setter. + PromoCodeUpsert struct { + *sql.UpdateSet + } +) + +// SetCode sets the "code" field. +func (u *PromoCodeUpsert) SetCode(v string) *PromoCodeUpsert { + u.Set(promocode.FieldCode, v) + return u +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateCode() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldCode) + return u +} + +// SetBonusAmount sets the "bonus_amount" field. 
+func (u *PromoCodeUpsert) SetBonusAmount(v float64) *PromoCodeUpsert { + u.Set(promocode.FieldBonusAmount, v) + return u +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateBonusAmount() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldBonusAmount) + return u +} + +// AddBonusAmount adds v to the "bonus_amount" field. +func (u *PromoCodeUpsert) AddBonusAmount(v float64) *PromoCodeUpsert { + u.Add(promocode.FieldBonusAmount, v) + return u +} + +// SetMaxUses sets the "max_uses" field. +func (u *PromoCodeUpsert) SetMaxUses(v int) *PromoCodeUpsert { + u.Set(promocode.FieldMaxUses, v) + return u +} + +// UpdateMaxUses sets the "max_uses" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateMaxUses() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldMaxUses) + return u +} + +// AddMaxUses adds v to the "max_uses" field. +func (u *PromoCodeUpsert) AddMaxUses(v int) *PromoCodeUpsert { + u.Add(promocode.FieldMaxUses, v) + return u +} + +// SetUsedCount sets the "used_count" field. +func (u *PromoCodeUpsert) SetUsedCount(v int) *PromoCodeUpsert { + u.Set(promocode.FieldUsedCount, v) + return u +} + +// UpdateUsedCount sets the "used_count" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateUsedCount() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldUsedCount) + return u +} + +// AddUsedCount adds v to the "used_count" field. +func (u *PromoCodeUpsert) AddUsedCount(v int) *PromoCodeUpsert { + u.Add(promocode.FieldUsedCount, v) + return u +} + +// SetStatus sets the "status" field. +func (u *PromoCodeUpsert) SetStatus(v string) *PromoCodeUpsert { + u.Set(promocode.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateStatus() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldStatus) + return u +} + +// SetExpiresAt sets the "expires_at" field. 
+func (u *PromoCodeUpsert) SetExpiresAt(v time.Time) *PromoCodeUpsert { + u.Set(promocode.FieldExpiresAt, v) + return u +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateExpiresAt() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldExpiresAt) + return u +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (u *PromoCodeUpsert) ClearExpiresAt() *PromoCodeUpsert { + u.SetNull(promocode.FieldExpiresAt) + return u +} + +// SetNotes sets the "notes" field. +func (u *PromoCodeUpsert) SetNotes(v string) *PromoCodeUpsert { + u.Set(promocode.FieldNotes, v) + return u +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateNotes() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldNotes) + return u +} + +// ClearNotes clears the value of the "notes" field. +func (u *PromoCodeUpsert) ClearNotes() *PromoCodeUpsert { + u.SetNull(promocode.FieldNotes) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *PromoCodeUpsert) SetUpdatedAt(v time.Time) *PromoCodeUpsert { + u.Set(promocode.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *PromoCodeUpsert) UpdateUpdatedAt() *PromoCodeUpsert { + u.SetExcluded(promocode.FieldUpdatedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). 
+// Exec(ctx) +func (u *PromoCodeUpsertOne) UpdateNewValues() *PromoCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(promocode.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PromoCodeUpsertOne) Ignore() *PromoCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PromoCodeUpsertOne) DoNothing() *PromoCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PromoCodeCreate.OnConflict +// documentation for more info. +func (u *PromoCodeUpsertOne) Update(set func(*PromoCodeUpsert)) *PromoCodeUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PromoCodeUpsert{UpdateSet: update}) + })) + return u +} + +// SetCode sets the "code" field. +func (u *PromoCodeUpsertOne) SetCode(v string) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetCode(v) + }) +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *PromoCodeUpsertOne) UpdateCode() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateCode() + }) +} + +// SetBonusAmount sets the "bonus_amount" field. +func (u *PromoCodeUpsertOne) SetBonusAmount(v float64) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetBonusAmount(v) + }) +} + +// AddBonusAmount adds v to the "bonus_amount" field. 
+func (u *PromoCodeUpsertOne) AddBonusAmount(v float64) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.AddBonusAmount(v) + }) +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUpsertOne) UpdateBonusAmount() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateBonusAmount() + }) +} + +// SetMaxUses sets the "max_uses" field. +func (u *PromoCodeUpsertOne) SetMaxUses(v int) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetMaxUses(v) + }) +} + +// AddMaxUses adds v to the "max_uses" field. +func (u *PromoCodeUpsertOne) AddMaxUses(v int) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.AddMaxUses(v) + }) +} + +// UpdateMaxUses sets the "max_uses" field to the value that was provided on create. +func (u *PromoCodeUpsertOne) UpdateMaxUses() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateMaxUses() + }) +} + +// SetUsedCount sets the "used_count" field. +func (u *PromoCodeUpsertOne) SetUsedCount(v int) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetUsedCount(v) + }) +} + +// AddUsedCount adds v to the "used_count" field. +func (u *PromoCodeUpsertOne) AddUsedCount(v int) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.AddUsedCount(v) + }) +} + +// UpdateUsedCount sets the "used_count" field to the value that was provided on create. +func (u *PromoCodeUpsertOne) UpdateUsedCount() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateUsedCount() + }) +} + +// SetStatus sets the "status" field. +func (u *PromoCodeUpsertOne) SetStatus(v string) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. 
+func (u *PromoCodeUpsertOne) UpdateStatus() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateStatus() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *PromoCodeUpsertOne) SetExpiresAt(v time.Time) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *PromoCodeUpsertOne) UpdateExpiresAt() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateExpiresAt() + }) +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (u *PromoCodeUpsertOne) ClearExpiresAt() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.ClearExpiresAt() + }) +} + +// SetNotes sets the "notes" field. +func (u *PromoCodeUpsertOne) SetNotes(v string) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *PromoCodeUpsertOne) UpdateNotes() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. +func (u *PromoCodeUpsertOne) ClearNotes() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.ClearNotes() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *PromoCodeUpsertOne) SetUpdatedAt(v time.Time) *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *PromoCodeUpsertOne) UpdateUpdatedAt() *PromoCodeUpsertOne { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateUpdatedAt() + }) +} + +// Exec executes the query. 
+func (u *PromoCodeUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PromoCodeCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *PromoCodeUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *PromoCodeUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *PromoCodeUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// PromoCodeCreateBulk is the builder for creating many PromoCode entities in bulk. +type PromoCodeCreateBulk struct { + config + err error + builders []*PromoCodeCreate + conflict []sql.ConflictOption +} + +// Save creates the PromoCode entities in the database. 
+func (_c *PromoCodeCreateBulk) Save(ctx context.Context) ([]*PromoCode, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*PromoCode, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*PromoCodeMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *PromoCodeCreateBulk) SaveX(ctx context.Context) []*PromoCode { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *PromoCodeCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *PromoCodeCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.PromoCode.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.PromoCodeUpsert) { +// SetCode(v+v). +// }). +// Exec(ctx) +func (_c *PromoCodeCreateBulk) OnConflict(opts ...sql.ConflictOption) *PromoCodeUpsertBulk { + _c.conflict = opts + return &PromoCodeUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *PromoCodeCreateBulk) OnConflictColumns(columns ...string) *PromoCodeUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &PromoCodeUpsertBulk{ + create: _c, + } +} + +// PromoCodeUpsertBulk is the builder for "upsert"-ing +// a bulk of PromoCode nodes. +type PromoCodeUpsertBulk struct { + create *PromoCodeCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). 
+// Exec(ctx) +func (u *PromoCodeUpsertBulk) UpdateNewValues() *PromoCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(promocode.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.PromoCode.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PromoCodeUpsertBulk) Ignore() *PromoCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PromoCodeUpsertBulk) DoNothing() *PromoCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PromoCodeCreateBulk.OnConflict +// documentation for more info. +func (u *PromoCodeUpsertBulk) Update(set func(*PromoCodeUpsert)) *PromoCodeUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PromoCodeUpsert{UpdateSet: update}) + })) + return u +} + +// SetCode sets the "code" field. +func (u *PromoCodeUpsertBulk) SetCode(v string) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetCode(v) + }) +} + +// UpdateCode sets the "code" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateCode() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateCode() + }) +} + +// SetBonusAmount sets the "bonus_amount" field. 
+func (u *PromoCodeUpsertBulk) SetBonusAmount(v float64) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetBonusAmount(v) + }) +} + +// AddBonusAmount adds v to the "bonus_amount" field. +func (u *PromoCodeUpsertBulk) AddBonusAmount(v float64) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.AddBonusAmount(v) + }) +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateBonusAmount() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateBonusAmount() + }) +} + +// SetMaxUses sets the "max_uses" field. +func (u *PromoCodeUpsertBulk) SetMaxUses(v int) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetMaxUses(v) + }) +} + +// AddMaxUses adds v to the "max_uses" field. +func (u *PromoCodeUpsertBulk) AddMaxUses(v int) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.AddMaxUses(v) + }) +} + +// UpdateMaxUses sets the "max_uses" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateMaxUses() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateMaxUses() + }) +} + +// SetUsedCount sets the "used_count" field. +func (u *PromoCodeUpsertBulk) SetUsedCount(v int) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetUsedCount(v) + }) +} + +// AddUsedCount adds v to the "used_count" field. +func (u *PromoCodeUpsertBulk) AddUsedCount(v int) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.AddUsedCount(v) + }) +} + +// UpdateUsedCount sets the "used_count" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateUsedCount() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateUsedCount() + }) +} + +// SetStatus sets the "status" field. 
+func (u *PromoCodeUpsertBulk) SetStatus(v string) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateStatus() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateStatus() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *PromoCodeUpsertBulk) SetExpiresAt(v time.Time) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateExpiresAt() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateExpiresAt() + }) +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (u *PromoCodeUpsertBulk) ClearExpiresAt() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.ClearExpiresAt() + }) +} + +// SetNotes sets the "notes" field. +func (u *PromoCodeUpsertBulk) SetNotes(v string) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetNotes(v) + }) +} + +// UpdateNotes sets the "notes" field to the value that was provided on create. +func (u *PromoCodeUpsertBulk) UpdateNotes() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateNotes() + }) +} + +// ClearNotes clears the value of the "notes" field. +func (u *PromoCodeUpsertBulk) ClearNotes() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.ClearNotes() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *PromoCodeUpsertBulk) SetUpdatedAt(v time.Time) *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. 
+func (u *PromoCodeUpsertBulk) UpdateUpdatedAt() *PromoCodeUpsertBulk { + return u.Update(func(s *PromoCodeUpsert) { + s.UpdateUpdatedAt() + }) +} + +// Exec executes the query. +func (u *PromoCodeUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the PromoCodeCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PromoCodeCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *PromoCodeUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/promocode_delete.go b/backend/ent/promocode_delete.go new file mode 100644 index 00000000..7e4fa3a6 --- /dev/null +++ b/backend/ent/promocode_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" +) + +// PromoCodeDelete is the builder for deleting a PromoCode entity. +type PromoCodeDelete struct { + config + hooks []Hook + mutation *PromoCodeMutation +} + +// Where appends a list predicates to the PromoCodeDelete builder. +func (_d *PromoCodeDelete) Where(ps ...predicate.PromoCode) *PromoCodeDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *PromoCodeDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *PromoCodeDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *PromoCodeDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(promocode.Table, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// PromoCodeDeleteOne is the builder for deleting a single PromoCode entity. +type PromoCodeDeleteOne struct { + _d *PromoCodeDelete +} + +// Where appends a list predicates to the PromoCodeDelete builder. +func (_d *PromoCodeDeleteOne) Where(ps ...predicate.PromoCode) *PromoCodeDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *PromoCodeDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{promocode.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *PromoCodeDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/promocode_query.go b/backend/ent/promocode_query.go new file mode 100644 index 00000000..2156b0f0 --- /dev/null +++ b/backend/ent/promocode_query.go @@ -0,0 +1,643 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" +) + +// PromoCodeQuery is the builder for querying PromoCode entities. +type PromoCodeQuery struct { + config + ctx *QueryContext + order []promocode.OrderOption + inters []Interceptor + predicates []predicate.PromoCode + withUsageRecords *PromoCodeUsageQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the PromoCodeQuery builder. +func (_q *PromoCodeQuery) Where(ps ...predicate.PromoCode) *PromoCodeQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *PromoCodeQuery) Limit(limit int) *PromoCodeQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *PromoCodeQuery) Offset(offset int) *PromoCodeQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *PromoCodeQuery) Unique(unique bool) *PromoCodeQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *PromoCodeQuery) Order(o ...promocode.OrderOption) *PromoCodeQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryUsageRecords chains the current query on the "usage_records" edge. 
+func (_q *PromoCodeQuery) QueryUsageRecords() *PromoCodeUsageQuery { + query := (&PromoCodeUsageClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(promocode.Table, promocode.FieldID, selector), + sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, promocode.UsageRecordsTable, promocode.UsageRecordsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first PromoCode entity from the query. +// Returns a *NotFoundError when no PromoCode was found. +func (_q *PromoCodeQuery) First(ctx context.Context) (*PromoCode, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{promocode.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *PromoCodeQuery) FirstX(ctx context.Context) *PromoCode { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first PromoCode ID from the query. +// Returns a *NotFoundError when no PromoCode ID was found. +func (_q *PromoCodeQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{promocode.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (_q *PromoCodeQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single PromoCode entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one PromoCode entity is found. +// Returns a *NotFoundError when no PromoCode entities are found. +func (_q *PromoCodeQuery) Only(ctx context.Context) (*PromoCode, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{promocode.Label} + default: + return nil, &NotSingularError{promocode.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *PromoCodeQuery) OnlyX(ctx context.Context) *PromoCode { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only PromoCode ID in the query. +// Returns a *NotSingularError when more than one PromoCode ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *PromoCodeQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{promocode.Label} + default: + err = &NotSingularError{promocode.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *PromoCodeQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of PromoCodes. 
+func (_q *PromoCodeQuery) All(ctx context.Context) ([]*PromoCode, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*PromoCode, *PromoCodeQuery]() + return withInterceptors[[]*PromoCode](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *PromoCodeQuery) AllX(ctx context.Context) []*PromoCode { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of PromoCode IDs. +func (_q *PromoCodeQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(promocode.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *PromoCodeQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *PromoCodeQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*PromoCodeQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *PromoCodeQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (_q *PromoCodeQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *PromoCodeQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the PromoCodeQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *PromoCodeQuery) Clone() *PromoCodeQuery { + if _q == nil { + return nil + } + return &PromoCodeQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]promocode.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.PromoCode{}, _q.predicates...), + withUsageRecords: _q.withUsageRecords.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithUsageRecords tells the query-builder to eager-load the nodes that are connected to +// the "usage_records" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *PromoCodeQuery) WithUsageRecords(opts ...func(*PromoCodeUsageQuery)) *PromoCodeQuery { + query := (&PromoCodeUsageClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUsageRecords = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Code string `json:"code,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.PromoCode.Query(). +// GroupBy(promocode.FieldCode). 
+// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *PromoCodeQuery) GroupBy(field string, fields ...string) *PromoCodeGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &PromoCodeGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = promocode.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Code string `json:"code,omitempty"` +// } +// +// client.PromoCode.Query(). +// Select(promocode.FieldCode). +// Scan(ctx, &v) +func (_q *PromoCodeQuery) Select(fields ...string) *PromoCodeSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &PromoCodeSelect{PromoCodeQuery: _q} + sbuild.label = promocode.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PromoCodeSelect configured with the given aggregations. +func (_q *PromoCodeQuery) Aggregate(fns ...AggregateFunc) *PromoCodeSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *PromoCodeQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !promocode.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *PromoCodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PromoCode, error) { + var ( + nodes = []*PromoCode{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withUsageRecords != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*PromoCode).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &PromoCode{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withUsageRecords; query != nil { + if err := _q.loadUsageRecords(ctx, query, nodes, + func(n *PromoCode) { n.Edges.UsageRecords = []*PromoCodeUsage{} }, + func(n *PromoCode, e *PromoCodeUsage) { n.Edges.UsageRecords = append(n.Edges.UsageRecords, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *PromoCodeQuery) loadUsageRecords(ctx context.Context, query *PromoCodeUsageQuery, nodes []*PromoCode, init func(*PromoCode), assign func(*PromoCode, *PromoCodeUsage)) error { + fks := make([]driver.Value, 0, len(nodes)) + 
nodeids := make(map[int64]*PromoCode) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(promocodeusage.FieldPromoCodeID) + } + query.Where(predicate.PromoCodeUsage(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(promocode.UsageRecordsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.PromoCodeID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "promo_code_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *PromoCodeQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *PromoCodeQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, promocode.FieldID) + for i := range fields { + if fields[i] != promocode.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + 
} + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *PromoCodeQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(promocode.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = promocode.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *PromoCodeQuery) ForUpdate(opts ...sql.LockOption) *PromoCodeQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (_q *PromoCodeQuery) ForShare(opts ...sql.LockOption) *PromoCodeQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// PromoCodeGroupBy is the group-by builder for PromoCode entities. +type PromoCodeGroupBy struct { + selector + build *PromoCodeQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *PromoCodeGroupBy) Aggregate(fns ...AggregateFunc) *PromoCodeGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *PromoCodeGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PromoCodeQuery, *PromoCodeGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *PromoCodeGroupBy) sqlScan(ctx context.Context, root *PromoCodeQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PromoCodeSelect is the builder for selecting fields of PromoCode entities. 
+type PromoCodeSelect struct { + *PromoCodeQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *PromoCodeSelect) Aggregate(fns ...AggregateFunc) *PromoCodeSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *PromoCodeSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PromoCodeQuery, *PromoCodeSelect](ctx, _s.PromoCodeQuery, _s, _s.inters, v) +} + +func (_s *PromoCodeSelect) sqlScan(ctx context.Context, root *PromoCodeQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/promocode_update.go b/backend/ent/promocode_update.go new file mode 100644 index 00000000..1a7481c8 --- /dev/null +++ b/backend/ent/promocode_update.go @@ -0,0 +1,745 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" +) + +// PromoCodeUpdate is the builder for updating PromoCode entities. 
+type PromoCodeUpdate struct { + config + hooks []Hook + mutation *PromoCodeMutation +} + +// Where appends a list predicates to the PromoCodeUpdate builder. +func (_u *PromoCodeUpdate) Where(ps ...predicate.PromoCode) *PromoCodeUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetCode sets the "code" field. +func (_u *PromoCodeUpdate) SetCode(v string) *PromoCodeUpdate { + _u.mutation.SetCode(v) + return _u +} + +// SetNillableCode sets the "code" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableCode(v *string) *PromoCodeUpdate { + if v != nil { + _u.SetCode(*v) + } + return _u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_u *PromoCodeUpdate) SetBonusAmount(v float64) *PromoCodeUpdate { + _u.mutation.ResetBonusAmount() + _u.mutation.SetBonusAmount(v) + return _u +} + +// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableBonusAmount(v *float64) *PromoCodeUpdate { + if v != nil { + _u.SetBonusAmount(*v) + } + return _u +} + +// AddBonusAmount adds value to the "bonus_amount" field. +func (_u *PromoCodeUpdate) AddBonusAmount(v float64) *PromoCodeUpdate { + _u.mutation.AddBonusAmount(v) + return _u +} + +// SetMaxUses sets the "max_uses" field. +func (_u *PromoCodeUpdate) SetMaxUses(v int) *PromoCodeUpdate { + _u.mutation.ResetMaxUses() + _u.mutation.SetMaxUses(v) + return _u +} + +// SetNillableMaxUses sets the "max_uses" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableMaxUses(v *int) *PromoCodeUpdate { + if v != nil { + _u.SetMaxUses(*v) + } + return _u +} + +// AddMaxUses adds value to the "max_uses" field. +func (_u *PromoCodeUpdate) AddMaxUses(v int) *PromoCodeUpdate { + _u.mutation.AddMaxUses(v) + return _u +} + +// SetUsedCount sets the "used_count" field. 
+func (_u *PromoCodeUpdate) SetUsedCount(v int) *PromoCodeUpdate { + _u.mutation.ResetUsedCount() + _u.mutation.SetUsedCount(v) + return _u +} + +// SetNillableUsedCount sets the "used_count" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableUsedCount(v *int) *PromoCodeUpdate { + if v != nil { + _u.SetUsedCount(*v) + } + return _u +} + +// AddUsedCount adds value to the "used_count" field. +func (_u *PromoCodeUpdate) AddUsedCount(v int) *PromoCodeUpdate { + _u.mutation.AddUsedCount(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *PromoCodeUpdate) SetStatus(v string) *PromoCodeUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableStatus(v *string) *PromoCodeUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *PromoCodeUpdate) SetExpiresAt(v time.Time) *PromoCodeUpdate { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableExpiresAt(v *time.Time) *PromoCodeUpdate { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (_u *PromoCodeUpdate) ClearExpiresAt() *PromoCodeUpdate { + _u.mutation.ClearExpiresAt() + return _u +} + +// SetNotes sets the "notes" field. +func (_u *PromoCodeUpdate) SetNotes(v string) *PromoCodeUpdate { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *PromoCodeUpdate) SetNillableNotes(v *string) *PromoCodeUpdate { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. 
+func (_u *PromoCodeUpdate) ClearNotes() *PromoCodeUpdate { + _u.mutation.ClearNotes() + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *PromoCodeUpdate) SetUpdatedAt(v time.Time) *PromoCodeUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by IDs. +func (_u *PromoCodeUpdate) AddUsageRecordIDs(ids ...int64) *PromoCodeUpdate { + _u.mutation.AddUsageRecordIDs(ids...) + return _u +} + +// AddUsageRecords adds the "usage_records" edges to the PromoCodeUsage entity. +func (_u *PromoCodeUpdate) AddUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageRecordIDs(ids...) +} + +// Mutation returns the PromoCodeMutation object of the builder. +func (_u *PromoCodeUpdate) Mutation() *PromoCodeMutation { + return _u.mutation +} + +// ClearUsageRecords clears all "usage_records" edges to the PromoCodeUsage entity. +func (_u *PromoCodeUpdate) ClearUsageRecords() *PromoCodeUpdate { + _u.mutation.ClearUsageRecords() + return _u +} + +// RemoveUsageRecordIDs removes the "usage_records" edge to PromoCodeUsage entities by IDs. +func (_u *PromoCodeUpdate) RemoveUsageRecordIDs(ids ...int64) *PromoCodeUpdate { + _u.mutation.RemoveUsageRecordIDs(ids...) + return _u +} + +// RemoveUsageRecords removes "usage_records" edges to PromoCodeUsage entities. +func (_u *PromoCodeUpdate) RemoveUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageRecordIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *PromoCodeUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_u *PromoCodeUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *PromoCodeUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *PromoCodeUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *PromoCodeUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := promocode.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *PromoCodeUpdate) check() error { + if v, ok := _u.mutation.Code(); ok { + if err := promocode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "PromoCode.code": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := promocode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "PromoCode.status": %w`, err)} + } + } + return nil +} + +func (_u *PromoCodeUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Code(); ok { + _spec.SetField(promocode.FieldCode, field.TypeString, value) + } + if value, ok := _u.mutation.BonusAmount(); ok { + _spec.SetField(promocode.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBonusAmount(); ok { + 
_spec.AddField(promocode.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.MaxUses(); ok { + _spec.SetField(promocode.FieldMaxUses, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedMaxUses(); ok { + _spec.AddField(promocode.FieldMaxUses, field.TypeInt, value) + } + if value, ok := _u.mutation.UsedCount(); ok { + _spec.SetField(promocode.FieldUsedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedUsedCount(); ok { + _spec.AddField(promocode.FieldUsedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(promocode.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(promocode.FieldExpiresAt, field.TypeTime, value) + } + if _u.mutation.ExpiresAtCleared() { + _spec.ClearField(promocode.FieldExpiresAt, field.TypeTime) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(promocode.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(promocode.FieldNotes, field.TypeString) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(promocode.FieldUpdatedAt, field.TypeTime, value) + } + if _u.mutation.UsageRecordsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: promocode.UsageRecordsTable, + Columns: []string{promocode.UsageRecordsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageRecordsIDs(); len(nodes) > 0 && !_u.mutation.UsageRecordsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: promocode.UsageRecordsTable, + Columns: []string{promocode.UsageRecordsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k 
:= range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageRecordsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: promocode.UsageRecordsTable, + Columns: []string{promocode.UsageRecordsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{promocode.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// PromoCodeUpdateOne is the builder for updating a single PromoCode entity. +type PromoCodeUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PromoCodeMutation +} + +// SetCode sets the "code" field. +func (_u *PromoCodeUpdateOne) SetCode(v string) *PromoCodeUpdateOne { + _u.mutation.SetCode(v) + return _u +} + +// SetNillableCode sets the "code" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableCode(v *string) *PromoCodeUpdateOne { + if v != nil { + _u.SetCode(*v) + } + return _u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_u *PromoCodeUpdateOne) SetBonusAmount(v float64) *PromoCodeUpdateOne { + _u.mutation.ResetBonusAmount() + _u.mutation.SetBonusAmount(v) + return _u +} + +// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil. 
+func (_u *PromoCodeUpdateOne) SetNillableBonusAmount(v *float64) *PromoCodeUpdateOne { + if v != nil { + _u.SetBonusAmount(*v) + } + return _u +} + +// AddBonusAmount adds value to the "bonus_amount" field. +func (_u *PromoCodeUpdateOne) AddBonusAmount(v float64) *PromoCodeUpdateOne { + _u.mutation.AddBonusAmount(v) + return _u +} + +// SetMaxUses sets the "max_uses" field. +func (_u *PromoCodeUpdateOne) SetMaxUses(v int) *PromoCodeUpdateOne { + _u.mutation.ResetMaxUses() + _u.mutation.SetMaxUses(v) + return _u +} + +// SetNillableMaxUses sets the "max_uses" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableMaxUses(v *int) *PromoCodeUpdateOne { + if v != nil { + _u.SetMaxUses(*v) + } + return _u +} + +// AddMaxUses adds value to the "max_uses" field. +func (_u *PromoCodeUpdateOne) AddMaxUses(v int) *PromoCodeUpdateOne { + _u.mutation.AddMaxUses(v) + return _u +} + +// SetUsedCount sets the "used_count" field. +func (_u *PromoCodeUpdateOne) SetUsedCount(v int) *PromoCodeUpdateOne { + _u.mutation.ResetUsedCount() + _u.mutation.SetUsedCount(v) + return _u +} + +// SetNillableUsedCount sets the "used_count" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableUsedCount(v *int) *PromoCodeUpdateOne { + if v != nil { + _u.SetUsedCount(*v) + } + return _u +} + +// AddUsedCount adds value to the "used_count" field. +func (_u *PromoCodeUpdateOne) AddUsedCount(v int) *PromoCodeUpdateOne { + _u.mutation.AddUsedCount(v) + return _u +} + +// SetStatus sets the "status" field. +func (_u *PromoCodeUpdateOne) SetStatus(v string) *PromoCodeUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableStatus(v *string) *PromoCodeUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetExpiresAt sets the "expires_at" field. 
+func (_u *PromoCodeUpdateOne) SetExpiresAt(v time.Time) *PromoCodeUpdateOne { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableExpiresAt(v *time.Time) *PromoCodeUpdateOne { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// ClearExpiresAt clears the value of the "expires_at" field. +func (_u *PromoCodeUpdateOne) ClearExpiresAt() *PromoCodeUpdateOne { + _u.mutation.ClearExpiresAt() + return _u +} + +// SetNotes sets the "notes" field. +func (_u *PromoCodeUpdateOne) SetNotes(v string) *PromoCodeUpdateOne { + _u.mutation.SetNotes(v) + return _u +} + +// SetNillableNotes sets the "notes" field if the given value is not nil. +func (_u *PromoCodeUpdateOne) SetNillableNotes(v *string) *PromoCodeUpdateOne { + if v != nil { + _u.SetNotes(*v) + } + return _u +} + +// ClearNotes clears the value of the "notes" field. +func (_u *PromoCodeUpdateOne) ClearNotes() *PromoCodeUpdateOne { + _u.mutation.ClearNotes() + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *PromoCodeUpdateOne) SetUpdatedAt(v time.Time) *PromoCodeUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// AddUsageRecordIDs adds the "usage_records" edge to the PromoCodeUsage entity by IDs. +func (_u *PromoCodeUpdateOne) AddUsageRecordIDs(ids ...int64) *PromoCodeUpdateOne { + _u.mutation.AddUsageRecordIDs(ids...) + return _u +} + +// AddUsageRecords adds the "usage_records" edges to the PromoCodeUsage entity. +func (_u *PromoCodeUpdateOne) AddUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddUsageRecordIDs(ids...) +} + +// Mutation returns the PromoCodeMutation object of the builder. +func (_u *PromoCodeUpdateOne) Mutation() *PromoCodeMutation { + return _u.mutation +} + +// ClearUsageRecords clears all "usage_records" edges to the PromoCodeUsage entity. 
+func (_u *PromoCodeUpdateOne) ClearUsageRecords() *PromoCodeUpdateOne { + _u.mutation.ClearUsageRecords() + return _u +} + +// RemoveUsageRecordIDs removes the "usage_records" edge to PromoCodeUsage entities by IDs. +func (_u *PromoCodeUpdateOne) RemoveUsageRecordIDs(ids ...int64) *PromoCodeUpdateOne { + _u.mutation.RemoveUsageRecordIDs(ids...) + return _u +} + +// RemoveUsageRecords removes "usage_records" edges to PromoCodeUsage entities. +func (_u *PromoCodeUpdateOne) RemoveUsageRecords(v ...*PromoCodeUsage) *PromoCodeUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveUsageRecordIDs(ids...) +} + +// Where appends a list predicates to the PromoCodeUpdate builder. +func (_u *PromoCodeUpdateOne) Where(ps ...predicate.PromoCode) *PromoCodeUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *PromoCodeUpdateOne) Select(field string, fields ...string) *PromoCodeUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated PromoCode entity. +func (_u *PromoCodeUpdateOne) Save(ctx context.Context) (*PromoCode, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *PromoCodeUpdateOne) SaveX(ctx context.Context) *PromoCode { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *PromoCodeUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *PromoCodeUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (_u *PromoCodeUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := promocode.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *PromoCodeUpdateOne) check() error { + if v, ok := _u.mutation.Code(); ok { + if err := promocode.CodeValidator(v); err != nil { + return &ValidationError{Name: "code", err: fmt.Errorf(`ent: validator failed for field "PromoCode.code": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := promocode.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "PromoCode.status": %w`, err)} + } + } + return nil +} + +func (_u *PromoCodeUpdateOne) sqlSave(ctx context.Context) (_node *PromoCode, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(promocode.Table, promocode.Columns, sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PromoCode.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, promocode.FieldID) + for _, f := range fields { + if !promocode.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != promocode.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Code(); ok { + _spec.SetField(promocode.FieldCode, field.TypeString, value) + } + if value, ok := _u.mutation.BonusAmount(); ok { + _spec.SetField(promocode.FieldBonusAmount, 
field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBonusAmount(); ok { + _spec.AddField(promocode.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.MaxUses(); ok { + _spec.SetField(promocode.FieldMaxUses, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedMaxUses(); ok { + _spec.AddField(promocode.FieldMaxUses, field.TypeInt, value) + } + if value, ok := _u.mutation.UsedCount(); ok { + _spec.SetField(promocode.FieldUsedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedUsedCount(); ok { + _spec.AddField(promocode.FieldUsedCount, field.TypeInt, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(promocode.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(promocode.FieldExpiresAt, field.TypeTime, value) + } + if _u.mutation.ExpiresAtCleared() { + _spec.ClearField(promocode.FieldExpiresAt, field.TypeTime) + } + if value, ok := _u.mutation.Notes(); ok { + _spec.SetField(promocode.FieldNotes, field.TypeString, value) + } + if _u.mutation.NotesCleared() { + _spec.ClearField(promocode.FieldNotes, field.TypeString) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(promocode.FieldUpdatedAt, field.TypeTime, value) + } + if _u.mutation.UsageRecordsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: promocode.UsageRecordsTable, + Columns: []string{promocode.UsageRecordsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedUsageRecordsIDs(); len(nodes) > 0 && !_u.mutation.UsageRecordsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: promocode.UsageRecordsTable, + Columns: []string{promocode.UsageRecordsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + 
IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UsageRecordsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: promocode.UsageRecordsTable, + Columns: []string{promocode.UsageRecordsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &PromoCode{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{promocode.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/promocodeusage.go b/backend/ent/promocodeusage.go new file mode 100644 index 00000000..1ba3a8bf --- /dev/null +++ b/backend/ent/promocodeusage.go @@ -0,0 +1,187 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// PromoCodeUsage is the model entity for the PromoCodeUsage schema. +type PromoCodeUsage struct { + config `json:"-"` + // ID of the ent. 
+ ID int64 `json:"id,omitempty"` + // 优惠码ID + PromoCodeID int64 `json:"promo_code_id,omitempty"` + // 使用用户ID + UserID int64 `json:"user_id,omitempty"` + // 实际赠送金额 + BonusAmount float64 `json:"bonus_amount,omitempty"` + // 使用时间 + UsedAt time.Time `json:"used_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the PromoCodeUsageQuery when eager-loading is set. + Edges PromoCodeUsageEdges `json:"edges"` + selectValues sql.SelectValues +} + +// PromoCodeUsageEdges holds the relations/edges for other nodes in the graph. +type PromoCodeUsageEdges struct { + // PromoCode holds the value of the promo_code edge. + PromoCode *PromoCode `json:"promo_code,omitempty"` + // User holds the value of the user edge. + User *User `json:"user,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [2]bool +} + +// PromoCodeOrErr returns the PromoCode value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e PromoCodeUsageEdges) PromoCodeOrErr() (*PromoCode, error) { + if e.PromoCode != nil { + return e.PromoCode, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: promocode.Label} + } + return nil, &NotLoadedError{edge: "promo_code"} +} + +// UserOrErr returns the User value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e PromoCodeUsageEdges) UserOrErr() (*User, error) { + if e.User != nil { + return e.User, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "user"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*PromoCodeUsage) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case promocodeusage.FieldBonusAmount: + values[i] = new(sql.NullFloat64) + case promocodeusage.FieldID, promocodeusage.FieldPromoCodeID, promocodeusage.FieldUserID: + values[i] = new(sql.NullInt64) + case promocodeusage.FieldUsedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the PromoCodeUsage fields. +func (_m *PromoCodeUsage) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case promocodeusage.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case promocodeusage.FieldPromoCodeID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field promo_code_id", values[i]) + } else if value.Valid { + _m.PromoCodeID = value.Int64 + } + case promocodeusage.FieldUserID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field user_id", values[i]) + } else if value.Valid { + _m.UserID = value.Int64 + } + case promocodeusage.FieldBonusAmount: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field bonus_amount", values[i]) + } else if value.Valid { + _m.BonusAmount = value.Float64 + } + case promocodeusage.FieldUsedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field used_at", values[i]) + } else if value.Valid { + _m.UsedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) 
+ } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the PromoCodeUsage. +// This includes values selected through modifiers, order, etc. +func (_m *PromoCodeUsage) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryPromoCode queries the "promo_code" edge of the PromoCodeUsage entity. +func (_m *PromoCodeUsage) QueryPromoCode() *PromoCodeQuery { + return NewPromoCodeUsageClient(_m.config).QueryPromoCode(_m) +} + +// QueryUser queries the "user" edge of the PromoCodeUsage entity. +func (_m *PromoCodeUsage) QueryUser() *UserQuery { + return NewPromoCodeUsageClient(_m.config).QueryUser(_m) +} + +// Update returns a builder for updating this PromoCodeUsage. +// Note that you need to call PromoCodeUsage.Unwrap() before calling this method if this PromoCodeUsage +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *PromoCodeUsage) Update() *PromoCodeUsageUpdateOne { + return NewPromoCodeUsageClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the PromoCodeUsage entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *PromoCodeUsage) Unwrap() *PromoCodeUsage { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: PromoCodeUsage is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *PromoCodeUsage) String() string { + var builder strings.Builder + builder.WriteString("PromoCodeUsage(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("promo_code_id=") + builder.WriteString(fmt.Sprintf("%v", _m.PromoCodeID)) + builder.WriteString(", ") + builder.WriteString("user_id=") + builder.WriteString(fmt.Sprintf("%v", _m.UserID)) + builder.WriteString(", ") + builder.WriteString("bonus_amount=") + builder.WriteString(fmt.Sprintf("%v", _m.BonusAmount)) + builder.WriteString(", ") + builder.WriteString("used_at=") + builder.WriteString(_m.UsedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// PromoCodeUsages is a parsable slice of PromoCodeUsage. +type PromoCodeUsages []*PromoCodeUsage diff --git a/backend/ent/promocodeusage/promocodeusage.go b/backend/ent/promocodeusage/promocodeusage.go new file mode 100644 index 00000000..f4e05970 --- /dev/null +++ b/backend/ent/promocodeusage/promocodeusage.go @@ -0,0 +1,125 @@ +// Code generated by ent, DO NOT EDIT. + +package promocodeusage + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the promocodeusage type in the database. + Label = "promo_code_usage" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldPromoCodeID holds the string denoting the promo_code_id field in the database. + FieldPromoCodeID = "promo_code_id" + // FieldUserID holds the string denoting the user_id field in the database. + FieldUserID = "user_id" + // FieldBonusAmount holds the string denoting the bonus_amount field in the database. + FieldBonusAmount = "bonus_amount" + // FieldUsedAt holds the string denoting the used_at field in the database. + FieldUsedAt = "used_at" + // EdgePromoCode holds the string denoting the promo_code edge name in mutations. 
+ EdgePromoCode = "promo_code" + // EdgeUser holds the string denoting the user edge name in mutations. + EdgeUser = "user" + // Table holds the table name of the promocodeusage in the database. + Table = "promo_code_usages" + // PromoCodeTable is the table that holds the promo_code relation/edge. + PromoCodeTable = "promo_code_usages" + // PromoCodeInverseTable is the table name for the PromoCode entity. + // It exists in this package in order to avoid circular dependency with the "promocode" package. + PromoCodeInverseTable = "promo_codes" + // PromoCodeColumn is the table column denoting the promo_code relation/edge. + PromoCodeColumn = "promo_code_id" + // UserTable is the table that holds the user relation/edge. + UserTable = "promo_code_usages" + // UserInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + UserInverseTable = "users" + // UserColumn is the table column denoting the user relation/edge. + UserColumn = "user_id" +) + +// Columns holds all SQL columns for promocodeusage fields. +var Columns = []string{ + FieldID, + FieldPromoCodeID, + FieldUserID, + FieldBonusAmount, + FieldUsedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultUsedAt holds the default value on creation for the "used_at" field. + DefaultUsedAt func() time.Time +) + +// OrderOption defines the ordering options for the PromoCodeUsage queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByPromoCodeID orders the results by the promo_code_id field. 
+func ByPromoCodeID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPromoCodeID, opts...).ToFunc() +} + +// ByUserID orders the results by the user_id field. +func ByUserID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUserID, opts...).ToFunc() +} + +// ByBonusAmount orders the results by the bonus_amount field. +func ByBonusAmount(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBonusAmount, opts...).ToFunc() +} + +// ByUsedAt orders the results by the used_at field. +func ByUsedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsedAt, opts...).ToFunc() +} + +// ByPromoCodeField orders the results by promo_code field. +func ByPromoCodeField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPromoCodeStep(), sql.OrderByField(field, opts...)) + } +} + +// ByUserField orders the results by user field. +func ByUserField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newUserStep(), sql.OrderByField(field, opts...)) + } +} +func newPromoCodeStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PromoCodeInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PromoCodeTable, PromoCodeColumn), + ) +} +func newUserStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(UserInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) +} diff --git a/backend/ent/promocodeusage/where.go b/backend/ent/promocodeusage/where.go new file mode 100644 index 00000000..fe657fd4 --- /dev/null +++ b/backend/ent/promocodeusage/where.go @@ -0,0 +1,257 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package promocodeusage + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLTE(FieldID, id)) +} + +// PromoCodeID applies equality check predicate on the "promo_code_id" field. It's identical to PromoCodeIDEQ. +func PromoCodeID(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldPromoCodeID, v)) +} + +// UserID applies equality check predicate on the "user_id" field. 
It's identical to UserIDEQ. +func UserID(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldUserID, v)) +} + +// BonusAmount applies equality check predicate on the "bonus_amount" field. It's identical to BonusAmountEQ. +func BonusAmount(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldBonusAmount, v)) +} + +// UsedAt applies equality check predicate on the "used_at" field. It's identical to UsedAtEQ. +func UsedAt(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldUsedAt, v)) +} + +// PromoCodeIDEQ applies the EQ predicate on the "promo_code_id" field. +func PromoCodeIDEQ(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldPromoCodeID, v)) +} + +// PromoCodeIDNEQ applies the NEQ predicate on the "promo_code_id" field. +func PromoCodeIDNEQ(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNEQ(FieldPromoCodeID, v)) +} + +// PromoCodeIDIn applies the In predicate on the "promo_code_id" field. +func PromoCodeIDIn(vs ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldIn(FieldPromoCodeID, vs...)) +} + +// PromoCodeIDNotIn applies the NotIn predicate on the "promo_code_id" field. +func PromoCodeIDNotIn(vs ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNotIn(FieldPromoCodeID, vs...)) +} + +// UserIDEQ applies the EQ predicate on the "user_id" field. +func UserIDEQ(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldUserID, v)) +} + +// UserIDNEQ applies the NEQ predicate on the "user_id" field. +func UserIDNEQ(v int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNEQ(FieldUserID, v)) +} + +// UserIDIn applies the In predicate on the "user_id" field. 
+func UserIDIn(vs ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldIn(FieldUserID, vs...)) +} + +// UserIDNotIn applies the NotIn predicate on the "user_id" field. +func UserIDNotIn(vs ...int64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNotIn(FieldUserID, vs...)) +} + +// BonusAmountEQ applies the EQ predicate on the "bonus_amount" field. +func BonusAmountEQ(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldBonusAmount, v)) +} + +// BonusAmountNEQ applies the NEQ predicate on the "bonus_amount" field. +func BonusAmountNEQ(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNEQ(FieldBonusAmount, v)) +} + +// BonusAmountIn applies the In predicate on the "bonus_amount" field. +func BonusAmountIn(vs ...float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldIn(FieldBonusAmount, vs...)) +} + +// BonusAmountNotIn applies the NotIn predicate on the "bonus_amount" field. +func BonusAmountNotIn(vs ...float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNotIn(FieldBonusAmount, vs...)) +} + +// BonusAmountGT applies the GT predicate on the "bonus_amount" field. +func BonusAmountGT(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGT(FieldBonusAmount, v)) +} + +// BonusAmountGTE applies the GTE predicate on the "bonus_amount" field. +func BonusAmountGTE(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGTE(FieldBonusAmount, v)) +} + +// BonusAmountLT applies the LT predicate on the "bonus_amount" field. +func BonusAmountLT(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLT(FieldBonusAmount, v)) +} + +// BonusAmountLTE applies the LTE predicate on the "bonus_amount" field. 
+func BonusAmountLTE(v float64) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLTE(FieldBonusAmount, v)) +} + +// UsedAtEQ applies the EQ predicate on the "used_at" field. +func UsedAtEQ(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldEQ(FieldUsedAt, v)) +} + +// UsedAtNEQ applies the NEQ predicate on the "used_at" field. +func UsedAtNEQ(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNEQ(FieldUsedAt, v)) +} + +// UsedAtIn applies the In predicate on the "used_at" field. +func UsedAtIn(vs ...time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldIn(FieldUsedAt, vs...)) +} + +// UsedAtNotIn applies the NotIn predicate on the "used_at" field. +func UsedAtNotIn(vs ...time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldNotIn(FieldUsedAt, vs...)) +} + +// UsedAtGT applies the GT predicate on the "used_at" field. +func UsedAtGT(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGT(FieldUsedAt, v)) +} + +// UsedAtGTE applies the GTE predicate on the "used_at" field. +func UsedAtGTE(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldGTE(FieldUsedAt, v)) +} + +// UsedAtLT applies the LT predicate on the "used_at" field. +func UsedAtLT(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLT(FieldUsedAt, v)) +} + +// UsedAtLTE applies the LTE predicate on the "used_at" field. +func UsedAtLTE(v time.Time) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.FieldLTE(FieldUsedAt, v)) +} + +// HasPromoCode applies the HasEdge predicate on the "promo_code" edge. 
+func HasPromoCode() predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, PromoCodeTable, PromoCodeColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPromoCodeWith applies the HasEdge predicate on the "promo_code" edge with a given conditions (other predicates). +func HasPromoCodeWith(preds ...predicate.PromoCode) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(func(s *sql.Selector) { + step := newPromoCodeStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasUser applies the HasEdge predicate on the "user" edge. +func HasUser() predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, UserTable, UserColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasUserWith applies the HasEdge predicate on the "user" edge with a given conditions (other predicates). +func HasUserWith(preds ...predicate.User) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(func(s *sql.Selector) { + step := newUserStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.PromoCodeUsage) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.PromoCodeUsage) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.PromoCodeUsage) predicate.PromoCodeUsage { + return predicate.PromoCodeUsage(sql.NotPredicates(p)) +} diff --git a/backend/ent/promocodeusage_create.go b/backend/ent/promocodeusage_create.go new file mode 100644 index 00000000..79d9c768 --- /dev/null +++ b/backend/ent/promocodeusage_create.go @@ -0,0 +1,696 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// PromoCodeUsageCreate is the builder for creating a PromoCodeUsage entity. +type PromoCodeUsageCreate struct { + config + mutation *PromoCodeUsageMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (_c *PromoCodeUsageCreate) SetPromoCodeID(v int64) *PromoCodeUsageCreate { + _c.mutation.SetPromoCodeID(v) + return _c +} + +// SetUserID sets the "user_id" field. +func (_c *PromoCodeUsageCreate) SetUserID(v int64) *PromoCodeUsageCreate { + _c.mutation.SetUserID(v) + return _c +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_c *PromoCodeUsageCreate) SetBonusAmount(v float64) *PromoCodeUsageCreate { + _c.mutation.SetBonusAmount(v) + return _c +} + +// SetUsedAt sets the "used_at" field. +func (_c *PromoCodeUsageCreate) SetUsedAt(v time.Time) *PromoCodeUsageCreate { + _c.mutation.SetUsedAt(v) + return _c +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_c *PromoCodeUsageCreate) SetNillableUsedAt(v *time.Time) *PromoCodeUsageCreate { + if v != nil { + _c.SetUsedAt(*v) + } + return _c +} + +// SetPromoCode sets the "promo_code" edge to the PromoCode entity. 
+func (_c *PromoCodeUsageCreate) SetPromoCode(v *PromoCode) *PromoCodeUsageCreate { + return _c.SetPromoCodeID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_c *PromoCodeUsageCreate) SetUser(v *User) *PromoCodeUsageCreate { + return _c.SetUserID(v.ID) +} + +// Mutation returns the PromoCodeUsageMutation object of the builder. +func (_c *PromoCodeUsageCreate) Mutation() *PromoCodeUsageMutation { + return _c.mutation +} + +// Save creates the PromoCodeUsage in the database. +func (_c *PromoCodeUsageCreate) Save(ctx context.Context) (*PromoCodeUsage, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *PromoCodeUsageCreate) SaveX(ctx context.Context) *PromoCodeUsage { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *PromoCodeUsageCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *PromoCodeUsageCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *PromoCodeUsageCreate) defaults() { + if _, ok := _c.mutation.UsedAt(); !ok { + v := promocodeusage.DefaultUsedAt() + _c.mutation.SetUsedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *PromoCodeUsageCreate) check() error { + if _, ok := _c.mutation.PromoCodeID(); !ok { + return &ValidationError{Name: "promo_code_id", err: errors.New(`ent: missing required field "PromoCodeUsage.promo_code_id"`)} + } + if _, ok := _c.mutation.UserID(); !ok { + return &ValidationError{Name: "user_id", err: errors.New(`ent: missing required field "PromoCodeUsage.user_id"`)} + } + if _, ok := _c.mutation.BonusAmount(); !ok { + return &ValidationError{Name: "bonus_amount", err: errors.New(`ent: missing required field "PromoCodeUsage.bonus_amount"`)} + } + if _, ok := _c.mutation.UsedAt(); !ok { + return &ValidationError{Name: "used_at", err: errors.New(`ent: missing required field "PromoCodeUsage.used_at"`)} + } + if len(_c.mutation.PromoCodeIDs()) == 0 { + return &ValidationError{Name: "promo_code", err: errors.New(`ent: missing required edge "PromoCodeUsage.promo_code"`)} + } + if len(_c.mutation.UserIDs()) == 0 { + return &ValidationError{Name: "user", err: errors.New(`ent: missing required edge "PromoCodeUsage.user"`)} + } + return nil +} + +func (_c *PromoCodeUsageCreate) sqlSave(ctx context.Context) (*PromoCodeUsage, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *PromoCodeUsageCreate) createSpec() (*PromoCodeUsage, *sqlgraph.CreateSpec) { + var ( + _node = &PromoCodeUsage{config: _c.config} + _spec = sqlgraph.NewCreateSpec(promocodeusage.Table, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.BonusAmount(); ok { + _spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value) + 
_node.BonusAmount = value + } + if value, ok := _c.mutation.UsedAt(); ok { + _spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value) + _node.UsedAt = value + } + if nodes := _c.mutation.PromoCodeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.PromoCodeTable, + Columns: []string{promocodeusage.PromoCodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.PromoCodeID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := _c.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.UserTable, + Columns: []string{promocodeusage.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.UserID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.PromoCodeUsage.Create(). +// SetPromoCodeID(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.PromoCodeUsageUpsert) { +// SetPromoCodeID(v+v). +// }). +// Exec(ctx) +func (_c *PromoCodeUsageCreate) OnConflict(opts ...sql.ConflictOption) *PromoCodeUsageUpsertOne { + _c.conflict = opts + return &PromoCodeUsageUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. 
Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *PromoCodeUsageCreate) OnConflictColumns(columns ...string) *PromoCodeUsageUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &PromoCodeUsageUpsertOne{ + create: _c, + } +} + +type ( + // PromoCodeUsageUpsertOne is the builder for "upsert"-ing + // one PromoCodeUsage node. + PromoCodeUsageUpsertOne struct { + create *PromoCodeUsageCreate + } + + // PromoCodeUsageUpsert is the "OnConflict" setter. + PromoCodeUsageUpsert struct { + *sql.UpdateSet + } +) + +// SetPromoCodeID sets the "promo_code_id" field. +func (u *PromoCodeUsageUpsert) SetPromoCodeID(v int64) *PromoCodeUsageUpsert { + u.Set(promocodeusage.FieldPromoCodeID, v) + return u +} + +// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsert) UpdatePromoCodeID() *PromoCodeUsageUpsert { + u.SetExcluded(promocodeusage.FieldPromoCodeID) + return u +} + +// SetUserID sets the "user_id" field. +func (u *PromoCodeUsageUpsert) SetUserID(v int64) *PromoCodeUsageUpsert { + u.Set(promocodeusage.FieldUserID, v) + return u +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsert) UpdateUserID() *PromoCodeUsageUpsert { + u.SetExcluded(promocodeusage.FieldUserID) + return u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (u *PromoCodeUsageUpsert) SetBonusAmount(v float64) *PromoCodeUsageUpsert { + u.Set(promocodeusage.FieldBonusAmount, v) + return u +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUsageUpsert) UpdateBonusAmount() *PromoCodeUsageUpsert { + u.SetExcluded(promocodeusage.FieldBonusAmount) + return u +} + +// AddBonusAmount adds v to the "bonus_amount" field. 
+func (u *PromoCodeUsageUpsert) AddBonusAmount(v float64) *PromoCodeUsageUpsert { + u.Add(promocodeusage.FieldBonusAmount, v) + return u +} + +// SetUsedAt sets the "used_at" field. +func (u *PromoCodeUsageUpsert) SetUsedAt(v time.Time) *PromoCodeUsageUpsert { + u.Set(promocodeusage.FieldUsedAt, v) + return u +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *PromoCodeUsageUpsert) UpdateUsedAt() *PromoCodeUsageUpsert { + u.SetExcluded(promocodeusage.FieldUsedAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *PromoCodeUsageUpsertOne) UpdateNewValues() *PromoCodeUsageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PromoCodeUsageUpsertOne) Ignore() *PromoCodeUsageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PromoCodeUsageUpsertOne) DoNothing() *PromoCodeUsageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PromoCodeUsageCreate.OnConflict +// documentation for more info. 
+func (u *PromoCodeUsageUpsertOne) Update(set func(*PromoCodeUsageUpsert)) *PromoCodeUsageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PromoCodeUsageUpsert{UpdateSet: update}) + })) + return u +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (u *PromoCodeUsageUpsertOne) SetPromoCodeID(v int64) *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetPromoCodeID(v) + }) +} + +// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertOne) UpdatePromoCodeID() *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdatePromoCodeID() + }) +} + +// SetUserID sets the "user_id" field. +func (u *PromoCodeUsageUpsertOne) SetUserID(v int64) *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertOne) UpdateUserID() *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateUserID() + }) +} + +// SetBonusAmount sets the "bonus_amount" field. +func (u *PromoCodeUsageUpsertOne) SetBonusAmount(v float64) *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetBonusAmount(v) + }) +} + +// AddBonusAmount adds v to the "bonus_amount" field. +func (u *PromoCodeUsageUpsertOne) AddBonusAmount(v float64) *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.AddBonusAmount(v) + }) +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertOne) UpdateBonusAmount() *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateBonusAmount() + }) +} + +// SetUsedAt sets the "used_at" field. 
+func (u *PromoCodeUsageUpsertOne) SetUsedAt(v time.Time) *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetUsedAt(v) + }) +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertOne) UpdateUsedAt() *PromoCodeUsageUpsertOne { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateUsedAt() + }) +} + +// Exec executes the query. +func (u *PromoCodeUsageUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PromoCodeUsageCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *PromoCodeUsageUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *PromoCodeUsageUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *PromoCodeUsageUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// PromoCodeUsageCreateBulk is the builder for creating many PromoCodeUsage entities in bulk. +type PromoCodeUsageCreateBulk struct { + config + err error + builders []*PromoCodeUsageCreate + conflict []sql.ConflictOption +} + +// Save creates the PromoCodeUsage entities in the database. 
+func (_c *PromoCodeUsageCreateBulk) Save(ctx context.Context) ([]*PromoCodeUsage, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*PromoCodeUsage, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*PromoCodeUsageMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *PromoCodeUsageCreateBulk) SaveX(ctx context.Context) []*PromoCodeUsage { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *PromoCodeUsageCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *PromoCodeUsageCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.PromoCodeUsage.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.PromoCodeUsageUpsert) { +// SetPromoCodeID(v+v). +// }). +// Exec(ctx) +func (_c *PromoCodeUsageCreateBulk) OnConflict(opts ...sql.ConflictOption) *PromoCodeUsageUpsertBulk { + _c.conflict = opts + return &PromoCodeUsageUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *PromoCodeUsageCreateBulk) OnConflictColumns(columns ...string) *PromoCodeUsageUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &PromoCodeUsageUpsertBulk{ + create: _c, + } +} + +// PromoCodeUsageUpsertBulk is the builder for "upsert"-ing +// a bulk of PromoCodeUsage nodes. +type PromoCodeUsageUpsertBulk struct { + create *PromoCodeUsageCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). 
+// Exec(ctx) +func (u *PromoCodeUsageUpsertBulk) UpdateNewValues() *PromoCodeUsageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.PromoCodeUsage.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *PromoCodeUsageUpsertBulk) Ignore() *PromoCodeUsageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *PromoCodeUsageUpsertBulk) DoNothing() *PromoCodeUsageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the PromoCodeUsageCreateBulk.OnConflict +// documentation for more info. +func (u *PromoCodeUsageUpsertBulk) Update(set func(*PromoCodeUsageUpsert)) *PromoCodeUsageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&PromoCodeUsageUpsert{UpdateSet: update}) + })) + return u +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (u *PromoCodeUsageUpsertBulk) SetPromoCodeID(v int64) *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetPromoCodeID(v) + }) +} + +// UpdatePromoCodeID sets the "promo_code_id" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertBulk) UpdatePromoCodeID() *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdatePromoCodeID() + }) +} + +// SetUserID sets the "user_id" field. +func (u *PromoCodeUsageUpsertBulk) SetUserID(v int64) *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetUserID(v) + }) +} + +// UpdateUserID sets the "user_id" field to the value that was provided on create. 
+func (u *PromoCodeUsageUpsertBulk) UpdateUserID() *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateUserID() + }) +} + +// SetBonusAmount sets the "bonus_amount" field. +func (u *PromoCodeUsageUpsertBulk) SetBonusAmount(v float64) *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetBonusAmount(v) + }) +} + +// AddBonusAmount adds v to the "bonus_amount" field. +func (u *PromoCodeUsageUpsertBulk) AddBonusAmount(v float64) *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.AddBonusAmount(v) + }) +} + +// UpdateBonusAmount sets the "bonus_amount" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertBulk) UpdateBonusAmount() *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateBonusAmount() + }) +} + +// SetUsedAt sets the "used_at" field. +func (u *PromoCodeUsageUpsertBulk) SetUsedAt(v time.Time) *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.SetUsedAt(v) + }) +} + +// UpdateUsedAt sets the "used_at" field to the value that was provided on create. +func (u *PromoCodeUsageUpsertBulk) UpdateUsedAt() *PromoCodeUsageUpsertBulk { + return u.Update(func(s *PromoCodeUsageUpsert) { + s.UpdateUsedAt() + }) +} + +// Exec executes the query. +func (u *PromoCodeUsageUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the PromoCodeUsageCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for PromoCodeUsageCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *PromoCodeUsageUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/promocodeusage_delete.go b/backend/ent/promocodeusage_delete.go new file mode 100644 index 00000000..bd3fa5e1 --- /dev/null +++ b/backend/ent/promocodeusage_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" +) + +// PromoCodeUsageDelete is the builder for deleting a PromoCodeUsage entity. +type PromoCodeUsageDelete struct { + config + hooks []Hook + mutation *PromoCodeUsageMutation +} + +// Where appends a list predicates to the PromoCodeUsageDelete builder. +func (_d *PromoCodeUsageDelete) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *PromoCodeUsageDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *PromoCodeUsageDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *PromoCodeUsageDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(promocodeusage.Table, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// PromoCodeUsageDeleteOne is the builder for deleting a single PromoCodeUsage entity. +type PromoCodeUsageDeleteOne struct { + _d *PromoCodeUsageDelete +} + +// Where appends a list predicates to the PromoCodeUsageDelete builder. +func (_d *PromoCodeUsageDeleteOne) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *PromoCodeUsageDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{promocodeusage.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *PromoCodeUsageDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/promocodeusage_query.go b/backend/ent/promocodeusage_query.go new file mode 100644 index 00000000..95b02a16 --- /dev/null +++ b/backend/ent/promocodeusage_query.go @@ -0,0 +1,718 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// PromoCodeUsageQuery is the builder for querying PromoCodeUsage entities. +type PromoCodeUsageQuery struct { + config + ctx *QueryContext + order []promocodeusage.OrderOption + inters []Interceptor + predicates []predicate.PromoCodeUsage + withPromoCode *PromoCodeQuery + withUser *UserQuery + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the PromoCodeUsageQuery builder. +func (_q *PromoCodeUsageQuery) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *PromoCodeUsageQuery) Limit(limit int) *PromoCodeUsageQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *PromoCodeUsageQuery) Offset(offset int) *PromoCodeUsageQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *PromoCodeUsageQuery) Unique(unique bool) *PromoCodeUsageQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *PromoCodeUsageQuery) Order(o ...promocodeusage.OrderOption) *PromoCodeUsageQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryPromoCode chains the current query on the "promo_code" edge. 
+func (_q *PromoCodeUsageQuery) QueryPromoCode() *PromoCodeQuery { + query := (&PromoCodeClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, selector), + sqlgraph.To(promocode.Table, promocode.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.PromoCodeTable, promocodeusage.PromoCodeColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryUser chains the current query on the "user" edge. +func (_q *PromoCodeUsageQuery) QueryUser() *UserQuery { + query := (&UserClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(promocodeusage.Table, promocodeusage.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, true, promocodeusage.UserTable, promocodeusage.UserColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first PromoCodeUsage entity from the query. +// Returns a *NotFoundError when no PromoCodeUsage was found. +func (_q *PromoCodeUsageQuery) First(ctx context.Context) (*PromoCodeUsage, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{promocodeusage.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (_q *PromoCodeUsageQuery) FirstX(ctx context.Context) *PromoCodeUsage { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first PromoCodeUsage ID from the query. +// Returns a *NotFoundError when no PromoCodeUsage ID was found. +func (_q *PromoCodeUsageQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{promocodeusage.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single PromoCodeUsage entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one PromoCodeUsage entity is found. +// Returns a *NotFoundError when no PromoCodeUsage entities are found. +func (_q *PromoCodeUsageQuery) Only(ctx context.Context) (*PromoCodeUsage, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{promocodeusage.Label} + default: + return nil, &NotSingularError{promocodeusage.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) OnlyX(ctx context.Context) *PromoCodeUsage { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only PromoCodeUsage ID in the query. +// Returns a *NotSingularError when more than one PromoCodeUsage ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (_q *PromoCodeUsageQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{promocodeusage.Label} + default: + err = &NotSingularError{promocodeusage.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of PromoCodeUsages. +func (_q *PromoCodeUsageQuery) All(ctx context.Context) ([]*PromoCodeUsage, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*PromoCodeUsage, *PromoCodeUsageQuery]() + return withInterceptors[[]*PromoCodeUsage](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) AllX(ctx context.Context) []*PromoCodeUsage { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of PromoCodeUsage IDs. +func (_q *PromoCodeUsageQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(promocodeusage.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (_q *PromoCodeUsageQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*PromoCodeUsageQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *PromoCodeUsageQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *PromoCodeUsageQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the PromoCodeUsageQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *PromoCodeUsageQuery) Clone() *PromoCodeUsageQuery { + if _q == nil { + return nil + } + return &PromoCodeUsageQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]promocodeusage.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.PromoCodeUsage{}, _q.predicates...), + withPromoCode: _q.withPromoCode.Clone(), + withUser: _q.withUser.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithPromoCode tells the query-builder to eager-load the nodes that are connected to +// the "promo_code" edge. The optional arguments are used to configure the query builder of the edge. 
+func (_q *PromoCodeUsageQuery) WithPromoCode(opts ...func(*PromoCodeQuery)) *PromoCodeUsageQuery { + query := (&PromoCodeClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withPromoCode = query + return _q +} + +// WithUser tells the query-builder to eager-load the nodes that are connected to +// the "user" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *PromoCodeUsageQuery) WithUser(opts ...func(*UserQuery)) *PromoCodeUsageQuery { + query := (&UserClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withUser = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// PromoCodeID int64 `json:"promo_code_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.PromoCodeUsage.Query(). +// GroupBy(promocodeusage.FieldPromoCodeID). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *PromoCodeUsageQuery) GroupBy(field string, fields ...string) *PromoCodeUsageGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &PromoCodeUsageGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = promocodeusage.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// PromoCodeID int64 `json:"promo_code_id,omitempty"` +// } +// +// client.PromoCodeUsage.Query(). +// Select(promocodeusage.FieldPromoCodeID). +// Scan(ctx, &v) +func (_q *PromoCodeUsageQuery) Select(fields ...string) *PromoCodeUsageSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) 
+ sbuild := &PromoCodeUsageSelect{PromoCodeUsageQuery: _q} + sbuild.label = promocodeusage.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a PromoCodeUsageSelect configured with the given aggregations. +func (_q *PromoCodeUsageQuery) Aggregate(fns ...AggregateFunc) *PromoCodeUsageSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *PromoCodeUsageQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !promocodeusage.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *PromoCodeUsageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*PromoCodeUsage, error) { + var ( + nodes = []*PromoCodeUsage{} + _spec = _q.querySpec() + loadedTypes = [2]bool{ + _q.withPromoCode != nil, + _q.withUser != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*PromoCodeUsage).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &PromoCodeUsage{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withPromoCode; query != nil { + if err := _q.loadPromoCode(ctx, query, nodes, nil, + func(n *PromoCodeUsage, e 
*PromoCode) { n.Edges.PromoCode = e }); err != nil { + return nil, err + } + } + if query := _q.withUser; query != nil { + if err := _q.loadUser(ctx, query, nodes, nil, + func(n *PromoCodeUsage, e *User) { n.Edges.User = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *PromoCodeUsageQuery) loadPromoCode(ctx context.Context, query *PromoCodeQuery, nodes []*PromoCodeUsage, init func(*PromoCodeUsage), assign func(*PromoCodeUsage, *PromoCode)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*PromoCodeUsage) + for i := range nodes { + fk := nodes[i].PromoCodeID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(promocode.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "promo_code_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (_q *PromoCodeUsageQuery) loadUser(ctx context.Context, query *UserQuery, nodes []*PromoCodeUsage, init func(*PromoCodeUsage), assign func(*PromoCodeUsage, *User)) error { + ids := make([]int64, 0, len(nodes)) + nodeids := make(map[int64][]*PromoCodeUsage) + for i := range nodes { + fk := nodes[i].UserID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *PromoCodeUsageQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + 
if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *PromoCodeUsageQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, promocodeusage.FieldID) + for i := range fields { + if fields[i] != promocodeusage.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withPromoCode != nil { + _spec.Node.AddColumnOnce(promocodeusage.FieldPromoCodeID) + } + if _q.withUser != nil { + _spec.Node.AddColumnOnce(promocodeusage.FieldUserID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *PromoCodeUsageQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(promocodeusage.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = promocodeusage.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) 
+ } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *PromoCodeUsageQuery) ForUpdate(opts ...sql.LockOption) *PromoCodeUsageQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *PromoCodeUsageQuery) ForShare(opts ...sql.LockOption) *PromoCodeUsageQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// PromoCodeUsageGroupBy is the group-by builder for PromoCodeUsage entities. +type PromoCodeUsageGroupBy struct { + selector + build *PromoCodeUsageQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *PromoCodeUsageGroupBy) Aggregate(fns ...AggregateFunc) *PromoCodeUsageGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *PromoCodeUsageGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PromoCodeUsageQuery, *PromoCodeUsageGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *PromoCodeUsageGroupBy) sqlScan(ctx context.Context, root *PromoCodeUsageQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// PromoCodeUsageSelect is the builder for selecting fields of PromoCodeUsage entities. +type PromoCodeUsageSelect struct { + *PromoCodeUsageQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *PromoCodeUsageSelect) Aggregate(fns ...AggregateFunc) *PromoCodeUsageSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *PromoCodeUsageSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*PromoCodeUsageQuery, *PromoCodeUsageSelect](ctx, _s.PromoCodeUsageQuery, _s, _s.inters, v) +} + +func (_s *PromoCodeUsageSelect) sqlScan(ctx context.Context, root *PromoCodeUsageQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/promocodeusage_update.go b/backend/ent/promocodeusage_update.go new file mode 100644 index 00000000..d91a1f10 --- /dev/null +++ b/backend/ent/promocodeusage_update.go @@ -0,0 +1,510 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/ent/user" +) + +// PromoCodeUsageUpdate is the builder for updating PromoCodeUsage entities. +type PromoCodeUsageUpdate struct { + config + hooks []Hook + mutation *PromoCodeUsageMutation +} + +// Where appends a list predicates to the PromoCodeUsageUpdate builder. +func (_u *PromoCodeUsageUpdate) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageUpdate { + _u.mutation.Where(ps...) 
+ return _u +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (_u *PromoCodeUsageUpdate) SetPromoCodeID(v int64) *PromoCodeUsageUpdate { + _u.mutation.SetPromoCodeID(v) + return _u +} + +// SetNillablePromoCodeID sets the "promo_code_id" field if the given value is not nil. +func (_u *PromoCodeUsageUpdate) SetNillablePromoCodeID(v *int64) *PromoCodeUsageUpdate { + if v != nil { + _u.SetPromoCodeID(*v) + } + return _u +} + +// SetUserID sets the "user_id" field. +func (_u *PromoCodeUsageUpdate) SetUserID(v int64) *PromoCodeUsageUpdate { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *PromoCodeUsageUpdate) SetNillableUserID(v *int64) *PromoCodeUsageUpdate { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_u *PromoCodeUsageUpdate) SetBonusAmount(v float64) *PromoCodeUsageUpdate { + _u.mutation.ResetBonusAmount() + _u.mutation.SetBonusAmount(v) + return _u +} + +// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil. +func (_u *PromoCodeUsageUpdate) SetNillableBonusAmount(v *float64) *PromoCodeUsageUpdate { + if v != nil { + _u.SetBonusAmount(*v) + } + return _u +} + +// AddBonusAmount adds value to the "bonus_amount" field. +func (_u *PromoCodeUsageUpdate) AddBonusAmount(v float64) *PromoCodeUsageUpdate { + _u.mutation.AddBonusAmount(v) + return _u +} + +// SetUsedAt sets the "used_at" field. +func (_u *PromoCodeUsageUpdate) SetUsedAt(v time.Time) *PromoCodeUsageUpdate { + _u.mutation.SetUsedAt(v) + return _u +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_u *PromoCodeUsageUpdate) SetNillableUsedAt(v *time.Time) *PromoCodeUsageUpdate { + if v != nil { + _u.SetUsedAt(*v) + } + return _u +} + +// SetPromoCode sets the "promo_code" edge to the PromoCode entity. 
+func (_u *PromoCodeUsageUpdate) SetPromoCode(v *PromoCode) *PromoCodeUsageUpdate { + return _u.SetPromoCodeID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_u *PromoCodeUsageUpdate) SetUser(v *User) *PromoCodeUsageUpdate { + return _u.SetUserID(v.ID) +} + +// Mutation returns the PromoCodeUsageMutation object of the builder. +func (_u *PromoCodeUsageUpdate) Mutation() *PromoCodeUsageMutation { + return _u.mutation +} + +// ClearPromoCode clears the "promo_code" edge to the PromoCode entity. +func (_u *PromoCodeUsageUpdate) ClearPromoCode() *PromoCodeUsageUpdate { + _u.mutation.ClearPromoCode() + return _u +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *PromoCodeUsageUpdate) ClearUser() *PromoCodeUsageUpdate { + _u.mutation.ClearUser() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *PromoCodeUsageUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *PromoCodeUsageUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *PromoCodeUsageUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *PromoCodeUsageUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *PromoCodeUsageUpdate) check() error { + if _u.mutation.PromoCodeCleared() && len(_u.mutation.PromoCodeIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.promo_code"`) + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.user"`) + } + return nil +} + +func (_u *PromoCodeUsageUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.BonusAmount(); ok { + _spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBonusAmount(); ok { + _spec.AddField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.UsedAt(); ok { + _spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value) + } + if _u.mutation.PromoCodeCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.PromoCodeTable, + Columns: []string{promocodeusage.PromoCodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.PromoCodeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.PromoCodeTable, + Columns: []string{promocodeusage.PromoCodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = 
append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.UserTable, + Columns: []string{promocodeusage.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.UserTable, + Columns: []string{promocodeusage.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{promocodeusage.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// PromoCodeUsageUpdateOne is the builder for updating a single PromoCodeUsage entity. +type PromoCodeUsageUpdateOne struct { + config + fields []string + hooks []Hook + mutation *PromoCodeUsageMutation +} + +// SetPromoCodeID sets the "promo_code_id" field. +func (_u *PromoCodeUsageUpdateOne) SetPromoCodeID(v int64) *PromoCodeUsageUpdateOne { + _u.mutation.SetPromoCodeID(v) + return _u +} + +// SetNillablePromoCodeID sets the "promo_code_id" field if the given value is not nil. +func (_u *PromoCodeUsageUpdateOne) SetNillablePromoCodeID(v *int64) *PromoCodeUsageUpdateOne { + if v != nil { + _u.SetPromoCodeID(*v) + } + return _u +} + +// SetUserID sets the "user_id" field. 
+func (_u *PromoCodeUsageUpdateOne) SetUserID(v int64) *PromoCodeUsageUpdateOne { + _u.mutation.SetUserID(v) + return _u +} + +// SetNillableUserID sets the "user_id" field if the given value is not nil. +func (_u *PromoCodeUsageUpdateOne) SetNillableUserID(v *int64) *PromoCodeUsageUpdateOne { + if v != nil { + _u.SetUserID(*v) + } + return _u +} + +// SetBonusAmount sets the "bonus_amount" field. +func (_u *PromoCodeUsageUpdateOne) SetBonusAmount(v float64) *PromoCodeUsageUpdateOne { + _u.mutation.ResetBonusAmount() + _u.mutation.SetBonusAmount(v) + return _u +} + +// SetNillableBonusAmount sets the "bonus_amount" field if the given value is not nil. +func (_u *PromoCodeUsageUpdateOne) SetNillableBonusAmount(v *float64) *PromoCodeUsageUpdateOne { + if v != nil { + _u.SetBonusAmount(*v) + } + return _u +} + +// AddBonusAmount adds value to the "bonus_amount" field. +func (_u *PromoCodeUsageUpdateOne) AddBonusAmount(v float64) *PromoCodeUsageUpdateOne { + _u.mutation.AddBonusAmount(v) + return _u +} + +// SetUsedAt sets the "used_at" field. +func (_u *PromoCodeUsageUpdateOne) SetUsedAt(v time.Time) *PromoCodeUsageUpdateOne { + _u.mutation.SetUsedAt(v) + return _u +} + +// SetNillableUsedAt sets the "used_at" field if the given value is not nil. +func (_u *PromoCodeUsageUpdateOne) SetNillableUsedAt(v *time.Time) *PromoCodeUsageUpdateOne { + if v != nil { + _u.SetUsedAt(*v) + } + return _u +} + +// SetPromoCode sets the "promo_code" edge to the PromoCode entity. +func (_u *PromoCodeUsageUpdateOne) SetPromoCode(v *PromoCode) *PromoCodeUsageUpdateOne { + return _u.SetPromoCodeID(v.ID) +} + +// SetUser sets the "user" edge to the User entity. +func (_u *PromoCodeUsageUpdateOne) SetUser(v *User) *PromoCodeUsageUpdateOne { + return _u.SetUserID(v.ID) +} + +// Mutation returns the PromoCodeUsageMutation object of the builder. 
+func (_u *PromoCodeUsageUpdateOne) Mutation() *PromoCodeUsageMutation { + return _u.mutation +} + +// ClearPromoCode clears the "promo_code" edge to the PromoCode entity. +func (_u *PromoCodeUsageUpdateOne) ClearPromoCode() *PromoCodeUsageUpdateOne { + _u.mutation.ClearPromoCode() + return _u +} + +// ClearUser clears the "user" edge to the User entity. +func (_u *PromoCodeUsageUpdateOne) ClearUser() *PromoCodeUsageUpdateOne { + _u.mutation.ClearUser() + return _u +} + +// Where appends a list predicates to the PromoCodeUsageUpdate builder. +func (_u *PromoCodeUsageUpdateOne) Where(ps ...predicate.PromoCodeUsage) *PromoCodeUsageUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *PromoCodeUsageUpdateOne) Select(field string, fields ...string) *PromoCodeUsageUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated PromoCodeUsage entity. +func (_u *PromoCodeUsageUpdateOne) Save(ctx context.Context) (*PromoCodeUsage, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *PromoCodeUsageUpdateOne) SaveX(ctx context.Context) *PromoCodeUsage { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *PromoCodeUsageUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *PromoCodeUsageUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *PromoCodeUsageUpdateOne) check() error { + if _u.mutation.PromoCodeCleared() && len(_u.mutation.PromoCodeIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.promo_code"`) + } + if _u.mutation.UserCleared() && len(_u.mutation.UserIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "PromoCodeUsage.user"`) + } + return nil +} + +func (_u *PromoCodeUsageUpdateOne) sqlSave(ctx context.Context) (_node *PromoCodeUsage, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(promocodeusage.Table, promocodeusage.Columns, sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "PromoCodeUsage.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, promocodeusage.FieldID) + for _, f := range fields { + if !promocodeusage.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != promocodeusage.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.BonusAmount(); ok { + _spec.SetField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedBonusAmount(); ok { + _spec.AddField(promocodeusage.FieldBonusAmount, field.TypeFloat64, value) + } + if value, ok := _u.mutation.UsedAt(); ok { + _spec.SetField(promocodeusage.FieldUsedAt, field.TypeTime, value) + } + if _u.mutation.PromoCodeCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.PromoCodeTable, + 
Columns: []string{promocodeusage.PromoCodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.PromoCodeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.PromoCodeTable, + Columns: []string{promocodeusage.PromoCodeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocode.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _u.mutation.UserCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.UserTable, + Columns: []string{promocodeusage.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.UserIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: true, + Table: promocodeusage.UserTable, + Columns: []string{promocodeusage.UserColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &PromoCodeUsage{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{promocodeusage.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} 
diff --git a/backend/ent/proxy_query.go b/backend/ent/proxy_query.go index 1358eed2..b817d139 100644 --- a/backend/ent/proxy_query.go +++ b/backend/ent/proxy_query.go @@ -9,6 +9,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -25,6 +26,7 @@ type ProxyQuery struct { inters []Interceptor predicates []predicate.Proxy withAccounts *AccountQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -384,6 +386,9 @@ func (_q *ProxyQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Proxy, node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -439,6 +444,9 @@ func (_q *ProxyQuery) loadAccounts(ctx context.Context, query *AccountQuery, nod func (_q *ProxyQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -501,6 +509,9 @@ func (_q *ProxyQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -518,6 +529,32 @@ func (_q *ProxyQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. 
+func (_q *ProxyQuery) ForUpdate(opts ...sql.LockOption) *ProxyQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *ProxyQuery) ForShare(opts ...sql.LockOption) *ProxyQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // ProxyGroupBy is the group-by builder for Proxy entities. type ProxyGroupBy struct { selector diff --git a/backend/ent/redeemcode_query.go b/backend/ent/redeemcode_query.go index 442bfe81..f5b8baef 100644 --- a/backend/ent/redeemcode_query.go +++ b/backend/ent/redeemcode_query.go @@ -8,6 +8,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -26,6 +27,7 @@ type RedeemCodeQuery struct { predicates []predicate.RedeemCode withUser *UserQuery withGroup *GroupQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -420,6 +422,9 @@ func (_q *RedeemCodeQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*R node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -511,6 +516,9 @@ func (_q *RedeemCodeQuery) loadGroup(ctx context.Context, query *GroupQuery, nod func (_q *RedeemCodeQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -579,6 +587,9 @@ func (_q *RedeemCodeQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -596,6 +607,32 @@ func (_q *RedeemCodeQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *RedeemCodeQuery) ForUpdate(opts ...sql.LockOption) *RedeemCodeQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (_q *RedeemCodeQuery) ForShare(opts ...sql.LockOption) *RedeemCodeQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // RedeemCodeGroupBy is the group-by builder for RedeemCode entities. type RedeemCodeGroupBy struct { selector diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index fb1c948c..0cb10775 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -9,6 +9,8 @@ import ( "github.com/Wei-Shaw/sub2api/ent/accountgroup" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/schema" @@ -175,22 +177,26 @@ func init() { accountDescPriority := accountFields[8].Descriptor() // account.DefaultPriority holds the default value on creation for the priority field. account.DefaultPriority = accountDescPriority.Default.(int) + // accountDescRateMultiplier is the schema descriptor for rate_multiplier field. + accountDescRateMultiplier := accountFields[9].Descriptor() + // account.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field. + account.DefaultRateMultiplier = accountDescRateMultiplier.Default.(float64) // accountDescStatus is the schema descriptor for status field. - accountDescStatus := accountFields[9].Descriptor() + accountDescStatus := accountFields[10].Descriptor() // account.DefaultStatus holds the default value on creation for the status field. account.DefaultStatus = accountDescStatus.Default.(string) // account.StatusValidator is a validator for the "status" field. It is called by the builders before save. 
account.StatusValidator = accountDescStatus.Validators[0].(func(string) error) // accountDescAutoPauseOnExpired is the schema descriptor for auto_pause_on_expired field. - accountDescAutoPauseOnExpired := accountFields[13].Descriptor() + accountDescAutoPauseOnExpired := accountFields[14].Descriptor() // account.DefaultAutoPauseOnExpired holds the default value on creation for the auto_pause_on_expired field. account.DefaultAutoPauseOnExpired = accountDescAutoPauseOnExpired.Default.(bool) // accountDescSchedulable is the schema descriptor for schedulable field. - accountDescSchedulable := accountFields[14].Descriptor() + accountDescSchedulable := accountFields[15].Descriptor() // account.DefaultSchedulable holds the default value on creation for the schedulable field. account.DefaultSchedulable = accountDescSchedulable.Default.(bool) // accountDescSessionWindowStatus is the schema descriptor for session_window_status field. - accountDescSessionWindowStatus := accountFields[20].Descriptor() + accountDescSessionWindowStatus := accountFields[21].Descriptor() // account.SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save. account.SessionWindowStatusValidator = accountDescSessionWindowStatus.Validators[0].(func(string) error) accountgroupFields := schema.AccountGroup{}.Fields() @@ -274,6 +280,64 @@ func init() { groupDescClaudeCodeOnly := groupFields[14].Descriptor() // group.DefaultClaudeCodeOnly holds the default value on creation for the claude_code_only field. group.DefaultClaudeCodeOnly = groupDescClaudeCodeOnly.Default.(bool) + // groupDescModelRoutingEnabled is the schema descriptor for model_routing_enabled field. + groupDescModelRoutingEnabled := groupFields[17].Descriptor() + // group.DefaultModelRoutingEnabled holds the default value on creation for the model_routing_enabled field. 
+ group.DefaultModelRoutingEnabled = groupDescModelRoutingEnabled.Default.(bool) + promocodeFields := schema.PromoCode{}.Fields() + _ = promocodeFields + // promocodeDescCode is the schema descriptor for code field. + promocodeDescCode := promocodeFields[0].Descriptor() + // promocode.CodeValidator is a validator for the "code" field. It is called by the builders before save. + promocode.CodeValidator = func() func(string) error { + validators := promocodeDescCode.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(code string) error { + for _, fn := range fns { + if err := fn(code); err != nil { + return err + } + } + return nil + } + }() + // promocodeDescBonusAmount is the schema descriptor for bonus_amount field. + promocodeDescBonusAmount := promocodeFields[1].Descriptor() + // promocode.DefaultBonusAmount holds the default value on creation for the bonus_amount field. + promocode.DefaultBonusAmount = promocodeDescBonusAmount.Default.(float64) + // promocodeDescMaxUses is the schema descriptor for max_uses field. + promocodeDescMaxUses := promocodeFields[2].Descriptor() + // promocode.DefaultMaxUses holds the default value on creation for the max_uses field. + promocode.DefaultMaxUses = promocodeDescMaxUses.Default.(int) + // promocodeDescUsedCount is the schema descriptor for used_count field. + promocodeDescUsedCount := promocodeFields[3].Descriptor() + // promocode.DefaultUsedCount holds the default value on creation for the used_count field. + promocode.DefaultUsedCount = promocodeDescUsedCount.Default.(int) + // promocodeDescStatus is the schema descriptor for status field. + promocodeDescStatus := promocodeFields[4].Descriptor() + // promocode.DefaultStatus holds the default value on creation for the status field. + promocode.DefaultStatus = promocodeDescStatus.Default.(string) + // promocode.StatusValidator is a validator for the "status" field. 
It is called by the builders before save. + promocode.StatusValidator = promocodeDescStatus.Validators[0].(func(string) error) + // promocodeDescCreatedAt is the schema descriptor for created_at field. + promocodeDescCreatedAt := promocodeFields[7].Descriptor() + // promocode.DefaultCreatedAt holds the default value on creation for the created_at field. + promocode.DefaultCreatedAt = promocodeDescCreatedAt.Default.(func() time.Time) + // promocodeDescUpdatedAt is the schema descriptor for updated_at field. + promocodeDescUpdatedAt := promocodeFields[8].Descriptor() + // promocode.DefaultUpdatedAt holds the default value on creation for the updated_at field. + promocode.DefaultUpdatedAt = promocodeDescUpdatedAt.Default.(func() time.Time) + // promocode.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + promocode.UpdateDefaultUpdatedAt = promocodeDescUpdatedAt.UpdateDefault.(func() time.Time) + promocodeusageFields := schema.PromoCodeUsage{}.Fields() + _ = promocodeusageFields + // promocodeusageDescUsedAt is the schema descriptor for used_at field. + promocodeusageDescUsedAt := promocodeusageFields[3].Descriptor() + // promocodeusage.DefaultUsedAt holds the default value on creation for the used_at field. + promocodeusage.DefaultUsedAt = promocodeusageDescUsedAt.Default.(func() time.Time) proxyMixin := schema.Proxy{}.Mixin() proxyMixinHooks1 := proxyMixin[1].Hooks() proxy.Hooks[0] = proxyMixinHooks1[0] @@ -522,27 +586,31 @@ func init() { // usagelog.DefaultRateMultiplier holds the default value on creation for the rate_multiplier field. usagelog.DefaultRateMultiplier = usagelogDescRateMultiplier.Default.(float64) // usagelogDescBillingType is the schema descriptor for billing_type field. - usagelogDescBillingType := usagelogFields[20].Descriptor() + usagelogDescBillingType := usagelogFields[21].Descriptor() // usagelog.DefaultBillingType holds the default value on creation for the billing_type field. 
usagelog.DefaultBillingType = usagelogDescBillingType.Default.(int8) // usagelogDescStream is the schema descriptor for stream field. - usagelogDescStream := usagelogFields[21].Descriptor() + usagelogDescStream := usagelogFields[22].Descriptor() // usagelog.DefaultStream holds the default value on creation for the stream field. usagelog.DefaultStream = usagelogDescStream.Default.(bool) // usagelogDescUserAgent is the schema descriptor for user_agent field. - usagelogDescUserAgent := usagelogFields[24].Descriptor() + usagelogDescUserAgent := usagelogFields[25].Descriptor() // usagelog.UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save. usagelog.UserAgentValidator = usagelogDescUserAgent.Validators[0].(func(string) error) + // usagelogDescIPAddress is the schema descriptor for ip_address field. + usagelogDescIPAddress := usagelogFields[26].Descriptor() + // usagelog.IPAddressValidator is a validator for the "ip_address" field. It is called by the builders before save. + usagelog.IPAddressValidator = usagelogDescIPAddress.Validators[0].(func(string) error) // usagelogDescImageCount is the schema descriptor for image_count field. - usagelogDescImageCount := usagelogFields[25].Descriptor() + usagelogDescImageCount := usagelogFields[27].Descriptor() // usagelog.DefaultImageCount holds the default value on creation for the image_count field. usagelog.DefaultImageCount = usagelogDescImageCount.Default.(int) // usagelogDescImageSize is the schema descriptor for image_size field. - usagelogDescImageSize := usagelogFields[26].Descriptor() + usagelogDescImageSize := usagelogFields[28].Descriptor() // usagelog.ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save. usagelog.ImageSizeValidator = usagelogDescImageSize.Validators[0].(func(string) error) // usagelogDescCreatedAt is the schema descriptor for created_at field. 
- usagelogDescCreatedAt := usagelogFields[27].Descriptor() + usagelogDescCreatedAt := usagelogFields[29].Descriptor() // usagelog.DefaultCreatedAt holds the default value on creation for the created_at field. usagelog.DefaultCreatedAt = usagelogDescCreatedAt.Default.(func() time.Time) userMixin := schema.User{}.Mixin() diff --git a/backend/ent/schema/account.go b/backend/ent/schema/account.go index ec192a97..dd79ba96 100644 --- a/backend/ent/schema/account.go +++ b/backend/ent/schema/account.go @@ -102,6 +102,12 @@ func (Account) Fields() []ent.Field { field.Int("priority"). Default(50), + // rate_multiplier: 账号计费倍率(>=0,允许 0 表示该账号计费为 0) + // 仅影响账号维度计费口径,不影响用户/API Key 扣费(分组倍率) + field.Float("rate_multiplier"). + SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}). + Default(1.0), + // status: 账户状态,如 "active", "error", "disabled" field.String("status"). MaxLen(20). diff --git a/backend/ent/schema/api_key.go b/backend/ent/schema/api_key.go index 94e572c5..1b206089 100644 --- a/backend/ent/schema/api_key.go +++ b/backend/ent/schema/api_key.go @@ -46,6 +46,12 @@ func (APIKey) Fields() []ent.Field { field.String("status"). MaxLen(20). Default(service.StatusActive), + field.JSON("ip_whitelist", []string{}). + Optional(). + Comment("Allowed IPs/CIDRs, e.g. [\"192.168.1.100\", \"10.0.0.0/8\"]"), + field.JSON("ip_blacklist", []string{}). + Optional(). + Comment("Blocked IPs/CIDRs"), } } diff --git a/backend/ent/schema/group.go b/backend/ent/schema/group.go index d38925b1..5d0a1e9a 100644 --- a/backend/ent/schema/group.go +++ b/backend/ent/schema/group.go @@ -95,6 +95,17 @@ func (Group) Fields() []ent.Field { Optional(). Nillable(). Comment("非 Claude Code 请求降级使用的分组 ID"), + + // 模型路由配置 (added by migration 040) + field.JSON("model_routing", map[string][]int64{}). + Optional(). + SchemaType(map[string]string{dialect.Postgres: "jsonb"}). + Comment("模型路由配置:模型模式 -> 优先账号ID列表"), + + // 模型路由开关 (added by migration 041) + field.Bool("model_routing_enabled"). 
+ Default(false). + Comment("是否启用模型路由配置"), } } diff --git a/backend/ent/schema/promo_code.go b/backend/ent/schema/promo_code.go new file mode 100644 index 00000000..c3bb824b --- /dev/null +++ b/backend/ent/schema/promo_code.go @@ -0,0 +1,87 @@ +package schema + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// PromoCode holds the schema definition for the PromoCode entity. +// +// 注册优惠码:用户注册时使用,可获得赠送余额 +// 与 RedeemCode 不同,PromoCode 支持多次使用(有使用次数限制) +// +// 删除策略:硬删除 +type PromoCode struct { + ent.Schema +} + +func (PromoCode) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "promo_codes"}, + } +} + +func (PromoCode) Fields() []ent.Field { + return []ent.Field{ + field.String("code"). + MaxLen(32). + NotEmpty(). + Unique(). + Comment("优惠码"), + field.Float("bonus_amount"). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}). + Default(0). + Comment("赠送余额金额"), + field.Int("max_uses"). + Default(0). + Comment("最大使用次数,0表示无限制"), + field.Int("used_count"). + Default(0). + Comment("已使用次数"), + field.String("status"). + MaxLen(20). + Default(service.PromoCodeStatusActive). + Comment("状态: active, disabled"), + field.Time("expires_at"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}). + Comment("过期时间,null表示永不过期"), + field.String("notes"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "text"}). + Comment("备注"), + field.Time("created_at"). + Immutable(). + Default(time.Now). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + field.Time("updated_at"). + Default(time.Now). + UpdateDefault(time.Now). 
+ SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + } +} + +func (PromoCode) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("usage_records", PromoCodeUsage.Type), + } +} + +func (PromoCode) Indexes() []ent.Index { + return []ent.Index{ + // code 字段已在 Fields() 中声明 Unique(),无需重复索引 + index.Fields("status"), + index.Fields("expires_at"), + } +} diff --git a/backend/ent/schema/promo_code_usage.go b/backend/ent/schema/promo_code_usage.go new file mode 100644 index 00000000..28fbabea --- /dev/null +++ b/backend/ent/schema/promo_code_usage.go @@ -0,0 +1,66 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/entsql" + "entgo.io/ent/schema" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +// PromoCodeUsage holds the schema definition for the PromoCodeUsage entity. +// +// 优惠码使用记录:记录每个用户使用优惠码的情况 +type PromoCodeUsage struct { + ent.Schema +} + +func (PromoCodeUsage) Annotations() []schema.Annotation { + return []schema.Annotation{ + entsql.Annotation{Table: "promo_code_usages"}, + } +} + +func (PromoCodeUsage) Fields() []ent.Field { + return []ent.Field{ + field.Int64("promo_code_id"). + Comment("优惠码ID"), + field.Int64("user_id"). + Comment("使用用户ID"), + field.Float("bonus_amount"). + SchemaType(map[string]string{dialect.Postgres: "decimal(20,8)"}). + Comment("实际赠送金额"), + field.Time("used_at"). + Default(time.Now). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}). + Comment("使用时间"), + } +} + +func (PromoCodeUsage) Edges() []ent.Edge { + return []ent.Edge{ + edge.From("promo_code", PromoCode.Type). + Ref("usage_records"). + Field("promo_code_id"). + Required(). + Unique(), + edge.From("user", User.Type). + Ref("promo_code_usages"). + Field("user_id"). + Required(). 
+ Unique(), + } +} + +func (PromoCodeUsage) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("promo_code_id"), + index.Fields("user_id"), + // 每个用户每个优惠码只能使用一次 + index.Fields("promo_code_id", "user_id").Unique(), + } +} diff --git a/backend/ent/schema/usage_log.go b/backend/ent/schema/usage_log.go index df955181..fc7c7165 100644 --- a/backend/ent/schema/usage_log.go +++ b/backend/ent/schema/usage_log.go @@ -85,6 +85,12 @@ func (UsageLog) Fields() []ent.Field { Default(1). SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}), + // account_rate_multiplier: 账号计费倍率快照(NULL 表示按 1.0 处理) + field.Float("account_rate_multiplier"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "decimal(10,4)"}), + // 其他字段 field.Int8("billing_type"). Default(0), @@ -100,6 +106,10 @@ func (UsageLog) Fields() []ent.Field { MaxLen(512). Optional(). Nillable(), + field.String("ip_address"). + MaxLen(45). // 支持 IPv6 + Optional(). + Nillable(), // 图片生成字段(仅 gemini-3-pro-image 等图片模型使用) field.Int("image_count"). diff --git a/backend/ent/schema/user.go b/backend/ent/schema/user.go index 11fecdfd..79dc2286 100644 --- a/backend/ent/schema/user.go +++ b/backend/ent/schema/user.go @@ -74,6 +74,7 @@ func (User) Edges() []ent.Edge { Through("user_allowed_groups", UserAllowedGroup.Type), edge.To("usage_logs", UsageLog.Type), edge.To("attribute_values", UserAttributeValue.Type), + edge.To("promo_code_usages", PromoCodeUsage.Type), } } diff --git a/backend/ent/setting_query.go b/backend/ent/setting_query.go index e9dda6f5..38eb9462 100644 --- a/backend/ent/setting_query.go +++ b/backend/ent/setting_query.go @@ -8,6 +8,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -22,6 +23,7 @@ type SettingQuery struct { order []setting.OrderOption inters []Interceptor predicates []predicate.Setting + modifiers []func(*sql.Selector) // intermediate query (i.e. 
traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -343,6 +345,9 @@ func (_q *SettingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Sett nodes = append(nodes, node) return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -357,6 +362,9 @@ func (_q *SettingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Sett func (_q *SettingQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -419,6 +427,9 @@ func (_q *SettingQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -436,6 +447,32 @@ func (_q *SettingQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *SettingQuery) ForUpdate(opts ...sql.LockOption) *SettingQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (_q *SettingQuery) ForShare(opts ...sql.LockOption) *SettingQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // SettingGroupBy is the group-by builder for Setting entities. type SettingGroupBy struct { selector diff --git a/backend/ent/tx.go b/backend/ent/tx.go index e45204c0..56df121a 100644 --- a/backend/ent/tx.go +++ b/backend/ent/tx.go @@ -22,6 +22,10 @@ type Tx struct { AccountGroup *AccountGroupClient // Group is the client for interacting with the Group builders. Group *GroupClient + // PromoCode is the client for interacting with the PromoCode builders. + PromoCode *PromoCodeClient + // PromoCodeUsage is the client for interacting with the PromoCodeUsage builders. + PromoCodeUsage *PromoCodeUsageClient // Proxy is the client for interacting with the Proxy builders. Proxy *ProxyClient // RedeemCode is the client for interacting with the RedeemCode builders. @@ -175,6 +179,8 @@ func (tx *Tx) init() { tx.Account = NewAccountClient(tx.config) tx.AccountGroup = NewAccountGroupClient(tx.config) tx.Group = NewGroupClient(tx.config) + tx.PromoCode = NewPromoCodeClient(tx.config) + tx.PromoCodeUsage = NewPromoCodeUsageClient(tx.config) tx.Proxy = NewProxyClient(tx.config) tx.RedeemCode = NewRedeemCodeClient(tx.config) tx.Setting = NewSettingClient(tx.config) diff --git a/backend/ent/usagelog.go b/backend/ent/usagelog.go index 798f3a9f..81c466b4 100644 --- a/backend/ent/usagelog.go +++ b/backend/ent/usagelog.go @@ -62,6 +62,8 @@ type UsageLog struct { ActualCost float64 `json:"actual_cost,omitempty"` // RateMultiplier holds the value of the "rate_multiplier" field. RateMultiplier float64 `json:"rate_multiplier,omitempty"` + // AccountRateMultiplier holds the value of the "account_rate_multiplier" field. 
+ AccountRateMultiplier *float64 `json:"account_rate_multiplier,omitempty"` // BillingType holds the value of the "billing_type" field. BillingType int8 `json:"billing_type,omitempty"` // Stream holds the value of the "stream" field. @@ -72,6 +74,8 @@ type UsageLog struct { FirstTokenMs *int `json:"first_token_ms,omitempty"` // UserAgent holds the value of the "user_agent" field. UserAgent *string `json:"user_agent,omitempty"` + // IPAddress holds the value of the "ip_address" field. + IPAddress *string `json:"ip_address,omitempty"` // ImageCount holds the value of the "image_count" field. ImageCount int `json:"image_count,omitempty"` // ImageSize holds the value of the "image_size" field. @@ -163,11 +167,11 @@ func (*UsageLog) scanValues(columns []string) ([]any, error) { switch columns[i] { case usagelog.FieldStream: values[i] = new(sql.NullBool) - case usagelog.FieldInputCost, usagelog.FieldOutputCost, usagelog.FieldCacheCreationCost, usagelog.FieldCacheReadCost, usagelog.FieldTotalCost, usagelog.FieldActualCost, usagelog.FieldRateMultiplier: + case usagelog.FieldInputCost, usagelog.FieldOutputCost, usagelog.FieldCacheCreationCost, usagelog.FieldCacheReadCost, usagelog.FieldTotalCost, usagelog.FieldActualCost, usagelog.FieldRateMultiplier, usagelog.FieldAccountRateMultiplier: values[i] = new(sql.NullFloat64) case usagelog.FieldID, usagelog.FieldUserID, usagelog.FieldAPIKeyID, usagelog.FieldAccountID, usagelog.FieldGroupID, usagelog.FieldSubscriptionID, usagelog.FieldInputTokens, usagelog.FieldOutputTokens, usagelog.FieldCacheCreationTokens, usagelog.FieldCacheReadTokens, usagelog.FieldCacheCreation5mTokens, usagelog.FieldCacheCreation1hTokens, usagelog.FieldBillingType, usagelog.FieldDurationMs, usagelog.FieldFirstTokenMs, usagelog.FieldImageCount: values[i] = new(sql.NullInt64) - case usagelog.FieldRequestID, usagelog.FieldModel, usagelog.FieldUserAgent, usagelog.FieldImageSize: + case usagelog.FieldRequestID, usagelog.FieldModel, usagelog.FieldUserAgent, 
usagelog.FieldIPAddress, usagelog.FieldImageSize: values[i] = new(sql.NullString) case usagelog.FieldCreatedAt: values[i] = new(sql.NullTime) @@ -314,6 +318,13 @@ func (_m *UsageLog) assignValues(columns []string, values []any) error { } else if value.Valid { _m.RateMultiplier = value.Float64 } + case usagelog.FieldAccountRateMultiplier: + if value, ok := values[i].(*sql.NullFloat64); !ok { + return fmt.Errorf("unexpected type %T for field account_rate_multiplier", values[i]) + } else if value.Valid { + _m.AccountRateMultiplier = new(float64) + *_m.AccountRateMultiplier = value.Float64 + } case usagelog.FieldBillingType: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field billing_type", values[i]) @@ -347,6 +358,13 @@ func (_m *UsageLog) assignValues(columns []string, values []any) error { _m.UserAgent = new(string) *_m.UserAgent = value.String } + case usagelog.FieldIPAddress: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field ip_address", values[i]) + } else if value.Valid { + _m.IPAddress = new(string) + *_m.IPAddress = value.String + } case usagelog.FieldImageCount: if value, ok := values[i].(*sql.NullInt64); !ok { return fmt.Errorf("unexpected type %T for field image_count", values[i]) @@ -491,6 +509,11 @@ func (_m *UsageLog) String() string { builder.WriteString("rate_multiplier=") builder.WriteString(fmt.Sprintf("%v", _m.RateMultiplier)) builder.WriteString(", ") + if v := _m.AccountRateMultiplier; v != nil { + builder.WriteString("account_rate_multiplier=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") builder.WriteString("billing_type=") builder.WriteString(fmt.Sprintf("%v", _m.BillingType)) builder.WriteString(", ") @@ -512,6 +535,11 @@ func (_m *UsageLog) String() string { builder.WriteString(*v) } builder.WriteString(", ") + if v := _m.IPAddress; v != nil { + builder.WriteString("ip_address=") + builder.WriteString(*v) + } + 
builder.WriteString(", ") builder.WriteString("image_count=") builder.WriteString(fmt.Sprintf("%v", _m.ImageCount)) builder.WriteString(", ") diff --git a/backend/ent/usagelog/usagelog.go b/backend/ent/usagelog/usagelog.go index d3edfb4d..980f1e58 100644 --- a/backend/ent/usagelog/usagelog.go +++ b/backend/ent/usagelog/usagelog.go @@ -54,6 +54,8 @@ const ( FieldActualCost = "actual_cost" // FieldRateMultiplier holds the string denoting the rate_multiplier field in the database. FieldRateMultiplier = "rate_multiplier" + // FieldAccountRateMultiplier holds the string denoting the account_rate_multiplier field in the database. + FieldAccountRateMultiplier = "account_rate_multiplier" // FieldBillingType holds the string denoting the billing_type field in the database. FieldBillingType = "billing_type" // FieldStream holds the string denoting the stream field in the database. @@ -64,6 +66,8 @@ const ( FieldFirstTokenMs = "first_token_ms" // FieldUserAgent holds the string denoting the user_agent field in the database. FieldUserAgent = "user_agent" + // FieldIPAddress holds the string denoting the ip_address field in the database. + FieldIPAddress = "ip_address" // FieldImageCount holds the string denoting the image_count field in the database. FieldImageCount = "image_count" // FieldImageSize holds the string denoting the image_size field in the database. @@ -142,11 +146,13 @@ var Columns = []string{ FieldTotalCost, FieldActualCost, FieldRateMultiplier, + FieldAccountRateMultiplier, FieldBillingType, FieldStream, FieldDurationMs, FieldFirstTokenMs, FieldUserAgent, + FieldIPAddress, FieldImageCount, FieldImageSize, FieldCreatedAt, @@ -199,6 +205,8 @@ var ( DefaultStream bool // UserAgentValidator is a validator for the "user_agent" field. It is called by the builders before save. UserAgentValidator func(string) error + // IPAddressValidator is a validator for the "ip_address" field. It is called by the builders before save. 
+ IPAddressValidator func(string) error // DefaultImageCount holds the default value on creation for the "image_count" field. DefaultImageCount int // ImageSizeValidator is a validator for the "image_size" field. It is called by the builders before save. @@ -315,6 +323,11 @@ func ByRateMultiplier(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldRateMultiplier, opts...).ToFunc() } +// ByAccountRateMultiplier orders the results by the account_rate_multiplier field. +func ByAccountRateMultiplier(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccountRateMultiplier, opts...).ToFunc() +} + // ByBillingType orders the results by the billing_type field. func ByBillingType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldBillingType, opts...).ToFunc() @@ -340,6 +353,11 @@ func ByUserAgent(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldUserAgent, opts...).ToFunc() } +// ByIPAddress orders the results by the ip_address field. +func ByIPAddress(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIPAddress, opts...).ToFunc() +} + // ByImageCount orders the results by the image_count field. func ByImageCount(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldImageCount, opts...).ToFunc() diff --git a/backend/ent/usagelog/where.go b/backend/ent/usagelog/where.go index c7acd59d..28e2ab4c 100644 --- a/backend/ent/usagelog/where.go +++ b/backend/ent/usagelog/where.go @@ -155,6 +155,11 @@ func RateMultiplier(v float64) predicate.UsageLog { return predicate.UsageLog(sql.FieldEQ(FieldRateMultiplier, v)) } +// AccountRateMultiplier applies equality check predicate on the "account_rate_multiplier" field. It's identical to AccountRateMultiplierEQ. 
+func AccountRateMultiplier(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldAccountRateMultiplier, v)) +} + // BillingType applies equality check predicate on the "billing_type" field. It's identical to BillingTypeEQ. func BillingType(v int8) predicate.UsageLog { return predicate.UsageLog(sql.FieldEQ(FieldBillingType, v)) @@ -180,6 +185,11 @@ func UserAgent(v string) predicate.UsageLog { return predicate.UsageLog(sql.FieldEQ(FieldUserAgent, v)) } +// IPAddress applies equality check predicate on the "ip_address" field. It's identical to IPAddressEQ. +func IPAddress(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldIPAddress, v)) +} + // ImageCount applies equality check predicate on the "image_count" field. It's identical to ImageCountEQ. func ImageCount(v int) predicate.UsageLog { return predicate.UsageLog(sql.FieldEQ(FieldImageCount, v)) @@ -965,6 +975,56 @@ func RateMultiplierLTE(v float64) predicate.UsageLog { return predicate.UsageLog(sql.FieldLTE(FieldRateMultiplier, v)) } +// AccountRateMultiplierEQ applies the EQ predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierNEQ applies the NEQ predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierNEQ(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierIn applies the In predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldAccountRateMultiplier, vs...)) +} + +// AccountRateMultiplierNotIn applies the NotIn predicate on the "account_rate_multiplier" field. 
+func AccountRateMultiplierNotIn(vs ...float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldAccountRateMultiplier, vs...)) +} + +// AccountRateMultiplierGT applies the GT predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierGT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierGTE applies the GTE predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierGTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierLT applies the LT predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierLT(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierLTE applies the LTE predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierLTE(v float64) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldAccountRateMultiplier, v)) +} + +// AccountRateMultiplierIsNil applies the IsNil predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierIsNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldIsNull(FieldAccountRateMultiplier)) +} + +// AccountRateMultiplierNotNil applies the NotNil predicate on the "account_rate_multiplier" field. +func AccountRateMultiplierNotNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotNull(FieldAccountRateMultiplier)) +} + // BillingTypeEQ applies the EQ predicate on the "billing_type" field. func BillingTypeEQ(v int8) predicate.UsageLog { return predicate.UsageLog(sql.FieldEQ(FieldBillingType, v)) @@ -1190,6 +1250,81 @@ func UserAgentContainsFold(v string) predicate.UsageLog { return predicate.UsageLog(sql.FieldContainsFold(FieldUserAgent, v)) } +// IPAddressEQ applies the EQ predicate on the "ip_address" field. 
+func IPAddressEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEQ(FieldIPAddress, v)) +} + +// IPAddressNEQ applies the NEQ predicate on the "ip_address" field. +func IPAddressNEQ(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNEQ(FieldIPAddress, v)) +} + +// IPAddressIn applies the In predicate on the "ip_address" field. +func IPAddressIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldIn(FieldIPAddress, vs...)) +} + +// IPAddressNotIn applies the NotIn predicate on the "ip_address" field. +func IPAddressNotIn(vs ...string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotIn(FieldIPAddress, vs...)) +} + +// IPAddressGT applies the GT predicate on the "ip_address" field. +func IPAddressGT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGT(FieldIPAddress, v)) +} + +// IPAddressGTE applies the GTE predicate on the "ip_address" field. +func IPAddressGTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldGTE(FieldIPAddress, v)) +} + +// IPAddressLT applies the LT predicate on the "ip_address" field. +func IPAddressLT(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLT(FieldIPAddress, v)) +} + +// IPAddressLTE applies the LTE predicate on the "ip_address" field. +func IPAddressLTE(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldLTE(FieldIPAddress, v)) +} + +// IPAddressContains applies the Contains predicate on the "ip_address" field. +func IPAddressContains(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContains(FieldIPAddress, v)) +} + +// IPAddressHasPrefix applies the HasPrefix predicate on the "ip_address" field. +func IPAddressHasPrefix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasPrefix(FieldIPAddress, v)) +} + +// IPAddressHasSuffix applies the HasSuffix predicate on the "ip_address" field. 
+func IPAddressHasSuffix(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldHasSuffix(FieldIPAddress, v)) +} + +// IPAddressIsNil applies the IsNil predicate on the "ip_address" field. +func IPAddressIsNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldIsNull(FieldIPAddress)) +} + +// IPAddressNotNil applies the NotNil predicate on the "ip_address" field. +func IPAddressNotNil() predicate.UsageLog { + return predicate.UsageLog(sql.FieldNotNull(FieldIPAddress)) +} + +// IPAddressEqualFold applies the EqualFold predicate on the "ip_address" field. +func IPAddressEqualFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldEqualFold(FieldIPAddress, v)) +} + +// IPAddressContainsFold applies the ContainsFold predicate on the "ip_address" field. +func IPAddressContainsFold(v string) predicate.UsageLog { + return predicate.UsageLog(sql.FieldContainsFold(FieldIPAddress, v)) +} + // ImageCountEQ applies the EQ predicate on the "image_count" field. func ImageCountEQ(v int) predicate.UsageLog { return predicate.UsageLog(sql.FieldEQ(FieldImageCount, v)) diff --git a/backend/ent/usagelog_create.go b/backend/ent/usagelog_create.go index f77650ab..a17d6507 100644 --- a/backend/ent/usagelog_create.go +++ b/backend/ent/usagelog_create.go @@ -267,6 +267,20 @@ func (_c *UsageLogCreate) SetNillableRateMultiplier(v *float64) *UsageLogCreate return _c } +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (_c *UsageLogCreate) SetAccountRateMultiplier(v float64) *UsageLogCreate { + _c.mutation.SetAccountRateMultiplier(v) + return _c +} + +// SetNillableAccountRateMultiplier sets the "account_rate_multiplier" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableAccountRateMultiplier(v *float64) *UsageLogCreate { + if v != nil { + _c.SetAccountRateMultiplier(*v) + } + return _c +} + // SetBillingType sets the "billing_type" field. 
func (_c *UsageLogCreate) SetBillingType(v int8) *UsageLogCreate { _c.mutation.SetBillingType(v) @@ -337,6 +351,20 @@ func (_c *UsageLogCreate) SetNillableUserAgent(v *string) *UsageLogCreate { return _c } +// SetIPAddress sets the "ip_address" field. +func (_c *UsageLogCreate) SetIPAddress(v string) *UsageLogCreate { + _c.mutation.SetIPAddress(v) + return _c +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (_c *UsageLogCreate) SetNillableIPAddress(v *string) *UsageLogCreate { + if v != nil { + _c.SetIPAddress(*v) + } + return _c +} + // SetImageCount sets the "image_count" field. func (_c *UsageLogCreate) SetImageCount(v int) *UsageLogCreate { _c.mutation.SetImageCount(v) @@ -586,6 +614,11 @@ func (_c *UsageLogCreate) check() error { return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)} } } + if v, ok := _c.mutation.IPAddress(); ok { + if err := usagelog.IPAddressValidator(v); err != nil { + return &ValidationError{Name: "ip_address", err: fmt.Errorf(`ent: validator failed for field "UsageLog.ip_address": %w`, err)} + } + } if _, ok := _c.mutation.ImageCount(); !ok { return &ValidationError{Name: "image_count", err: errors.New(`ent: missing required field "UsageLog.image_count"`)} } @@ -693,6 +726,10 @@ func (_c *UsageLogCreate) createSpec() (*UsageLog, *sqlgraph.CreateSpec) { _spec.SetField(usagelog.FieldRateMultiplier, field.TypeFloat64, value) _node.RateMultiplier = value } + if value, ok := _c.mutation.AccountRateMultiplier(); ok { + _spec.SetField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value) + _node.AccountRateMultiplier = &value + } if value, ok := _c.mutation.BillingType(); ok { _spec.SetField(usagelog.FieldBillingType, field.TypeInt8, value) _node.BillingType = value @@ -713,6 +750,10 @@ func (_c *UsageLogCreate) createSpec() (*UsageLog, *sqlgraph.CreateSpec) { _spec.SetField(usagelog.FieldUserAgent, field.TypeString, 
value) _node.UserAgent = &value } + if value, ok := _c.mutation.IPAddress(); ok { + _spec.SetField(usagelog.FieldIPAddress, field.TypeString, value) + _node.IPAddress = &value + } if value, ok := _c.mutation.ImageCount(); ok { _spec.SetField(usagelog.FieldImageCount, field.TypeInt, value) _node.ImageCount = value @@ -1192,6 +1233,30 @@ func (u *UsageLogUpsert) AddRateMultiplier(v float64) *UsageLogUpsert { return u } +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (u *UsageLogUpsert) SetAccountRateMultiplier(v float64) *UsageLogUpsert { + u.Set(usagelog.FieldAccountRateMultiplier, v) + return u +} + +// UpdateAccountRateMultiplier sets the "account_rate_multiplier" field to the value that was provided on create. +func (u *UsageLogUpsert) UpdateAccountRateMultiplier() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldAccountRateMultiplier) + return u +} + +// AddAccountRateMultiplier adds v to the "account_rate_multiplier" field. +func (u *UsageLogUpsert) AddAccountRateMultiplier(v float64) *UsageLogUpsert { + u.Add(usagelog.FieldAccountRateMultiplier, v) + return u +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (u *UsageLogUpsert) ClearAccountRateMultiplier() *UsageLogUpsert { + u.SetNull(usagelog.FieldAccountRateMultiplier) + return u +} + // SetBillingType sets the "billing_type" field. func (u *UsageLogUpsert) SetBillingType(v int8) *UsageLogUpsert { u.Set(usagelog.FieldBillingType, v) @@ -1288,6 +1353,24 @@ func (u *UsageLogUpsert) ClearUserAgent() *UsageLogUpsert { return u } +// SetIPAddress sets the "ip_address" field. +func (u *UsageLogUpsert) SetIPAddress(v string) *UsageLogUpsert { + u.Set(usagelog.FieldIPAddress, v) + return u +} + +// UpdateIPAddress sets the "ip_address" field to the value that was provided on create. 
+func (u *UsageLogUpsert) UpdateIPAddress() *UsageLogUpsert { + u.SetExcluded(usagelog.FieldIPAddress) + return u +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (u *UsageLogUpsert) ClearIPAddress() *UsageLogUpsert { + u.SetNull(usagelog.FieldIPAddress) + return u +} + // SetImageCount sets the "image_count" field. func (u *UsageLogUpsert) SetImageCount(v int) *UsageLogUpsert { u.Set(usagelog.FieldImageCount, v) @@ -1754,6 +1837,34 @@ func (u *UsageLogUpsertOne) UpdateRateMultiplier() *UsageLogUpsertOne { }) } +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (u *UsageLogUpsertOne) SetAccountRateMultiplier(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetAccountRateMultiplier(v) + }) +} + +// AddAccountRateMultiplier adds v to the "account_rate_multiplier" field. +func (u *UsageLogUpsertOne) AddAccountRateMultiplier(v float64) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.AddAccountRateMultiplier(v) + }) +} + +// UpdateAccountRateMultiplier sets the "account_rate_multiplier" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateAccountRateMultiplier() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateAccountRateMultiplier() + }) +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (u *UsageLogUpsertOne) ClearAccountRateMultiplier() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.ClearAccountRateMultiplier() + }) +} + // SetBillingType sets the "billing_type" field. func (u *UsageLogUpsertOne) SetBillingType(v int8) *UsageLogUpsertOne { return u.Update(func(s *UsageLogUpsert) { @@ -1866,6 +1977,27 @@ func (u *UsageLogUpsertOne) ClearUserAgent() *UsageLogUpsertOne { }) } +// SetIPAddress sets the "ip_address" field. 
+func (u *UsageLogUpsertOne) SetIPAddress(v string) *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.SetIPAddress(v) + }) +} + +// UpdateIPAddress sets the "ip_address" field to the value that was provided on create. +func (u *UsageLogUpsertOne) UpdateIPAddress() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateIPAddress() + }) +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (u *UsageLogUpsertOne) ClearIPAddress() *UsageLogUpsertOne { + return u.Update(func(s *UsageLogUpsert) { + s.ClearIPAddress() + }) +} + // SetImageCount sets the "image_count" field. func (u *UsageLogUpsertOne) SetImageCount(v int) *UsageLogUpsertOne { return u.Update(func(s *UsageLogUpsert) { @@ -2504,6 +2636,34 @@ func (u *UsageLogUpsertBulk) UpdateRateMultiplier() *UsageLogUpsertBulk { }) } +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (u *UsageLogUpsertBulk) SetAccountRateMultiplier(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetAccountRateMultiplier(v) + }) +} + +// AddAccountRateMultiplier adds v to the "account_rate_multiplier" field. +func (u *UsageLogUpsertBulk) AddAccountRateMultiplier(v float64) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.AddAccountRateMultiplier(v) + }) +} + +// UpdateAccountRateMultiplier sets the "account_rate_multiplier" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateAccountRateMultiplier() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateAccountRateMultiplier() + }) +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (u *UsageLogUpsertBulk) ClearAccountRateMultiplier() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.ClearAccountRateMultiplier() + }) +} + // SetBillingType sets the "billing_type" field. 
func (u *UsageLogUpsertBulk) SetBillingType(v int8) *UsageLogUpsertBulk { return u.Update(func(s *UsageLogUpsert) { @@ -2616,6 +2776,27 @@ func (u *UsageLogUpsertBulk) ClearUserAgent() *UsageLogUpsertBulk { }) } +// SetIPAddress sets the "ip_address" field. +func (u *UsageLogUpsertBulk) SetIPAddress(v string) *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.SetIPAddress(v) + }) +} + +// UpdateIPAddress sets the "ip_address" field to the value that was provided on create. +func (u *UsageLogUpsertBulk) UpdateIPAddress() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.UpdateIPAddress() + }) +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (u *UsageLogUpsertBulk) ClearIPAddress() *UsageLogUpsertBulk { + return u.Update(func(s *UsageLogUpsert) { + s.ClearIPAddress() + }) +} + // SetImageCount sets the "image_count" field. func (u *UsageLogUpsertBulk) SetImageCount(v int) *UsageLogUpsertBulk { return u.Update(func(s *UsageLogUpsert) { diff --git a/backend/ent/usagelog_query.go b/backend/ent/usagelog_query.go index de64171a..c709bde0 100644 --- a/backend/ent/usagelog_query.go +++ b/backend/ent/usagelog_query.go @@ -8,6 +8,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -32,6 +33,7 @@ type UsageLogQuery struct { withAccount *AccountQuery withGroup *GroupQuery withSubscription *UserSubscriptionQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -531,6 +533,9 @@ func (_q *UsageLogQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Usa node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -727,6 +732,9 @@ func (_q *UsageLogQuery) loadSubscription(ctx context.Context, query *UserSubscr func (_q *UsageLogQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -804,6 +812,9 @@ func (_q *UsageLogQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -821,6 +832,32 @@ func (_q *UsageLogQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *UsageLogQuery) ForUpdate(opts ...sql.LockOption) *UsageLogQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (_q *UsageLogQuery) ForShare(opts ...sql.LockOption) *UsageLogQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // UsageLogGroupBy is the group-by builder for UsageLog entities. type UsageLogGroupBy struct { selector diff --git a/backend/ent/usagelog_update.go b/backend/ent/usagelog_update.go index 2e77eef7..571a7b3c 100644 --- a/backend/ent/usagelog_update.go +++ b/backend/ent/usagelog_update.go @@ -415,6 +415,33 @@ func (_u *UsageLogUpdate) AddRateMultiplier(v float64) *UsageLogUpdate { return _u } +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (_u *UsageLogUpdate) SetAccountRateMultiplier(v float64) *UsageLogUpdate { + _u.mutation.ResetAccountRateMultiplier() + _u.mutation.SetAccountRateMultiplier(v) + return _u +} + +// SetNillableAccountRateMultiplier sets the "account_rate_multiplier" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableAccountRateMultiplier(v *float64) *UsageLogUpdate { + if v != nil { + _u.SetAccountRateMultiplier(*v) + } + return _u +} + +// AddAccountRateMultiplier adds value to the "account_rate_multiplier" field. +func (_u *UsageLogUpdate) AddAccountRateMultiplier(v float64) *UsageLogUpdate { + _u.mutation.AddAccountRateMultiplier(v) + return _u +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (_u *UsageLogUpdate) ClearAccountRateMultiplier() *UsageLogUpdate { + _u.mutation.ClearAccountRateMultiplier() + return _u +} + // SetBillingType sets the "billing_type" field. func (_u *UsageLogUpdate) SetBillingType(v int8) *UsageLogUpdate { _u.mutation.ResetBillingType() @@ -524,6 +551,26 @@ func (_u *UsageLogUpdate) ClearUserAgent() *UsageLogUpdate { return _u } +// SetIPAddress sets the "ip_address" field. 
+func (_u *UsageLogUpdate) SetIPAddress(v string) *UsageLogUpdate { + _u.mutation.SetIPAddress(v) + return _u +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (_u *UsageLogUpdate) SetNillableIPAddress(v *string) *UsageLogUpdate { + if v != nil { + _u.SetIPAddress(*v) + } + return _u +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (_u *UsageLogUpdate) ClearIPAddress() *UsageLogUpdate { + _u.mutation.ClearIPAddress() + return _u +} + // SetImageCount sets the "image_count" field. func (_u *UsageLogUpdate) SetImageCount(v int) *UsageLogUpdate { _u.mutation.ResetImageCount() @@ -669,6 +716,11 @@ func (_u *UsageLogUpdate) check() error { return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)} } } + if v, ok := _u.mutation.IPAddress(); ok { + if err := usagelog.IPAddressValidator(v); err != nil { + return &ValidationError{Name: "ip_address", err: fmt.Errorf(`ent: validator failed for field "UsageLog.ip_address": %w`, err)} + } + } if v, ok := _u.mutation.ImageSize(); ok { if err := usagelog.ImageSizeValidator(v); err != nil { return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)} @@ -782,6 +834,15 @@ func (_u *UsageLogUpdate) sqlSave(ctx context.Context) (_node int, err error) { if value, ok := _u.mutation.AddedRateMultiplier(); ok { _spec.AddField(usagelog.FieldRateMultiplier, field.TypeFloat64, value) } + if value, ok := _u.mutation.AccountRateMultiplier(); ok { + _spec.SetField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value) + } + if value, ok := _u.mutation.AddedAccountRateMultiplier(); ok { + _spec.AddField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value) + } + if _u.mutation.AccountRateMultiplierCleared() { + _spec.ClearField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64) + } if value, ok := 
_u.mutation.BillingType(); ok { _spec.SetField(usagelog.FieldBillingType, field.TypeInt8, value) } @@ -815,6 +876,12 @@ func (_u *UsageLogUpdate) sqlSave(ctx context.Context) (_node int, err error) { if _u.mutation.UserAgentCleared() { _spec.ClearField(usagelog.FieldUserAgent, field.TypeString) } + if value, ok := _u.mutation.IPAddress(); ok { + _spec.SetField(usagelog.FieldIPAddress, field.TypeString, value) + } + if _u.mutation.IPAddressCleared() { + _spec.ClearField(usagelog.FieldIPAddress, field.TypeString) + } if value, ok := _u.mutation.ImageCount(); ok { _spec.SetField(usagelog.FieldImageCount, field.TypeInt, value) } @@ -1375,6 +1442,33 @@ func (_u *UsageLogUpdateOne) AddRateMultiplier(v float64) *UsageLogUpdateOne { return _u } +// SetAccountRateMultiplier sets the "account_rate_multiplier" field. +func (_u *UsageLogUpdateOne) SetAccountRateMultiplier(v float64) *UsageLogUpdateOne { + _u.mutation.ResetAccountRateMultiplier() + _u.mutation.SetAccountRateMultiplier(v) + return _u +} + +// SetNillableAccountRateMultiplier sets the "account_rate_multiplier" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableAccountRateMultiplier(v *float64) *UsageLogUpdateOne { + if v != nil { + _u.SetAccountRateMultiplier(*v) + } + return _u +} + +// AddAccountRateMultiplier adds value to the "account_rate_multiplier" field. +func (_u *UsageLogUpdateOne) AddAccountRateMultiplier(v float64) *UsageLogUpdateOne { + _u.mutation.AddAccountRateMultiplier(v) + return _u +} + +// ClearAccountRateMultiplier clears the value of the "account_rate_multiplier" field. +func (_u *UsageLogUpdateOne) ClearAccountRateMultiplier() *UsageLogUpdateOne { + _u.mutation.ClearAccountRateMultiplier() + return _u +} + // SetBillingType sets the "billing_type" field. 
func (_u *UsageLogUpdateOne) SetBillingType(v int8) *UsageLogUpdateOne { _u.mutation.ResetBillingType() @@ -1484,6 +1578,26 @@ func (_u *UsageLogUpdateOne) ClearUserAgent() *UsageLogUpdateOne { return _u } +// SetIPAddress sets the "ip_address" field. +func (_u *UsageLogUpdateOne) SetIPAddress(v string) *UsageLogUpdateOne { + _u.mutation.SetIPAddress(v) + return _u +} + +// SetNillableIPAddress sets the "ip_address" field if the given value is not nil. +func (_u *UsageLogUpdateOne) SetNillableIPAddress(v *string) *UsageLogUpdateOne { + if v != nil { + _u.SetIPAddress(*v) + } + return _u +} + +// ClearIPAddress clears the value of the "ip_address" field. +func (_u *UsageLogUpdateOne) ClearIPAddress() *UsageLogUpdateOne { + _u.mutation.ClearIPAddress() + return _u +} + // SetImageCount sets the "image_count" field. func (_u *UsageLogUpdateOne) SetImageCount(v int) *UsageLogUpdateOne { _u.mutation.ResetImageCount() @@ -1642,6 +1756,11 @@ func (_u *UsageLogUpdateOne) check() error { return &ValidationError{Name: "user_agent", err: fmt.Errorf(`ent: validator failed for field "UsageLog.user_agent": %w`, err)} } } + if v, ok := _u.mutation.IPAddress(); ok { + if err := usagelog.IPAddressValidator(v); err != nil { + return &ValidationError{Name: "ip_address", err: fmt.Errorf(`ent: validator failed for field "UsageLog.ip_address": %w`, err)} + } + } if v, ok := _u.mutation.ImageSize(); ok { if err := usagelog.ImageSizeValidator(v); err != nil { return &ValidationError{Name: "image_size", err: fmt.Errorf(`ent: validator failed for field "UsageLog.image_size": %w`, err)} @@ -1772,6 +1891,15 @@ func (_u *UsageLogUpdateOne) sqlSave(ctx context.Context) (_node *UsageLog, err if value, ok := _u.mutation.AddedRateMultiplier(); ok { _spec.AddField(usagelog.FieldRateMultiplier, field.TypeFloat64, value) } + if value, ok := _u.mutation.AccountRateMultiplier(); ok { + _spec.SetField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value) + } + if value, ok := 
_u.mutation.AddedAccountRateMultiplier(); ok { + _spec.AddField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64, value) + } + if _u.mutation.AccountRateMultiplierCleared() { + _spec.ClearField(usagelog.FieldAccountRateMultiplier, field.TypeFloat64) + } if value, ok := _u.mutation.BillingType(); ok { _spec.SetField(usagelog.FieldBillingType, field.TypeInt8, value) } @@ -1805,6 +1933,12 @@ func (_u *UsageLogUpdateOne) sqlSave(ctx context.Context) (_node *UsageLog, err if _u.mutation.UserAgentCleared() { _spec.ClearField(usagelog.FieldUserAgent, field.TypeString) } + if value, ok := _u.mutation.IPAddress(); ok { + _spec.SetField(usagelog.FieldIPAddress, field.TypeString, value) + } + if _u.mutation.IPAddressCleared() { + _spec.ClearField(usagelog.FieldIPAddress, field.TypeString) + } if value, ok := _u.mutation.ImageCount(); ok { _spec.SetField(usagelog.FieldImageCount, field.TypeInt, value) } diff --git a/backend/ent/user.go b/backend/ent/user.go index 20036475..0b9a48cc 100644 --- a/backend/ent/user.go +++ b/backend/ent/user.go @@ -61,11 +61,13 @@ type UserEdges struct { UsageLogs []*UsageLog `json:"usage_logs,omitempty"` // AttributeValues holds the value of the attribute_values edge. AttributeValues []*UserAttributeValue `json:"attribute_values,omitempty"` + // PromoCodeUsages holds the value of the promo_code_usages edge. + PromoCodeUsages []*PromoCodeUsage `json:"promo_code_usages,omitempty"` // UserAllowedGroups holds the value of the user_allowed_groups edge. UserAllowedGroups []*UserAllowedGroup `json:"user_allowed_groups,omitempty"` // loadedTypes holds the information for reporting if a // type was loaded (or requested) in eager-loading or not. 
- loadedTypes [8]bool + loadedTypes [9]bool } // APIKeysOrErr returns the APIKeys value or an error if the edge @@ -131,10 +133,19 @@ func (e UserEdges) AttributeValuesOrErr() ([]*UserAttributeValue, error) { return nil, &NotLoadedError{edge: "attribute_values"} } +// PromoCodeUsagesOrErr returns the PromoCodeUsages value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) PromoCodeUsagesOrErr() ([]*PromoCodeUsage, error) { + if e.loadedTypes[7] { + return e.PromoCodeUsages, nil + } + return nil, &NotLoadedError{edge: "promo_code_usages"} +} + // UserAllowedGroupsOrErr returns the UserAllowedGroups value or an error if the edge // was not loaded in eager-loading. func (e UserEdges) UserAllowedGroupsOrErr() ([]*UserAllowedGroup, error) { - if e.loadedTypes[7] { + if e.loadedTypes[8] { return e.UserAllowedGroups, nil } return nil, &NotLoadedError{edge: "user_allowed_groups"} @@ -289,6 +300,11 @@ func (_m *User) QueryAttributeValues() *UserAttributeValueQuery { return NewUserClient(_m.config).QueryAttributeValues(_m) } +// QueryPromoCodeUsages queries the "promo_code_usages" edge of the User entity. +func (_m *User) QueryPromoCodeUsages() *PromoCodeUsageQuery { + return NewUserClient(_m.config).QueryPromoCodeUsages(_m) +} + // QueryUserAllowedGroups queries the "user_allowed_groups" edge of the User entity. func (_m *User) QueryUserAllowedGroups() *UserAllowedGroupQuery { return NewUserClient(_m.config).QueryUserAllowedGroups(_m) diff --git a/backend/ent/user/user.go b/backend/ent/user/user.go index a6871c5d..1be1d871 100644 --- a/backend/ent/user/user.go +++ b/backend/ent/user/user.go @@ -51,6 +51,8 @@ const ( EdgeUsageLogs = "usage_logs" // EdgeAttributeValues holds the string denoting the attribute_values edge name in mutations. EdgeAttributeValues = "attribute_values" + // EdgePromoCodeUsages holds the string denoting the promo_code_usages edge name in mutations. 
+ EdgePromoCodeUsages = "promo_code_usages" // EdgeUserAllowedGroups holds the string denoting the user_allowed_groups edge name in mutations. EdgeUserAllowedGroups = "user_allowed_groups" // Table holds the table name of the user in the database. @@ -102,6 +104,13 @@ const ( AttributeValuesInverseTable = "user_attribute_values" // AttributeValuesColumn is the table column denoting the attribute_values relation/edge. AttributeValuesColumn = "user_id" + // PromoCodeUsagesTable is the table that holds the promo_code_usages relation/edge. + PromoCodeUsagesTable = "promo_code_usages" + // PromoCodeUsagesInverseTable is the table name for the PromoCodeUsage entity. + // It exists in this package in order to avoid circular dependency with the "promocodeusage" package. + PromoCodeUsagesInverseTable = "promo_code_usages" + // PromoCodeUsagesColumn is the table column denoting the promo_code_usages relation/edge. + PromoCodeUsagesColumn = "user_id" // UserAllowedGroupsTable is the table that holds the user_allowed_groups relation/edge. UserAllowedGroupsTable = "user_allowed_groups" // UserAllowedGroupsInverseTable is the table name for the UserAllowedGroup entity. @@ -342,6 +351,20 @@ func ByAttributeValues(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { } } +// ByPromoCodeUsagesCount orders the results by promo_code_usages count. +func ByPromoCodeUsagesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newPromoCodeUsagesStep(), opts...) + } +} + +// ByPromoCodeUsages orders the results by promo_code_usages terms. +func ByPromoCodeUsages(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newPromoCodeUsagesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + // ByUserAllowedGroupsCount orders the results by user_allowed_groups count. 
func ByUserAllowedGroupsCount(opts ...sql.OrderTermOption) OrderOption { return func(s *sql.Selector) { @@ -404,6 +427,13 @@ func newAttributeValuesStep() *sqlgraph.Step { sqlgraph.Edge(sqlgraph.O2M, false, AttributeValuesTable, AttributeValuesColumn), ) } +func newPromoCodeUsagesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(PromoCodeUsagesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PromoCodeUsagesTable, PromoCodeUsagesColumn), + ) +} func newUserAllowedGroupsStep() *sqlgraph.Step { return sqlgraph.NewStep( sqlgraph.From(Table, FieldID), diff --git a/backend/ent/user/where.go b/backend/ent/user/where.go index 38812770..6a460f10 100644 --- a/backend/ent/user/where.go +++ b/backend/ent/user/where.go @@ -871,6 +871,29 @@ func HasAttributeValuesWith(preds ...predicate.UserAttributeValue) predicate.Use }) } +// HasPromoCodeUsages applies the HasEdge predicate on the "promo_code_usages" edge. +func HasPromoCodeUsages() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, PromoCodeUsagesTable, PromoCodeUsagesColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasPromoCodeUsagesWith applies the HasEdge predicate on the "promo_code_usages" edge with a given conditions (other predicates). +func HasPromoCodeUsagesWith(preds ...predicate.PromoCodeUsage) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newPromoCodeUsagesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + // HasUserAllowedGroups applies the HasEdge predicate on the "user_allowed_groups" edge. 
func HasUserAllowedGroups() predicate.User { return predicate.User(func(s *sql.Selector) { diff --git a/backend/ent/user_create.go b/backend/ent/user_create.go index 4ce48d4b..e12e476c 100644 --- a/backend/ent/user_create.go +++ b/backend/ent/user_create.go @@ -13,6 +13,7 @@ import ( "entgo.io/ent/schema/field" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" @@ -271,6 +272,21 @@ func (_c *UserCreate) AddAttributeValues(v ...*UserAttributeValue) *UserCreate { return _c.AddAttributeValueIDs(ids...) } +// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by IDs. +func (_c *UserCreate) AddPromoCodeUsageIDs(ids ...int64) *UserCreate { + _c.mutation.AddPromoCodeUsageIDs(ids...) + return _c +} + +// AddPromoCodeUsages adds the "promo_code_usages" edges to the PromoCodeUsage entity. +func (_c *UserCreate) AddPromoCodeUsages(v ...*PromoCodeUsage) *UserCreate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddPromoCodeUsageIDs(ids...) +} + // Mutation returns the UserMutation object of the builder. 
func (_c *UserCreate) Mutation() *UserMutation { return _c.mutation @@ -593,6 +609,22 @@ func (_c *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { } _spec.Edges = append(_spec.Edges, edge) } + if nodes := _c.mutation.PromoCodeUsagesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } return _node, _spec } diff --git a/backend/ent/user_query.go b/backend/ent/user_query.go index 0d65a2dd..e66e2dc8 100644 --- a/backend/ent/user_query.go +++ b/backend/ent/user_query.go @@ -9,12 +9,14 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" @@ -37,7 +39,9 @@ type UserQuery struct { withAllowedGroups *GroupQuery withUsageLogs *UsageLogQuery withAttributeValues *UserAttributeValueQuery + withPromoCodeUsages *PromoCodeUsageQuery withUserAllowedGroups *UserAllowedGroupQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -228,6 +232,28 @@ func (_q *UserQuery) QueryAttributeValues() *UserAttributeValueQuery { return query } +// QueryPromoCodeUsages chains the current query on the "promo_code_usages" edge. 
+func (_q *UserQuery) QueryPromoCodeUsages() *PromoCodeUsageQuery { + query := (&PromoCodeUsageClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(promocodeusage.Table, promocodeusage.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, user.PromoCodeUsagesTable, user.PromoCodeUsagesColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + // QueryUserAllowedGroups chains the current query on the "user_allowed_groups" edge. func (_q *UserQuery) QueryUserAllowedGroups() *UserAllowedGroupQuery { query := (&UserAllowedGroupClient{config: _q.config}).Query() @@ -449,6 +475,7 @@ func (_q *UserQuery) Clone() *UserQuery { withAllowedGroups: _q.withAllowedGroups.Clone(), withUsageLogs: _q.withUsageLogs.Clone(), withAttributeValues: _q.withAttributeValues.Clone(), + withPromoCodeUsages: _q.withPromoCodeUsages.Clone(), withUserAllowedGroups: _q.withUserAllowedGroups.Clone(), // clone intermediate query. sql: _q.sql.Clone(), @@ -533,6 +560,17 @@ func (_q *UserQuery) WithAttributeValues(opts ...func(*UserAttributeValueQuery)) return _q } +// WithPromoCodeUsages tells the query-builder to eager-load the nodes that are connected to +// the "promo_code_usages" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *UserQuery) WithPromoCodeUsages(opts ...func(*PromoCodeUsageQuery)) *UserQuery { + query := (&PromoCodeUsageClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withPromoCodeUsages = query + return _q +} + // WithUserAllowedGroups tells the query-builder to eager-load the nodes that are connected to // the "user_allowed_groups" edge. 
The optional arguments are used to configure the query builder of the edge. func (_q *UserQuery) WithUserAllowedGroups(opts ...func(*UserAllowedGroupQuery)) *UserQuery { @@ -622,7 +660,7 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e var ( nodes = []*User{} _spec = _q.querySpec() - loadedTypes = [8]bool{ + loadedTypes = [9]bool{ _q.withAPIKeys != nil, _q.withRedeemCodes != nil, _q.withSubscriptions != nil, @@ -630,6 +668,7 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e _q.withAllowedGroups != nil, _q.withUsageLogs != nil, _q.withAttributeValues != nil, + _q.withPromoCodeUsages != nil, _q.withUserAllowedGroups != nil, } ) @@ -642,6 +681,9 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -702,6 +744,13 @@ func (_q *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, e return nil, err } } + if query := _q.withPromoCodeUsages; query != nil { + if err := _q.loadPromoCodeUsages(ctx, query, nodes, + func(n *User) { n.Edges.PromoCodeUsages = []*PromoCodeUsage{} }, + func(n *User, e *PromoCodeUsage) { n.Edges.PromoCodeUsages = append(n.Edges.PromoCodeUsages, e) }); err != nil { + return nil, err + } + } if query := _q.withUserAllowedGroups; query != nil { if err := _q.loadUserAllowedGroups(ctx, query, nodes, func(n *User) { n.Edges.UserAllowedGroups = []*UserAllowedGroup{} }, @@ -959,6 +1008,36 @@ func (_q *UserQuery) loadAttributeValues(ctx context.Context, query *UserAttribu } return nil } +func (_q *UserQuery) loadPromoCodeUsages(ctx context.Context, query *PromoCodeUsageQuery, nodes []*User, init func(*User), assign func(*User, *PromoCodeUsage)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int64]*User) + for i := range nodes 
{ + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(promocodeusage.FieldUserID) + } + query.Where(predicate.PromoCodeUsage(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.PromoCodeUsagesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.UserID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "user_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} func (_q *UserQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllowedGroupQuery, nodes []*User, init func(*User), assign func(*User, *UserAllowedGroup)) error { fks := make([]driver.Value, 0, len(nodes)) nodeids := make(map[int64]*User) @@ -992,6 +1071,9 @@ func (_q *UserQuery) loadUserAllowedGroups(ctx context.Context, query *UserAllow func (_q *UserQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -1054,6 +1136,9 @@ func (_q *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -1071,6 +1156,32 @@ func (_q *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. 
+func (_q *UserQuery) ForUpdate(opts ...sql.LockOption) *UserQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *UserQuery) ForShare(opts ...sql.LockOption) *UserQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // UserGroupBy is the group-by builder for User entities. type UserGroupBy struct { selector diff --git a/backend/ent/user_update.go b/backend/ent/user_update.go index 49ddf493..cf189fea 100644 --- a/backend/ent/user_update.go +++ b/backend/ent/user_update.go @@ -14,6 +14,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/group" "github.com/Wei-Shaw/sub2api/ent/predicate" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/redeemcode" "github.com/Wei-Shaw/sub2api/ent/usagelog" "github.com/Wei-Shaw/sub2api/ent/user" @@ -291,6 +292,21 @@ func (_u *UserUpdate) AddAttributeValues(v ...*UserAttributeValue) *UserUpdate { return _u.AddAttributeValueIDs(ids...) } +// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by IDs. +func (_u *UserUpdate) AddPromoCodeUsageIDs(ids ...int64) *UserUpdate { + _u.mutation.AddPromoCodeUsageIDs(ids...) + return _u +} + +// AddPromoCodeUsages adds the "promo_code_usages" edges to the PromoCodeUsage entity. +func (_u *UserUpdate) AddPromoCodeUsages(v ...*PromoCodeUsage) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddPromoCodeUsageIDs(ids...) 
+} + // Mutation returns the UserMutation object of the builder. func (_u *UserUpdate) Mutation() *UserMutation { return _u.mutation @@ -443,6 +459,27 @@ func (_u *UserUpdate) RemoveAttributeValues(v ...*UserAttributeValue) *UserUpdat return _u.RemoveAttributeValueIDs(ids...) } +// ClearPromoCodeUsages clears all "promo_code_usages" edges to the PromoCodeUsage entity. +func (_u *UserUpdate) ClearPromoCodeUsages() *UserUpdate { + _u.mutation.ClearPromoCodeUsages() + return _u +} + +// RemovePromoCodeUsageIDs removes the "promo_code_usages" edge to PromoCodeUsage entities by IDs. +func (_u *UserUpdate) RemovePromoCodeUsageIDs(ids ...int64) *UserUpdate { + _u.mutation.RemovePromoCodeUsageIDs(ids...) + return _u +} + +// RemovePromoCodeUsages removes "promo_code_usages" edges to PromoCodeUsage entities. +func (_u *UserUpdate) RemovePromoCodeUsages(v ...*PromoCodeUsage) *UserUpdate { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemovePromoCodeUsageIDs(ids...) +} + // Save executes the query and returns the number of nodes affected by the update operation. 
func (_u *UserUpdate) Save(ctx context.Context) (int, error) { if err := _u.defaults(); err != nil { @@ -893,6 +930,51 @@ func (_u *UserUpdate) sqlSave(ctx context.Context) (_node int, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if _u.mutation.PromoCodeUsagesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedPromoCodeUsagesIDs(); len(nodes) > 0 && !_u.mutation.PromoCodeUsagesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.PromoCodeUsagesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { if _, ok := err.(*sqlgraph.NotFoundError); ok { err = &NotFoundError{user.Label} @@ -1170,6 +1252,21 @@ func (_u *UserUpdateOne) AddAttributeValues(v ...*UserAttributeValue) *UserUpdat return _u.AddAttributeValueIDs(ids...) 
} +// AddPromoCodeUsageIDs adds the "promo_code_usages" edge to the PromoCodeUsage entity by IDs. +func (_u *UserUpdateOne) AddPromoCodeUsageIDs(ids ...int64) *UserUpdateOne { + _u.mutation.AddPromoCodeUsageIDs(ids...) + return _u +} + +// AddPromoCodeUsages adds the "promo_code_usages" edges to the PromoCodeUsage entity. +func (_u *UserUpdateOne) AddPromoCodeUsages(v ...*PromoCodeUsage) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddPromoCodeUsageIDs(ids...) +} + // Mutation returns the UserMutation object of the builder. func (_u *UserUpdateOne) Mutation() *UserMutation { return _u.mutation @@ -1322,6 +1419,27 @@ func (_u *UserUpdateOne) RemoveAttributeValues(v ...*UserAttributeValue) *UserUp return _u.RemoveAttributeValueIDs(ids...) } +// ClearPromoCodeUsages clears all "promo_code_usages" edges to the PromoCodeUsage entity. +func (_u *UserUpdateOne) ClearPromoCodeUsages() *UserUpdateOne { + _u.mutation.ClearPromoCodeUsages() + return _u +} + +// RemovePromoCodeUsageIDs removes the "promo_code_usages" edge to PromoCodeUsage entities by IDs. +func (_u *UserUpdateOne) RemovePromoCodeUsageIDs(ids ...int64) *UserUpdateOne { + _u.mutation.RemovePromoCodeUsageIDs(ids...) + return _u +} + +// RemovePromoCodeUsages removes "promo_code_usages" edges to PromoCodeUsage entities. +func (_u *UserUpdateOne) RemovePromoCodeUsages(v ...*PromoCodeUsage) *UserUpdateOne { + ids := make([]int64, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemovePromoCodeUsageIDs(ids...) +} + // Where appends a list predicates to the UserUpdate builder. func (_u *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne { _u.mutation.Where(ps...) 
@@ -1802,6 +1920,51 @@ func (_u *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { } _spec.Edges.Add = append(_spec.Edges.Add, edge) } + if _u.mutation.PromoCodeUsagesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedPromoCodeUsagesIDs(); len(nodes) > 0 && !_u.mutation.PromoCodeUsagesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.PromoCodeUsagesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: user.PromoCodeUsagesTable, + Columns: []string{user.PromoCodeUsagesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(promocodeusage.FieldID, field.TypeInt64), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } _node = &User{config: _u.config} _spec.Assign = _node.assignValues _spec.ScanValues = _node.scanValues diff --git a/backend/ent/userallowedgroup_query.go b/backend/ent/userallowedgroup_query.go index da2c19a7..527ddc77 100644 --- a/backend/ent/userallowedgroup_query.go +++ b/backend/ent/userallowedgroup_query.go @@ -8,6 +8,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" 
"entgo.io/ent/dialect/sql/sqlgraph" "github.com/Wei-Shaw/sub2api/ent/group" @@ -25,6 +26,7 @@ type UserAllowedGroupQuery struct { predicates []predicate.UserAllowedGroup withUser *UserQuery withGroup *GroupQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -347,6 +349,9 @@ func (_q *UserAllowedGroupQuery) sqlAll(ctx context.Context, hooks ...queryHook) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -432,6 +437,9 @@ func (_q *UserAllowedGroupQuery) loadGroup(ctx context.Context, query *GroupQuer func (_q *UserAllowedGroupQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Unique = false _spec.Node.Columns = nil return sqlgraph.CountNodes(ctx, _q.driver, _spec) @@ -495,6 +503,9 @@ func (_q *UserAllowedGroupQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -512,6 +523,32 @@ func (_q *UserAllowedGroupQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *UserAllowedGroupQuery) ForUpdate(opts ...sql.LockOption) *UserAllowedGroupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) 
+ }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *UserAllowedGroupQuery) ForShare(opts ...sql.LockOption) *UserAllowedGroupQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // UserAllowedGroupGroupBy is the group-by builder for UserAllowedGroup entities. type UserAllowedGroupGroupBy struct { selector diff --git a/backend/ent/userattributedefinition_query.go b/backend/ent/userattributedefinition_query.go index 9022d306..0727b47c 100644 --- a/backend/ent/userattributedefinition_query.go +++ b/backend/ent/userattributedefinition_query.go @@ -9,6 +9,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -25,6 +26,7 @@ type UserAttributeDefinitionQuery struct { inters []Interceptor predicates []predicate.UserAttributeDefinition withValues *UserAttributeValueQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -384,6 +386,9 @@ func (_q *UserAttributeDefinitionQuery) sqlAll(ctx context.Context, hooks ...que node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -436,6 +441,9 @@ func (_q *UserAttributeDefinitionQuery) loadValues(ctx context.Context, query *U func (_q *UserAttributeDefinitionQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -498,6 +506,9 @@ func (_q *UserAttributeDefinitionQuery) sqlQuery(ctx context.Context) *sql.Selec if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -515,6 +526,32 @@ func (_q *UserAttributeDefinitionQuery) sqlQuery(ctx context.Context) *sql.Selec return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *UserAttributeDefinitionQuery) ForUpdate(opts ...sql.LockOption) *UserAttributeDefinitionQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (_q *UserAttributeDefinitionQuery) ForShare(opts ...sql.LockOption) *UserAttributeDefinitionQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // UserAttributeDefinitionGroupBy is the group-by builder for UserAttributeDefinition entities. type UserAttributeDefinitionGroupBy struct { selector diff --git a/backend/ent/userattributevalue_query.go b/backend/ent/userattributevalue_query.go index babfc9a9..a7c6b74a 100644 --- a/backend/ent/userattributevalue_query.go +++ b/backend/ent/userattributevalue_query.go @@ -8,6 +8,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -26,6 +27,7 @@ type UserAttributeValueQuery struct { predicates []predicate.UserAttributeValue withUser *UserQuery withDefinition *UserAttributeDefinitionQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). 
sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -420,6 +422,9 @@ func (_q *UserAttributeValueQuery) sqlAll(ctx context.Context, hooks ...queryHoo node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -505,6 +510,9 @@ func (_q *UserAttributeValueQuery) loadDefinition(ctx context.Context, query *Us func (_q *UserAttributeValueQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -573,6 +581,9 @@ func (_q *UserAttributeValueQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { p(selector) } @@ -590,6 +601,32 @@ func (_q *UserAttributeValueQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *UserAttributeValueQuery) ForUpdate(opts ...sql.LockOption) *UserAttributeValueQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. 
+func (_q *UserAttributeValueQuery) ForShare(opts ...sql.LockOption) *UserAttributeValueQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // UserAttributeValueGroupBy is the group-by builder for UserAttributeValue entities. type UserAttributeValueGroupBy struct { selector diff --git a/backend/ent/usersubscription_query.go b/backend/ent/usersubscription_query.go index 967fbddb..288b7b1d 100644 --- a/backend/ent/usersubscription_query.go +++ b/backend/ent/usersubscription_query.go @@ -9,6 +9,7 @@ import ( "math" "entgo.io/ent" + "entgo.io/ent/dialect" "entgo.io/ent/dialect/sql" "entgo.io/ent/dialect/sql/sqlgraph" "entgo.io/ent/schema/field" @@ -30,6 +31,7 @@ type UserSubscriptionQuery struct { withGroup *GroupQuery withAssignedByUser *UserQuery withUsageLogs *UsageLogQuery + modifiers []func(*sql.Selector) // intermediate query (i.e. traversal path). sql *sql.Selector path func(context.Context) (*sql.Selector, error) @@ -494,6 +496,9 @@ func (_q *UserSubscriptionQuery) sqlAll(ctx context.Context, hooks ...queryHook) node.Edges.loadedTypes = loadedTypes return node.assignValues(columns, values) } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } for i := range hooks { hooks[i](ctx, _spec) } @@ -657,6 +662,9 @@ func (_q *UserSubscriptionQuery) loadUsageLogs(ctx context.Context, query *Usage func (_q *UserSubscriptionQuery) sqlCount(ctx context.Context) (int, error) { _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } _spec.Node.Columns = _q.ctx.Fields if len(_q.ctx.Fields) > 0 { _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique @@ -728,6 +736,9 @@ func (_q *UserSubscriptionQuery) sqlQuery(ctx context.Context) *sql.Selector { if _q.ctx.Unique != nil && *_q.ctx.Unique { selector.Distinct() } + for _, m := range _q.modifiers { + m(selector) + } for _, p := range _q.predicates { 
p(selector) } @@ -745,6 +756,32 @@ func (_q *UserSubscriptionQuery) sqlQuery(ctx context.Context) *sql.Selector { return selector } +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *UserSubscriptionQuery) ForUpdate(opts ...sql.LockOption) *UserSubscriptionQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *UserSubscriptionQuery) ForShare(opts ...sql.LockOption) *UserSubscriptionQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + // UserSubscriptionGroupBy is the group-by builder for UserSubscription entities. 
type UserSubscriptionGroupBy struct { selector diff --git a/backend/go.mod b/backend/go.mod index 9ac48305..4ac6ba14 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -8,9 +8,11 @@ require ( github.com/golang-jwt/jwt/v5 v5.2.2 github.com/google/uuid v1.6.0 github.com/google/wire v0.7.0 + github.com/gorilla/websocket v1.5.3 github.com/imroc/req/v3 v3.57.0 github.com/lib/pq v1.10.9 github.com/redis/go-redis/v9 v9.17.2 + github.com/shirou/gopsutil/v4 v4.25.6 github.com/spf13/viper v1.18.2 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go/modules/postgres v0.40.0 @@ -44,11 +46,13 @@ require ( github.com/containerd/platforms v0.2.1 // indirect github.com/cpuguy83/dockercfg v0.3.2 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/dgraph-io/ristretto v0.2.0 // indirect github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect github.com/distribution/reference v0.6.0 // indirect github.com/docker/docker v28.5.1+incompatible // indirect github.com/docker/go-connections v0.6.0 // indirect github.com/docker/go-units v0.5.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/ebitengine/purego v0.8.4 // indirect github.com/fatih/color v1.18.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect @@ -104,9 +108,9 @@ require ( github.com/quic-go/quic-go v0.57.1 // indirect github.com/refraction-networking/utls v1.8.1 // indirect github.com/rivo/uniseg v0.2.0 // indirect + github.com/robfig/cron/v3 v3.0.1 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/shirou/gopsutil/v4 v4.25.6 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect diff --git a/backend/go.sum b/backend/go.sum index 38e2b53e..415e73a7 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -51,6 +51,8 @@ 
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgraph-io/ristretto v0.2.0 h1:XAfl+7cmoUDWW/2Lx8TGZQjjxIQ2Ley9DSf52dru4WE= +github.com/dgraph-io/ristretto v0.2.0/go.mod h1:8uBHCU/PBV4Ag0CJrP47b9Ofby5dqWNh4FicAdoqFNU= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= @@ -61,6 +63,8 @@ github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pM github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM= @@ -113,6 +117,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4= github.com/google/wire 
v0.7.0/go.mod h1:n6YbUQD9cPKTnHXEBN2DXlOp/mVADhVErcMFb0v3J18= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= @@ -220,6 +226,8 @@ github.com/refraction-networking/utls v1.8.1 h1:yNY1kapmQU8JeM1sSw2H2asfTIwWxIkr github.com/refraction-networking/utls v1.8.1/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM= github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index dfa5e2f4..3bd72608 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -19,7 +19,9 @@ const ( RunModeSimple = "simple" ) -const DefaultCSPPolicy = "default-src 'self'; script-src 'self' https://challenges.cloudflare.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https:; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' https:; frame-src https://challenges.cloudflare.com; frame-ancestors 'none'; base-uri 'self'; form-action 'self'" +// DefaultCSPPolicy is the default 
Content-Security-Policy with nonce support +// __CSP_NONCE__ will be replaced with actual nonce at request time by the SecurityHeaders middleware +const DefaultCSPPolicy = "default-src 'self'; script-src 'self' __CSP_NONCE__ https://challenges.cloudflare.com https://static.cloudflareinsights.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https:; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' https:; frame-src https://challenges.cloudflare.com; frame-ancestors 'none'; base-uri 'self'; form-action 'self'" // 连接池隔离策略常量 // 用于控制上游 HTTP 连接池的隔离粒度,影响连接复用和资源消耗 @@ -36,33 +38,29 @@ const ( ) type Config struct { - Server ServerConfig `mapstructure:"server"` - CORS CORSConfig `mapstructure:"cors"` - Security SecurityConfig `mapstructure:"security"` - Billing BillingConfig `mapstructure:"billing"` - Turnstile TurnstileConfig `mapstructure:"turnstile"` - Database DatabaseConfig `mapstructure:"database"` - Redis RedisConfig `mapstructure:"redis"` - JWT JWTConfig `mapstructure:"jwt"` - LinuxDo LinuxDoConnectConfig `mapstructure:"linuxdo_connect"` - Default DefaultConfig `mapstructure:"default"` - RateLimit RateLimitConfig `mapstructure:"rate_limit"` - Pricing PricingConfig `mapstructure:"pricing"` - Gateway GatewayConfig `mapstructure:"gateway"` - Concurrency ConcurrencyConfig `mapstructure:"concurrency"` - TokenRefresh TokenRefreshConfig `mapstructure:"token_refresh"` - RunMode string `mapstructure:"run_mode" yaml:"run_mode"` - Timezone string `mapstructure:"timezone"` // e.g. 
"Asia/Shanghai", "UTC" - Gemini GeminiConfig `mapstructure:"gemini"` - Update UpdateConfig `mapstructure:"update"` -} - -// UpdateConfig 在线更新相关配置 -type UpdateConfig struct { - // ProxyURL 用于访问 GitHub 的代理地址 - // 支持 http/https/socks5/socks5h 协议 - // 例如: "http://127.0.0.1:7890", "socks5://127.0.0.1:1080" - ProxyURL string `mapstructure:"proxy_url"` + Server ServerConfig `mapstructure:"server"` + CORS CORSConfig `mapstructure:"cors"` + Security SecurityConfig `mapstructure:"security"` + Billing BillingConfig `mapstructure:"billing"` + Turnstile TurnstileConfig `mapstructure:"turnstile"` + Database DatabaseConfig `mapstructure:"database"` + Redis RedisConfig `mapstructure:"redis"` + Ops OpsConfig `mapstructure:"ops"` + JWT JWTConfig `mapstructure:"jwt"` + LinuxDo LinuxDoConnectConfig `mapstructure:"linuxdo_connect"` + Default DefaultConfig `mapstructure:"default"` + RateLimit RateLimitConfig `mapstructure:"rate_limit"` + Pricing PricingConfig `mapstructure:"pricing"` + Gateway GatewayConfig `mapstructure:"gateway"` + APIKeyAuth APIKeyAuthCacheConfig `mapstructure:"api_key_auth_cache"` + Dashboard DashboardCacheConfig `mapstructure:"dashboard_cache"` + DashboardAgg DashboardAggregationConfig `mapstructure:"dashboard_aggregation"` + Concurrency ConcurrencyConfig `mapstructure:"concurrency"` + TokenRefresh TokenRefreshConfig `mapstructure:"token_refresh"` + RunMode string `mapstructure:"run_mode" yaml:"run_mode"` + Timezone string `mapstructure:"timezone"` // e.g. 
"Asia/Shanghai", "UTC" + Gemini GeminiConfig `mapstructure:"gemini"` + Update UpdateConfig `mapstructure:"update"` } type GeminiConfig struct { @@ -87,6 +85,33 @@ type GeminiTierQuotaConfig struct { CooldownMinutes *int `mapstructure:"cooldown_minutes" json:"cooldown_minutes"` } +type UpdateConfig struct { + // ProxyURL 用于访问 GitHub 的代理地址 + // 支持 http/https/socks5/socks5h 协议 + // 例如: "http://127.0.0.1:7890", "socks5://127.0.0.1:1080" + ProxyURL string `mapstructure:"proxy_url"` +} + +type LinuxDoConnectConfig struct { + Enabled bool `mapstructure:"enabled"` + ClientID string `mapstructure:"client_id"` + ClientSecret string `mapstructure:"client_secret"` + AuthorizeURL string `mapstructure:"authorize_url"` + TokenURL string `mapstructure:"token_url"` + UserInfoURL string `mapstructure:"userinfo_url"` + Scopes string `mapstructure:"scopes"` + RedirectURL string `mapstructure:"redirect_url"` // 后端回调地址(需在提供方后台登记) + FrontendRedirectURL string `mapstructure:"frontend_redirect_url"` // 前端接收 token 的路由(默认:/auth/linuxdo/callback) + TokenAuthMethod string `mapstructure:"token_auth_method"` // client_secret_post / client_secret_basic / none + UsePKCE bool `mapstructure:"use_pkce"` + + // 可选:用于从 userinfo JSON 中提取字段的 gjson 路径。 + // 为空时,服务端会尝试一组常见字段名。 + UserInfoEmailPath string `mapstructure:"userinfo_email_path"` + UserInfoIDPath string `mapstructure:"userinfo_id_path"` + UserInfoUsernamePath string `mapstructure:"userinfo_username_path"` +} + // TokenRefreshConfig OAuth token自动刷新配置 type TokenRefreshConfig struct { // 是否启用自动刷新 @@ -209,6 +234,10 @@ type GatewayConfig struct { // ConcurrencySlotTTLMinutes: 并发槽位过期时间(分钟) // 应大于最长 LLM 请求时间,防止请求完成前槽位过期 ConcurrencySlotTTLMinutes int `mapstructure:"concurrency_slot_ttl_minutes"` + // SessionIdleTimeoutMinutes: 会话空闲超时时间(分钟),默认 5 分钟 + // 用于 Anthropic OAuth/SetupToken 账号的会话数量限制功能 + // 空闲超过此时间的会话将被自动释放 + SessionIdleTimeoutMinutes int `mapstructure:"session_idle_timeout_minutes"` // StreamDataIntervalTimeout: 流数据间隔超时(秒),0表示禁用 
StreamDataIntervalTimeout int `mapstructure:"stream_data_interval_timeout"` @@ -258,6 +287,29 @@ type GatewaySchedulingConfig struct { // 过期槽位清理周期(0 表示禁用) SlotCleanupInterval time.Duration `mapstructure:"slot_cleanup_interval"` + + // 受控回源配置 + DbFallbackEnabled bool `mapstructure:"db_fallback_enabled"` + // 受控回源超时(秒),0 表示不额外收紧超时 + DbFallbackTimeoutSeconds int `mapstructure:"db_fallback_timeout_seconds"` + // 受控回源限流(实例级 QPS),0 表示不限制 + DbFallbackMaxQPS int `mapstructure:"db_fallback_max_qps"` + + // Outbox 轮询与滞后阈值配置 + // Outbox 轮询周期(秒) + OutboxPollIntervalSeconds int `mapstructure:"outbox_poll_interval_seconds"` + // Outbox 滞后告警阈值(秒) + OutboxLagWarnSeconds int `mapstructure:"outbox_lag_warn_seconds"` + // Outbox 触发强制重建阈值(秒) + OutboxLagRebuildSeconds int `mapstructure:"outbox_lag_rebuild_seconds"` + // Outbox 连续滞后触发次数 + OutboxLagRebuildFailures int `mapstructure:"outbox_lag_rebuild_failures"` + // Outbox 积压触发重建阈值(行数) + OutboxBacklogRebuildRows int `mapstructure:"outbox_backlog_rebuild_rows"` + + // 全量重建周期配置 + // 全量重建周期(秒),0 表示禁用 + FullRebuildIntervalSeconds int `mapstructure:"full_rebuild_interval_seconds"` } func (s *ServerConfig) Address() string { @@ -285,6 +337,13 @@ type DatabaseConfig struct { } func (d *DatabaseConfig) DSN() string { + // 当密码为空时不包含 password 参数,避免 libpq 解析错误 + if d.Password == "" { + return fmt.Sprintf( + "host=%s port=%d user=%s dbname=%s sslmode=%s", + d.Host, d.Port, d.User, d.DBName, d.SSLMode, + ) + } return fmt.Sprintf( "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s", d.Host, d.Port, d.User, d.Password, d.DBName, d.SSLMode, @@ -296,6 +355,13 @@ func (d *DatabaseConfig) DSNWithTimezone(tz string) string { if tz == "" { tz = "Asia/Shanghai" } + // 当密码为空时不包含 password 参数,避免 libpq 解析错误 + if d.Password == "" { + return fmt.Sprintf( + "host=%s port=%d user=%s dbname=%s sslmode=%s TimeZone=%s", + d.Host, d.Port, d.User, d.DBName, d.SSLMode, tz, + ) + } return fmt.Sprintf( "host=%s port=%d user=%s password=%s dbname=%s sslmode=%s 
TimeZone=%s", d.Host, d.Port, d.User, d.Password, d.DBName, d.SSLMode, tz, @@ -326,6 +392,47 @@ func (r *RedisConfig) Address() string { return fmt.Sprintf("%s:%d", r.Host, r.Port) } +type OpsConfig struct { + // Enabled controls whether ops features should run. + // + // NOTE: vNext still has a DB-backed feature flag (ops_monitoring_enabled) for runtime on/off. + // This config flag is the "hard switch" for deployments that want to disable ops completely. + Enabled bool `mapstructure:"enabled"` + + // UsePreaggregatedTables prefers ops_metrics_hourly/daily for long-window dashboard queries. + UsePreaggregatedTables bool `mapstructure:"use_preaggregated_tables"` + + // Cleanup controls periodic deletion of old ops data to prevent unbounded growth. + Cleanup OpsCleanupConfig `mapstructure:"cleanup"` + + // MetricsCollectorCache controls Redis caching for expensive per-window collector queries. + MetricsCollectorCache OpsMetricsCollectorCacheConfig `mapstructure:"metrics_collector_cache"` + + // Pre-aggregation configuration. + Aggregation OpsAggregationConfig `mapstructure:"aggregation"` +} + +type OpsCleanupConfig struct { + Enabled bool `mapstructure:"enabled"` + Schedule string `mapstructure:"schedule"` + + // Retention days (0 disables that cleanup target). + // + // vNext requirement: default 30 days across ops datasets. 
+ ErrorLogRetentionDays int `mapstructure:"error_log_retention_days"` + MinuteMetricsRetentionDays int `mapstructure:"minute_metrics_retention_days"` + HourlyMetricsRetentionDays int `mapstructure:"hourly_metrics_retention_days"` +} + +type OpsAggregationConfig struct { + Enabled bool `mapstructure:"enabled"` +} + +type OpsMetricsCollectorCacheConfig struct { + Enabled bool `mapstructure:"enabled"` + TTL time.Duration `mapstructure:"ttl"` +} + type JWTConfig struct { Secret string `mapstructure:"secret"` ExpireHour int `mapstructure:"expire_hour"` @@ -335,30 +442,6 @@ type TurnstileConfig struct { Required bool `mapstructure:"required"` } -// LinuxDoConnectConfig 用于 LinuxDo Connect OAuth 登录(终端用户 SSO)。 -// -// 注意:这与上游账号的 OAuth(例如 OpenAI/Gemini 账号接入)不是一回事。 -// 这里是用于登录 Sub2API 本身的用户体系。 -type LinuxDoConnectConfig struct { - Enabled bool `mapstructure:"enabled"` - ClientID string `mapstructure:"client_id"` - ClientSecret string `mapstructure:"client_secret"` - AuthorizeURL string `mapstructure:"authorize_url"` - TokenURL string `mapstructure:"token_url"` - UserInfoURL string `mapstructure:"userinfo_url"` - Scopes string `mapstructure:"scopes"` - RedirectURL string `mapstructure:"redirect_url"` // 后端回调地址(需在提供方后台登记) - FrontendRedirectURL string `mapstructure:"frontend_redirect_url"` // 前端接收 token 的路由(默认:/auth/linuxdo/callback) - TokenAuthMethod string `mapstructure:"token_auth_method"` // client_secret_post / client_secret_basic / none - UsePKCE bool `mapstructure:"use_pkce"` - - // 可选:用于从 userinfo JSON 中提取字段的 gjson 路径。 - // 为空时,服务端会尝试一组常见字段名。 - UserInfoEmailPath string `mapstructure:"userinfo_email_path"` - UserInfoIDPath string `mapstructure:"userinfo_id_path"` - UserInfoUsernamePath string `mapstructure:"userinfo_username_path"` -} - type DefaultConfig struct { AdminEmail string `mapstructure:"admin_email"` AdminPassword string `mapstructure:"admin_password"` @@ -372,6 +455,55 @@ type RateLimitConfig struct { OverloadCooldownMinutes int 
`mapstructure:"overload_cooldown_minutes"` // 529过载冷却时间(分钟) } +// APIKeyAuthCacheConfig API Key 认证缓存配置 +type APIKeyAuthCacheConfig struct { + L1Size int `mapstructure:"l1_size"` + L1TTLSeconds int `mapstructure:"l1_ttl_seconds"` + L2TTLSeconds int `mapstructure:"l2_ttl_seconds"` + NegativeTTLSeconds int `mapstructure:"negative_ttl_seconds"` + JitterPercent int `mapstructure:"jitter_percent"` + Singleflight bool `mapstructure:"singleflight"` +} + +// DashboardCacheConfig 仪表盘统计缓存配置 +type DashboardCacheConfig struct { + // Enabled: 是否启用仪表盘缓存 + Enabled bool `mapstructure:"enabled"` + // KeyPrefix: Redis key 前缀,用于多环境隔离 + KeyPrefix string `mapstructure:"key_prefix"` + // StatsFreshTTLSeconds: 缓存命中认为“新鲜”的时间窗口(秒) + StatsFreshTTLSeconds int `mapstructure:"stats_fresh_ttl_seconds"` + // StatsTTLSeconds: Redis 缓存总 TTL(秒) + StatsTTLSeconds int `mapstructure:"stats_ttl_seconds"` + // StatsRefreshTimeoutSeconds: 异步刷新超时(秒) + StatsRefreshTimeoutSeconds int `mapstructure:"stats_refresh_timeout_seconds"` +} + +// DashboardAggregationConfig 仪表盘预聚合配置 +type DashboardAggregationConfig struct { + // Enabled: 是否启用预聚合作业 + Enabled bool `mapstructure:"enabled"` + // IntervalSeconds: 聚合刷新间隔(秒) + IntervalSeconds int `mapstructure:"interval_seconds"` + // LookbackSeconds: 回看窗口(秒) + LookbackSeconds int `mapstructure:"lookback_seconds"` + // BackfillEnabled: 是否允许全量回填 + BackfillEnabled bool `mapstructure:"backfill_enabled"` + // BackfillMaxDays: 回填最大跨度(天) + BackfillMaxDays int `mapstructure:"backfill_max_days"` + // Retention: 各表保留窗口(天) + Retention DashboardAggregationRetentionConfig `mapstructure:"retention"` + // RecomputeDays: 启动时重算最近 N 天 + RecomputeDays int `mapstructure:"recompute_days"` +} + +// DashboardAggregationRetentionConfig 预聚合保留窗口 +type DashboardAggregationRetentionConfig struct { + UsageLogsDays int `mapstructure:"usage_logs_days"` + HourlyDays int `mapstructure:"hourly_days"` + DailyDays int `mapstructure:"daily_days"` +} + func NormalizeRunMode(value string) string { normalized := 
strings.ToLower(strings.TrimSpace(value)) switch normalized { @@ -437,6 +569,7 @@ func Load() (*Config, error) { cfg.LinuxDo.UserInfoEmailPath = strings.TrimSpace(cfg.LinuxDo.UserInfoEmailPath) cfg.LinuxDo.UserInfoIDPath = strings.TrimSpace(cfg.LinuxDo.UserInfoIDPath) cfg.LinuxDo.UserInfoUsernamePath = strings.TrimSpace(cfg.LinuxDo.UserInfoUsernamePath) + cfg.Dashboard.KeyPrefix = strings.TrimSpace(cfg.Dashboard.KeyPrefix) cfg.CORS.AllowedOrigins = normalizeStringSlice(cfg.CORS.AllowedOrigins) cfg.Security.ResponseHeaders.AdditionalAllowed = normalizeStringSlice(cfg.Security.ResponseHeaders.AdditionalAllowed) cfg.Security.ResponseHeaders.ForceRemove = normalizeStringSlice(cfg.Security.ResponseHeaders.ForceRemove) @@ -475,81 +608,6 @@ func Load() (*Config, error) { return &cfg, nil } -// ValidateAbsoluteHTTPURL 校验一个绝对 http(s) URL(禁止 fragment)。 -func ValidateAbsoluteHTTPURL(raw string) error { - raw = strings.TrimSpace(raw) - if raw == "" { - return fmt.Errorf("empty url") - } - u, err := url.Parse(raw) - if err != nil { - return err - } - if !u.IsAbs() { - return fmt.Errorf("must be absolute") - } - if !isHTTPScheme(u.Scheme) { - return fmt.Errorf("unsupported scheme: %s", u.Scheme) - } - if strings.TrimSpace(u.Host) == "" { - return fmt.Errorf("missing host") - } - if u.Fragment != "" { - return fmt.Errorf("must not include fragment") - } - return nil -} - -// ValidateFrontendRedirectURL 校验前端回调地址: -// - 允许同源相对路径(以 / 开头) -// - 或绝对 http(s) URL(禁止 fragment) -func ValidateFrontendRedirectURL(raw string) error { - raw = strings.TrimSpace(raw) - if raw == "" { - return fmt.Errorf("empty url") - } - if strings.ContainsAny(raw, "\r\n") { - return fmt.Errorf("contains invalid characters") - } - if strings.HasPrefix(raw, "/") { - if strings.HasPrefix(raw, "//") { - return fmt.Errorf("must not start with //") - } - return nil - } - u, err := url.Parse(raw) - if err != nil { - return err - } - if !u.IsAbs() { - return fmt.Errorf("must be absolute http(s) url or relative path") 
- } - if !isHTTPScheme(u.Scheme) { - return fmt.Errorf("unsupported scheme: %s", u.Scheme) - } - if strings.TrimSpace(u.Host) == "" { - return fmt.Errorf("missing host") - } - if u.Fragment != "" { - return fmt.Errorf("must not include fragment") - } - return nil -} - -func isHTTPScheme(scheme string) bool { - return strings.EqualFold(scheme, "http") || strings.EqualFold(scheme, "https") -} - -func warnIfInsecureURL(field, raw string) { - u, err := url.Parse(strings.TrimSpace(raw)) - if err != nil { - return - } - if strings.EqualFold(u.Scheme, "http") { - log.Printf("Warning: %s uses http scheme; use https in production to avoid token leakage.", field) - } -} - func setDefaults() { viper.SetDefault("run_mode", RunModeStandard) @@ -599,7 +657,7 @@ func setDefaults() { // Turnstile viper.SetDefault("turnstile.required", false) - // LinuxDo Connect OAuth 登录(终端用户 SSO) + // LinuxDo Connect OAuth 登录 viper.SetDefault("linuxdo_connect.enabled", false) viper.SetDefault("linuxdo_connect.client_id", "") viper.SetDefault("linuxdo_connect.client_secret", "") @@ -638,6 +696,20 @@ func setDefaults() { viper.SetDefault("redis.pool_size", 128) viper.SetDefault("redis.min_idle_conns", 10) + // Ops (vNext) + viper.SetDefault("ops.enabled", true) + viper.SetDefault("ops.use_preaggregated_tables", false) + viper.SetDefault("ops.cleanup.enabled", true) + viper.SetDefault("ops.cleanup.schedule", "0 2 * * *") + // Retention days: vNext defaults to 30 days across ops datasets. + viper.SetDefault("ops.cleanup.error_log_retention_days", 30) + viper.SetDefault("ops.cleanup.minute_metrics_retention_days", 30) + viper.SetDefault("ops.cleanup.hourly_metrics_retention_days", 30) + viper.SetDefault("ops.aggregation.enabled", true) + viper.SetDefault("ops.metrics_collector_cache.enabled", true) + // TTL should be slightly larger than collection interval (1m) to maximize cross-replica cache hits. 
+ viper.SetDefault("ops.metrics_collector_cache.ttl", 65*time.Second) + // JWT viper.SetDefault("jwt.secret", "") viper.SetDefault("jwt.expire_hour", 24) @@ -666,9 +738,35 @@ func setDefaults() { // Timezone (default to Asia/Shanghai for Chinese users) viper.SetDefault("timezone", "Asia/Shanghai") + // API Key auth cache + viper.SetDefault("api_key_auth_cache.l1_size", 65535) + viper.SetDefault("api_key_auth_cache.l1_ttl_seconds", 15) + viper.SetDefault("api_key_auth_cache.l2_ttl_seconds", 300) + viper.SetDefault("api_key_auth_cache.negative_ttl_seconds", 30) + viper.SetDefault("api_key_auth_cache.jitter_percent", 10) + viper.SetDefault("api_key_auth_cache.singleflight", true) + + // Dashboard cache + viper.SetDefault("dashboard_cache.enabled", true) + viper.SetDefault("dashboard_cache.key_prefix", "sub2api:") + viper.SetDefault("dashboard_cache.stats_fresh_ttl_seconds", 15) + viper.SetDefault("dashboard_cache.stats_ttl_seconds", 30) + viper.SetDefault("dashboard_cache.stats_refresh_timeout_seconds", 30) + + // Dashboard aggregation + viper.SetDefault("dashboard_aggregation.enabled", true) + viper.SetDefault("dashboard_aggregation.interval_seconds", 60) + viper.SetDefault("dashboard_aggregation.lookback_seconds", 120) + viper.SetDefault("dashboard_aggregation.backfill_enabled", false) + viper.SetDefault("dashboard_aggregation.backfill_max_days", 31) + viper.SetDefault("dashboard_aggregation.retention.usage_logs_days", 90) + viper.SetDefault("dashboard_aggregation.retention.hourly_days", 180) + viper.SetDefault("dashboard_aggregation.retention.daily_days", 730) + viper.SetDefault("dashboard_aggregation.recompute_days", 2) + // Gateway viper.SetDefault("gateway.response_header_timeout", 600) // 600秒(10分钟)等待上游响应头,LLM高负载时可能排队较久 - viper.SetDefault("gateway.log_upstream_error_body", false) + viper.SetDefault("gateway.log_upstream_error_body", true) viper.SetDefault("gateway.log_upstream_error_body_max_bytes", 2048) viper.SetDefault("gateway.inject_beta_for_apikey", 
false) viper.SetDefault("gateway.failover_on_400", false) @@ -689,12 +787,21 @@ func setDefaults() { viper.SetDefault("gateway.stream_keepalive_interval", 10) viper.SetDefault("gateway.max_line_size", 40*1024*1024) viper.SetDefault("gateway.scheduling.sticky_session_max_waiting", 3) - viper.SetDefault("gateway.scheduling.sticky_session_wait_timeout", 45*time.Second) + viper.SetDefault("gateway.scheduling.sticky_session_wait_timeout", 120*time.Second) viper.SetDefault("gateway.scheduling.fallback_wait_timeout", 30*time.Second) viper.SetDefault("gateway.scheduling.fallback_max_waiting", 100) viper.SetDefault("gateway.scheduling.fallback_selection_mode", "last_used") viper.SetDefault("gateway.scheduling.load_batch_enabled", true) viper.SetDefault("gateway.scheduling.slot_cleanup_interval", 30*time.Second) + viper.SetDefault("gateway.scheduling.db_fallback_enabled", true) + viper.SetDefault("gateway.scheduling.db_fallback_timeout_seconds", 0) + viper.SetDefault("gateway.scheduling.db_fallback_max_qps", 0) + viper.SetDefault("gateway.scheduling.outbox_poll_interval_seconds", 1) + viper.SetDefault("gateway.scheduling.outbox_lag_warn_seconds", 5) + viper.SetDefault("gateway.scheduling.outbox_lag_rebuild_seconds", 10) + viper.SetDefault("gateway.scheduling.outbox_lag_rebuild_failures", 3) + viper.SetDefault("gateway.scheduling.outbox_backlog_rebuild_rows", 10000) + viper.SetDefault("gateway.scheduling.full_rebuild_interval_seconds", 300) viper.SetDefault("concurrency.ping_interval", 10) // TokenRefresh @@ -711,10 +818,6 @@ func setDefaults() { viper.SetDefault("gemini.oauth.client_secret", "") viper.SetDefault("gemini.oauth.scopes", "") viper.SetDefault("gemini.quota.policy", "") - - // Update - 在线更新配置 - // 代理地址为空表示直连 GitHub(适用于海外服务器) - viper.SetDefault("update.proxy_url", "") } func (c *Config) Validate() error { @@ -755,7 +858,8 @@ func (c *Config) Validate() error { if method == "none" && !c.LinuxDo.UsePKCE { return fmt.Errorf("linuxdo_connect.use_pkce must be true when 
linuxdo_connect.token_auth_method=none") } - if (method == "" || method == "client_secret_post" || method == "client_secret_basic") && strings.TrimSpace(c.LinuxDo.ClientSecret) == "" { + if (method == "" || method == "client_secret_post" || method == "client_secret_basic") && + strings.TrimSpace(c.LinuxDo.ClientSecret) == "" { return fmt.Errorf("linuxdo_connect.client_secret is required when linuxdo_connect.enabled=true and token_auth_method is client_secret_post/client_secret_basic") } if strings.TrimSpace(c.LinuxDo.FrontendRedirectURL) == "" { @@ -828,6 +932,78 @@ func (c *Config) Validate() error { if c.Redis.MinIdleConns > c.Redis.PoolSize { return fmt.Errorf("redis.min_idle_conns cannot exceed redis.pool_size") } + if c.Dashboard.Enabled { + if c.Dashboard.StatsFreshTTLSeconds <= 0 { + return fmt.Errorf("dashboard_cache.stats_fresh_ttl_seconds must be positive") + } + if c.Dashboard.StatsTTLSeconds <= 0 { + return fmt.Errorf("dashboard_cache.stats_ttl_seconds must be positive") + } + if c.Dashboard.StatsRefreshTimeoutSeconds <= 0 { + return fmt.Errorf("dashboard_cache.stats_refresh_timeout_seconds must be positive") + } + if c.Dashboard.StatsFreshTTLSeconds > c.Dashboard.StatsTTLSeconds { + return fmt.Errorf("dashboard_cache.stats_fresh_ttl_seconds must be <= dashboard_cache.stats_ttl_seconds") + } + } else { + if c.Dashboard.StatsFreshTTLSeconds < 0 { + return fmt.Errorf("dashboard_cache.stats_fresh_ttl_seconds must be non-negative") + } + if c.Dashboard.StatsTTLSeconds < 0 { + return fmt.Errorf("dashboard_cache.stats_ttl_seconds must be non-negative") + } + if c.Dashboard.StatsRefreshTimeoutSeconds < 0 { + return fmt.Errorf("dashboard_cache.stats_refresh_timeout_seconds must be non-negative") + } + } + if c.DashboardAgg.Enabled { + if c.DashboardAgg.IntervalSeconds <= 0 { + return fmt.Errorf("dashboard_aggregation.interval_seconds must be positive") + } + if c.DashboardAgg.LookbackSeconds < 0 { + return fmt.Errorf("dashboard_aggregation.lookback_seconds must 
be non-negative") + } + if c.DashboardAgg.BackfillMaxDays < 0 { + return fmt.Errorf("dashboard_aggregation.backfill_max_days must be non-negative") + } + if c.DashboardAgg.BackfillEnabled && c.DashboardAgg.BackfillMaxDays == 0 { + return fmt.Errorf("dashboard_aggregation.backfill_max_days must be positive") + } + if c.DashboardAgg.Retention.UsageLogsDays <= 0 { + return fmt.Errorf("dashboard_aggregation.retention.usage_logs_days must be positive") + } + if c.DashboardAgg.Retention.HourlyDays <= 0 { + return fmt.Errorf("dashboard_aggregation.retention.hourly_days must be positive") + } + if c.DashboardAgg.Retention.DailyDays <= 0 { + return fmt.Errorf("dashboard_aggregation.retention.daily_days must be positive") + } + if c.DashboardAgg.RecomputeDays < 0 { + return fmt.Errorf("dashboard_aggregation.recompute_days must be non-negative") + } + } else { + if c.DashboardAgg.IntervalSeconds < 0 { + return fmt.Errorf("dashboard_aggregation.interval_seconds must be non-negative") + } + if c.DashboardAgg.LookbackSeconds < 0 { + return fmt.Errorf("dashboard_aggregation.lookback_seconds must be non-negative") + } + if c.DashboardAgg.BackfillMaxDays < 0 { + return fmt.Errorf("dashboard_aggregation.backfill_max_days must be non-negative") + } + if c.DashboardAgg.Retention.UsageLogsDays < 0 { + return fmt.Errorf("dashboard_aggregation.retention.usage_logs_days must be non-negative") + } + if c.DashboardAgg.Retention.HourlyDays < 0 { + return fmt.Errorf("dashboard_aggregation.retention.hourly_days must be non-negative") + } + if c.DashboardAgg.Retention.DailyDays < 0 { + return fmt.Errorf("dashboard_aggregation.retention.daily_days must be non-negative") + } + if c.DashboardAgg.RecomputeDays < 0 { + return fmt.Errorf("dashboard_aggregation.recompute_days must be non-negative") + } + } if c.Gateway.MaxBodySize <= 0 { return fmt.Errorf("gateway.max_body_size must be positive") } @@ -898,6 +1074,50 @@ func (c *Config) Validate() error { if c.Gateway.Scheduling.SlotCleanupInterval < 
0 { return fmt.Errorf("gateway.scheduling.slot_cleanup_interval must be non-negative") } + if c.Gateway.Scheduling.DbFallbackTimeoutSeconds < 0 { + return fmt.Errorf("gateway.scheduling.db_fallback_timeout_seconds must be non-negative") + } + if c.Gateway.Scheduling.DbFallbackMaxQPS < 0 { + return fmt.Errorf("gateway.scheduling.db_fallback_max_qps must be non-negative") + } + if c.Gateway.Scheduling.OutboxPollIntervalSeconds <= 0 { + return fmt.Errorf("gateway.scheduling.outbox_poll_interval_seconds must be positive") + } + if c.Gateway.Scheduling.OutboxLagWarnSeconds < 0 { + return fmt.Errorf("gateway.scheduling.outbox_lag_warn_seconds must be non-negative") + } + if c.Gateway.Scheduling.OutboxLagRebuildSeconds < 0 { + return fmt.Errorf("gateway.scheduling.outbox_lag_rebuild_seconds must be non-negative") + } + if c.Gateway.Scheduling.OutboxLagRebuildFailures <= 0 { + return fmt.Errorf("gateway.scheduling.outbox_lag_rebuild_failures must be positive") + } + if c.Gateway.Scheduling.OutboxBacklogRebuildRows < 0 { + return fmt.Errorf("gateway.scheduling.outbox_backlog_rebuild_rows must be non-negative") + } + if c.Gateway.Scheduling.FullRebuildIntervalSeconds < 0 { + return fmt.Errorf("gateway.scheduling.full_rebuild_interval_seconds must be non-negative") + } + if c.Gateway.Scheduling.OutboxLagWarnSeconds > 0 && + c.Gateway.Scheduling.OutboxLagRebuildSeconds > 0 && + c.Gateway.Scheduling.OutboxLagRebuildSeconds < c.Gateway.Scheduling.OutboxLagWarnSeconds { + return fmt.Errorf("gateway.scheduling.outbox_lag_rebuild_seconds must be >= outbox_lag_warn_seconds") + } + if c.Ops.MetricsCollectorCache.TTL < 0 { + return fmt.Errorf("ops.metrics_collector_cache.ttl must be non-negative") + } + if c.Ops.Cleanup.ErrorLogRetentionDays < 0 { + return fmt.Errorf("ops.cleanup.error_log_retention_days must be non-negative") + } + if c.Ops.Cleanup.MinuteMetricsRetentionDays < 0 { + return fmt.Errorf("ops.cleanup.minute_metrics_retention_days must be non-negative") + } + if 
c.Ops.Cleanup.HourlyMetricsRetentionDays < 0 { + return fmt.Errorf("ops.cleanup.hourly_metrics_retention_days must be non-negative") + } + if c.Ops.Cleanup.Enabled && strings.TrimSpace(c.Ops.Cleanup.Schedule) == "" { + return fmt.Errorf("ops.cleanup.schedule is required when ops.cleanup.enabled=true") + } if c.Concurrency.PingInterval < 5 || c.Concurrency.PingInterval > 30 { return fmt.Errorf("concurrency.ping_interval must be between 5-30 seconds") } @@ -974,3 +1194,77 @@ func GetServerAddress() string { port := v.GetInt("server.port") return fmt.Sprintf("%s:%d", host, port) } + +// ValidateAbsoluteHTTPURL 验证是否为有效的绝对 HTTP(S) URL +func ValidateAbsoluteHTTPURL(raw string) error { + raw = strings.TrimSpace(raw) + if raw == "" { + return fmt.Errorf("empty url") + } + u, err := url.Parse(raw) + if err != nil { + return err + } + if !u.IsAbs() { + return fmt.Errorf("must be absolute") + } + if !isHTTPScheme(u.Scheme) { + return fmt.Errorf("unsupported scheme: %s", u.Scheme) + } + if strings.TrimSpace(u.Host) == "" { + return fmt.Errorf("missing host") + } + if u.Fragment != "" { + return fmt.Errorf("must not include fragment") + } + return nil +} + +// ValidateFrontendRedirectURL 验证前端重定向 URL(可以是绝对 URL 或相对路径) +func ValidateFrontendRedirectURL(raw string) error { + raw = strings.TrimSpace(raw) + if raw == "" { + return fmt.Errorf("empty url") + } + if strings.ContainsAny(raw, "\r\n") { + return fmt.Errorf("contains invalid characters") + } + if strings.HasPrefix(raw, "/") { + if strings.HasPrefix(raw, "//") { + return fmt.Errorf("must not start with //") + } + return nil + } + u, err := url.Parse(raw) + if err != nil { + return err + } + if !u.IsAbs() { + return fmt.Errorf("must be absolute http(s) url or relative path") + } + if !isHTTPScheme(u.Scheme) { + return fmt.Errorf("unsupported scheme: %s", u.Scheme) + } + if strings.TrimSpace(u.Host) == "" { + return fmt.Errorf("missing host") + } + if u.Fragment != "" { + return fmt.Errorf("must not include fragment") + } + 
return nil +} + +// isHTTPScheme 检查是否为 HTTP 或 HTTPS 协议 +func isHTTPScheme(scheme string) bool { + return strings.EqualFold(scheme, "http") || strings.EqualFold(scheme, "https") +} + +func warnIfInsecureURL(field, raw string) { + u, err := url.Parse(strings.TrimSpace(raw)) + if err != nil { + return + } + if strings.EqualFold(u.Scheme, "http") { + log.Printf("Warning: %s uses http scheme; use https in production to avoid token leakage.", field) + } +} diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go index a39d41f9..4637989e 100644 --- a/backend/internal/config/config_test.go +++ b/backend/internal/config/config_test.go @@ -39,8 +39,8 @@ func TestLoadDefaultSchedulingConfig(t *testing.T) { if cfg.Gateway.Scheduling.StickySessionMaxWaiting != 3 { t.Fatalf("StickySessionMaxWaiting = %d, want 3", cfg.Gateway.Scheduling.StickySessionMaxWaiting) } - if cfg.Gateway.Scheduling.StickySessionWaitTimeout != 45*time.Second { - t.Fatalf("StickySessionWaitTimeout = %v, want 45s", cfg.Gateway.Scheduling.StickySessionWaitTimeout) + if cfg.Gateway.Scheduling.StickySessionWaitTimeout != 120*time.Second { + t.Fatalf("StickySessionWaitTimeout = %v, want 120s", cfg.Gateway.Scheduling.StickySessionWaitTimeout) } if cfg.Gateway.Scheduling.FallbackWaitTimeout != 30*time.Second { t.Fatalf("FallbackWaitTimeout = %v, want 30s", cfg.Gateway.Scheduling.FallbackWaitTimeout) @@ -141,3 +141,142 @@ func TestValidateLinuxDoPKCERequiredForPublicClient(t *testing.T) { t.Fatalf("Validate() expected use_pkce error, got: %v", err) } } + +func TestLoadDefaultDashboardCacheConfig(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if !cfg.Dashboard.Enabled { + t.Fatalf("Dashboard.Enabled = false, want true") + } + if cfg.Dashboard.KeyPrefix != "sub2api:" { + t.Fatalf("Dashboard.KeyPrefix = %q, want %q", cfg.Dashboard.KeyPrefix, "sub2api:") + } + if cfg.Dashboard.StatsFreshTTLSeconds != 15 { + 
t.Fatalf("Dashboard.StatsFreshTTLSeconds = %d, want 15", cfg.Dashboard.StatsFreshTTLSeconds) + } + if cfg.Dashboard.StatsTTLSeconds != 30 { + t.Fatalf("Dashboard.StatsTTLSeconds = %d, want 30", cfg.Dashboard.StatsTTLSeconds) + } + if cfg.Dashboard.StatsRefreshTimeoutSeconds != 30 { + t.Fatalf("Dashboard.StatsRefreshTimeoutSeconds = %d, want 30", cfg.Dashboard.StatsRefreshTimeoutSeconds) + } +} + +func TestValidateDashboardCacheConfigEnabled(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.Dashboard.Enabled = true + cfg.Dashboard.StatsFreshTTLSeconds = 10 + cfg.Dashboard.StatsTTLSeconds = 5 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for stats_fresh_ttl_seconds > stats_ttl_seconds, got nil") + } + if !strings.Contains(err.Error(), "dashboard_cache.stats_fresh_ttl_seconds") { + t.Fatalf("Validate() expected stats_fresh_ttl_seconds error, got: %v", err) + } +} + +func TestValidateDashboardCacheConfigDisabled(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.Dashboard.Enabled = false + cfg.Dashboard.StatsTTLSeconds = -1 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for negative stats_ttl_seconds, got nil") + } + if !strings.Contains(err.Error(), "dashboard_cache.stats_ttl_seconds") { + t.Fatalf("Validate() expected stats_ttl_seconds error, got: %v", err) + } +} + +func TestLoadDefaultDashboardAggregationConfig(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if !cfg.DashboardAgg.Enabled { + t.Fatalf("DashboardAgg.Enabled = false, want true") + } + if cfg.DashboardAgg.IntervalSeconds != 60 { + t.Fatalf("DashboardAgg.IntervalSeconds = %d, want 60", cfg.DashboardAgg.IntervalSeconds) + } + if cfg.DashboardAgg.LookbackSeconds != 120 { + t.Fatalf("DashboardAgg.LookbackSeconds = %d, want 120", 
cfg.DashboardAgg.LookbackSeconds) + } + if cfg.DashboardAgg.BackfillEnabled { + t.Fatalf("DashboardAgg.BackfillEnabled = true, want false") + } + if cfg.DashboardAgg.BackfillMaxDays != 31 { + t.Fatalf("DashboardAgg.BackfillMaxDays = %d, want 31", cfg.DashboardAgg.BackfillMaxDays) + } + if cfg.DashboardAgg.Retention.UsageLogsDays != 90 { + t.Fatalf("DashboardAgg.Retention.UsageLogsDays = %d, want 90", cfg.DashboardAgg.Retention.UsageLogsDays) + } + if cfg.DashboardAgg.Retention.HourlyDays != 180 { + t.Fatalf("DashboardAgg.Retention.HourlyDays = %d, want 180", cfg.DashboardAgg.Retention.HourlyDays) + } + if cfg.DashboardAgg.Retention.DailyDays != 730 { + t.Fatalf("DashboardAgg.Retention.DailyDays = %d, want 730", cfg.DashboardAgg.Retention.DailyDays) + } + if cfg.DashboardAgg.RecomputeDays != 2 { + t.Fatalf("DashboardAgg.RecomputeDays = %d, want 2", cfg.DashboardAgg.RecomputeDays) + } +} + +func TestValidateDashboardAggregationConfigDisabled(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.DashboardAgg.Enabled = false + cfg.DashboardAgg.IntervalSeconds = -1 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for negative dashboard_aggregation.interval_seconds, got nil") + } + if !strings.Contains(err.Error(), "dashboard_aggregation.interval_seconds") { + t.Fatalf("Validate() expected interval_seconds error, got: %v", err) + } +} + +func TestValidateDashboardAggregationBackfillMaxDays(t *testing.T) { + viper.Reset() + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + cfg.DashboardAgg.BackfillEnabled = true + cfg.DashboardAgg.BackfillMaxDays = 0 + err = cfg.Validate() + if err == nil { + t.Fatalf("Validate() expected error for dashboard_aggregation.backfill_max_days, got nil") + } + if !strings.Contains(err.Error(), "dashboard_aggregation.backfill_max_days") { + t.Fatalf("Validate() expected backfill_max_days error, got: %v", err) + } +} 
diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index 15ce8960..16fc86bb 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -44,6 +44,7 @@ type AccountHandler struct { accountTestService *service.AccountTestService concurrencyService *service.ConcurrencyService crsSyncService *service.CRSSyncService + sessionLimitCache service.SessionLimitCache } // NewAccountHandler creates a new admin account handler @@ -58,6 +59,7 @@ func NewAccountHandler( accountTestService *service.AccountTestService, concurrencyService *service.ConcurrencyService, crsSyncService *service.CRSSyncService, + sessionLimitCache service.SessionLimitCache, ) *AccountHandler { return &AccountHandler{ adminService: adminService, @@ -70,6 +72,7 @@ func NewAccountHandler( accountTestService: accountTestService, concurrencyService: concurrencyService, crsSyncService: crsSyncService, + sessionLimitCache: sessionLimitCache, } } @@ -84,6 +87,7 @@ type CreateAccountRequest struct { ProxyID *int64 `json:"proxy_id"` Concurrency int `json:"concurrency"` Priority int `json:"priority"` + RateMultiplier *float64 `json:"rate_multiplier"` GroupIDs []int64 `json:"group_ids"` ExpiresAt *int64 `json:"expires_at"` AutoPauseOnExpired *bool `json:"auto_pause_on_expired"` @@ -101,6 +105,7 @@ type UpdateAccountRequest struct { ProxyID *int64 `json:"proxy_id"` Concurrency *int `json:"concurrency"` Priority *int `json:"priority"` + RateMultiplier *float64 `json:"rate_multiplier"` Status string `json:"status" binding:"omitempty,oneof=active inactive"` GroupIDs *[]int64 `json:"group_ids"` ExpiresAt *int64 `json:"expires_at"` @@ -115,6 +120,7 @@ type BulkUpdateAccountsRequest struct { ProxyID *int64 `json:"proxy_id"` Concurrency *int `json:"concurrency"` Priority *int `json:"priority"` + RateMultiplier *float64 `json:"rate_multiplier"` Status string `json:"status" 
binding:"omitempty,oneof=active inactive error"` Schedulable *bool `json:"schedulable"` GroupIDs *[]int64 `json:"group_ids"` @@ -127,6 +133,9 @@ type BulkUpdateAccountsRequest struct { type AccountWithConcurrency struct { *dto.Account CurrentConcurrency int `json:"current_concurrency"` + // 以下字段仅对 Anthropic OAuth/SetupToken 账号有效,且仅在启用相应功能时返回 + CurrentWindowCost *float64 `json:"current_window_cost,omitempty"` // 当前窗口费用 + ActiveSessions *int `json:"active_sessions,omitempty"` // 当前活跃会话数 } // List handles listing all accounts with pagination @@ -161,13 +170,89 @@ func (h *AccountHandler) List(c *gin.Context) { concurrencyCounts = make(map[int64]int) } + // 识别需要查询窗口费用和会话数的账号(Anthropic OAuth/SetupToken 且启用了相应功能) + windowCostAccountIDs := make([]int64, 0) + sessionLimitAccountIDs := make([]int64, 0) + for i := range accounts { + acc := &accounts[i] + if acc.IsAnthropicOAuthOrSetupToken() { + if acc.GetWindowCostLimit() > 0 { + windowCostAccountIDs = append(windowCostAccountIDs, acc.ID) + } + if acc.GetMaxSessions() > 0 { + sessionLimitAccountIDs = append(sessionLimitAccountIDs, acc.ID) + } + } + } + + // 并行获取窗口费用和活跃会话数 + var windowCosts map[int64]float64 + var activeSessions map[int64]int + + // 获取活跃会话数(批量查询) + if len(sessionLimitAccountIDs) > 0 && h.sessionLimitCache != nil { + activeSessions, _ = h.sessionLimitCache.GetActiveSessionCountBatch(c.Request.Context(), sessionLimitAccountIDs) + if activeSessions == nil { + activeSessions = make(map[int64]int) + } + } + + // 获取窗口费用(并行查询) + if len(windowCostAccountIDs) > 0 { + windowCosts = make(map[int64]float64) + var mu sync.Mutex + g, gctx := errgroup.WithContext(c.Request.Context()) + g.SetLimit(10) // 限制并发数 + + for i := range accounts { + acc := &accounts[i] + if !acc.IsAnthropicOAuthOrSetupToken() || acc.GetWindowCostLimit() <= 0 { + continue + } + accCopy := acc // 闭包捕获 + g.Go(func() error { + var startTime time.Time + if accCopy.SessionWindowStart != nil { + startTime = *accCopy.SessionWindowStart + } else { + 
startTime = time.Now().Add(-5 * time.Hour) + } + stats, err := h.accountUsageService.GetAccountWindowStats(gctx, accCopy.ID, startTime) + if err == nil && stats != nil { + mu.Lock() + windowCosts[accCopy.ID] = stats.StandardCost // 使用标准费用 + mu.Unlock() + } + return nil // 不返回错误,允许部分失败 + }) + } + _ = g.Wait() + } + // Build response with concurrency info result := make([]AccountWithConcurrency, len(accounts)) for i := range accounts { - result[i] = AccountWithConcurrency{ - Account: dto.AccountFromService(&accounts[i]), - CurrentConcurrency: concurrencyCounts[accounts[i].ID], + acc := &accounts[i] + item := AccountWithConcurrency{ + Account: dto.AccountFromService(acc), + CurrentConcurrency: concurrencyCounts[acc.ID], } + + // 添加窗口费用(仅当启用时) + if windowCosts != nil { + if cost, ok := windowCosts[acc.ID]; ok { + item.CurrentWindowCost = &cost + } + } + + // 添加活跃会话数(仅当启用时) + if activeSessions != nil { + if count, ok := activeSessions[acc.ID]; ok { + item.ActiveSessions = &count + } + } + + result[i] = item } response.Paginated(c, result, total, page, pageSize) @@ -199,6 +284,10 @@ func (h *AccountHandler) Create(c *gin.Context) { response.BadRequest(c, "Invalid request: "+err.Error()) return } + if req.RateMultiplier != nil && *req.RateMultiplier < 0 { + response.BadRequest(c, "rate_multiplier must be >= 0") + return + } // 确定是否跳过混合渠道检查 skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk @@ -213,6 +302,7 @@ func (h *AccountHandler) Create(c *gin.Context) { ProxyID: req.ProxyID, Concurrency: req.Concurrency, Priority: req.Priority, + RateMultiplier: req.RateMultiplier, GroupIDs: req.GroupIDs, ExpiresAt: req.ExpiresAt, AutoPauseOnExpired: req.AutoPauseOnExpired, @@ -258,6 +348,10 @@ func (h *AccountHandler) Update(c *gin.Context) { response.BadRequest(c, "Invalid request: "+err.Error()) return } + if req.RateMultiplier != nil && *req.RateMultiplier < 0 { + response.BadRequest(c, "rate_multiplier must be >= 0") + return + } // 确定是否跳过混合渠道检查 
skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk @@ -271,6 +365,7 @@ func (h *AccountHandler) Update(c *gin.Context) { ProxyID: req.ProxyID, Concurrency: req.Concurrency, // 指针类型,nil 表示未提供 Priority: req.Priority, // 指针类型,nil 表示未提供 + RateMultiplier: req.RateMultiplier, Status: req.Status, GroupIDs: req.GroupIDs, ExpiresAt: req.ExpiresAt, @@ -682,6 +777,10 @@ func (h *AccountHandler) BulkUpdate(c *gin.Context) { response.BadRequest(c, "Invalid request: "+err.Error()) return } + if req.RateMultiplier != nil && *req.RateMultiplier < 0 { + response.BadRequest(c, "rate_multiplier must be >= 0") + return + } // 确定是否跳过混合渠道检查 skipCheck := req.ConfirmMixedChannelRisk != nil && *req.ConfirmMixedChannelRisk @@ -690,6 +789,7 @@ func (h *AccountHandler) BulkUpdate(c *gin.Context) { req.ProxyID != nil || req.Concurrency != nil || req.Priority != nil || + req.RateMultiplier != nil || req.Status != "" || req.Schedulable != nil || req.GroupIDs != nil || @@ -707,6 +807,7 @@ func (h *AccountHandler) BulkUpdate(c *gin.Context) { ProxyID: req.ProxyID, Concurrency: req.Concurrency, Priority: req.Priority, + RateMultiplier: req.RateMultiplier, Status: req.Status, Schedulable: req.Schedulable, GroupIDs: req.GroupIDs, diff --git a/backend/internal/handler/admin/dashboard_handler.go b/backend/internal/handler/admin/dashboard_handler.go index 30cdd914..3f07403d 100644 --- a/backend/internal/handler/admin/dashboard_handler.go +++ b/backend/internal/handler/admin/dashboard_handler.go @@ -1,6 +1,7 @@ package admin import ( + "errors" "strconv" "time" @@ -13,15 +14,17 @@ import ( // DashboardHandler handles admin dashboard statistics type DashboardHandler struct { - dashboardService *service.DashboardService - startTime time.Time // Server start time for uptime calculation + dashboardService *service.DashboardService + aggregationService *service.DashboardAggregationService + startTime time.Time // Server start time for uptime calculation } // NewDashboardHandler 
creates a new admin dashboard handler -func NewDashboardHandler(dashboardService *service.DashboardService) *DashboardHandler { +func NewDashboardHandler(dashboardService *service.DashboardService, aggregationService *service.DashboardAggregationService) *DashboardHandler { return &DashboardHandler{ - dashboardService: dashboardService, - startTime: time.Now(), + dashboardService: dashboardService, + aggregationService: aggregationService, + startTime: time.Now(), } } @@ -114,6 +117,58 @@ func (h *DashboardHandler) GetStats(c *gin.Context) { // 性能指标 "rpm": stats.Rpm, "tpm": stats.Tpm, + + // 预聚合新鲜度 + "hourly_active_users": stats.HourlyActiveUsers, + "stats_updated_at": stats.StatsUpdatedAt, + "stats_stale": stats.StatsStale, + }) +} + +type DashboardAggregationBackfillRequest struct { + Start string `json:"start"` + End string `json:"end"` +} + +// BackfillAggregation handles triggering aggregation backfill +// POST /api/v1/admin/dashboard/aggregation/backfill +func (h *DashboardHandler) BackfillAggregation(c *gin.Context) { + if h.aggregationService == nil { + response.InternalError(c, "Aggregation service not available") + return + } + + var req DashboardAggregationBackfillRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + start, err := time.Parse(time.RFC3339, req.Start) + if err != nil { + response.BadRequest(c, "Invalid start time") + return + } + end, err := time.Parse(time.RFC3339, req.End) + if err != nil { + response.BadRequest(c, "Invalid end time") + return + } + + if err := h.aggregationService.TriggerBackfill(start, end); err != nil { + if errors.Is(err, service.ErrDashboardBackfillDisabled) { + response.Forbidden(c, "Backfill is disabled") + return + } + if errors.Is(err, service.ErrDashboardBackfillTooLarge) { + response.BadRequest(c, "Backfill range too large") + return + } + response.InternalError(c, "Failed to trigger backfill") + return + } + + response.Success(c, gin.H{ + 
"status": "accepted", }) } @@ -131,13 +186,16 @@ func (h *DashboardHandler) GetRealtimeMetrics(c *gin.Context) { // GetUsageTrend handles getting usage trend data // GET /api/v1/admin/dashboard/trend -// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id +// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, stream func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { startTime, endTime := parseTimeRange(c) granularity := c.DefaultQuery("granularity", "day") // Parse optional filter params - var userID, apiKeyID int64 + var userID, apiKeyID, accountID, groupID int64 + var model string + var stream *bool + if userIDStr := c.Query("user_id"); userIDStr != "" { if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil { userID = id @@ -148,8 +206,26 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { apiKeyID = id } } + if accountIDStr := c.Query("account_id"); accountIDStr != "" { + if id, err := strconv.ParseInt(accountIDStr, 10, 64); err == nil { + accountID = id + } + } + if groupIDStr := c.Query("group_id"); groupIDStr != "" { + if id, err := strconv.ParseInt(groupIDStr, 10, 64); err == nil { + groupID = id + } + } + if modelStr := c.Query("model"); modelStr != "" { + model = modelStr + } + if streamStr := c.Query("stream"); streamStr != "" { + if streamVal, err := strconv.ParseBool(streamStr); err == nil { + stream = &streamVal + } + } - trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID) + trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream) if err != nil { response.Error(c, 500, "Failed to get usage trend") return @@ -165,12 +241,14 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { // GetModelStats handles getting model usage 
statistics // GET /api/v1/admin/dashboard/models -// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id +// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, stream func (h *DashboardHandler) GetModelStats(c *gin.Context) { startTime, endTime := parseTimeRange(c) // Parse optional filter params - var userID, apiKeyID int64 + var userID, apiKeyID, accountID, groupID int64 + var stream *bool + if userIDStr := c.Query("user_id"); userIDStr != "" { if id, err := strconv.ParseInt(userIDStr, 10, 64); err == nil { userID = id @@ -181,8 +259,23 @@ func (h *DashboardHandler) GetModelStats(c *gin.Context) { apiKeyID = id } } + if accountIDStr := c.Query("account_id"); accountIDStr != "" { + if id, err := strconv.ParseInt(accountIDStr, 10, 64); err == nil { + accountID = id + } + } + if groupIDStr := c.Query("group_id"); groupIDStr != "" { + if id, err := strconv.ParseInt(groupIDStr, 10, 64); err == nil { + groupID = id + } + } + if streamStr := c.Query("stream"); streamStr != "" { + if streamVal, err := strconv.ParseBool(streamStr); err == nil { + stream = &streamVal + } + } - stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID) + stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, stream) if err != nil { response.Error(c, 500, "Failed to get model statistics") return diff --git a/backend/internal/handler/admin/group_handler.go b/backend/internal/handler/admin/group_handler.go index a8bae35e..f6780dee 100644 --- a/backend/internal/handler/admin/group_handler.go +++ b/backend/internal/handler/admin/group_handler.go @@ -40,6 +40,9 @@ type CreateGroupRequest struct { ImagePrice4K *float64 `json:"image_price_4k"` ClaudeCodeOnly bool `json:"claude_code_only"` FallbackGroupID *int64 `json:"fallback_group_id"` + // 模型路由配置(仅 anthropic 平台使用) + ModelRouting map[string][]int64 
`json:"model_routing"` + ModelRoutingEnabled bool `json:"model_routing_enabled"` } // UpdateGroupRequest represents update group request @@ -60,6 +63,9 @@ type UpdateGroupRequest struct { ImagePrice4K *float64 `json:"image_price_4k"` ClaudeCodeOnly *bool `json:"claude_code_only"` FallbackGroupID *int64 `json:"fallback_group_id"` + // 模型路由配置(仅 anthropic 平台使用) + ModelRouting map[string][]int64 `json:"model_routing"` + ModelRoutingEnabled *bool `json:"model_routing_enabled"` } // List handles listing all groups with pagination @@ -149,20 +155,22 @@ func (h *GroupHandler) Create(c *gin.Context) { } group, err := h.adminService.CreateGroup(c.Request.Context(), &service.CreateGroupInput{ - Name: req.Name, - Description: req.Description, - Platform: req.Platform, - RateMultiplier: req.RateMultiplier, - IsExclusive: req.IsExclusive, - SubscriptionType: req.SubscriptionType, - DailyLimitUSD: req.DailyLimitUSD, - WeeklyLimitUSD: req.WeeklyLimitUSD, - MonthlyLimitUSD: req.MonthlyLimitUSD, - ImagePrice1K: req.ImagePrice1K, - ImagePrice2K: req.ImagePrice2K, - ImagePrice4K: req.ImagePrice4K, - ClaudeCodeOnly: req.ClaudeCodeOnly, - FallbackGroupID: req.FallbackGroupID, + Name: req.Name, + Description: req.Description, + Platform: req.Platform, + RateMultiplier: req.RateMultiplier, + IsExclusive: req.IsExclusive, + SubscriptionType: req.SubscriptionType, + DailyLimitUSD: req.DailyLimitUSD, + WeeklyLimitUSD: req.WeeklyLimitUSD, + MonthlyLimitUSD: req.MonthlyLimitUSD, + ImagePrice1K: req.ImagePrice1K, + ImagePrice2K: req.ImagePrice2K, + ImagePrice4K: req.ImagePrice4K, + ClaudeCodeOnly: req.ClaudeCodeOnly, + FallbackGroupID: req.FallbackGroupID, + ModelRouting: req.ModelRouting, + ModelRoutingEnabled: req.ModelRoutingEnabled, }) if err != nil { response.ErrorFrom(c, err) @@ -188,21 +196,23 @@ func (h *GroupHandler) Update(c *gin.Context) { } group, err := h.adminService.UpdateGroup(c.Request.Context(), groupID, &service.UpdateGroupInput{ - Name: req.Name, - Description: 
req.Description, - Platform: req.Platform, - RateMultiplier: req.RateMultiplier, - IsExclusive: req.IsExclusive, - Status: req.Status, - SubscriptionType: req.SubscriptionType, - DailyLimitUSD: req.DailyLimitUSD, - WeeklyLimitUSD: req.WeeklyLimitUSD, - MonthlyLimitUSD: req.MonthlyLimitUSD, - ImagePrice1K: req.ImagePrice1K, - ImagePrice2K: req.ImagePrice2K, - ImagePrice4K: req.ImagePrice4K, - ClaudeCodeOnly: req.ClaudeCodeOnly, - FallbackGroupID: req.FallbackGroupID, + Name: req.Name, + Description: req.Description, + Platform: req.Platform, + RateMultiplier: req.RateMultiplier, + IsExclusive: req.IsExclusive, + Status: req.Status, + SubscriptionType: req.SubscriptionType, + DailyLimitUSD: req.DailyLimitUSD, + WeeklyLimitUSD: req.WeeklyLimitUSD, + MonthlyLimitUSD: req.MonthlyLimitUSD, + ImagePrice1K: req.ImagePrice1K, + ImagePrice2K: req.ImagePrice2K, + ImagePrice4K: req.ImagePrice4K, + ClaudeCodeOnly: req.ClaudeCodeOnly, + FallbackGroupID: req.FallbackGroupID, + ModelRouting: req.ModelRouting, + ModelRoutingEnabled: req.ModelRoutingEnabled, }) if err != nil { response.ErrorFrom(c, err) diff --git a/backend/internal/handler/admin/ops_alerts_handler.go b/backend/internal/handler/admin/ops_alerts_handler.go new file mode 100644 index 00000000..c9da19c7 --- /dev/null +++ b/backend/internal/handler/admin/ops_alerts_handler.go @@ -0,0 +1,602 @@ +package admin + +import ( + "encoding/json" + "fmt" + "math" + "net/http" + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/gin-gonic/gin/binding" +) + +var validOpsAlertMetricTypes = []string{ + "success_rate", + "error_rate", + "upstream_error_rate", + "cpu_usage_percent", + "memory_usage_percent", + "concurrency_queue_depth", +} + +var validOpsAlertMetricTypeSet = func() map[string]struct{} { + set := make(map[string]struct{}, 
len(validOpsAlertMetricTypes)) + for _, v := range validOpsAlertMetricTypes { + set[v] = struct{}{} + } + return set +}() + +var validOpsAlertOperators = []string{">", "<", ">=", "<=", "==", "!="} + +var validOpsAlertOperatorSet = func() map[string]struct{} { + set := make(map[string]struct{}, len(validOpsAlertOperators)) + for _, v := range validOpsAlertOperators { + set[v] = struct{}{} + } + return set +}() + +var validOpsAlertSeverities = []string{"P0", "P1", "P2", "P3"} + +var validOpsAlertSeveritySet = func() map[string]struct{} { + set := make(map[string]struct{}, len(validOpsAlertSeverities)) + for _, v := range validOpsAlertSeverities { + set[v] = struct{}{} + } + return set +}() + +type opsAlertRuleValidatedInput struct { + Name string + MetricType string + Operator string + Threshold float64 + + Severity string + + WindowMinutes int + SustainedMinutes int + CooldownMinutes int + + Enabled bool + NotifyEmail bool + + WindowProvided bool + SustainedProvided bool + CooldownProvided bool + SeverityProvided bool + EnabledProvided bool + NotifyProvided bool +} + +func isPercentOrRateMetric(metricType string) bool { + switch metricType { + case "success_rate", + "error_rate", + "upstream_error_rate", + "cpu_usage_percent", + "memory_usage_percent": + return true + default: + return false + } +} + +func validateOpsAlertRulePayload(raw map[string]json.RawMessage) (*opsAlertRuleValidatedInput, error) { + if raw == nil { + return nil, fmt.Errorf("invalid request body") + } + + requiredFields := []string{"name", "metric_type", "operator", "threshold"} + for _, field := range requiredFields { + if _, ok := raw[field]; !ok { + return nil, fmt.Errorf("%s is required", field) + } + } + + var name string + if err := json.Unmarshal(raw["name"], &name); err != nil || strings.TrimSpace(name) == "" { + return nil, fmt.Errorf("name is required") + } + name = strings.TrimSpace(name) + + var metricType string + if err := json.Unmarshal(raw["metric_type"], &metricType); err != 
nil || strings.TrimSpace(metricType) == "" { + return nil, fmt.Errorf("metric_type is required") + } + metricType = strings.TrimSpace(metricType) + if _, ok := validOpsAlertMetricTypeSet[metricType]; !ok { + return nil, fmt.Errorf("metric_type must be one of: %s", strings.Join(validOpsAlertMetricTypes, ", ")) + } + + var operator string + if err := json.Unmarshal(raw["operator"], &operator); err != nil || strings.TrimSpace(operator) == "" { + return nil, fmt.Errorf("operator is required") + } + operator = strings.TrimSpace(operator) + if _, ok := validOpsAlertOperatorSet[operator]; !ok { + return nil, fmt.Errorf("operator must be one of: %s", strings.Join(validOpsAlertOperators, ", ")) + } + + var threshold float64 + if err := json.Unmarshal(raw["threshold"], &threshold); err != nil { + return nil, fmt.Errorf("threshold must be a number") + } + if math.IsNaN(threshold) || math.IsInf(threshold, 0) { + return nil, fmt.Errorf("threshold must be a finite number") + } + if isPercentOrRateMetric(metricType) { + if threshold < 0 || threshold > 100 { + return nil, fmt.Errorf("threshold must be between 0 and 100 for metric_type %s", metricType) + } + } else if threshold < 0 { + return nil, fmt.Errorf("threshold must be >= 0") + } + + validated := &opsAlertRuleValidatedInput{ + Name: name, + MetricType: metricType, + Operator: operator, + Threshold: threshold, + } + + if v, ok := raw["severity"]; ok { + validated.SeverityProvided = true + var sev string + if err := json.Unmarshal(v, &sev); err != nil { + return nil, fmt.Errorf("severity must be a string") + } + sev = strings.ToUpper(strings.TrimSpace(sev)) + if sev != "" { + if _, ok := validOpsAlertSeveritySet[sev]; !ok { + return nil, fmt.Errorf("severity must be one of: %s", strings.Join(validOpsAlertSeverities, ", ")) + } + validated.Severity = sev + } + } + if validated.Severity == "" { + validated.Severity = "P2" + } + + if v, ok := raw["enabled"]; ok { + validated.EnabledProvided = true + if err := json.Unmarshal(v, 
&validated.Enabled); err != nil { + return nil, fmt.Errorf("enabled must be a boolean") + } + } else { + validated.Enabled = true + } + + if v, ok := raw["notify_email"]; ok { + validated.NotifyProvided = true + if err := json.Unmarshal(v, &validated.NotifyEmail); err != nil { + return nil, fmt.Errorf("notify_email must be a boolean") + } + } else { + validated.NotifyEmail = true + } + + if v, ok := raw["window_minutes"]; ok { + validated.WindowProvided = true + if err := json.Unmarshal(v, &validated.WindowMinutes); err != nil { + return nil, fmt.Errorf("window_minutes must be an integer") + } + switch validated.WindowMinutes { + case 1, 5, 60: + default: + return nil, fmt.Errorf("window_minutes must be one of: 1, 5, 60") + } + } else { + validated.WindowMinutes = 1 + } + + if v, ok := raw["sustained_minutes"]; ok { + validated.SustainedProvided = true + if err := json.Unmarshal(v, &validated.SustainedMinutes); err != nil { + return nil, fmt.Errorf("sustained_minutes must be an integer") + } + if validated.SustainedMinutes < 1 || validated.SustainedMinutes > 1440 { + return nil, fmt.Errorf("sustained_minutes must be between 1 and 1440") + } + } else { + validated.SustainedMinutes = 1 + } + + if v, ok := raw["cooldown_minutes"]; ok { + validated.CooldownProvided = true + if err := json.Unmarshal(v, &validated.CooldownMinutes); err != nil { + return nil, fmt.Errorf("cooldown_minutes must be an integer") + } + if validated.CooldownMinutes < 0 || validated.CooldownMinutes > 1440 { + return nil, fmt.Errorf("cooldown_minutes must be between 0 and 1440") + } + } else { + validated.CooldownMinutes = 0 + } + + return validated, nil +} + +// ListAlertRules returns all ops alert rules. 
+// GET /api/v1/admin/ops/alert-rules +func (h *OpsHandler) ListAlertRules(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + rules, err := h.opsService.ListAlertRules(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, rules) +} + +// CreateAlertRule creates an ops alert rule. +// POST /api/v1/admin/ops/alert-rules +func (h *OpsHandler) CreateAlertRule(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + var raw map[string]json.RawMessage + if err := c.ShouldBindBodyWith(&raw, binding.JSON); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + validated, err := validateOpsAlertRulePayload(raw) + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + var rule service.OpsAlertRule + if err := c.ShouldBindBodyWith(&rule, binding.JSON); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + + rule.Name = validated.Name + rule.MetricType = validated.MetricType + rule.Operator = validated.Operator + rule.Threshold = validated.Threshold + rule.WindowMinutes = validated.WindowMinutes + rule.SustainedMinutes = validated.SustainedMinutes + rule.CooldownMinutes = validated.CooldownMinutes + rule.Severity = validated.Severity + rule.Enabled = validated.Enabled + rule.NotifyEmail = validated.NotifyEmail + + created, err := h.opsService.CreateAlertRule(c.Request.Context(), &rule) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, created) +} + +// UpdateAlertRule updates an existing ops alert rule. 
+// PUT /api/v1/admin/ops/alert-rules/:id +func (h *OpsHandler) UpdateAlertRule(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid rule ID") + return + } + + var raw map[string]json.RawMessage + if err := c.ShouldBindBodyWith(&raw, binding.JSON); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + validated, err := validateOpsAlertRulePayload(raw) + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + var rule service.OpsAlertRule + if err := c.ShouldBindBodyWith(&rule, binding.JSON); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + + rule.ID = id + rule.Name = validated.Name + rule.MetricType = validated.MetricType + rule.Operator = validated.Operator + rule.Threshold = validated.Threshold + rule.WindowMinutes = validated.WindowMinutes + rule.SustainedMinutes = validated.SustainedMinutes + rule.CooldownMinutes = validated.CooldownMinutes + rule.Severity = validated.Severity + rule.Enabled = validated.Enabled + rule.NotifyEmail = validated.NotifyEmail + + updated, err := h.opsService.UpdateAlertRule(c.Request.Context(), &rule) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, updated) +} + +// DeleteAlertRule deletes an ops alert rule. 
+// DELETE /api/v1/admin/ops/alert-rules/:id +func (h *OpsHandler) DeleteAlertRule(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid rule ID") + return + } + + if err := h.opsService.DeleteAlertRule(c.Request.Context(), id); err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, gin.H{"deleted": true}) +} + +// GetAlertEvent returns a single ops alert event. +// GET /api/v1/admin/ops/alert-events/:id +func (h *OpsHandler) GetAlertEvent(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid event ID") + return + } + + ev, err := h.opsService.GetAlertEventByID(c.Request.Context(), id) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, ev) +} + +// UpdateAlertEventStatus updates an ops alert event status. 
+// PUT /api/v1/admin/ops/alert-events/:id/status +func (h *OpsHandler) UpdateAlertEventStatus(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + id, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid event ID") + return + } + + var payload struct { + Status string `json:"status"` + } + if err := c.ShouldBindJSON(&payload); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + payload.Status = strings.TrimSpace(payload.Status) + if payload.Status == "" { + response.BadRequest(c, "Invalid status") + return + } + if payload.Status != service.OpsAlertStatusResolved && payload.Status != service.OpsAlertStatusManualResolved { + response.BadRequest(c, "Invalid status") + return + } + + var resolvedAt *time.Time + if payload.Status == service.OpsAlertStatusResolved || payload.Status == service.OpsAlertStatusManualResolved { + now := time.Now().UTC() + resolvedAt = &now + } + if err := h.opsService.UpdateAlertEventStatus(c.Request.Context(), id, payload.Status, resolvedAt); err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, gin.H{"updated": true}) +} + +// ListAlertEvents lists recent ops alert events. +// GET /api/v1/admin/ops/alert-events +// CreateAlertSilence creates a scoped silence for ops alerts. 
+// POST /api/v1/admin/ops/alert-silences +func (h *OpsHandler) CreateAlertSilence(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + var payload struct { + RuleID int64 `json:"rule_id"` + Platform string `json:"platform"` + GroupID *int64 `json:"group_id"` + Region *string `json:"region"` + Until string `json:"until"` + Reason string `json:"reason"` + } + if err := c.ShouldBindJSON(&payload); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + until, err := time.Parse(time.RFC3339, strings.TrimSpace(payload.Until)) + if err != nil { + response.BadRequest(c, "Invalid until") + return + } + + createdBy := (*int64)(nil) + if subject, ok := middleware.GetAuthSubjectFromContext(c); ok { + uid := subject.UserID + createdBy = &uid + } + + silence := &service.OpsAlertSilence{ + RuleID: payload.RuleID, + Platform: strings.TrimSpace(payload.Platform), + GroupID: payload.GroupID, + Region: payload.Region, + Until: until, + Reason: strings.TrimSpace(payload.Reason), + CreatedBy: createdBy, + } + + created, err := h.opsService.CreateAlertSilence(c.Request.Context(), silence) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, created) +} + +func (h *OpsHandler) ListAlertEvents(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + limit := 20 + if raw := strings.TrimSpace(c.Query("limit")); raw != "" { + n, err := strconv.Atoi(raw) + if err != nil || n <= 0 { + response.BadRequest(c, "Invalid limit") + return + } + limit = n + } + + filter := &service.OpsAlertEventFilter{ + Limit: limit, + Status: 
strings.TrimSpace(c.Query("status")), + Severity: strings.TrimSpace(c.Query("severity")), + } + + if v := strings.TrimSpace(c.Query("email_sent")); v != "" { + vv := strings.ToLower(v) + switch vv { + case "true", "1": + b := true + filter.EmailSent = &b + case "false", "0": + b := false + filter.EmailSent = &b + default: + response.BadRequest(c, "Invalid email_sent") + return + } + } + + // Cursor pagination: both params must be provided together. + rawTS := strings.TrimSpace(c.Query("before_fired_at")) + rawID := strings.TrimSpace(c.Query("before_id")) + if (rawTS == "") != (rawID == "") { + response.BadRequest(c, "before_fired_at and before_id must be provided together") + return + } + if rawTS != "" { + ts, err := time.Parse(time.RFC3339Nano, rawTS) + if err != nil { + if t2, err2 := time.Parse(time.RFC3339, rawTS); err2 == nil { + ts = t2 + } else { + response.BadRequest(c, "Invalid before_fired_at") + return + } + } + filter.BeforeFiredAt = &ts + } + if rawID != "" { + id, err := strconv.ParseInt(rawID, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid before_id") + return + } + filter.BeforeID = &id + } + + // Optional global filter support (platform/group/time range). + if platform := strings.TrimSpace(c.Query("platform")); platform != "" { + filter.Platform = platform + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + if startTime, endTime, err := parseOpsTimeRange(c, "24h"); err == nil { + // Only apply when explicitly provided to avoid surprising default narrowing. 
+ if strings.TrimSpace(c.Query("start_time")) != "" || strings.TrimSpace(c.Query("end_time")) != "" || strings.TrimSpace(c.Query("time_range")) != "" { + filter.StartTime = &startTime + filter.EndTime = &endTime + } + } else { + response.BadRequest(c, err.Error()) + return + } + + events, err := h.opsService.ListAlertEvents(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, events) +} diff --git a/backend/internal/handler/admin/ops_dashboard_handler.go b/backend/internal/handler/admin/ops_dashboard_handler.go new file mode 100644 index 00000000..2c87f734 --- /dev/null +++ b/backend/internal/handler/admin/ops_dashboard_handler.go @@ -0,0 +1,243 @@ +package admin + +import ( + "net/http" + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" +) + +// GetDashboardOverview returns vNext ops dashboard overview (raw path). +// GET /api/v1/admin/ops/dashboard/overview +func (h *OpsHandler) GetDashboardOverview(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: strings.TrimSpace(c.Query("platform")), + QueryMode: parseOpsQueryMode(c), + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + data, err := h.opsService.GetDashboardOverview(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, 
err) + return + } + response.Success(c, data) +} + +// GetDashboardThroughputTrend returns throughput time series (raw path). +// GET /api/v1/admin/ops/dashboard/throughput-trend +func (h *OpsHandler) GetDashboardThroughputTrend(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: strings.TrimSpace(c.Query("platform")), + QueryMode: parseOpsQueryMode(c), + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + bucketSeconds := pickThroughputBucketSeconds(endTime.Sub(startTime)) + data, err := h.opsService.GetThroughputTrend(c.Request.Context(), filter, bucketSeconds) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, data) +} + +// GetDashboardLatencyHistogram returns the latency distribution histogram (success requests). 
+// GET /api/v1/admin/ops/dashboard/latency-histogram +func (h *OpsHandler) GetDashboardLatencyHistogram(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: strings.TrimSpace(c.Query("platform")), + QueryMode: parseOpsQueryMode(c), + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + data, err := h.opsService.GetLatencyHistogram(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, data) +} + +// GetDashboardErrorTrend returns error counts time series (raw path). 
+// GET /api/v1/admin/ops/dashboard/error-trend +func (h *OpsHandler) GetDashboardErrorTrend(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: strings.TrimSpace(c.Query("platform")), + QueryMode: parseOpsQueryMode(c), + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + bucketSeconds := pickThroughputBucketSeconds(endTime.Sub(startTime)) + data, err := h.opsService.GetErrorTrend(c.Request.Context(), filter, bucketSeconds) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, data) +} + +// GetDashboardErrorDistribution returns error distribution by status code (raw path). 
+// GET /api/v1/admin/ops/dashboard/error-distribution +func (h *OpsHandler) GetDashboardErrorDistribution(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + Platform: strings.TrimSpace(c.Query("platform")), + QueryMode: parseOpsQueryMode(c), + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + + data, err := h.opsService.GetErrorDistribution(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, data) +} + +func pickThroughputBucketSeconds(window time.Duration) int { + // Keep buckets predictable and avoid huge responses. + switch { + case window <= 2*time.Hour: + return 60 + case window <= 24*time.Hour: + return 300 + default: + return 3600 + } +} + +func parseOpsQueryMode(c *gin.Context) service.OpsQueryMode { + if c == nil { + return "" + } + raw := strings.TrimSpace(c.Query("mode")) + if raw == "" { + // Empty means "use server default" (DB setting ops_query_mode_default). 
+ return "" + } + return service.ParseOpsQueryMode(raw) +} diff --git a/backend/internal/handler/admin/ops_handler.go b/backend/internal/handler/admin/ops_handler.go new file mode 100644 index 00000000..44accc8f --- /dev/null +++ b/backend/internal/handler/admin/ops_handler.go @@ -0,0 +1,925 @@ +package admin + +import ( + "errors" + "fmt" + "io" + "net/http" + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" +) + +type OpsHandler struct { + opsService *service.OpsService +} + +// GetErrorLogByID returns ops error log detail. +// GET /api/v1/admin/ops/errors/:id +func (h *OpsHandler) GetErrorLogByID(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + detail, err := h.opsService.GetErrorLogByID(c.Request.Context(), id) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, detail) +} + +const ( + opsListViewErrors = "errors" + opsListViewExcluded = "excluded" + opsListViewAll = "all" +) + +func parseOpsViewParam(c *gin.Context) string { + if c == nil { + return "" + } + v := strings.ToLower(strings.TrimSpace(c.Query("view"))) + switch v { + case "", opsListViewErrors: + return opsListViewErrors + case opsListViewExcluded: + return opsListViewExcluded + case opsListViewAll: + return opsListViewAll + default: + return opsListViewErrors + } +} + +func NewOpsHandler(opsService *service.OpsService) *OpsHandler { + return &OpsHandler{opsService: opsService} +} + +// GetErrorLogs lists 
ops error logs.
// GET /api/v1/admin/ops/errors
//
// Supports time-range, view, phase/owner/source, free-text (q / user_query),
// platform/group/account, resolved-state and status-code filters.
func (h *OpsHandler) GetErrorLogs(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	page, pageSize := response.ParsePagination(c)
	// Ops list can be larger than standard admin tables.
	if pageSize > 500 {
		pageSize = 500
	}

	startTime, endTime, err := parseOpsTimeRange(c, "1h")
	if err != nil {
		response.BadRequest(c, err.Error())
		return
	}

	filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize}

	if !startTime.IsZero() {
		filter.StartTime = &startTime
	}
	if !endTime.IsZero() {
		filter.EndTime = &endTime
	}
	filter.View = parseOpsViewParam(c)
	filter.Phase = strings.TrimSpace(c.Query("phase"))
	filter.Owner = strings.TrimSpace(c.Query("error_owner"))
	filter.Source = strings.TrimSpace(c.Query("error_source"))
	filter.Query = strings.TrimSpace(c.Query("q"))
	filter.UserQuery = strings.TrimSpace(c.Query("user_query"))

	// Force request errors: client-visible status >= 400.
	// buildOpsErrorLogsWhere already applies this for non-upstream phase.
	// NOTE(review): a caller-supplied phase=upstream is silently cleared here,
	// so this endpoint always serves request-scoped errors — confirm that is
	// the intended contract (the dedicated /upstream-errors endpoint covers
	// the upstream phase).
	if strings.EqualFold(strings.TrimSpace(filter.Phase), "upstream") {
		filter.Phase = ""
	}

	if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
		filter.Platform = platform
	}
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		filter.GroupID = &id
	}
	if v := strings.TrimSpace(c.Query("account_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid account_id")
			return
		}
		filter.AccountID = &id
	}

	// resolved accepts 1/true/yes and 0/false/no; anything else is a 400.
	if v := strings.TrimSpace(c.Query("resolved")); v != "" {
		switch strings.ToLower(v) {
		case "1", "true", "yes":
			b := true
			filter.Resolved = &b
		case "0", "false", "no":
			b := false
			filter.Resolved = &b
		default:
			response.BadRequest(c, "Invalid resolved")
			return
		}
	}
	// status_codes is a comma-separated list; blank entries are skipped.
	if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" {
		parts := strings.Split(statusCodesStr, ",")
		out := make([]int, 0, len(parts))
		for _, part := range parts {
			p := strings.TrimSpace(part)
			if p == "" {
				continue
			}
			n, err := strconv.Atoi(p)
			if err != nil || n < 0 {
				response.BadRequest(c, "Invalid status_codes")
				return
			}
			out = append(out, n)
		}
		filter.StatusCodes = out
	}

	result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize)
}

// ListRequestErrors lists client-visible request errors.
+// GET /api/v1/admin/ops/request-errors +func (h *OpsHandler) ListRequestErrors(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + page, pageSize := response.ParsePagination(c) + if pageSize > 500 { + pageSize = 500 + } + startTime, endTime, err := parseOpsTimeRange(c, "1h") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize} + if !startTime.IsZero() { + filter.StartTime = &startTime + } + if !endTime.IsZero() { + filter.EndTime = &endTime + } + filter.View = parseOpsViewParam(c) + filter.Phase = strings.TrimSpace(c.Query("phase")) + filter.Owner = strings.TrimSpace(c.Query("error_owner")) + filter.Source = strings.TrimSpace(c.Query("error_source")) + filter.Query = strings.TrimSpace(c.Query("q")) + filter.UserQuery = strings.TrimSpace(c.Query("user_query")) + + // Force request errors: client-visible status >= 400. + // buildOpsErrorLogsWhere already applies this for non-upstream phase. 
+ if strings.EqualFold(strings.TrimSpace(filter.Phase), "upstream") { + filter.Phase = "" + } + + if platform := strings.TrimSpace(c.Query("platform")); platform != "" { + filter.Platform = platform + } + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + filter.GroupID = &id + } + if v := strings.TrimSpace(c.Query("account_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid account_id") + return + } + filter.AccountID = &id + } + + if v := strings.TrimSpace(c.Query("resolved")); v != "" { + switch strings.ToLower(v) { + case "1", "true", "yes": + b := true + filter.Resolved = &b + case "0", "false", "no": + b := false + filter.Resolved = &b + default: + response.BadRequest(c, "Invalid resolved") + return + } + } + if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" { + parts := strings.Split(statusCodesStr, ",") + out := make([]int, 0, len(parts)) + for _, part := range parts { + p := strings.TrimSpace(part) + if p == "" { + continue + } + n, err := strconv.Atoi(p) + if err != nil || n < 0 { + response.BadRequest(c, "Invalid status_codes") + return + } + out = append(out, n) + } + filter.StatusCodes = out + } + + result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize) +} + +// GetRequestError returns request error detail. +// GET /api/v1/admin/ops/request-errors/:id +func (h *OpsHandler) GetRequestError(c *gin.Context) { + // same storage; just proxy to existing detail + h.GetErrorLogByID(c) +} + +// ListRequestErrorUpstreamErrors lists upstream error logs correlated to a request error. 
+// GET /api/v1/admin/ops/request-errors/:id/upstream-errors +func (h *OpsHandler) ListRequestErrorUpstreamErrors(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + // Load request error to get correlation keys. + detail, err := h.opsService.GetErrorLogByID(c.Request.Context(), id) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // Correlate by request_id/client_request_id. + requestID := strings.TrimSpace(detail.RequestID) + clientRequestID := strings.TrimSpace(detail.ClientRequestID) + if requestID == "" && clientRequestID == "" { + response.Paginated(c, []*service.OpsErrorLog{}, 0, 1, 10) + return + } + + page, pageSize := response.ParsePagination(c) + if pageSize > 500 { + pageSize = 500 + } + + // Keep correlation window wide enough so linked upstream errors + // are discoverable even when UI defaults to 1h elsewhere. + startTime, endTime, err := parseOpsTimeRange(c, "30d") + if err != nil { + response.BadRequest(c, err.Error()) + return + } + + filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize} + if !startTime.IsZero() { + filter.StartTime = &startTime + } + if !endTime.IsZero() { + filter.EndTime = &endTime + } + filter.View = "all" + filter.Phase = "upstream" + filter.Owner = "provider" + filter.Source = strings.TrimSpace(c.Query("error_source")) + filter.Query = strings.TrimSpace(c.Query("q")) + + if platform := strings.TrimSpace(c.Query("platform")); platform != "" { + filter.Platform = platform + } + + // Prefer exact match on request_id; if missing, fall back to client_request_id. 
+ if requestID != "" { + filter.RequestID = requestID + } else { + filter.ClientRequestID = clientRequestID + } + + result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter) + if err != nil { + response.ErrorFrom(c, err) + return + } + + // If client asks for details, expand each upstream error log to include upstream response fields. + includeDetail := strings.TrimSpace(c.Query("include_detail")) + if includeDetail == "1" || strings.EqualFold(includeDetail, "true") || strings.EqualFold(includeDetail, "yes") { + details := make([]*service.OpsErrorLogDetail, 0, len(result.Errors)) + for _, item := range result.Errors { + if item == nil { + continue + } + d, err := h.opsService.GetErrorLogByID(c.Request.Context(), item.ID) + if err != nil || d == nil { + continue + } + details = append(details, d) + } + response.Paginated(c, details, int64(result.Total), result.Page, result.PageSize) + return + } + + response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize) +} + +// RetryRequestErrorClient retries the client request based on stored request body. 
+// POST /api/v1/admin/ops/request-errors/:id/retry-client +func (h *OpsHandler) RetryRequestErrorClient(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Error(c, http.StatusUnauthorized, "Unauthorized") + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, service.OpsRetryModeClient, nil) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, result) +} + +// RetryRequestErrorUpstreamEvent retries a specific upstream attempt using captured upstream_request_body. 
+// POST /api/v1/admin/ops/request-errors/:id/upstream-errors/:idx/retry +func (h *OpsHandler) RetryRequestErrorUpstreamEvent(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + subject, ok := middleware.GetAuthSubjectFromContext(c) + if !ok || subject.UserID <= 0 { + response.Error(c, http.StatusUnauthorized, "Unauthorized") + return + } + + idStr := strings.TrimSpace(c.Param("id")) + id, err := strconv.ParseInt(idStr, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid error id") + return + } + + idxStr := strings.TrimSpace(c.Param("idx")) + idx, err := strconv.Atoi(idxStr) + if err != nil || idx < 0 { + response.BadRequest(c, "Invalid upstream idx") + return + } + + result, err := h.opsService.RetryUpstreamEvent(c.Request.Context(), subject.UserID, id, idx) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, result) +} + +// ResolveRequestError toggles resolved status. +// PUT /api/v1/admin/ops/request-errors/:id/resolve +func (h *OpsHandler) ResolveRequestError(c *gin.Context) { + h.UpdateErrorResolution(c) +} + +// ListUpstreamErrors lists independent upstream errors. 
// GET /api/v1/admin/ops/upstream-errors
//
// Phase and owner are fixed (upstream/provider); the remaining filter
// dimensions mirror GetErrorLogs.
func (h *OpsHandler) ListUpstreamErrors(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	page, pageSize := response.ParsePagination(c)
	if pageSize > 500 {
		pageSize = 500
	}
	startTime, endTime, err := parseOpsTimeRange(c, "1h")
	if err != nil {
		response.BadRequest(c, err.Error())
		return
	}

	filter := &service.OpsErrorLogFilter{Page: page, PageSize: pageSize}
	if !startTime.IsZero() {
		filter.StartTime = &startTime
	}
	if !endTime.IsZero() {
		filter.EndTime = &endTime
	}

	// Hard-wire the upstream/provider dimensions for this endpoint.
	filter.View = parseOpsViewParam(c)
	filter.Phase = "upstream"
	filter.Owner = "provider"
	filter.Source = strings.TrimSpace(c.Query("error_source"))
	filter.Query = strings.TrimSpace(c.Query("q"))

	if platform := strings.TrimSpace(c.Query("platform")); platform != "" {
		filter.Platform = platform
	}
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		filter.GroupID = &id
	}
	if v := strings.TrimSpace(c.Query("account_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid account_id")
			return
		}
		filter.AccountID = &id
	}

	if v := strings.TrimSpace(c.Query("resolved")); v != "" {
		switch strings.ToLower(v) {
		case "1", "true", "yes":
			b := true
			filter.Resolved = &b
		case "0", "false", "no":
			b := false
			filter.Resolved = &b
		default:
			response.BadRequest(c, "Invalid resolved")
			return
		}
	}
	if statusCodesStr := strings.TrimSpace(c.Query("status_codes")); statusCodesStr != "" {
		parts := strings.Split(statusCodesStr, ",")
		out := make([]int, 0, len(parts))
		for _, part := range parts {
			p := strings.TrimSpace(part)
			if p == "" {
				continue
			}
			n, err := strconv.Atoi(p)
			if err != nil || n < 0 {
				response.BadRequest(c, "Invalid status_codes")
				return
			}
			out = append(out, n)
		}
		filter.StatusCodes = out
	}

	result, err := h.opsService.GetErrorLogs(c.Request.Context(), filter)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Paginated(c, result.Errors, int64(result.Total), result.Page, result.PageSize)
}

// GetUpstreamError returns upstream error detail.
// GET /api/v1/admin/ops/upstream-errors/:id
func (h *OpsHandler) GetUpstreamError(c *gin.Context) {
	// Shares storage with request errors; delegate to the common handler.
	h.GetErrorLogByID(c)
}

// RetryUpstreamError retries upstream error using the original account_id.
// POST /api/v1/admin/ops/upstream-errors/:id/retry
func (h *OpsHandler) RetryUpstreamError(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	subject, ok := middleware.GetAuthSubjectFromContext(c)
	if !ok || subject.UserID <= 0 {
		response.Error(c, http.StatusUnauthorized, "Unauthorized")
		return
	}

	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}

	result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, service.OpsRetryModeUpstream, nil)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, result)
}

// ResolveUpstreamError toggles resolved status.
// PUT /api/v1/admin/ops/upstream-errors/:id/resolve
func (h *OpsHandler) ResolveUpstreamError(c *gin.Context) {
	h.UpdateErrorResolution(c)
}

// ==================== Existing endpoints ====================

// ListRequestDetails returns a request-level list (success + error) for drill-down.
// GET /api/v1/admin/ops/requests
func (h *OpsHandler) ListRequestDetails(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	page, pageSize := response.ParsePagination(c)
	// Stricter cap than the error lists (500).
	if pageSize > 100 {
		pageSize = 100
	}

	startTime, endTime, err := parseOpsTimeRange(c, "1h")
	if err != nil {
		response.BadRequest(c, err.Error())
		return
	}

	filter := &service.OpsRequestDetailFilter{
		Page:      page,
		PageSize:  pageSize,
		StartTime: &startTime,
		EndTime:   &endTime,
	}

	filter.Kind = strings.TrimSpace(c.Query("kind"))
	filter.Platform = strings.TrimSpace(c.Query("platform"))
	filter.Model = strings.TrimSpace(c.Query("model"))
	filter.RequestID = strings.TrimSpace(c.Query("request_id"))
	filter.Query = strings.TrimSpace(c.Query("q"))
	filter.Sort = strings.TrimSpace(c.Query("sort"))

	if v := strings.TrimSpace(c.Query("user_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid user_id")
			return
		}
		filter.UserID = &id
	}
	if v := strings.TrimSpace(c.Query("api_key_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid api_key_id")
			return
		}
		filter.APIKeyID = &id
	}
	if v := strings.TrimSpace(c.Query("account_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid account_id")
			return
		}
		filter.AccountID = &id
	}
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		filter.GroupID = &id
	}

	if v := strings.TrimSpace(c.Query("min_duration_ms")); v != "" {
		parsed, err := strconv.Atoi(v)
		if err != nil || parsed < 0 {
			response.BadRequest(c, "Invalid min_duration_ms")
			return
		}
		filter.MinDurationMs = &parsed
	}
	if v := strings.TrimSpace(c.Query("max_duration_ms")); v != "" {
		parsed, err := strconv.Atoi(v)
		if err != nil || parsed < 0 {
			response.BadRequest(c, "Invalid max_duration_ms")
			return
		}
		filter.MaxDurationMs = &parsed
	}

	out, err := h.opsService.ListRequestDetails(c.Request.Context(), filter)
	if err != nil {
		// Invalid sort/kind/platform etc should be a bad request; keep it simple.
		// NOTE(review): matching on the error text is fragile — a typed
		// validation error from the service layer would be more robust.
		if strings.Contains(strings.ToLower(err.Error()), "invalid") {
			response.BadRequest(c, err.Error())
			return
		}
		response.Error(c, http.StatusInternalServerError, "Failed to list request details")
		return
	}

	response.Paginated(c, out.Items, out.Total, out.Page, out.PageSize)
}

// opsRetryRequest is the JSON body accepted by the error retry endpoints.
type opsRetryRequest struct {
	Mode            string `json:"mode"`
	PinnedAccountID *int64 `json:"pinned_account_id"`
	Force           bool   `json:"force"`
}

// opsResolveRequest is the JSON body for the resolve/unresolve endpoints.
type opsResolveRequest struct {
	Resolved bool `json:"resolved"`
}

// RetryErrorRequest retries a failed request using stored request_body.
// POST /api/v1/admin/ops/errors/:id/retry
func (h *OpsHandler) RetryErrorRequest(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	subject, ok := middleware.GetAuthSubjectFromContext(c)
	if !ok || subject.UserID <= 0 {
		response.Error(c, http.StatusUnauthorized, "Unauthorized")
		return
	}

	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}

	// An empty body (io.EOF) is allowed and defaults to client-mode retry.
	req := opsRetryRequest{Mode: service.OpsRetryModeClient}
	if err := c.ShouldBindJSON(&req); err != nil && !errors.Is(err, io.EOF) {
		response.BadRequest(c, "Invalid request: "+err.Error())
		return
	}
	if strings.TrimSpace(req.Mode) == "" {
		req.Mode = service.OpsRetryModeClient
	}

	// Force flag is currently a UI-level acknowledgement. Server may still enforce safety constraints.
	_ = req.Force

	// Legacy endpoint safety: only allow retrying the client request here.
	// Upstream retries must go through the split endpoints.
	if strings.EqualFold(strings.TrimSpace(req.Mode), service.OpsRetryModeUpstream) {
		response.BadRequest(c, "upstream retry is not supported on this endpoint")
		return
	}

	result, err := h.opsService.RetryError(c.Request.Context(), subject.UserID, id, req.Mode, req.PinnedAccountID)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}

	response.Success(c, result)
}

// ListRetryAttempts lists retry attempts for an error log.
// GET /api/v1/admin/ops/errors/:id/retries
func (h *OpsHandler) ListRetryAttempts(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}

	// "limit" must be a positive integer; defaults to 50. No upper cap is
	// enforced here — the service layer is assumed to bound it (TODO confirm).
	limit := 50
	if v := strings.TrimSpace(c.Query("limit")); v != "" {
		n, err := strconv.Atoi(v)
		if err != nil || n <= 0 {
			response.BadRequest(c, "Invalid limit")
			return
		}
		limit = n
	}

	items, err := h.opsService.ListRetryAttemptsByErrorID(c.Request.Context(), id, limit)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	response.Success(c, items)
}

// UpdateErrorResolution allows manual resolve/unresolve.
// PUT /api/v1/admin/ops/errors/:id/resolve
func (h *OpsHandler) UpdateErrorResolution(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	subject, ok := middleware.GetAuthSubjectFromContext(c)
	if !ok || subject.UserID <= 0 {
		response.Error(c, http.StatusUnauthorized, "Unauthorized")
		return
	}

	idStr := strings.TrimSpace(c.Param("id"))
	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil || id <= 0 {
		response.BadRequest(c, "Invalid error id")
		return
	}

	var req opsResolveRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.BadRequest(c, "Invalid request: "+err.Error())
		return
	}
	// Resolution change is attributed to the acting admin user.
	uid := subject.UserID
	if err := h.opsService.UpdateErrorResolution(c.Request.Context(), id, req.Resolved, &uid, nil); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	response.Success(c, gin.H{"ok": true})
}

// parseOpsTimeRange resolves the effective [start, end] window from query
// params. Explicit start_time/end_time (RFC3339 or RFC3339Nano) take
// precedence; otherwise the "time_range" token (e.g. "1h") is used, falling
// back to defaultRange. The window is capped at 30 days.
func parseOpsTimeRange(c *gin.Context, defaultRange string) (time.Time, time.Time, error) {
	startStr := strings.TrimSpace(c.Query("start_time"))
	endStr := strings.TrimSpace(c.Query("end_time"))

	parseTS := func(s string) (time.Time, error) {
		if s == "" {
			return time.Time{}, nil
		}
		// Accept RFC3339 both with and without fractional seconds.
		if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
			return t, nil
		}
		return time.Parse(time.RFC3339, s)
	}

	start, err := parseTS(startStr)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}
	end, err := parseTS(endStr)
	if err != nil {
		return time.Time{}, time.Time{}, err
	}

	// start/end explicitly provided (even partially)
	if startStr != "" || endStr != "" {
		if end.IsZero() {
			end = time.Now()
		}
		if start.IsZero() {
			dur, _ := parseOpsDuration(defaultRange)
			start = end.Add(-dur)
		}
		if start.After(end) {
			return time.Time{}, time.Time{}, fmt.Errorf("invalid time range: start_time must be <= end_time")
		}
		if end.Sub(start) > 30*24*time.Hour {
			return time.Time{}, time.Time{}, fmt.Errorf("invalid time range: max window is 30 days")
		}
		return start, end, nil
	}

	// time_range fallback
	tr := strings.TrimSpace(c.Query("time_range"))
	if tr == "" {
		tr = defaultRange
	}
	dur, ok := parseOpsDuration(tr)
	if !ok {
		// Unknown token: silently fall back to the caller-supplied default.
		dur, _ = parseOpsDuration(defaultRange)
	}

	end = time.Now()
	start = end.Add(-dur)
	if end.Sub(start) > 30*24*time.Hour {
		return time.Time{}, time.Time{}, fmt.Errorf("invalid time range: max window is 30 days")
	}
	return start, end, nil
}

// parseOpsDuration maps the fixed set of UI range tokens to durations; the
// bool is false for unrecognized tokens.
func parseOpsDuration(v string) (time.Duration, bool) {
	switch strings.TrimSpace(v) {
	case "5m":
		return 5 * time.Minute, true
	case "30m":
		return 30 * time.Minute, true
	case "1h":
		return time.Hour, true
	case "6h":
		return 6 * time.Hour, true
	case "24h":
		return 24 * time.Hour, true
	case "7d":
		return 7 * 24 * time.Hour, true
	case "30d":
		return 30 * 24 * time.Hour, true
	default:
		return 0, false
	}
}
diff --git a/backend/internal/handler/admin/ops_realtime_handler.go b/backend/internal/handler/admin/ops_realtime_handler.go
new file mode 100644
index 00000000..4f15ec57
--- /dev/null
+++ b/backend/internal/handler/admin/ops_realtime_handler.go
@@ -0,0 +1,213 @@
package admin

import (
	"net/http"
	"strconv"
	"strings"
	"time"

	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
	"github.com/Wei-Shaw/sub2api/internal/service"
	"github.com/gin-gonic/gin"
)

// GetConcurrencyStats returns real-time concurrency usage aggregated by platform/group/account.
// GET /api/v1/admin/ops/concurrency
func (h *OpsHandler) GetConcurrencyStats(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	// Realtime collection disabled: respond with enabled=false and empty maps
	// rather than an error.
	if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) {
		response.Success(c, gin.H{
			"enabled":   false,
			"platform":  map[string]*service.PlatformConcurrencyInfo{},
			"group":     map[int64]*service.GroupConcurrencyInfo{},
			"account":   map[int64]*service.AccountConcurrencyInfo{},
			"timestamp": time.Now().UTC(),
		})
		return
	}

	platformFilter := strings.TrimSpace(c.Query("platform"))
	var groupID *int64
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		groupID = &id
	}

	platform, group, account, collectedAt, err := h.opsService.GetConcurrencyStats(c.Request.Context(), platformFilter, groupID)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}

	payload := gin.H{
		"enabled":  true,
		"platform": platform,
		"group":    group,
		"account":  account,
	}
	// timestamp is only present when the service reported a collection time.
	if collectedAt != nil {
		payload["timestamp"] = collectedAt.UTC()
	}
	response.Success(c, payload)
}

// 
GetAccountAvailability returns account availability statistics. +// GET /api/v1/admin/ops/account-availability +// +// Query params: +// - platform: optional +// - group_id: optional +func (h *OpsHandler) GetAccountAvailability(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) { + response.Success(c, gin.H{ + "enabled": false, + "platform": map[string]*service.PlatformAvailability{}, + "group": map[int64]*service.GroupAvailability{}, + "account": map[int64]*service.AccountAvailability{}, + "timestamp": time.Now().UTC(), + }) + return + } + + platform := strings.TrimSpace(c.Query("platform")) + var groupID *int64 + if v := strings.TrimSpace(c.Query("group_id")); v != "" { + id, err := strconv.ParseInt(v, 10, 64) + if err != nil || id <= 0 { + response.BadRequest(c, "Invalid group_id") + return + } + groupID = &id + } + + platformStats, groupStats, accountStats, collectedAt, err := h.opsService.GetAccountAvailabilityStats(c.Request.Context(), platform, groupID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + payload := gin.H{ + "enabled": true, + "platform": platformStats, + "group": groupStats, + "account": accountStats, + } + if collectedAt != nil { + payload["timestamp"] = collectedAt.UTC() + } + response.Success(c, payload) +} + +func parseOpsRealtimeWindow(v string) (time.Duration, string, bool) { + switch strings.ToLower(strings.TrimSpace(v)) { + case "", "1min", "1m": + return 1 * time.Minute, "1min", true + case "5min", "5m": + return 5 * time.Minute, "5min", true + case "30min", "30m": + return 30 * time.Minute, "30min", true + case "1h", "60m", "60min": + return 1 * time.Hour, "1h", true + default: + return 0, "", false + } +} + +// GetRealtimeTrafficSummary 
returns QPS/TPS current/peak/avg for the selected window.
// GET /api/v1/admin/ops/realtime-traffic
//
// Query params:
// - window: 1min|5min|30min|1h (default: 1min)
// - platform: optional
// - group_id: optional
func (h *OpsHandler) GetRealtimeTrafficSummary(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	windowDur, windowLabel, ok := parseOpsRealtimeWindow(c.Query("window"))
	if !ok {
		response.BadRequest(c, "Invalid window")
		return
	}

	platform := strings.TrimSpace(c.Query("platform"))
	var groupID *int64
	if v := strings.TrimSpace(c.Query("group_id")); v != "" {
		id, err := strconv.ParseInt(v, 10, 64)
		if err != nil || id <= 0 {
			response.BadRequest(c, "Invalid group_id")
			return
		}
		groupID = &id
	}

	endTime := time.Now().UTC()
	startTime := endTime.Add(-windowDur)

	// Realtime collection disabled: return a zeroed summary flagged
	// enabled=false so clients can still render the window metadata.
	if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) {
		disabledSummary := &service.OpsRealtimeTrafficSummary{
			Window:    windowLabel,
			StartTime: startTime,
			EndTime:   endTime,
			Platform:  platform,
			GroupID:   groupID,
			QPS:       service.OpsRateSummary{},
			TPS:       service.OpsRateSummary{},
		}
		response.Success(c, gin.H{
			"enabled":   false,
			"summary":   disabledSummary,
			"timestamp": endTime,
		})
		return
	}

	filter := &service.OpsDashboardFilter{
		StartTime: startTime,
		EndTime:   endTime,
		Platform:  platform,
		GroupID:   groupID,
		QueryMode: service.OpsQueryModeRaw,
	}

	summary, err := h.opsService.GetRealtimeTrafficSummary(c.Request.Context(), filter)
	if err != nil {
		response.ErrorFrom(c, err)
		return
	}
	// Stamp the canonical window label onto the service result.
	if summary != nil {
		summary.Window = windowLabel
	}
	response.Success(c, gin.H{
		"enabled":   true,
		"summary":   summary,
		"timestamp": endTime,
	})
}
diff --git 
a/backend/internal/handler/admin/ops_settings_handler.go b/backend/internal/handler/admin/ops_settings_handler.go
new file mode 100644
index 00000000..ebc8bf49
--- /dev/null
+++ b/backend/internal/handler/admin/ops_settings_handler.go
@@ -0,0 +1,194 @@
package admin

import (
	"net/http"

	"github.com/Wei-Shaw/sub2api/internal/pkg/response"
	"github.com/Wei-Shaw/sub2api/internal/service"
	"github.com/gin-gonic/gin"
)

// GetEmailNotificationConfig returns Ops email notification config (DB-backed).
// GET /api/v1/admin/ops/email-notification/config
func (h *OpsHandler) GetEmailNotificationConfig(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	cfg, err := h.opsService.GetEmailNotificationConfig(c.Request.Context())
	if err != nil {
		// Read failures map to a generic 500 (do not leak internals).
		response.Error(c, http.StatusInternalServerError, "Failed to get email notification config")
		return
	}
	response.Success(c, cfg)
}

// UpdateEmailNotificationConfig updates Ops email notification config (DB-backed).
// PUT /api/v1/admin/ops/email-notification/config
func (h *OpsHandler) UpdateEmailNotificationConfig(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	var req service.OpsEmailNotificationConfigUpdateRequest
	if err := c.ShouldBindJSON(&req); err != nil {
		response.BadRequest(c, "Invalid request body")
		return
	}

	updated, err := h.opsService.UpdateEmailNotificationConfig(c.Request.Context(), &req)
	if err != nil {
		// Most failures here are validation errors from request payload; treat as 400.
		response.Error(c, http.StatusBadRequest, err.Error())
		return
	}
	response.Success(c, updated)
}

// GetAlertRuntimeSettings returns Ops alert evaluator runtime settings (DB-backed).
// GET /api/v1/admin/ops/runtime/alert
func (h *OpsHandler) GetAlertRuntimeSettings(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	cfg, err := h.opsService.GetOpsAlertRuntimeSettings(c.Request.Context())
	if err != nil {
		response.Error(c, http.StatusInternalServerError, "Failed to get alert runtime settings")
		return
	}
	response.Success(c, cfg)
}

// UpdateAlertRuntimeSettings updates Ops alert evaluator runtime settings (DB-backed).
// PUT /api/v1/admin/ops/runtime/alert
func (h *OpsHandler) UpdateAlertRuntimeSettings(c *gin.Context) {
	if h.opsService == nil {
		response.Error(c, http.StatusServiceUnavailable, "Ops service not available")
		return
	}
	if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil {
		response.ErrorFrom(c, err)
		return
	}

	var req service.OpsAlertRuntimeSettings
	if err := c.ShouldBindJSON(&req); err != nil {
		response.BadRequest(c, "Invalid request body")
		return
	}

	// Update failures are treated as payload validation errors (400), with the
	// service error text surfaced to the admin UI.
	updated, err := h.opsService.UpdateOpsAlertRuntimeSettings(c.Request.Context(), &req)
	if err != nil {
		response.Error(c, http.StatusBadRequest, err.Error())
		return
	}
	response.Success(c, updated)
}

// GetAdvancedSettings returns Ops advanced settings (DB-backed).
+// GET /api/v1/admin/ops/advanced-settings +func (h *OpsHandler) GetAdvancedSettings(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + cfg, err := h.opsService.GetOpsAdvancedSettings(c.Request.Context()) + if err != nil { + response.Error(c, http.StatusInternalServerError, "Failed to get advanced settings") + return + } + response.Success(c, cfg) +} + +// UpdateAdvancedSettings updates Ops advanced settings (DB-backed). +// PUT /api/v1/admin/ops/advanced-settings +func (h *OpsHandler) UpdateAdvancedSettings(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + var req service.OpsAdvancedSettings + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + + updated, err := h.opsService.UpdateOpsAdvancedSettings(c.Request.Context(), &req) + if err != nil { + response.Error(c, http.StatusBadRequest, err.Error()) + return + } + response.Success(c, updated) +} + +// GetMetricThresholds returns Ops metric thresholds (DB-backed). 
+// GET /api/v1/admin/ops/settings/metric-thresholds +func (h *OpsHandler) GetMetricThresholds(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + cfg, err := h.opsService.GetMetricThresholds(c.Request.Context()) + if err != nil { + response.Error(c, http.StatusInternalServerError, "Failed to get metric thresholds") + return + } + response.Success(c, cfg) +} + +// UpdateMetricThresholds updates Ops metric thresholds (DB-backed). +// PUT /api/v1/admin/ops/settings/metric-thresholds +func (h *OpsHandler) UpdateMetricThresholds(c *gin.Context) { + if h.opsService == nil { + response.Error(c, http.StatusServiceUnavailable, "Ops service not available") + return + } + if err := h.opsService.RequireMonitoringEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return + } + + var req service.OpsMetricThresholds + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request body") + return + } + + updated, err := h.opsService.UpdateMetricThresholds(c.Request.Context(), &req) + if err != nil { + response.Error(c, http.StatusBadRequest, err.Error()) + return + } + response.Success(c, updated) +} diff --git a/backend/internal/handler/admin/ops_ws_handler.go b/backend/internal/handler/admin/ops_ws_handler.go new file mode 100644 index 00000000..db7442e5 --- /dev/null +++ b/backend/internal/handler/admin/ops_ws_handler.go @@ -0,0 +1,771 @@ +package admin + +import ( + "context" + "encoding/json" + "log" + "math" + "net" + "net/http" + "net/netip" + "net/url" + "os" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/gorilla/websocket" +) + +type OpsWSProxyConfig struct { + TrustProxy bool + TrustedProxies []netip.Prefix 
	OriginPolicy string
}

// Environment variable names that tune websocket proxy-trust and limits.
const (
	envOpsWSTrustProxy     = "OPS_WS_TRUST_PROXY"
	envOpsWSTrustedProxies = "OPS_WS_TRUSTED_PROXIES"
	envOpsWSOriginPolicy   = "OPS_WS_ORIGIN_POLICY"
	envOpsWSMaxConns       = "OPS_WS_MAX_CONNS"
	envOpsWSMaxConnsPerIP  = "OPS_WS_MAX_CONNS_PER_IP"
)

// Origin-check policies for handshakes that carry no Origin header.
const (
	OriginPolicyStrict     = "strict"
	OriginPolicyPermissive = "permissive"
)

// Loaded once at package init; see loadOpsWSProxyConfigFromEnv.
var opsWSProxyConfig = loadOpsWSProxyConfigFromEnv()

var upgrader = websocket.Upgrader{
	CheckOrigin: func(r *http.Request) bool {
		return isAllowedOpsWSOrigin(r)
	},
	// Subprotocol negotiation:
	// - The frontend passes ["sub2api-admin", "jwt."].
	// - We always select "sub2api-admin" so the token is never echoed back in the handshake response.
	Subprotocols: []string{"sub2api-admin"},
}

// Push/refresh cadence and default connection caps for the QPS websocket.
const (
	qpsWSPushInterval       = 2 * time.Second
	qpsWSRefreshInterval    = 5 * time.Second
	qpsWSRequestCountWindow = 1 * time.Minute

	defaultMaxWSConns      = 100
	defaultMaxWSConnsPerIP = 20
)

// Live connection counters: one global, one per client IP.
var wsConnCount atomic.Int32
var wsConnCountByIP sync.Map // map[string]*atomic.Int32

// How long to wait after the last connection closes before stopping the
// shared refresh loop.
const qpsWSIdleStopDelay = 30 * time.Second

const (
	// Application close code sent when realtime monitoring is disabled.
	opsWSCloseRealtimeDisabled = 4001
)

// Guards the single idle-stop timer below.
var qpsWSIdleStopMu sync.Mutex
var qpsWSIdleStopTimer *time.Timer

// cancelQPSWSIdleStop aborts a pending idle-stop, if one is scheduled.
// Called when a new websocket connection arrives.
func cancelQPSWSIdleStop() {
	qpsWSIdleStopMu.Lock()
	if qpsWSIdleStopTimer != nil {
		qpsWSIdleStopTimer.Stop()
		qpsWSIdleStopTimer = nil
	}
	qpsWSIdleStopMu.Unlock()
}

// scheduleQPSWSIdleStop arms a one-shot timer that stops the shared refresh
// cache once no connections remain. Scheduling is idempotent: a timer that is
// already pending is left untouched.
func scheduleQPSWSIdleStop() {
	qpsWSIdleStopMu.Lock()
	if qpsWSIdleStopTimer != nil {
		qpsWSIdleStopMu.Unlock()
		return
	}
	qpsWSIdleStopTimer = time.AfterFunc(qpsWSIdleStopDelay, func() {
		// Only stop if truly idle at fire time.
		if wsConnCount.Load() == 0 {
			qpsWSCache.Stop()
		}
		qpsWSIdleStopMu.Lock()
		qpsWSIdleStopTimer = nil
		qpsWSIdleStopMu.Unlock()
	})
	qpsWSIdleStopMu.Unlock()
}

// opsWSRuntimeLimits holds the effective connection caps (0/negative disables
// the corresponding cap; see the acquire helpers).
type opsWSRuntimeLimits struct {
	MaxConns      int32
	MaxConnsPerIP int32
}

var opsWSLimits = loadOpsWSRuntimeLimitsFromEnv()

const (
	qpsWSWriteTimeout = 10 * time.Second
	qpsWSPongWait     = 60 * time.Second
	qpsWSPingInterval = 30 * time.Second

	// We don't expect clients to send application messages; we only read to process control frames (Pong/Close).
	qpsWSMaxReadBytes = 1024
)

// opsWSQPSCache computes the QPS/TPS payload once on a timer and shares the
// marshalled bytes with every connected websocket, so N clients cost one
// query per refresh interval instead of N.
type opsWSQPSCache struct {
	refreshInterval    time.Duration
	requestCountWindow time.Duration

	lastUpdatedUnixNano atomic.Int64
	payload             atomic.Value // []byte

	// Mutable fields below are guarded by mu; done is closed by the refresh
	// goroutine on exit so Stop/start can wait for a full shutdown.
	opsService *service.OpsService
	cancel     context.CancelFunc
	done       chan struct{}

	mu      sync.Mutex
	running bool
}

var qpsWSCache = &opsWSQPSCache{
	refreshInterval:    qpsWSRefreshInterval,
	requestCountWindow: qpsWSRequestCountWindow,
}

// start launches the background refresh loop if it is not already running.
// If a previous loop is mid-shutdown, it waits for that loop to finish before
// starting a new one, so at most one refresh goroutine ever exists.
func (c *opsWSQPSCache) start(opsService *service.OpsService) {
	if c == nil || opsService == nil {
		return
	}

	for {
		c.mu.Lock()
		if c.running {
			c.mu.Unlock()
			return
		}

		// If a previous refresh loop is currently stopping, wait for it to fully exit.
		done := c.done
		if done != nil {
			c.mu.Unlock()
			<-done

			c.mu.Lock()
			// Clear the stale done channel only if no other caller raced us.
			if c.done == done && !c.running {
				c.done = nil
			}
			c.mu.Unlock()
			continue
		}

		c.opsService = opsService
		ctx, cancel := context.WithCancel(context.Background())
		c.cancel = cancel
		c.done = make(chan struct{})
		done = c.done
		c.running = true
		c.mu.Unlock()

		go func() {
			defer close(done)
			c.refreshLoop(ctx)
		}()
		return
	}
}

// Stop stops the background refresh loop.
// It is safe to call multiple times.
func (c *opsWSQPSCache) Stop() {
	if c == nil {
		return
	}

	c.mu.Lock()
	if !c.running {
		// Not running, but a previous loop may still be draining: wait for it
		// so callers observe a fully-stopped cache on return.
		done := c.done
		c.mu.Unlock()
		if done != nil {
			<-done
		}
		return
	}
	// Take ownership of cancel/done under the lock, then release before
	// blocking on the goroutine's exit.
	cancel := c.cancel
	c.cancel = nil
	c.running = false
	c.opsService = nil
	done := c.done
	c.mu.Unlock()

	if cancel != nil {
		cancel()
	}
	if done != nil {
		<-done
	}

	c.mu.Lock()
	// Clear done only if no concurrent start() installed a new channel.
	if c.done == done && !c.running {
		c.done = nil
	}
	c.mu.Unlock()
}

// refreshLoop refreshes the cached payload immediately and then on every
// tick until ctx is cancelled.
func (c *opsWSQPSCache) refreshLoop(ctx context.Context) {
	ticker := time.NewTicker(c.refreshInterval)
	defer ticker.Stop()

	c.refresh(ctx)
	for {
		select {
		case <-ticker.C:
			c.refresh(ctx)
		case <-ctx.Done():
			return
		}
	}
}

// refresh queries window stats for the trailing requestCountWindow, derives
// QPS/TPS, and atomically publishes the marshalled "qps_update" message.
// Failures are logged and the previous payload is left in place.
func (c *opsWSQPSCache) refresh(parentCtx context.Context) {
	if c == nil {
		return
	}

	c.mu.Lock()
	opsService := c.opsService
	c.mu.Unlock()
	if opsService == nil {
		return
	}

	if parentCtx == nil {
		parentCtx = context.Background()
	}
	// Bound each stats query so a slow backend cannot stall the loop.
	ctx, cancel := context.WithTimeout(parentCtx, 10*time.Second)
	defer cancel()

	now := time.Now().UTC()
	stats, err := opsService.GetWindowStats(ctx, now.Add(-c.requestCountWindow), now)
	if err != nil || stats == nil {
		if err != nil {
			log.Printf("[OpsWS] refresh: get window stats failed: %v", err)
		}
		return
	}

	// Rates are averaged over the whole window, rounded to one decimal place.
	requestCount := stats.SuccessCount + stats.ErrorCountTotal
	qps := 0.0
	tps := 0.0
	if c.requestCountWindow > 0 {
		seconds := c.requestCountWindow.Seconds()
		qps = roundTo1DP(float64(requestCount) / seconds)
		tps = roundTo1DP(float64(stats.TokenConsumed) / seconds)
	}

	payload := gin.H{
		"type":      "qps_update",
		"timestamp": now.Format(time.RFC3339),
		"data": gin.H{
			"qps":           qps,
			"tps":           tps,
			"request_count": requestCount,
		},
	}

	msg, err := json.Marshal(payload)
	if err != nil {
		log.Printf("[OpsWS] refresh: marshal payload failed: %v", err)
		return
	}

	c.payload.Store(msg)
	c.lastUpdatedUnixNano.Store(now.UnixNano())
}

// roundTo1DP rounds v to one decimal place.
func roundTo1DP(v float64) float64 {
	return
math.Round(v*10) / 10 +} + +func (c *opsWSQPSCache) getPayload() []byte { + if c == nil { + return nil + } + if cached, ok := c.payload.Load().([]byte); ok && cached != nil { + return cached + } + return nil +} + +func closeWS(conn *websocket.Conn, code int, reason string) { + if conn == nil { + return + } + msg := websocket.FormatCloseMessage(code, reason) + _ = conn.WriteControl(websocket.CloseMessage, msg, time.Now().Add(qpsWSWriteTimeout)) + _ = conn.Close() +} + +// QPSWSHandler handles realtime QPS push via WebSocket. +// GET /api/v1/admin/ops/ws/qps +func (h *OpsHandler) QPSWSHandler(c *gin.Context) { + clientIP := requestClientIP(c.Request) + + if h == nil || h.opsService == nil { + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "ops service not initialized"}) + return + } + + // If realtime monitoring is disabled, prefer a successful WS upgrade followed by a clean close + // with a deterministic close code. This prevents clients from spinning on 404/1006 reconnect loops. + if !h.opsService.IsRealtimeMonitoringEnabled(c.Request.Context()) { + conn, err := upgrader.Upgrade(c.Writer, c.Request, nil) + if err != nil { + c.JSON(http.StatusNotFound, gin.H{"error": "ops realtime monitoring is disabled"}) + return + } + closeWS(conn, opsWSCloseRealtimeDisabled, "realtime_disabled") + return + } + + cancelQPSWSIdleStop() + // Lazily start the background refresh loop so unit tests that never hit the + // websocket route don't spawn goroutines that depend on DB/Redis stubs. + qpsWSCache.start(h.opsService) + + // Reserve a global slot before upgrading the connection to keep the limit strict. 
+ if !tryAcquireOpsWSTotalSlot(opsWSLimits.MaxConns) { + log.Printf("[OpsWS] connection limit reached: %d/%d", wsConnCount.Load(), opsWSLimits.MaxConns) + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "too many connections"}) + return + } + defer func() { + if wsConnCount.Add(-1) == 0 { + scheduleQPSWSIdleStop() + } + }() + + if opsWSLimits.MaxConnsPerIP > 0 && clientIP != "" { + if !tryAcquireOpsWSIPSlot(clientIP, opsWSLimits.MaxConnsPerIP) { + log.Printf("[OpsWS] per-ip connection limit reached: ip=%s limit=%d", clientIP, opsWSLimits.MaxConnsPerIP) + c.JSON(http.StatusServiceUnavailable, gin.H{"error": "too many connections"}) + return + } + defer releaseOpsWSIPSlot(clientIP) + } + + conn, err := upgrader.Upgrade(c.Writer, c.Request, nil) + if err != nil { + log.Printf("[OpsWS] upgrade failed: %v", err) + return + } + + defer func() { + _ = conn.Close() + }() + + handleQPSWebSocket(c.Request.Context(), conn) +} + +func tryAcquireOpsWSTotalSlot(limit int32) bool { + if limit <= 0 { + return true + } + for { + current := wsConnCount.Load() + if current >= limit { + return false + } + if wsConnCount.CompareAndSwap(current, current+1) { + return true + } + } +} + +func tryAcquireOpsWSIPSlot(clientIP string, limit int32) bool { + if strings.TrimSpace(clientIP) == "" || limit <= 0 { + return true + } + + v, _ := wsConnCountByIP.LoadOrStore(clientIP, &atomic.Int32{}) + counter, ok := v.(*atomic.Int32) + if !ok { + return false + } + + for { + current := counter.Load() + if current >= limit { + return false + } + if counter.CompareAndSwap(current, current+1) { + return true + } + } +} + +func releaseOpsWSIPSlot(clientIP string) { + if strings.TrimSpace(clientIP) == "" { + return + } + + v, ok := wsConnCountByIP.Load(clientIP) + if !ok { + return + } + counter, ok := v.(*atomic.Int32) + if !ok { + return + } + next := counter.Add(-1) + if next <= 0 { + // Best-effort cleanup; safe even if a new slot was acquired concurrently. 
+ wsConnCountByIP.Delete(clientIP) + } +} + +func handleQPSWebSocket(parentCtx context.Context, conn *websocket.Conn) { + if conn == nil { + return + } + + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + var closeOnce sync.Once + closeConn := func() { + closeOnce.Do(func() { + _ = conn.Close() + }) + } + + closeFrameCh := make(chan []byte, 1) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + defer cancel() + + conn.SetReadLimit(qpsWSMaxReadBytes) + if err := conn.SetReadDeadline(time.Now().Add(qpsWSPongWait)); err != nil { + log.Printf("[OpsWS] set read deadline failed: %v", err) + return + } + conn.SetPongHandler(func(string) error { + return conn.SetReadDeadline(time.Now().Add(qpsWSPongWait)) + }) + conn.SetCloseHandler(func(code int, text string) error { + select { + case closeFrameCh <- websocket.FormatCloseMessage(code, text): + default: + } + cancel() + return nil + }) + + for { + _, _, err := conn.ReadMessage() + if err != nil { + if websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) { + log.Printf("[OpsWS] read failed: %v", err) + } + return + } + } + }() + + // Push QPS data every 2 seconds (values are globally cached and refreshed at most once per qpsWSRefreshInterval). + pushTicker := time.NewTicker(qpsWSPushInterval) + defer pushTicker.Stop() + + // Heartbeat ping every 30 seconds. 
+ pingTicker := time.NewTicker(qpsWSPingInterval) + defer pingTicker.Stop() + + writeWithTimeout := func(messageType int, data []byte) error { + if err := conn.SetWriteDeadline(time.Now().Add(qpsWSWriteTimeout)); err != nil { + return err + } + return conn.WriteMessage(messageType, data) + } + + sendClose := func(closeFrame []byte) { + if closeFrame == nil { + closeFrame = websocket.FormatCloseMessage(websocket.CloseNormalClosure, "") + } + _ = writeWithTimeout(websocket.CloseMessage, closeFrame) + } + + for { + select { + case <-pushTicker.C: + msg := qpsWSCache.getPayload() + if msg == nil { + continue + } + if err := writeWithTimeout(websocket.TextMessage, msg); err != nil { + log.Printf("[OpsWS] write failed: %v", err) + cancel() + closeConn() + wg.Wait() + return + } + + case <-pingTicker.C: + if err := writeWithTimeout(websocket.PingMessage, nil); err != nil { + log.Printf("[OpsWS] ping failed: %v", err) + cancel() + closeConn() + wg.Wait() + return + } + + case closeFrame := <-closeFrameCh: + sendClose(closeFrame) + closeConn() + wg.Wait() + return + + case <-ctx.Done(): + var closeFrame []byte + select { + case closeFrame = <-closeFrameCh: + default: + } + sendClose(closeFrame) + + closeConn() + wg.Wait() + return + } + } +} + +func isAllowedOpsWSOrigin(r *http.Request) bool { + if r == nil { + return false + } + origin := strings.TrimSpace(r.Header.Get("Origin")) + if origin == "" { + switch strings.ToLower(strings.TrimSpace(opsWSProxyConfig.OriginPolicy)) { + case OriginPolicyStrict: + return false + case OriginPolicyPermissive, "": + return true + default: + return true + } + } + parsed, err := url.Parse(origin) + if err != nil || parsed.Hostname() == "" { + return false + } + originHost := strings.ToLower(parsed.Hostname()) + + trustProxyHeaders := shouldTrustOpsWSProxyHeaders(r) + reqHost := hostWithoutPort(r.Host) + if trustProxyHeaders { + xfHost := strings.TrimSpace(r.Header.Get("X-Forwarded-Host")) + if xfHost != "" { + xfHost = 
strings.TrimSpace(strings.Split(xfHost, ",")[0]) + if xfHost != "" { + reqHost = hostWithoutPort(xfHost) + } + } + } + reqHost = strings.ToLower(reqHost) + if reqHost == "" { + return false + } + return originHost == reqHost +} + +func shouldTrustOpsWSProxyHeaders(r *http.Request) bool { + if r == nil { + return false + } + if !opsWSProxyConfig.TrustProxy { + return false + } + peerIP, ok := requestPeerIP(r) + if !ok { + return false + } + return isAddrInTrustedProxies(peerIP, opsWSProxyConfig.TrustedProxies) +} + +func requestPeerIP(r *http.Request) (netip.Addr, bool) { + if r == nil { + return netip.Addr{}, false + } + host, _, err := net.SplitHostPort(strings.TrimSpace(r.RemoteAddr)) + if err != nil { + host = strings.TrimSpace(r.RemoteAddr) + } + host = strings.TrimPrefix(host, "[") + host = strings.TrimSuffix(host, "]") + if host == "" { + return netip.Addr{}, false + } + addr, err := netip.ParseAddr(host) + if err != nil { + return netip.Addr{}, false + } + return addr.Unmap(), true +} + +func requestClientIP(r *http.Request) string { + if r == nil { + return "" + } + + trustProxyHeaders := shouldTrustOpsWSProxyHeaders(r) + if trustProxyHeaders { + xff := strings.TrimSpace(r.Header.Get("X-Forwarded-For")) + if xff != "" { + // Use the left-most entry (original client). If multiple proxies add values, they are comma-separated. 
+ xff = strings.TrimSpace(strings.Split(xff, ",")[0]) + xff = strings.TrimPrefix(xff, "[") + xff = strings.TrimSuffix(xff, "]") + if addr, err := netip.ParseAddr(xff); err == nil && addr.IsValid() { + return addr.Unmap().String() + } + } + } + + if peer, ok := requestPeerIP(r); ok && peer.IsValid() { + return peer.String() + } + return "" +} + +func isAddrInTrustedProxies(addr netip.Addr, trusted []netip.Prefix) bool { + if !addr.IsValid() { + return false + } + for _, p := range trusted { + if p.Contains(addr) { + return true + } + } + return false +} + +func loadOpsWSProxyConfigFromEnv() OpsWSProxyConfig { + cfg := OpsWSProxyConfig{ + TrustProxy: true, + TrustedProxies: defaultTrustedProxies(), + OriginPolicy: OriginPolicyPermissive, + } + + if v := strings.TrimSpace(os.Getenv(envOpsWSTrustProxy)); v != "" { + if parsed, err := strconv.ParseBool(v); err == nil { + cfg.TrustProxy = parsed + } else { + log.Printf("[OpsWS] invalid %s=%q (expected bool); using default=%v", envOpsWSTrustProxy, v, cfg.TrustProxy) + } + } + + if raw := strings.TrimSpace(os.Getenv(envOpsWSTrustedProxies)); raw != "" { + prefixes, invalid := parseTrustedProxyList(raw) + if len(invalid) > 0 { + log.Printf("[OpsWS] invalid %s entries ignored: %s", envOpsWSTrustedProxies, strings.Join(invalid, ", ")) + } + cfg.TrustedProxies = prefixes + } + + if v := strings.TrimSpace(os.Getenv(envOpsWSOriginPolicy)); v != "" { + normalized := strings.ToLower(v) + switch normalized { + case OriginPolicyStrict, OriginPolicyPermissive: + cfg.OriginPolicy = normalized + default: + log.Printf("[OpsWS] invalid %s=%q (expected %q or %q); using default=%q", envOpsWSOriginPolicy, v, OriginPolicyStrict, OriginPolicyPermissive, cfg.OriginPolicy) + } + } + + return cfg +} + +func loadOpsWSRuntimeLimitsFromEnv() opsWSRuntimeLimits { + cfg := opsWSRuntimeLimits{ + MaxConns: defaultMaxWSConns, + MaxConnsPerIP: defaultMaxWSConnsPerIP, + } + + if v := strings.TrimSpace(os.Getenv(envOpsWSMaxConns)); v != "" { + if parsed, 
err := strconv.Atoi(v); err == nil && parsed > 0 { + cfg.MaxConns = int32(parsed) + } else { + log.Printf("[OpsWS] invalid %s=%q (expected int>0); using default=%d", envOpsWSMaxConns, v, cfg.MaxConns) + } + } + if v := strings.TrimSpace(os.Getenv(envOpsWSMaxConnsPerIP)); v != "" { + if parsed, err := strconv.Atoi(v); err == nil && parsed >= 0 { + cfg.MaxConnsPerIP = int32(parsed) + } else { + log.Printf("[OpsWS] invalid %s=%q (expected int>=0); using default=%d", envOpsWSMaxConnsPerIP, v, cfg.MaxConnsPerIP) + } + } + return cfg +} + +func defaultTrustedProxies() []netip.Prefix { + prefixes, _ := parseTrustedProxyList("127.0.0.0/8,::1/128") + return prefixes +} + +func parseTrustedProxyList(raw string) (prefixes []netip.Prefix, invalid []string) { + for _, token := range strings.Split(raw, ",") { + item := strings.TrimSpace(token) + if item == "" { + continue + } + + var ( + p netip.Prefix + err error + ) + if strings.Contains(item, "/") { + p, err = netip.ParsePrefix(item) + } else { + var addr netip.Addr + addr, err = netip.ParseAddr(item) + if err == nil { + addr = addr.Unmap() + bits := 128 + if addr.Is4() { + bits = 32 + } + p = netip.PrefixFrom(addr, bits) + } + } + + if err != nil || !p.IsValid() { + invalid = append(invalid, item) + continue + } + + prefixes = append(prefixes, p.Masked()) + } + return prefixes, invalid +} + +func hostWithoutPort(hostport string) string { + hostport = strings.TrimSpace(hostport) + if hostport == "" { + return "" + } + if host, _, err := net.SplitHostPort(hostport); err == nil { + return host + } + if strings.HasPrefix(hostport, "[") && strings.HasSuffix(hostport, "]") { + return strings.Trim(hostport, "[]") + } + parts := strings.Split(hostport, ":") + return parts[0] +} diff --git a/backend/internal/handler/admin/promo_handler.go b/backend/internal/handler/admin/promo_handler.go new file mode 100644 index 00000000..3eafa380 --- /dev/null +++ b/backend/internal/handler/admin/promo_handler.go @@ -0,0 +1,209 @@ +package admin + 
+import ( + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +// PromoHandler handles admin promo code management +type PromoHandler struct { + promoService *service.PromoService +} + +// NewPromoHandler creates a new admin promo handler +func NewPromoHandler(promoService *service.PromoService) *PromoHandler { + return &PromoHandler{ + promoService: promoService, + } +} + +// CreatePromoCodeRequest represents create promo code request +type CreatePromoCodeRequest struct { + Code string `json:"code"` // 可选,为空则自动生成 + BonusAmount float64 `json:"bonus_amount" binding:"required,min=0"` // 赠送余额 + MaxUses int `json:"max_uses" binding:"min=0"` // 最大使用次数,0=无限 + ExpiresAt *int64 `json:"expires_at"` // 过期时间戳(秒) + Notes string `json:"notes"` // 备注 +} + +// UpdatePromoCodeRequest represents update promo code request +type UpdatePromoCodeRequest struct { + Code *string `json:"code"` + BonusAmount *float64 `json:"bonus_amount" binding:"omitempty,min=0"` + MaxUses *int `json:"max_uses" binding:"omitempty,min=0"` + Status *string `json:"status" binding:"omitempty,oneof=active disabled"` + ExpiresAt *int64 `json:"expires_at"` + Notes *string `json:"notes"` +} + +// List handles listing all promo codes with pagination +// GET /api/v1/admin/promo-codes +func (h *PromoHandler) List(c *gin.Context) { + page, pageSize := response.ParsePagination(c) + status := c.Query("status") + search := strings.TrimSpace(c.Query("search")) + if len(search) > 100 { + search = search[:100] + } + + params := pagination.PaginationParams{ + Page: page, + PageSize: pageSize, + } + + codes, paginationResult, err := h.promoService.List(c.Request.Context(), params, status, search) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.PromoCode, 0, 
len(codes)) + for i := range codes { + out = append(out, *dto.PromoCodeFromService(&codes[i])) + } + response.Paginated(c, out, paginationResult.Total, page, pageSize) +} + +// GetByID handles getting a promo code by ID +// GET /api/v1/admin/promo-codes/:id +func (h *PromoHandler) GetByID(c *gin.Context) { + codeID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid promo code ID") + return + } + + code, err := h.promoService.GetByID(c.Request.Context(), codeID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.PromoCodeFromService(code)) +} + +// Create handles creating a new promo code +// POST /api/v1/admin/promo-codes +func (h *PromoHandler) Create(c *gin.Context) { + var req CreatePromoCodeRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + input := &service.CreatePromoCodeInput{ + Code: req.Code, + BonusAmount: req.BonusAmount, + MaxUses: req.MaxUses, + Notes: req.Notes, + } + + if req.ExpiresAt != nil { + t := time.Unix(*req.ExpiresAt, 0) + input.ExpiresAt = &t + } + + code, err := h.promoService.Create(c.Request.Context(), input) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.PromoCodeFromService(code)) +} + +// Update handles updating a promo code +// PUT /api/v1/admin/promo-codes/:id +func (h *PromoHandler) Update(c *gin.Context) { + codeID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid promo code ID") + return + } + + var req UpdatePromoCodeRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + input := &service.UpdatePromoCodeInput{ + Code: req.Code, + BonusAmount: req.BonusAmount, + MaxUses: req.MaxUses, + Status: req.Status, + Notes: req.Notes, + } + + if req.ExpiresAt != nil { + if *req.ExpiresAt == 0 { + // 0 表示清除过期时间 
+ input.ExpiresAt = nil + } else { + t := time.Unix(*req.ExpiresAt, 0) + input.ExpiresAt = &t + } + } + + code, err := h.promoService.Update(c.Request.Context(), codeID, input) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.PromoCodeFromService(code)) +} + +// Delete handles deleting a promo code +// DELETE /api/v1/admin/promo-codes/:id +func (h *PromoHandler) Delete(c *gin.Context) { + codeID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid promo code ID") + return + } + + err = h.promoService.Delete(c.Request.Context(), codeID) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"message": "Promo code deleted successfully"}) +} + +// GetUsages handles getting usage records for a promo code +// GET /api/v1/admin/promo-codes/:id/usages +func (h *PromoHandler) GetUsages(c *gin.Context) { + codeID, err := strconv.ParseInt(c.Param("id"), 10, 64) + if err != nil { + response.BadRequest(c, "Invalid promo code ID") + return + } + + page, pageSize := response.ParsePagination(c) + params := pagination.PaginationParams{ + Page: page, + PageSize: pageSize, + } + + usages, paginationResult, err := h.promoService.ListUsages(c.Request.Context(), codeID, params) + if err != nil { + response.ErrorFrom(c, err) + return + } + + out := make([]dto.PromoCodeUsage, 0, len(usages)) + for i := range usages { + out = append(out, *dto.PromoCodeUsageFromService(&usages[i])) + } + response.Paginated(c, out, paginationResult.Total, page, pageSize) +} diff --git a/backend/internal/handler/admin/proxy_handler.go b/backend/internal/handler/admin/proxy_handler.go index 437e9300..a6758f69 100644 --- a/backend/internal/handler/admin/proxy_handler.go +++ b/backend/internal/handler/admin/proxy_handler.go @@ -196,6 +196,28 @@ func (h *ProxyHandler) Delete(c *gin.Context) { response.Success(c, gin.H{"message": "Proxy deleted successfully"}) } +// BatchDelete handles batch 
deleting proxies +// POST /api/v1/admin/proxies/batch-delete +func (h *ProxyHandler) BatchDelete(c *gin.Context) { + type BatchDeleteRequest struct { + IDs []int64 `json:"ids" binding:"required,min=1"` + } + + var req BatchDeleteRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + result, err := h.adminService.BatchDeleteProxies(c.Request.Context(), req.IDs) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, result) +} + // Test handles testing proxy connectivity // POST /api/v1/admin/proxies/:id/test func (h *ProxyHandler) Test(c *gin.Context) { @@ -243,19 +265,17 @@ func (h *ProxyHandler) GetProxyAccounts(c *gin.Context) { return } - page, pageSize := response.ParsePagination(c) - - accounts, total, err := h.adminService.GetProxyAccounts(c.Request.Context(), proxyID, page, pageSize) + accounts, err := h.adminService.GetProxyAccounts(c.Request.Context(), proxyID) if err != nil { response.ErrorFrom(c, err) return } - out := make([]dto.Account, 0, len(accounts)) + out := make([]dto.ProxyAccountSummary, 0, len(accounts)) for i := range accounts { - out = append(out, *dto.AccountFromService(&accounts[i])) + out = append(out, *dto.ProxyAccountSummaryFromService(&accounts[i])) } - response.Paginated(c, out, total, page, pageSize) + response.Success(c, out) } // BatchCreateProxyItem represents a single proxy in batch create request diff --git a/backend/internal/handler/admin/setting_handler.go b/backend/internal/handler/admin/setting_handler.go index d95a8980..6666ce4e 100644 --- a/backend/internal/handler/admin/setting_handler.go +++ b/backend/internal/handler/admin/setting_handler.go @@ -19,14 +19,16 @@ type SettingHandler struct { settingService *service.SettingService emailService *service.EmailService turnstileService *service.TurnstileService + opsService *service.OpsService } // NewSettingHandler 创建系统设置处理器 -func NewSettingHandler(settingService 
*service.SettingService, emailService *service.EmailService, turnstileService *service.TurnstileService) *SettingHandler { +func NewSettingHandler(settingService *service.SettingService, emailService *service.EmailService, turnstileService *service.TurnstileService, opsService *service.OpsService) *SettingHandler { return &SettingHandler{ settingService: settingService, emailService: emailService, turnstileService: turnstileService, + opsService: opsService, } } @@ -39,6 +41,9 @@ func (h *SettingHandler) GetSettings(c *gin.Context) { return } + // Check if ops monitoring is enabled (respects config.ops.enabled) + opsEnabled := h.opsService != nil && h.opsService.IsMonitoringEnabled(c.Request.Context()) + response.Success(c, dto.SystemSettings{ RegistrationEnabled: settings.RegistrationEnabled, EmailVerifyEnabled: settings.EmailVerifyEnabled, @@ -62,6 +67,7 @@ func (h *SettingHandler) GetSettings(c *gin.Context) { APIBaseURL: settings.APIBaseURL, ContactInfo: settings.ContactInfo, DocURL: settings.DocURL, + HomeContent: settings.HomeContent, DefaultConcurrency: settings.DefaultConcurrency, DefaultBalance: settings.DefaultBalance, EnableModelFallback: settings.EnableModelFallback, @@ -71,6 +77,10 @@ func (h *SettingHandler) GetSettings(c *gin.Context) { FallbackModelAntigravity: settings.FallbackModelAntigravity, EnableIdentityPatch: settings.EnableIdentityPatch, IdentityPatchPrompt: settings.IdentityPatchPrompt, + OpsMonitoringEnabled: opsEnabled && settings.OpsMonitoringEnabled, + OpsRealtimeMonitoringEnabled: settings.OpsRealtimeMonitoringEnabled, + OpsQueryModeDefault: settings.OpsQueryModeDefault, + OpsMetricsIntervalSeconds: settings.OpsMetricsIntervalSeconds, }) } @@ -94,7 +104,7 @@ type UpdateSettingsRequest struct { TurnstileSiteKey string `json:"turnstile_site_key"` TurnstileSecretKey string `json:"turnstile_secret_key"` - // LinuxDo Connect OAuth 登录(终端用户 SSO) + // LinuxDo Connect OAuth 登录 LinuxDoConnectEnabled bool `json:"linuxdo_connect_enabled"` 
LinuxDoConnectClientID string `json:"linuxdo_connect_client_id"` LinuxDoConnectClientSecret string `json:"linuxdo_connect_client_secret"` @@ -107,6 +117,7 @@ type UpdateSettingsRequest struct { APIBaseURL string `json:"api_base_url"` ContactInfo string `json:"contact_info"` DocURL string `json:"doc_url"` + HomeContent string `json:"home_content"` // 默认配置 DefaultConcurrency int `json:"default_concurrency"` @@ -122,6 +133,12 @@ type UpdateSettingsRequest struct { // Identity patch configuration (Claude -> Gemini) EnableIdentityPatch bool `json:"enable_identity_patch"` IdentityPatchPrompt string `json:"identity_patch_prompt"` + + // Ops monitoring (vNext) + OpsMonitoringEnabled *bool `json:"ops_monitoring_enabled"` + OpsRealtimeMonitoringEnabled *bool `json:"ops_realtime_monitoring_enabled"` + OpsQueryModeDefault *string `json:"ops_query_mode_default"` + OpsMetricsIntervalSeconds *int `json:"ops_metrics_interval_seconds"` } // UpdateSettings 更新系统设置 @@ -206,6 +223,18 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { } } + // Ops metrics collector interval validation (seconds). 
+ if req.OpsMetricsIntervalSeconds != nil { + v := *req.OpsMetricsIntervalSeconds + if v < 60 { + v = 60 + } + if v > 3600 { + v = 3600 + } + req.OpsMetricsIntervalSeconds = &v + } + settings := &service.SystemSettings{ RegistrationEnabled: req.RegistrationEnabled, EmailVerifyEnabled: req.EmailVerifyEnabled, @@ -229,6 +258,7 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { APIBaseURL: req.APIBaseURL, ContactInfo: req.ContactInfo, DocURL: req.DocURL, + HomeContent: req.HomeContent, DefaultConcurrency: req.DefaultConcurrency, DefaultBalance: req.DefaultBalance, EnableModelFallback: req.EnableModelFallback, @@ -238,6 +268,30 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { FallbackModelAntigravity: req.FallbackModelAntigravity, EnableIdentityPatch: req.EnableIdentityPatch, IdentityPatchPrompt: req.IdentityPatchPrompt, + OpsMonitoringEnabled: func() bool { + if req.OpsMonitoringEnabled != nil { + return *req.OpsMonitoringEnabled + } + return previousSettings.OpsMonitoringEnabled + }(), + OpsRealtimeMonitoringEnabled: func() bool { + if req.OpsRealtimeMonitoringEnabled != nil { + return *req.OpsRealtimeMonitoringEnabled + } + return previousSettings.OpsRealtimeMonitoringEnabled + }(), + OpsQueryModeDefault: func() string { + if req.OpsQueryModeDefault != nil { + return *req.OpsQueryModeDefault + } + return previousSettings.OpsQueryModeDefault + }(), + OpsMetricsIntervalSeconds: func() int { + if req.OpsMetricsIntervalSeconds != nil { + return *req.OpsMetricsIntervalSeconds + } + return previousSettings.OpsMetricsIntervalSeconds + }(), } if err := h.settingService.UpdateSettings(c.Request.Context(), settings); err != nil { @@ -277,6 +331,7 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { APIBaseURL: updatedSettings.APIBaseURL, ContactInfo: updatedSettings.ContactInfo, DocURL: updatedSettings.DocURL, + HomeContent: updatedSettings.HomeContent, DefaultConcurrency: updatedSettings.DefaultConcurrency, DefaultBalance: 
updatedSettings.DefaultBalance, EnableModelFallback: updatedSettings.EnableModelFallback, @@ -286,6 +341,10 @@ func (h *SettingHandler) UpdateSettings(c *gin.Context) { FallbackModelAntigravity: updatedSettings.FallbackModelAntigravity, EnableIdentityPatch: updatedSettings.EnableIdentityPatch, IdentityPatchPrompt: updatedSettings.IdentityPatchPrompt, + OpsMonitoringEnabled: updatedSettings.OpsMonitoringEnabled, + OpsRealtimeMonitoringEnabled: updatedSettings.OpsRealtimeMonitoringEnabled, + OpsQueryModeDefault: updatedSettings.OpsQueryModeDefault, + OpsMetricsIntervalSeconds: updatedSettings.OpsMetricsIntervalSeconds, }) } @@ -377,6 +436,9 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings, if before.DocURL != after.DocURL { changed = append(changed, "doc_url") } + if before.HomeContent != after.HomeContent { + changed = append(changed, "home_content") + } if before.DefaultConcurrency != after.DefaultConcurrency { changed = append(changed, "default_concurrency") } @@ -404,6 +466,18 @@ func diffSettings(before *service.SystemSettings, after *service.SystemSettings, if before.IdentityPatchPrompt != after.IdentityPatchPrompt { changed = append(changed, "identity_patch_prompt") } + if before.OpsMonitoringEnabled != after.OpsMonitoringEnabled { + changed = append(changed, "ops_monitoring_enabled") + } + if before.OpsRealtimeMonitoringEnabled != after.OpsRealtimeMonitoringEnabled { + changed = append(changed, "ops_realtime_monitoring_enabled") + } + if before.OpsQueryModeDefault != after.OpsQueryModeDefault { + changed = append(changed, "ops_query_mode_default") + } + if before.OpsMetricsIntervalSeconds != after.OpsMetricsIntervalSeconds { + changed = append(changed, "ops_metrics_interval_seconds") + } return changed } @@ -580,3 +654,68 @@ func (h *SettingHandler) DeleteAdminAPIKey(c *gin.Context) { response.Success(c, gin.H{"message": "Admin API key deleted"}) } + +// GetStreamTimeoutSettings 获取流超时处理配置 +// GET 
/api/v1/admin/settings/stream-timeout +func (h *SettingHandler) GetStreamTimeoutSettings(c *gin.Context) { + settings, err := h.settingService.GetStreamTimeoutSettings(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.StreamTimeoutSettings{ + Enabled: settings.Enabled, + Action: settings.Action, + TempUnschedMinutes: settings.TempUnschedMinutes, + ThresholdCount: settings.ThresholdCount, + ThresholdWindowMinutes: settings.ThresholdWindowMinutes, + }) +} + +// UpdateStreamTimeoutSettingsRequest 更新流超时配置请求 +type UpdateStreamTimeoutSettingsRequest struct { + Enabled bool `json:"enabled"` + Action string `json:"action"` + TempUnschedMinutes int `json:"temp_unsched_minutes"` + ThresholdCount int `json:"threshold_count"` + ThresholdWindowMinutes int `json:"threshold_window_minutes"` +} + +// UpdateStreamTimeoutSettings 更新流超时处理配置 +// PUT /api/v1/admin/settings/stream-timeout +func (h *SettingHandler) UpdateStreamTimeoutSettings(c *gin.Context) { + var req UpdateStreamTimeoutSettingsRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + settings := &service.StreamTimeoutSettings{ + Enabled: req.Enabled, + Action: req.Action, + TempUnschedMinutes: req.TempUnschedMinutes, + ThresholdCount: req.ThresholdCount, + ThresholdWindowMinutes: req.ThresholdWindowMinutes, + } + + if err := h.settingService.SetStreamTimeoutSettings(c.Request.Context(), settings); err != nil { + response.BadRequest(c, err.Error()) + return + } + + // 重新获取设置返回 + updatedSettings, err := h.settingService.GetStreamTimeoutSettings(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, dto.StreamTimeoutSettings{ + Enabled: updatedSettings.Enabled, + Action: updatedSettings.Action, + TempUnschedMinutes: updatedSettings.TempUnschedMinutes, + ThresholdCount: updatedSettings.ThresholdCount, + ThresholdWindowMinutes: 
updatedSettings.ThresholdWindowMinutes, + }) +} diff --git a/backend/internal/handler/api_key_handler.go b/backend/internal/handler/api_key_handler.go index 09772f22..52dc6911 100644 --- a/backend/internal/handler/api_key_handler.go +++ b/backend/internal/handler/api_key_handler.go @@ -27,16 +27,20 @@ func NewAPIKeyHandler(apiKeyService *service.APIKeyService) *APIKeyHandler { // CreateAPIKeyRequest represents the create API key request payload type CreateAPIKeyRequest struct { - Name string `json:"name" binding:"required"` - GroupID *int64 `json:"group_id"` // nullable - CustomKey *string `json:"custom_key"` // 可选的自定义key + Name string `json:"name" binding:"required"` + GroupID *int64 `json:"group_id"` // nullable + CustomKey *string `json:"custom_key"` // 可选的自定义key + IPWhitelist []string `json:"ip_whitelist"` // IP 白名单 + IPBlacklist []string `json:"ip_blacklist"` // IP 黑名单 } // UpdateAPIKeyRequest represents the update API key request payload type UpdateAPIKeyRequest struct { - Name string `json:"name"` - GroupID *int64 `json:"group_id"` - Status string `json:"status" binding:"omitempty,oneof=active inactive"` + Name string `json:"name"` + GroupID *int64 `json:"group_id"` + Status string `json:"status" binding:"omitempty,oneof=active inactive"` + IPWhitelist []string `json:"ip_whitelist"` // IP 白名单 + IPBlacklist []string `json:"ip_blacklist"` // IP 黑名单 } // List handles listing user's API keys with pagination @@ -110,9 +114,11 @@ func (h *APIKeyHandler) Create(c *gin.Context) { } svcReq := service.CreateAPIKeyRequest{ - Name: req.Name, - GroupID: req.GroupID, - CustomKey: req.CustomKey, + Name: req.Name, + GroupID: req.GroupID, + CustomKey: req.CustomKey, + IPWhitelist: req.IPWhitelist, + IPBlacklist: req.IPBlacklist, } key, err := h.apiKeyService.Create(c.Request.Context(), subject.UserID, svcReq) if err != nil { @@ -144,7 +150,10 @@ func (h *APIKeyHandler) Update(c *gin.Context) { return } - svcReq := service.UpdateAPIKeyRequest{} + svcReq := 
service.UpdateAPIKeyRequest{ + IPWhitelist: req.IPWhitelist, + IPBlacklist: req.IPBlacklist, + } if req.Name != "" { svcReq.Name = &req.Name } diff --git a/backend/internal/handler/auth_handler.go b/backend/internal/handler/auth_handler.go index 8463367e..882e4cf2 100644 --- a/backend/internal/handler/auth_handler.go +++ b/backend/internal/handler/auth_handler.go @@ -3,6 +3,7 @@ package handler import ( "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/handler/dto" + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/pkg/response" middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/Wei-Shaw/sub2api/internal/service" @@ -12,19 +13,21 @@ import ( // AuthHandler handles authentication-related requests type AuthHandler struct { - cfg *config.Config - authService *service.AuthService - userService *service.UserService - settingSvc *service.SettingService + cfg *config.Config + authService *service.AuthService + userService *service.UserService + settingSvc *service.SettingService + promoService *service.PromoService } // NewAuthHandler creates a new AuthHandler -func NewAuthHandler(cfg *config.Config, authService *service.AuthService, userService *service.UserService, settingService *service.SettingService) *AuthHandler { +func NewAuthHandler(cfg *config.Config, authService *service.AuthService, userService *service.UserService, settingService *service.SettingService, promoService *service.PromoService) *AuthHandler { return &AuthHandler{ - cfg: cfg, - authService: authService, - userService: userService, - settingSvc: settingService, + cfg: cfg, + authService: authService, + userService: userService, + settingSvc: settingService, + promoService: promoService, } } @@ -34,6 +37,7 @@ type RegisterRequest struct { Password string `json:"password" binding:"required,min=6"` VerifyCode string `json:"verify_code"` TurnstileToken string `json:"turnstile_token"` + PromoCode string 
`json:"promo_code"` // 注册优惠码 } // SendVerifyCodeRequest 发送验证码请求 @@ -73,13 +77,13 @@ func (h *AuthHandler) Register(c *gin.Context) { // Turnstile 验证(当提供了邮箱验证码时跳过,因为发送验证码时已验证过) if req.VerifyCode == "" { - if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, c.ClientIP()); err != nil { + if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, ip.GetClientIP(c)); err != nil { response.ErrorFrom(c, err) return } } - token, user, err := h.authService.RegisterWithVerification(c.Request.Context(), req.Email, req.Password, req.VerifyCode) + token, user, err := h.authService.RegisterWithVerification(c.Request.Context(), req.Email, req.Password, req.VerifyCode, req.PromoCode) if err != nil { response.ErrorFrom(c, err) return @@ -102,7 +106,7 @@ func (h *AuthHandler) SendVerifyCode(c *gin.Context) { } // Turnstile 验证 - if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, c.ClientIP()); err != nil { + if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, ip.GetClientIP(c)); err != nil { response.ErrorFrom(c, err) return } @@ -129,7 +133,7 @@ func (h *AuthHandler) Login(c *gin.Context) { } // Turnstile 验证 - if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, c.ClientIP()); err != nil { + if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, ip.GetClientIP(c)); err != nil { response.ErrorFrom(c, err) return } @@ -174,3 +178,63 @@ func (h *AuthHandler) GetCurrentUser(c *gin.Context) { response.Success(c, UserResponse{User: dto.UserFromService(user), RunMode: runMode}) } + +// ValidatePromoCodeRequest 验证优惠码请求 +type ValidatePromoCodeRequest struct { + Code string `json:"code" binding:"required"` +} + +// ValidatePromoCodeResponse 验证优惠码响应 +type ValidatePromoCodeResponse struct { + Valid bool `json:"valid"` + BonusAmount float64 `json:"bonus_amount,omitempty"` + ErrorCode string `json:"error_code,omitempty"` + Message 
string `json:"message,omitempty"` +} + +// ValidatePromoCode 验证优惠码(公开接口,注册前调用) +// POST /api/v1/auth/validate-promo-code +func (h *AuthHandler) ValidatePromoCode(c *gin.Context) { + var req ValidatePromoCodeRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + promoCode, err := h.promoService.ValidatePromoCode(c.Request.Context(), req.Code) + if err != nil { + // 根据错误类型返回对应的错误码 + errorCode := "PROMO_CODE_INVALID" + switch err { + case service.ErrPromoCodeNotFound: + errorCode = "PROMO_CODE_NOT_FOUND" + case service.ErrPromoCodeExpired: + errorCode = "PROMO_CODE_EXPIRED" + case service.ErrPromoCodeDisabled: + errorCode = "PROMO_CODE_DISABLED" + case service.ErrPromoCodeMaxUsed: + errorCode = "PROMO_CODE_MAX_USED" + case service.ErrPromoCodeAlreadyUsed: + errorCode = "PROMO_CODE_ALREADY_USED" + } + + response.Success(c, ValidatePromoCodeResponse{ + Valid: false, + ErrorCode: errorCode, + }) + return + } + + if promoCode == nil { + response.Success(c, ValidatePromoCodeResponse{ + Valid: false, + ErrorCode: "PROMO_CODE_INVALID", + }) + return + } + + response.Success(c, ValidatePromoCodeResponse{ + Valid: true, + BonusAmount: promoCode.BonusAmount, + }) +} diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go index 9a672064..f5bdd008 100644 --- a/backend/internal/handler/dto/mappers.go +++ b/backend/internal/handler/dto/mappers.go @@ -53,16 +53,18 @@ func APIKeyFromService(k *service.APIKey) *APIKey { return nil } return &APIKey{ - ID: k.ID, - UserID: k.UserID, - Key: k.Key, - Name: k.Name, - GroupID: k.GroupID, - Status: k.Status, - CreatedAt: k.CreatedAt, - UpdatedAt: k.UpdatedAt, - User: UserFromServiceShallow(k.User), - Group: GroupFromServiceShallow(k.Group), + ID: k.ID, + UserID: k.UserID, + Key: k.Key, + Name: k.Name, + GroupID: k.GroupID, + Status: k.Status, + IPWhitelist: k.IPWhitelist, + IPBlacklist: k.IPBlacklist, + CreatedAt: k.CreatedAt, + 
UpdatedAt: k.UpdatedAt, + User: UserFromServiceShallow(k.User), + Group: GroupFromServiceShallow(k.Group), } } @@ -71,25 +73,27 @@ func GroupFromServiceShallow(g *service.Group) *Group { return nil } return &Group{ - ID: g.ID, - Name: g.Name, - Description: g.Description, - Platform: g.Platform, - RateMultiplier: g.RateMultiplier, - IsExclusive: g.IsExclusive, - Status: g.Status, - SubscriptionType: g.SubscriptionType, - DailyLimitUSD: g.DailyLimitUSD, - WeeklyLimitUSD: g.WeeklyLimitUSD, - MonthlyLimitUSD: g.MonthlyLimitUSD, - ImagePrice1K: g.ImagePrice1K, - ImagePrice2K: g.ImagePrice2K, - ImagePrice4K: g.ImagePrice4K, - ClaudeCodeOnly: g.ClaudeCodeOnly, - FallbackGroupID: g.FallbackGroupID, - CreatedAt: g.CreatedAt, - UpdatedAt: g.UpdatedAt, - AccountCount: g.AccountCount, + ID: g.ID, + Name: g.Name, + Description: g.Description, + Platform: g.Platform, + RateMultiplier: g.RateMultiplier, + IsExclusive: g.IsExclusive, + Status: g.Status, + SubscriptionType: g.SubscriptionType, + DailyLimitUSD: g.DailyLimitUSD, + WeeklyLimitUSD: g.WeeklyLimitUSD, + MonthlyLimitUSD: g.MonthlyLimitUSD, + ImagePrice1K: g.ImagePrice1K, + ImagePrice2K: g.ImagePrice2K, + ImagePrice4K: g.ImagePrice4K, + ClaudeCodeOnly: g.ClaudeCodeOnly, + FallbackGroupID: g.FallbackGroupID, + ModelRouting: g.ModelRouting, + ModelRoutingEnabled: g.ModelRoutingEnabled, + CreatedAt: g.CreatedAt, + UpdatedAt: g.UpdatedAt, + AccountCount: g.AccountCount, } } @@ -112,7 +116,7 @@ func AccountFromServiceShallow(a *service.Account) *Account { if a == nil { return nil } - return &Account{ + out := &Account{ ID: a.ID, Name: a.Name, Notes: a.Notes, @@ -123,6 +127,7 @@ func AccountFromServiceShallow(a *service.Account) *Account { ProxyID: a.ProxyID, Concurrency: a.Concurrency, Priority: a.Priority, + RateMultiplier: a.BillingRateMultiplier(), Status: a.Status, ErrorMessage: a.ErrorMessage, LastUsedAt: a.LastUsedAt, @@ -141,6 +146,24 @@ func AccountFromServiceShallow(a *service.Account) *Account { SessionWindowStatus: 
a.SessionWindowStatus, GroupIDs: a.GroupIDs, } + + // 提取 5h 窗口费用控制和会话数量控制配置(仅 Anthropic OAuth/SetupToken 账号有效) + if a.IsAnthropicOAuthOrSetupToken() { + if limit := a.GetWindowCostLimit(); limit > 0 { + out.WindowCostLimit = &limit + } + if reserve := a.GetWindowCostStickyReserve(); reserve > 0 { + out.WindowCostStickyReserve = &reserve + } + if maxSessions := a.GetMaxSessions(); maxSessions > 0 { + out.MaxSessions = &maxSessions + } + if idleTimeout := a.GetSessionIdleTimeoutMinutes(); idleTimeout > 0 { + out.SessionIdleTimeoutMin = &idleTimeout + } + } + + return out } func AccountFromService(a *service.Account) *Account { @@ -210,8 +233,29 @@ func ProxyWithAccountCountFromService(p *service.ProxyWithAccountCount) *ProxyWi return nil } return &ProxyWithAccountCount{ - Proxy: *ProxyFromService(&p.Proxy), - AccountCount: p.AccountCount, + Proxy: *ProxyFromService(&p.Proxy), + AccountCount: p.AccountCount, + LatencyMs: p.LatencyMs, + LatencyStatus: p.LatencyStatus, + LatencyMessage: p.LatencyMessage, + IPAddress: p.IPAddress, + Country: p.Country, + CountryCode: p.CountryCode, + Region: p.Region, + City: p.City, + } +} + +func ProxyAccountSummaryFromService(a *service.ProxyAccountSummary) *ProxyAccountSummary { + if a == nil { + return nil + } + return &ProxyAccountSummary{ + ID: a.ID, + Name: a.Name, + Platform: a.Platform, + Type: a.Type, + Notes: a.Notes, } } @@ -250,11 +294,12 @@ func AccountSummaryFromService(a *service.Account) *AccountSummary { // usageLogFromServiceBase is a helper that converts service UsageLog to DTO. // The account parameter allows caller to control what Account info is included. -func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary) *UsageLog { +// The includeIPAddress parameter controls whether to include the IP address (admin-only). 
+func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary, includeIPAddress bool) *UsageLog { if l == nil { return nil } - return &UsageLog{ + result := &UsageLog{ ID: l.ID, UserID: l.UserID, APIKeyID: l.APIKeyID, @@ -276,6 +321,7 @@ func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary) *Usag TotalCost: l.TotalCost, ActualCost: l.ActualCost, RateMultiplier: l.RateMultiplier, + AccountRateMultiplier: l.AccountRateMultiplier, BillingType: l.BillingType, Stream: l.Stream, DurationMs: l.DurationMs, @@ -290,21 +336,26 @@ func usageLogFromServiceBase(l *service.UsageLog, account *AccountSummary) *Usag Group: GroupFromServiceShallow(l.Group), Subscription: UserSubscriptionFromService(l.Subscription), } + // IP 地址仅对管理员可见 + if includeIPAddress { + result.IPAddress = l.IPAddress + } + return result } // UsageLogFromService converts a service UsageLog to DTO for regular users. -// It excludes Account details - users should not see account information. +// It excludes Account details and IP address - users should not see these. func UsageLogFromService(l *service.UsageLog) *UsageLog { - return usageLogFromServiceBase(l, nil) + return usageLogFromServiceBase(l, nil, false) } // UsageLogFromServiceAdmin converts a service UsageLog to DTO for admin users. -// It includes minimal Account info (ID, Name only). +// It includes minimal Account info (ID, Name only) and IP address. 
func UsageLogFromServiceAdmin(l *service.UsageLog) *UsageLog { if l == nil { return nil } - return usageLogFromServiceBase(l, AccountSummaryFromService(l.Account)) + return usageLogFromServiceBase(l, AccountSummaryFromService(l.Account), true) } func SettingFromService(s *service.Setting) *Setting { @@ -362,3 +413,35 @@ func BulkAssignResultFromService(r *service.BulkAssignResult) *BulkAssignResult Errors: r.Errors, } } + +func PromoCodeFromService(pc *service.PromoCode) *PromoCode { + if pc == nil { + return nil + } + return &PromoCode{ + ID: pc.ID, + Code: pc.Code, + BonusAmount: pc.BonusAmount, + MaxUses: pc.MaxUses, + UsedCount: pc.UsedCount, + Status: pc.Status, + ExpiresAt: pc.ExpiresAt, + Notes: pc.Notes, + CreatedAt: pc.CreatedAt, + UpdatedAt: pc.UpdatedAt, + } +} + +func PromoCodeUsageFromService(u *service.PromoCodeUsage) *PromoCodeUsage { + if u == nil { + return nil + } + return &PromoCodeUsage{ + ID: u.ID, + PromoCodeID: u.PromoCodeID, + UserID: u.UserID, + BonusAmount: u.BonusAmount, + UsedAt: u.UsedAt, + User: UserFromServiceShallow(u.User), + } +} diff --git a/backend/internal/handler/dto/settings.go b/backend/internal/handler/dto/settings.go index dab5eb75..81206def 100644 --- a/backend/internal/handler/dto/settings.go +++ b/backend/internal/handler/dto/settings.go @@ -28,6 +28,7 @@ type SystemSettings struct { APIBaseURL string `json:"api_base_url"` ContactInfo string `json:"contact_info"` DocURL string `json:"doc_url"` + HomeContent string `json:"home_content"` DefaultConcurrency int `json:"default_concurrency"` DefaultBalance float64 `json:"default_balance"` @@ -42,6 +43,12 @@ type SystemSettings struct { // Identity patch configuration (Claude -> Gemini) EnableIdentityPatch bool `json:"enable_identity_patch"` IdentityPatchPrompt string `json:"identity_patch_prompt"` + + // Ops monitoring (vNext) + OpsMonitoringEnabled bool `json:"ops_monitoring_enabled"` + OpsRealtimeMonitoringEnabled bool `json:"ops_realtime_monitoring_enabled"` + 
OpsQueryModeDefault string `json:"ops_query_mode_default"` + OpsMetricsIntervalSeconds int `json:"ops_metrics_interval_seconds"` } type PublicSettings struct { @@ -55,6 +62,16 @@ type PublicSettings struct { APIBaseURL string `json:"api_base_url"` ContactInfo string `json:"contact_info"` DocURL string `json:"doc_url"` + HomeContent string `json:"home_content"` LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"` Version string `json:"version"` } + +// StreamTimeoutSettings 流超时处理配置 DTO +type StreamTimeoutSettings struct { + Enabled bool `json:"enabled"` + Action string `json:"action"` + TempUnschedMinutes int `json:"temp_unsched_minutes"` + ThresholdCount int `json:"threshold_count"` + ThresholdWindowMinutes int `json:"threshold_window_minutes"` +} diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index 03f7080b..4519143c 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -20,14 +20,16 @@ type User struct { } type APIKey struct { - ID int64 `json:"id"` - UserID int64 `json:"user_id"` - Key string `json:"key"` - Name string `json:"name"` - GroupID *int64 `json:"group_id"` - Status string `json:"status"` - CreatedAt time.Time `json:"created_at"` - UpdatedAt time.Time `json:"updated_at"` + ID int64 `json:"id"` + UserID int64 `json:"user_id"` + Key string `json:"key"` + Name string `json:"name"` + GroupID *int64 `json:"group_id"` + Status string `json:"status"` + IPWhitelist []string `json:"ip_whitelist"` + IPBlacklist []string `json:"ip_blacklist"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` User *User `json:"user,omitempty"` Group *Group `json:"group,omitempty"` @@ -56,6 +58,10 @@ type Group struct { ClaudeCodeOnly bool `json:"claude_code_only"` FallbackGroupID *int64 `json:"fallback_group_id"` + // 模型路由配置(仅 anthropic 平台使用) + ModelRouting map[string][]int64 `json:"model_routing"` + ModelRoutingEnabled bool `json:"model_routing_enabled"` + 
CreatedAt time.Time `json:"created_at"` UpdatedAt time.Time `json:"updated_at"` @@ -74,6 +80,7 @@ type Account struct { ProxyID *int64 `json:"proxy_id"` Concurrency int `json:"concurrency"` Priority int `json:"priority"` + RateMultiplier float64 `json:"rate_multiplier"` Status string `json:"status"` ErrorMessage string `json:"error_message"` LastUsedAt *time.Time `json:"last_used_at"` @@ -95,6 +102,16 @@ type Account struct { SessionWindowEnd *time.Time `json:"session_window_end"` SessionWindowStatus string `json:"session_window_status"` + // 5h窗口费用控制(仅 Anthropic OAuth/SetupToken 账号有效) + // 从 extra 字段提取,方便前端显示和编辑 + WindowCostLimit *float64 `json:"window_cost_limit,omitempty"` + WindowCostStickyReserve *float64 `json:"window_cost_sticky_reserve,omitempty"` + + // 会话数量控制(仅 Anthropic OAuth/SetupToken 账号有效) + // 从 extra 字段提取,方便前端显示和编辑 + MaxSessions *int `json:"max_sessions,omitempty"` + SessionIdleTimeoutMin *int `json:"session_idle_timeout_minutes,omitempty"` + Proxy *Proxy `json:"proxy,omitempty"` AccountGroups []AccountGroup `json:"account_groups,omitempty"` @@ -127,7 +144,23 @@ type Proxy struct { type ProxyWithAccountCount struct { Proxy - AccountCount int64 `json:"account_count"` + AccountCount int64 `json:"account_count"` + LatencyMs *int64 `json:"latency_ms,omitempty"` + LatencyStatus string `json:"latency_status,omitempty"` + LatencyMessage string `json:"latency_message,omitempty"` + IPAddress string `json:"ip_address,omitempty"` + Country string `json:"country,omitempty"` + CountryCode string `json:"country_code,omitempty"` + Region string `json:"region,omitempty"` + City string `json:"city,omitempty"` +} + +type ProxyAccountSummary struct { + ID int64 `json:"id"` + Name string `json:"name"` + Platform string `json:"platform"` + Type string `json:"type"` + Notes *string `json:"notes,omitempty"` } type RedeemCode struct { @@ -167,13 +200,14 @@ type UsageLog struct { CacheCreation5mTokens int `json:"cache_creation_5m_tokens"` CacheCreation1hTokens int 
`json:"cache_creation_1h_tokens"` - InputCost float64 `json:"input_cost"` - OutputCost float64 `json:"output_cost"` - CacheCreationCost float64 `json:"cache_creation_cost"` - CacheReadCost float64 `json:"cache_read_cost"` - TotalCost float64 `json:"total_cost"` - ActualCost float64 `json:"actual_cost"` - RateMultiplier float64 `json:"rate_multiplier"` + InputCost float64 `json:"input_cost"` + OutputCost float64 `json:"output_cost"` + CacheCreationCost float64 `json:"cache_creation_cost"` + CacheReadCost float64 `json:"cache_read_cost"` + TotalCost float64 `json:"total_cost"` + ActualCost float64 `json:"actual_cost"` + RateMultiplier float64 `json:"rate_multiplier"` + AccountRateMultiplier *float64 `json:"account_rate_multiplier"` BillingType int8 `json:"billing_type"` Stream bool `json:"stream"` @@ -187,6 +221,9 @@ type UsageLog struct { // User-Agent UserAgent *string `json:"user_agent"` + // IP 地址(仅管理员可见) + IPAddress *string `json:"ip_address,omitempty"` + CreatedAt time.Time `json:"created_at"` User *User `json:"user,omitempty"` @@ -245,3 +282,28 @@ type BulkAssignResult struct { Subscriptions []UserSubscription `json:"subscriptions"` Errors []string `json:"errors"` } + +// PromoCode 注册优惠码 +type PromoCode struct { + ID int64 `json:"id"` + Code string `json:"code"` + BonusAmount float64 `json:"bonus_amount"` + MaxUses int `json:"max_uses"` + UsedCount int `json:"used_count"` + Status string `json:"status"` + ExpiresAt *time.Time `json:"expires_at"` + Notes string `json:"notes"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +// PromoCodeUsage 优惠码使用记录 +type PromoCodeUsage struct { + ID int64 `json:"id"` + PromoCodeID int64 `json:"promo_code_id"` + UserID int64 `json:"user_id"` + BonusAmount float64 `json:"bonus_amount"` + UsedAt time.Time `json:"used_at"` + + User *User `json:"user,omitempty"` +} diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 2cad9c40..6c8d9ebe 
100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -15,6 +15,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" "github.com/Wei-Shaw/sub2api/internal/pkg/claude" pkgerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/pkg/openai" middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/Wei-Shaw/sub2api/internal/service" @@ -100,6 +101,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) { return } + // 检查是否为 Claude Code 客户端,设置到 context 中 + SetClaudeCodeClientContext(c, body) + + setOpsRequestContext(c, "", false, body) + parsedReq, err := service.ParseGatewayRequest(body) if err != nil { h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body") @@ -108,8 +114,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { reqModel := parsedReq.Model reqStream := parsedReq.Stream - // 设置 Claude Code 客户端标识到 context(用于分组限制检查) - SetClaudeCodeClientContext(c, body) + setOpsRequestContext(c, reqModel, reqStream, body) // 验证 model 必填 if reqModel == "" { @@ -123,12 +128,10 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // 获取订阅信息(可能为nil)- 提前获取用于后续检查 subscription, _ := middleware2.GetSubscriptionFromContext(c) - // 获取 User-Agent - userAgent := c.Request.UserAgent() - // 0. 
检查wait队列是否已满 maxWait := service.CalculateMaxWait(subject.Concurrency) canWait, err := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), subject.UserID, maxWait) + waitCounted := false if err != nil { log.Printf("Increment wait count failed: %v", err) // On error, allow request to proceed @@ -136,8 +139,15 @@ func (h *GatewayHandler) Messages(c *gin.Context) { h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later") return } - // 确保在函数退出时减少wait计数 - defer h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID) + if err == nil && canWait { + waitCounted = true + } + // Ensure we decrement if we exit before acquiring the user slot. + defer func() { + if waitCounted { + h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID) + } + }() // 1. 首先获取用户并发槽位 userReleaseFunc, err := h.concurrencyHelper.AcquireUserSlotWithWait(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted) @@ -146,6 +156,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) { h.handleConcurrencyError(c, err, "user", streamStarted) return } + // User slot acquired: no longer waiting in the queue. 
+ if waitCounted { + h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID) + waitCounted = false + } // 在请求结束或 Context 取消时确保释放槽位,避免客户端断开造成泄漏 userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc) if userReleaseFunc != nil { @@ -182,7 +197,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { lastFailoverStatus := 0 for { - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs) + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs, "") // Gemini 不使用会话限制 if err != nil { if len(failedAccountIDs) == 0 { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) @@ -192,6 +207,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { return } account := selection.Account + setOpsSelectedAccount(c, account.ID) // 检查预热请求拦截(在账号选择后、转发前检查) if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) { @@ -208,12 +224,12 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // 3. 
获取账号并发槽位 accountReleaseFunc := selection.ReleaseFunc - var accountWaitRelease func() if !selection.Acquired { if selection.WaitPlan == nil { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) return } + accountWaitCounted := false canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) if err != nil { log.Printf("Increment account wait count failed: %v", err) @@ -221,12 +237,16 @@ func (h *GatewayHandler) Messages(c *gin.Context) { log.Printf("Account wait queue full: account=%d", account.ID) h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) return - } else { - // Only set release function if increment succeeded - accountWaitRelease = func() { + } + if err == nil && canWait { + accountWaitCounted = true + } + // Ensure the wait counter is decremented if we exit before acquiring the slot. + defer func() { + if accountWaitCounted { h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) } - } + }() accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( c, @@ -237,20 +257,21 @@ func (h *GatewayHandler) Messages(c *gin.Context) { &streamStarted, ) if err != nil { - if accountWaitRelease != nil { - accountWaitRelease() - } log.Printf("Account concurrency acquire failed: %v", err) h.handleConcurrencyError(c, err, "account", streamStarted) return } + // Slot acquired: no longer waiting in queue. 
+ if accountWaitCounted { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + accountWaitCounted = false + } if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil { log.Printf("Bind sticky session failed: %v", err) } } // 账号槽位/等待计数需要在超时或断开时安全回收 accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc) - accountWaitRelease = wrapReleaseOnDone(c.Request.Context(), accountWaitRelease) // 转发请求 - 根据账号平台分流 var result *service.ForwardResult @@ -262,19 +283,15 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } - if accountWaitRelease != nil { - accountWaitRelease() - } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { failedAccountIDs[account.ID] = struct{}{} + lastFailoverStatus = failoverErr.StatusCode if switchCount >= maxAccountSwitches { - lastFailoverStatus = failoverErr.StatusCode h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) return } - lastFailoverStatus = failoverErr.StatusCode switchCount++ log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) continue @@ -284,8 +301,12 @@ func (h *GatewayHandler) Messages(c *gin.Context) { return } + // 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context) + userAgent := c.GetHeader("User-Agent") + clientIP := ip.GetClientIP(c) + // 异步记录使用量(subscription已在函数开头获取) - go func(result *service.ForwardResult, usedAccount *service.Account, ua string) { + go func(result *service.ForwardResult, usedAccount *service.Account, ua, clientIP string) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{ @@ -295,10 +316,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) { Account: usedAccount, Subscription: subscription, 
UserAgent: ua, + IPAddress: clientIP, }); err != nil { log.Printf("Record usage failed: %v", err) } - }(result, account, userAgent) + }(result, account, userAgent, clientIP) return } } @@ -310,7 +332,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { for { // 选择支持该模型的账号 - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs) + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, reqModel, failedAccountIDs, parsedReq.MetadataUserID) if err != nil { if len(failedAccountIDs) == 0 { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error(), streamStarted) @@ -320,6 +342,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { return } account := selection.Account + setOpsSelectedAccount(c, account.ID) // 检查预热请求拦截(在账号选择后、转发前检查) if account.IsInterceptWarmupEnabled() && isWarmupRequest(body) { @@ -336,12 +359,12 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // 3. 
获取账号并发槽位 accountReleaseFunc := selection.ReleaseFunc - var accountWaitRelease func() if !selection.Acquired { if selection.WaitPlan == nil { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) return } + accountWaitCounted := false canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) if err != nil { log.Printf("Increment account wait count failed: %v", err) @@ -349,12 +372,15 @@ func (h *GatewayHandler) Messages(c *gin.Context) { log.Printf("Account wait queue full: account=%d", account.ID) h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) return - } else { - // Only set release function if increment succeeded - accountWaitRelease = func() { + } + if err == nil && canWait { + accountWaitCounted = true + } + defer func() { + if accountWaitCounted { h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) } - } + }() accountReleaseFunc, err = h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( c, @@ -365,20 +391,20 @@ func (h *GatewayHandler) Messages(c *gin.Context) { &streamStarted, ) if err != nil { - if accountWaitRelease != nil { - accountWaitRelease() - } log.Printf("Account concurrency acquire failed: %v", err) h.handleConcurrencyError(c, err, "account", streamStarted) return } + if accountWaitCounted { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + accountWaitCounted = false + } if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil { log.Printf("Bind sticky session failed: %v", err) } } // 账号槽位/等待计数需要在超时或断开时安全回收 accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc) - accountWaitRelease = wrapReleaseOnDone(c.Request.Context(), accountWaitRelease) // 转发请求 - 根据账号平台分流 var result 
*service.ForwardResult @@ -390,19 +416,15 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } - if accountWaitRelease != nil { - accountWaitRelease() - } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { failedAccountIDs[account.ID] = struct{}{} + lastFailoverStatus = failoverErr.StatusCode if switchCount >= maxAccountSwitches { - lastFailoverStatus = failoverErr.StatusCode h.handleFailoverExhausted(c, lastFailoverStatus, streamStarted) return } - lastFailoverStatus = failoverErr.StatusCode switchCount++ log.Printf("Account %d: upstream error %d, switching account %d/%d", account.ID, failoverErr.StatusCode, switchCount, maxAccountSwitches) continue @@ -412,8 +434,12 @@ func (h *GatewayHandler) Messages(c *gin.Context) { return } + // 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context) + userAgent := c.GetHeader("User-Agent") + clientIP := ip.GetClientIP(c) + // 异步记录使用量(subscription已在函数开头获取) - go func(result *service.ForwardResult, usedAccount *service.Account, ua string) { + go func(result *service.ForwardResult, usedAccount *service.Account, ua, clientIP string) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{ @@ -423,10 +449,11 @@ func (h *GatewayHandler) Messages(c *gin.Context) { Account: usedAccount, Subscription: subscription, UserAgent: ua, + IPAddress: clientIP, }); err != nil { log.Printf("Record usage failed: %v", err) } - }(result, account, userAgent) + }(result, account, userAgent, clientIP) return } } @@ -692,21 +719,22 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) { return } + setOpsRequestContext(c, "", false, body) + parsedReq, err := service.ParseGatewayRequest(body) if err != nil { h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body") return } - // 设置 Claude Code 客户端标识到 context(用于分组限制检查) 
- SetClaudeCodeClientContext(c, body) - // 验证 model 必填 if parsedReq.Model == "" { h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "model is required") return } + setOpsRequestContext(c, parsedReq.Model, parsedReq.Stream, body) + // 获取订阅信息(可能为nil) subscription, _ := middleware2.GetSubscriptionFromContext(c) @@ -727,6 +755,7 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) { h.errorResponse(c, http.StatusServiceUnavailable, "api_error", "No available accounts: "+err.Error()) return } + setOpsSelectedAccount(c, account.ID) // 转发请求(不记录使用量) if err := h.gatewayService.ForwardCountTokens(c.Request.Context(), c, account, parsedReq); err != nil { diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 9909fa90..c7646b38 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -12,6 +12,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" "github.com/Wei-Shaw/sub2api/internal/pkg/gemini" "github.com/Wei-Shaw/sub2api/internal/pkg/googleapi" + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/Wei-Shaw/sub2api/internal/service" @@ -161,25 +162,32 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { return } + setOpsRequestContext(c, modelName, stream, body) + // Get subscription (may be nil) subscription, _ := middleware.GetSubscriptionFromContext(c) - // 获取 User-Agent - userAgent := c.Request.UserAgent() - // For Gemini native API, do not send Claude-style ping frames. 
geminiConcurrency := NewConcurrencyHelper(h.concurrencyHelper.concurrencyService, SSEPingFormatNone, 0) // 0) wait queue check maxWait := service.CalculateMaxWait(authSubject.Concurrency) canWait, err := geminiConcurrency.IncrementWaitCount(c.Request.Context(), authSubject.UserID, maxWait) + waitCounted := false if err != nil { log.Printf("Increment wait count failed: %v", err) } else if !canWait { googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later") return } - defer geminiConcurrency.DecrementWaitCount(c.Request.Context(), authSubject.UserID) + if err == nil && canWait { + waitCounted = true + } + defer func() { + if waitCounted { + geminiConcurrency.DecrementWaitCount(c.Request.Context(), authSubject.UserID) + } + }() // 1) user concurrency slot streamStarted := false @@ -188,6 +196,10 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { googleError(c, http.StatusTooManyRequests, err.Error()) return } + if waitCounted { + geminiConcurrency.DecrementWaitCount(c.Request.Context(), authSubject.UserID) + waitCounted = false + } // 确保请求取消时也会释放槽位,避免长连接被动中断造成泄漏 userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc) if userReleaseFunc != nil { @@ -203,10 +215,6 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { // 3) select account (sticky session based on request body) parsedReq, _ := service.ParseGatewayRequest(body) - - // 设置 Claude Code 客户端标识到 context(用于分组限制检查) - SetClaudeCodeClientContext(c, body) - sessionHash := h.gatewayService.GenerateSessionHash(parsedReq) sessionKey := sessionHash if sessionHash != "" { @@ -218,7 +226,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { lastFailoverStatus := 0 for { - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, modelName, failedAccountIDs) + selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionKey, 
modelName, failedAccountIDs, "") // Gemini 不使用会话限制 if err != nil { if len(failedAccountIDs) == 0 { googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts: "+err.Error()) @@ -228,15 +236,16 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { return } account := selection.Account + setOpsSelectedAccount(c, account.ID) // 4) account concurrency slot accountReleaseFunc := selection.ReleaseFunc - var accountWaitRelease func() if !selection.Acquired { if selection.WaitPlan == nil { googleError(c, http.StatusServiceUnavailable, "No available Gemini accounts") return } + accountWaitCounted := false canWait, err := geminiConcurrency.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) if err != nil { log.Printf("Increment account wait count failed: %v", err) @@ -244,12 +253,15 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { log.Printf("Account wait queue full: account=%d", account.ID) googleError(c, http.StatusTooManyRequests, "Too many pending requests, please retry later") return - } else { - // Only set release function if increment succeeded - accountWaitRelease = func() { + } + if err == nil && canWait { + accountWaitCounted = true + } + defer func() { + if accountWaitCounted { geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID) } - } + }() accountReleaseFunc, err = geminiConcurrency.AcquireAccountSlotWithWaitTimeout( c, @@ -260,19 +272,19 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { &streamStarted, ) if err != nil { - if accountWaitRelease != nil { - accountWaitRelease() - } googleError(c, http.StatusTooManyRequests, err.Error()) return } + if accountWaitCounted { + geminiConcurrency.DecrementAccountWaitCount(c.Request.Context(), account.ID) + accountWaitCounted = false + } if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionKey, account.ID); err != nil { log.Printf("Bind sticky session failed: %v", 
err) } } // 账号槽位/等待计数需要在超时或断开时安全回收 accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc) - accountWaitRelease = wrapReleaseOnDone(c.Request.Context(), accountWaitRelease) // 5) forward (根据平台分流) var result *service.ForwardResult @@ -284,9 +296,6 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { if accountReleaseFunc != nil { accountReleaseFunc() } - if accountWaitRelease != nil { - accountWaitRelease() - } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { @@ -306,8 +315,12 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { return } + // 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context) + userAgent := c.GetHeader("User-Agent") + clientIP := ip.GetClientIP(c) + // 6) record usage async - go func(result *service.ForwardResult, usedAccount *service.Account, ua string) { + go func(result *service.ForwardResult, usedAccount *service.Account, ua, ip string) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if err := h.gatewayService.RecordUsage(ctx, &service.RecordUsageInput{ @@ -317,10 +330,11 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { Account: usedAccount, Subscription: subscription, UserAgent: ua, + IPAddress: ip, }); err != nil { log.Printf("Record usage failed: %v", err) } - }(result, account, userAgent) + }(result, account, userAgent, clientIP) return } } diff --git a/backend/internal/handler/handler.go b/backend/internal/handler/handler.go index 817b71d3..5b1b317d 100644 --- a/backend/internal/handler/handler.go +++ b/backend/internal/handler/handler.go @@ -16,7 +16,9 @@ type AdminHandlers struct { AntigravityOAuth *admin.AntigravityOAuthHandler Proxy *admin.ProxyHandler Redeem *admin.RedeemHandler + Promo *admin.PromoHandler Setting *admin.SettingHandler + Ops *admin.OpsHandler System *admin.SystemHandler Subscription *admin.SubscriptionHandler Usage *admin.UsageHandler diff --git 
a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 334d1368..064473a0 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -8,9 +8,11 @@ import ( "io" "log" "net/http" + "strings" "time" "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/pkg/openai" middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/Wei-Shaw/sub2api/internal/service" @@ -81,6 +83,8 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { return } + setOpsRequestContext(c, "", false, body) + // Parse request body to map for potential modification var reqBody map[string]any if err := json.Unmarshal(body, &reqBody); err != nil { @@ -98,15 +102,41 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { return } - // For non-Codex CLI requests, set default instructions userAgent := c.GetHeader("User-Agent") if !openai.IsCodexCLIRequest(userAgent) { - reqBody["instructions"] = openai.DefaultInstructions - // Re-serialize body - body, err = json.Marshal(reqBody) - if err != nil { - h.errorResponse(c, http.StatusInternalServerError, "api_error", "Failed to process request") - return + existingInstructions, _ := reqBody["instructions"].(string) + if strings.TrimSpace(existingInstructions) == "" { + if instructions := strings.TrimSpace(service.GetOpenCodeInstructions()); instructions != "" { + reqBody["instructions"] = instructions + // Re-serialize body + body, err = json.Marshal(reqBody) + if err != nil { + h.errorResponse(c, http.StatusInternalServerError, "api_error", "Failed to process request") + return + } + } + } + } + + setOpsRequestContext(c, reqModel, reqStream, body) + + // 提前校验 function_call_output 是否具备可关联上下文,避免上游 400。 + // 要求 previous_response_id,或 input 内存在带 call_id 的 tool_call/function_call, + // 或带 id 且与 call_id 匹配的 item_reference。 + if 
service.HasFunctionCallOutput(reqBody) { + previousResponseID, _ := reqBody["previous_response_id"].(string) + if strings.TrimSpace(previousResponseID) == "" && !service.HasToolCallContext(reqBody) { + if service.HasFunctionCallOutputMissingCallID(reqBody) { + log.Printf("[OpenAI Handler] function_call_output 缺少 call_id: model=%s", reqModel) + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires call_id or previous_response_id; if relying on history, ensure store=true and reuse previous_response_id") + return + } + callIDs := service.FunctionCallOutputCallIDs(reqBody) + if !service.HasItemReferenceForCallIDs(reqBody, callIDs) { + log.Printf("[OpenAI Handler] function_call_output 缺少匹配的 item_reference: model=%s", reqModel) + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires item_reference ids matching each call_id, or previous_response_id/tool_call context; if relying on history, ensure store=true and reuse previous_response_id") + return + } } } @@ -119,6 +149,7 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { // 0. Check if wait queue is full maxWait := service.CalculateMaxWait(subject.Concurrency) canWait, err := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), subject.UserID, maxWait) + waitCounted := false if err != nil { log.Printf("Increment wait count failed: %v", err) // On error, allow request to proceed @@ -126,8 +157,14 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later") return } - // Ensure wait count is decremented when function exits - defer h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID) + if err == nil && canWait { + waitCounted = true + } + defer func() { + if waitCounted { + h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID) + } + }() // 1. 
First acquire user concurrency slot userReleaseFunc, err := h.concurrencyHelper.AcquireUserSlotWithWait(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted) @@ -136,6 +173,11 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { h.handleConcurrencyError(c, err, "user", streamStarted) return } + // User slot acquired: no longer waiting. + if waitCounted { + h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID) + waitCounted = false + } // 确保请求取消时也会释放槽位,避免长连接被动中断造成泄漏 userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc) if userReleaseFunc != nil { @@ -173,15 +215,16 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { } account := selection.Account log.Printf("[OpenAI Handler] Selected account: id=%d name=%s", account.ID, account.Name) + setOpsSelectedAccount(c, account.ID) // 3. Acquire account concurrency slot accountReleaseFunc := selection.ReleaseFunc - var accountWaitRelease func() if !selection.Acquired { if selection.WaitPlan == nil { h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) return } + accountWaitCounted := false canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) if err != nil { log.Printf("Increment account wait count failed: %v", err) @@ -189,12 +232,15 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { log.Printf("Account wait queue full: account=%d", account.ID) h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) return - } else { - // Only set release function if increment succeeded - accountWaitRelease = func() { + } + if err == nil && canWait { + accountWaitCounted = true + } + defer func() { + if accountWaitCounted { h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) } - } + }() accountReleaseFunc, err = 
h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( c, @@ -205,29 +251,26 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { &streamStarted, ) if err != nil { - if accountWaitRelease != nil { - accountWaitRelease() - } log.Printf("Account concurrency acquire failed: %v", err) h.handleConcurrencyError(c, err, "account", streamStarted) return } + if accountWaitCounted { + h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) + accountWaitCounted = false + } if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionHash, account.ID); err != nil { log.Printf("Bind sticky session failed: %v", err) } } // 账号槽位/等待计数需要在超时或断开时安全回收 accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc) - accountWaitRelease = wrapReleaseOnDone(c.Request.Context(), accountWaitRelease) // Forward request result, err := h.gatewayService.Forward(c.Request.Context(), c, account, body) if accountReleaseFunc != nil { accountReleaseFunc() } - if accountWaitRelease != nil { - accountWaitRelease() - } if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { @@ -247,8 +290,12 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { return } + // 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context) + userAgent := c.GetHeader("User-Agent") + clientIP := ip.GetClientIP(c) + // Async record usage - go func(result *service.OpenAIForwardResult, usedAccount *service.Account, ua string) { + go func(result *service.OpenAIForwardResult, usedAccount *service.Account, ua, ip string) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() if err := h.gatewayService.RecordUsage(ctx, &service.OpenAIRecordUsageInput{ @@ -258,10 +305,11 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { Account: usedAccount, Subscription: subscription, UserAgent: ua, + IPAddress: ip, }); err != nil { log.Printf("Record usage failed: %v", err) } - }(result, 
account, userAgent) + }(result, account, userAgent, clientIP) return } } diff --git a/backend/internal/handler/ops_error_logger.go b/backend/internal/handler/ops_error_logger.go new file mode 100644 index 00000000..f62e6b3e --- /dev/null +++ b/backend/internal/handler/ops_error_logger.go @@ -0,0 +1,1015 @@ +package handler + +import ( + "bytes" + "context" + "encoding/json" + "log" + "runtime" + "runtime/debug" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + "unicode/utf8" + + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" +) + +const ( + opsModelKey = "ops_model" + opsStreamKey = "ops_stream" + opsRequestBodyKey = "ops_request_body" + opsAccountIDKey = "ops_account_id" +) + +const ( + opsErrorLogTimeout = 5 * time.Second + opsErrorLogDrainTimeout = 10 * time.Second + + opsErrorLogMinWorkerCount = 4 + opsErrorLogMaxWorkerCount = 32 + + opsErrorLogQueueSizePerWorker = 128 + opsErrorLogMinQueueSize = 256 + opsErrorLogMaxQueueSize = 8192 +) + +type opsErrorLogJob struct { + ops *service.OpsService + entry *service.OpsInsertErrorLogInput + requestBody []byte +} + +var ( + opsErrorLogOnce sync.Once + opsErrorLogQueue chan opsErrorLogJob + + opsErrorLogStopOnce sync.Once + opsErrorLogWorkersWg sync.WaitGroup + opsErrorLogMu sync.RWMutex + opsErrorLogStopping bool + opsErrorLogQueueLen atomic.Int64 + opsErrorLogEnqueued atomic.Int64 + opsErrorLogDropped atomic.Int64 + opsErrorLogProcessed atomic.Int64 + + opsErrorLogLastDropLogAt atomic.Int64 + + opsErrorLogShutdownCh = make(chan struct{}) + opsErrorLogShutdownOnce sync.Once + opsErrorLogDrained atomic.Bool +) + +func startOpsErrorLogWorkers() { + opsErrorLogMu.Lock() + defer opsErrorLogMu.Unlock() + + if opsErrorLogStopping { + return + } + + workerCount, queueSize := opsErrorLogConfig() + opsErrorLogQueue = 
make(chan opsErrorLogJob, queueSize) + opsErrorLogQueueLen.Store(0) + + opsErrorLogWorkersWg.Add(workerCount) + for i := 0; i < workerCount; i++ { + go func() { + defer opsErrorLogWorkersWg.Done() + for job := range opsErrorLogQueue { + opsErrorLogQueueLen.Add(-1) + if job.ops == nil || job.entry == nil { + continue + } + func() { + defer func() { + if r := recover(); r != nil { + log.Printf("[OpsErrorLogger] worker panic: %v\n%s", r, debug.Stack()) + } + }() + ctx, cancel := context.WithTimeout(context.Background(), opsErrorLogTimeout) + _ = job.ops.RecordError(ctx, job.entry, job.requestBody) + cancel() + opsErrorLogProcessed.Add(1) + }() + } + }() + } +} + +func enqueueOpsErrorLog(ops *service.OpsService, entry *service.OpsInsertErrorLogInput, requestBody []byte) { + if ops == nil || entry == nil { + return + } + select { + case <-opsErrorLogShutdownCh: + return + default: + } + + opsErrorLogMu.RLock() + stopping := opsErrorLogStopping + opsErrorLogMu.RUnlock() + if stopping { + return + } + + opsErrorLogOnce.Do(startOpsErrorLogWorkers) + + opsErrorLogMu.RLock() + defer opsErrorLogMu.RUnlock() + if opsErrorLogStopping || opsErrorLogQueue == nil { + return + } + + select { + case opsErrorLogQueue <- opsErrorLogJob{ops: ops, entry: entry, requestBody: requestBody}: + opsErrorLogQueueLen.Add(1) + opsErrorLogEnqueued.Add(1) + default: + // Queue is full; drop to avoid blocking request handling. 
+ opsErrorLogDropped.Add(1) + maybeLogOpsErrorLogDrop() + } +} + +func StopOpsErrorLogWorkers() bool { + opsErrorLogStopOnce.Do(func() { + opsErrorLogShutdownOnce.Do(func() { + close(opsErrorLogShutdownCh) + }) + opsErrorLogDrained.Store(stopOpsErrorLogWorkers()) + }) + return opsErrorLogDrained.Load() +} + +func stopOpsErrorLogWorkers() bool { + opsErrorLogMu.Lock() + opsErrorLogStopping = true + ch := opsErrorLogQueue + if ch != nil { + close(ch) + } + opsErrorLogQueue = nil + opsErrorLogMu.Unlock() + + if ch == nil { + opsErrorLogQueueLen.Store(0) + return true + } + + done := make(chan struct{}) + go func() { + opsErrorLogWorkersWg.Wait() + close(done) + }() + + select { + case <-done: + opsErrorLogQueueLen.Store(0) + return true + case <-time.After(opsErrorLogDrainTimeout): + return false + } +} + +func OpsErrorLogQueueLength() int64 { + return opsErrorLogQueueLen.Load() +} + +func OpsErrorLogQueueCapacity() int { + opsErrorLogMu.RLock() + ch := opsErrorLogQueue + opsErrorLogMu.RUnlock() + if ch == nil { + return 0 + } + return cap(ch) +} + +func OpsErrorLogDroppedTotal() int64 { + return opsErrorLogDropped.Load() +} + +func OpsErrorLogEnqueuedTotal() int64 { + return opsErrorLogEnqueued.Load() +} + +func OpsErrorLogProcessedTotal() int64 { + return opsErrorLogProcessed.Load() +} + +func maybeLogOpsErrorLogDrop() { + now := time.Now().Unix() + + for { + last := opsErrorLogLastDropLogAt.Load() + if last != 0 && now-last < 60 { + return + } + if opsErrorLogLastDropLogAt.CompareAndSwap(last, now) { + break + } + } + + queued := opsErrorLogQueueLen.Load() + queueCap := OpsErrorLogQueueCapacity() + + log.Printf( + "[OpsErrorLogger] queue is full; dropping logs (queued=%d cap=%d enqueued_total=%d dropped_total=%d processed_total=%d)", + queued, + queueCap, + opsErrorLogEnqueued.Load(), + opsErrorLogDropped.Load(), + opsErrorLogProcessed.Load(), + ) +} + +func opsErrorLogConfig() (workerCount int, queueSize int) { + workerCount = runtime.GOMAXPROCS(0) * 2 + if 
workerCount < opsErrorLogMinWorkerCount { + workerCount = opsErrorLogMinWorkerCount + } + if workerCount > opsErrorLogMaxWorkerCount { + workerCount = opsErrorLogMaxWorkerCount + } + + queueSize = workerCount * opsErrorLogQueueSizePerWorker + if queueSize < opsErrorLogMinQueueSize { + queueSize = opsErrorLogMinQueueSize + } + if queueSize > opsErrorLogMaxQueueSize { + queueSize = opsErrorLogMaxQueueSize + } + + return workerCount, queueSize +} + +func setOpsRequestContext(c *gin.Context, model string, stream bool, requestBody []byte) { + if c == nil { + return + } + c.Set(opsModelKey, model) + c.Set(opsStreamKey, stream) + if len(requestBody) > 0 { + c.Set(opsRequestBodyKey, requestBody) + } +} + +func setOpsSelectedAccount(c *gin.Context, accountID int64) { + if c == nil || accountID <= 0 { + return + } + c.Set(opsAccountIDKey, accountID) +} + +type opsCaptureWriter struct { + gin.ResponseWriter + limit int + buf bytes.Buffer +} + +func (w *opsCaptureWriter) Write(b []byte) (int, error) { + if w.Status() >= 400 && w.limit > 0 && w.buf.Len() < w.limit { + remaining := w.limit - w.buf.Len() + if len(b) > remaining { + _, _ = w.buf.Write(b[:remaining]) + } else { + _, _ = w.buf.Write(b) + } + } + return w.ResponseWriter.Write(b) +} + +func (w *opsCaptureWriter) WriteString(s string) (int, error) { + if w.Status() >= 400 && w.limit > 0 && w.buf.Len() < w.limit { + remaining := w.limit - w.buf.Len() + if len(s) > remaining { + _, _ = w.buf.WriteString(s[:remaining]) + } else { + _, _ = w.buf.WriteString(s) + } + } + return w.ResponseWriter.WriteString(s) +} + +// OpsErrorLoggerMiddleware records error responses (status >= 400) into ops_error_logs. +// +// Notes: +// - It buffers response bodies only when status >= 400 to avoid overhead for successful traffic. +// - Streaming errors after the response has started (SSE) may still need explicit logging. 
+func OpsErrorLoggerMiddleware(ops *service.OpsService) gin.HandlerFunc { + return func(c *gin.Context) { + w := &opsCaptureWriter{ResponseWriter: c.Writer, limit: 64 * 1024} + c.Writer = w + c.Next() + + if ops == nil { + return + } + if !ops.IsMonitoringEnabled(c.Request.Context()) { + return + } + + status := c.Writer.Status() + if status < 400 { + // Even when the client request succeeds, we still want to persist upstream error attempts + // (retries/failover) so ops can observe upstream instability that gets "covered" by retries. + var events []*service.OpsUpstreamErrorEvent + if v, ok := c.Get(service.OpsUpstreamErrorsKey); ok { + if arr, ok := v.([]*service.OpsUpstreamErrorEvent); ok && len(arr) > 0 { + events = arr + } + } + // Also accept single upstream fields set by gateway services (rare for successful requests). + hasUpstreamContext := len(events) > 0 + if !hasUpstreamContext { + if v, ok := c.Get(service.OpsUpstreamStatusCodeKey); ok { + switch t := v.(type) { + case int: + hasUpstreamContext = t > 0 + case int64: + hasUpstreamContext = t > 0 + } + } + } + if !hasUpstreamContext { + if v, ok := c.Get(service.OpsUpstreamErrorMessageKey); ok { + if s, ok := v.(string); ok && strings.TrimSpace(s) != "" { + hasUpstreamContext = true + } + } + } + if !hasUpstreamContext { + if v, ok := c.Get(service.OpsUpstreamErrorDetailKey); ok { + if s, ok := v.(string); ok && strings.TrimSpace(s) != "" { + hasUpstreamContext = true + } + } + } + if !hasUpstreamContext { + return + } + + apiKey, _ := middleware2.GetAPIKeyFromContext(c) + clientRequestID, _ := c.Request.Context().Value(ctxkey.ClientRequestID).(string) + + model, _ := c.Get(opsModelKey) + streamV, _ := c.Get(opsStreamKey) + accountIDV, _ := c.Get(opsAccountIDKey) + + var modelName string + if s, ok := model.(string); ok { + modelName = s + } + stream := false + if b, ok := streamV.(bool); ok { + stream = b + } + + // Prefer showing the account that experienced the upstream error (if we have events), + // 
otherwise fall back to the final selected account (best-effort). + var accountID *int64 + if len(events) > 0 { + if last := events[len(events)-1]; last != nil && last.AccountID > 0 { + v := last.AccountID + accountID = &v + } + } + if accountID == nil { + if v, ok := accountIDV.(int64); ok && v > 0 { + accountID = &v + } + } + + fallbackPlatform := guessPlatformFromPath(c.Request.URL.Path) + platform := resolveOpsPlatform(apiKey, fallbackPlatform) + + requestID := c.Writer.Header().Get("X-Request-Id") + if requestID == "" { + requestID = c.Writer.Header().Get("x-request-id") + } + + // Best-effort backfill single upstream fields from the last event (if present). + var upstreamStatusCode *int + var upstreamErrorMessage *string + var upstreamErrorDetail *string + if len(events) > 0 { + last := events[len(events)-1] + if last != nil { + if last.UpstreamStatusCode > 0 { + code := last.UpstreamStatusCode + upstreamStatusCode = &code + } + if msg := strings.TrimSpace(last.Message); msg != "" { + upstreamErrorMessage = &msg + } + if detail := strings.TrimSpace(last.Detail); detail != "" { + upstreamErrorDetail = &detail + } + } + } + + if upstreamStatusCode == nil { + if v, ok := c.Get(service.OpsUpstreamStatusCodeKey); ok { + switch t := v.(type) { + case int: + if t > 0 { + code := t + upstreamStatusCode = &code + } + case int64: + if t > 0 { + code := int(t) + upstreamStatusCode = &code + } + } + } + } + if upstreamErrorMessage == nil { + if v, ok := c.Get(service.OpsUpstreamErrorMessageKey); ok { + if s, ok := v.(string); ok && strings.TrimSpace(s) != "" { + msg := strings.TrimSpace(s) + upstreamErrorMessage = &msg + } + } + } + if upstreamErrorDetail == nil { + if v, ok := c.Get(service.OpsUpstreamErrorDetailKey); ok { + if s, ok := v.(string); ok && strings.TrimSpace(s) != "" { + detail := strings.TrimSpace(s) + upstreamErrorDetail = &detail + } + } + } + + // If we still have nothing meaningful, skip. 
+ if upstreamStatusCode == nil && upstreamErrorMessage == nil && upstreamErrorDetail == nil && len(events) == 0 { + return + } + + effectiveUpstreamStatus := 0 + if upstreamStatusCode != nil { + effectiveUpstreamStatus = *upstreamStatusCode + } + + recoveredMsg := "Recovered upstream error" + if effectiveUpstreamStatus > 0 { + recoveredMsg += " " + strconvItoa(effectiveUpstreamStatus) + } + if upstreamErrorMessage != nil && strings.TrimSpace(*upstreamErrorMessage) != "" { + recoveredMsg += ": " + strings.TrimSpace(*upstreamErrorMessage) + } + recoveredMsg = truncateString(recoveredMsg, 2048) + + entry := &service.OpsInsertErrorLogInput{ + RequestID: requestID, + ClientRequestID: clientRequestID, + + AccountID: accountID, + Platform: platform, + Model: modelName, + RequestPath: func() string { + if c.Request != nil && c.Request.URL != nil { + return c.Request.URL.Path + } + return "" + }(), + Stream: stream, + UserAgent: c.GetHeader("User-Agent"), + + ErrorPhase: "upstream", + ErrorType: "upstream_error", + // Severity/retryability should reflect the upstream failure, not the final client status (200). + Severity: classifyOpsSeverity("upstream_error", effectiveUpstreamStatus), + StatusCode: status, + IsBusinessLimited: false, + IsCountTokens: isCountTokensRequest(c), + + ErrorMessage: recoveredMsg, + ErrorBody: "", + + ErrorSource: "upstream_http", + ErrorOwner: "provider", + + UpstreamStatusCode: upstreamStatusCode, + UpstreamErrorMessage: upstreamErrorMessage, + UpstreamErrorDetail: upstreamErrorDetail, + UpstreamErrors: events, + + IsRetryable: classifyOpsIsRetryable("upstream_error", effectiveUpstreamStatus), + RetryCount: 0, + CreatedAt: time.Now(), + } + + if apiKey != nil { + entry.APIKeyID = &apiKey.ID + if apiKey.User != nil { + entry.UserID = &apiKey.User.ID + } + if apiKey.GroupID != nil { + entry.GroupID = apiKey.GroupID + } + // Prefer group platform if present (more stable than inferring from path). 
+ if apiKey.Group != nil && apiKey.Group.Platform != "" { + entry.Platform = apiKey.Group.Platform + } + } + + var clientIP string + if ip := strings.TrimSpace(ip.GetClientIP(c)); ip != "" { + clientIP = ip + entry.ClientIP = &clientIP + } + + var requestBody []byte + if v, ok := c.Get(opsRequestBodyKey); ok { + if b, ok := v.([]byte); ok && len(b) > 0 { + requestBody = b + } + } + // Store request headers/body only when an upstream error occurred to keep overhead minimal. + entry.RequestHeadersJSON = extractOpsRetryRequestHeaders(c) + + enqueueOpsErrorLog(ops, entry, requestBody) + return + } + + body := w.buf.Bytes() + parsed := parseOpsErrorResponse(body) + + // Skip logging if the error should be filtered based on settings + if shouldSkipOpsErrorLog(c.Request.Context(), ops, parsed.Message, string(body), c.Request.URL.Path) { + return + } + + apiKey, _ := middleware2.GetAPIKeyFromContext(c) + + clientRequestID, _ := c.Request.Context().Value(ctxkey.ClientRequestID).(string) + + model, _ := c.Get(opsModelKey) + streamV, _ := c.Get(opsStreamKey) + accountIDV, _ := c.Get(opsAccountIDKey) + + var modelName string + if s, ok := model.(string); ok { + modelName = s + } + stream := false + if b, ok := streamV.(bool); ok { + stream = b + } + var accountID *int64 + if v, ok := accountIDV.(int64); ok && v > 0 { + accountID = &v + } + + fallbackPlatform := guessPlatformFromPath(c.Request.URL.Path) + platform := resolveOpsPlatform(apiKey, fallbackPlatform) + + requestID := c.Writer.Header().Get("X-Request-Id") + if requestID == "" { + requestID = c.Writer.Header().Get("x-request-id") + } + + phase := classifyOpsPhase(parsed.ErrorType, parsed.Message, parsed.Code) + isBusinessLimited := classifyOpsIsBusinessLimited(parsed.ErrorType, phase, parsed.Code, status, parsed.Message) + + errorOwner := classifyOpsErrorOwner(phase, parsed.Message) + errorSource := classifyOpsErrorSource(phase, parsed.Message) + + entry := &service.OpsInsertErrorLogInput{ + RequestID: requestID, + 
ClientRequestID: clientRequestID, + + AccountID: accountID, + Platform: platform, + Model: modelName, + RequestPath: func() string { + if c.Request != nil && c.Request.URL != nil { + return c.Request.URL.Path + } + return "" + }(), + Stream: stream, + UserAgent: c.GetHeader("User-Agent"), + + ErrorPhase: phase, + ErrorType: normalizeOpsErrorType(parsed.ErrorType, parsed.Code), + Severity: classifyOpsSeverity(parsed.ErrorType, status), + StatusCode: status, + IsBusinessLimited: isBusinessLimited, + IsCountTokens: isCountTokensRequest(c), + + ErrorMessage: parsed.Message, + // Keep the full captured error body (capture is already capped at 64KB) so the + // service layer can sanitize JSON before truncating for storage. + ErrorBody: string(body), + ErrorSource: errorSource, + ErrorOwner: errorOwner, + + IsRetryable: classifyOpsIsRetryable(parsed.ErrorType, status), + RetryCount: 0, + CreatedAt: time.Now(), + } + + // Capture upstream error context set by gateway services (if present). + // This does NOT affect the client response; it enriches Ops troubleshooting data. + { + if v, ok := c.Get(service.OpsUpstreamStatusCodeKey); ok { + switch t := v.(type) { + case int: + if t > 0 { + code := t + entry.UpstreamStatusCode = &code + } + case int64: + if t > 0 { + code := int(t) + entry.UpstreamStatusCode = &code + } + } + } + if v, ok := c.Get(service.OpsUpstreamErrorMessageKey); ok { + if s, ok := v.(string); ok { + if msg := strings.TrimSpace(s); msg != "" { + entry.UpstreamErrorMessage = &msg + } + } + } + if v, ok := c.Get(service.OpsUpstreamErrorDetailKey); ok { + if s, ok := v.(string); ok { + if detail := strings.TrimSpace(s); detail != "" { + entry.UpstreamErrorDetail = &detail + } + } + } + if v, ok := c.Get(service.OpsUpstreamErrorsKey); ok { + if events, ok := v.([]*service.OpsUpstreamErrorEvent); ok && len(events) > 0 { + entry.UpstreamErrors = events + // Best-effort backfill the single upstream fields from the last event when missing. 
+ last := events[len(events)-1] + if last != nil { + if entry.UpstreamStatusCode == nil && last.UpstreamStatusCode > 0 { + code := last.UpstreamStatusCode + entry.UpstreamStatusCode = &code + } + if entry.UpstreamErrorMessage == nil && strings.TrimSpace(last.Message) != "" { + msg := strings.TrimSpace(last.Message) + entry.UpstreamErrorMessage = &msg + } + if entry.UpstreamErrorDetail == nil && strings.TrimSpace(last.Detail) != "" { + detail := strings.TrimSpace(last.Detail) + entry.UpstreamErrorDetail = &detail + } + } + } + } + } + + if apiKey != nil { + entry.APIKeyID = &apiKey.ID + if apiKey.User != nil { + entry.UserID = &apiKey.User.ID + } + if apiKey.GroupID != nil { + entry.GroupID = apiKey.GroupID + } + // Prefer group platform if present (more stable than inferring from path). + if apiKey.Group != nil && apiKey.Group.Platform != "" { + entry.Platform = apiKey.Group.Platform + } + } + + var clientIP string + if ip := strings.TrimSpace(ip.GetClientIP(c)); ip != "" { + clientIP = ip + entry.ClientIP = &clientIP + } + + var requestBody []byte + if v, ok := c.Get(opsRequestBodyKey); ok { + if b, ok := v.([]byte); ok && len(b) > 0 { + requestBody = b + } + } + // Persist only a minimal, whitelisted set of request headers to improve retry fidelity. + // Do NOT store Authorization/Cookie/etc. 
+ entry.RequestHeadersJSON = extractOpsRetryRequestHeaders(c) + + enqueueOpsErrorLog(ops, entry, requestBody) + } +} + +var opsRetryRequestHeaderAllowlist = []string{ + "anthropic-beta", + "anthropic-version", +} + +// isCountTokensRequest checks if the request is a count_tokens request +func isCountTokensRequest(c *gin.Context) bool { + if c == nil || c.Request == nil || c.Request.URL == nil { + return false + } + return strings.Contains(c.Request.URL.Path, "/count_tokens") +} + +func extractOpsRetryRequestHeaders(c *gin.Context) *string { + if c == nil || c.Request == nil { + return nil + } + + headers := make(map[string]string, 4) + for _, key := range opsRetryRequestHeaderAllowlist { + v := strings.TrimSpace(c.GetHeader(key)) + if v == "" { + continue + } + // Keep headers small even if a client sends something unexpected. + headers[key] = truncateString(v, 512) + } + if len(headers) == 0 { + return nil + } + + raw, err := json.Marshal(headers) + if err != nil { + return nil + } + s := string(raw) + return &s +} + +type parsedOpsError struct { + ErrorType string + Message string + Code string +} + +func parseOpsErrorResponse(body []byte) parsedOpsError { + if len(body) == 0 { + return parsedOpsError{} + } + + // Fast path: attempt to decode into a generic map. + var m map[string]any + if err := json.Unmarshal(body, &m); err != nil { + return parsedOpsError{Message: truncateString(string(body), 1024)} + } + + // Claude/OpenAI-style gateway error: { type:"error", error:{ type, message } } + if errObj, ok := m["error"].(map[string]any); ok { + t, _ := errObj["type"].(string) + msg, _ := errObj["message"].(string) + // Gemini googleError also uses "error": { code, message, status } + if msg == "" { + if v, ok := errObj["message"]; ok { + msg, _ = v.(string) + } + } + if t == "" { + // Gemini error does not have "type" field. + t = "api_error" + } + // For gemini error, capture numeric code as string for business-limited mapping if needed. 
+ var code string + if v, ok := errObj["code"]; ok { + switch n := v.(type) { + case float64: + code = strconvItoa(int(n)) + case int: + code = strconvItoa(n) + } + } + return parsedOpsError{ErrorType: t, Message: msg, Code: code} + } + + // APIKeyAuth-style: { code:"INSUFFICIENT_BALANCE", message:"..." } + code, _ := m["code"].(string) + msg, _ := m["message"].(string) + if code != "" || msg != "" { + return parsedOpsError{ErrorType: "api_error", Message: msg, Code: code} + } + + return parsedOpsError{Message: truncateString(string(body), 1024)} +} + +func resolveOpsPlatform(apiKey *service.APIKey, fallback string) string { + if apiKey != nil && apiKey.Group != nil && apiKey.Group.Platform != "" { + return apiKey.Group.Platform + } + return fallback +} + +func guessPlatformFromPath(path string) string { + p := strings.ToLower(path) + switch { + case strings.HasPrefix(p, "/antigravity/"): + return service.PlatformAntigravity + case strings.HasPrefix(p, "/v1beta/"): + return service.PlatformGemini + case strings.Contains(p, "/responses"): + return service.PlatformOpenAI + default: + return "" + } +} + +func normalizeOpsErrorType(errType string, code string) string { + if errType != "" { + return errType + } + switch strings.TrimSpace(code) { + case "INSUFFICIENT_BALANCE": + return "billing_error" + case "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID": + return "subscription_error" + default: + return "api_error" + } +} + +func classifyOpsPhase(errType, message, code string) string { + msg := strings.ToLower(message) + // Standardized phases: request|auth|routing|upstream|network|internal + // Map billing/concurrency/response => request; scheduling => routing. 
+ switch strings.TrimSpace(code) { + case "INSUFFICIENT_BALANCE", "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID": + return "request" + } + + switch errType { + case "authentication_error": + return "auth" + case "billing_error", "subscription_error": + return "request" + case "rate_limit_error": + if strings.Contains(msg, "concurrency") || strings.Contains(msg, "pending") || strings.Contains(msg, "queue") { + return "request" + } + return "upstream" + case "invalid_request_error": + return "request" + case "upstream_error", "overloaded_error": + return "upstream" + case "api_error": + if strings.Contains(msg, "no available accounts") { + return "routing" + } + return "internal" + default: + return "internal" + } +} + +func classifyOpsSeverity(errType string, status int) string { + switch errType { + case "invalid_request_error", "authentication_error", "billing_error", "subscription_error": + return "P3" + } + if status >= 500 { + return "P1" + } + if status == 429 { + return "P1" + } + if status >= 400 { + return "P2" + } + return "P3" +} + +func classifyOpsIsRetryable(errType string, statusCode int) bool { + switch errType { + case "authentication_error", "invalid_request_error": + return false + case "timeout_error": + return true + case "rate_limit_error": + // May be transient (upstream or queue); retry can help. 
+ return true + case "billing_error", "subscription_error": + return false + case "upstream_error", "overloaded_error": + return statusCode >= 500 || statusCode == 429 || statusCode == 529 + default: + return statusCode >= 500 + } +} + +func classifyOpsIsBusinessLimited(errType, phase, code string, status int, message string) bool { + switch strings.TrimSpace(code) { + case "INSUFFICIENT_BALANCE", "USAGE_LIMIT_EXCEEDED", "SUBSCRIPTION_NOT_FOUND", "SUBSCRIPTION_INVALID": + return true + } + if phase == "billing" || phase == "concurrency" { + // SLA/错误率排除“用户级业务限制” + return true + } + // Avoid treating upstream rate limits as business-limited. + if errType == "rate_limit_error" && strings.Contains(strings.ToLower(message), "upstream") { + return false + } + _ = status + return false +} + +func classifyOpsErrorOwner(phase string, message string) string { + // Standardized owners: client|provider|platform + switch phase { + case "upstream", "network": + return "provider" + case "request", "auth": + return "client" + case "routing", "internal": + return "platform" + default: + if strings.Contains(strings.ToLower(message), "upstream") { + return "provider" + } + return "platform" + } +} + +func classifyOpsErrorSource(phase string, message string) string { + // Standardized sources: client_request|upstream_http|gateway + switch phase { + case "upstream": + return "upstream_http" + case "network": + return "gateway" + case "request", "auth": + return "client_request" + case "routing", "internal": + return "gateway" + default: + if strings.Contains(strings.ToLower(message), "upstream") { + return "upstream_http" + } + return "gateway" + } +} + +func truncateString(s string, max int) string { + if max <= 0 { + return "" + } + if len(s) <= max { + return s + } + cut := s[:max] + // Ensure truncation does not split multi-byte characters. 
+ for len(cut) > 0 && !utf8.ValidString(cut) { + cut = cut[:len(cut)-1] + } + return cut +} + +func strconvItoa(v int) string { + return strconv.Itoa(v) +} + +// shouldSkipOpsErrorLog determines if an error should be skipped from logging based on settings. +// Returns true for errors that should be filtered according to OpsAdvancedSettings. +func shouldSkipOpsErrorLog(ctx context.Context, ops *service.OpsService, message, body, requestPath string) bool { + if ops == nil { + return false + } + + // Get advanced settings to check filter configuration + settings, err := ops.GetOpsAdvancedSettings(ctx) + if err != nil || settings == nil { + // If we can't get settings, don't skip (fail open) + return false + } + + msgLower := strings.ToLower(message) + bodyLower := strings.ToLower(body) + + // Check if count_tokens errors should be ignored + if settings.IgnoreCountTokensErrors && strings.Contains(requestPath, "/count_tokens") { + return true + } + + // Check if context canceled errors should be ignored (client disconnects) + if settings.IgnoreContextCanceled { + if strings.Contains(msgLower, "context canceled") || strings.Contains(bodyLower, "context canceled") { + return true + } + } + + // Check if "no available accounts" errors should be ignored + if settings.IgnoreNoAvailableAccounts { + if strings.Contains(msgLower, "no available accounts") || strings.Contains(bodyLower, "no available accounts") { + return true + } + } + + return false +} diff --git a/backend/internal/handler/setting_handler.go b/backend/internal/handler/setting_handler.go index e1b20c8c..cac79e9c 100644 --- a/backend/internal/handler/setting_handler.go +++ b/backend/internal/handler/setting_handler.go @@ -42,6 +42,7 @@ func (h *SettingHandler) GetPublicSettings(c *gin.Context) { APIBaseURL: settings.APIBaseURL, ContactInfo: settings.ContactInfo, DocURL: settings.DocURL, + HomeContent: settings.HomeContent, LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled, Version: h.version, }) diff --git 
a/backend/internal/handler/wire.go b/backend/internal/handler/wire.go index 1695f8a9..2af7905e 100644 --- a/backend/internal/handler/wire.go +++ b/backend/internal/handler/wire.go @@ -19,7 +19,9 @@ func ProvideAdminHandlers( antigravityOAuthHandler *admin.AntigravityOAuthHandler, proxyHandler *admin.ProxyHandler, redeemHandler *admin.RedeemHandler, + promoHandler *admin.PromoHandler, settingHandler *admin.SettingHandler, + opsHandler *admin.OpsHandler, systemHandler *admin.SystemHandler, subscriptionHandler *admin.SubscriptionHandler, usageHandler *admin.UsageHandler, @@ -36,7 +38,9 @@ func ProvideAdminHandlers( AntigravityOAuth: antigravityOAuthHandler, Proxy: proxyHandler, Redeem: redeemHandler, + Promo: promoHandler, Setting: settingHandler, + Ops: opsHandler, System: systemHandler, Subscription: subscriptionHandler, Usage: usageHandler, @@ -105,7 +109,9 @@ var ProviderSet = wire.NewSet( admin.NewAntigravityOAuthHandler, admin.NewProxyHandler, admin.NewRedeemHandler, + admin.NewPromoHandler, admin.NewSettingHandler, + admin.NewOpsHandler, ProvideSystemHandler, admin.NewSubscriptionHandler, admin.NewUsageHandler, diff --git a/backend/internal/middleware/rate_limiter.go b/backend/internal/middleware/rate_limiter.go new file mode 100644 index 00000000..819d74c2 --- /dev/null +++ b/backend/internal/middleware/rate_limiter.go @@ -0,0 +1,161 @@ +package middleware + +import ( + "context" + "fmt" + "log" + "net/http" + "strconv" + "time" + + "github.com/gin-gonic/gin" + "github.com/redis/go-redis/v9" +) + +// RateLimitFailureMode Redis 故障策略 +type RateLimitFailureMode int + +const ( + RateLimitFailOpen RateLimitFailureMode = iota + RateLimitFailClose +) + +// RateLimitOptions 限流可选配置 +type RateLimitOptions struct { + FailureMode RateLimitFailureMode +} + +var rateLimitScript = redis.NewScript(` +local current = redis.call('INCR', KEYS[1]) +local ttl = redis.call('PTTL', KEYS[1]) +local repaired = 0 +if current == 1 then + redis.call('PEXPIRE', KEYS[1], ARGV[1]) +elseif 
ttl == -1 then + redis.call('PEXPIRE', KEYS[1], ARGV[1]) + repaired = 1 +end +return {current, repaired} +`) + +// rateLimitRun 允许测试覆写脚本执行逻辑 +var rateLimitRun = func(ctx context.Context, client *redis.Client, key string, windowMillis int64) (int64, bool, error) { + values, err := rateLimitScript.Run(ctx, client, []string{key}, windowMillis).Slice() + if err != nil { + return 0, false, err + } + if len(values) < 2 { + return 0, false, fmt.Errorf("rate limit script returned %d values", len(values)) + } + count, err := parseInt64(values[0]) + if err != nil { + return 0, false, err + } + repaired, err := parseInt64(values[1]) + if err != nil { + return 0, false, err + } + return count, repaired == 1, nil +} + +// RateLimiter Redis 速率限制器 +type RateLimiter struct { + redis *redis.Client + prefix string +} + +// NewRateLimiter 创建速率限制器实例 +func NewRateLimiter(redisClient *redis.Client) *RateLimiter { + return &RateLimiter{ + redis: redisClient, + prefix: "rate_limit:", + } +} + +// Limit 返回速率限制中间件 +// key: 限制类型标识 +// limit: 时间窗口内最大请求数 +// window: 时间窗口 +func (r *RateLimiter) Limit(key string, limit int, window time.Duration) gin.HandlerFunc { + return r.LimitWithOptions(key, limit, window, RateLimitOptions{}) +} + +// LimitWithOptions 返回速率限制中间件(带可选配置) +func (r *RateLimiter) LimitWithOptions(key string, limit int, window time.Duration, opts RateLimitOptions) gin.HandlerFunc { + failureMode := opts.FailureMode + if failureMode != RateLimitFailClose { + failureMode = RateLimitFailOpen + } + + return func(c *gin.Context) { + ip := c.ClientIP() + redisKey := r.prefix + key + ":" + ip + + ctx := c.Request.Context() + + windowMillis := windowTTLMillis(window) + + // 使用 Lua 脚本原子操作增加计数并设置过期 + count, repaired, err := rateLimitRun(ctx, r.redis, redisKey, windowMillis) + if err != nil { + log.Printf("[RateLimit] redis error: key=%s mode=%s err=%v", redisKey, failureModeLabel(failureMode), err) + if failureMode == RateLimitFailClose { + abortRateLimit(c) + return + } + // Redis 
错误时放行,避免影响正常服务 + c.Next() + return + } + if repaired { + log.Printf("[RateLimit] ttl repaired: key=%s window_ms=%d", redisKey, windowMillis) + } + + // 超过限制 + if count > int64(limit) { + abortRateLimit(c) + return + } + + c.Next() + } +} + +func windowTTLMillis(window time.Duration) int64 { + ttl := window.Milliseconds() + if ttl < 1 { + return 1 + } + return ttl +} + +func abortRateLimit(c *gin.Context) { + c.AbortWithStatusJSON(http.StatusTooManyRequests, gin.H{ + "error": "rate limit exceeded", + "message": "Too many requests, please try again later", + }) +} + +func failureModeLabel(mode RateLimitFailureMode) string { + if mode == RateLimitFailClose { + return "fail-close" + } + return "fail-open" +} + +func parseInt64(value any) (int64, error) { + switch v := value.(type) { + case int64: + return v, nil + case int: + return int64(v), nil + case string: + parsed, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, err + } + return parsed, nil + default: + return 0, fmt.Errorf("unexpected value type %T", value) + } +} diff --git a/backend/internal/middleware/rate_limiter_integration_test.go b/backend/internal/middleware/rate_limiter_integration_test.go new file mode 100644 index 00000000..4759a988 --- /dev/null +++ b/backend/internal/middleware/rate_limiter_integration_test.go @@ -0,0 +1,114 @@ +//go:build integration + +package middleware + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" + tcredis "github.com/testcontainers/testcontainers-go/modules/redis" +) + +const redisImageTag = "redis:8.4-alpine" + +func TestRateLimiterSetsTTLAndDoesNotRefresh(t *testing.T) { + gin.SetMode(gin.TestMode) + + ctx := context.Background() + rdb := startRedis(t, ctx) + limiter := NewRateLimiter(rdb) + + router := gin.New() + router.Use(limiter.Limit("ttl-test", 10, 2*time.Second)) + router.GET("/test", func(c *gin.Context) { 
+ c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + recorder := performRequest(router) + require.Equal(t, http.StatusOK, recorder.Code) + + redisKey := limiter.prefix + "ttl-test:127.0.0.1" + ttlBefore, err := rdb.PTTL(ctx, redisKey).Result() + require.NoError(t, err) + require.Greater(t, ttlBefore, time.Duration(0)) + require.LessOrEqual(t, ttlBefore, 2*time.Second) + + time.Sleep(50 * time.Millisecond) + + recorder = performRequest(router) + require.Equal(t, http.StatusOK, recorder.Code) + + ttlAfter, err := rdb.PTTL(ctx, redisKey).Result() + require.NoError(t, err) + require.Less(t, ttlAfter, ttlBefore) +} + +func TestRateLimiterFixesMissingTTL(t *testing.T) { + gin.SetMode(gin.TestMode) + + ctx := context.Background() + rdb := startRedis(t, ctx) + limiter := NewRateLimiter(rdb) + + router := gin.New() + router.Use(limiter.Limit("ttl-missing", 10, 2*time.Second)) + router.GET("/test", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + redisKey := limiter.prefix + "ttl-missing:127.0.0.1" + require.NoError(t, rdb.Set(ctx, redisKey, 5, 0).Err()) + + ttlBefore, err := rdb.PTTL(ctx, redisKey).Result() + require.NoError(t, err) + require.Less(t, ttlBefore, time.Duration(0)) + + recorder := performRequest(router) + require.Equal(t, http.StatusOK, recorder.Code) + + ttlAfter, err := rdb.PTTL(ctx, redisKey).Result() + require.NoError(t, err) + require.Greater(t, ttlAfter, time.Duration(0)) +} + +func performRequest(router *gin.Engine) *httptest.ResponseRecorder { + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = "127.0.0.1:1234" + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + return recorder +} + +func startRedis(t *testing.T, ctx context.Context) *redis.Client { + t.Helper() + + redisContainer, err := tcredis.Run(ctx, redisImageTag) + require.NoError(t, err) + t.Cleanup(func() { + _ = redisContainer.Terminate(ctx) + }) + + redisHost, err := redisContainer.Host(ctx) + require.NoError(t, err) + 
redisPort, err := redisContainer.MappedPort(ctx, "6379/tcp") + require.NoError(t, err) + + rdb := redis.NewClient(&redis.Options{ + Addr: fmt.Sprintf("%s:%d", redisHost, redisPort.Int()), + DB: 0, + }) + require.NoError(t, rdb.Ping(ctx).Err()) + + t.Cleanup(func() { + _ = rdb.Close() + }) + + return rdb +} diff --git a/backend/internal/middleware/rate_limiter_test.go b/backend/internal/middleware/rate_limiter_test.go new file mode 100644 index 00000000..0c379c0f --- /dev/null +++ b/backend/internal/middleware/rate_limiter_test.go @@ -0,0 +1,100 @@ +package middleware + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" +) + +func TestWindowTTLMillis(t *testing.T) { + require.Equal(t, int64(1), windowTTLMillis(500*time.Microsecond)) + require.Equal(t, int64(1), windowTTLMillis(1500*time.Microsecond)) + require.Equal(t, int64(2), windowTTLMillis(2500*time.Microsecond)) +} + +func TestRateLimiterFailureModes(t *testing.T) { + gin.SetMode(gin.TestMode) + + rdb := redis.NewClient(&redis.Options{ + Addr: "127.0.0.1:1", + DialTimeout: 50 * time.Millisecond, + ReadTimeout: 50 * time.Millisecond, + WriteTimeout: 50 * time.Millisecond, + }) + t.Cleanup(func() { + _ = rdb.Close() + }) + + limiter := NewRateLimiter(rdb) + + failOpenRouter := gin.New() + failOpenRouter.Use(limiter.Limit("test", 1, time.Second)) + failOpenRouter.GET("/test", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = "127.0.0.1:1234" + recorder := httptest.NewRecorder() + failOpenRouter.ServeHTTP(recorder, req) + require.Equal(t, http.StatusOK, recorder.Code) + + failCloseRouter := gin.New() + failCloseRouter.Use(limiter.LimitWithOptions("test", 1, time.Second, RateLimitOptions{ + FailureMode: RateLimitFailClose, + })) + failCloseRouter.GET("/test", func(c *gin.Context) { + 
c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + req = httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = "127.0.0.1:1234" + recorder = httptest.NewRecorder() + failCloseRouter.ServeHTTP(recorder, req) + require.Equal(t, http.StatusTooManyRequests, recorder.Code) +} + +func TestRateLimiterSuccessAndLimit(t *testing.T) { + gin.SetMode(gin.TestMode) + + originalRun := rateLimitRun + counts := []int64{1, 2} + callIndex := 0 + rateLimitRun = func(ctx context.Context, client *redis.Client, key string, windowMillis int64) (int64, bool, error) { + if callIndex >= len(counts) { + return counts[len(counts)-1], false, nil + } + value := counts[callIndex] + callIndex++ + return value, false, nil + } + t.Cleanup(func() { + rateLimitRun = originalRun + }) + + limiter := NewRateLimiter(redis.NewClient(&redis.Options{Addr: "127.0.0.1:1"})) + + router := gin.New() + router.Use(limiter.Limit("test", 1, time.Second)) + router.GET("/test", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + req := httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = "127.0.0.1:1234" + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + require.Equal(t, http.StatusOK, recorder.Code) + + req = httptest.NewRequest(http.MethodGet, "/test", nil) + req.RemoteAddr = "127.0.0.1:1234" + recorder = httptest.NewRecorder() + router.ServeHTTP(recorder, req) + require.Equal(t, http.StatusTooManyRequests, recorder.Code) +} diff --git a/backend/internal/pkg/ctxkey/ctxkey.go b/backend/internal/pkg/ctxkey/ctxkey.go index 3add78de..27bb5ac5 100644 --- a/backend/internal/pkg/ctxkey/ctxkey.go +++ b/backend/internal/pkg/ctxkey/ctxkey.go @@ -7,6 +7,15 @@ type Key string const ( // ForcePlatform 强制平台(用于 /antigravity 路由),由 middleware.ForcePlatform 设置 ForcePlatform Key = "ctx_force_platform" - // IsClaudeCodeClient 是否为 Claude Code 客户端,由中间件设置 + + // ClientRequestID 客户端请求的唯一标识,用于追踪请求全生命周期(用于 Ops 监控与排障)。 + ClientRequestID Key = "ctx_client_request_id" + 
+ // RetryCount 表示当前请求在网关层的重试次数(用于 Ops 记录与排障)。 + RetryCount Key = "ctx_retry_count" + + // IsClaudeCodeClient 标识当前请求是否来自 Claude Code 客户端 IsClaudeCodeClient Key = "ctx_is_claude_code_client" + // Group 认证后的分组信息,由 API Key 认证中间件设置 + Group Key = "ctx_group" ) diff --git a/backend/internal/pkg/ip/ip.go b/backend/internal/pkg/ip/ip.go new file mode 100644 index 00000000..97109c0c --- /dev/null +++ b/backend/internal/pkg/ip/ip.go @@ -0,0 +1,168 @@ +// Package ip 提供客户端 IP 地址提取工具。 +package ip + +import ( + "net" + "strings" + + "github.com/gin-gonic/gin" +) + +// GetClientIP 从 Gin Context 中提取客户端真实 IP 地址。 +// 按以下优先级检查 Header: +// 1. CF-Connecting-IP (Cloudflare) +// 2. X-Real-IP (Nginx) +// 3. X-Forwarded-For (取第一个非私有 IP) +// 4. c.ClientIP() (Gin 内置方法) +func GetClientIP(c *gin.Context) string { + // 1. Cloudflare + if ip := c.GetHeader("CF-Connecting-IP"); ip != "" { + return normalizeIP(ip) + } + + // 2. Nginx X-Real-IP + if ip := c.GetHeader("X-Real-IP"); ip != "" { + return normalizeIP(ip) + } + + // 3. X-Forwarded-For (多个 IP 时取第一个公网 IP) + if xff := c.GetHeader("X-Forwarded-For"); xff != "" { + ips := strings.Split(xff, ",") + for _, ip := range ips { + ip = strings.TrimSpace(ip) + if ip != "" && !isPrivateIP(ip) { + return normalizeIP(ip) + } + } + // 如果都是私有 IP,返回第一个 + if len(ips) > 0 { + return normalizeIP(strings.TrimSpace(ips[0])) + } + } + + // 4. 
Gin 内置方法 + return normalizeIP(c.ClientIP()) +} + +// normalizeIP 规范化 IP 地址,去除端口号和空格。 +func normalizeIP(ip string) string { + ip = strings.TrimSpace(ip) + // 移除端口号(如 "192.168.1.1:8080" -> "192.168.1.1") + if host, _, err := net.SplitHostPort(ip); err == nil { + return host + } + return ip +} + +// isPrivateIP 检查 IP 是否为私有地址。 +func isPrivateIP(ipStr string) bool { + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + + // 私有 IP 范围 + privateBlocks := []string{ + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + "127.0.0.0/8", + "::1/128", + "fc00::/7", + } + + for _, block := range privateBlocks { + _, cidr, err := net.ParseCIDR(block) + if err != nil { + continue + } + if cidr.Contains(ip) { + return true + } + } + return false +} + +// MatchesPattern 检查 IP 是否匹配指定的模式(支持单个 IP 或 CIDR)。 +// pattern 可以是: +// - 单个 IP: "192.168.1.100" +// - CIDR 范围: "192.168.1.0/24" +func MatchesPattern(clientIP, pattern string) bool { + ip := net.ParseIP(clientIP) + if ip == nil { + return false + } + + // 尝试解析为 CIDR + if strings.Contains(pattern, "/") { + _, cidr, err := net.ParseCIDR(pattern) + if err != nil { + return false + } + return cidr.Contains(ip) + } + + // 作为单个 IP 处理 + patternIP := net.ParseIP(pattern) + if patternIP == nil { + return false + } + return ip.Equal(patternIP) +} + +// MatchesAnyPattern 检查 IP 是否匹配任意一个模式。 +func MatchesAnyPattern(clientIP string, patterns []string) bool { + for _, pattern := range patterns { + if MatchesPattern(clientIP, pattern) { + return true + } + } + return false +} + +// CheckIPRestriction 检查 IP 是否被 API Key 的 IP 限制允许。 +// 返回值:(是否允许, 拒绝原因) +// 逻辑: +// 1. 先检查黑名单,如果在黑名单中则直接拒绝 +// 2. 如果白名单不为空,IP 必须在白名单中 +// 3. 如果白名单为空,允许访问(除非被黑名单拒绝) +func CheckIPRestriction(clientIP string, whitelist, blacklist []string) (bool, string) { + // 规范化 IP + clientIP = normalizeIP(clientIP) + if clientIP == "" { + return false, "access denied" + } + + // 1. 
检查黑名单 + if len(blacklist) > 0 && MatchesAnyPattern(clientIP, blacklist) { + return false, "access denied" + } + + // 2. 检查白名单(如果设置了白名单,IP 必须在其中) + if len(whitelist) > 0 && !MatchesAnyPattern(clientIP, whitelist) { + return false, "access denied" + } + + return true, "" +} + +// ValidateIPPattern 验证 IP 或 CIDR 格式是否有效。 +func ValidateIPPattern(pattern string) bool { + if strings.Contains(pattern, "/") { + _, _, err := net.ParseCIDR(pattern) + return err == nil + } + return net.ParseIP(pattern) != nil +} + +// ValidateIPPatterns 验证多个 IP 或 CIDR 格式。 +// 返回无效的模式列表。 +func ValidateIPPatterns(patterns []string) []string { + var invalid []string + for _, p := range patterns { + if !ValidateIPPattern(p) { + invalid = append(invalid, p) + } + } + return invalid +} diff --git a/backend/internal/pkg/usagestats/account_stats.go b/backend/internal/pkg/usagestats/account_stats.go index ed77dd27..9ac49625 100644 --- a/backend/internal/pkg/usagestats/account_stats.go +++ b/backend/internal/pkg/usagestats/account_stats.go @@ -1,8 +1,14 @@ package usagestats // AccountStats 账号使用统计 +// +// cost: 账号口径费用(使用 total_cost * account_rate_multiplier) +// standard_cost: 标准费用(使用 total_cost,不含倍率) +// user_cost: 用户/API Key 口径费用(使用 actual_cost,受分组倍率影响) type AccountStats struct { - Requests int64 `json:"requests"` - Tokens int64 `json:"tokens"` - Cost float64 `json:"cost"` + Requests int64 `json:"requests"` + Tokens int64 `json:"tokens"` + Cost float64 `json:"cost"` + StandardCost float64 `json:"standard_cost"` + UserCost float64 `json:"user_cost"` } diff --git a/backend/internal/pkg/usagestats/usage_log_types.go b/backend/internal/pkg/usagestats/usage_log_types.go index 39314602..2f6c7fe0 100644 --- a/backend/internal/pkg/usagestats/usage_log_types.go +++ b/backend/internal/pkg/usagestats/usage_log_types.go @@ -9,6 +9,12 @@ type DashboardStats struct { TotalUsers int64 `json:"total_users"` TodayNewUsers int64 `json:"today_new_users"` // 今日新增用户数 ActiveUsers int64 `json:"active_users"` // 今日有请求的用户数 + // 
小时活跃用户数(UTC 当前小时) + HourlyActiveUsers int64 `json:"hourly_active_users"` + + // 预聚合新鲜度 + StatsUpdatedAt string `json:"stats_updated_at"` + StatsStale bool `json:"stats_stale"` // API Key 统计 TotalAPIKeys int64 `json:"total_api_keys"` @@ -141,14 +147,15 @@ type UsageLogFilters struct { // UsageStats represents usage statistics type UsageStats struct { - TotalRequests int64 `json:"total_requests"` - TotalInputTokens int64 `json:"total_input_tokens"` - TotalOutputTokens int64 `json:"total_output_tokens"` - TotalCacheTokens int64 `json:"total_cache_tokens"` - TotalTokens int64 `json:"total_tokens"` - TotalCost float64 `json:"total_cost"` - TotalActualCost float64 `json:"total_actual_cost"` - AverageDurationMs float64 `json:"average_duration_ms"` + TotalRequests int64 `json:"total_requests"` + TotalInputTokens int64 `json:"total_input_tokens"` + TotalOutputTokens int64 `json:"total_output_tokens"` + TotalCacheTokens int64 `json:"total_cache_tokens"` + TotalTokens int64 `json:"total_tokens"` + TotalCost float64 `json:"total_cost"` + TotalActualCost float64 `json:"total_actual_cost"` + TotalAccountCost *float64 `json:"total_account_cost,omitempty"` + AverageDurationMs float64 `json:"average_duration_ms"` } // BatchUserUsageStats represents usage stats for a single user @@ -171,25 +178,29 @@ type AccountUsageHistory struct { Label string `json:"label"` Requests int64 `json:"requests"` Tokens int64 `json:"tokens"` - Cost float64 `json:"cost"` - ActualCost float64 `json:"actual_cost"` + Cost float64 `json:"cost"` // 标准计费(total_cost) + ActualCost float64 `json:"actual_cost"` // 账号口径费用(total_cost * account_rate_multiplier) + UserCost float64 `json:"user_cost"` // 用户口径费用(actual_cost,受分组倍率影响) } // AccountUsageSummary represents summary statistics for an account type AccountUsageSummary struct { Days int `json:"days"` ActualDaysUsed int `json:"actual_days_used"` - TotalCost float64 `json:"total_cost"` + TotalCost float64 `json:"total_cost"` // 账号口径费用 + TotalUserCost float64 
`json:"total_user_cost"` // 用户口径费用 TotalStandardCost float64 `json:"total_standard_cost"` TotalRequests int64 `json:"total_requests"` TotalTokens int64 `json:"total_tokens"` - AvgDailyCost float64 `json:"avg_daily_cost"` + AvgDailyCost float64 `json:"avg_daily_cost"` // 账号口径日均 + AvgDailyUserCost float64 `json:"avg_daily_user_cost"` AvgDailyRequests float64 `json:"avg_daily_requests"` AvgDailyTokens float64 `json:"avg_daily_tokens"` AvgDurationMs float64 `json:"avg_duration_ms"` Today *struct { Date string `json:"date"` Cost float64 `json:"cost"` + UserCost float64 `json:"user_cost"` Requests int64 `json:"requests"` Tokens int64 `json:"tokens"` } `json:"today"` @@ -197,6 +208,7 @@ type AccountUsageSummary struct { Date string `json:"date"` Label string `json:"label"` Cost float64 `json:"cost"` + UserCost float64 `json:"user_cost"` Requests int64 `json:"requests"` } `json:"highest_cost_day"` HighestRequestDay *struct { @@ -204,6 +216,7 @@ type AccountUsageSummary struct { Label string `json:"label"` Requests int64 `json:"requests"` Cost float64 `json:"cost"` + UserCost float64 `json:"user_cost"` } `json:"highest_request_day"` } diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index 0acf1636..8aa487ac 100644 --- a/backend/internal/repository/account_repo.go +++ b/backend/internal/repository/account_repo.go @@ -15,6 +15,7 @@ import ( "database/sql" "encoding/json" "errors" + "log" "strconv" "time" @@ -79,6 +80,10 @@ func (r *accountRepository) Create(ctx context.Context, account *service.Account SetSchedulable(account.Schedulable). 
SetAutoPauseOnExpired(account.AutoPauseOnExpired) + if account.RateMultiplier != nil { + builder.SetRateMultiplier(*account.RateMultiplier) + } + if account.ProxyID != nil { builder.SetProxyID(*account.ProxyID) } @@ -115,6 +120,9 @@ func (r *accountRepository) Create(ctx context.Context, account *service.Account account.ID = created.ID account.CreatedAt = created.CreatedAt account.UpdatedAt = created.UpdatedAt + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &account.ID, nil, buildSchedulerGroupPayload(account.GroupIDs)); err != nil { + log.Printf("[SchedulerOutbox] enqueue account create failed: account=%d err=%v", account.ID, err) + } return nil } @@ -287,6 +295,10 @@ func (r *accountRepository) Update(ctx context.Context, account *service.Account SetSchedulable(account.Schedulable). SetAutoPauseOnExpired(account.AutoPauseOnExpired) + if account.RateMultiplier != nil { + builder.SetRateMultiplier(*account.RateMultiplier) + } + if account.ProxyID != nil { builder.SetProxyID(*account.ProxyID) } else { @@ -341,10 +353,17 @@ func (r *accountRepository) Update(ctx context.Context, account *service.Account return translatePersistenceError(err, service.ErrAccountNotFound, nil) } account.UpdatedAt = updated.UpdatedAt + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &account.ID, nil, buildSchedulerGroupPayload(account.GroupIDs)); err != nil { + log.Printf("[SchedulerOutbox] enqueue account update failed: account=%d err=%v", account.ID, err) + } return nil } func (r *accountRepository) Delete(ctx context.Context, id int64) error { + groupIDs, err := r.loadAccountGroupIDs(ctx, id) + if err != nil { + return err + } // 使用事务保证账号与关联分组的删除原子性 tx, err := r.client.Tx(ctx) if err != nil && !errors.Is(err, dbent.ErrTxStarted) { @@ -368,7 +387,12 @@ func (r *accountRepository) Delete(ctx context.Context, id int64) error { } if tx != nil { - return tx.Commit() + if err := tx.Commit(); err != nil { + 
return err + } + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, buildSchedulerGroupPayload(groupIDs)); err != nil { + log.Printf("[SchedulerOutbox] enqueue account delete failed: account=%d err=%v", id, err) } return nil } @@ -455,7 +479,18 @@ func (r *accountRepository) UpdateLastUsed(ctx context.Context, id int64) error Where(dbaccount.IDEQ(id)). SetLastUsedAt(now). Save(ctx) - return err + if err != nil { + return err + } + payload := map[string]any{ + "last_used": map[string]int64{ + strconv.FormatInt(id, 10): now.Unix(), + }, + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountLastUsed, &id, nil, payload); err != nil { + log.Printf("[SchedulerOutbox] enqueue last used failed: account=%d err=%v", id, err) + } + return nil } func (r *accountRepository) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error { @@ -479,7 +514,18 @@ func (r *accountRepository) BatchUpdateLastUsed(ctx context.Context, updates map args = append(args, pq.Array(ids)) _, err := r.sql.ExecContext(ctx, caseSQL, args...) - return err + if err != nil { + return err + } + lastUsedPayload := make(map[string]int64, len(updates)) + for id, ts := range updates { + lastUsedPayload[strconv.FormatInt(id, 10)] = ts.Unix() + } + payload := map[string]any{"last_used": lastUsedPayload} + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountLastUsed, nil, nil, payload); err != nil { + log.Printf("[SchedulerOutbox] enqueue batch last used failed: err=%v", err) + } + return nil } func (r *accountRepository) SetError(ctx context.Context, id int64, errorMsg string) error { @@ -488,7 +534,13 @@ func (r *accountRepository) SetError(ctx context.Context, id int64, errorMsg str SetStatus(service.StatusError). SetErrorMessage(errorMsg). 
Save(ctx) - return err + if err != nil { + return err + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue set error failed: account=%d err=%v", id, err) + } + return nil } func (r *accountRepository) ClearError(ctx context.Context, id int64) error { @@ -506,7 +558,14 @@ func (r *accountRepository) AddToGroup(ctx context.Context, accountID, groupID i SetGroupID(groupID). SetPriority(priority). Save(ctx) - return err + if err != nil { + return err + } + payload := buildSchedulerGroupPayload([]int64{groupID}) + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountGroupsChanged, &accountID, nil, payload); err != nil { + log.Printf("[SchedulerOutbox] enqueue add to group failed: account=%d group=%d err=%v", accountID, groupID, err) + } + return nil } func (r *accountRepository) RemoveFromGroup(ctx context.Context, accountID, groupID int64) error { @@ -516,7 +575,14 @@ func (r *accountRepository) RemoveFromGroup(ctx context.Context, accountID, grou dbaccountgroup.GroupIDEQ(groupID), ). 
Exec(ctx) - return err + if err != nil { + return err + } + payload := buildSchedulerGroupPayload([]int64{groupID}) + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountGroupsChanged, &accountID, nil, payload); err != nil { + log.Printf("[SchedulerOutbox] enqueue remove from group failed: account=%d group=%d err=%v", accountID, groupID, err) + } + return nil } func (r *accountRepository) GetGroups(ctx context.Context, accountID int64) ([]service.Group, error) { @@ -537,6 +603,10 @@ func (r *accountRepository) GetGroups(ctx context.Context, accountID int64) ([]s } func (r *accountRepository) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error { + existingGroupIDs, err := r.loadAccountGroupIDs(ctx, accountID) + if err != nil { + return err + } // 使用事务保证删除旧绑定与创建新绑定的原子性 tx, err := r.client.Tx(ctx) if err != nil && !errors.Is(err, dbent.ErrTxStarted) { @@ -577,7 +647,13 @@ func (r *accountRepository) BindGroups(ctx context.Context, accountID int64, gro } if tx != nil { - return tx.Commit() + if err := tx.Commit(); err != nil { + return err + } + } + payload := buildSchedulerGroupPayload(mergeGroupIDs(existingGroupIDs, groupIDs)) + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountGroupsChanged, &accountID, nil, payload); err != nil { + log.Printf("[SchedulerOutbox] enqueue bind groups failed: account=%d err=%v", accountID, err) } return nil } @@ -681,7 +757,13 @@ func (r *accountRepository) SetRateLimited(ctx context.Context, id int64, resetA SetRateLimitedAt(now). SetRateLimitResetAt(resetAt). 
Save(ctx) - return err + if err != nil { + return err + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue rate limit failed: account=%d err=%v", id, err) + } + return nil } func (r *accountRepository) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope service.AntigravityQuotaScope, resetAt time.Time) error { @@ -715,6 +797,49 @@ func (r *accountRepository) SetAntigravityQuotaScopeLimit(ctx context.Context, i if affected == 0 { return service.ErrAccountNotFound } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue quota scope failed: account=%d err=%v", id, err) + } + return nil +} + +func (r *accountRepository) SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error { + if scope == "" { + return nil + } + now := time.Now().UTC() + payload := map[string]string{ + "rate_limited_at": now.Format(time.RFC3339), + "rate_limit_reset_at": resetAt.UTC().Format(time.RFC3339), + } + raw, err := json.Marshal(payload) + if err != nil { + return err + } + + path := "{model_rate_limits," + scope + "}" + client := clientFromContext(ctx, r.client) + result, err := client.ExecContext( + ctx, + "UPDATE accounts SET extra = jsonb_set(COALESCE(extra, '{}'::jsonb), $1::text[], $2::jsonb, true), updated_at = NOW() WHERE id = $3 AND deleted_at IS NULL", + path, + raw, + id, + ) + if err != nil { + return err + } + + affected, err := result.RowsAffected() + if err != nil { + return err + } + if affected == 0 { + return service.ErrAccountNotFound + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue model rate limit failed: account=%d err=%v", id, err) + } return nil } @@ -723,7 +848,13 @@ func (r *accountRepository) 
SetOverloaded(ctx context.Context, id int64, until t Where(dbaccount.IDEQ(id)). SetOverloadUntil(until). Save(ctx) - return err + if err != nil { + return err + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue overload failed: account=%d err=%v", id, err) + } + return nil } func (r *accountRepository) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error { @@ -736,7 +867,13 @@ func (r *accountRepository) SetTempUnschedulable(ctx context.Context, id int64, AND deleted_at IS NULL AND (temp_unschedulable_until IS NULL OR temp_unschedulable_until < $1) `, until, reason, id) - return err + if err != nil { + return err + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue temp unschedulable failed: account=%d err=%v", id, err) + } + return nil } func (r *accountRepository) ClearTempUnschedulable(ctx context.Context, id int64) error { @@ -748,7 +885,13 @@ func (r *accountRepository) ClearTempUnschedulable(ctx context.Context, id int64 WHERE id = $1 AND deleted_at IS NULL `, id) - return err + if err != nil { + return err + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue clear temp unschedulable failed: account=%d err=%v", id, err) + } + return nil } func (r *accountRepository) ClearRateLimit(ctx context.Context, id int64) error { @@ -758,7 +901,13 @@ func (r *accountRepository) ClearRateLimit(ctx context.Context, id int64) error ClearRateLimitResetAt(). ClearOverloadUntil(). 
Save(ctx) - return err + if err != nil { + return err + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue clear rate limit failed: account=%d err=%v", id, err) + } + return nil } func (r *accountRepository) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error { @@ -779,6 +928,33 @@ func (r *accountRepository) ClearAntigravityQuotaScopes(ctx context.Context, id if affected == 0 { return service.ErrAccountNotFound } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue clear quota scopes failed: account=%d err=%v", id, err) + } + return nil +} + +func (r *accountRepository) ClearModelRateLimits(ctx context.Context, id int64) error { + client := clientFromContext(ctx, r.client) + result, err := client.ExecContext( + ctx, + "UPDATE accounts SET extra = COALESCE(extra, '{}'::jsonb) - 'model_rate_limits', updated_at = NOW() WHERE id = $1 AND deleted_at IS NULL", + id, + ) + if err != nil { + return err + } + + affected, err := result.RowsAffected() + if err != nil { + return err + } + if affected == 0 { + return service.ErrAccountNotFound + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue clear model rate limit failed: account=%d err=%v", id, err) + } return nil } @@ -801,7 +977,13 @@ func (r *accountRepository) SetSchedulable(ctx context.Context, id int64, schedu Where(dbaccount.IDEQ(id)). SetSchedulable(schedulable). 
Save(ctx) - return err + if err != nil { + return err + } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue schedulable change failed: account=%d err=%v", id, err) + } + return nil } func (r *accountRepository) AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) { @@ -822,6 +1004,11 @@ func (r *accountRepository) AutoPauseExpiredAccounts(ctx context.Context, now ti if err != nil { return 0, err } + if rows > 0 { + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventFullRebuild, nil, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue auto pause rebuild failed: err=%v", err) + } + } return rows, nil } @@ -853,6 +1040,9 @@ func (r *accountRepository) UpdateExtra(ctx context.Context, id int64, updates m if affected == 0 { return service.ErrAccountNotFound } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountChanged, &id, nil, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue extra update failed: account=%d err=%v", id, err) + } return nil } @@ -890,6 +1080,11 @@ func (r *accountRepository) BulkUpdate(ctx context.Context, ids []int64, updates args = append(args, *updates.Priority) idx++ } + if updates.RateMultiplier != nil { + setClauses = append(setClauses, "rate_multiplier = $"+itoa(idx)) + args = append(args, *updates.RateMultiplier) + idx++ + } if updates.Status != nil { setClauses = append(setClauses, "status = $"+itoa(idx)) args = append(args, *updates.Status) @@ -937,6 +1132,12 @@ func (r *accountRepository) BulkUpdate(ctx context.Context, ids []int64, updates if err != nil { return 0, err } + if rows > 0 { + payload := map[string]any{"account_ids": ids} + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventAccountBulkChanged, nil, nil, payload); err != nil { + log.Printf("[SchedulerOutbox] enqueue bulk update failed: err=%v", err) + } + 
} return rows, nil } @@ -1179,11 +1380,61 @@ func (r *accountRepository) loadAccountGroups(ctx context.Context, accountIDs [] return groupsByAccount, groupIDsByAccount, accountGroupsByAccount, nil } +func (r *accountRepository) loadAccountGroupIDs(ctx context.Context, accountID int64) ([]int64, error) { + entries, err := r.client.AccountGroup. + Query(). + Where(dbaccountgroup.AccountIDEQ(accountID)). + All(ctx) + if err != nil { + return nil, err + } + ids := make([]int64, 0, len(entries)) + for _, entry := range entries { + ids = append(ids, entry.GroupID) + } + return ids, nil +} + +func mergeGroupIDs(a []int64, b []int64) []int64 { + seen := make(map[int64]struct{}, len(a)+len(b)) + out := make([]int64, 0, len(a)+len(b)) + for _, id := range a { + if id <= 0 { + continue + } + if _, ok := seen[id]; ok { + continue + } + seen[id] = struct{}{} + out = append(out, id) + } + for _, id := range b { + if id <= 0 { + continue + } + if _, ok := seen[id]; ok { + continue + } + seen[id] = struct{}{} + out = append(out, id) + } + return out +} + +func buildSchedulerGroupPayload(groupIDs []int64) map[string]any { + if len(groupIDs) == 0 { + return nil + } + return map[string]any{"group_ids": groupIDs} +} + func accountEntityToService(m *dbent.Account) *service.Account { if m == nil { return nil } + rateMultiplier := m.RateMultiplier + return &service.Account{ ID: m.ID, Name: m.Name, @@ -1195,6 +1446,7 @@ func accountEntityToService(m *dbent.Account) *service.Account { ProxyID: m.ProxyID, Concurrency: m.Concurrency, Priority: m.Priority, + RateMultiplier: &rateMultiplier, Status: m.Status, ErrorMessage: derefString(m.ErrorMessage), LastUsedAt: m.LastUsedAt, diff --git a/backend/internal/repository/api_key_cache.go b/backend/internal/repository/api_key_cache.go index 73a929c5..6d834b40 100644 --- a/backend/internal/repository/api_key_cache.go +++ b/backend/internal/repository/api_key_cache.go @@ -2,6 +2,7 @@ package repository import ( "context" + "encoding/json" "errors" 
"fmt" "time" @@ -13,6 +14,7 @@ import ( const ( apiKeyRateLimitKeyPrefix = "apikey:ratelimit:" apiKeyRateLimitDuration = 24 * time.Hour + apiKeyAuthCachePrefix = "apikey:auth:" ) // apiKeyRateLimitKey generates the Redis key for API key creation rate limiting. @@ -20,6 +22,10 @@ func apiKeyRateLimitKey(userID int64) string { return fmt.Sprintf("%s%d", apiKeyRateLimitKeyPrefix, userID) } +func apiKeyAuthCacheKey(key string) string { + return fmt.Sprintf("%s%s", apiKeyAuthCachePrefix, key) +} + type apiKeyCache struct { rdb *redis.Client } @@ -58,3 +64,30 @@ func (c *apiKeyCache) IncrementDailyUsage(ctx context.Context, apiKey string) er func (c *apiKeyCache) SetDailyUsageExpiry(ctx context.Context, apiKey string, ttl time.Duration) error { return c.rdb.Expire(ctx, apiKey, ttl).Err() } + +func (c *apiKeyCache) GetAuthCache(ctx context.Context, key string) (*service.APIKeyAuthCacheEntry, error) { + val, err := c.rdb.Get(ctx, apiKeyAuthCacheKey(key)).Bytes() + if err != nil { + return nil, err + } + var entry service.APIKeyAuthCacheEntry + if err := json.Unmarshal(val, &entry); err != nil { + return nil, err + } + return &entry, nil +} + +func (c *apiKeyCache) SetAuthCache(ctx context.Context, key string, entry *service.APIKeyAuthCacheEntry, ttl time.Duration) error { + if entry == nil { + return nil + } + payload, err := json.Marshal(entry) + if err != nil { + return err + } + return c.rdb.Set(ctx, apiKeyAuthCacheKey(key), payload, ttl).Err() +} + +func (c *apiKeyCache) DeleteAuthCache(ctx context.Context, key string) error { + return c.rdb.Del(ctx, apiKeyAuthCacheKey(key)).Err() +} diff --git a/backend/internal/repository/api_key_repo.go b/backend/internal/repository/api_key_repo.go index f3b07616..ab890844 100644 --- a/backend/internal/repository/api_key_repo.go +++ b/backend/internal/repository/api_key_repo.go @@ -6,7 +6,9 @@ import ( dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/ent/apikey" + "github.com/Wei-Shaw/sub2api/ent/group" 
"github.com/Wei-Shaw/sub2api/ent/schema/mixins" + "github.com/Wei-Shaw/sub2api/ent/user" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" @@ -26,13 +28,21 @@ func (r *apiKeyRepository) activeQuery() *dbent.APIKeyQuery { } func (r *apiKeyRepository) Create(ctx context.Context, key *service.APIKey) error { - created, err := r.client.APIKey.Create(). + builder := r.client.APIKey.Create(). SetUserID(key.UserID). SetKey(key.Key). SetName(key.Name). SetStatus(key.Status). - SetNillableGroupID(key.GroupID). - Save(ctx) + SetNillableGroupID(key.GroupID) + + if len(key.IPWhitelist) > 0 { + builder.SetIPWhitelist(key.IPWhitelist) + } + if len(key.IPBlacklist) > 0 { + builder.SetIPBlacklist(key.IPBlacklist) + } + + created, err := builder.Save(ctx) if err == nil { key.ID = created.ID key.CreatedAt = created.CreatedAt @@ -56,23 +66,23 @@ func (r *apiKeyRepository) GetByID(ctx context.Context, id int64) (*service.APIK return apiKeyEntityToService(m), nil } -// GetOwnerID 根据 API Key ID 获取其所有者(用户)的 ID。 +// GetKeyAndOwnerID 根据 API Key ID 获取其 key 与所有者(用户)ID。 // 相比 GetByID,此方法性能更优,因为: -// - 使用 Select() 只查询 user_id 字段,减少数据传输量 +// - 使用 Select() 只查询必要字段,减少数据传输量 // - 不加载完整的 API Key 实体及其关联数据(User、Group 等) -// - 适用于权限验证等只需用户 ID 的场景(如删除前的所有权检查) -func (r *apiKeyRepository) GetOwnerID(ctx context.Context, id int64) (int64, error) { +// - 适用于删除等只需 key 与用户 ID 的场景 +func (r *apiKeyRepository) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { m, err := r.activeQuery(). Where(apikey.IDEQ(id)). - Select(apikey.FieldUserID). + Select(apikey.FieldKey, apikey.FieldUserID). 
Only(ctx) if err != nil { if dbent.IsNotFound(err) { - return 0, service.ErrAPIKeyNotFound + return "", 0, service.ErrAPIKeyNotFound } - return 0, err + return "", 0, err } - return m.UserID, nil + return m.Key, m.UserID, nil } func (r *apiKeyRepository) GetByKey(ctx context.Context, key string) (*service.APIKey, error) { @@ -90,6 +100,56 @@ func (r *apiKeyRepository) GetByKey(ctx context.Context, key string) (*service.A return apiKeyEntityToService(m), nil } +func (r *apiKeyRepository) GetByKeyForAuth(ctx context.Context, key string) (*service.APIKey, error) { + m, err := r.activeQuery(). + Where(apikey.KeyEQ(key)). + Select( + apikey.FieldID, + apikey.FieldUserID, + apikey.FieldGroupID, + apikey.FieldStatus, + apikey.FieldIPWhitelist, + apikey.FieldIPBlacklist, + ). + WithUser(func(q *dbent.UserQuery) { + q.Select( + user.FieldID, + user.FieldStatus, + user.FieldRole, + user.FieldBalance, + user.FieldConcurrency, + ) + }). + WithGroup(func(q *dbent.GroupQuery) { + q.Select( + group.FieldID, + group.FieldName, + group.FieldPlatform, + group.FieldStatus, + group.FieldSubscriptionType, + group.FieldRateMultiplier, + group.FieldDailyLimitUsd, + group.FieldWeeklyLimitUsd, + group.FieldMonthlyLimitUsd, + group.FieldImagePrice1k, + group.FieldImagePrice2k, + group.FieldImagePrice4k, + group.FieldClaudeCodeOnly, + group.FieldFallbackGroupID, + group.FieldModelRoutingEnabled, + group.FieldModelRouting, + ) + }). 
+ Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrAPIKeyNotFound + } + return nil, err + } + return apiKeyEntityToService(m), nil +} + func (r *apiKeyRepository) Update(ctx context.Context, key *service.APIKey) error { // 使用原子操作:将软删除检查与更新合并到同一语句,避免竞态条件。 // 之前的实现先检查 Exist 再 UpdateOneID,若在两步之间发生软删除, @@ -108,6 +168,18 @@ func (r *apiKeyRepository) Update(ctx context.Context, key *service.APIKey) erro builder.ClearGroupID() } + // IP 限制字段 + if len(key.IPWhitelist) > 0 { + builder.SetIPWhitelist(key.IPWhitelist) + } else { + builder.ClearIPWhitelist() + } + if len(key.IPBlacklist) > 0 { + builder.SetIPBlacklist(key.IPBlacklist) + } else { + builder.ClearIPBlacklist() + } + affected, err := builder.Save(ctx) if err != nil { return err @@ -263,19 +335,43 @@ func (r *apiKeyRepository) CountByGroupID(ctx context.Context, groupID int64) (i return int64(count), err } +func (r *apiKeyRepository) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + keys, err := r.activeQuery(). + Where(apikey.UserIDEQ(userID)). + Select(apikey.FieldKey). + Strings(ctx) + if err != nil { + return nil, err + } + return keys, nil +} + +func (r *apiKeyRepository) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) { + keys, err := r.activeQuery(). + Where(apikey.GroupIDEQ(groupID)). + Select(apikey.FieldKey). 
+ Strings(ctx) + if err != nil { + return nil, err + } + return keys, nil +} + func apiKeyEntityToService(m *dbent.APIKey) *service.APIKey { if m == nil { return nil } out := &service.APIKey{ - ID: m.ID, - UserID: m.UserID, - Key: m.Key, - Name: m.Name, - Status: m.Status, - CreatedAt: m.CreatedAt, - UpdatedAt: m.UpdatedAt, - GroupID: m.GroupID, + ID: m.ID, + UserID: m.UserID, + Key: m.Key, + Name: m.Name, + Status: m.Status, + IPWhitelist: m.IPWhitelist, + IPBlacklist: m.IPBlacklist, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + GroupID: m.GroupID, } if m.Edges.User != nil { out.User = userEntityToService(m.Edges.User) @@ -317,6 +413,7 @@ func groupEntityToService(g *dbent.Group) *service.Group { RateMultiplier: g.RateMultiplier, IsExclusive: g.IsExclusive, Status: g.Status, + Hydrated: true, SubscriptionType: g.SubscriptionType, DailyLimitUSD: g.DailyLimitUsd, WeeklyLimitUSD: g.WeeklyLimitUsd, @@ -327,6 +424,8 @@ func groupEntityToService(g *dbent.Group) *service.Group { DefaultValidityDays: g.DefaultValidityDays, ClaudeCodeOnly: g.ClaudeCodeOnly, FallbackGroupID: g.FallbackGroupID, + ModelRouting: g.ModelRouting, + ModelRoutingEnabled: g.ModelRoutingEnabled, CreatedAt: g.CreatedAt, UpdatedAt: g.UpdatedAt, } diff --git a/backend/internal/repository/concurrency_cache.go b/backend/internal/repository/concurrency_cache.go index 0831f5eb..b34961e1 100644 --- a/backend/internal/repository/concurrency_cache.go +++ b/backend/internal/repository/concurrency_cache.go @@ -93,7 +93,7 @@ var ( return redis.call('ZCARD', key) `) - // incrementWaitScript - only sets TTL on first creation to avoid refreshing + // incrementWaitScript - refreshes TTL on each increment to keep queue depth accurate // KEYS[1] = wait queue key // ARGV[1] = maxWait // ARGV[2] = TTL in seconds @@ -111,15 +111,13 @@ var ( local newVal = redis.call('INCR', KEYS[1]) - -- Only set TTL on first creation to avoid refreshing zombie data - if newVal == 1 then - redis.call('EXPIRE', KEYS[1], ARGV[2]) - 
end + -- Refresh TTL so long-running traffic doesn't expire active queue counters. + redis.call('EXPIRE', KEYS[1], ARGV[2]) return 1 `) - // incrementAccountWaitScript - account-level wait queue count + // incrementAccountWaitScript - account-level wait queue count (refresh TTL on each increment) incrementAccountWaitScript = redis.NewScript(` local current = redis.call('GET', KEYS[1]) if current == false then @@ -134,10 +132,8 @@ var ( local newVal = redis.call('INCR', KEYS[1]) - -- Only set TTL on first creation to avoid refreshing zombie data - if newVal == 1 then - redis.call('EXPIRE', KEYS[1], ARGV[2]) - end + -- Refresh TTL so long-running traffic doesn't expire active queue counters. + redis.call('EXPIRE', KEYS[1], ARGV[2]) return 1 `) diff --git a/backend/internal/repository/dashboard_aggregation_repo.go b/backend/internal/repository/dashboard_aggregation_repo.go new file mode 100644 index 00000000..3543e061 --- /dev/null +++ b/backend/internal/repository/dashboard_aggregation_repo.go @@ -0,0 +1,392 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "log" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/lib/pq" +) + +type dashboardAggregationRepository struct { + sql sqlExecutor +} + +// NewDashboardAggregationRepository 创建仪表盘预聚合仓储。 +func NewDashboardAggregationRepository(sqlDB *sql.DB) service.DashboardAggregationRepository { + if sqlDB == nil { + return nil + } + if !isPostgresDriver(sqlDB) { + log.Printf("[DashboardAggregation] 检测到非 PostgreSQL 驱动,已自动禁用预聚合") + return nil + } + return newDashboardAggregationRepositoryWithSQL(sqlDB) +} + +func newDashboardAggregationRepositoryWithSQL(sqlq sqlExecutor) *dashboardAggregationRepository { + return &dashboardAggregationRepository{sql: sqlq} +} + +func isPostgresDriver(db *sql.DB) bool { + if db == nil { + return false + } + _, ok := db.Driver().(*pq.Driver) + return ok +} + +func (r 
*dashboardAggregationRepository) AggregateRange(ctx context.Context, start, end time.Time) error { + loc := timezone.Location() + startLocal := start.In(loc) + endLocal := end.In(loc) + if !endLocal.After(startLocal) { + return nil + } + + hourStart := startLocal.Truncate(time.Hour) + hourEnd := endLocal.Truncate(time.Hour) + if endLocal.After(hourEnd) { + hourEnd = hourEnd.Add(time.Hour) + } + + dayStart := truncateToDay(startLocal) + dayEnd := truncateToDay(endLocal) + if endLocal.After(dayEnd) { + dayEnd = dayEnd.Add(24 * time.Hour) + } + + // 以桶边界聚合,允许覆盖 end 所在桶的剩余区间。 + if err := r.insertHourlyActiveUsers(ctx, hourStart, hourEnd); err != nil { + return err + } + if err := r.insertDailyActiveUsers(ctx, hourStart, hourEnd); err != nil { + return err + } + if err := r.upsertHourlyAggregates(ctx, hourStart, hourEnd); err != nil { + return err + } + if err := r.upsertDailyAggregates(ctx, dayStart, dayEnd); err != nil { + return err + } + return nil +} + +func (r *dashboardAggregationRepository) GetAggregationWatermark(ctx context.Context) (time.Time, error) { + var ts time.Time + query := "SELECT last_aggregated_at FROM usage_dashboard_aggregation_watermark WHERE id = 1" + if err := scanSingleRow(ctx, r.sql, query, nil, &ts); err != nil { + if err == sql.ErrNoRows { + return time.Unix(0, 0).UTC(), nil + } + return time.Time{}, err + } + return ts.UTC(), nil +} + +func (r *dashboardAggregationRepository) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error { + query := ` + INSERT INTO usage_dashboard_aggregation_watermark (id, last_aggregated_at, updated_at) + VALUES (1, $1, NOW()) + ON CONFLICT (id) + DO UPDATE SET last_aggregated_at = EXCLUDED.last_aggregated_at, updated_at = EXCLUDED.updated_at + ` + _, err := r.sql.ExecContext(ctx, query, aggregatedAt.UTC()) + return err +} + +func (r *dashboardAggregationRepository) CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error { + hourlyCutoffUTC := hourlyCutoff.UTC() 
+ dailyCutoffUTC := dailyCutoff.UTC() + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly WHERE bucket_start < $1", hourlyCutoffUTC); err != nil { + return err + } + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_hourly_users WHERE bucket_start < $1", hourlyCutoffUTC); err != nil { + return err + } + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily WHERE bucket_date < $1::date", dailyCutoffUTC); err != nil { + return err + } + if _, err := r.sql.ExecContext(ctx, "DELETE FROM usage_dashboard_daily_users WHERE bucket_date < $1::date", dailyCutoffUTC); err != nil { + return err + } + return nil +} + +func (r *dashboardAggregationRepository) CleanupUsageLogs(ctx context.Context, cutoff time.Time) error { + isPartitioned, err := r.isUsageLogsPartitioned(ctx) + if err != nil { + return err + } + if isPartitioned { + return r.dropUsageLogsPartitions(ctx, cutoff) + } + _, err = r.sql.ExecContext(ctx, "DELETE FROM usage_logs WHERE created_at < $1", cutoff.UTC()) + return err +} + +func (r *dashboardAggregationRepository) EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error { + isPartitioned, err := r.isUsageLogsPartitioned(ctx) + if err != nil || !isPartitioned { + return err + } + monthStart := truncateToMonthUTC(now) + prevMonth := monthStart.AddDate(0, -1, 0) + nextMonth := monthStart.AddDate(0, 1, 0) + + for _, m := range []time.Time{prevMonth, monthStart, nextMonth} { + if err := r.createUsageLogsPartition(ctx, m); err != nil { + return err + } + } + return nil +} + +func (r *dashboardAggregationRepository) insertHourlyActiveUsers(ctx context.Context, start, end time.Time) error { + tzName := timezone.Name() + query := ` + INSERT INTO usage_dashboard_hourly_users (bucket_start, user_id) + SELECT DISTINCT + date_trunc('hour', created_at AT TIME ZONE $3) AT TIME ZONE $3 AS bucket_start, + user_id + FROM usage_logs + WHERE created_at >= $1 AND created_at < $2 + ON CONFLICT DO NOTHING + ` + _, err 
:= r.sql.ExecContext(ctx, query, start, end, tzName) + return err +} + +func (r *dashboardAggregationRepository) insertDailyActiveUsers(ctx context.Context, start, end time.Time) error { + tzName := timezone.Name() + query := ` + INSERT INTO usage_dashboard_daily_users (bucket_date, user_id) + SELECT DISTINCT + (bucket_start AT TIME ZONE $3)::date AS bucket_date, + user_id + FROM usage_dashboard_hourly_users + WHERE bucket_start >= $1 AND bucket_start < $2 + ON CONFLICT DO NOTHING + ` + _, err := r.sql.ExecContext(ctx, query, start, end, tzName) + return err +} + +func (r *dashboardAggregationRepository) upsertHourlyAggregates(ctx context.Context, start, end time.Time) error { + tzName := timezone.Name() + query := ` + WITH hourly AS ( + SELECT + date_trunc('hour', created_at AT TIME ZONE $3) AT TIME ZONE $3 AS bucket_start, + COUNT(*) AS total_requests, + COALESCE(SUM(input_tokens), 0) AS input_tokens, + COALESCE(SUM(output_tokens), 0) AS output_tokens, + COALESCE(SUM(cache_creation_tokens), 0) AS cache_creation_tokens, + COALESCE(SUM(cache_read_tokens), 0) AS cache_read_tokens, + COALESCE(SUM(total_cost), 0) AS total_cost, + COALESCE(SUM(actual_cost), 0) AS actual_cost, + COALESCE(SUM(COALESCE(duration_ms, 0)), 0) AS total_duration_ms + FROM usage_logs + WHERE created_at >= $1 AND created_at < $2 + GROUP BY 1 + ), + user_counts AS ( + SELECT bucket_start, COUNT(*) AS active_users + FROM usage_dashboard_hourly_users + WHERE bucket_start >= $1 AND bucket_start < $2 + GROUP BY bucket_start + ) + INSERT INTO usage_dashboard_hourly ( + bucket_start, + total_requests, + input_tokens, + output_tokens, + cache_creation_tokens, + cache_read_tokens, + total_cost, + actual_cost, + total_duration_ms, + active_users, + computed_at + ) + SELECT + hourly.bucket_start, + hourly.total_requests, + hourly.input_tokens, + hourly.output_tokens, + hourly.cache_creation_tokens, + hourly.cache_read_tokens, + hourly.total_cost, + hourly.actual_cost, + hourly.total_duration_ms, + 
COALESCE(user_counts.active_users, 0) AS active_users, + NOW() + FROM hourly + LEFT JOIN user_counts ON user_counts.bucket_start = hourly.bucket_start + ON CONFLICT (bucket_start) + DO UPDATE SET + total_requests = EXCLUDED.total_requests, + input_tokens = EXCLUDED.input_tokens, + output_tokens = EXCLUDED.output_tokens, + cache_creation_tokens = EXCLUDED.cache_creation_tokens, + cache_read_tokens = EXCLUDED.cache_read_tokens, + total_cost = EXCLUDED.total_cost, + actual_cost = EXCLUDED.actual_cost, + total_duration_ms = EXCLUDED.total_duration_ms, + active_users = EXCLUDED.active_users, + computed_at = EXCLUDED.computed_at + ` + _, err := r.sql.ExecContext(ctx, query, start, end, tzName) + return err +} + +func (r *dashboardAggregationRepository) upsertDailyAggregates(ctx context.Context, start, end time.Time) error { + tzName := timezone.Name() + query := ` + WITH daily AS ( + SELECT + (bucket_start AT TIME ZONE $5)::date AS bucket_date, + COALESCE(SUM(total_requests), 0) AS total_requests, + COALESCE(SUM(input_tokens), 0) AS input_tokens, + COALESCE(SUM(output_tokens), 0) AS output_tokens, + COALESCE(SUM(cache_creation_tokens), 0) AS cache_creation_tokens, + COALESCE(SUM(cache_read_tokens), 0) AS cache_read_tokens, + COALESCE(SUM(total_cost), 0) AS total_cost, + COALESCE(SUM(actual_cost), 0) AS actual_cost, + COALESCE(SUM(total_duration_ms), 0) AS total_duration_ms + FROM usage_dashboard_hourly + WHERE bucket_start >= $1 AND bucket_start < $2 + GROUP BY (bucket_start AT TIME ZONE $5)::date + ), + user_counts AS ( + SELECT bucket_date, COUNT(*) AS active_users + FROM usage_dashboard_daily_users + WHERE bucket_date >= $3::date AND bucket_date < $4::date + GROUP BY bucket_date + ) + INSERT INTO usage_dashboard_daily ( + bucket_date, + total_requests, + input_tokens, + output_tokens, + cache_creation_tokens, + cache_read_tokens, + total_cost, + actual_cost, + total_duration_ms, + active_users, + computed_at + ) + SELECT + daily.bucket_date, + daily.total_requests, + 
daily.input_tokens, + daily.output_tokens, + daily.cache_creation_tokens, + daily.cache_read_tokens, + daily.total_cost, + daily.actual_cost, + daily.total_duration_ms, + COALESCE(user_counts.active_users, 0) AS active_users, + NOW() + FROM daily + LEFT JOIN user_counts ON user_counts.bucket_date = daily.bucket_date + ON CONFLICT (bucket_date) + DO UPDATE SET + total_requests = EXCLUDED.total_requests, + input_tokens = EXCLUDED.input_tokens, + output_tokens = EXCLUDED.output_tokens, + cache_creation_tokens = EXCLUDED.cache_creation_tokens, + cache_read_tokens = EXCLUDED.cache_read_tokens, + total_cost = EXCLUDED.total_cost, + actual_cost = EXCLUDED.actual_cost, + total_duration_ms = EXCLUDED.total_duration_ms, + active_users = EXCLUDED.active_users, + computed_at = EXCLUDED.computed_at + ` + _, err := r.sql.ExecContext(ctx, query, start, end, start, end, tzName) + return err +} + +func (r *dashboardAggregationRepository) isUsageLogsPartitioned(ctx context.Context) (bool, error) { + query := ` + SELECT EXISTS( + SELECT 1 + FROM pg_partitioned_table pt + JOIN pg_class c ON c.oid = pt.partrelid + WHERE c.relname = 'usage_logs' + ) + ` + var partitioned bool + if err := scanSingleRow(ctx, r.sql, query, nil, &partitioned); err != nil { + return false, err + } + return partitioned, nil +} + +func (r *dashboardAggregationRepository) dropUsageLogsPartitions(ctx context.Context, cutoff time.Time) error { + rows, err := r.sql.QueryContext(ctx, ` + SELECT c.relname + FROM pg_inherits + JOIN pg_class c ON c.oid = pg_inherits.inhrelid + JOIN pg_class p ON p.oid = pg_inherits.inhparent + WHERE p.relname = 'usage_logs' + `) + if err != nil { + return err + } + defer func() { + _ = rows.Close() + }() + + cutoffMonth := truncateToMonthUTC(cutoff) + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + return err + } + if !strings.HasPrefix(name, "usage_logs_") { + continue + } + suffix := strings.TrimPrefix(name, "usage_logs_") + month, err := 
time.Parse("200601", suffix) + if err != nil { + continue + } + month = month.UTC() + if month.Before(cutoffMonth) { + if _, err := r.sql.ExecContext(ctx, fmt.Sprintf("DROP TABLE IF EXISTS %s", pq.QuoteIdentifier(name))); err != nil { + return err + } + } + } + return rows.Err() +} + +func (r *dashboardAggregationRepository) createUsageLogsPartition(ctx context.Context, month time.Time) error { + monthStart := truncateToMonthUTC(month) + nextMonth := monthStart.AddDate(0, 1, 0) + name := fmt.Sprintf("usage_logs_%s", monthStart.Format("200601")) + query := fmt.Sprintf( + "CREATE TABLE IF NOT EXISTS %s PARTITION OF usage_logs FOR VALUES FROM (%s) TO (%s)", + pq.QuoteIdentifier(name), + pq.QuoteLiteral(monthStart.Format("2006-01-02")), + pq.QuoteLiteral(nextMonth.Format("2006-01-02")), + ) + _, err := r.sql.ExecContext(ctx, query) + return err +} + +func truncateToDay(t time.Time) time.Time { + return timezone.StartOfDay(t) +} + +func truncateToMonthUTC(t time.Time) time.Time { + t = t.UTC() + return time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, time.UTC) +} diff --git a/backend/internal/repository/dashboard_cache.go b/backend/internal/repository/dashboard_cache.go new file mode 100644 index 00000000..f996cd68 --- /dev/null +++ b/backend/internal/repository/dashboard_cache.go @@ -0,0 +1,58 @@ +package repository + +import ( + "context" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const dashboardStatsCacheKey = "dashboard:stats:v1" + +type dashboardCache struct { + rdb *redis.Client + keyPrefix string +} + +func NewDashboardCache(rdb *redis.Client, cfg *config.Config) service.DashboardStatsCache { + prefix := "sub2api:" + if cfg != nil { + prefix = strings.TrimSpace(cfg.Dashboard.KeyPrefix) + } + if prefix != "" && !strings.HasSuffix(prefix, ":") { + prefix += ":" + } + return &dashboardCache{ + rdb: rdb, + keyPrefix: prefix, + } +} + +func (c 
*dashboardCache) GetDashboardStats(ctx context.Context) (string, error) { + val, err := c.rdb.Get(ctx, c.buildKey()).Result() + if err != nil { + if err == redis.Nil { + return "", service.ErrDashboardStatsCacheMiss + } + return "", err + } + return val, nil +} + +func (c *dashboardCache) SetDashboardStats(ctx context.Context, data string, ttl time.Duration) error { + return c.rdb.Set(ctx, c.buildKey(), data, ttl).Err() +} + +func (c *dashboardCache) buildKey() string { + if c.keyPrefix == "" { + return dashboardStatsCacheKey + } + return c.keyPrefix + dashboardStatsCacheKey +} + +func (c *dashboardCache) DeleteDashboardStats(ctx context.Context) error { + return c.rdb.Del(ctx, c.buildKey()).Err() +} diff --git a/backend/internal/repository/dashboard_cache_test.go b/backend/internal/repository/dashboard_cache_test.go new file mode 100644 index 00000000..3bb0da4f --- /dev/null +++ b/backend/internal/repository/dashboard_cache_test.go @@ -0,0 +1,28 @@ +package repository + +import ( + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +func TestNewDashboardCacheKeyPrefix(t *testing.T) { + cache := NewDashboardCache(nil, &config.Config{ + Dashboard: config.DashboardCacheConfig{ + KeyPrefix: "prod", + }, + }) + impl, ok := cache.(*dashboardCache) + require.True(t, ok) + require.Equal(t, "prod:", impl.keyPrefix) + + cache = NewDashboardCache(nil, &config.Config{ + Dashboard: config.DashboardCacheConfig{ + KeyPrefix: "staging:", + }, + }) + impl, ok = cache.(*dashboardCache) + require.True(t, ok) + require.Equal(t, "staging:", impl.keyPrefix) +} diff --git a/backend/internal/repository/gemini_token_cache.go b/backend/internal/repository/gemini_token_cache.go index a7270556..d4f552bc 100644 --- a/backend/internal/repository/gemini_token_cache.go +++ b/backend/internal/repository/gemini_token_cache.go @@ -11,8 +11,8 @@ import ( ) const ( - geminiTokenKeyPrefix = "gemini:token:" - geminiRefreshLockKeyPrefix = 
"gemini:refresh_lock:" + oauthTokenKeyPrefix = "oauth:token:" + oauthRefreshLockKeyPrefix = "oauth:refresh_lock:" ) type geminiTokenCache struct { @@ -24,21 +24,26 @@ func NewGeminiTokenCache(rdb *redis.Client) service.GeminiTokenCache { } func (c *geminiTokenCache) GetAccessToken(ctx context.Context, cacheKey string) (string, error) { - key := fmt.Sprintf("%s%s", geminiTokenKeyPrefix, cacheKey) + key := fmt.Sprintf("%s%s", oauthTokenKeyPrefix, cacheKey) return c.rdb.Get(ctx, key).Result() } func (c *geminiTokenCache) SetAccessToken(ctx context.Context, cacheKey string, token string, ttl time.Duration) error { - key := fmt.Sprintf("%s%s", geminiTokenKeyPrefix, cacheKey) + key := fmt.Sprintf("%s%s", oauthTokenKeyPrefix, cacheKey) return c.rdb.Set(ctx, key, token, ttl).Err() } +func (c *geminiTokenCache) DeleteAccessToken(ctx context.Context, cacheKey string) error { + key := fmt.Sprintf("%s%s", oauthTokenKeyPrefix, cacheKey) + return c.rdb.Del(ctx, key).Err() +} + func (c *geminiTokenCache) AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error) { - key := fmt.Sprintf("%s%s", geminiRefreshLockKeyPrefix, cacheKey) + key := fmt.Sprintf("%s%s", oauthRefreshLockKeyPrefix, cacheKey) return c.rdb.SetNX(ctx, key, 1, ttl).Result() } func (c *geminiTokenCache) ReleaseRefreshLock(ctx context.Context, cacheKey string) error { - key := fmt.Sprintf("%s%s", geminiRefreshLockKeyPrefix, cacheKey) + key := fmt.Sprintf("%s%s", oauthRefreshLockKeyPrefix, cacheKey) return c.rdb.Del(ctx, key).Err() } diff --git a/backend/internal/repository/gemini_token_cache_integration_test.go b/backend/internal/repository/gemini_token_cache_integration_test.go new file mode 100644 index 00000000..4fe89865 --- /dev/null +++ b/backend/internal/repository/gemini_token_cache_integration_test.go @@ -0,0 +1,47 @@ +//go:build integration + +package repository + +import ( + "errors" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + 
"github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +type GeminiTokenCacheSuite struct { + IntegrationRedisSuite + cache service.GeminiTokenCache +} + +func (s *GeminiTokenCacheSuite) SetupTest() { + s.IntegrationRedisSuite.SetupTest() + s.cache = NewGeminiTokenCache(s.rdb) +} + +func (s *GeminiTokenCacheSuite) TestDeleteAccessToken() { + cacheKey := "project-123" + token := "token-value" + require.NoError(s.T(), s.cache.SetAccessToken(s.ctx, cacheKey, token, time.Minute)) + + got, err := s.cache.GetAccessToken(s.ctx, cacheKey) + require.NoError(s.T(), err) + require.Equal(s.T(), token, got) + + require.NoError(s.T(), s.cache.DeleteAccessToken(s.ctx, cacheKey)) + + _, err = s.cache.GetAccessToken(s.ctx, cacheKey) + require.True(s.T(), errors.Is(err, redis.Nil), "expected redis.Nil after delete") +} + +func (s *GeminiTokenCacheSuite) TestDeleteAccessToken_MissingKey() { + require.NoError(s.T(), s.cache.DeleteAccessToken(s.ctx, "missing-key")) +} + +func TestGeminiTokenCacheSuite(t *testing.T) { + suite.Run(t, new(GeminiTokenCacheSuite)) +} diff --git a/backend/internal/repository/gemini_token_cache_test.go b/backend/internal/repository/gemini_token_cache_test.go new file mode 100644 index 00000000..4fcebfdd --- /dev/null +++ b/backend/internal/repository/gemini_token_cache_test.go @@ -0,0 +1,28 @@ +//go:build unit + +package repository + +import ( + "context" + "testing" + "time" + + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" +) + +func TestGeminiTokenCache_DeleteAccessToken_RedisError(t *testing.T) { + rdb := redis.NewClient(&redis.Options{ + Addr: "127.0.0.1:1", + DialTimeout: 50 * time.Millisecond, + ReadTimeout: 50 * time.Millisecond, + WriteTimeout: 50 * time.Millisecond, + }) + t.Cleanup(func() { + _ = rdb.Close() + }) + + cache := NewGeminiTokenCache(rdb) + err := cache.DeleteAccessToken(context.Background(), "broken") + require.Error(t, err) +} diff --git 
a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go index a54f3116..5c4d6cf4 100644 --- a/backend/internal/repository/group_repo.go +++ b/backend/internal/repository/group_repo.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "errors" + "log" dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/ent/apikey" @@ -48,18 +49,38 @@ func (r *groupRepository) Create(ctx context.Context, groupIn *service.Group) er SetNillableImagePrice4k(groupIn.ImagePrice4K). SetDefaultValidityDays(groupIn.DefaultValidityDays). SetClaudeCodeOnly(groupIn.ClaudeCodeOnly). - SetNillableFallbackGroupID(groupIn.FallbackGroupID) + SetNillableFallbackGroupID(groupIn.FallbackGroupID). + SetModelRoutingEnabled(groupIn.ModelRoutingEnabled) + + // 设置模型路由配置 + if groupIn.ModelRouting != nil { + builder = builder.SetModelRouting(groupIn.ModelRouting) + } created, err := builder.Save(ctx) if err == nil { groupIn.ID = created.ID groupIn.CreatedAt = created.CreatedAt groupIn.UpdatedAt = created.UpdatedAt + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &groupIn.ID, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue group create failed: group=%d err=%v", groupIn.ID, err) + } } return translatePersistenceError(err, nil, service.ErrGroupExists) } func (r *groupRepository) GetByID(ctx context.Context, id int64) (*service.Group, error) { + out, err := r.GetByIDLite(ctx, id) + if err != nil { + return nil, err + } + count, _ := r.GetAccountCount(ctx, out.ID) + out.AccountCount = count + return out, nil +} + +func (r *groupRepository) GetByIDLite(ctx context.Context, id int64) (*service.Group, error) { + // AccountCount is intentionally not loaded here; use GetByID when needed. m, err := r.client.Group.Query(). Where(group.IDEQ(id)). 
Only(ctx) @@ -67,10 +88,7 @@ func (r *groupRepository) GetByID(ctx context.Context, id int64) (*service.Group return nil, translatePersistenceError(err, service.ErrGroupNotFound, nil) } - out := groupEntityToService(m) - count, _ := r.GetAccountCount(ctx, out.ID) - out.AccountCount = count - return out, nil + return groupEntityToService(m), nil } func (r *groupRepository) Update(ctx context.Context, groupIn *service.Group) error { @@ -89,7 +107,8 @@ func (r *groupRepository) Update(ctx context.Context, groupIn *service.Group) er SetNillableImagePrice2k(groupIn.ImagePrice2K). SetNillableImagePrice4k(groupIn.ImagePrice4K). SetDefaultValidityDays(groupIn.DefaultValidityDays). - SetClaudeCodeOnly(groupIn.ClaudeCodeOnly) + SetClaudeCodeOnly(groupIn.ClaudeCodeOnly). + SetModelRoutingEnabled(groupIn.ModelRoutingEnabled) // 处理 FallbackGroupID:nil 时清除,否则设置 if groupIn.FallbackGroupID != nil { @@ -98,17 +117,33 @@ func (r *groupRepository) Update(ctx context.Context, groupIn *service.Group) er builder = builder.ClearFallbackGroupID() } + // 处理 ModelRouting:nil 时清除,否则设置 + if groupIn.ModelRouting != nil { + builder = builder.SetModelRouting(groupIn.ModelRouting) + } else { + builder = builder.ClearModelRouting() + } + updated, err := builder.Save(ctx) if err != nil { return translatePersistenceError(err, service.ErrGroupNotFound, service.ErrGroupExists) } groupIn.UpdatedAt = updated.UpdatedAt + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &groupIn.ID, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue group update failed: group=%d err=%v", groupIn.ID, err) + } return nil } func (r *groupRepository) Delete(ctx context.Context, id int64) error { _, err := r.client.Group.Delete().Where(group.IDEQ(id)).Exec(ctx) - return translatePersistenceError(err, service.ErrGroupNotFound, nil) + if err != nil { + return translatePersistenceError(err, service.ErrGroupNotFound, nil) + } + if err := enqueueSchedulerOutbox(ctx, r.sql, 
service.SchedulerOutboxEventGroupChanged, nil, &id, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue group delete failed: group=%d err=%v", id, err) + } + return nil } func (r *groupRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.Group, *pagination.PaginationResult, error) { @@ -238,6 +273,9 @@ func (r *groupRepository) DeleteAccountGroupsByGroupID(ctx context.Context, grou return 0, err } affected, _ := res.RowsAffected() + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &groupID, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue group account clear failed: group=%d err=%v", groupID, err) + } return affected, nil } @@ -345,6 +383,9 @@ func (r *groupRepository) DeleteCascade(ctx context.Context, id int64) ([]int64, return nil, err } } + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &id, nil); err != nil { + log.Printf("[SchedulerOutbox] enqueue group cascade delete failed: group=%d err=%v", id, err) + } return affectedUserIDs, nil } diff --git a/backend/internal/repository/group_repo_integration_test.go b/backend/internal/repository/group_repo_integration_test.go index 660618a6..c31a9ec4 100644 --- a/backend/internal/repository/group_repo_integration_test.go +++ b/backend/internal/repository/group_repo_integration_test.go @@ -4,6 +4,8 @@ package repository import ( "context" + "database/sql" + "errors" "testing" dbent "github.com/Wei-Shaw/sub2api/ent" @@ -19,6 +21,20 @@ type GroupRepoSuite struct { repo *groupRepository } +type forbidSQLExecutor struct { + called bool +} + +func (s *forbidSQLExecutor) ExecContext(ctx context.Context, query string, args ...any) (sql.Result, error) { + s.called = true + return nil, errors.New("unexpected sql exec") +} + +func (s *forbidSQLExecutor) QueryContext(ctx context.Context, query string, args ...any) (*sql.Rows, error) { + s.called = true + return nil, errors.New("unexpected sql 
query") +} + func (s *GroupRepoSuite) SetupTest() { s.ctx = context.Background() tx := testEntTx(s.T()) @@ -57,6 +73,26 @@ func (s *GroupRepoSuite) TestGetByID_NotFound() { s.Require().ErrorIs(err, service.ErrGroupNotFound) } +func (s *GroupRepoSuite) TestGetByIDLite_DoesNotUseAccountCount() { + group := &service.Group{ + Name: "lite-group", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, group)) + + spy := &forbidSQLExecutor{} + repo := newGroupRepositoryWithSQL(s.tx.Client(), spy) + + got, err := repo.GetByIDLite(s.ctx, group.ID) + s.Require().NoError(err) + s.Require().Equal(group.ID, got.ID) + s.Require().False(spy.called, "expected no direct sql executor usage") +} + func (s *GroupRepoSuite) TestUpdate() { group := &service.Group{ Name: "original", diff --git a/backend/internal/repository/migrations_runner.go b/backend/internal/repository/migrations_runner.go index 1b187830..5912e50f 100644 --- a/backend/internal/repository/migrations_runner.go +++ b/backend/internal/repository/migrations_runner.go @@ -28,6 +28,23 @@ CREATE TABLE IF NOT EXISTS schema_migrations ( ); ` +const atlasSchemaRevisionsTableDDL = ` +CREATE TABLE IF NOT EXISTS atlas_schema_revisions ( + version TEXT PRIMARY KEY, + description TEXT NOT NULL, + type INTEGER NOT NULL, + applied INTEGER NOT NULL DEFAULT 0, + total INTEGER NOT NULL DEFAULT 0, + executed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + execution_time BIGINT NOT NULL DEFAULT 0, + error TEXT NULL, + error_stmt TEXT NULL, + hash TEXT NOT NULL DEFAULT '', + partial_hashes TEXT[] NULL, + operator_version TEXT NULL +); +` + // migrationsAdvisoryLockID 是用于序列化迁移操作的 PostgreSQL Advisory Lock ID。 // 在多实例部署场景下,该锁确保同一时间只有一个实例执行迁移。 // 任何稳定的 int64 值都可以,只要不与同一数据库中的其他锁冲突即可。 @@ -94,6 +111,11 @@ func applyMigrationsFS(ctx context.Context, db *sql.DB, fsys fs.FS) error { return 
fmt.Errorf("create schema_migrations: %w", err) } + // 自动对齐 Atlas 基线(如果检测到 legacy schema_migrations 且缺失 atlas_schema_revisions)。 + if err := ensureAtlasBaselineAligned(ctx, db, fsys); err != nil { + return err + } + // 获取所有 .sql 迁移文件并按文件名排序。 // 命名规范:使用零填充数字前缀(如 001_init.sql, 002_add_users.sql)。 files, err := fs.Glob(fsys, "*.sql") @@ -172,6 +194,80 @@ func applyMigrationsFS(ctx context.Context, db *sql.DB, fsys fs.FS) error { return nil } +func ensureAtlasBaselineAligned(ctx context.Context, db *sql.DB, fsys fs.FS) error { + hasLegacy, err := tableExists(ctx, db, "schema_migrations") + if err != nil { + return fmt.Errorf("check schema_migrations: %w", err) + } + if !hasLegacy { + return nil + } + + hasAtlas, err := tableExists(ctx, db, "atlas_schema_revisions") + if err != nil { + return fmt.Errorf("check atlas_schema_revisions: %w", err) + } + if !hasAtlas { + if _, err := db.ExecContext(ctx, atlasSchemaRevisionsTableDDL); err != nil { + return fmt.Errorf("create atlas_schema_revisions: %w", err) + } + } + + var count int + if err := db.QueryRowContext(ctx, "SELECT COUNT(*) FROM atlas_schema_revisions").Scan(&count); err != nil { + return fmt.Errorf("count atlas_schema_revisions: %w", err) + } + if count > 0 { + return nil + } + + version, description, hash, err := latestMigrationBaseline(fsys) + if err != nil { + return fmt.Errorf("atlas baseline version: %w", err) + } + + if _, err := db.ExecContext(ctx, ` + INSERT INTO atlas_schema_revisions (version, description, type, applied, total, executed_at, execution_time, hash) + VALUES ($1, $2, $3, 0, 0, NOW(), 0, $4) + `, version, description, 1, hash); err != nil { + return fmt.Errorf("insert atlas baseline: %w", err) + } + return nil +} + +func tableExists(ctx context.Context, db *sql.DB, tableName string) (bool, error) { + var exists bool + err := db.QueryRowContext(ctx, ` + SELECT EXISTS ( + SELECT 1 + FROM information_schema.tables + WHERE table_schema = 'public' AND table_name = $1 + ) + `, 
tableName).Scan(&exists) + return exists, err +} + +func latestMigrationBaseline(fsys fs.FS) (string, string, string, error) { + files, err := fs.Glob(fsys, "*.sql") + if err != nil { + return "", "", "", err + } + if len(files) == 0 { + return "baseline", "baseline", "", nil + } + sort.Strings(files) + name := files[len(files)-1] + contentBytes, err := fs.ReadFile(fsys, name) + if err != nil { + return "", "", "", err + } + content := strings.TrimSpace(string(contentBytes)) + sum := sha256.Sum256([]byte(content)) + hash := hex.EncodeToString(sum[:]) + version := strings.TrimSuffix(name, ".sql") + return version, version, hash, nil +} + // pgAdvisoryLock 获取 PostgreSQL Advisory Lock。 // Advisory Lock 是一种轻量级的锁机制,不与任何特定的数据库对象关联。 // 它非常适合用于应用层面的分布式锁场景,如迁移序列化。 diff --git a/backend/internal/repository/ops_repo.go b/backend/internal/repository/ops_repo.go new file mode 100644 index 00000000..613c5bd5 --- /dev/null +++ b/backend/internal/repository/ops_repo.go @@ -0,0 +1,1098 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/lib/pq" +) + +type opsRepository struct { + db *sql.DB +} + +func NewOpsRepository(db *sql.DB) service.OpsRepository { + return &opsRepository{db: db} +} + +func (r *opsRepository) InsertErrorLog(ctx context.Context, input *service.OpsInsertErrorLogInput) (int64, error) { + if r == nil || r.db == nil { + return 0, fmt.Errorf("nil ops repository") + } + if input == nil { + return 0, fmt.Errorf("nil input") + } + + q := ` +INSERT INTO ops_error_logs ( + request_id, + client_request_id, + user_id, + api_key_id, + account_id, + group_id, + client_ip, + platform, + model, + request_path, + stream, + user_agent, + error_phase, + error_type, + severity, + status_code, + is_business_limited, + is_count_tokens, + error_message, + error_body, + error_source, + error_owner, + upstream_status_code, + upstream_error_message, + upstream_error_detail, + 
upstream_errors, + time_to_first_token_ms, + request_body, + request_body_truncated, + request_body_bytes, + request_headers, + is_retryable, + retry_count, + created_at +) VALUES ( + $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24,$25,$26,$27,$28,$29,$30,$31,$32,$33,$34 +) RETURNING id` + + var id int64 + err := r.db.QueryRowContext( + ctx, + q, + opsNullString(input.RequestID), + opsNullString(input.ClientRequestID), + opsNullInt64(input.UserID), + opsNullInt64(input.APIKeyID), + opsNullInt64(input.AccountID), + opsNullInt64(input.GroupID), + opsNullString(input.ClientIP), + opsNullString(input.Platform), + opsNullString(input.Model), + opsNullString(input.RequestPath), + input.Stream, + opsNullString(input.UserAgent), + input.ErrorPhase, + input.ErrorType, + opsNullString(input.Severity), + opsNullInt(input.StatusCode), + input.IsBusinessLimited, + input.IsCountTokens, + opsNullString(input.ErrorMessage), + opsNullString(input.ErrorBody), + opsNullString(input.ErrorSource), + opsNullString(input.ErrorOwner), + opsNullInt(input.UpstreamStatusCode), + opsNullString(input.UpstreamErrorMessage), + opsNullString(input.UpstreamErrorDetail), + opsNullString(input.UpstreamErrorsJSON), + opsNullInt64(input.TimeToFirstTokenMs), + opsNullString(input.RequestBodyJSON), + input.RequestBodyTruncated, + opsNullInt(input.RequestBodyBytes), + opsNullString(input.RequestHeadersJSON), + input.IsRetryable, + input.RetryCount, + input.CreatedAt, + ).Scan(&id) + if err != nil { + return 0, err + } + return id, nil +} + +func (r *opsRepository) ListErrorLogs(ctx context.Context, filter *service.OpsErrorLogFilter) (*service.OpsErrorLogList, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + filter = &service.OpsErrorLogFilter{} + } + + page := filter.Page + if page <= 0 { + page = 1 + } + pageSize := filter.PageSize + if pageSize <= 0 { + pageSize = 20 + } + if pageSize > 500 { + 
pageSize = 500 + } + + where, args := buildOpsErrorLogsWhere(filter) + countSQL := "SELECT COUNT(*) FROM ops_error_logs e " + where + + var total int + if err := r.db.QueryRowContext(ctx, countSQL, args...).Scan(&total); err != nil { + return nil, err + } + + offset := (page - 1) * pageSize + argsWithLimit := append(args, pageSize, offset) + selectSQL := ` +SELECT + e.id, + e.created_at, + e.error_phase, + e.error_type, + COALESCE(e.error_owner, ''), + COALESCE(e.error_source, ''), + e.severity, + COALESCE(e.upstream_status_code, e.status_code, 0), + COALESCE(e.platform, ''), + COALESCE(e.model, ''), + COALESCE(e.is_retryable, false), + COALESCE(e.retry_count, 0), + COALESCE(e.resolved, false), + e.resolved_at, + e.resolved_by_user_id, + COALESCE(u2.email, ''), + e.resolved_retry_id, + COALESCE(e.client_request_id, ''), + COALESCE(e.request_id, ''), + COALESCE(e.error_message, ''), + e.user_id, + COALESCE(u.email, ''), + e.api_key_id, + e.account_id, + COALESCE(a.name, ''), + e.group_id, + COALESCE(g.name, ''), + CASE WHEN e.client_ip IS NULL THEN NULL ELSE e.client_ip::text END, + COALESCE(e.request_path, ''), + e.stream +FROM ops_error_logs e +LEFT JOIN accounts a ON e.account_id = a.id +LEFT JOIN groups g ON e.group_id = g.id +LEFT JOIN users u ON e.user_id = u.id +LEFT JOIN users u2 ON e.resolved_by_user_id = u2.id +` + where + ` +ORDER BY e.created_at DESC +LIMIT $` + itoa(len(args)+1) + ` OFFSET $` + itoa(len(args)+2) + + rows, err := r.db.QueryContext(ctx, selectSQL, argsWithLimit...) 
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]*service.OpsErrorLog, 0, pageSize) + for rows.Next() { + var item service.OpsErrorLog + var statusCode sql.NullInt64 + var clientIP sql.NullString + var userID sql.NullInt64 + var apiKeyID sql.NullInt64 + var accountID sql.NullInt64 + var accountName string + var groupID sql.NullInt64 + var groupName string + var userEmail string + var resolvedAt sql.NullTime + var resolvedBy sql.NullInt64 + var resolvedByName string + var resolvedRetryID sql.NullInt64 + if err := rows.Scan( + &item.ID, + &item.CreatedAt, + &item.Phase, + &item.Type, + &item.Owner, + &item.Source, + &item.Severity, + &statusCode, + &item.Platform, + &item.Model, + &item.IsRetryable, + &item.RetryCount, + &item.Resolved, + &resolvedAt, + &resolvedBy, + &resolvedByName, + &resolvedRetryID, + &item.ClientRequestID, + &item.RequestID, + &item.Message, + &userID, + &userEmail, + &apiKeyID, + &accountID, + &accountName, + &groupID, + &groupName, + &clientIP, + &item.RequestPath, + &item.Stream, + ); err != nil { + return nil, err + } + if resolvedAt.Valid { + t := resolvedAt.Time + item.ResolvedAt = &t + } + if resolvedBy.Valid { + v := resolvedBy.Int64 + item.ResolvedByUserID = &v + } + item.ResolvedByUserName = resolvedByName + if resolvedRetryID.Valid { + v := resolvedRetryID.Int64 + item.ResolvedRetryID = &v + } + item.StatusCode = int(statusCode.Int64) + if clientIP.Valid { + s := clientIP.String + item.ClientIP = &s + } + if userID.Valid { + v := userID.Int64 + item.UserID = &v + } + item.UserEmail = userEmail + if apiKeyID.Valid { + v := apiKeyID.Int64 + item.APIKeyID = &v + } + if accountID.Valid { + v := accountID.Int64 + item.AccountID = &v + } + item.AccountName = accountName + if groupID.Valid { + v := groupID.Int64 + item.GroupID = &v + } + item.GroupName = groupName + out = append(out, &item) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return &service.OpsErrorLogList{ + 
Errors: out, + Total: total, + Page: page, + PageSize: pageSize, + }, nil +} + +func (r *opsRepository) GetErrorLogByID(ctx context.Context, id int64) (*service.OpsErrorLogDetail, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if id <= 0 { + return nil, fmt.Errorf("invalid id") + } + + q := ` +SELECT + e.id, + e.created_at, + e.error_phase, + e.error_type, + COALESCE(e.error_owner, ''), + COALESCE(e.error_source, ''), + e.severity, + COALESCE(e.upstream_status_code, e.status_code, 0), + COALESCE(e.platform, ''), + COALESCE(e.model, ''), + COALESCE(e.is_retryable, false), + COALESCE(e.retry_count, 0), + COALESCE(e.resolved, false), + e.resolved_at, + e.resolved_by_user_id, + e.resolved_retry_id, + COALESCE(e.client_request_id, ''), + COALESCE(e.request_id, ''), + COALESCE(e.error_message, ''), + COALESCE(e.error_body, ''), + e.upstream_status_code, + COALESCE(e.upstream_error_message, ''), + COALESCE(e.upstream_error_detail, ''), + COALESCE(e.upstream_errors::text, ''), + e.is_business_limited, + e.user_id, + COALESCE(u.email, ''), + e.api_key_id, + e.account_id, + COALESCE(a.name, ''), + e.group_id, + COALESCE(g.name, ''), + CASE WHEN e.client_ip IS NULL THEN NULL ELSE e.client_ip::text END, + COALESCE(e.request_path, ''), + e.stream, + COALESCE(e.user_agent, ''), + e.auth_latency_ms, + e.routing_latency_ms, + e.upstream_latency_ms, + e.response_latency_ms, + e.time_to_first_token_ms, + COALESCE(e.request_body::text, ''), + e.request_body_truncated, + e.request_body_bytes, + COALESCE(e.request_headers::text, '') +FROM ops_error_logs e +LEFT JOIN users u ON e.user_id = u.id +LEFT JOIN accounts a ON e.account_id = a.id +LEFT JOIN groups g ON e.group_id = g.id +WHERE e.id = $1 +LIMIT 1` + + var out service.OpsErrorLogDetail + var statusCode sql.NullInt64 + var upstreamStatusCode sql.NullInt64 + var resolvedAt sql.NullTime + var resolvedBy sql.NullInt64 + var resolvedRetryID sql.NullInt64 + var clientIP sql.NullString + var 
userID sql.NullInt64 + var apiKeyID sql.NullInt64 + var accountID sql.NullInt64 + var groupID sql.NullInt64 + var authLatency sql.NullInt64 + var routingLatency sql.NullInt64 + var upstreamLatency sql.NullInt64 + var responseLatency sql.NullInt64 + var ttft sql.NullInt64 + var requestBodyBytes sql.NullInt64 + + err := r.db.QueryRowContext(ctx, q, id).Scan( + &out.ID, + &out.CreatedAt, + &out.Phase, + &out.Type, + &out.Owner, + &out.Source, + &out.Severity, + &statusCode, + &out.Platform, + &out.Model, + &out.IsRetryable, + &out.RetryCount, + &out.Resolved, + &resolvedAt, + &resolvedBy, + &resolvedRetryID, + &out.ClientRequestID, + &out.RequestID, + &out.Message, + &out.ErrorBody, + &upstreamStatusCode, + &out.UpstreamErrorMessage, + &out.UpstreamErrorDetail, + &out.UpstreamErrors, + &out.IsBusinessLimited, + &userID, + &out.UserEmail, + &apiKeyID, + &accountID, + &out.AccountName, + &groupID, + &out.GroupName, + &clientIP, + &out.RequestPath, + &out.Stream, + &out.UserAgent, + &authLatency, + &routingLatency, + &upstreamLatency, + &responseLatency, + &ttft, + &out.RequestBody, + &out.RequestBodyTruncated, + &requestBodyBytes, + &out.RequestHeaders, + ) + if err != nil { + return nil, err + } + + out.StatusCode = int(statusCode.Int64) + if resolvedAt.Valid { + t := resolvedAt.Time + out.ResolvedAt = &t + } + if resolvedBy.Valid { + v := resolvedBy.Int64 + out.ResolvedByUserID = &v + } + if resolvedRetryID.Valid { + v := resolvedRetryID.Int64 + out.ResolvedRetryID = &v + } + if clientIP.Valid { + s := clientIP.String + out.ClientIP = &s + } + if upstreamStatusCode.Valid && upstreamStatusCode.Int64 > 0 { + v := int(upstreamStatusCode.Int64) + out.UpstreamStatusCode = &v + } + if userID.Valid { + v := userID.Int64 + out.UserID = &v + } + if apiKeyID.Valid { + v := apiKeyID.Int64 + out.APIKeyID = &v + } + if accountID.Valid { + v := accountID.Int64 + out.AccountID = &v + } + if groupID.Valid { + v := groupID.Int64 + out.GroupID = &v + } + if authLatency.Valid { + v := 
authLatency.Int64 + out.AuthLatencyMs = &v + } + if routingLatency.Valid { + v := routingLatency.Int64 + out.RoutingLatencyMs = &v + } + if upstreamLatency.Valid { + v := upstreamLatency.Int64 + out.UpstreamLatencyMs = &v + } + if responseLatency.Valid { + v := responseLatency.Int64 + out.ResponseLatencyMs = &v + } + if ttft.Valid { + v := ttft.Int64 + out.TimeToFirstTokenMs = &v + } + if requestBodyBytes.Valid { + v := int(requestBodyBytes.Int64) + out.RequestBodyBytes = &v + } + + // Normalize request_body to empty string when stored as JSON null. + out.RequestBody = strings.TrimSpace(out.RequestBody) + if out.RequestBody == "null" { + out.RequestBody = "" + } + // Normalize request_headers to empty string when stored as JSON null. + out.RequestHeaders = strings.TrimSpace(out.RequestHeaders) + if out.RequestHeaders == "null" { + out.RequestHeaders = "" + } + // Normalize upstream_errors to empty string when stored as JSON null. + out.UpstreamErrors = strings.TrimSpace(out.UpstreamErrors) + if out.UpstreamErrors == "null" { + out.UpstreamErrors = "" + } + + return &out, nil +} + +func (r *opsRepository) InsertRetryAttempt(ctx context.Context, input *service.OpsInsertRetryAttemptInput) (int64, error) { + if r == nil || r.db == nil { + return 0, fmt.Errorf("nil ops repository") + } + if input == nil { + return 0, fmt.Errorf("nil input") + } + if input.SourceErrorID <= 0 { + return 0, fmt.Errorf("invalid source_error_id") + } + if strings.TrimSpace(input.Mode) == "" { + return 0, fmt.Errorf("invalid mode") + } + + q := ` +INSERT INTO ops_retry_attempts ( + requested_by_user_id, + source_error_id, + mode, + pinned_account_id, + status, + started_at +) VALUES ( + $1,$2,$3,$4,$5,$6 +) RETURNING id` + + var id int64 + err := r.db.QueryRowContext( + ctx, + q, + opsNullInt64(&input.RequestedByUserID), + input.SourceErrorID, + strings.TrimSpace(input.Mode), + opsNullInt64(input.PinnedAccountID), + strings.TrimSpace(input.Status), + input.StartedAt, + ).Scan(&id) + if err != 
nil { + return 0, err + } + return id, nil +} + +func (r *opsRepository) UpdateRetryAttempt(ctx context.Context, input *service.OpsUpdateRetryAttemptInput) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if input == nil { + return fmt.Errorf("nil input") + } + if input.ID <= 0 { + return fmt.Errorf("invalid id") + } + + q := ` +UPDATE ops_retry_attempts +SET + status = $2, + finished_at = $3, + duration_ms = $4, + success = $5, + http_status_code = $6, + upstream_request_id = $7, + used_account_id = $8, + response_preview = $9, + response_truncated = $10, + result_request_id = $11, + result_error_id = $12, + error_message = $13 +WHERE id = $1` + + _, err := r.db.ExecContext( + ctx, + q, + input.ID, + strings.TrimSpace(input.Status), + nullTime(input.FinishedAt), + input.DurationMs, + nullBool(input.Success), + nullInt(input.HTTPStatusCode), + opsNullString(input.UpstreamRequestID), + nullInt64(input.UsedAccountID), + opsNullString(input.ResponsePreview), + nullBool(input.ResponseTruncated), + opsNullString(input.ResultRequestID), + nullInt64(input.ResultErrorID), + opsNullString(input.ErrorMessage), + ) + return err +} + +func (r *opsRepository) GetLatestRetryAttemptForError(ctx context.Context, sourceErrorID int64) (*service.OpsRetryAttempt, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if sourceErrorID <= 0 { + return nil, fmt.Errorf("invalid source_error_id") + } + + q := ` +SELECT + id, + created_at, + COALESCE(requested_by_user_id, 0), + source_error_id, + COALESCE(mode, ''), + pinned_account_id, + COALESCE(status, ''), + started_at, + finished_at, + duration_ms, + success, + http_status_code, + upstream_request_id, + used_account_id, + response_preview, + response_truncated, + result_request_id, + result_error_id, + error_message +FROM ops_retry_attempts +WHERE source_error_id = $1 +ORDER BY created_at DESC +LIMIT 1` + + var out service.OpsRetryAttempt + var pinnedAccountID 
sql.NullInt64 + var requestedBy sql.NullInt64 + var startedAt sql.NullTime + var finishedAt sql.NullTime + var durationMs sql.NullInt64 + var success sql.NullBool + var httpStatusCode sql.NullInt64 + var upstreamRequestID sql.NullString + var usedAccountID sql.NullInt64 + var responsePreview sql.NullString + var responseTruncated sql.NullBool + var resultRequestID sql.NullString + var resultErrorID sql.NullInt64 + var errorMessage sql.NullString + + err := r.db.QueryRowContext(ctx, q, sourceErrorID).Scan( + &out.ID, + &out.CreatedAt, + &requestedBy, + &out.SourceErrorID, + &out.Mode, + &pinnedAccountID, + &out.Status, + &startedAt, + &finishedAt, + &durationMs, + &success, + &httpStatusCode, + &upstreamRequestID, + &usedAccountID, + &responsePreview, + &responseTruncated, + &resultRequestID, + &resultErrorID, + &errorMessage, + ) + if err != nil { + return nil, err + } + out.RequestedByUserID = requestedBy.Int64 + if pinnedAccountID.Valid { + v := pinnedAccountID.Int64 + out.PinnedAccountID = &v + } + if startedAt.Valid { + t := startedAt.Time + out.StartedAt = &t + } + if finishedAt.Valid { + t := finishedAt.Time + out.FinishedAt = &t + } + if durationMs.Valid { + v := durationMs.Int64 + out.DurationMs = &v + } + if success.Valid { + v := success.Bool + out.Success = &v + } + if httpStatusCode.Valid { + v := int(httpStatusCode.Int64) + out.HTTPStatusCode = &v + } + if upstreamRequestID.Valid { + s := upstreamRequestID.String + out.UpstreamRequestID = &s + } + if usedAccountID.Valid { + v := usedAccountID.Int64 + out.UsedAccountID = &v + } + if responsePreview.Valid { + s := responsePreview.String + out.ResponsePreview = &s + } + if responseTruncated.Valid { + v := responseTruncated.Bool + out.ResponseTruncated = &v + } + if resultRequestID.Valid { + s := resultRequestID.String + out.ResultRequestID = &s + } + if resultErrorID.Valid { + v := resultErrorID.Int64 + out.ResultErrorID = &v + } + if errorMessage.Valid { + s := errorMessage.String + out.ErrorMessage = &s 
+ } + + return &out, nil +} + +func nullTime(t time.Time) sql.NullTime { + if t.IsZero() { + return sql.NullTime{} + } + return sql.NullTime{Time: t, Valid: true} +} + +func nullBool(v *bool) sql.NullBool { + if v == nil { + return sql.NullBool{} + } + return sql.NullBool{Bool: *v, Valid: true} +} + +func (r *opsRepository) ListRetryAttemptsByErrorID(ctx context.Context, sourceErrorID int64, limit int) ([]*service.OpsRetryAttempt, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if sourceErrorID <= 0 { + return nil, fmt.Errorf("invalid source_error_id") + } + if limit <= 0 { + limit = 50 + } + if limit > 200 { + limit = 200 + } + + q := ` +SELECT + r.id, + r.created_at, + COALESCE(r.requested_by_user_id, 0), + r.source_error_id, + COALESCE(r.mode, ''), + r.pinned_account_id, + COALESCE(pa.name, ''), + COALESCE(r.status, ''), + r.started_at, + r.finished_at, + r.duration_ms, + r.success, + r.http_status_code, + r.upstream_request_id, + r.used_account_id, + COALESCE(ua.name, ''), + r.response_preview, + r.response_truncated, + r.result_request_id, + r.result_error_id, + r.error_message +FROM ops_retry_attempts r +LEFT JOIN accounts pa ON r.pinned_account_id = pa.id +LEFT JOIN accounts ua ON r.used_account_id = ua.id +WHERE r.source_error_id = $1 +ORDER BY r.created_at DESC +LIMIT $2` + + rows, err := r.db.QueryContext(ctx, q, sourceErrorID, limit) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]*service.OpsRetryAttempt, 0, 16) + for rows.Next() { + var item service.OpsRetryAttempt + var pinnedAccountID sql.NullInt64 + var pinnedAccountName string + var requestedBy sql.NullInt64 + var startedAt sql.NullTime + var finishedAt sql.NullTime + var durationMs sql.NullInt64 + var success sql.NullBool + var httpStatusCode sql.NullInt64 + var upstreamRequestID sql.NullString + var usedAccountID sql.NullInt64 + var usedAccountName string + var responsePreview sql.NullString + var 
responseTruncated sql.NullBool + var resultRequestID sql.NullString + var resultErrorID sql.NullInt64 + var errorMessage sql.NullString + + if err := rows.Scan( + &item.ID, + &item.CreatedAt, + &requestedBy, + &item.SourceErrorID, + &item.Mode, + &pinnedAccountID, + &pinnedAccountName, + &item.Status, + &startedAt, + &finishedAt, + &durationMs, + &success, + &httpStatusCode, + &upstreamRequestID, + &usedAccountID, + &usedAccountName, + &responsePreview, + &responseTruncated, + &resultRequestID, + &resultErrorID, + &errorMessage, + ); err != nil { + return nil, err + } + + item.RequestedByUserID = requestedBy.Int64 + if pinnedAccountID.Valid { + v := pinnedAccountID.Int64 + item.PinnedAccountID = &v + } + item.PinnedAccountName = pinnedAccountName + if startedAt.Valid { + t := startedAt.Time + item.StartedAt = &t + } + if finishedAt.Valid { + t := finishedAt.Time + item.FinishedAt = &t + } + if durationMs.Valid { + v := durationMs.Int64 + item.DurationMs = &v + } + if success.Valid { + v := success.Bool + item.Success = &v + } + if httpStatusCode.Valid { + v := int(httpStatusCode.Int64) + item.HTTPStatusCode = &v + } + if upstreamRequestID.Valid { + item.UpstreamRequestID = &upstreamRequestID.String + } + if usedAccountID.Valid { + v := usedAccountID.Int64 + item.UsedAccountID = &v + } + item.UsedAccountName = usedAccountName + if responsePreview.Valid { + item.ResponsePreview = &responsePreview.String + } + if responseTruncated.Valid { + v := responseTruncated.Bool + item.ResponseTruncated = &v + } + if resultRequestID.Valid { + item.ResultRequestID = &resultRequestID.String + } + if resultErrorID.Valid { + v := resultErrorID.Int64 + item.ResultErrorID = &v + } + if errorMessage.Valid { + item.ErrorMessage = &errorMessage.String + } + out = append(out, &item) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +func (r *opsRepository) UpdateErrorResolution(ctx context.Context, errorID int64, resolved bool, resolvedByUserID *int64, 
resolvedRetryID *int64, resolvedAt *time.Time) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if errorID <= 0 { + return fmt.Errorf("invalid error id") + } + + q := ` +UPDATE ops_error_logs +SET + resolved = $2, + resolved_at = $3, + resolved_by_user_id = $4, + resolved_retry_id = $5 +WHERE id = $1` + + at := sql.NullTime{} + if resolvedAt != nil && !resolvedAt.IsZero() { + at = sql.NullTime{Time: resolvedAt.UTC(), Valid: true} + } else if resolved { + now := time.Now().UTC() + at = sql.NullTime{Time: now, Valid: true} + } + + _, err := r.db.ExecContext( + ctx, + q, + errorID, + resolved, + at, + nullInt64(resolvedByUserID), + nullInt64(resolvedRetryID), + ) + return err +} + +func buildOpsErrorLogsWhere(filter *service.OpsErrorLogFilter) (string, []any) { + clauses := make([]string, 0, 12) + args := make([]any, 0, 12) + clauses = append(clauses, "1=1") + + phaseFilter := "" + if filter != nil { + phaseFilter = strings.TrimSpace(strings.ToLower(filter.Phase)) + } + // ops_error_logs stores client-visible error requests (status>=400), + // but we also persist "recovered" upstream errors (status<400) for upstream health visibility. + // If Resolved is not specified, do not filter by resolved state (backward-compatible). + resolvedFilter := (*bool)(nil) + if filter != nil { + resolvedFilter = filter.Resolved + } + // Keep list endpoints scoped to client errors unless explicitly filtering upstream phase. 
+ if phaseFilter != "upstream" { + clauses = append(clauses, "COALESCE(status_code, 0) >= 400") + } + + if filter.StartTime != nil && !filter.StartTime.IsZero() { + args = append(args, filter.StartTime.UTC()) + clauses = append(clauses, "e.created_at >= $"+itoa(len(args))) + } + if filter.EndTime != nil && !filter.EndTime.IsZero() { + args = append(args, filter.EndTime.UTC()) + // Keep time-window semantics consistent with other ops queries: [start, end) + clauses = append(clauses, "e.created_at < $"+itoa(len(args))) + } + if p := strings.TrimSpace(filter.Platform); p != "" { + args = append(args, p) + clauses = append(clauses, "platform = $"+itoa(len(args))) + } + if filter.GroupID != nil && *filter.GroupID > 0 { + args = append(args, *filter.GroupID) + clauses = append(clauses, "group_id = $"+itoa(len(args))) + } + if filter.AccountID != nil && *filter.AccountID > 0 { + args = append(args, *filter.AccountID) + clauses = append(clauses, "account_id = $"+itoa(len(args))) + } + if phase := phaseFilter; phase != "" { + args = append(args, phase) + clauses = append(clauses, "error_phase = $"+itoa(len(args))) + } + if filter != nil { + if owner := strings.TrimSpace(strings.ToLower(filter.Owner)); owner != "" { + args = append(args, owner) + clauses = append(clauses, "LOWER(COALESCE(error_owner,'')) = $"+itoa(len(args))) + } + if source := strings.TrimSpace(strings.ToLower(filter.Source)); source != "" { + args = append(args, source) + clauses = append(clauses, "LOWER(COALESCE(error_source,'')) = $"+itoa(len(args))) + } + } + if resolvedFilter != nil { + args = append(args, *resolvedFilter) + clauses = append(clauses, "COALESCE(resolved,false) = $"+itoa(len(args))) + } + + // View filter: errors vs excluded vs all. + // Excluded = upstream 429/529 and business-limited (quota/concurrency/billing) errors. 
+ view := "" + if filter != nil { + view = strings.ToLower(strings.TrimSpace(filter.View)) + } + switch view { + case "", "errors": + clauses = append(clauses, "COALESCE(is_business_limited,false) = false") + clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)") + case "excluded": + clauses = append(clauses, "(COALESCE(is_business_limited,false) = true OR COALESCE(upstream_status_code, status_code, 0) IN (429, 529))") + case "all": + // no-op + default: + // treat unknown as default 'errors' + clauses = append(clauses, "COALESCE(is_business_limited,false) = false") + clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)") + } + if len(filter.StatusCodes) > 0 { + args = append(args, pq.Array(filter.StatusCodes)) + clauses = append(clauses, "COALESCE(upstream_status_code, status_code, 0) = ANY($"+itoa(len(args))+")") + } else if filter.StatusCodesOther { + // "Other" means: status codes not in the common list. + known := []int{400, 401, 403, 404, 409, 422, 429, 500, 502, 503, 504, 529} + args = append(args, pq.Array(known)) + clauses = append(clauses, "NOT (COALESCE(upstream_status_code, status_code, 0) = ANY($"+itoa(len(args))+"))") + } + // Exact correlation keys (preferred for request↔upstream linkage). 
+ if rid := strings.TrimSpace(filter.RequestID); rid != "" { + args = append(args, rid) + clauses = append(clauses, "COALESCE(request_id,'') = $"+itoa(len(args))) + } + if crid := strings.TrimSpace(filter.ClientRequestID); crid != "" { + args = append(args, crid) + clauses = append(clauses, "COALESCE(client_request_id,'') = $"+itoa(len(args))) + } + + if q := strings.TrimSpace(filter.Query); q != "" { + like := "%" + q + "%" + args = append(args, like) + n := itoa(len(args)) + clauses = append(clauses, "(request_id ILIKE $"+n+" OR client_request_id ILIKE $"+n+" OR error_message ILIKE $"+n+")") + } + + if userQuery := strings.TrimSpace(filter.UserQuery); userQuery != "" { + like := "%" + userQuery + "%" + args = append(args, like) + n := itoa(len(args)) + clauses = append(clauses, "u.email ILIKE $"+n) + } + + return "WHERE " + strings.Join(clauses, " AND "), args +} + +// Helpers for nullable args +func opsNullString(v any) any { + switch s := v.(type) { + case nil: + return sql.NullString{} + case *string: + if s == nil || strings.TrimSpace(*s) == "" { + return sql.NullString{} + } + return sql.NullString{String: strings.TrimSpace(*s), Valid: true} + case string: + if strings.TrimSpace(s) == "" { + return sql.NullString{} + } + return sql.NullString{String: strings.TrimSpace(s), Valid: true} + default: + return sql.NullString{} + } +} + +func opsNullInt64(v *int64) any { + if v == nil || *v == 0 { + return sql.NullInt64{} + } + return sql.NullInt64{Int64: *v, Valid: true} +} + +func opsNullInt(v any) any { + switch n := v.(type) { + case nil: + return sql.NullInt64{} + case *int: + if n == nil || *n == 0 { + return sql.NullInt64{} + } + return sql.NullInt64{Int64: int64(*n), Valid: true} + case *int64: + if n == nil || *n == 0 { + return sql.NullInt64{} + } + return sql.NullInt64{Int64: *n, Valid: true} + case int: + if n == 0 { + return sql.NullInt64{} + } + return sql.NullInt64{Int64: int64(n), Valid: true} + default: + return sql.NullInt64{} + } +} diff --git 
a/backend/internal/repository/ops_repo_alerts.go b/backend/internal/repository/ops_repo_alerts.go new file mode 100644 index 00000000..bd98b7e4 --- /dev/null +++ b/backend/internal/repository/ops_repo_alerts.go @@ -0,0 +1,853 @@ +package repository + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) ListAlertRules(ctx context.Context) ([]*service.OpsAlertRule, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + + q := ` +SELECT + id, + name, + COALESCE(description, ''), + enabled, + COALESCE(severity, ''), + metric_type, + operator, + threshold, + window_minutes, + sustained_minutes, + cooldown_minutes, + COALESCE(notify_email, true), + filters, + last_triggered_at, + created_at, + updated_at +FROM ops_alert_rules +ORDER BY id DESC` + + rows, err := r.db.QueryContext(ctx, q) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := []*service.OpsAlertRule{} + for rows.Next() { + var rule service.OpsAlertRule + var filtersRaw []byte + var lastTriggeredAt sql.NullTime + if err := rows.Scan( + &rule.ID, + &rule.Name, + &rule.Description, + &rule.Enabled, + &rule.Severity, + &rule.MetricType, + &rule.Operator, + &rule.Threshold, + &rule.WindowMinutes, + &rule.SustainedMinutes, + &rule.CooldownMinutes, + &rule.NotifyEmail, + &filtersRaw, + &lastTriggeredAt, + &rule.CreatedAt, + &rule.UpdatedAt, + ); err != nil { + return nil, err + } + if lastTriggeredAt.Valid { + v := lastTriggeredAt.Time + rule.LastTriggeredAt = &v + } + if len(filtersRaw) > 0 && string(filtersRaw) != "null" { + var decoded map[string]any + if err := json.Unmarshal(filtersRaw, &decoded); err == nil { + rule.Filters = decoded + } + } + out = append(out, &rule) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +func (r *opsRepository) CreateAlertRule(ctx context.Context, input 
*service.OpsAlertRule) (*service.OpsAlertRule, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if input == nil { + return nil, fmt.Errorf("nil input") + } + + filtersArg, err := opsNullJSONMap(input.Filters) + if err != nil { + return nil, err + } + + q := ` +INSERT INTO ops_alert_rules ( + name, + description, + enabled, + severity, + metric_type, + operator, + threshold, + window_minutes, + sustained_minutes, + cooldown_minutes, + notify_email, + filters, + created_at, + updated_at +) VALUES ( + $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,NOW(),NOW() +) +RETURNING + id, + name, + COALESCE(description, ''), + enabled, + COALESCE(severity, ''), + metric_type, + operator, + threshold, + window_minutes, + sustained_minutes, + cooldown_minutes, + COALESCE(notify_email, true), + filters, + last_triggered_at, + created_at, + updated_at` + + var out service.OpsAlertRule + var filtersRaw []byte + var lastTriggeredAt sql.NullTime + + if err := r.db.QueryRowContext( + ctx, + q, + strings.TrimSpace(input.Name), + strings.TrimSpace(input.Description), + input.Enabled, + strings.TrimSpace(input.Severity), + strings.TrimSpace(input.MetricType), + strings.TrimSpace(input.Operator), + input.Threshold, + input.WindowMinutes, + input.SustainedMinutes, + input.CooldownMinutes, + input.NotifyEmail, + filtersArg, + ).Scan( + &out.ID, + &out.Name, + &out.Description, + &out.Enabled, + &out.Severity, + &out.MetricType, + &out.Operator, + &out.Threshold, + &out.WindowMinutes, + &out.SustainedMinutes, + &out.CooldownMinutes, + &out.NotifyEmail, + &filtersRaw, + &lastTriggeredAt, + &out.CreatedAt, + &out.UpdatedAt, + ); err != nil { + return nil, err + } + if lastTriggeredAt.Valid { + v := lastTriggeredAt.Time + out.LastTriggeredAt = &v + } + if len(filtersRaw) > 0 && string(filtersRaw) != "null" { + var decoded map[string]any + if err := json.Unmarshal(filtersRaw, &decoded); err == nil { + out.Filters = decoded + } + } + + return &out, nil +} + 
+func (r *opsRepository) UpdateAlertRule(ctx context.Context, input *service.OpsAlertRule) (*service.OpsAlertRule, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if input == nil { + return nil, fmt.Errorf("nil input") + } + if input.ID <= 0 { + return nil, fmt.Errorf("invalid id") + } + + filtersArg, err := opsNullJSONMap(input.Filters) + if err != nil { + return nil, err + } + + q := ` +UPDATE ops_alert_rules +SET + name = $2, + description = $3, + enabled = $4, + severity = $5, + metric_type = $6, + operator = $7, + threshold = $8, + window_minutes = $9, + sustained_minutes = $10, + cooldown_minutes = $11, + notify_email = $12, + filters = $13, + updated_at = NOW() +WHERE id = $1 +RETURNING + id, + name, + COALESCE(description, ''), + enabled, + COALESCE(severity, ''), + metric_type, + operator, + threshold, + window_minutes, + sustained_minutes, + cooldown_minutes, + COALESCE(notify_email, true), + filters, + last_triggered_at, + created_at, + updated_at` + + var out service.OpsAlertRule + var filtersRaw []byte + var lastTriggeredAt sql.NullTime + + if err := r.db.QueryRowContext( + ctx, + q, + input.ID, + strings.TrimSpace(input.Name), + strings.TrimSpace(input.Description), + input.Enabled, + strings.TrimSpace(input.Severity), + strings.TrimSpace(input.MetricType), + strings.TrimSpace(input.Operator), + input.Threshold, + input.WindowMinutes, + input.SustainedMinutes, + input.CooldownMinutes, + input.NotifyEmail, + filtersArg, + ).Scan( + &out.ID, + &out.Name, + &out.Description, + &out.Enabled, + &out.Severity, + &out.MetricType, + &out.Operator, + &out.Threshold, + &out.WindowMinutes, + &out.SustainedMinutes, + &out.CooldownMinutes, + &out.NotifyEmail, + &filtersRaw, + &lastTriggeredAt, + &out.CreatedAt, + &out.UpdatedAt, + ); err != nil { + return nil, err + } + + if lastTriggeredAt.Valid { + v := lastTriggeredAt.Time + out.LastTriggeredAt = &v + } + if len(filtersRaw) > 0 && string(filtersRaw) != "null" { + var 
decoded map[string]any + if err := json.Unmarshal(filtersRaw, &decoded); err == nil { + out.Filters = decoded + } + } + + return &out, nil +} + +func (r *opsRepository) DeleteAlertRule(ctx context.Context, id int64) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if id <= 0 { + return fmt.Errorf("invalid id") + } + + res, err := r.db.ExecContext(ctx, "DELETE FROM ops_alert_rules WHERE id = $1", id) + if err != nil { + return err + } + affected, err := res.RowsAffected() + if err != nil { + return err + } + if affected == 0 { + return sql.ErrNoRows + } + return nil +} + +func (r *opsRepository) ListAlertEvents(ctx context.Context, filter *service.OpsAlertEventFilter) ([]*service.OpsAlertEvent, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + filter = &service.OpsAlertEventFilter{} + } + + limit := filter.Limit + if limit <= 0 { + limit = 100 + } + if limit > 500 { + limit = 500 + } + + where, args := buildOpsAlertEventsWhere(filter) + args = append(args, limit) + limitArg := "$" + itoa(len(args)) + + q := ` +SELECT + id, + COALESCE(rule_id, 0), + COALESCE(severity, ''), + COALESCE(status, ''), + COALESCE(title, ''), + COALESCE(description, ''), + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at +FROM ops_alert_events +` + where + ` +ORDER BY fired_at DESC, id DESC +LIMIT ` + limitArg + + rows, err := r.db.QueryContext(ctx, q, args...) 
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := []*service.OpsAlertEvent{} + for rows.Next() { + var ev service.OpsAlertEvent + var metricValue sql.NullFloat64 + var thresholdValue sql.NullFloat64 + var dimensionsRaw []byte + var resolvedAt sql.NullTime + if err := rows.Scan( + &ev.ID, + &ev.RuleID, + &ev.Severity, + &ev.Status, + &ev.Title, + &ev.Description, + &metricValue, + &thresholdValue, + &dimensionsRaw, + &ev.FiredAt, + &resolvedAt, + &ev.EmailSent, + &ev.CreatedAt, + ); err != nil { + return nil, err + } + if metricValue.Valid { + v := metricValue.Float64 + ev.MetricValue = &v + } + if thresholdValue.Valid { + v := thresholdValue.Float64 + ev.ThresholdValue = &v + } + if resolvedAt.Valid { + v := resolvedAt.Time + ev.ResolvedAt = &v + } + if len(dimensionsRaw) > 0 && string(dimensionsRaw) != "null" { + var decoded map[string]any + if err := json.Unmarshal(dimensionsRaw, &decoded); err == nil { + ev.Dimensions = decoded + } + } + out = append(out, &ev) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +func (r *opsRepository) GetAlertEventByID(ctx context.Context, eventID int64) (*service.OpsAlertEvent, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if eventID <= 0 { + return nil, fmt.Errorf("invalid event id") + } + + q := ` +SELECT + id, + COALESCE(rule_id, 0), + COALESCE(severity, ''), + COALESCE(status, ''), + COALESCE(title, ''), + COALESCE(description, ''), + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at +FROM ops_alert_events +WHERE id = $1` + + row := r.db.QueryRowContext(ctx, q, eventID) + ev, err := scanOpsAlertEvent(row) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, err + } + return ev, nil +} + +func (r *opsRepository) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*service.OpsAlertEvent, error) { + if r == nil || r.db == nil 
{ + return nil, fmt.Errorf("nil ops repository") + } + if ruleID <= 0 { + return nil, fmt.Errorf("invalid rule id") + } + + q := ` +SELECT + id, + COALESCE(rule_id, 0), + COALESCE(severity, ''), + COALESCE(status, ''), + COALESCE(title, ''), + COALESCE(description, ''), + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at +FROM ops_alert_events +WHERE rule_id = $1 AND status = $2 +ORDER BY fired_at DESC +LIMIT 1` + + row := r.db.QueryRowContext(ctx, q, ruleID, service.OpsAlertStatusFiring) + ev, err := scanOpsAlertEvent(row) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, err + } + return ev, nil +} + +func (r *opsRepository) GetLatestAlertEvent(ctx context.Context, ruleID int64) (*service.OpsAlertEvent, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if ruleID <= 0 { + return nil, fmt.Errorf("invalid rule id") + } + + q := ` +SELECT + id, + COALESCE(rule_id, 0), + COALESCE(severity, ''), + COALESCE(status, ''), + COALESCE(title, ''), + COALESCE(description, ''), + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at +FROM ops_alert_events +WHERE rule_id = $1 +ORDER BY fired_at DESC +LIMIT 1` + + row := r.db.QueryRowContext(ctx, q, ruleID) + ev, err := scanOpsAlertEvent(row) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, err + } + return ev, nil +} + +func (r *opsRepository) CreateAlertEvent(ctx context.Context, event *service.OpsAlertEvent) (*service.OpsAlertEvent, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if event == nil { + return nil, fmt.Errorf("nil event") + } + + dimensionsArg, err := opsNullJSONMap(event.Dimensions) + if err != nil { + return nil, err + } + + q := ` +INSERT INTO ops_alert_events ( + rule_id, + severity, + status, + title, + description, + metric_value, + threshold_value, + 
dimensions, + fired_at, + resolved_at, + email_sent, + created_at +) VALUES ( + $1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,NOW() +) +RETURNING + id, + COALESCE(rule_id, 0), + COALESCE(severity, ''), + COALESCE(status, ''), + COALESCE(title, ''), + COALESCE(description, ''), + metric_value, + threshold_value, + dimensions, + fired_at, + resolved_at, + email_sent, + created_at` + + row := r.db.QueryRowContext( + ctx, + q, + opsNullInt64(&event.RuleID), + opsNullString(event.Severity), + opsNullString(event.Status), + opsNullString(event.Title), + opsNullString(event.Description), + opsNullFloat64(event.MetricValue), + opsNullFloat64(event.ThresholdValue), + dimensionsArg, + event.FiredAt, + opsNullTime(event.ResolvedAt), + event.EmailSent, + ) + return scanOpsAlertEvent(row) +} + +func (r *opsRepository) UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if eventID <= 0 { + return fmt.Errorf("invalid event id") + } + if strings.TrimSpace(status) == "" { + return fmt.Errorf("invalid status") + } + + q := ` +UPDATE ops_alert_events +SET status = $2, + resolved_at = $3 +WHERE id = $1` + + _, err := r.db.ExecContext(ctx, q, eventID, strings.TrimSpace(status), opsNullTime(resolvedAt)) + return err +} + +func (r *opsRepository) UpdateAlertEventEmailSent(ctx context.Context, eventID int64, emailSent bool) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if eventID <= 0 { + return fmt.Errorf("invalid event id") + } + + _, err := r.db.ExecContext(ctx, "UPDATE ops_alert_events SET email_sent = $2 WHERE id = $1", eventID, emailSent) + return err +} + +type opsAlertEventRow interface { + Scan(dest ...any) error +} + +func (r *opsRepository) CreateAlertSilence(ctx context.Context, input *service.OpsAlertSilence) (*service.OpsAlertSilence, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops 
repository") + } + if input == nil { + return nil, fmt.Errorf("nil input") + } + if input.RuleID <= 0 { + return nil, fmt.Errorf("invalid rule_id") + } + platform := strings.TrimSpace(input.Platform) + if platform == "" { + return nil, fmt.Errorf("invalid platform") + } + if input.Until.IsZero() { + return nil, fmt.Errorf("invalid until") + } + + q := ` +INSERT INTO ops_alert_silences ( + rule_id, + platform, + group_id, + region, + until, + reason, + created_by, + created_at +) VALUES ( + $1,$2,$3,$4,$5,$6,$7,NOW() +) +RETURNING id, rule_id, platform, group_id, region, until, COALESCE(reason,''), created_by, created_at` + + row := r.db.QueryRowContext( + ctx, + q, + input.RuleID, + platform, + opsNullInt64(input.GroupID), + opsNullString(input.Region), + input.Until, + opsNullString(input.Reason), + opsNullInt64(input.CreatedBy), + ) + + var out service.OpsAlertSilence + var groupID sql.NullInt64 + var region sql.NullString + var createdBy sql.NullInt64 + if err := row.Scan( + &out.ID, + &out.RuleID, + &out.Platform, + &groupID, + ®ion, + &out.Until, + &out.Reason, + &createdBy, + &out.CreatedAt, + ); err != nil { + return nil, err + } + if groupID.Valid { + v := groupID.Int64 + out.GroupID = &v + } + if region.Valid { + v := strings.TrimSpace(region.String) + if v != "" { + out.Region = &v + } + } + if createdBy.Valid { + v := createdBy.Int64 + out.CreatedBy = &v + } + return &out, nil +} + +func (r *opsRepository) IsAlertSilenced(ctx context.Context, ruleID int64, platform string, groupID *int64, region *string, now time.Time) (bool, error) { + if r == nil || r.db == nil { + return false, fmt.Errorf("nil ops repository") + } + if ruleID <= 0 { + return false, fmt.Errorf("invalid rule id") + } + platform = strings.TrimSpace(platform) + if platform == "" { + return false, nil + } + if now.IsZero() { + now = time.Now().UTC() + } + + q := ` +SELECT 1 +FROM ops_alert_silences +WHERE rule_id = $1 + AND platform = $2 + AND (group_id IS NOT DISTINCT FROM $3) + AND 
(region IS NOT DISTINCT FROM $4) + AND until > $5 +LIMIT 1` + + var dummy int + err := r.db.QueryRowContext(ctx, q, ruleID, platform, opsNullInt64(groupID), opsNullString(region), now).Scan(&dummy) + if err != nil { + if err == sql.ErrNoRows { + return false, nil + } + return false, err + } + return true, nil +} + +func scanOpsAlertEvent(row opsAlertEventRow) (*service.OpsAlertEvent, error) { + var ev service.OpsAlertEvent + var metricValue sql.NullFloat64 + var thresholdValue sql.NullFloat64 + var dimensionsRaw []byte + var resolvedAt sql.NullTime + + if err := row.Scan( + &ev.ID, + &ev.RuleID, + &ev.Severity, + &ev.Status, + &ev.Title, + &ev.Description, + &metricValue, + &thresholdValue, + &dimensionsRaw, + &ev.FiredAt, + &resolvedAt, + &ev.EmailSent, + &ev.CreatedAt, + ); err != nil { + return nil, err + } + if metricValue.Valid { + v := metricValue.Float64 + ev.MetricValue = &v + } + if thresholdValue.Valid { + v := thresholdValue.Float64 + ev.ThresholdValue = &v + } + if resolvedAt.Valid { + v := resolvedAt.Time + ev.ResolvedAt = &v + } + if len(dimensionsRaw) > 0 && string(dimensionsRaw) != "null" { + var decoded map[string]any + if err := json.Unmarshal(dimensionsRaw, &decoded); err == nil { + ev.Dimensions = decoded + } + } + return &ev, nil +} + +func buildOpsAlertEventsWhere(filter *service.OpsAlertEventFilter) (string, []any) { + clauses := []string{"1=1"} + args := []any{} + + if filter == nil { + return "WHERE " + strings.Join(clauses, " AND "), args + } + + if status := strings.TrimSpace(filter.Status); status != "" { + args = append(args, status) + clauses = append(clauses, "status = $"+itoa(len(args))) + } + if severity := strings.TrimSpace(filter.Severity); severity != "" { + args = append(args, severity) + clauses = append(clauses, "severity = $"+itoa(len(args))) + } + if filter.EmailSent != nil { + args = append(args, *filter.EmailSent) + clauses = append(clauses, "email_sent = $"+itoa(len(args))) + } + if filter.StartTime != nil && 
!filter.StartTime.IsZero() { + args = append(args, *filter.StartTime) + clauses = append(clauses, "fired_at >= $"+itoa(len(args))) + } + if filter.EndTime != nil && !filter.EndTime.IsZero() { + args = append(args, *filter.EndTime) + clauses = append(clauses, "fired_at < $"+itoa(len(args))) + } + + // Cursor pagination (descending by fired_at, then id) + if filter.BeforeFiredAt != nil && !filter.BeforeFiredAt.IsZero() && filter.BeforeID != nil && *filter.BeforeID > 0 { + args = append(args, *filter.BeforeFiredAt) + tsArg := "$" + itoa(len(args)) + args = append(args, *filter.BeforeID) + idArg := "$" + itoa(len(args)) + clauses = append(clauses, fmt.Sprintf("(fired_at < %s OR (fired_at = %s AND id < %s))", tsArg, tsArg, idArg)) + } + // Dimensions are stored in JSONB. We filter best-effort without requiring GIN indexes. + if platform := strings.TrimSpace(filter.Platform); platform != "" { + args = append(args, platform) + clauses = append(clauses, "(dimensions->>'platform') = $"+itoa(len(args))) + } + if filter.GroupID != nil && *filter.GroupID > 0 { + args = append(args, fmt.Sprintf("%d", *filter.GroupID)) + clauses = append(clauses, "(dimensions->>'group_id') = $"+itoa(len(args))) + } + + return "WHERE " + strings.Join(clauses, " AND "), args +} + +func opsNullJSONMap(v map[string]any) (any, error) { + if v == nil { + return sql.NullString{}, nil + } + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + if len(b) == 0 { + return sql.NullString{}, nil + } + return sql.NullString{String: string(b), Valid: true}, nil +} diff --git a/backend/internal/repository/ops_repo_dashboard.go b/backend/internal/repository/ops_repo_dashboard.go new file mode 100644 index 00000000..85791a9a --- /dev/null +++ b/backend/internal/repository/ops_repo_dashboard.go @@ -0,0 +1,1015 @@ +package repository + +import ( + "context" + "database/sql" + "errors" + "fmt" + "math" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) 
GetDashboardOverview(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsDashboardOverview, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + mode := filter.QueryMode + if !mode.IsValid() { + mode = service.OpsQueryModeRaw + } + + switch mode { + case service.OpsQueryModePreagg: + return r.getDashboardOverviewPreaggregated(ctx, filter) + case service.OpsQueryModeAuto: + out, err := r.getDashboardOverviewPreaggregated(ctx, filter) + if err != nil && errors.Is(err, service.ErrOpsPreaggregatedNotPopulated) { + return r.getDashboardOverviewRaw(ctx, filter) + } + return out, err + default: + return r.getDashboardOverviewRaw(ctx, filter) + } +} + +func (r *opsRepository) getDashboardOverviewRaw(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsDashboardOverview, error) { + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + + successCount, tokenConsumed, err := r.queryUsageCounts(ctx, filter, start, end) + if err != nil { + return nil, err + } + + duration, ttft, err := r.queryUsageLatency(ctx, filter, start, end) + if err != nil { + return nil, err + } + + errorTotal, businessLimited, errorCountSLA, upstreamExcl, upstream429, upstream529, err := r.queryErrorCounts(ctx, filter, start, end) + if err != nil { + return nil, err + } + + windowSeconds := end.Sub(start).Seconds() + if windowSeconds <= 0 { + windowSeconds = 1 + } + + requestCountTotal := successCount + errorTotal + requestCountSLA := successCount + errorCountSLA + + sla := safeDivideFloat64(float64(successCount), float64(requestCountSLA)) + errorRate := safeDivideFloat64(float64(errorCountSLA), float64(requestCountSLA)) + upstreamErrorRate := safeDivideFloat64(float64(upstreamExcl), float64(requestCountSLA)) + + qpsCurrent, tpsCurrent, 
err := r.queryCurrentRates(ctx, filter, end) + if err != nil { + return nil, err + } + + qpsPeak, err := r.queryPeakQPS(ctx, filter, start, end) + if err != nil { + return nil, err + } + tpsPeak, err := r.queryPeakTPS(ctx, filter, start, end) + if err != nil { + return nil, err + } + + qpsAvg := roundTo1DP(float64(requestCountTotal) / windowSeconds) + tpsAvg := roundTo1DP(float64(tokenConsumed) / windowSeconds) + + return &service.OpsDashboardOverview{ + StartTime: start, + EndTime: end, + Platform: strings.TrimSpace(filter.Platform), + GroupID: filter.GroupID, + + SuccessCount: successCount, + ErrorCountTotal: errorTotal, + BusinessLimitedCount: businessLimited, + ErrorCountSLA: errorCountSLA, + RequestCountTotal: requestCountTotal, + RequestCountSLA: requestCountSLA, + TokenConsumed: tokenConsumed, + + SLA: roundTo4DP(sla), + ErrorRate: roundTo4DP(errorRate), + UpstreamErrorRate: roundTo4DP(upstreamErrorRate), + UpstreamErrorCountExcl429529: upstreamExcl, + Upstream429Count: upstream429, + Upstream529Count: upstream529, + + QPS: service.OpsRateSummary{ + Current: qpsCurrent, + Peak: qpsPeak, + Avg: qpsAvg, + }, + TPS: service.OpsRateSummary{ + Current: tpsCurrent, + Peak: tpsPeak, + Avg: tpsAvg, + }, + + Duration: duration, + TTFT: ttft, + }, nil +} + +type opsDashboardPartial struct { + successCount int64 + errorCountTotal int64 + businessLimitedCount int64 + errorCountSLA int64 + + upstreamErrorCountExcl429529 int64 + upstream429Count int64 + upstream529Count int64 + + tokenConsumed int64 + + duration service.OpsPercentiles + ttft service.OpsPercentiles +} + +func (r *opsRepository) getDashboardOverviewPreaggregated(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsDashboardOverview, error) { + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + + // Stable full-hour range covered by pre-aggregation. 
+ aggSafeEnd := preaggSafeEnd(end) + aggFullStart := utcCeilToHour(start) + aggFullEnd := utcFloorToHour(aggSafeEnd) + + // If there are no stable full-hour buckets, use raw directly (short windows). + if !aggFullStart.Before(aggFullEnd) { + return r.getDashboardOverviewRaw(ctx, filter) + } + + // 1) Pre-aggregated stable segment. + preaggRows, err := r.listHourlyMetricsRows(ctx, filter, aggFullStart, aggFullEnd) + if err != nil { + return nil, err + } + if len(preaggRows) == 0 { + // Distinguish "no data" vs "preagg not populated yet". + if exists, err := r.rawOpsDataExists(ctx, filter, aggFullStart, aggFullEnd); err == nil && exists { + return nil, service.ErrOpsPreaggregatedNotPopulated + } + } + preagg := aggregateHourlyRows(preaggRows) + + // 2) Raw head/tail fragments (at most ~1 hour each). + head := opsDashboardPartial{} + tail := opsDashboardPartial{} + + if start.Before(aggFullStart) { + part, err := r.queryRawPartial(ctx, filter, start, minTime(end, aggFullStart)) + if err != nil { + return nil, err + } + head = *part + } + if aggFullEnd.Before(end) { + part, err := r.queryRawPartial(ctx, filter, maxTime(start, aggFullEnd), end) + if err != nil { + return nil, err + } + tail = *part + } + + // Merge counts. 
+ successCount := preagg.successCount + head.successCount + tail.successCount + errorTotal := preagg.errorCountTotal + head.errorCountTotal + tail.errorCountTotal + businessLimited := preagg.businessLimitedCount + head.businessLimitedCount + tail.businessLimitedCount + errorCountSLA := preagg.errorCountSLA + head.errorCountSLA + tail.errorCountSLA + + upstreamExcl := preagg.upstreamErrorCountExcl429529 + head.upstreamErrorCountExcl429529 + tail.upstreamErrorCountExcl429529 + upstream429 := preagg.upstream429Count + head.upstream429Count + tail.upstream429Count + upstream529 := preagg.upstream529Count + head.upstream529Count + tail.upstream529Count + + tokenConsumed := preagg.tokenConsumed + head.tokenConsumed + tail.tokenConsumed + + // Approximate percentiles across segments: + // - p50/p90/avg: weighted average by success_count + // - p95/p99/max: max (conservative tail) + duration := combineApproxPercentiles([]opsPercentileSegment{ + {weight: preagg.successCount, p: preagg.duration}, + {weight: head.successCount, p: head.duration}, + {weight: tail.successCount, p: tail.duration}, + }) + ttft := combineApproxPercentiles([]opsPercentileSegment{ + {weight: preagg.successCount, p: preagg.ttft}, + {weight: head.successCount, p: head.ttft}, + {weight: tail.successCount, p: tail.ttft}, + }) + + windowSeconds := end.Sub(start).Seconds() + if windowSeconds <= 0 { + windowSeconds = 1 + } + + requestCountTotal := successCount + errorTotal + requestCountSLA := successCount + errorCountSLA + + sla := safeDivideFloat64(float64(successCount), float64(requestCountSLA)) + errorRate := safeDivideFloat64(float64(errorCountSLA), float64(requestCountSLA)) + upstreamErrorRate := safeDivideFloat64(float64(upstreamExcl), float64(requestCountSLA)) + + // Keep "current" rates as raw, to preserve realtime semantics. + qpsCurrent, tpsCurrent, err := r.queryCurrentRates(ctx, filter, end) + if err != nil { + return nil, err + } + + // NOTE: peak still uses raw logs (minute granularity). 
This is typically cheaper than percentile_cont + // and keeps semantics consistent across modes. + qpsPeak, err := r.queryPeakQPS(ctx, filter, start, end) + if err != nil { + return nil, err + } + tpsPeak, err := r.queryPeakTPS(ctx, filter, start, end) + if err != nil { + return nil, err + } + + qpsAvg := roundTo1DP(float64(requestCountTotal) / windowSeconds) + tpsAvg := roundTo1DP(float64(tokenConsumed) / windowSeconds) + + return &service.OpsDashboardOverview{ + StartTime: start, + EndTime: end, + Platform: strings.TrimSpace(filter.Platform), + GroupID: filter.GroupID, + + SuccessCount: successCount, + ErrorCountTotal: errorTotal, + BusinessLimitedCount: businessLimited, + ErrorCountSLA: errorCountSLA, + RequestCountTotal: requestCountTotal, + RequestCountSLA: requestCountSLA, + TokenConsumed: tokenConsumed, + + SLA: roundTo4DP(sla), + ErrorRate: roundTo4DP(errorRate), + UpstreamErrorRate: roundTo4DP(upstreamErrorRate), + UpstreamErrorCountExcl429529: upstreamExcl, + Upstream429Count: upstream429, + Upstream529Count: upstream529, + + QPS: service.OpsRateSummary{ + Current: qpsCurrent, + Peak: qpsPeak, + Avg: qpsAvg, + }, + TPS: service.OpsRateSummary{ + Current: tpsCurrent, + Peak: tpsPeak, + Avg: tpsAvg, + }, + + Duration: duration, + TTFT: ttft, + }, nil +} + +type opsHourlyMetricsRow struct { + bucketStart time.Time + + successCount int64 + errorCountTotal int64 + businessLimitedCount int64 + errorCountSLA int64 + + upstreamErrorCountExcl429529 int64 + upstream429Count int64 + upstream529Count int64 + + tokenConsumed int64 + + durationP50 sql.NullInt64 + durationP90 sql.NullInt64 + durationP95 sql.NullInt64 + durationP99 sql.NullInt64 + durationAvg sql.NullFloat64 + durationMax sql.NullInt64 + + ttftP50 sql.NullInt64 + ttftP90 sql.NullInt64 + ttftP95 sql.NullInt64 + ttftP99 sql.NullInt64 + ttftAvg sql.NullFloat64 + ttftMax sql.NullInt64 +} + +func (r *opsRepository) listHourlyMetricsRows(ctx context.Context, filter *service.OpsDashboardFilter, start, end 
time.Time) ([]opsHourlyMetricsRow, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if start.IsZero() || end.IsZero() || !start.Before(end) { + return []opsHourlyMetricsRow{}, nil + } + + where := "bucket_start >= $1 AND bucket_start < $2" + args := []any{start.UTC(), end.UTC()} + idx := 3 + + platform := "" + groupID := (*int64)(nil) + if filter != nil { + platform = strings.TrimSpace(strings.ToLower(filter.Platform)) + groupID = filter.GroupID + } + + switch { + case groupID != nil && *groupID > 0: + where += fmt.Sprintf(" AND group_id = $%d", idx) + args = append(args, *groupID) + idx++ + if platform != "" { + where += fmt.Sprintf(" AND platform = $%d", idx) + args = append(args, platform) + // idx++ removed - not used after this + } + case platform != "": + where += fmt.Sprintf(" AND platform = $%d AND group_id IS NULL", idx) + args = append(args, platform) + // idx++ removed - not used after this + default: + where += " AND platform IS NULL AND group_id IS NULL" + } + + q := ` +SELECT + bucket_start, + success_count, + error_count_total, + business_limited_count, + error_count_sla, + upstream_error_count_excl_429_529, + upstream_429_count, + upstream_529_count, + token_consumed, + duration_p50_ms, + duration_p90_ms, + duration_p95_ms, + duration_p99_ms, + duration_avg_ms, + duration_max_ms, + ttft_p50_ms, + ttft_p90_ms, + ttft_p95_ms, + ttft_p99_ms, + ttft_avg_ms, + ttft_max_ms +FROM ops_metrics_hourly +WHERE ` + where + ` +ORDER BY bucket_start ASC` + + rows, err := r.db.QueryContext(ctx, q, args...) 
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]opsHourlyMetricsRow, 0, 64) + for rows.Next() { + var row opsHourlyMetricsRow + if err := rows.Scan( + &row.bucketStart, + &row.successCount, + &row.errorCountTotal, + &row.businessLimitedCount, + &row.errorCountSLA, + &row.upstreamErrorCountExcl429529, + &row.upstream429Count, + &row.upstream529Count, + &row.tokenConsumed, + &row.durationP50, + &row.durationP90, + &row.durationP95, + &row.durationP99, + &row.durationAvg, + &row.durationMax, + &row.ttftP50, + &row.ttftP90, + &row.ttftP95, + &row.ttftP99, + &row.ttftAvg, + &row.ttftMax, + ); err != nil { + return nil, err + } + out = append(out, row) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +func aggregateHourlyRows(rows []opsHourlyMetricsRow) opsDashboardPartial { + out := opsDashboardPartial{} + if len(rows) == 0 { + return out + } + + var ( + p50Sum float64 + p50W int64 + p90Sum float64 + p90W int64 + avgSum float64 + avgW int64 + ) + var ( + ttftP50Sum float64 + ttftP50W int64 + ttftP90Sum float64 + ttftP90W int64 + ttftAvgSum float64 + ttftAvgW int64 + ) + + var ( + p95Max *int + p99Max *int + maxMax *int + + ttftP95Max *int + ttftP99Max *int + ttftMaxMax *int + ) + + for _, row := range rows { + out.successCount += row.successCount + out.errorCountTotal += row.errorCountTotal + out.businessLimitedCount += row.businessLimitedCount + out.errorCountSLA += row.errorCountSLA + + out.upstreamErrorCountExcl429529 += row.upstreamErrorCountExcl429529 + out.upstream429Count += row.upstream429Count + out.upstream529Count += row.upstream529Count + + out.tokenConsumed += row.tokenConsumed + + if row.successCount > 0 { + if row.durationP50.Valid { + p50Sum += float64(row.durationP50.Int64) * float64(row.successCount) + p50W += row.successCount + } + if row.durationP90.Valid { + p90Sum += float64(row.durationP90.Int64) * float64(row.successCount) + p90W += row.successCount + } + if 
row.durationAvg.Valid { + avgSum += row.durationAvg.Float64 * float64(row.successCount) + avgW += row.successCount + } + if row.ttftP50.Valid { + ttftP50Sum += float64(row.ttftP50.Int64) * float64(row.successCount) + ttftP50W += row.successCount + } + if row.ttftP90.Valid { + ttftP90Sum += float64(row.ttftP90.Int64) * float64(row.successCount) + ttftP90W += row.successCount + } + if row.ttftAvg.Valid { + ttftAvgSum += row.ttftAvg.Float64 * float64(row.successCount) + ttftAvgW += row.successCount + } + } + + if row.durationP95.Valid { + v := int(row.durationP95.Int64) + if p95Max == nil || v > *p95Max { + p95Max = &v + } + } + if row.durationP99.Valid { + v := int(row.durationP99.Int64) + if p99Max == nil || v > *p99Max { + p99Max = &v + } + } + if row.durationMax.Valid { + v := int(row.durationMax.Int64) + if maxMax == nil || v > *maxMax { + maxMax = &v + } + } + + if row.ttftP95.Valid { + v := int(row.ttftP95.Int64) + if ttftP95Max == nil || v > *ttftP95Max { + ttftP95Max = &v + } + } + if row.ttftP99.Valid { + v := int(row.ttftP99.Int64) + if ttftP99Max == nil || v > *ttftP99Max { + ttftP99Max = &v + } + } + if row.ttftMax.Valid { + v := int(row.ttftMax.Int64) + if ttftMaxMax == nil || v > *ttftMaxMax { + ttftMaxMax = &v + } + } + } + + // duration + if p50W > 0 { + v := int(math.Round(p50Sum / float64(p50W))) + out.duration.P50 = &v + } + if p90W > 0 { + v := int(math.Round(p90Sum / float64(p90W))) + out.duration.P90 = &v + } + out.duration.P95 = p95Max + out.duration.P99 = p99Max + if avgW > 0 { + v := int(math.Round(avgSum / float64(avgW))) + out.duration.Avg = &v + } + out.duration.Max = maxMax + + // ttft + if ttftP50W > 0 { + v := int(math.Round(ttftP50Sum / float64(ttftP50W))) + out.ttft.P50 = &v + } + if ttftP90W > 0 { + v := int(math.Round(ttftP90Sum / float64(ttftP90W))) + out.ttft.P90 = &v + } + out.ttft.P95 = ttftP95Max + out.ttft.P99 = ttftP99Max + if ttftAvgW > 0 { + v := int(math.Round(ttftAvgSum / float64(ttftAvgW))) + out.ttft.Avg = &v + } + 
out.ttft.Max = ttftMaxMax + + return out +} + +func (r *opsRepository) queryRawPartial(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (*opsDashboardPartial, error) { + successCount, tokenConsumed, err := r.queryUsageCounts(ctx, filter, start, end) + if err != nil { + return nil, err + } + + duration, ttft, err := r.queryUsageLatency(ctx, filter, start, end) + if err != nil { + return nil, err + } + + errorTotal, businessLimited, errorCountSLA, upstreamExcl, upstream429, upstream529, err := r.queryErrorCounts(ctx, filter, start, end) + if err != nil { + return nil, err + } + + return &opsDashboardPartial{ + successCount: successCount, + errorCountTotal: errorTotal, + businessLimitedCount: businessLimited, + errorCountSLA: errorCountSLA, + upstreamErrorCountExcl429529: upstreamExcl, + upstream429Count: upstream429, + upstream529Count: upstream529, + tokenConsumed: tokenConsumed, + duration: duration, + ttft: ttft, + }, nil +} + +func (r *opsRepository) rawOpsDataExists(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (bool, error) { + { + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + q := `SELECT EXISTS(SELECT 1 FROM usage_logs ul ` + join + ` ` + where + ` LIMIT 1)` + var exists bool + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&exists); err != nil { + return false, err + } + if exists { + return true, nil + } + } + + { + where, args, _ := buildErrorWhere(filter, start, end, 1) + q := `SELECT EXISTS(SELECT 1 FROM ops_error_logs ` + where + ` LIMIT 1)` + var exists bool + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&exists); err != nil { + return false, err + } + return exists, nil + } +} + +type opsPercentileSegment struct { + weight int64 + p service.OpsPercentiles +} + +func combineApproxPercentiles(segments []opsPercentileSegment) service.OpsPercentiles { + weightedInt := func(get func(service.OpsPercentiles) *int) *int { + var sum float64 + var w int64 + for _, 
seg := range segments { + if seg.weight <= 0 { + continue + } + v := get(seg.p) + if v == nil { + continue + } + sum += float64(*v) * float64(seg.weight) + w += seg.weight + } + if w <= 0 { + return nil + } + out := int(math.Round(sum / float64(w))) + return &out + } + + maxInt := func(get func(service.OpsPercentiles) *int) *int { + var max *int + for _, seg := range segments { + v := get(seg.p) + if v == nil { + continue + } + if max == nil || *v > *max { + c := *v + max = &c + } + } + return max + } + + return service.OpsPercentiles{ + P50: weightedInt(func(p service.OpsPercentiles) *int { return p.P50 }), + P90: weightedInt(func(p service.OpsPercentiles) *int { return p.P90 }), + P95: maxInt(func(p service.OpsPercentiles) *int { return p.P95 }), + P99: maxInt(func(p service.OpsPercentiles) *int { return p.P99 }), + Avg: weightedInt(func(p service.OpsPercentiles) *int { return p.Avg }), + Max: maxInt(func(p service.OpsPercentiles) *int { return p.Max }), + } +} + +func preaggSafeEnd(endTime time.Time) time.Time { + now := time.Now().UTC() + cutoff := now.Add(-5 * time.Minute) + if endTime.After(cutoff) { + return cutoff + } + return endTime +} + +func utcCeilToHour(t time.Time) time.Time { + u := t.UTC() + f := u.Truncate(time.Hour) + if f.Equal(u) { + return f + } + return f.Add(time.Hour) +} + +func utcFloorToHour(t time.Time) time.Time { + return t.UTC().Truncate(time.Hour) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} + +func (r *opsRepository) queryUsageCounts(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (successCount int64, tokenConsumed int64, err error) { + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + + q := ` +SELECT + COALESCE(COUNT(*), 0) AS success_count, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS 
token_consumed +FROM usage_logs ul +` + join + ` +` + where + + var tokens sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&successCount, &tokens); err != nil { + return 0, 0, err + } + if tokens.Valid { + tokenConsumed = tokens.Int64 + } + return successCount, tokenConsumed, nil +} + +func (r *opsRepository) queryUsageLatency(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (duration service.OpsPercentiles, ttft service.OpsPercentiles, err error) { + { + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + q := ` +SELECT + percentile_cont(0.50) WITHIN GROUP (ORDER BY duration_ms) AS p50, + percentile_cont(0.90) WITHIN GROUP (ORDER BY duration_ms) AS p90, + percentile_cont(0.95) WITHIN GROUP (ORDER BY duration_ms) AS p95, + percentile_cont(0.99) WITHIN GROUP (ORDER BY duration_ms) AS p99, + AVG(duration_ms) AS avg_ms, + MAX(duration_ms) AS max_ms +FROM usage_logs ul +` + join + ` +` + where + ` +AND duration_ms IS NOT NULL` + + var p50, p90, p95, p99 sql.NullFloat64 + var avg sql.NullFloat64 + var max sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&p50, &p90, &p95, &p99, &avg, &max); err != nil { + return service.OpsPercentiles{}, service.OpsPercentiles{}, err + } + duration.P50 = floatToIntPtr(p50) + duration.P90 = floatToIntPtr(p90) + duration.P95 = floatToIntPtr(p95) + duration.P99 = floatToIntPtr(p99) + duration.Avg = floatToIntPtr(avg) + if max.Valid { + v := int(max.Int64) + duration.Max = &v + } + } + + { + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + q := ` +SELECT + percentile_cont(0.50) WITHIN GROUP (ORDER BY first_token_ms) AS p50, + percentile_cont(0.90) WITHIN GROUP (ORDER BY first_token_ms) AS p90, + percentile_cont(0.95) WITHIN GROUP (ORDER BY first_token_ms) AS p95, + percentile_cont(0.99) WITHIN GROUP (ORDER BY first_token_ms) AS p99, + AVG(first_token_ms) AS avg_ms, + MAX(first_token_ms) AS max_ms +FROM usage_logs ul +` + join + ` +` + where + 
` +AND first_token_ms IS NOT NULL` + + var p50, p90, p95, p99 sql.NullFloat64 + var avg sql.NullFloat64 + var max sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&p50, &p90, &p95, &p99, &avg, &max); err != nil { + return service.OpsPercentiles{}, service.OpsPercentiles{}, err + } + ttft.P50 = floatToIntPtr(p50) + ttft.P90 = floatToIntPtr(p90) + ttft.P95 = floatToIntPtr(p95) + ttft.P99 = floatToIntPtr(p99) + ttft.Avg = floatToIntPtr(avg) + if max.Valid { + v := int(max.Int64) + ttft.Max = &v + } + } + + return duration, ttft, nil +} + +func (r *opsRepository) queryErrorCounts(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) ( + errorTotal int64, + businessLimited int64, + errorCountSLA int64, + upstreamExcl429529 int64, + upstream429 int64, + upstream529 int64, + err error, +) { + where, args, _ := buildErrorWhere(filter, start, end, 1) + + q := ` +SELECT + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400), 0) AS error_total, + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND is_business_limited), 0) AS business_limited, + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND NOT is_business_limited), 0) AS error_sla, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)), 0) AS upstream_excl, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 429), 0) AS upstream_429, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 529), 0) AS upstream_529 +FROM ops_error_logs +` + where + + if err := r.db.QueryRowContext(ctx, q, args...).Scan( + &errorTotal, + &businessLimited, + &errorCountSLA, + &upstreamExcl429529, + &upstream429, + &upstream529, + ); err != nil { + return 0, 0, 0, 0, 0, 0, err + } + 
return errorTotal, businessLimited, errorCountSLA, upstreamExcl429529, upstream429, upstream529, nil +} + +func (r *opsRepository) queryCurrentRates(ctx context.Context, filter *service.OpsDashboardFilter, end time.Time) (qpsCurrent float64, tpsCurrent float64, err error) { + windowStart := end.Add(-1 * time.Minute) + + successCount1m, token1m, err := r.queryUsageCounts(ctx, filter, windowStart, end) + if err != nil { + return 0, 0, err + } + errorCount1m, _, _, _, _, _, err := r.queryErrorCounts(ctx, filter, windowStart, end) + if err != nil { + return 0, 0, err + } + + qpsCurrent = roundTo1DP(float64(successCount1m+errorCount1m) / 60.0) + tpsCurrent = roundTo1DP(float64(token1m) / 60.0) + return qpsCurrent, tpsCurrent, nil +} + +func (r *opsRepository) queryPeakQPS(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (float64, error) { + usageJoin, usageWhere, usageArgs, next := buildUsageWhere(filter, start, end, 1) + errorWhere, errorArgs, _ := buildErrorWhere(filter, start, end, next) + + q := ` +WITH usage_buckets AS ( + SELECT date_trunc('minute', ul.created_at) AS bucket, COUNT(*) AS cnt + FROM usage_logs ul + ` + usageJoin + ` + ` + usageWhere + ` + GROUP BY 1 +), +error_buckets AS ( + SELECT date_trunc('minute', created_at) AS bucket, COUNT(*) AS cnt + FROM ops_error_logs + ` + errorWhere + ` + AND COALESCE(status_code, 0) >= 400 + GROUP BY 1 +), +combined AS ( + SELECT COALESCE(u.bucket, e.bucket) AS bucket, + COALESCE(u.cnt, 0) + COALESCE(e.cnt, 0) AS total + FROM usage_buckets u + FULL OUTER JOIN error_buckets e ON u.bucket = e.bucket +) +SELECT COALESCE(MAX(total), 0) FROM combined` + + args := append(usageArgs, errorArgs...) 
+ + var maxPerMinute sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&maxPerMinute); err != nil { + return 0, err + } + if !maxPerMinute.Valid || maxPerMinute.Int64 <= 0 { + return 0, nil + } + return roundTo1DP(float64(maxPerMinute.Int64) / 60.0), nil +} + +func (r *opsRepository) queryPeakTPS(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (float64, error) { + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + + q := ` +SELECT COALESCE(MAX(tokens_per_min), 0) +FROM ( + SELECT + date_trunc('minute', ul.created_at) AS bucket, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS tokens_per_min + FROM usage_logs ul + ` + join + ` + ` + where + ` + GROUP BY 1 +) t` + + var maxPerMinute sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&maxPerMinute); err != nil { + return 0, err + } + if !maxPerMinute.Valid || maxPerMinute.Int64 <= 0 { + return 0, nil + } + return roundTo1DP(float64(maxPerMinute.Int64) / 60.0), nil +} + +func buildUsageWhere(filter *service.OpsDashboardFilter, start, end time.Time, startIndex int) (join string, where string, args []any, nextIndex int) { + platform := "" + groupID := (*int64)(nil) + if filter != nil { + platform = strings.TrimSpace(strings.ToLower(filter.Platform)) + groupID = filter.GroupID + } + + idx := startIndex + clauses := make([]string, 0, 4) + args = make([]any, 0, 4) + + args = append(args, start) + clauses = append(clauses, fmt.Sprintf("ul.created_at >= $%d", idx)) + idx++ + args = append(args, end) + clauses = append(clauses, fmt.Sprintf("ul.created_at < $%d", idx)) + idx++ + + if groupID != nil && *groupID > 0 { + args = append(args, *groupID) + clauses = append(clauses, fmt.Sprintf("ul.group_id = $%d", idx)) + idx++ + } + if platform != "" { + // Prefer group.platform when available; fall back to account.platform so we don't + // drop rows where group_id is NULL. 
+ join = "LEFT JOIN groups g ON g.id = ul.group_id LEFT JOIN accounts a ON a.id = ul.account_id" + args = append(args, platform) + clauses = append(clauses, fmt.Sprintf("COALESCE(NULLIF(g.platform,''), a.platform) = $%d", idx)) + idx++ + } + + where = "WHERE " + strings.Join(clauses, " AND ") + return join, where, args, idx +} + +func buildErrorWhere(filter *service.OpsDashboardFilter, start, end time.Time, startIndex int) (where string, args []any, nextIndex int) { + platform := "" + groupID := (*int64)(nil) + if filter != nil { + platform = strings.TrimSpace(strings.ToLower(filter.Platform)) + groupID = filter.GroupID + } + + idx := startIndex + clauses := make([]string, 0, 5) + args = make([]any, 0, 5) + + args = append(args, start) + clauses = append(clauses, fmt.Sprintf("created_at >= $%d", idx)) + idx++ + args = append(args, end) + clauses = append(clauses, fmt.Sprintf("created_at < $%d", idx)) + idx++ + + clauses = append(clauses, "is_count_tokens = FALSE") + + if groupID != nil && *groupID > 0 { + args = append(args, *groupID) + clauses = append(clauses, fmt.Sprintf("group_id = $%d", idx)) + idx++ + } + if platform != "" { + args = append(args, platform) + clauses = append(clauses, fmt.Sprintf("platform = $%d", idx)) + idx++ + } + + where = "WHERE " + strings.Join(clauses, " AND ") + return where, args, idx +} + +func floatToIntPtr(v sql.NullFloat64) *int { + if !v.Valid { + return nil + } + n := int(math.Round(v.Float64)) + return &n +} + +func safeDivideFloat64(numerator float64, denominator float64) float64 { + if denominator == 0 { + return 0 + } + return numerator / denominator +} + +func roundTo1DP(v float64) float64 { + return math.Round(v*10) / 10 +} + +func roundTo4DP(v float64) float64 { + return math.Round(v*10000) / 10000 +} diff --git a/backend/internal/repository/ops_repo_histograms.go b/backend/internal/repository/ops_repo_histograms.go new file mode 100644 index 00000000..c2978798 --- /dev/null +++ 
b/backend/internal/repository/ops_repo_histograms.go @@ -0,0 +1,79 @@ +package repository + +import ( + "context" + "fmt" + "strings" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) GetLatencyHistogram(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsLatencyHistogramResponse, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + rangeExpr := latencyHistogramRangeCaseExpr("ul.duration_ms") + orderExpr := latencyHistogramRangeOrderCaseExpr("ul.duration_ms") + + q := ` +SELECT + ` + rangeExpr + ` AS range, + COALESCE(COUNT(*), 0) AS count, + ` + orderExpr + ` AS ord +FROM usage_logs ul +` + join + ` +` + where + ` +AND ul.duration_ms IS NOT NULL +GROUP BY 1, 3 +ORDER BY 3 ASC` + + rows, err := r.db.QueryContext(ctx, q, args...) 
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + counts := make(map[string]int64, len(latencyHistogramOrderedRanges)) + var total int64 + for rows.Next() { + var label string + var count int64 + var _ord int + if err := rows.Scan(&label, &count, &_ord); err != nil { + return nil, err + } + counts[label] = count + total += count + } + if err := rows.Err(); err != nil { + return nil, err + } + + buckets := make([]*service.OpsLatencyHistogramBucket, 0, len(latencyHistogramOrderedRanges)) + for _, label := range latencyHistogramOrderedRanges { + buckets = append(buckets, &service.OpsLatencyHistogramBucket{ + Range: label, + Count: counts[label], + }) + } + + return &service.OpsLatencyHistogramResponse{ + StartTime: start, + EndTime: end, + Platform: strings.TrimSpace(filter.Platform), + GroupID: filter.GroupID, + TotalRequests: total, + Buckets: buckets, + }, nil +} diff --git a/backend/internal/repository/ops_repo_latency_histogram_buckets.go b/backend/internal/repository/ops_repo_latency_histogram_buckets.go new file mode 100644 index 00000000..cd5bed37 --- /dev/null +++ b/backend/internal/repository/ops_repo_latency_histogram_buckets.go @@ -0,0 +1,64 @@ +package repository + +import ( + "fmt" + "strings" +) + +type latencyHistogramBucket struct { + upperMs int + label string +} + +var latencyHistogramBuckets = []latencyHistogramBucket{ + {upperMs: 100, label: "0-100ms"}, + {upperMs: 200, label: "100-200ms"}, + {upperMs: 500, label: "200-500ms"}, + {upperMs: 1000, label: "500-1000ms"}, + {upperMs: 2000, label: "1000-2000ms"}, + {upperMs: 0, label: "2000ms+"}, // default bucket +} + +var latencyHistogramOrderedRanges = func() []string { + out := make([]string, 0, len(latencyHistogramBuckets)) + for _, b := range latencyHistogramBuckets { + out = append(out, b.label) + } + return out +}() + +func latencyHistogramRangeCaseExpr(column string) string { + var sb strings.Builder + _, _ = sb.WriteString("CASE\n") + + for _, b := range 
latencyHistogramBuckets { + if b.upperMs <= 0 { + continue + } + _, _ = sb.WriteString(fmt.Sprintf("\tWHEN %s < %d THEN '%s'\n", column, b.upperMs, b.label)) + } + + // Default bucket. + last := latencyHistogramBuckets[len(latencyHistogramBuckets)-1] + _, _ = sb.WriteString(fmt.Sprintf("\tELSE '%s'\n", last.label)) + _, _ = sb.WriteString("END") + return sb.String() +} + +func latencyHistogramRangeOrderCaseExpr(column string) string { + var sb strings.Builder + _, _ = sb.WriteString("CASE\n") + + order := 1 + for _, b := range latencyHistogramBuckets { + if b.upperMs <= 0 { + continue + } + _, _ = sb.WriteString(fmt.Sprintf("\tWHEN %s < %d THEN %d\n", column, b.upperMs, order)) + order++ + } + + _, _ = sb.WriteString(fmt.Sprintf("\tELSE %d\n", order)) + _, _ = sb.WriteString("END") + return sb.String() +} diff --git a/backend/internal/repository/ops_repo_latency_histogram_buckets_test.go b/backend/internal/repository/ops_repo_latency_histogram_buckets_test.go new file mode 100644 index 00000000..dc79f6cc --- /dev/null +++ b/backend/internal/repository/ops_repo_latency_histogram_buckets_test.go @@ -0,0 +1,14 @@ +package repository + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLatencyHistogramBuckets_AreConsistent(t *testing.T) { + require.Equal(t, len(latencyHistogramBuckets), len(latencyHistogramOrderedRanges)) + for i, b := range latencyHistogramBuckets { + require.Equal(t, b.label, latencyHistogramOrderedRanges[i]) + } +} diff --git a/backend/internal/repository/ops_repo_metrics.go b/backend/internal/repository/ops_repo_metrics.go new file mode 100644 index 00000000..713e0eb9 --- /dev/null +++ b/backend/internal/repository/ops_repo_metrics.go @@ -0,0 +1,436 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) InsertSystemMetrics(ctx context.Context, input *service.OpsInsertSystemMetricsInput) error { + if r == nil || r.db 
== nil { + return fmt.Errorf("nil ops repository") + } + if input == nil { + return fmt.Errorf("nil input") + } + + window := input.WindowMinutes + if window <= 0 { + window = 1 + } + createdAt := input.CreatedAt + if createdAt.IsZero() { + createdAt = time.Now().UTC() + } + + q := ` +INSERT INTO ops_system_metrics ( + created_at, + window_minutes, + platform, + group_id, + + success_count, + error_count_total, + business_limited_count, + error_count_sla, + + upstream_error_count_excl_429_529, + upstream_429_count, + upstream_529_count, + + token_consumed, + qps, + tps, + + duration_p50_ms, + duration_p90_ms, + duration_p95_ms, + duration_p99_ms, + duration_avg_ms, + duration_max_ms, + + ttft_p50_ms, + ttft_p90_ms, + ttft_p95_ms, + ttft_p99_ms, + ttft_avg_ms, + ttft_max_ms, + + cpu_usage_percent, + memory_used_mb, + memory_total_mb, + memory_usage_percent, + + db_ok, + redis_ok, + + redis_conn_total, + redis_conn_idle, + + db_conn_active, + db_conn_idle, + db_conn_waiting, + + goroutine_count, + concurrency_queue_depth +) VALUES ( + $1,$2,$3,$4, + $5,$6,$7,$8, + $9,$10,$11, + $12,$13,$14, + $15,$16,$17,$18,$19,$20, + $21,$22,$23,$24,$25,$26, + $27,$28,$29,$30, + $31,$32, + $33,$34, + $35,$36,$37, + $38,$39 +)` + + _, err := r.db.ExecContext( + ctx, + q, + createdAt, + window, + opsNullString(input.Platform), + opsNullInt64(input.GroupID), + + input.SuccessCount, + input.ErrorCountTotal, + input.BusinessLimitedCount, + input.ErrorCountSLA, + + input.UpstreamErrorCountExcl429529, + input.Upstream429Count, + input.Upstream529Count, + + input.TokenConsumed, + opsNullFloat64(input.QPS), + opsNullFloat64(input.TPS), + + opsNullInt(input.DurationP50Ms), + opsNullInt(input.DurationP90Ms), + opsNullInt(input.DurationP95Ms), + opsNullInt(input.DurationP99Ms), + opsNullFloat64(input.DurationAvgMs), + opsNullInt(input.DurationMaxMs), + + opsNullInt(input.TTFTP50Ms), + opsNullInt(input.TTFTP90Ms), + opsNullInt(input.TTFTP95Ms), + opsNullInt(input.TTFTP99Ms), + 
opsNullFloat64(input.TTFTAvgMs), + opsNullInt(input.TTFTMaxMs), + + opsNullFloat64(input.CPUUsagePercent), + opsNullInt(input.MemoryUsedMB), + opsNullInt(input.MemoryTotalMB), + opsNullFloat64(input.MemoryUsagePercent), + + opsNullBool(input.DBOK), + opsNullBool(input.RedisOK), + + opsNullInt(input.RedisConnTotal), + opsNullInt(input.RedisConnIdle), + + opsNullInt(input.DBConnActive), + opsNullInt(input.DBConnIdle), + opsNullInt(input.DBConnWaiting), + + opsNullInt(input.GoroutineCount), + opsNullInt(input.ConcurrencyQueueDepth), + ) + return err +} + +func (r *opsRepository) GetLatestSystemMetrics(ctx context.Context, windowMinutes int) (*service.OpsSystemMetricsSnapshot, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if windowMinutes <= 0 { + windowMinutes = 1 + } + + q := ` +SELECT + id, + created_at, + window_minutes, + + cpu_usage_percent, + memory_used_mb, + memory_total_mb, + memory_usage_percent, + + db_ok, + redis_ok, + + redis_conn_total, + redis_conn_idle, + + db_conn_active, + db_conn_idle, + db_conn_waiting, + + goroutine_count, + concurrency_queue_depth +FROM ops_system_metrics +WHERE window_minutes = $1 + AND platform IS NULL + AND group_id IS NULL +ORDER BY created_at DESC +LIMIT 1` + + var out service.OpsSystemMetricsSnapshot + var cpu sql.NullFloat64 + var memUsed sql.NullInt64 + var memTotal sql.NullInt64 + var memPct sql.NullFloat64 + var dbOK sql.NullBool + var redisOK sql.NullBool + var redisTotal sql.NullInt64 + var redisIdle sql.NullInt64 + var dbActive sql.NullInt64 + var dbIdle sql.NullInt64 + var dbWaiting sql.NullInt64 + var goroutines sql.NullInt64 + var queueDepth sql.NullInt64 + + if err := r.db.QueryRowContext(ctx, q, windowMinutes).Scan( + &out.ID, + &out.CreatedAt, + &out.WindowMinutes, + &cpu, + &memUsed, + &memTotal, + &memPct, + &dbOK, + &redisOK, + &redisTotal, + &redisIdle, + &dbActive, + &dbIdle, + &dbWaiting, + &goroutines, + &queueDepth, + ); err != nil { + return nil, err + } + 
+ if cpu.Valid { + v := cpu.Float64 + out.CPUUsagePercent = &v + } + if memUsed.Valid { + v := memUsed.Int64 + out.MemoryUsedMB = &v + } + if memTotal.Valid { + v := memTotal.Int64 + out.MemoryTotalMB = &v + } + if memPct.Valid { + v := memPct.Float64 + out.MemoryUsagePercent = &v + } + if dbOK.Valid { + v := dbOK.Bool + out.DBOK = &v + } + if redisOK.Valid { + v := redisOK.Bool + out.RedisOK = &v + } + if redisTotal.Valid { + v := int(redisTotal.Int64) + out.RedisConnTotal = &v + } + if redisIdle.Valid { + v := int(redisIdle.Int64) + out.RedisConnIdle = &v + } + if dbActive.Valid { + v := int(dbActive.Int64) + out.DBConnActive = &v + } + if dbIdle.Valid { + v := int(dbIdle.Int64) + out.DBConnIdle = &v + } + if dbWaiting.Valid { + v := int(dbWaiting.Int64) + out.DBConnWaiting = &v + } + if goroutines.Valid { + v := int(goroutines.Int64) + out.GoroutineCount = &v + } + if queueDepth.Valid { + v := int(queueDepth.Int64) + out.ConcurrencyQueueDepth = &v + } + + return &out, nil +} + +func (r *opsRepository) UpsertJobHeartbeat(ctx context.Context, input *service.OpsUpsertJobHeartbeatInput) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if input == nil { + return fmt.Errorf("nil input") + } + if input.JobName == "" { + return fmt.Errorf("job_name required") + } + + q := ` +INSERT INTO ops_job_heartbeats ( + job_name, + last_run_at, + last_success_at, + last_error_at, + last_error, + last_duration_ms, + last_result, + updated_at +) VALUES ( + $1,$2,$3,$4,$5,$6,$7,NOW() +) +ON CONFLICT (job_name) DO UPDATE SET + last_run_at = COALESCE(EXCLUDED.last_run_at, ops_job_heartbeats.last_run_at), + last_success_at = COALESCE(EXCLUDED.last_success_at, ops_job_heartbeats.last_success_at), + last_error_at = CASE + WHEN EXCLUDED.last_success_at IS NOT NULL THEN NULL + ELSE COALESCE(EXCLUDED.last_error_at, ops_job_heartbeats.last_error_at) + END, + last_error = CASE + WHEN EXCLUDED.last_success_at IS NOT NULL THEN NULL + ELSE 
COALESCE(EXCLUDED.last_error, ops_job_heartbeats.last_error) + END, + last_duration_ms = COALESCE(EXCLUDED.last_duration_ms, ops_job_heartbeats.last_duration_ms), + last_result = CASE + WHEN EXCLUDED.last_success_at IS NOT NULL THEN COALESCE(EXCLUDED.last_result, ops_job_heartbeats.last_result) + ELSE ops_job_heartbeats.last_result + END, + updated_at = NOW()` + + _, err := r.db.ExecContext( + ctx, + q, + input.JobName, + opsNullTime(input.LastRunAt), + opsNullTime(input.LastSuccessAt), + opsNullTime(input.LastErrorAt), + opsNullString(input.LastError), + opsNullInt(input.LastDurationMs), + opsNullString(input.LastResult), + ) + return err +} + +func (r *opsRepository) ListJobHeartbeats(ctx context.Context) ([]*service.OpsJobHeartbeat, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + + q := ` +SELECT + job_name, + last_run_at, + last_success_at, + last_error_at, + last_error, + last_duration_ms, + last_result, + updated_at +FROM ops_job_heartbeats +ORDER BY job_name ASC` + + rows, err := r.db.QueryContext(ctx, q) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]*service.OpsJobHeartbeat, 0, 8) + for rows.Next() { + var item service.OpsJobHeartbeat + var lastRun sql.NullTime + var lastSuccess sql.NullTime + var lastErrorAt sql.NullTime + var lastError sql.NullString + var lastDuration sql.NullInt64 + + var lastResult sql.NullString + + if err := rows.Scan( + &item.JobName, + &lastRun, + &lastSuccess, + &lastErrorAt, + &lastError, + &lastDuration, + &lastResult, + &item.UpdatedAt, + ); err != nil { + return nil, err + } + + if lastRun.Valid { + v := lastRun.Time + item.LastRunAt = &v + } + if lastSuccess.Valid { + v := lastSuccess.Time + item.LastSuccessAt = &v + } + if lastErrorAt.Valid { + v := lastErrorAt.Time + item.LastErrorAt = &v + } + if lastError.Valid { + v := lastError.String + item.LastError = &v + } + if lastDuration.Valid { + v := lastDuration.Int64 + 
item.LastDurationMs = &v + } + if lastResult.Valid { + v := lastResult.String + item.LastResult = &v + } + + out = append(out, &item) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + +func opsNullBool(v *bool) any { + if v == nil { + return sql.NullBool{} + } + return sql.NullBool{Bool: *v, Valid: true} +} + +func opsNullFloat64(v *float64) any { + if v == nil { + return sql.NullFloat64{} + } + return sql.NullFloat64{Float64: *v, Valid: true} +} + +func opsNullTime(v *time.Time) any { + if v == nil || v.IsZero() { + return sql.NullTime{} + } + return sql.NullTime{Time: *v, Valid: true} +} diff --git a/backend/internal/repository/ops_repo_preagg.go b/backend/internal/repository/ops_repo_preagg.go new file mode 100644 index 00000000..ad94e13f --- /dev/null +++ b/backend/internal/repository/ops_repo_preagg.go @@ -0,0 +1,363 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "time" +) + +func (r *opsRepository) UpsertHourlyMetrics(ctx context.Context, startTime, endTime time.Time) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if startTime.IsZero() || endTime.IsZero() || !endTime.After(startTime) { + return nil + } + + start := startTime.UTC() + end := endTime.UTC() + + // NOTE: + // - We aggregate usage_logs + ops_error_logs into ops_metrics_hourly. + // - We emit three dimension granularities via GROUPING SETS: + // 1) overall: (bucket_start) + // 2) platform: (bucket_start, platform) + // 3) group: (bucket_start, platform, group_id) + // + // IMPORTANT: Postgres UNIQUE treats NULLs as distinct, so the table uses a COALESCE-based + // unique index; our ON CONFLICT target must match that expression set. 
+ q := ` +WITH usage_base AS ( + SELECT + date_trunc('hour', ul.created_at AT TIME ZONE 'UTC') AT TIME ZONE 'UTC' AS bucket_start, + g.platform AS platform, + ul.group_id AS group_id, + ul.duration_ms AS duration_ms, + ul.first_token_ms AS first_token_ms, + (ul.input_tokens + ul.output_tokens + ul.cache_creation_tokens + ul.cache_read_tokens) AS tokens + FROM usage_logs ul + JOIN groups g ON g.id = ul.group_id + WHERE ul.created_at >= $1 AND ul.created_at < $2 +), +usage_agg AS ( + SELECT + bucket_start, + CASE WHEN GROUPING(platform) = 1 THEN NULL ELSE platform END AS platform, + CASE WHEN GROUPING(group_id) = 1 THEN NULL ELSE group_id END AS group_id, + COUNT(*) AS success_count, + COALESCE(SUM(tokens), 0) AS token_consumed, + + percentile_cont(0.50) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p50_ms, + percentile_cont(0.90) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p90_ms, + percentile_cont(0.95) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p95_ms, + percentile_cont(0.99) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p99_ms, + AVG(duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_avg_ms, + MAX(duration_ms) AS duration_max_ms, + + percentile_cont(0.50) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p50_ms, + percentile_cont(0.90) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p90_ms, + percentile_cont(0.95) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p95_ms, + percentile_cont(0.99) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p99_ms, + AVG(first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_avg_ms, + MAX(first_token_ms) AS ttft_max_ms + FROM usage_base + GROUP BY GROUPING SETS ( + (bucket_start), + 
(bucket_start, platform), + (bucket_start, platform, group_id) + ) +), +error_base AS ( + SELECT + date_trunc('hour', created_at AT TIME ZONE 'UTC') AT TIME ZONE 'UTC' AS bucket_start, + -- platform is NULL for some early-phase errors (e.g. before routing); map to a sentinel + -- value so platform-level GROUPING SETS don't collide with the overall (platform=NULL) row. + COALESCE(platform, 'unknown') AS platform, + group_id AS group_id, + is_business_limited AS is_business_limited, + error_owner AS error_owner, + status_code AS client_status_code, + COALESCE(upstream_status_code, status_code, 0) AS effective_status_code + FROM ops_error_logs + -- Exclude count_tokens requests from error metrics as they are informational probes + WHERE created_at >= $1 AND created_at < $2 + AND is_count_tokens = FALSE +), +error_agg AS ( + SELECT + bucket_start, + CASE WHEN GROUPING(platform) = 1 THEN NULL ELSE platform END AS platform, + CASE WHEN GROUPING(group_id) = 1 THEN NULL ELSE group_id END AS group_id, + COUNT(*) FILTER (WHERE COALESCE(client_status_code, 0) >= 400) AS error_count_total, + COUNT(*) FILTER (WHERE COALESCE(client_status_code, 0) >= 400 AND is_business_limited) AS business_limited_count, + COUNT(*) FILTER (WHERE COALESCE(client_status_code, 0) >= 400 AND NOT is_business_limited) AS error_count_sla, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(effective_status_code, 0) NOT IN (429, 529)) AS upstream_error_count_excl_429_529, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(effective_status_code, 0) = 429) AS upstream_429_count, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(effective_status_code, 0) = 529) AS upstream_529_count + FROM error_base + GROUP BY GROUPING SETS ( + (bucket_start), + (bucket_start, platform), + (bucket_start, platform, group_id) + ) + HAVING GROUPING(group_id) = 1 OR group_id IS NOT NULL +), +combined AS ( + 
SELECT + COALESCE(u.bucket_start, e.bucket_start) AS bucket_start, + COALESCE(u.platform, e.platform) AS platform, + COALESCE(u.group_id, e.group_id) AS group_id, + + COALESCE(u.success_count, 0) AS success_count, + COALESCE(e.error_count_total, 0) AS error_count_total, + COALESCE(e.business_limited_count, 0) AS business_limited_count, + COALESCE(e.error_count_sla, 0) AS error_count_sla, + COALESCE(e.upstream_error_count_excl_429_529, 0) AS upstream_error_count_excl_429_529, + COALESCE(e.upstream_429_count, 0) AS upstream_429_count, + COALESCE(e.upstream_529_count, 0) AS upstream_529_count, + + COALESCE(u.token_consumed, 0) AS token_consumed, + + u.duration_p50_ms, + u.duration_p90_ms, + u.duration_p95_ms, + u.duration_p99_ms, + u.duration_avg_ms, + u.duration_max_ms, + + u.ttft_p50_ms, + u.ttft_p90_ms, + u.ttft_p95_ms, + u.ttft_p99_ms, + u.ttft_avg_ms, + u.ttft_max_ms + FROM usage_agg u + FULL OUTER JOIN error_agg e + ON u.bucket_start = e.bucket_start + AND COALESCE(u.platform, '') = COALESCE(e.platform, '') + AND COALESCE(u.group_id, 0) = COALESCE(e.group_id, 0) +) +INSERT INTO ops_metrics_hourly ( + bucket_start, + platform, + group_id, + success_count, + error_count_total, + business_limited_count, + error_count_sla, + upstream_error_count_excl_429_529, + upstream_429_count, + upstream_529_count, + token_consumed, + duration_p50_ms, + duration_p90_ms, + duration_p95_ms, + duration_p99_ms, + duration_avg_ms, + duration_max_ms, + ttft_p50_ms, + ttft_p90_ms, + ttft_p95_ms, + ttft_p99_ms, + ttft_avg_ms, + ttft_max_ms, + computed_at +) +SELECT + bucket_start, + NULLIF(platform, '') AS platform, + group_id, + success_count, + error_count_total, + business_limited_count, + error_count_sla, + upstream_error_count_excl_429_529, + upstream_429_count, + upstream_529_count, + token_consumed, + duration_p50_ms::int, + duration_p90_ms::int, + duration_p95_ms::int, + duration_p99_ms::int, + duration_avg_ms, + duration_max_ms::int, + ttft_p50_ms::int, + ttft_p90_ms::int, + 
ttft_p95_ms::int, + ttft_p99_ms::int, + ttft_avg_ms, + ttft_max_ms::int, + NOW() +FROM combined +WHERE bucket_start IS NOT NULL + AND (platform IS NULL OR platform <> '') +ON CONFLICT (bucket_start, COALESCE(platform, ''), COALESCE(group_id, 0)) DO UPDATE SET + success_count = EXCLUDED.success_count, + error_count_total = EXCLUDED.error_count_total, + business_limited_count = EXCLUDED.business_limited_count, + error_count_sla = EXCLUDED.error_count_sla, + upstream_error_count_excl_429_529 = EXCLUDED.upstream_error_count_excl_429_529, + upstream_429_count = EXCLUDED.upstream_429_count, + upstream_529_count = EXCLUDED.upstream_529_count, + token_consumed = EXCLUDED.token_consumed, + + duration_p50_ms = EXCLUDED.duration_p50_ms, + duration_p90_ms = EXCLUDED.duration_p90_ms, + duration_p95_ms = EXCLUDED.duration_p95_ms, + duration_p99_ms = EXCLUDED.duration_p99_ms, + duration_avg_ms = EXCLUDED.duration_avg_ms, + duration_max_ms = EXCLUDED.duration_max_ms, + + ttft_p50_ms = EXCLUDED.ttft_p50_ms, + ttft_p90_ms = EXCLUDED.ttft_p90_ms, + ttft_p95_ms = EXCLUDED.ttft_p95_ms, + ttft_p99_ms = EXCLUDED.ttft_p99_ms, + ttft_avg_ms = EXCLUDED.ttft_avg_ms, + ttft_max_ms = EXCLUDED.ttft_max_ms, + + computed_at = NOW() +` + + _, err := r.db.ExecContext(ctx, q, start, end) + return err +} + +func (r *opsRepository) UpsertDailyMetrics(ctx context.Context, startTime, endTime time.Time) error { + if r == nil || r.db == nil { + return fmt.Errorf("nil ops repository") + } + if startTime.IsZero() || endTime.IsZero() || !endTime.After(startTime) { + return nil + } + + start := startTime.UTC() + end := endTime.UTC() + + q := ` +INSERT INTO ops_metrics_daily ( + bucket_date, + platform, + group_id, + success_count, + error_count_total, + business_limited_count, + error_count_sla, + upstream_error_count_excl_429_529, + upstream_429_count, + upstream_529_count, + token_consumed, + duration_p50_ms, + duration_p90_ms, + duration_p95_ms, + duration_p99_ms, + duration_avg_ms, + duration_max_ms, + 
ttft_p50_ms, + ttft_p90_ms, + ttft_p95_ms, + ttft_p99_ms, + ttft_avg_ms, + ttft_max_ms, + computed_at +) +SELECT + (bucket_start AT TIME ZONE 'UTC')::date AS bucket_date, + platform, + group_id, + + COALESCE(SUM(success_count), 0) AS success_count, + COALESCE(SUM(error_count_total), 0) AS error_count_total, + COALESCE(SUM(business_limited_count), 0) AS business_limited_count, + COALESCE(SUM(error_count_sla), 0) AS error_count_sla, + COALESCE(SUM(upstream_error_count_excl_429_529), 0) AS upstream_error_count_excl_429_529, + COALESCE(SUM(upstream_429_count), 0) AS upstream_429_count, + COALESCE(SUM(upstream_529_count), 0) AS upstream_529_count, + COALESCE(SUM(token_consumed), 0) AS token_consumed, + + -- Approximation: weighted average for p50/p90, max for p95/p99 (conservative tail). + ROUND(SUM(duration_p50_ms::double precision * success_count) FILTER (WHERE duration_p50_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE duration_p50_ms IS NOT NULL), 0))::int AS duration_p50_ms, + ROUND(SUM(duration_p90_ms::double precision * success_count) FILTER (WHERE duration_p90_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE duration_p90_ms IS NOT NULL), 0))::int AS duration_p90_ms, + MAX(duration_p95_ms) AS duration_p95_ms, + MAX(duration_p99_ms) AS duration_p99_ms, + SUM(duration_avg_ms * success_count) FILTER (WHERE duration_avg_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE duration_avg_ms IS NOT NULL), 0) AS duration_avg_ms, + MAX(duration_max_ms) AS duration_max_ms, + + ROUND(SUM(ttft_p50_ms::double precision * success_count) FILTER (WHERE ttft_p50_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE ttft_p50_ms IS NOT NULL), 0))::int AS ttft_p50_ms, + ROUND(SUM(ttft_p90_ms::double precision * success_count) FILTER (WHERE ttft_p90_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE ttft_p90_ms IS NOT NULL), 0))::int AS ttft_p90_ms, + MAX(ttft_p95_ms) AS ttft_p95_ms, + MAX(ttft_p99_ms) AS ttft_p99_ms, + SUM(ttft_avg_ms * 
success_count) FILTER (WHERE ttft_avg_ms IS NOT NULL) + / NULLIF(SUM(success_count) FILTER (WHERE ttft_avg_ms IS NOT NULL), 0) AS ttft_avg_ms, + MAX(ttft_max_ms) AS ttft_max_ms, + + NOW() +FROM ops_metrics_hourly +WHERE bucket_start >= $1 AND bucket_start < $2 +GROUP BY 1, 2, 3 +ON CONFLICT (bucket_date, COALESCE(platform, ''), COALESCE(group_id, 0)) DO UPDATE SET + success_count = EXCLUDED.success_count, + error_count_total = EXCLUDED.error_count_total, + business_limited_count = EXCLUDED.business_limited_count, + error_count_sla = EXCLUDED.error_count_sla, + upstream_error_count_excl_429_529 = EXCLUDED.upstream_error_count_excl_429_529, + upstream_429_count = EXCLUDED.upstream_429_count, + upstream_529_count = EXCLUDED.upstream_529_count, + token_consumed = EXCLUDED.token_consumed, + + duration_p50_ms = EXCLUDED.duration_p50_ms, + duration_p90_ms = EXCLUDED.duration_p90_ms, + duration_p95_ms = EXCLUDED.duration_p95_ms, + duration_p99_ms = EXCLUDED.duration_p99_ms, + duration_avg_ms = EXCLUDED.duration_avg_ms, + duration_max_ms = EXCLUDED.duration_max_ms, + + ttft_p50_ms = EXCLUDED.ttft_p50_ms, + ttft_p90_ms = EXCLUDED.ttft_p90_ms, + ttft_p95_ms = EXCLUDED.ttft_p95_ms, + ttft_p99_ms = EXCLUDED.ttft_p99_ms, + ttft_avg_ms = EXCLUDED.ttft_avg_ms, + ttft_max_ms = EXCLUDED.ttft_max_ms, + + computed_at = NOW() +` + + _, err := r.db.ExecContext(ctx, q, start, end) + return err +} + +func (r *opsRepository) GetLatestHourlyBucketStart(ctx context.Context) (time.Time, bool, error) { + if r == nil || r.db == nil { + return time.Time{}, false, fmt.Errorf("nil ops repository") + } + + var value sql.NullTime + if err := r.db.QueryRowContext(ctx, `SELECT MAX(bucket_start) FROM ops_metrics_hourly`).Scan(&value); err != nil { + return time.Time{}, false, err + } + if !value.Valid { + return time.Time{}, false, nil + } + return value.Time.UTC(), true, nil +} + +func (r *opsRepository) GetLatestDailyBucketDate(ctx context.Context) (time.Time, bool, error) { + if r == nil || r.db == 
nil { + return time.Time{}, false, fmt.Errorf("nil ops repository") + } + + var value sql.NullTime + if err := r.db.QueryRowContext(ctx, `SELECT MAX(bucket_date) FROM ops_metrics_daily`).Scan(&value); err != nil { + return time.Time{}, false, err + } + if !value.Valid { + return time.Time{}, false, nil + } + t := value.Time.UTC() + return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC), true, nil +} diff --git a/backend/internal/repository/ops_repo_realtime_traffic.go b/backend/internal/repository/ops_repo_realtime_traffic.go new file mode 100644 index 00000000..a9b0b929 --- /dev/null +++ b/backend/internal/repository/ops_repo_realtime_traffic.go @@ -0,0 +1,129 @@ +package repository + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) GetRealtimeTrafficSummary(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsRealtimeTrafficSummary, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + if start.After(end) { + return nil, fmt.Errorf("start_time must be <= end_time") + } + + window := end.Sub(start) + if window <= 0 { + return nil, fmt.Errorf("invalid time window") + } + if window > time.Hour { + return nil, fmt.Errorf("window too large") + } + + usageJoin, usageWhere, usageArgs, next := buildUsageWhere(filter, start, end, 1) + errorWhere, errorArgs, _ := buildErrorWhere(filter, start, end, next) + + q := ` +WITH usage_buckets AS ( + SELECT + date_trunc('minute', ul.created_at) AS bucket, + COALESCE(COUNT(*), 0) AS success_count, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_sum + FROM usage_logs ul + ` + 
usageJoin + ` + ` + usageWhere + ` + GROUP BY 1 +), +error_buckets AS ( + SELECT + date_trunc('minute', created_at) AS bucket, + COALESCE(COUNT(*), 0) AS error_count + FROM ops_error_logs + ` + errorWhere + ` + AND COALESCE(status_code, 0) >= 400 + GROUP BY 1 +), +combined AS ( + SELECT + COALESCE(u.bucket, e.bucket) AS bucket, + COALESCE(u.success_count, 0) AS success_count, + COALESCE(u.token_sum, 0) AS token_sum, + COALESCE(e.error_count, 0) AS error_count, + COALESCE(u.success_count, 0) + COALESCE(e.error_count, 0) AS request_total + FROM usage_buckets u + FULL OUTER JOIN error_buckets e ON u.bucket = e.bucket +) +SELECT + COALESCE(SUM(success_count), 0) AS success_total, + COALESCE(SUM(error_count), 0) AS error_total, + COALESCE(SUM(token_sum), 0) AS token_total, + COALESCE(MAX(request_total), 0) AS peak_requests_per_min, + COALESCE(MAX(token_sum), 0) AS peak_tokens_per_min +FROM combined` + + args := append(usageArgs, errorArgs...) + var successCount int64 + var errorTotal int64 + var tokenConsumed int64 + var peakRequestsPerMin int64 + var peakTokensPerMin int64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan( + &successCount, + &errorTotal, + &tokenConsumed, + &peakRequestsPerMin, + &peakTokensPerMin, + ); err != nil { + return nil, err + } + + windowSeconds := window.Seconds() + if windowSeconds <= 0 { + windowSeconds = 1 + } + + requestCountTotal := successCount + errorTotal + qpsAvg := roundTo1DP(float64(requestCountTotal) / windowSeconds) + tpsAvg := roundTo1DP(float64(tokenConsumed) / windowSeconds) + + // Keep "current" consistent with the dashboard overview semantics: last 1 minute. + // This remains "within the selected window" since end=start+window. 
+ qpsCurrent, tpsCurrent, err := r.queryCurrentRates(ctx, filter, end) + if err != nil { + return nil, err + } + + qpsPeak := roundTo1DP(float64(peakRequestsPerMin) / 60.0) + tpsPeak := roundTo1DP(float64(peakTokensPerMin) / 60.0) + + return &service.OpsRealtimeTrafficSummary{ + StartTime: start, + EndTime: end, + Platform: strings.TrimSpace(filter.Platform), + GroupID: filter.GroupID, + QPS: service.OpsRateSummary{ + Current: qpsCurrent, + Peak: qpsPeak, + Avg: qpsAvg, + }, + TPS: service.OpsRateSummary{ + Current: tpsCurrent, + Peak: tpsPeak, + Avg: tpsAvg, + }, + }, nil +} diff --git a/backend/internal/repository/ops_repo_request_details.go b/backend/internal/repository/ops_repo_request_details.go new file mode 100644 index 00000000..d8d5d111 --- /dev/null +++ b/backend/internal/repository/ops_repo_request_details.go @@ -0,0 +1,286 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) ListRequestDetails(ctx context.Context, filter *service.OpsRequestDetailFilter) ([]*service.OpsRequestDetail, int64, error) { + if r == nil || r.db == nil { + return nil, 0, fmt.Errorf("nil ops repository") + } + + page, pageSize, startTime, endTime := filter.Normalize() + offset := (page - 1) * pageSize + + conditions := make([]string, 0, 16) + args := make([]any, 0, 24) + + // Placeholders $1/$2 reserved for time window inside the CTE. + args = append(args, startTime.UTC(), endTime.UTC()) + + addCondition := func(condition string, values ...any) { + conditions = append(conditions, condition) + args = append(args, values...) 
+ } + + if filter != nil { + if kind := strings.TrimSpace(strings.ToLower(filter.Kind)); kind != "" && kind != "all" { + if kind != string(service.OpsRequestKindSuccess) && kind != string(service.OpsRequestKindError) { + return nil, 0, fmt.Errorf("invalid kind") + } + addCondition(fmt.Sprintf("kind = $%d", len(args)+1), kind) + } + + if platform := strings.TrimSpace(strings.ToLower(filter.Platform)); platform != "" { + addCondition(fmt.Sprintf("platform = $%d", len(args)+1), platform) + } + if filter.GroupID != nil && *filter.GroupID > 0 { + addCondition(fmt.Sprintf("group_id = $%d", len(args)+1), *filter.GroupID) + } + + if filter.UserID != nil && *filter.UserID > 0 { + addCondition(fmt.Sprintf("user_id = $%d", len(args)+1), *filter.UserID) + } + if filter.APIKeyID != nil && *filter.APIKeyID > 0 { + addCondition(fmt.Sprintf("api_key_id = $%d", len(args)+1), *filter.APIKeyID) + } + if filter.AccountID != nil && *filter.AccountID > 0 { + addCondition(fmt.Sprintf("account_id = $%d", len(args)+1), *filter.AccountID) + } + + if model := strings.TrimSpace(filter.Model); model != "" { + addCondition(fmt.Sprintf("model = $%d", len(args)+1), model) + } + if requestID := strings.TrimSpace(filter.RequestID); requestID != "" { + addCondition(fmt.Sprintf("request_id = $%d", len(args)+1), requestID) + } + if q := strings.TrimSpace(filter.Query); q != "" { + like := "%" + strings.ToLower(q) + "%" + startIdx := len(args) + 1 + addCondition( + fmt.Sprintf("(LOWER(COALESCE(request_id,'')) LIKE $%d OR LOWER(COALESCE(model,'')) LIKE $%d OR LOWER(COALESCE(message,'')) LIKE $%d)", + startIdx, startIdx+1, startIdx+2, + ), + like, like, like, + ) + } + + if filter.MinDurationMs != nil { + addCondition(fmt.Sprintf("duration_ms >= $%d", len(args)+1), *filter.MinDurationMs) + } + if filter.MaxDurationMs != nil { + addCondition(fmt.Sprintf("duration_ms <= $%d", len(args)+1), *filter.MaxDurationMs) + } + } + + where := "" + if len(conditions) > 0 { + where = "WHERE " + 
strings.Join(conditions, " AND ") + } + + cte := ` +WITH combined AS ( + SELECT + 'success'::TEXT AS kind, + ul.created_at AS created_at, + ul.request_id AS request_id, + COALESCE(NULLIF(g.platform, ''), NULLIF(a.platform, ''), '') AS platform, + ul.model AS model, + ul.duration_ms AS duration_ms, + NULL::INT AS status_code, + NULL::BIGINT AS error_id, + NULL::TEXT AS phase, + NULL::TEXT AS severity, + NULL::TEXT AS message, + ul.user_id AS user_id, + ul.api_key_id AS api_key_id, + ul.account_id AS account_id, + ul.group_id AS group_id, + ul.stream AS stream + FROM usage_logs ul + LEFT JOIN groups g ON g.id = ul.group_id + LEFT JOIN accounts a ON a.id = ul.account_id + WHERE ul.created_at >= $1 AND ul.created_at < $2 + + UNION ALL + + SELECT + 'error'::TEXT AS kind, + o.created_at AS created_at, + COALESCE(NULLIF(o.request_id,''), NULLIF(o.client_request_id,''), '') AS request_id, + COALESCE(NULLIF(o.platform, ''), NULLIF(g.platform, ''), NULLIF(a.platform, ''), '') AS platform, + o.model AS model, + o.duration_ms AS duration_ms, + o.status_code AS status_code, + o.id AS error_id, + o.error_phase AS phase, + o.severity AS severity, + o.error_message AS message, + o.user_id AS user_id, + o.api_key_id AS api_key_id, + o.account_id AS account_id, + o.group_id AS group_id, + o.stream AS stream + FROM ops_error_logs o + LEFT JOIN groups g ON g.id = o.group_id + LEFT JOIN accounts a ON a.id = o.account_id + WHERE o.created_at >= $1 AND o.created_at < $2 + AND COALESCE(o.status_code, 0) >= 400 +) +` + + countQuery := fmt.Sprintf(`%s SELECT COUNT(1) FROM combined %s`, cte, where) + var total int64 + if err := r.db.QueryRowContext(ctx, countQuery, args...).Scan(&total); err != nil { + if err == sql.ErrNoRows { + total = 0 + } else { + return nil, 0, err + } + } + + sort := "ORDER BY created_at DESC" + if filter != nil { + switch strings.TrimSpace(strings.ToLower(filter.Sort)) { + case "", "created_at_desc": + // default + case "duration_desc": + sort = "ORDER BY duration_ms 
DESC NULLS LAST, created_at DESC" + default: + return nil, 0, fmt.Errorf("invalid sort") + } + } + + listQuery := fmt.Sprintf(` +%s +SELECT + kind, + created_at, + request_id, + platform, + model, + duration_ms, + status_code, + error_id, + phase, + severity, + message, + user_id, + api_key_id, + account_id, + group_id, + stream +FROM combined +%s +%s +LIMIT $%d OFFSET $%d +`, cte, where, sort, len(args)+1, len(args)+2) + + listArgs := append(append([]any{}, args...), pageSize, offset) + rows, err := r.db.QueryContext(ctx, listQuery, listArgs...) + if err != nil { + return nil, 0, err + } + defer func() { _ = rows.Close() }() + + toIntPtr := func(v sql.NullInt64) *int { + if !v.Valid { + return nil + } + i := int(v.Int64) + return &i + } + toInt64Ptr := func(v sql.NullInt64) *int64 { + if !v.Valid { + return nil + } + i := v.Int64 + return &i + } + + out := make([]*service.OpsRequestDetail, 0, pageSize) + for rows.Next() { + var ( + kind string + createdAt time.Time + requestID sql.NullString + platform sql.NullString + model sql.NullString + + durationMs sql.NullInt64 + statusCode sql.NullInt64 + errorID sql.NullInt64 + + phase sql.NullString + severity sql.NullString + message sql.NullString + + userID sql.NullInt64 + apiKeyID sql.NullInt64 + accountID sql.NullInt64 + groupID sql.NullInt64 + + stream bool + ) + + if err := rows.Scan( + &kind, + &createdAt, + &requestID, + &platform, + &model, + &durationMs, + &statusCode, + &errorID, + &phase, + &severity, + &message, + &userID, + &apiKeyID, + &accountID, + &groupID, + &stream, + ); err != nil { + return nil, 0, err + } + + item := &service.OpsRequestDetail{ + Kind: service.OpsRequestKind(kind), + CreatedAt: createdAt, + RequestID: strings.TrimSpace(requestID.String), + Platform: strings.TrimSpace(platform.String), + Model: strings.TrimSpace(model.String), + + DurationMs: toIntPtr(durationMs), + StatusCode: toIntPtr(statusCode), + ErrorID: toInt64Ptr(errorID), + Phase: phase.String, + Severity: severity.String, + 
Message: message.String, + + UserID: toInt64Ptr(userID), + APIKeyID: toInt64Ptr(apiKeyID), + AccountID: toInt64Ptr(accountID), + GroupID: toInt64Ptr(groupID), + + Stream: stream, + } + + if item.Platform == "" { + item.Platform = "unknown" + } + + out = append(out, item) + } + if err := rows.Err(); err != nil { + return nil, 0, err + } + + return out, total, nil +} diff --git a/backend/internal/repository/ops_repo_trends.go b/backend/internal/repository/ops_repo_trends.go new file mode 100644 index 00000000..022d1187 --- /dev/null +++ b/backend/internal/repository/ops_repo_trends.go @@ -0,0 +1,573 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) GetThroughputTrend(ctx context.Context, filter *service.OpsDashboardFilter, bucketSeconds int) (*service.OpsThroughputTrendResponse, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + if bucketSeconds <= 0 { + bucketSeconds = 60 + } + if bucketSeconds != 60 && bucketSeconds != 300 && bucketSeconds != 3600 { + // Keep a small, predictable set of supported buckets for now. 
+ bucketSeconds = 60 + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + + usageJoin, usageWhere, usageArgs, next := buildUsageWhere(filter, start, end, 1) + errorWhere, errorArgs, _ := buildErrorWhere(filter, start, end, next) + + usageBucketExpr := opsBucketExprForUsage(bucketSeconds) + errorBucketExpr := opsBucketExprForError(bucketSeconds) + + q := ` +WITH usage_buckets AS ( + SELECT ` + usageBucketExpr + ` AS bucket, + COUNT(*) AS success_count, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_consumed + FROM usage_logs ul + ` + usageJoin + ` + ` + usageWhere + ` + GROUP BY 1 +), +error_buckets AS ( + SELECT ` + errorBucketExpr + ` AS bucket, + COUNT(*) AS error_count + FROM ops_error_logs + ` + errorWhere + ` + AND COALESCE(status_code, 0) >= 400 + GROUP BY 1 +), +combined AS ( + SELECT COALESCE(u.bucket, e.bucket) AS bucket, + COALESCE(u.success_count, 0) AS success_count, + COALESCE(e.error_count, 0) AS error_count, + COALESCE(u.token_consumed, 0) AS token_consumed + FROM usage_buckets u + FULL OUTER JOIN error_buckets e ON u.bucket = e.bucket +) +SELECT + bucket, + (success_count + error_count) AS request_count, + token_consumed +FROM combined +ORDER BY bucket ASC` + + args := append(usageArgs, errorArgs...) + + rows, err := r.db.QueryContext(ctx, q, args...) 
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + points := make([]*service.OpsThroughputTrendPoint, 0, 256) + for rows.Next() { + var bucket time.Time + var requests int64 + var tokens sql.NullInt64 + if err := rows.Scan(&bucket, &requests, &tokens); err != nil { + return nil, err + } + tokenConsumed := int64(0) + if tokens.Valid { + tokenConsumed = tokens.Int64 + } + + denom := float64(bucketSeconds) + if denom <= 0 { + denom = 60 + } + qps := roundTo1DP(float64(requests) / denom) + tps := roundTo1DP(float64(tokenConsumed) / denom) + + points = append(points, &service.OpsThroughputTrendPoint{ + BucketStart: bucket.UTC(), + RequestCount: requests, + TokenConsumed: tokenConsumed, + QPS: qps, + TPS: tps, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + + // Fill missing buckets with zeros so charts render continuous timelines. + points = fillOpsThroughputBuckets(start, end, bucketSeconds, points) + + var byPlatform []*service.OpsThroughputPlatformBreakdownItem + var topGroups []*service.OpsThroughputGroupBreakdownItem + + platform := "" + if filter != nil { + platform = strings.TrimSpace(strings.ToLower(filter.Platform)) + } + groupID := (*int64)(nil) + if filter != nil { + groupID = filter.GroupID + } + + // Drilldown helpers: + // - No platform/group: totals by platform + // - Platform selected but no group: top groups in that platform + if platform == "" && (groupID == nil || *groupID <= 0) { + items, err := r.getThroughputBreakdownByPlatform(ctx, start, end) + if err != nil { + return nil, err + } + byPlatform = items + } else if platform != "" && (groupID == nil || *groupID <= 0) { + items, err := r.getThroughputTopGroupsByPlatform(ctx, start, end, platform, 10) + if err != nil { + return nil, err + } + topGroups = items + } + + return &service.OpsThroughputTrendResponse{ + Bucket: opsBucketLabel(bucketSeconds), + Points: points, + + ByPlatform: byPlatform, + TopGroups: topGroups, + }, nil +} + +func (r 
*opsRepository) getThroughputBreakdownByPlatform(ctx context.Context, start, end time.Time) ([]*service.OpsThroughputPlatformBreakdownItem, error) { + q := ` +WITH usage_totals AS ( + SELECT COALESCE(NULLIF(g.platform,''), a.platform) AS platform, + COUNT(*) AS success_count, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_consumed + FROM usage_logs ul + LEFT JOIN groups g ON g.id = ul.group_id + LEFT JOIN accounts a ON a.id = ul.account_id + WHERE ul.created_at >= $1 AND ul.created_at < $2 + GROUP BY 1 +), +error_totals AS ( + SELECT platform, + COUNT(*) AS error_count + FROM ops_error_logs + WHERE created_at >= $1 AND created_at < $2 + AND COALESCE(status_code, 0) >= 400 + AND is_count_tokens = FALSE -- 排除 count_tokens 请求的错误 + GROUP BY 1 +), +combined AS ( + SELECT COALESCE(u.platform, e.platform) AS platform, + COALESCE(u.success_count, 0) AS success_count, + COALESCE(e.error_count, 0) AS error_count, + COALESCE(u.token_consumed, 0) AS token_consumed + FROM usage_totals u + FULL OUTER JOIN error_totals e ON u.platform = e.platform +) +SELECT platform, (success_count + error_count) AS request_count, token_consumed +FROM combined +WHERE platform IS NOT NULL AND platform <> '' +ORDER BY request_count DESC` + + rows, err := r.db.QueryContext(ctx, q, start, end) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + items := make([]*service.OpsThroughputPlatformBreakdownItem, 0, 8) + for rows.Next() { + var platform string + var requests int64 + var tokens sql.NullInt64 + if err := rows.Scan(&platform, &requests, &tokens); err != nil { + return nil, err + } + tokenConsumed := int64(0) + if tokens.Valid { + tokenConsumed = tokens.Int64 + } + items = append(items, &service.OpsThroughputPlatformBreakdownItem{ + Platform: platform, + RequestCount: requests, + TokenConsumed: tokenConsumed, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +func (r 
*opsRepository) getThroughputTopGroupsByPlatform(ctx context.Context, start, end time.Time, platform string, limit int) ([]*service.OpsThroughputGroupBreakdownItem, error) { + if strings.TrimSpace(platform) == "" { + return nil, nil + } + if limit <= 0 || limit > 100 { + limit = 10 + } + + q := ` +WITH usage_totals AS ( + SELECT ul.group_id AS group_id, + g.name AS group_name, + COUNT(*) AS success_count, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_consumed + FROM usage_logs ul + JOIN groups g ON g.id = ul.group_id + WHERE ul.created_at >= $1 AND ul.created_at < $2 + AND g.platform = $3 + GROUP BY 1, 2 +), +error_totals AS ( + SELECT group_id, + COUNT(*) AS error_count + FROM ops_error_logs + WHERE created_at >= $1 AND created_at < $2 + AND platform = $3 + AND group_id IS NOT NULL + AND COALESCE(status_code, 0) >= 400 + AND is_count_tokens = FALSE -- 排除 count_tokens 请求的错误 + GROUP BY 1 +), +combined AS ( + SELECT COALESCE(u.group_id, e.group_id) AS group_id, + COALESCE(u.group_name, g2.name, '') AS group_name, + COALESCE(u.success_count, 0) AS success_count, + COALESCE(e.error_count, 0) AS error_count, + COALESCE(u.token_consumed, 0) AS token_consumed + FROM usage_totals u + FULL OUTER JOIN error_totals e ON u.group_id = e.group_id + LEFT JOIN groups g2 ON g2.id = COALESCE(u.group_id, e.group_id) +) +SELECT group_id, group_name, (success_count + error_count) AS request_count, token_consumed +FROM combined +WHERE group_id IS NOT NULL +ORDER BY request_count DESC +LIMIT $4` + + rows, err := r.db.QueryContext(ctx, q, start, end, platform, limit) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + items := make([]*service.OpsThroughputGroupBreakdownItem, 0, limit) + for rows.Next() { + var groupID int64 + var groupName sql.NullString + var requests int64 + var tokens sql.NullInt64 + if err := rows.Scan(&groupID, &groupName, &requests, &tokens); err != nil { + return nil, err + } + 
tokenConsumed := int64(0) + if tokens.Valid { + tokenConsumed = tokens.Int64 + } + name := "" + if groupName.Valid { + name = groupName.String + } + items = append(items, &service.OpsThroughputGroupBreakdownItem{ + GroupID: groupID, + GroupName: name, + RequestCount: requests, + TokenConsumed: tokenConsumed, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +func opsBucketExprForUsage(bucketSeconds int) string { + switch bucketSeconds { + case 3600: + return "date_trunc('hour', ul.created_at)" + case 300: + // 5-minute buckets in UTC. + return "to_timestamp(floor(extract(epoch from ul.created_at) / 300) * 300)" + default: + return "date_trunc('minute', ul.created_at)" + } +} + +func opsBucketExprForError(bucketSeconds int) string { + switch bucketSeconds { + case 3600: + return "date_trunc('hour', created_at)" + case 300: + return "to_timestamp(floor(extract(epoch from created_at) / 300) * 300)" + default: + return "date_trunc('minute', created_at)" + } +} + +func opsBucketLabel(bucketSeconds int) string { + if bucketSeconds <= 0 { + return "1m" + } + if bucketSeconds%3600 == 0 { + h := bucketSeconds / 3600 + if h <= 0 { + h = 1 + } + return fmt.Sprintf("%dh", h) + } + m := bucketSeconds / 60 + if m <= 0 { + m = 1 + } + return fmt.Sprintf("%dm", m) +} + +func opsFloorToBucketStart(t time.Time, bucketSeconds int) time.Time { + t = t.UTC() + if bucketSeconds <= 0 { + bucketSeconds = 60 + } + secs := t.Unix() + floored := secs - (secs % int64(bucketSeconds)) + return time.Unix(floored, 0).UTC() +} + +func fillOpsThroughputBuckets(start, end time.Time, bucketSeconds int, points []*service.OpsThroughputTrendPoint) []*service.OpsThroughputTrendPoint { + if bucketSeconds <= 0 { + bucketSeconds = 60 + } + if !start.Before(end) { + return points + } + + endMinus := end.Add(-time.Nanosecond) + if endMinus.Before(start) { + return points + } + + first := opsFloorToBucketStart(start, bucketSeconds) + last := 
opsFloorToBucketStart(endMinus, bucketSeconds) + step := time.Duration(bucketSeconds) * time.Second + + existing := make(map[int64]*service.OpsThroughputTrendPoint, len(points)) + for _, p := range points { + if p == nil { + continue + } + existing[p.BucketStart.UTC().Unix()] = p + } + + out := make([]*service.OpsThroughputTrendPoint, 0, int(last.Sub(first)/step)+1) + for cursor := first; !cursor.After(last); cursor = cursor.Add(step) { + if p, ok := existing[cursor.Unix()]; ok && p != nil { + out = append(out, p) + continue + } + out = append(out, &service.OpsThroughputTrendPoint{ + BucketStart: cursor, + RequestCount: 0, + TokenConsumed: 0, + QPS: 0, + TPS: 0, + }) + } + return out +} + +func (r *opsRepository) GetErrorTrend(ctx context.Context, filter *service.OpsDashboardFilter, bucketSeconds int) (*service.OpsErrorTrendResponse, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + if bucketSeconds <= 0 { + bucketSeconds = 60 + } + if bucketSeconds != 60 && bucketSeconds != 300 && bucketSeconds != 3600 { + bucketSeconds = 60 + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + where, args, _ := buildErrorWhere(filter, start, end, 1) + bucketExpr := opsBucketExprForError(bucketSeconds) + + q := ` +SELECT + ` + bucketExpr + ` AS bucket, + COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400) AS error_total, + COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND is_business_limited) AS business_limited, + COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND NOT is_business_limited) AS error_sla, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)) AS upstream_excl, + COUNT(*) FILTER (WHERE error_owner = 'provider' 
AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 429) AS upstream_429, + COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 529) AS upstream_529 +FROM ops_error_logs +` + where + ` +GROUP BY 1 +ORDER BY 1 ASC` + + rows, err := r.db.QueryContext(ctx, q, args...) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + points := make([]*service.OpsErrorTrendPoint, 0, 256) + for rows.Next() { + var bucket time.Time + var total, businessLimited, sla, upstreamExcl, upstream429, upstream529 int64 + if err := rows.Scan(&bucket, &total, &businessLimited, &sla, &upstreamExcl, &upstream429, &upstream529); err != nil { + return nil, err + } + points = append(points, &service.OpsErrorTrendPoint{ + BucketStart: bucket.UTC(), + + ErrorCountTotal: total, + BusinessLimitedCount: businessLimited, + ErrorCountSLA: sla, + + UpstreamErrorCountExcl429529: upstreamExcl, + Upstream429Count: upstream429, + Upstream529Count: upstream529, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + + points = fillOpsErrorTrendBuckets(start, end, bucketSeconds, points) + + return &service.OpsErrorTrendResponse{ + Bucket: opsBucketLabel(bucketSeconds), + Points: points, + }, nil +} + +func fillOpsErrorTrendBuckets(start, end time.Time, bucketSeconds int, points []*service.OpsErrorTrendPoint) []*service.OpsErrorTrendPoint { + if bucketSeconds <= 0 { + bucketSeconds = 60 + } + if !start.Before(end) { + return points + } + + endMinus := end.Add(-time.Nanosecond) + if endMinus.Before(start) { + return points + } + + first := opsFloorToBucketStart(start, bucketSeconds) + last := opsFloorToBucketStart(endMinus, bucketSeconds) + step := time.Duration(bucketSeconds) * time.Second + + existing := make(map[int64]*service.OpsErrorTrendPoint, len(points)) + for _, p := range points { + if p == nil { + continue + } + existing[p.BucketStart.UTC().Unix()] = p + } + 
+ out := make([]*service.OpsErrorTrendPoint, 0, int(last.Sub(first)/step)+1) + for cursor := first; !cursor.After(last); cursor = cursor.Add(step) { + if p, ok := existing[cursor.Unix()]; ok && p != nil { + out = append(out, p) + continue + } + out = append(out, &service.OpsErrorTrendPoint{ + BucketStart: cursor, + + ErrorCountTotal: 0, + BusinessLimitedCount: 0, + ErrorCountSLA: 0, + + UpstreamErrorCountExcl429529: 0, + Upstream429Count: 0, + Upstream529Count: 0, + }) + } + return out +} + +func (r *opsRepository) GetErrorDistribution(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsErrorDistributionResponse, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + where, args, _ := buildErrorWhere(filter, start, end, 1) + + q := ` +SELECT + COALESCE(upstream_status_code, status_code, 0) AS status_code, + COUNT(*) AS total, + COUNT(*) FILTER (WHERE NOT is_business_limited) AS sla, + COUNT(*) FILTER (WHERE is_business_limited) AS business_limited +FROM ops_error_logs +` + where + ` + AND COALESCE(status_code, 0) >= 400 +GROUP BY 1 +ORDER BY total DESC +LIMIT 20` + + rows, err := r.db.QueryContext(ctx, q, args...) 
+ if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + items := make([]*service.OpsErrorDistributionItem, 0, 16) + var total int64 + for rows.Next() { + var statusCode int + var cntTotal, cntSLA, cntBiz int64 + if err := rows.Scan(&statusCode, &cntTotal, &cntSLA, &cntBiz); err != nil { + return nil, err + } + total += cntTotal + items = append(items, &service.OpsErrorDistributionItem{ + StatusCode: statusCode, + Total: cntTotal, + SLA: cntSLA, + BusinessLimited: cntBiz, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + + return &service.OpsErrorDistributionResponse{ + Total: total, + Items: items, + }, nil +} diff --git a/backend/internal/repository/ops_repo_window_stats.go b/backend/internal/repository/ops_repo_window_stats.go new file mode 100644 index 00000000..8221c473 --- /dev/null +++ b/backend/internal/repository/ops_repo_window_stats.go @@ -0,0 +1,50 @@ +package repository + +import ( + "context" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +func (r *opsRepository) GetWindowStats(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsWindowStats, error) { + if r == nil || r.db == nil { + return nil, fmt.Errorf("nil ops repository") + } + if filter == nil { + return nil, fmt.Errorf("nil filter") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, fmt.Errorf("start_time/end_time required") + } + + start := filter.StartTime.UTC() + end := filter.EndTime.UTC() + if start.After(end) { + return nil, fmt.Errorf("start_time must be <= end_time") + } + // Bound excessively large windows to prevent accidental heavy queries. 
+ if end.Sub(start) > 24*time.Hour { + return nil, fmt.Errorf("window too large") + } + + successCount, tokenConsumed, err := r.queryUsageCounts(ctx, filter, start, end) + if err != nil { + return nil, err + } + + errorTotal, _, _, _, _, _, err := r.queryErrorCounts(ctx, filter, start, end) + if err != nil { + return nil, err + } + + return &service.OpsWindowStats{ + StartTime: start, + EndTime: end, + + SuccessCount: successCount, + ErrorCountTotal: errorTotal, + TokenConsumed: tokenConsumed, + }, nil +} diff --git a/backend/internal/repository/promo_code_repo.go b/backend/internal/repository/promo_code_repo.go new file mode 100644 index 00000000..98b422e0 --- /dev/null +++ b/backend/internal/repository/promo_code_repo.go @@ -0,0 +1,273 @@ +package repository + +import ( + "context" + + dbent "github.com/Wei-Shaw/sub2api/ent" + "github.com/Wei-Shaw/sub2api/ent/promocode" + "github.com/Wei-Shaw/sub2api/ent/promocodeusage" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type promoCodeRepository struct { + client *dbent.Client +} + +func NewPromoCodeRepository(client *dbent.Client) service.PromoCodeRepository { + return &promoCodeRepository{client: client} +} + +func (r *promoCodeRepository) Create(ctx context.Context, code *service.PromoCode) error { + client := clientFromContext(ctx, r.client) + builder := client.PromoCode.Create(). + SetCode(code.Code). + SetBonusAmount(code.BonusAmount). + SetMaxUses(code.MaxUses). + SetUsedCount(code.UsedCount). + SetStatus(code.Status). + SetNotes(code.Notes) + + if code.ExpiresAt != nil { + builder.SetExpiresAt(*code.ExpiresAt) + } + + created, err := builder.Save(ctx) + if err != nil { + return err + } + + code.ID = created.ID + code.CreatedAt = created.CreatedAt + code.UpdatedAt = created.UpdatedAt + return nil +} + +func (r *promoCodeRepository) GetByID(ctx context.Context, id int64) (*service.PromoCode, error) { + m, err := r.client.PromoCode.Query(). 
+ Where(promocode.IDEQ(id)). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrPromoCodeNotFound + } + return nil, err + } + return promoCodeEntityToService(m), nil +} + +func (r *promoCodeRepository) GetByCode(ctx context.Context, code string) (*service.PromoCode, error) { + m, err := r.client.PromoCode.Query(). + Where(promocode.CodeEqualFold(code)). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrPromoCodeNotFound + } + return nil, err + } + return promoCodeEntityToService(m), nil +} + +func (r *promoCodeRepository) GetByCodeForUpdate(ctx context.Context, code string) (*service.PromoCode, error) { + client := clientFromContext(ctx, r.client) + m, err := client.PromoCode.Query(). + Where(promocode.CodeEqualFold(code)). + ForUpdate(). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, service.ErrPromoCodeNotFound + } + return nil, err + } + return promoCodeEntityToService(m), nil +} + +func (r *promoCodeRepository) Update(ctx context.Context, code *service.PromoCode) error { + client := clientFromContext(ctx, r.client) + builder := client.PromoCode.UpdateOneID(code.ID). + SetCode(code.Code). + SetBonusAmount(code.BonusAmount). + SetMaxUses(code.MaxUses). + SetUsedCount(code.UsedCount). + SetStatus(code.Status). 
+ SetNotes(code.Notes) + + if code.ExpiresAt != nil { + builder.SetExpiresAt(*code.ExpiresAt) + } else { + builder.ClearExpiresAt() + } + + updated, err := builder.Save(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return service.ErrPromoCodeNotFound + } + return err + } + + code.UpdatedAt = updated.UpdatedAt + return nil +} + +func (r *promoCodeRepository) Delete(ctx context.Context, id int64) error { + client := clientFromContext(ctx, r.client) + _, err := client.PromoCode.Delete().Where(promocode.IDEQ(id)).Exec(ctx) + return err +} + +func (r *promoCodeRepository) List(ctx context.Context, params pagination.PaginationParams) ([]service.PromoCode, *pagination.PaginationResult, error) { + return r.ListWithFilters(ctx, params, "", "") +} + +func (r *promoCodeRepository) ListWithFilters(ctx context.Context, params pagination.PaginationParams, status, search string) ([]service.PromoCode, *pagination.PaginationResult, error) { + q := r.client.PromoCode.Query() + + if status != "" { + q = q.Where(promocode.StatusEQ(status)) + } + if search != "" { + q = q.Where(promocode.CodeContainsFold(search)) + } + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + codes, err := q. + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(promocode.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + outCodes := promoCodeEntitiesToService(codes) + + return outCodes, paginationResultFromTotal(int64(total), params), nil +} + +func (r *promoCodeRepository) CreateUsage(ctx context.Context, usage *service.PromoCodeUsage) error { + client := clientFromContext(ctx, r.client) + created, err := client.PromoCodeUsage.Create(). + SetPromoCodeID(usage.PromoCodeID). + SetUserID(usage.UserID). + SetBonusAmount(usage.BonusAmount). + SetUsedAt(usage.UsedAt). 
+ Save(ctx) + if err != nil { + return err + } + + usage.ID = created.ID + return nil +} + +func (r *promoCodeRepository) GetUsageByPromoCodeAndUser(ctx context.Context, promoCodeID, userID int64) (*service.PromoCodeUsage, error) { + m, err := r.client.PromoCodeUsage.Query(). + Where( + promocodeusage.PromoCodeIDEQ(promoCodeID), + promocodeusage.UserIDEQ(userID), + ). + Only(ctx) + if err != nil { + if dbent.IsNotFound(err) { + return nil, nil + } + return nil, err + } + return promoCodeUsageEntityToService(m), nil +} + +func (r *promoCodeRepository) ListUsagesByPromoCode(ctx context.Context, promoCodeID int64, params pagination.PaginationParams) ([]service.PromoCodeUsage, *pagination.PaginationResult, error) { + q := r.client.PromoCodeUsage.Query(). + Where(promocodeusage.PromoCodeIDEQ(promoCodeID)) + + total, err := q.Count(ctx) + if err != nil { + return nil, nil, err + } + + usages, err := q. + WithUser(). + Offset(params.Offset()). + Limit(params.Limit()). + Order(dbent.Desc(promocodeusage.FieldID)). + All(ctx) + if err != nil { + return nil, nil, err + } + + outUsages := promoCodeUsageEntitiesToService(usages) + + return outUsages, paginationResultFromTotal(int64(total), params), nil +} + +func (r *promoCodeRepository) IncrementUsedCount(ctx context.Context, id int64) error { + client := clientFromContext(ctx, r.client) + _, err := client.PromoCode.UpdateOneID(id). + AddUsedCount(1). 
+ Save(ctx) + return err +} + +// Entity to Service conversions + +func promoCodeEntityToService(m *dbent.PromoCode) *service.PromoCode { + if m == nil { + return nil + } + return &service.PromoCode{ + ID: m.ID, + Code: m.Code, + BonusAmount: m.BonusAmount, + MaxUses: m.MaxUses, + UsedCount: m.UsedCount, + Status: m.Status, + ExpiresAt: m.ExpiresAt, + Notes: derefString(m.Notes), + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + } +} + +func promoCodeEntitiesToService(models []*dbent.PromoCode) []service.PromoCode { + out := make([]service.PromoCode, 0, len(models)) + for i := range models { + if s := promoCodeEntityToService(models[i]); s != nil { + out = append(out, *s) + } + } + return out +} + +func promoCodeUsageEntityToService(m *dbent.PromoCodeUsage) *service.PromoCodeUsage { + if m == nil { + return nil + } + out := &service.PromoCodeUsage{ + ID: m.ID, + PromoCodeID: m.PromoCodeID, + UserID: m.UserID, + BonusAmount: m.BonusAmount, + UsedAt: m.UsedAt, + } + if m.Edges.User != nil { + out.User = userEntityToService(m.Edges.User) + } + return out +} + +func promoCodeUsageEntitiesToService(models []*dbent.PromoCodeUsage) []service.PromoCodeUsage { + out := make([]service.PromoCodeUsage, 0, len(models)) + for i := range models { + if s := promoCodeUsageEntityToService(models[i]); s != nil { + out = append(out, *s) + } + } + return out +} diff --git a/backend/internal/repository/proxy_latency_cache.go b/backend/internal/repository/proxy_latency_cache.go new file mode 100644 index 00000000..4458b5e1 --- /dev/null +++ b/backend/internal/repository/proxy_latency_cache.go @@ -0,0 +1,74 @@ +package repository + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const proxyLatencyKeyPrefix = "proxy:latency:" + +func proxyLatencyKey(proxyID int64) string { + return fmt.Sprintf("%s%d", proxyLatencyKeyPrefix, proxyID) +} + +type proxyLatencyCache struct { + rdb *redis.Client +} + 
+func NewProxyLatencyCache(rdb *redis.Client) service.ProxyLatencyCache { + return &proxyLatencyCache{rdb: rdb} +} + +func (c *proxyLatencyCache) GetProxyLatencies(ctx context.Context, proxyIDs []int64) (map[int64]*service.ProxyLatencyInfo, error) { + results := make(map[int64]*service.ProxyLatencyInfo) + if len(proxyIDs) == 0 { + return results, nil + } + + keys := make([]string, 0, len(proxyIDs)) + for _, id := range proxyIDs { + keys = append(keys, proxyLatencyKey(id)) + } + + values, err := c.rdb.MGet(ctx, keys...).Result() + if err != nil { + return results, err + } + + for i, raw := range values { + if raw == nil { + continue + } + var payload []byte + switch v := raw.(type) { + case string: + payload = []byte(v) + case []byte: + payload = v + default: + continue + } + var info service.ProxyLatencyInfo + if err := json.Unmarshal(payload, &info); err != nil { + continue + } + results[proxyIDs[i]] = &info + } + + return results, nil +} + +func (c *proxyLatencyCache) SetProxyLatency(ctx context.Context, proxyID int64, info *service.ProxyLatencyInfo) error { + if info == nil { + return nil + } + payload, err := json.Marshal(info) + if err != nil { + return err + } + return c.rdb.Set(ctx, proxyLatencyKey(proxyID), payload, 0).Err() +} diff --git a/backend/internal/repository/proxy_probe_service.go b/backend/internal/repository/proxy_probe_service.go index 5c42e4d1..fb6f405e 100644 --- a/backend/internal/repository/proxy_probe_service.go +++ b/backend/internal/repository/proxy_probe_service.go @@ -7,6 +7,7 @@ import ( "io" "log" "net/http" + "strings" "time" "github.com/Wei-Shaw/sub2api/internal/config" @@ -34,7 +35,10 @@ func NewProxyExitInfoProber(cfg *config.Config) service.ProxyExitInfoProber { } } -const defaultIPInfoURL = "https://ipinfo.io/json" +const ( + defaultIPInfoURL = "http://ip-api.com/json/?lang=zh-CN" + defaultProxyProbeTimeout = 30 * time.Second +) type proxyProbeService struct { ipInfoURL string @@ -46,7 +50,7 @@ type proxyProbeService struct { 
func (s *proxyProbeService) ProbeProxy(ctx context.Context, proxyURL string) (*service.ProxyExitInfo, int64, error) { client, err := httpclient.GetClient(httpclient.Options{ ProxyURL: proxyURL, - Timeout: 15 * time.Second, + Timeout: defaultProxyProbeTimeout, InsecureSkipVerify: s.insecureSkipVerify, ProxyStrict: true, ValidateResolvedIP: s.validateResolvedIP, @@ -75,10 +79,14 @@ func (s *proxyProbeService) ProbeProxy(ctx context.Context, proxyURL string) (*s } var ipInfo struct { - IP string `json:"ip"` - City string `json:"city"` - Region string `json:"region"` - Country string `json:"country"` + Status string `json:"status"` + Message string `json:"message"` + Query string `json:"query"` + City string `json:"city"` + Region string `json:"region"` + RegionName string `json:"regionName"` + Country string `json:"country"` + CountryCode string `json:"countryCode"` } body, err := io.ReadAll(resp.Body) @@ -89,11 +97,22 @@ func (s *proxyProbeService) ProbeProxy(ctx context.Context, proxyURL string) (*s if err := json.Unmarshal(body, &ipInfo); err != nil { return nil, latencyMs, fmt.Errorf("failed to parse response: %w", err) } + if strings.ToLower(ipInfo.Status) != "success" { + if ipInfo.Message == "" { + ipInfo.Message = "ip-api request failed" + } + return nil, latencyMs, fmt.Errorf("ip-api request failed: %s", ipInfo.Message) + } + region := ipInfo.RegionName + if region == "" { + region = ipInfo.Region + } return &service.ProxyExitInfo{ - IP: ipInfo.IP, - City: ipInfo.City, - Region: ipInfo.Region, - Country: ipInfo.Country, + IP: ipInfo.Query, + City: ipInfo.City, + Region: region, + Country: ipInfo.Country, + CountryCode: ipInfo.CountryCode, }, latencyMs, nil } diff --git a/backend/internal/repository/proxy_probe_service_test.go b/backend/internal/repository/proxy_probe_service_test.go index fe45adbb..f1cd5721 100644 --- a/backend/internal/repository/proxy_probe_service_test.go +++ b/backend/internal/repository/proxy_probe_service_test.go @@ -21,7 +21,7 @@ type 
ProxyProbeServiceSuite struct { func (s *ProxyProbeServiceSuite) SetupTest() { s.ctx = context.Background() s.prober = &proxyProbeService{ - ipInfoURL: "http://ipinfo.test/json", + ipInfoURL: "http://ip-api.test/json/?lang=zh-CN", allowPrivateHosts: true, } } @@ -54,7 +54,7 @@ func (s *ProxyProbeServiceSuite) TestProbeProxy_Success() { s.setupProxyServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { seen <- r.RequestURI w.Header().Set("Content-Type", "application/json") - _, _ = io.WriteString(w, `{"ip":"1.2.3.4","city":"c","region":"r","country":"cc"}`) + _, _ = io.WriteString(w, `{"status":"success","query":"1.2.3.4","city":"c","regionName":"r","country":"cc","countryCode":"CC"}`) })) info, latencyMs, err := s.prober.ProbeProxy(s.ctx, s.proxySrv.URL) @@ -64,11 +64,12 @@ func (s *ProxyProbeServiceSuite) TestProbeProxy_Success() { require.Equal(s.T(), "c", info.City) require.Equal(s.T(), "r", info.Region) require.Equal(s.T(), "cc", info.Country) + require.Equal(s.T(), "CC", info.CountryCode) // Verify proxy received the request select { case uri := <-seen: - require.Contains(s.T(), uri, "ipinfo.test", "expected request to go through proxy") + require.Contains(s.T(), uri, "ip-api.test", "expected request to go through proxy") default: require.Fail(s.T(), "expected proxy to receive request") } diff --git a/backend/internal/repository/proxy_repo.go b/backend/internal/repository/proxy_repo.go index 622b0aeb..36965c05 100644 --- a/backend/internal/repository/proxy_repo.go +++ b/backend/internal/repository/proxy_repo.go @@ -219,12 +219,54 @@ func (r *proxyRepository) ExistsByHostPortAuth(ctx context.Context, host string, // CountAccountsByProxyID returns the number of accounts using a specific proxy func (r *proxyRepository) CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error) { var count int64 - if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM accounts WHERE proxy_id = $1", []any{proxyID}, &count); err != nil { + if err := 
scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM accounts WHERE proxy_id = $1 AND deleted_at IS NULL", []any{proxyID}, &count); err != nil { return 0, err } return count, nil } +func (r *proxyRepository) ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]service.ProxyAccountSummary, error) { + rows, err := r.sql.QueryContext(ctx, ` + SELECT id, name, platform, type, notes + FROM accounts + WHERE proxy_id = $1 AND deleted_at IS NULL + ORDER BY id DESC + `, proxyID) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + out := make([]service.ProxyAccountSummary, 0) + for rows.Next() { + var ( + id int64 + name string + platform string + accType string + notes sql.NullString + ) + if err := rows.Scan(&id, &name, &platform, &accType, ¬es); err != nil { + return nil, err + } + var notesPtr *string + if notes.Valid { + notesPtr = ¬es.String + } + out = append(out, service.ProxyAccountSummary{ + ID: id, + Name: name, + Platform: platform, + Type: accType, + Notes: notesPtr, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + return out, nil +} + // GetAccountCountsForProxies returns a map of proxy ID to account count for all proxies func (r *proxyRepository) GetAccountCountsForProxies(ctx context.Context) (counts map[int64]int64, err error) { rows, err := r.sql.QueryContext(ctx, "SELECT proxy_id, COUNT(*) AS count FROM accounts WHERE proxy_id IS NOT NULL AND deleted_at IS NULL GROUP BY proxy_id") diff --git a/backend/internal/repository/scheduler_cache.go b/backend/internal/repository/scheduler_cache.go new file mode 100644 index 00000000..13b22107 --- /dev/null +++ b/backend/internal/repository/scheduler_cache.go @@ -0,0 +1,276 @@ +package repository + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const ( + schedulerBucketSetKey = "sched:buckets" + schedulerOutboxWatermarkKey = 
"sched:outbox:watermark" + schedulerAccountPrefix = "sched:acc:" + schedulerActivePrefix = "sched:active:" + schedulerReadyPrefix = "sched:ready:" + schedulerVersionPrefix = "sched:ver:" + schedulerSnapshotPrefix = "sched:" + schedulerLockPrefix = "sched:lock:" +) + +type schedulerCache struct { + rdb *redis.Client +} + +func NewSchedulerCache(rdb *redis.Client) service.SchedulerCache { + return &schedulerCache{rdb: rdb} +} + +func (c *schedulerCache) GetSnapshot(ctx context.Context, bucket service.SchedulerBucket) ([]*service.Account, bool, error) { + readyKey := schedulerBucketKey(schedulerReadyPrefix, bucket) + readyVal, err := c.rdb.Get(ctx, readyKey).Result() + if err == redis.Nil { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + if readyVal != "1" { + return nil, false, nil + } + + activeKey := schedulerBucketKey(schedulerActivePrefix, bucket) + activeVal, err := c.rdb.Get(ctx, activeKey).Result() + if err == redis.Nil { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + + snapshotKey := schedulerSnapshotKey(bucket, activeVal) + ids, err := c.rdb.ZRange(ctx, snapshotKey, 0, -1).Result() + if err != nil { + return nil, false, err + } + if len(ids) == 0 { + return []*service.Account{}, true, nil + } + + keys := make([]string, 0, len(ids)) + for _, id := range ids { + keys = append(keys, schedulerAccountKey(id)) + } + values, err := c.rdb.MGet(ctx, keys...).Result() + if err != nil { + return nil, false, err + } + + accounts := make([]*service.Account, 0, len(values)) + for _, val := range values { + if val == nil { + return nil, false, nil + } + account, err := decodeCachedAccount(val) + if err != nil { + return nil, false, err + } + accounts = append(accounts, account) + } + + return accounts, true, nil +} + +func (c *schedulerCache) SetSnapshot(ctx context.Context, bucket service.SchedulerBucket, accounts []service.Account) error { + activeKey := schedulerBucketKey(schedulerActivePrefix, bucket) + 
oldActive, _ := c.rdb.Get(ctx, activeKey).Result() + + versionKey := schedulerBucketKey(schedulerVersionPrefix, bucket) + version, err := c.rdb.Incr(ctx, versionKey).Result() + if err != nil { + return err + } + + versionStr := strconv.FormatInt(version, 10) + snapshotKey := schedulerSnapshotKey(bucket, versionStr) + + pipe := c.rdb.Pipeline() + for _, account := range accounts { + payload, err := json.Marshal(account) + if err != nil { + return err + } + pipe.Set(ctx, schedulerAccountKey(strconv.FormatInt(account.ID, 10)), payload, 0) + } + if len(accounts) > 0 { + // 使用序号作为 score,保持数据库返回的排序语义。 + members := make([]redis.Z, 0, len(accounts)) + for idx, account := range accounts { + members = append(members, redis.Z{ + Score: float64(idx), + Member: strconv.FormatInt(account.ID, 10), + }) + } + pipe.ZAdd(ctx, snapshotKey, members...) + } else { + pipe.Del(ctx, snapshotKey) + } + pipe.Set(ctx, activeKey, versionStr, 0) + pipe.Set(ctx, schedulerBucketKey(schedulerReadyPrefix, bucket), "1", 0) + pipe.SAdd(ctx, schedulerBucketSetKey, bucket.String()) + if _, err := pipe.Exec(ctx); err != nil { + return err + } + + if oldActive != "" && oldActive != versionStr { + _ = c.rdb.Del(ctx, schedulerSnapshotKey(bucket, oldActive)).Err() + } + + return nil +} + +func (c *schedulerCache) GetAccount(ctx context.Context, accountID int64) (*service.Account, error) { + key := schedulerAccountKey(strconv.FormatInt(accountID, 10)) + val, err := c.rdb.Get(ctx, key).Result() + if err == redis.Nil { + return nil, nil + } + if err != nil { + return nil, err + } + return decodeCachedAccount(val) +} + +func (c *schedulerCache) SetAccount(ctx context.Context, account *service.Account) error { + if account == nil || account.ID <= 0 { + return nil + } + payload, err := json.Marshal(account) + if err != nil { + return err + } + key := schedulerAccountKey(strconv.FormatInt(account.ID, 10)) + return c.rdb.Set(ctx, key, payload, 0).Err() +} + +func (c *schedulerCache) DeleteAccount(ctx 
context.Context, accountID int64) error { + if accountID <= 0 { + return nil + } + key := schedulerAccountKey(strconv.FormatInt(accountID, 10)) + return c.rdb.Del(ctx, key).Err() +} + +func (c *schedulerCache) UpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error { + if len(updates) == 0 { + return nil + } + + keys := make([]string, 0, len(updates)) + ids := make([]int64, 0, len(updates)) + for id := range updates { + keys = append(keys, schedulerAccountKey(strconv.FormatInt(id, 10))) + ids = append(ids, id) + } + + values, err := c.rdb.MGet(ctx, keys...).Result() + if err != nil { + return err + } + + pipe := c.rdb.Pipeline() + for i, val := range values { + if val == nil { + continue + } + account, err := decodeCachedAccount(val) + if err != nil { + return err + } + account.LastUsedAt = ptrTime(updates[ids[i]]) + updated, err := json.Marshal(account) + if err != nil { + return err + } + pipe.Set(ctx, keys[i], updated, 0) + } + _, err = pipe.Exec(ctx) + return err +} + +func (c *schedulerCache) TryLockBucket(ctx context.Context, bucket service.SchedulerBucket, ttl time.Duration) (bool, error) { + key := schedulerBucketKey(schedulerLockPrefix, bucket) + return c.rdb.SetNX(ctx, key, time.Now().UnixNano(), ttl).Result() +} + +func (c *schedulerCache) ListBuckets(ctx context.Context) ([]service.SchedulerBucket, error) { + raw, err := c.rdb.SMembers(ctx, schedulerBucketSetKey).Result() + if err != nil { + return nil, err + } + out := make([]service.SchedulerBucket, 0, len(raw)) + for _, entry := range raw { + bucket, ok := service.ParseSchedulerBucket(entry) + if !ok { + continue + } + out = append(out, bucket) + } + return out, nil +} + +func (c *schedulerCache) GetOutboxWatermark(ctx context.Context) (int64, error) { + val, err := c.rdb.Get(ctx, schedulerOutboxWatermarkKey).Result() + if err == redis.Nil { + return 0, nil + } + if err != nil { + return 0, err + } + id, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return 0, err + } + 
return id, nil +} + +func (c *schedulerCache) SetOutboxWatermark(ctx context.Context, id int64) error { + return c.rdb.Set(ctx, schedulerOutboxWatermarkKey, strconv.FormatInt(id, 10), 0).Err() +} + +func schedulerBucketKey(prefix string, bucket service.SchedulerBucket) string { + return fmt.Sprintf("%s%d:%s:%s", prefix, bucket.GroupID, bucket.Platform, bucket.Mode) +} + +func schedulerSnapshotKey(bucket service.SchedulerBucket, version string) string { + return fmt.Sprintf("%s%d:%s:%s:v%s", schedulerSnapshotPrefix, bucket.GroupID, bucket.Platform, bucket.Mode, version) +} + +func schedulerAccountKey(id string) string { + return schedulerAccountPrefix + id +} + +func ptrTime(t time.Time) *time.Time { + return &t +} + +func decodeCachedAccount(val any) (*service.Account, error) { + var payload []byte + switch raw := val.(type) { + case string: + payload = []byte(raw) + case []byte: + payload = raw + default: + return nil, fmt.Errorf("unexpected account cache type: %T", val) + } + var account service.Account + if err := json.Unmarshal(payload, &account); err != nil { + return nil, err + } + return &account, nil +} diff --git a/backend/internal/repository/scheduler_outbox_repo.go b/backend/internal/repository/scheduler_outbox_repo.go new file mode 100644 index 00000000..d7bc97da --- /dev/null +++ b/backend/internal/repository/scheduler_outbox_repo.go @@ -0,0 +1,96 @@ +package repository + +import ( + "context" + "database/sql" + "encoding/json" + + "github.com/Wei-Shaw/sub2api/internal/service" +) + +type schedulerOutboxRepository struct { + db *sql.DB +} + +func NewSchedulerOutboxRepository(db *sql.DB) service.SchedulerOutboxRepository { + return &schedulerOutboxRepository{db: db} +} + +func (r *schedulerOutboxRepository) ListAfter(ctx context.Context, afterID int64, limit int) ([]service.SchedulerOutboxEvent, error) { + if limit <= 0 { + limit = 100 + } + rows, err := r.db.QueryContext(ctx, ` + SELECT id, event_type, account_id, group_id, payload, created_at + FROM 
scheduler_outbox + WHERE id > $1 + ORDER BY id ASC + LIMIT $2 + `, afterID, limit) + if err != nil { + return nil, err + } + defer func() { + _ = rows.Close() + }() + + events := make([]service.SchedulerOutboxEvent, 0, limit) + for rows.Next() { + var ( + payloadRaw []byte + accountID sql.NullInt64 + groupID sql.NullInt64 + event service.SchedulerOutboxEvent + ) + if err := rows.Scan(&event.ID, &event.EventType, &accountID, &groupID, &payloadRaw, &event.CreatedAt); err != nil { + return nil, err + } + if accountID.Valid { + v := accountID.Int64 + event.AccountID = &v + } + if groupID.Valid { + v := groupID.Int64 + event.GroupID = &v + } + if len(payloadRaw) > 0 { + var payload map[string]any + if err := json.Unmarshal(payloadRaw, &payload); err != nil { + return nil, err + } + event.Payload = payload + } + events = append(events, event) + } + if err := rows.Err(); err != nil { + return nil, err + } + return events, nil +} + +func (r *schedulerOutboxRepository) MaxID(ctx context.Context) (int64, error) { + var maxID int64 + if err := r.db.QueryRowContext(ctx, "SELECT COALESCE(MAX(id), 0) FROM scheduler_outbox").Scan(&maxID); err != nil { + return 0, err + } + return maxID, nil +} + +func enqueueSchedulerOutbox(ctx context.Context, exec sqlExecutor, eventType string, accountID *int64, groupID *int64, payload any) error { + if exec == nil { + return nil + } + var payloadArg any + if payload != nil { + encoded, err := json.Marshal(payload) + if err != nil { + return err + } + payloadArg = encoded + } + _, err := exec.ExecContext(ctx, ` + INSERT INTO scheduler_outbox (event_type, account_id, group_id, payload) + VALUES ($1, $2, $3, $4) + `, eventType, accountID, groupID, payloadArg) + return err +} diff --git a/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go b/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go new file mode 100644 index 00000000..e442a125 --- /dev/null +++ 
b/backend/internal/repository/scheduler_snapshot_outbox_integration_test.go @@ -0,0 +1,68 @@ +//go:build integration + +package repository + +import ( + "context" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func TestSchedulerSnapshotOutboxReplay(t *testing.T) { + ctx := context.Background() + rdb := testRedis(t) + client := testEntClient(t) + + _, _ = integrationDB.ExecContext(ctx, "TRUNCATE scheduler_outbox") + + accountRepo := newAccountRepositoryWithSQL(client, integrationDB) + outboxRepo := NewSchedulerOutboxRepository(integrationDB) + cache := NewSchedulerCache(rdb) + + cfg := &config.Config{ + RunMode: config.RunModeStandard, + Gateway: config.GatewayConfig{ + Scheduling: config.GatewaySchedulingConfig{ + OutboxPollIntervalSeconds: 1, + FullRebuildIntervalSeconds: 0, + DbFallbackEnabled: true, + }, + }, + } + + account := &service.Account{ + Name: "outbox-replay-" + time.Now().Format("150405.000000"), + Platform: service.PlatformOpenAI, + Type: service.AccountTypeAPIKey, + Status: service.StatusActive, + Schedulable: true, + Concurrency: 3, + Priority: 1, + Credentials: map[string]any{}, + Extra: map[string]any{}, + } + require.NoError(t, accountRepo.Create(ctx, account)) + require.NoError(t, cache.SetAccount(ctx, account)) + + svc := service.NewSchedulerSnapshotService(cache, outboxRepo, accountRepo, nil, cfg) + svc.Start() + t.Cleanup(svc.Stop) + + require.NoError(t, accountRepo.UpdateLastUsed(ctx, account.ID)) + updated, err := accountRepo.GetByID(ctx, account.ID) + require.NoError(t, err) + require.NotNil(t, updated.LastUsedAt) + expectedUnix := updated.LastUsedAt.Unix() + + require.Eventually(t, func() bool { + cached, err := cache.GetAccount(ctx, account.ID) + if err != nil || cached == nil || cached.LastUsedAt == nil { + return false + } + return cached.LastUsedAt.Unix() == expectedUnix + }, 5*time.Second, 100*time.Millisecond) +} 
diff --git a/backend/internal/repository/session_limit_cache.go b/backend/internal/repository/session_limit_cache.go new file mode 100644 index 00000000..16f2a69c --- /dev/null +++ b/backend/internal/repository/session_limit_cache.go @@ -0,0 +1,321 @@ +package repository + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +// 会话限制缓存常量定义 +// +// 设计说明: +// 使用 Redis 有序集合(Sorted Set)跟踪每个账号的活跃会话: +// - Key: session_limit:account:{accountID} +// - Member: sessionUUID(从 metadata.user_id 中提取) +// - Score: Unix 时间戳(会话最后活跃时间) +// +// 通过 ZREMRANGEBYSCORE 自动清理过期会话,无需手动管理 TTL +const ( + // 会话限制键前缀 + // 格式: session_limit:account:{accountID} + sessionLimitKeyPrefix = "session_limit:account:" + + // 窗口费用缓存键前缀 + // 格式: window_cost:account:{accountID} + windowCostKeyPrefix = "window_cost:account:" + + // 窗口费用缓存 TTL(30秒) + windowCostCacheTTL = 30 * time.Second +) + +var ( + // registerSessionScript 注册会话活动 + // 使用 Redis TIME 命令获取服务器时间,避免多实例时钟不同步 + // KEYS[1] = session_limit:account:{accountID} + // ARGV[1] = maxSessions + // ARGV[2] = idleTimeout(秒) + // ARGV[3] = sessionUUID + // 返回: 1 = 允许, 0 = 拒绝 + registerSessionScript = redis.NewScript(` + local key = KEYS[1] + local maxSessions = tonumber(ARGV[1]) + local idleTimeout = tonumber(ARGV[2]) + local sessionUUID = ARGV[3] + + -- 使用 Redis 服务器时间,确保多实例时钟一致 + local timeResult = redis.call('TIME') + local now = tonumber(timeResult[1]) + local expireBefore = now - idleTimeout + + -- 清理过期会话 + redis.call('ZREMRANGEBYSCORE', key, '-inf', expireBefore) + + -- 检查会话是否已存在(支持刷新时间戳) + local exists = redis.call('ZSCORE', key, sessionUUID) + if exists ~= false then + -- 会话已存在,刷新时间戳 + redis.call('ZADD', key, now, sessionUUID) + redis.call('EXPIRE', key, idleTimeout + 60) + return 1 + end + + -- 检查是否达到会话数量上限 + local count = redis.call('ZCARD', key) + if count < maxSessions then + -- 未达上限,添加新会话 + redis.call('ZADD', key, now, sessionUUID) + redis.call('EXPIRE', key, 
idleTimeout + 60) + return 1 + end + + -- 达到上限,拒绝新会话 + return 0 + `) + + // refreshSessionScript 刷新会话时间戳 + // KEYS[1] = session_limit:account:{accountID} + // ARGV[1] = idleTimeout(秒) + // ARGV[2] = sessionUUID + refreshSessionScript = redis.NewScript(` + local key = KEYS[1] + local idleTimeout = tonumber(ARGV[1]) + local sessionUUID = ARGV[2] + + local timeResult = redis.call('TIME') + local now = tonumber(timeResult[1]) + + -- 检查会话是否存在 + local exists = redis.call('ZSCORE', key, sessionUUID) + if exists ~= false then + redis.call('ZADD', key, now, sessionUUID) + redis.call('EXPIRE', key, idleTimeout + 60) + end + return 1 + `) + + // getActiveSessionCountScript 获取活跃会话数 + // KEYS[1] = session_limit:account:{accountID} + // ARGV[1] = idleTimeout(秒) + getActiveSessionCountScript = redis.NewScript(` + local key = KEYS[1] + local idleTimeout = tonumber(ARGV[1]) + + local timeResult = redis.call('TIME') + local now = tonumber(timeResult[1]) + local expireBefore = now - idleTimeout + + -- 清理过期会话 + redis.call('ZREMRANGEBYSCORE', key, '-inf', expireBefore) + + return redis.call('ZCARD', key) + `) + + // isSessionActiveScript 检查会话是否活跃 + // KEYS[1] = session_limit:account:{accountID} + // ARGV[1] = idleTimeout(秒) + // ARGV[2] = sessionUUID + isSessionActiveScript = redis.NewScript(` + local key = KEYS[1] + local idleTimeout = tonumber(ARGV[1]) + local sessionUUID = ARGV[2] + + local timeResult = redis.call('TIME') + local now = tonumber(timeResult[1]) + local expireBefore = now - idleTimeout + + -- 获取会话的时间戳 + local score = redis.call('ZSCORE', key, sessionUUID) + if score == false then + return 0 + end + + -- 检查是否过期 + if tonumber(score) <= expireBefore then + return 0 + end + + return 1 + `) +) + +type sessionLimitCache struct { + rdb *redis.Client + defaultIdleTimeout time.Duration // 默认空闲超时(用于 GetActiveSessionCount) +} + +// NewSessionLimitCache 创建会话限制缓存 +// defaultIdleTimeoutMinutes: 默认空闲超时时间(分钟),用于无参数查询 +func NewSessionLimitCache(rdb *redis.Client, 
defaultIdleTimeoutMinutes int) service.SessionLimitCache { + if defaultIdleTimeoutMinutes <= 0 { + defaultIdleTimeoutMinutes = 5 // 默认 5 分钟 + } + return &sessionLimitCache{ + rdb: rdb, + defaultIdleTimeout: time.Duration(defaultIdleTimeoutMinutes) * time.Minute, + } +} + +// sessionLimitKey 生成会话限制的 Redis 键 +func sessionLimitKey(accountID int64) string { + return fmt.Sprintf("%s%d", sessionLimitKeyPrefix, accountID) +} + +// windowCostKey 生成窗口费用缓存的 Redis 键 +func windowCostKey(accountID int64) string { + return fmt.Sprintf("%s%d", windowCostKeyPrefix, accountID) +} + +// RegisterSession 注册会话活动 +func (c *sessionLimitCache) RegisterSession(ctx context.Context, accountID int64, sessionUUID string, maxSessions int, idleTimeout time.Duration) (bool, error) { + if sessionUUID == "" || maxSessions <= 0 { + return true, nil // 无效参数,默认允许 + } + + key := sessionLimitKey(accountID) + idleTimeoutSeconds := int(idleTimeout.Seconds()) + if idleTimeoutSeconds <= 0 { + idleTimeoutSeconds = int(c.defaultIdleTimeout.Seconds()) + } + + result, err := registerSessionScript.Run(ctx, c.rdb, []string{key}, maxSessions, idleTimeoutSeconds, sessionUUID).Int() + if err != nil { + return true, err // 失败开放:缓存错误时允许请求通过 + } + return result == 1, nil +} + +// RefreshSession 刷新会话时间戳 +func (c *sessionLimitCache) RefreshSession(ctx context.Context, accountID int64, sessionUUID string, idleTimeout time.Duration) error { + if sessionUUID == "" { + return nil + } + + key := sessionLimitKey(accountID) + idleTimeoutSeconds := int(idleTimeout.Seconds()) + if idleTimeoutSeconds <= 0 { + idleTimeoutSeconds = int(c.defaultIdleTimeout.Seconds()) + } + + _, err := refreshSessionScript.Run(ctx, c.rdb, []string{key}, idleTimeoutSeconds, sessionUUID).Result() + return err +} + +// GetActiveSessionCount 获取活跃会话数 +func (c *sessionLimitCache) GetActiveSessionCount(ctx context.Context, accountID int64) (int, error) { + key := sessionLimitKey(accountID) + idleTimeoutSeconds := int(c.defaultIdleTimeout.Seconds()) + + 
result, err := getActiveSessionCountScript.Run(ctx, c.rdb, []string{key}, idleTimeoutSeconds).Int() + if err != nil { + return 0, err + } + return result, nil +} + +// GetActiveSessionCountBatch 批量获取多个账号的活跃会话数 +func (c *sessionLimitCache) GetActiveSessionCountBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) { + if len(accountIDs) == 0 { + return make(map[int64]int), nil + } + + results := make(map[int64]int, len(accountIDs)) + + // 使用 pipeline 批量执行 + pipe := c.rdb.Pipeline() + idleTimeoutSeconds := int(c.defaultIdleTimeout.Seconds()) + + cmds := make(map[int64]*redis.Cmd, len(accountIDs)) + for _, accountID := range accountIDs { + key := sessionLimitKey(accountID) + cmds[accountID] = getActiveSessionCountScript.Run(ctx, pipe, []string{key}, idleTimeoutSeconds) + } + + // 执行 pipeline,即使部分失败也尝试获取成功的结果 + _, _ = pipe.Exec(ctx) + + for accountID, cmd := range cmds { + if result, err := cmd.Int(); err == nil { + results[accountID] = result + } + } + + return results, nil +} + +// IsSessionActive 检查会话是否活跃 +func (c *sessionLimitCache) IsSessionActive(ctx context.Context, accountID int64, sessionUUID string) (bool, error) { + if sessionUUID == "" { + return false, nil + } + + key := sessionLimitKey(accountID) + idleTimeoutSeconds := int(c.defaultIdleTimeout.Seconds()) + + result, err := isSessionActiveScript.Run(ctx, c.rdb, []string{key}, idleTimeoutSeconds, sessionUUID).Int() + if err != nil { + return false, err + } + return result == 1, nil +} + +// ========== 5h窗口费用缓存实现 ========== + +// GetWindowCost 获取缓存的窗口费用 +func (c *sessionLimitCache) GetWindowCost(ctx context.Context, accountID int64) (float64, bool, error) { + key := windowCostKey(accountID) + val, err := c.rdb.Get(ctx, key).Float64() + if err == redis.Nil { + return 0, false, nil // 缓存未命中 + } + if err != nil { + return 0, false, err + } + return val, true, nil +} + +// SetWindowCost 设置窗口费用缓存 +func (c *sessionLimitCache) SetWindowCost(ctx context.Context, accountID int64, cost float64) error { 
+ key := windowCostKey(accountID) + return c.rdb.Set(ctx, key, cost, windowCostCacheTTL).Err() +} + +// GetWindowCostBatch 批量获取窗口费用缓存 +func (c *sessionLimitCache) GetWindowCostBatch(ctx context.Context, accountIDs []int64) (map[int64]float64, error) { + if len(accountIDs) == 0 { + return make(map[int64]float64), nil + } + + // 构建批量查询的 keys + keys := make([]string, len(accountIDs)) + for i, accountID := range accountIDs { + keys[i] = windowCostKey(accountID) + } + + // 使用 MGET 批量获取 + vals, err := c.rdb.MGet(ctx, keys...).Result() + if err != nil { + return nil, err + } + + results := make(map[int64]float64, len(accountIDs)) + for i, val := range vals { + if val == nil { + continue // 缓存未命中 + } + // 尝试解析为 float64 + switch v := val.(type) { + case string: + if cost, err := strconv.ParseFloat(v, 64); err == nil { + results[accountIDs[i]] = cost + } + case float64: + results[accountIDs[i]] = v + } + } + + return results, nil +} diff --git a/backend/internal/repository/timeout_counter_cache.go b/backend/internal/repository/timeout_counter_cache.go new file mode 100644 index 00000000..64cde22a --- /dev/null +++ b/backend/internal/repository/timeout_counter_cache.go @@ -0,0 +1,80 @@ +package repository + +import ( + "context" + "fmt" + "time" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/redis/go-redis/v9" +) + +const timeoutCounterPrefix = "timeout_count:account:" + +// timeoutCounterIncrScript 使用 Lua 脚本原子性地增加计数并返回当前值 +// 如果 key 不存在,则创建并设置过期时间 +var timeoutCounterIncrScript = redis.NewScript(` + local key = KEYS[1] + local ttl = tonumber(ARGV[1]) + + local count = redis.call('INCR', key) + if count == 1 then + redis.call('EXPIRE', key, ttl) + end + + return count +`) + +type timeoutCounterCache struct { + rdb *redis.Client +} + +// NewTimeoutCounterCache 创建超时计数器缓存实例 +func NewTimeoutCounterCache(rdb *redis.Client) service.TimeoutCounterCache { + return &timeoutCounterCache{rdb: rdb} +} + +// IncrementTimeoutCount 增加账户的超时计数,返回当前计数值 +// windowMinutes 
是计数窗口时间(分钟),超过此时间计数器会自动重置 +func (c *timeoutCounterCache) IncrementTimeoutCount(ctx context.Context, accountID int64, windowMinutes int) (int64, error) { + key := fmt.Sprintf("%s%d", timeoutCounterPrefix, accountID) + + ttlSeconds := windowMinutes * 60 + if ttlSeconds < 60 { + ttlSeconds = 60 // 最小1分钟 + } + + result, err := timeoutCounterIncrScript.Run(ctx, c.rdb, []string{key}, ttlSeconds).Int64() + if err != nil { + return 0, fmt.Errorf("increment timeout count: %w", err) + } + + return result, nil +} + +// GetTimeoutCount 获取账户当前的超时计数 +func (c *timeoutCounterCache) GetTimeoutCount(ctx context.Context, accountID int64) (int64, error) { + key := fmt.Sprintf("%s%d", timeoutCounterPrefix, accountID) + + val, err := c.rdb.Get(ctx, key).Int64() + if err == redis.Nil { + return 0, nil + } + if err != nil { + return 0, fmt.Errorf("get timeout count: %w", err) + } + + return val, nil +} + +// ResetTimeoutCount 重置账户的超时计数 +func (c *timeoutCounterCache) ResetTimeoutCount(ctx context.Context, accountID int64) error { + key := fmt.Sprintf("%s%d", timeoutCounterPrefix, accountID) + return c.rdb.Del(ctx, key).Err() +} + +// GetTimeoutCountTTL 获取计数器剩余过期时间 +func (c *timeoutCounterCache) GetTimeoutCountTTL(ctx context.Context, accountID int64) (time.Duration, error) { + key := fmt.Sprintf("%s%d", timeoutCounterPrefix, accountID) + return c.rdb.TTL(ctx, key).Result() +} diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go index bd5c8b4f..4a2aaade 100644 --- a/backend/internal/repository/usage_log_repo.go +++ b/backend/internal/repository/usage_log_repo.go @@ -22,7 +22,7 @@ import ( "github.com/lib/pq" ) -const usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, 
rate_multiplier, billing_type, stream, duration_ms, first_token_ms, user_agent, image_count, image_size, created_at" +const usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, account_rate_multiplier, billing_type, stream, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, created_at" type usageLogRepository struct { client *dbent.Client @@ -105,11 +105,13 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) total_cost, actual_cost, rate_multiplier, + account_rate_multiplier, billing_type, stream, duration_ms, first_token_ms, user_agent, + ip_address, image_count, image_size, created_at @@ -119,7 +121,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, - $20, $21, $22, $23, $24, $25, $26, $27, $28 + $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30 ) ON CONFLICT (request_id, api_key_id) DO NOTHING RETURNING id, created_at @@ -130,6 +132,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) duration := nullInt(log.DurationMs) firstToken := nullInt(log.FirstTokenMs) userAgent := nullString(log.UserAgent) + ipAddress := nullString(log.IPAddress) imageSize := nullString(log.ImageSize) var requestIDArg any @@ -158,11 +161,13 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) log.TotalCost, log.ActualCost, rateMultiplier, + log.AccountRateMultiplier, log.BillingType, log.Stream, duration, firstToken, userAgent, + ipAddress, log.ImageCount, imageSize, createdAt, @@ -266,16 +271,60 @@ func (r *usageLogRepository) GetUserStats(ctx context.Context, userID int64, sta type DashboardStats 
= usagestats.DashboardStats func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardStats, error) { - var stats DashboardStats - today := timezone.Today() - now := time.Now() + stats := &DashboardStats{} + now := timezone.Now() + todayStart := timezone.Today() - // 合并用户统计查询 + if err := r.fillDashboardEntityStats(ctx, stats, todayStart, now); err != nil { + return nil, err + } + if err := r.fillDashboardUsageStatsAggregated(ctx, stats, todayStart, now); err != nil { + return nil, err + } + + rpm, tpm, err := r.getPerformanceStats(ctx, 0) + if err != nil { + return nil, err + } + stats.Rpm = rpm + stats.Tpm = tpm + + return stats, nil +} + +func (r *usageLogRepository) GetDashboardStatsWithRange(ctx context.Context, start, end time.Time) (*DashboardStats, error) { + startUTC := start.UTC() + endUTC := end.UTC() + if !endUTC.After(startUTC) { + return nil, errors.New("统计时间范围无效") + } + + stats := &DashboardStats{} + now := timezone.Now() + todayStart := timezone.Today() + + if err := r.fillDashboardEntityStats(ctx, stats, todayStart, now); err != nil { + return nil, err + } + if err := r.fillDashboardUsageStatsFromUsageLogs(ctx, stats, startUTC, endUTC, todayStart, now); err != nil { + return nil, err + } + + rpm, tpm, err := r.getPerformanceStats(ctx, 0) + if err != nil { + return nil, err + } + stats.Rpm = rpm + stats.Tpm = tpm + + return stats, nil +} + +func (r *usageLogRepository) fillDashboardEntityStats(ctx context.Context, stats *DashboardStats, todayUTC, now time.Time) error { userStatsQuery := ` SELECT COUNT(*) as total_users, - COUNT(CASE WHEN created_at >= $1 THEN 1 END) as today_new_users, - (SELECT COUNT(DISTINCT user_id) FROM usage_logs WHERE created_at >= $2) as active_users + COUNT(CASE WHEN created_at >= $1 THEN 1 END) as today_new_users FROM users WHERE deleted_at IS NULL ` @@ -283,15 +332,13 @@ func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardS ctx, r.sql, userStatsQuery, - []any{today, today}, + 
[]any{todayUTC}, &stats.TotalUsers, &stats.TodayNewUsers, - &stats.ActiveUsers, ); err != nil { - return nil, err + return err } - // 合并API Key统计查询 apiKeyStatsQuery := ` SELECT COUNT(*) as total_api_keys, @@ -307,10 +354,9 @@ func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardS &stats.TotalAPIKeys, &stats.ActiveAPIKeys, ); err != nil { - return nil, err + return err } - // 合并账户统计查询 accountStatsQuery := ` SELECT COUNT(*) as total_accounts, @@ -332,22 +378,26 @@ func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardS &stats.RateLimitAccounts, &stats.OverloadAccounts, ); err != nil { - return nil, err + return err } - // 累计 Token 统计 + return nil +} + +func (r *usageLogRepository) fillDashboardUsageStatsAggregated(ctx context.Context, stats *DashboardStats, todayUTC, now time.Time) error { totalStatsQuery := ` SELECT - COUNT(*) as total_requests, + COALESCE(SUM(total_requests), 0) as total_requests, COALESCE(SUM(input_tokens), 0) as total_input_tokens, COALESCE(SUM(output_tokens), 0) as total_output_tokens, COALESCE(SUM(cache_creation_tokens), 0) as total_cache_creation_tokens, COALESCE(SUM(cache_read_tokens), 0) as total_cache_read_tokens, COALESCE(SUM(total_cost), 0) as total_cost, COALESCE(SUM(actual_cost), 0) as total_actual_cost, - COALESCE(AVG(duration_ms), 0) as avg_duration_ms - FROM usage_logs + COALESCE(SUM(total_duration_ms), 0) as total_duration_ms + FROM usage_dashboard_daily ` + var totalDurationMs int64 if err := scanSingleRow( ctx, r.sql, @@ -360,13 +410,100 @@ func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardS &stats.TotalCacheReadTokens, &stats.TotalCost, &stats.TotalActualCost, - &stats.AverageDurationMs, + &totalDurationMs, ); err != nil { - return nil, err + return err } stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens + if stats.TotalRequests > 0 { + stats.AverageDurationMs = 
float64(totalDurationMs) / float64(stats.TotalRequests) + } - // 今日 Token 统计 + todayStatsQuery := ` + SELECT + total_requests as today_requests, + input_tokens as today_input_tokens, + output_tokens as today_output_tokens, + cache_creation_tokens as today_cache_creation_tokens, + cache_read_tokens as today_cache_read_tokens, + total_cost as today_cost, + actual_cost as today_actual_cost, + active_users as active_users + FROM usage_dashboard_daily + WHERE bucket_date = $1::date + ` + if err := scanSingleRow( + ctx, + r.sql, + todayStatsQuery, + []any{todayUTC}, + &stats.TodayRequests, + &stats.TodayInputTokens, + &stats.TodayOutputTokens, + &stats.TodayCacheCreationTokens, + &stats.TodayCacheReadTokens, + &stats.TodayCost, + &stats.TodayActualCost, + &stats.ActiveUsers, + ); err != nil { + if err != sql.ErrNoRows { + return err + } + } + stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens + + hourlyActiveQuery := ` + SELECT active_users + FROM usage_dashboard_hourly + WHERE bucket_start = $1 + ` + hourStart := now.In(timezone.Location()).Truncate(time.Hour) + if err := scanSingleRow(ctx, r.sql, hourlyActiveQuery, []any{hourStart}, &stats.HourlyActiveUsers); err != nil { + if err != sql.ErrNoRows { + return err + } + } + + return nil +} + +func (r *usageLogRepository) fillDashboardUsageStatsFromUsageLogs(ctx context.Context, stats *DashboardStats, startUTC, endUTC, todayUTC, now time.Time) error { + totalStatsQuery := ` + SELECT + COUNT(*) as total_requests, + COALESCE(SUM(input_tokens), 0) as total_input_tokens, + COALESCE(SUM(output_tokens), 0) as total_output_tokens, + COALESCE(SUM(cache_creation_tokens), 0) as total_cache_creation_tokens, + COALESCE(SUM(cache_read_tokens), 0) as total_cache_read_tokens, + COALESCE(SUM(total_cost), 0) as total_cost, + COALESCE(SUM(actual_cost), 0) as total_actual_cost, + COALESCE(SUM(COALESCE(duration_ms, 0)), 0) as total_duration_ms + FROM usage_logs + 
WHERE created_at >= $1 AND created_at < $2 + ` + var totalDurationMs int64 + if err := scanSingleRow( + ctx, + r.sql, + totalStatsQuery, + []any{startUTC, endUTC}, + &stats.TotalRequests, + &stats.TotalInputTokens, + &stats.TotalOutputTokens, + &stats.TotalCacheCreationTokens, + &stats.TotalCacheReadTokens, + &stats.TotalCost, + &stats.TotalActualCost, + &totalDurationMs, + ); err != nil { + return err + } + stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens + if stats.TotalRequests > 0 { + stats.AverageDurationMs = float64(totalDurationMs) / float64(stats.TotalRequests) + } + + todayEnd := todayUTC.Add(24 * time.Hour) todayStatsQuery := ` SELECT COUNT(*) as today_requests, @@ -377,13 +514,13 @@ func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardS COALESCE(SUM(total_cost), 0) as today_cost, COALESCE(SUM(actual_cost), 0) as today_actual_cost FROM usage_logs - WHERE created_at >= $1 + WHERE created_at >= $1 AND created_at < $2 ` if err := scanSingleRow( ctx, r.sql, todayStatsQuery, - []any{today}, + []any{todayUTC, todayEnd}, &stats.TodayRequests, &stats.TodayInputTokens, &stats.TodayOutputTokens, @@ -392,19 +529,31 @@ func (r *usageLogRepository) GetDashboardStats(ctx context.Context) (*DashboardS &stats.TodayCost, &stats.TodayActualCost, ); err != nil { - return nil, err + return err } stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens - // 性能指标:RPM 和 TPM(最近1分钟,全局) - rpm, tpm, err := r.getPerformanceStats(ctx, 0) - if err != nil { - return nil, err + activeUsersQuery := ` + SELECT COUNT(DISTINCT user_id) as active_users + FROM usage_logs + WHERE created_at >= $1 AND created_at < $2 + ` + if err := scanSingleRow(ctx, r.sql, activeUsersQuery, []any{todayUTC, todayEnd}, &stats.ActiveUsers); err != nil { + return err } - stats.Rpm = rpm - stats.Tpm = tpm - return &stats, nil + 
hourStart := now.UTC().Truncate(time.Hour) + hourEnd := hourStart.Add(time.Hour) + hourlyActiveQuery := ` + SELECT COUNT(DISTINCT user_id) as active_users + FROM usage_logs + WHERE created_at >= $1 AND created_at < $2 + ` + if err := scanSingleRow(ctx, r.sql, hourlyActiveQuery, []any{hourStart, hourEnd}, &stats.HourlyActiveUsers); err != nil { + return err + } + + return nil } func (r *usageLogRepository) ListByAccount(ctx context.Context, accountID int64, params pagination.PaginationParams) ([]service.UsageLog, *pagination.PaginationResult, error) { @@ -688,7 +837,9 @@ func (r *usageLogRepository) GetAccountTodayStats(ctx context.Context, accountID SELECT COUNT(*) as requests, COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens, - COALESCE(SUM(actual_cost), 0) as cost + COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as cost, + COALESCE(SUM(total_cost), 0) as standard_cost, + COALESCE(SUM(actual_cost), 0) as user_cost FROM usage_logs WHERE account_id = $1 AND created_at >= $2 ` @@ -702,6 +853,8 @@ func (r *usageLogRepository) GetAccountTodayStats(ctx context.Context, accountID &stats.Requests, &stats.Tokens, &stats.Cost, + &stats.StandardCost, + &stats.UserCost, ); err != nil { return nil, err } @@ -714,7 +867,9 @@ func (r *usageLogRepository) GetAccountWindowStats(ctx context.Context, accountI SELECT COUNT(*) as requests, COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens, - COALESCE(SUM(actual_cost), 0) as cost + COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as cost, + COALESCE(SUM(total_cost), 0) as standard_cost, + COALESCE(SUM(actual_cost), 0) as user_cost FROM usage_logs WHERE account_id = $1 AND created_at >= $2 ` @@ -728,6 +883,8 @@ func (r *usageLogRepository) GetAccountWindowStats(ctx context.Context, accountI &stats.Requests, &stats.Tokens, &stats.Cost, + &stats.StandardCost, + &stats.UserCost, ); err != nil { 
return nil, err } @@ -1253,8 +1410,8 @@ func (r *usageLogRepository) GetBatchAPIKeyUsageStats(ctx context.Context, apiKe return result, nil } -// GetUsageTrendWithFilters returns usage trend data with optional user/api_key filters -func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID int64) (results []TrendDataPoint, err error) { +// GetUsageTrendWithFilters returns usage trend data with optional filters +func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) (results []TrendDataPoint, err error) { dateFormat := "YYYY-MM-DD" if granularity == "hour" { dateFormat = "YYYY-MM-DD HH24:00" @@ -1283,6 +1440,22 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start query += fmt.Sprintf(" AND api_key_id = $%d", len(args)+1) args = append(args, apiKeyID) } + if accountID > 0 { + query += fmt.Sprintf(" AND account_id = $%d", len(args)+1) + args = append(args, accountID) + } + if groupID > 0 { + query += fmt.Sprintf(" AND group_id = $%d", len(args)+1) + args = append(args, groupID) + } + if model != "" { + query += fmt.Sprintf(" AND model = $%d", len(args)+1) + args = append(args, model) + } + if stream != nil { + query += fmt.Sprintf(" AND stream = $%d", len(args)+1) + args = append(args, *stream) + } query += " GROUP BY date ORDER BY date ASC" rows, err := r.sql.QueryContext(ctx, query, args...) 
@@ -1305,9 +1478,15 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start return results, nil } -// GetModelStatsWithFilters returns model statistics with optional user/api_key filters -func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID int64) (results []ModelStat, err error) { - query := ` +// GetModelStatsWithFilters returns model statistics with optional filters +func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) (results []ModelStat, err error) { + actualCostExpr := "COALESCE(SUM(actual_cost), 0) as actual_cost" + // 当仅按 account_id 聚合时,实际费用使用账号倍率(total_cost * account_rate_multiplier)。 + if accountID > 0 && userID == 0 && apiKeyID == 0 { + actualCostExpr = "COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as actual_cost" + } + + query := fmt.Sprintf(` SELECT model, COUNT(*) as requests, @@ -1315,10 +1494,10 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start COALESCE(SUM(output_tokens), 0) as output_tokens, COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as total_tokens, COALESCE(SUM(total_cost), 0) as cost, - COALESCE(SUM(actual_cost), 0) as actual_cost + %s FROM usage_logs WHERE created_at >= $1 AND created_at < $2 - ` + `, actualCostExpr) args := []any{startTime, endTime} if userID > 0 { @@ -1333,6 +1512,14 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start query += fmt.Sprintf(" AND account_id = $%d", len(args)+1) args = append(args, accountID) } + if groupID > 0 { + query += fmt.Sprintf(" AND group_id = $%d", len(args)+1) + args = append(args, groupID) + } + if stream != nil { + query += fmt.Sprintf(" AND stream = $%d", len(args)+1) + args = append(args, *stream) + } query += " GROUP BY model ORDER BY 
total_tokens DESC" rows, err := r.sql.QueryContext(ctx, query, args...) @@ -1440,12 +1627,14 @@ func (r *usageLogRepository) GetStatsWithFilters(ctx context.Context, filters Us COALESCE(SUM(cache_creation_tokens + cache_read_tokens), 0) as total_cache_tokens, COALESCE(SUM(total_cost), 0) as total_cost, COALESCE(SUM(actual_cost), 0) as total_actual_cost, + COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as total_account_cost, COALESCE(AVG(duration_ms), 0) as avg_duration_ms FROM usage_logs %s `, buildWhere(conditions)) stats := &UsageStats{} + var totalAccountCost float64 if err := scanSingleRow( ctx, r.sql, @@ -1457,10 +1646,14 @@ func (r *usageLogRepository) GetStatsWithFilters(ctx context.Context, filters Us &stats.TotalCacheTokens, &stats.TotalCost, &stats.TotalActualCost, + &totalAccountCost, &stats.AverageDurationMs, ); err != nil { return nil, err } + if filters.AccountID > 0 { + stats.TotalAccountCost = &totalAccountCost + } stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheTokens return stats, nil } @@ -1487,7 +1680,8 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID COUNT(*) as requests, COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) as tokens, COALESCE(SUM(total_cost), 0) as cost, - COALESCE(SUM(actual_cost), 0) as actual_cost + COALESCE(SUM(total_cost * COALESCE(account_rate_multiplier, 1)), 0) as actual_cost, + COALESCE(SUM(actual_cost), 0) as user_cost FROM usage_logs WHERE account_id = $1 AND created_at >= $2 AND created_at < $3 GROUP BY date @@ -1514,7 +1708,8 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID var tokens int64 var cost float64 var actualCost float64 - if err = rows.Scan(&date, &requests, &tokens, &cost, &actualCost); err != nil { + var userCost float64 + if err = rows.Scan(&date, &requests, &tokens, &cost, &actualCost, &userCost); err != nil { return nil, err } t, _ := 
time.Parse("2006-01-02", date) @@ -1525,19 +1720,21 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID Tokens: tokens, Cost: cost, ActualCost: actualCost, + UserCost: userCost, }) } if err = rows.Err(); err != nil { return nil, err } - var totalActualCost, totalStandardCost float64 + var totalAccountCost, totalUserCost, totalStandardCost float64 var totalRequests, totalTokens int64 var highestCostDay, highestRequestDay *AccountUsageHistory for i := range history { h := &history[i] - totalActualCost += h.ActualCost + totalAccountCost += h.ActualCost + totalUserCost += h.UserCost totalStandardCost += h.Cost totalRequests += h.Requests totalTokens += h.Tokens @@ -1564,11 +1761,13 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID summary := AccountUsageSummary{ Days: daysCount, ActualDaysUsed: actualDaysUsed, - TotalCost: totalActualCost, + TotalCost: totalAccountCost, + TotalUserCost: totalUserCost, TotalStandardCost: totalStandardCost, TotalRequests: totalRequests, TotalTokens: totalTokens, - AvgDailyCost: totalActualCost / float64(actualDaysUsed), + AvgDailyCost: totalAccountCost / float64(actualDaysUsed), + AvgDailyUserCost: totalUserCost / float64(actualDaysUsed), AvgDailyRequests: float64(totalRequests) / float64(actualDaysUsed), AvgDailyTokens: float64(totalTokens) / float64(actualDaysUsed), AvgDurationMs: avgDuration, @@ -1580,11 +1779,13 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID summary.Today = &struct { Date string `json:"date"` Cost float64 `json:"cost"` + UserCost float64 `json:"user_cost"` Requests int64 `json:"requests"` Tokens int64 `json:"tokens"` }{ Date: history[i].Date, Cost: history[i].ActualCost, + UserCost: history[i].UserCost, Requests: history[i].Requests, Tokens: history[i].Tokens, } @@ -1597,11 +1798,13 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID Date string `json:"date"` Label string 
`json:"label"` Cost float64 `json:"cost"` + UserCost float64 `json:"user_cost"` Requests int64 `json:"requests"` }{ Date: highestCostDay.Date, Label: highestCostDay.Label, Cost: highestCostDay.ActualCost, + UserCost: highestCostDay.UserCost, Requests: highestCostDay.Requests, } } @@ -1612,15 +1815,17 @@ func (r *usageLogRepository) GetAccountUsageStats(ctx context.Context, accountID Label string `json:"label"` Requests int64 `json:"requests"` Cost float64 `json:"cost"` + UserCost float64 `json:"user_cost"` }{ Date: highestRequestDay.Date, Label: highestRequestDay.Label, Requests: highestRequestDay.Requests, Cost: highestRequestDay.ActualCost, + UserCost: highestRequestDay.UserCost, } } - models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID) + models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil) if err != nil { models = []ModelStat{} } @@ -1847,35 +2052,37 @@ func (r *usageLogRepository) loadSubscriptions(ctx context.Context, ids []int64) func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, error) { var ( - id int64 - userID int64 - apiKeyID int64 - accountID int64 - requestID sql.NullString - model string - groupID sql.NullInt64 - subscriptionID sql.NullInt64 - inputTokens int - outputTokens int - cacheCreationTokens int - cacheReadTokens int - cacheCreation5m int - cacheCreation1h int - inputCost float64 - outputCost float64 - cacheCreationCost float64 - cacheReadCost float64 - totalCost float64 - actualCost float64 - rateMultiplier float64 - billingType int16 - stream bool - durationMs sql.NullInt64 - firstTokenMs sql.NullInt64 - userAgent sql.NullString - imageCount int - imageSize sql.NullString - createdAt time.Time + id int64 + userID int64 + apiKeyID int64 + accountID int64 + requestID sql.NullString + model string + groupID sql.NullInt64 + subscriptionID sql.NullInt64 + inputTokens int + outputTokens int + cacheCreationTokens int + cacheReadTokens int + 
cacheCreation5m int + cacheCreation1h int + inputCost float64 + outputCost float64 + cacheCreationCost float64 + cacheReadCost float64 + totalCost float64 + actualCost float64 + rateMultiplier float64 + accountRateMultiplier sql.NullFloat64 + billingType int16 + stream bool + durationMs sql.NullInt64 + firstTokenMs sql.NullInt64 + userAgent sql.NullString + ipAddress sql.NullString + imageCount int + imageSize sql.NullString + createdAt time.Time ) if err := scanner.Scan( @@ -1900,11 +2107,13 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e &totalCost, &actualCost, &rateMultiplier, + &accountRateMultiplier, &billingType, &stream, &durationMs, &firstTokenMs, &userAgent, + &ipAddress, &imageCount, &imageSize, &createdAt, @@ -1931,6 +2140,7 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e TotalCost: totalCost, ActualCost: actualCost, RateMultiplier: rateMultiplier, + AccountRateMultiplier: nullFloat64Ptr(accountRateMultiplier), BillingType: int8(billingType), Stream: stream, ImageCount: imageCount, @@ -1959,6 +2169,9 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e if userAgent.Valid { log.UserAgent = &userAgent.String } + if ipAddress.Valid { + log.IPAddress = &ipAddress.String + } if imageSize.Valid { log.ImageSize = &imageSize.String } @@ -2034,6 +2247,14 @@ func nullInt(v *int) sql.NullInt64 { return sql.NullInt64{Int64: int64(*v), Valid: true} } +func nullFloat64Ptr(v sql.NullFloat64) *float64 { + if !v.Valid { + return nil + } + out := v.Float64 + return &out +} + func nullString(v *string) sql.NullString { if v == nil || *v == "" { return sql.NullString{} diff --git a/backend/internal/repository/usage_log_repo_integration_test.go b/backend/internal/repository/usage_log_repo_integration_test.go index 7193718f..7174be18 100644 --- a/backend/internal/repository/usage_log_repo_integration_test.go +++ 
b/backend/internal/repository/usage_log_repo_integration_test.go @@ -37,6 +37,12 @@ func TestUsageLogRepoSuite(t *testing.T) { suite.Run(t, new(UsageLogRepoSuite)) } +// truncateToDayUTC 截断到 UTC 日期边界(测试辅助函数) +func truncateToDayUTC(t time.Time) time.Time { + t = t.UTC() + return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC) +} + func (s *UsageLogRepoSuite) createUsageLog(user *service.User, apiKey *service.APIKey, account *service.Account, inputTokens, outputTokens int, cost float64, createdAt time.Time) *service.UsageLog { log := &service.UsageLog{ UserID: user.ID, @@ -96,6 +102,34 @@ func (s *UsageLogRepoSuite) TestGetByID_NotFound() { s.Require().Error(err, "expected error for non-existent ID") } +func (s *UsageLogRepoSuite) TestGetByID_ReturnsAccountRateMultiplier() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "getbyid-mult@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-getbyid-mult", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-getbyid-mult"}) + + m := 0.5 + log := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + RequestID: uuid.New().String(), + Model: "claude-3", + InputTokens: 10, + OutputTokens: 20, + TotalCost: 1.0, + ActualCost: 2.0, + AccountRateMultiplier: &m, + CreatedAt: timezone.Today().Add(2 * time.Hour), + } + _, err := s.repo.Create(s.ctx, log) + s.Require().NoError(err) + + got, err := s.repo.GetByID(s.ctx, log.ID) + s.Require().NoError(err) + s.Require().NotNil(got.AccountRateMultiplier) + s.Require().InEpsilon(0.5, *got.AccountRateMultiplier, 0.0001) +} + // --- Delete --- func (s *UsageLogRepoSuite) TestDelete() { @@ -198,14 +232,14 @@ func (s *UsageLogRepoSuite) TestListWithFilters() { // --- GetDashboardStats --- func (s *UsageLogRepoSuite) TestDashboardStats_TodayTotalsAndPerformance() { - now := time.Now() - todayStart := timezone.Today() + now := time.Now().UTC() + 
todayStart := truncateToDayUTC(now) baseStats, err := s.repo.GetDashboardStats(s.ctx) s.Require().NoError(err, "GetDashboardStats base") userToday := mustCreateUser(s.T(), s.client, &service.User{ Email: "today@example.com", - CreatedAt: maxTime(todayStart.Add(10*time.Second), now.Add(-10*time.Second)), + CreatedAt: testMaxTime(todayStart.Add(10*time.Second), now.Add(-10*time.Second)), UpdatedAt: now, }) userOld := mustCreateUser(s.T(), s.client, &service.User{ @@ -238,7 +272,7 @@ func (s *UsageLogRepoSuite) TestDashboardStats_TodayTotalsAndPerformance() { TotalCost: 1.5, ActualCost: 1.2, DurationMs: &d1, - CreatedAt: maxTime(todayStart.Add(2*time.Minute), now.Add(-2*time.Minute)), + CreatedAt: testMaxTime(todayStart.Add(2*time.Minute), now.Add(-2*time.Minute)), } _, err = s.repo.Create(s.ctx, logToday) s.Require().NoError(err, "Create logToday") @@ -273,6 +307,11 @@ func (s *UsageLogRepoSuite) TestDashboardStats_TodayTotalsAndPerformance() { _, err = s.repo.Create(s.ctx, logPerf) s.Require().NoError(err, "Create logPerf") + aggRepo := newDashboardAggregationRepositoryWithSQL(s.tx) + aggStart := todayStart.Add(-2 * time.Hour) + aggEnd := now.Add(2 * time.Minute) + s.Require().NoError(aggRepo.AggregateRange(s.ctx, aggStart, aggEnd), "AggregateRange") + stats, err := s.repo.GetDashboardStats(s.ctx) s.Require().NoError(err, "GetDashboardStats") @@ -303,6 +342,80 @@ func (s *UsageLogRepoSuite) TestDashboardStats_TodayTotalsAndPerformance() { s.Require().Equal(wantTpm, stats.Tpm, "Tpm mismatch") } +func (s *UsageLogRepoSuite) TestDashboardStatsWithRange_Fallback() { + now := time.Now().UTC() + todayStart := truncateToDayUTC(now) + rangeStart := todayStart.Add(-24 * time.Hour) + rangeEnd := now.Add(1 * time.Second) + + user1 := mustCreateUser(s.T(), s.client, &service.User{Email: "range-u1@test.com"}) + user2 := mustCreateUser(s.T(), s.client, &service.User{Email: "range-u2@test.com"}) + apiKey1 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user1.ID, Key: 
"sk-range-1", Name: "k1"}) + apiKey2 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user2.ID, Key: "sk-range-2", Name: "k2"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-range"}) + + d1, d2, d3 := 100, 200, 300 + logOutside := &service.UsageLog{ + UserID: user1.ID, + APIKeyID: apiKey1.ID, + AccountID: account.ID, + Model: "claude-3", + InputTokens: 7, + OutputTokens: 8, + TotalCost: 0.8, + ActualCost: 0.7, + DurationMs: &d3, + CreatedAt: rangeStart.Add(-1 * time.Hour), + } + _, err := s.repo.Create(s.ctx, logOutside) + s.Require().NoError(err) + + logRange := &service.UsageLog{ + UserID: user1.ID, + APIKeyID: apiKey1.ID, + AccountID: account.ID, + Model: "claude-3", + InputTokens: 10, + OutputTokens: 20, + CacheCreationTokens: 1, + CacheReadTokens: 2, + TotalCost: 1.0, + ActualCost: 0.9, + DurationMs: &d1, + CreatedAt: rangeStart.Add(2 * time.Hour), + } + _, err = s.repo.Create(s.ctx, logRange) + s.Require().NoError(err) + + logToday := &service.UsageLog{ + UserID: user2.ID, + APIKeyID: apiKey2.ID, + AccountID: account.ID, + Model: "claude-3", + InputTokens: 5, + OutputTokens: 6, + CacheReadTokens: 1, + TotalCost: 0.5, + ActualCost: 0.5, + DurationMs: &d2, + CreatedAt: now, + } + _, err = s.repo.Create(s.ctx, logToday) + s.Require().NoError(err) + + stats, err := s.repo.GetDashboardStatsWithRange(s.ctx, rangeStart, rangeEnd) + s.Require().NoError(err) + s.Require().Equal(int64(2), stats.TotalRequests) + s.Require().Equal(int64(15), stats.TotalInputTokens) + s.Require().Equal(int64(26), stats.TotalOutputTokens) + s.Require().Equal(int64(1), stats.TotalCacheCreationTokens) + s.Require().Equal(int64(3), stats.TotalCacheReadTokens) + s.Require().Equal(int64(45), stats.TotalTokens) + s.Require().Equal(1.5, stats.TotalCost) + s.Require().Equal(1.4, stats.TotalActualCost) + s.Require().InEpsilon(150.0, stats.AverageDurationMs, 0.0001) +} + // --- GetUserDashboardStats --- func (s *UsageLogRepoSuite) TestGetUserDashboardStats() 
{ @@ -325,12 +438,202 @@ func (s *UsageLogRepoSuite) TestGetAccountTodayStats() { apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-acctoday", Name: "k"}) account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-today"}) - s.createUsageLog(user, apiKey, account, 10, 20, 0.5, time.Now()) + createdAt := timezone.Today().Add(1 * time.Hour) + + m1 := 1.5 + m2 := 0.0 + _, err := s.repo.Create(s.ctx, &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + RequestID: uuid.New().String(), + Model: "claude-3", + InputTokens: 10, + OutputTokens: 20, + TotalCost: 1.0, + ActualCost: 2.0, + AccountRateMultiplier: &m1, + CreatedAt: createdAt, + }) + s.Require().NoError(err) + _, err = s.repo.Create(s.ctx, &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + RequestID: uuid.New().String(), + Model: "claude-3", + InputTokens: 5, + OutputTokens: 5, + TotalCost: 0.5, + ActualCost: 1.0, + AccountRateMultiplier: &m2, + CreatedAt: createdAt, + }) + s.Require().NoError(err) stats, err := s.repo.GetAccountTodayStats(s.ctx, account.ID) s.Require().NoError(err, "GetAccountTodayStats") - s.Require().Equal(int64(1), stats.Requests) - s.Require().Equal(int64(30), stats.Tokens) + s.Require().Equal(int64(2), stats.Requests) + s.Require().Equal(int64(40), stats.Tokens) + // account cost = SUM(total_cost * account_rate_multiplier) + s.Require().InEpsilon(1.5, stats.Cost, 0.0001) + // standard cost = SUM(total_cost) + s.Require().InEpsilon(1.5, stats.StandardCost, 0.0001) + // user cost = SUM(actual_cost) + s.Require().InEpsilon(3.0, stats.UserCost, 0.0001) +} + +func (s *UsageLogRepoSuite) TestDashboardAggregationConsistency() { + now := time.Now().UTC().Truncate(time.Second) + // 使用固定的时间偏移确保 hour1 和 hour2 在同一天且都在过去 + // 选择当天 02:00 和 03:00 作为测试时间点(基于 now 的日期) + dayStart := truncateToDayUTC(now) + hour1 := dayStart.Add(2 * time.Hour) // 当天 02:00 + hour2 := dayStart.Add(3 * 
time.Hour) // 当天 03:00 + // 如果当前时间早于 hour2,则使用昨天的时间 + if now.Before(hour2.Add(time.Hour)) { + dayStart = dayStart.Add(-24 * time.Hour) + hour1 = dayStart.Add(2 * time.Hour) + hour2 = dayStart.Add(3 * time.Hour) + } + + user1 := mustCreateUser(s.T(), s.client, &service.User{Email: "agg-u1@test.com"}) + user2 := mustCreateUser(s.T(), s.client, &service.User{Email: "agg-u2@test.com"}) + apiKey1 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user1.ID, Key: "sk-agg-1", Name: "k1"}) + apiKey2 := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user2.ID, Key: "sk-agg-2", Name: "k2"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-agg"}) + + d1, d2, d3 := 100, 200, 150 + log1 := &service.UsageLog{ + UserID: user1.ID, + APIKeyID: apiKey1.ID, + AccountID: account.ID, + Model: "claude-3", + InputTokens: 10, + OutputTokens: 20, + CacheCreationTokens: 2, + CacheReadTokens: 1, + TotalCost: 1.0, + ActualCost: 0.9, + DurationMs: &d1, + CreatedAt: hour1.Add(5 * time.Minute), + } + _, err := s.repo.Create(s.ctx, log1) + s.Require().NoError(err) + + log2 := &service.UsageLog{ + UserID: user1.ID, + APIKeyID: apiKey1.ID, + AccountID: account.ID, + Model: "claude-3", + InputTokens: 5, + OutputTokens: 5, + TotalCost: 0.5, + ActualCost: 0.5, + DurationMs: &d2, + CreatedAt: hour1.Add(20 * time.Minute), + } + _, err = s.repo.Create(s.ctx, log2) + s.Require().NoError(err) + + log3 := &service.UsageLog{ + UserID: user2.ID, + APIKeyID: apiKey2.ID, + AccountID: account.ID, + Model: "claude-3", + InputTokens: 7, + OutputTokens: 8, + TotalCost: 0.7, + ActualCost: 0.7, + DurationMs: &d3, + CreatedAt: hour2.Add(10 * time.Minute), + } + _, err = s.repo.Create(s.ctx, log3) + s.Require().NoError(err) + + aggRepo := newDashboardAggregationRepositoryWithSQL(s.tx) + aggStart := hour1.Add(-5 * time.Minute) + aggEnd := hour2.Add(time.Hour) // 确保覆盖 hour2 的所有数据 + s.Require().NoError(aggRepo.AggregateRange(s.ctx, aggStart, aggEnd)) + + type hourlyRow struct 
{ + totalRequests int64 + inputTokens int64 + outputTokens int64 + cacheCreationTokens int64 + cacheReadTokens int64 + totalCost float64 + actualCost float64 + totalDurationMs int64 + activeUsers int64 + } + fetchHourly := func(bucketStart time.Time) hourlyRow { + var row hourlyRow + err := scanSingleRow(s.ctx, s.tx, ` + SELECT total_requests, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, + total_cost, actual_cost, total_duration_ms, active_users + FROM usage_dashboard_hourly + WHERE bucket_start = $1 + `, []any{bucketStart}, &row.totalRequests, &row.inputTokens, &row.outputTokens, + &row.cacheCreationTokens, &row.cacheReadTokens, &row.totalCost, &row.actualCost, + &row.totalDurationMs, &row.activeUsers, + ) + s.Require().NoError(err) + return row + } + + hour1Row := fetchHourly(hour1) + s.Require().Equal(int64(2), hour1Row.totalRequests) + s.Require().Equal(int64(15), hour1Row.inputTokens) + s.Require().Equal(int64(25), hour1Row.outputTokens) + s.Require().Equal(int64(2), hour1Row.cacheCreationTokens) + s.Require().Equal(int64(1), hour1Row.cacheReadTokens) + s.Require().Equal(1.5, hour1Row.totalCost) + s.Require().Equal(1.4, hour1Row.actualCost) + s.Require().Equal(int64(300), hour1Row.totalDurationMs) + s.Require().Equal(int64(1), hour1Row.activeUsers) + + hour2Row := fetchHourly(hour2) + s.Require().Equal(int64(1), hour2Row.totalRequests) + s.Require().Equal(int64(7), hour2Row.inputTokens) + s.Require().Equal(int64(8), hour2Row.outputTokens) + s.Require().Equal(int64(0), hour2Row.cacheCreationTokens) + s.Require().Equal(int64(0), hour2Row.cacheReadTokens) + s.Require().Equal(0.7, hour2Row.totalCost) + s.Require().Equal(0.7, hour2Row.actualCost) + s.Require().Equal(int64(150), hour2Row.totalDurationMs) + s.Require().Equal(int64(1), hour2Row.activeUsers) + + var daily struct { + totalRequests int64 + inputTokens int64 + outputTokens int64 + cacheCreationTokens int64 + cacheReadTokens int64 + totalCost float64 + actualCost float64 + 
totalDurationMs int64 + activeUsers int64 + } + err = scanSingleRow(s.ctx, s.tx, ` + SELECT total_requests, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, + total_cost, actual_cost, total_duration_ms, active_users + FROM usage_dashboard_daily + WHERE bucket_date = $1::date + `, []any{dayStart}, &daily.totalRequests, &daily.inputTokens, &daily.outputTokens, + &daily.cacheCreationTokens, &daily.cacheReadTokens, &daily.totalCost, &daily.actualCost, + &daily.totalDurationMs, &daily.activeUsers, + ) + s.Require().NoError(err) + s.Require().Equal(int64(3), daily.totalRequests) + s.Require().Equal(int64(22), daily.inputTokens) + s.Require().Equal(int64(33), daily.outputTokens) + s.Require().Equal(int64(2), daily.cacheCreationTokens) + s.Require().Equal(int64(1), daily.cacheReadTokens) + s.Require().Equal(2.2, daily.totalCost) + s.Require().Equal(2.1, daily.actualCost) + s.Require().Equal(int64(450), daily.totalDurationMs) + s.Require().Equal(int64(2), daily.activeUsers) } // --- GetBatchUserUsageStats --- @@ -398,7 +701,7 @@ func (s *UsageLogRepoSuite) TestGetGlobalStats() { s.Require().Equal(int64(45), stats.TotalOutputTokens) } -func maxTime(a, b time.Time) time.Time { +func testMaxTime(a, b time.Time) time.Time { if a.After(b) { return a } @@ -641,17 +944,17 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters() { endTime := base.Add(48 * time.Hour) // Test with user filter - trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0) + trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil) s.Require().NoError(err, "GetUsageTrendWithFilters user filter") s.Require().Len(trend, 2) // Test with apiKey filter - trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID) + trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil) s.Require().NoError(err, 
"GetUsageTrendWithFilters apiKey filter") s.Require().Len(trend, 2) // Test with both filters - trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID) + trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID, 0, 0, "", nil) s.Require().NoError(err, "GetUsageTrendWithFilters both filters") s.Require().Len(trend, 2) } @@ -668,7 +971,7 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters_HourlyGranularity() { startTime := base.Add(-1 * time.Hour) endTime := base.Add(3 * time.Hour) - trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0) + trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil) s.Require().NoError(err, "GetUsageTrendWithFilters hourly") s.Require().Len(trend, 2) } @@ -714,17 +1017,17 @@ func (s *UsageLogRepoSuite) TestGetModelStatsWithFilters() { endTime := base.Add(2 * time.Hour) // Test with user filter - stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0) + stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil) s.Require().NoError(err, "GetModelStatsWithFilters user filter") s.Require().Len(stats, 2) // Test with apiKey filter - stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0) + stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil) s.Require().NoError(err, "GetModelStatsWithFilters apiKey filter") s.Require().Len(stats, 2) // Test with account filter - stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID) + stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil) s.Require().NoError(err, "GetModelStatsWithFilters account filter") s.Require().Len(stats, 2) } diff --git a/backend/internal/repository/wire.go 
b/backend/internal/repository/wire.go index ba09f85e..77ed37e1 100644 --- a/backend/internal/repository/wire.go +++ b/backend/internal/repository/wire.go @@ -37,6 +37,16 @@ func ProvidePricingRemoteClient(cfg *config.Config) service.PricingRemoteClient return NewPricingRemoteClient(cfg.Update.ProxyURL) } +// ProvideSessionLimitCache 创建会话限制缓存 +// 用于 Anthropic OAuth/SetupToken 账号的并发会话数量控制 +func ProvideSessionLimitCache(rdb *redis.Client, cfg *config.Config) service.SessionLimitCache { + defaultIdleTimeoutMinutes := 5 // 默认 5 分钟空闲超时 + if cfg != nil && cfg.Gateway.SessionIdleTimeoutMinutes > 0 { + defaultIdleTimeoutMinutes = cfg.Gateway.SessionIdleTimeoutMinutes + } + return NewSessionLimitCache(rdb, defaultIdleTimeoutMinutes) +} + // ProviderSet is the Wire provider set for all repositories var ProviderSet = wire.NewSet( NewUserRepository, @@ -45,8 +55,11 @@ var ProviderSet = wire.NewSet( NewAccountRepository, NewProxyRepository, NewRedeemCodeRepository, + NewPromoCodeRepository, NewUsageLogRepository, + NewDashboardAggregationRepository, NewSettingRepository, + NewOpsRepository, NewUserSubscriptionRepository, NewUserAttributeDefinitionRepository, NewUserAttributeValueRepository, @@ -56,12 +69,18 @@ var ProviderSet = wire.NewSet( NewBillingCache, NewAPIKeyCache, NewTempUnschedCache, + NewTimeoutCounterCache, ProvideConcurrencyCache, + ProvideSessionLimitCache, + NewDashboardCache, NewEmailCache, NewIdentityCache, NewRedeemCache, NewUpdateCache, NewGeminiTokenCache, + NewSchedulerCache, + NewSchedulerOutboxRepository, + NewProxyLatencyCache, // HTTP service ports (DI Strategy A: return interface directly) NewTurnstileVerifier, diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go index 20e82be8..80f3e408 100644 --- a/backend/internal/server/api_contract_test.go +++ b/backend/internal/server/api_contract_test.go @@ -82,6 +82,8 @@ func TestAPIContracts(t *testing.T) { "name": "Key One", "group_id": null, "status": 
"active", + "ip_whitelist": null, + "ip_blacklist": null, "created_at": "2025-01-02T03:04:05Z", "updated_at": "2025-01-02T03:04:05Z" } @@ -116,6 +118,8 @@ func TestAPIContracts(t *testing.T) { "name": "Key One", "group_id": null, "status": "active", + "ip_whitelist": null, + "ip_blacklist": null, "created_at": "2025-01-02T03:04:05Z", "updated_at": "2025-01-02T03:04:05Z" } @@ -235,9 +239,10 @@ func TestAPIContracts(t *testing.T) { "cache_creation_cost": 0, "cache_read_cost": 0, "total_cost": 0.5, - "actual_cost": 0.5, - "rate_multiplier": 1, - "billing_type": 0, + "actual_cost": 0.5, + "rate_multiplier": 1, + "account_rate_multiplier": null, + "billing_type": 0, "stream": true, "duration_ms": 100, "first_token_ms": 50, @@ -283,6 +288,11 @@ func TestAPIContracts(t *testing.T) { service.SettingKeyDefaultConcurrency: "5", service.SettingKeyDefaultBalance: "1.25", + + service.SettingKeyOpsMonitoringEnabled: "false", + service.SettingKeyOpsRealtimeMonitoringEnabled: "true", + service.SettingKeyOpsQueryModeDefault: "auto", + service.SettingKeyOpsMetricsIntervalSeconds: "60", }) }, method: http.MethodGet, @@ -305,13 +315,17 @@ func TestAPIContracts(t *testing.T) { "turnstile_site_key": "site-key", "turnstile_secret_key_configured": true, "linuxdo_connect_enabled": false, - "linuxdo_connect_client_id": "", - "linuxdo_connect_client_secret_configured": false, - "linuxdo_connect_redirect_url": "", - "site_name": "Sub2API", - "site_logo": "", - "site_subtitle": "Subtitle", - "api_base_url": "https://api.example.com", + "linuxdo_connect_client_id": "", + "linuxdo_connect_client_secret_configured": false, + "linuxdo_connect_redirect_url": "", + "ops_monitoring_enabled": false, + "ops_realtime_monitoring_enabled": true, + "ops_query_mode_default": "auto", + "ops_metrics_interval_seconds": 60, + "site_name": "Sub2API", + "site_logo": "", + "site_subtitle": "Subtitle", + "api_base_url": "https://api.example.com", "contact_info": "support", "doc_url": "https://docs.example.com", 
"default_concurrency": 5, @@ -322,7 +336,32 @@ func TestAPIContracts(t *testing.T) { "fallback_model_gemini": "gemini-2.5-pro", "fallback_model_openai": "gpt-4o", "enable_identity_patch": true, - "identity_patch_prompt": "" + "identity_patch_prompt": "", + "home_content": "" + } + }`, + }, + { + name: "POST /api/v1/admin/accounts/bulk-update", + method: http.MethodPost, + path: "/api/v1/admin/accounts/bulk-update", + body: `{"account_ids":[101,102],"schedulable":false}`, + headers: map[string]string{ + "Content-Type": "application/json", + }, + wantStatus: http.StatusOK, + wantJSON: `{ + "code": 0, + "message": "success", + "data": { + "success": 2, + "failed": 0, + "success_ids": [101, 102], + "failed_ids": [], + "results": [ + {"account_id": 101, "success": true}, + {"account_id": 102, "success": true} + ] } }`, }, @@ -377,6 +416,9 @@ func newContractDeps(t *testing.T) *contractDeps { apiKeyCache := stubApiKeyCache{} groupRepo := stubGroupRepo{} userSubRepo := stubUserSubscriptionRepo{} + accountRepo := stubAccountRepo{} + proxyRepo := stubProxyRepo{} + redeemRepo := stubRedeemCodeRepo{} cfg := &config.Config{ Default: config.DefaultConfig{ @@ -385,19 +427,21 @@ func newContractDeps(t *testing.T) *contractDeps { RunMode: config.RunModeStandard, } - userService := service.NewUserService(userRepo) + userService := service.NewUserService(userRepo, nil) apiKeyService := service.NewAPIKeyService(apiKeyRepo, userRepo, groupRepo, userSubRepo, apiKeyCache, cfg) usageRepo := newStubUsageLogRepo() - usageService := service.NewUsageService(usageRepo, userRepo, nil) + usageService := service.NewUsageService(usageRepo, userRepo, nil, nil) settingRepo := newStubSettingRepo() settingService := service.NewSettingService(settingRepo, cfg) - authHandler := handler.NewAuthHandler(cfg, nil, userService, settingService) + adminService := service.NewAdminService(userRepo, groupRepo, &accountRepo, proxyRepo, apiKeyRepo, redeemRepo, nil, nil, nil, nil) + authHandler := 
handler.NewAuthHandler(cfg, nil, userService, settingService, nil) apiKeyHandler := handler.NewAPIKeyHandler(apiKeyService) usageHandler := handler.NewUsageHandler(usageService, apiKeyService) - adminSettingHandler := adminhandler.NewSettingHandler(settingService, nil, nil) + adminSettingHandler := adminhandler.NewSettingHandler(settingService, nil, nil, nil) + adminAccountHandler := adminhandler.NewAccountHandler(adminService, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil) jwtAuth := func(c *gin.Context) { c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{ @@ -437,6 +481,7 @@ func newContractDeps(t *testing.T) *contractDeps { v1Admin := v1.Group("/admin") v1Admin.Use(adminAuth) v1Admin.GET("/settings", adminSettingHandler.GetSettings) + v1Admin.POST("/accounts/bulk-update", adminAccountHandler.BulkUpdate) return &contractDeps{ now: now, @@ -561,6 +606,18 @@ func (stubApiKeyCache) SetDailyUsageExpiry(ctx context.Context, apiKey string, t return nil } +func (stubApiKeyCache) GetAuthCache(ctx context.Context, key string) (*service.APIKeyAuthCacheEntry, error) { + return nil, nil +} + +func (stubApiKeyCache) SetAuthCache(ctx context.Context, key string, entry *service.APIKeyAuthCacheEntry, ttl time.Duration) error { + return nil +} + +func (stubApiKeyCache) DeleteAuthCache(ctx context.Context, key string) error { + return nil +} + type stubGroupRepo struct{} func (stubGroupRepo) Create(ctx context.Context, group *service.Group) error { @@ -571,6 +628,10 @@ func (stubGroupRepo) GetByID(ctx context.Context, id int64) (*service.Group, err return nil, service.ErrGroupNotFound } +func (stubGroupRepo) GetByIDLite(ctx context.Context, id int64) (*service.Group, error) { + return nil, service.ErrGroupNotFound +} + func (stubGroupRepo) Update(ctx context.Context, group *service.Group) error { return errors.New("not implemented") } @@ -611,6 +672,251 @@ func (stubGroupRepo) DeleteAccountGroupsByGroupID(ctx context.Context, groupID i return 0, errors.New("not 
implemented") } +type stubAccountRepo struct { + bulkUpdateIDs []int64 +} + +func (s *stubAccountRepo) Create(ctx context.Context, account *service.Account) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) GetByID(ctx context.Context, id int64) (*service.Account, error) { + return nil, service.ErrAccountNotFound +} + +func (s *stubAccountRepo) GetByIDs(ctx context.Context, ids []int64) ([]*service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ExistsByID(ctx context.Context, id int64) (bool, error) { + return false, errors.New("not implemented") +} + +func (s *stubAccountRepo) GetByCRSAccountID(ctx context.Context, crsAccountID string) (*service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) Update(ctx context.Context, account *service.Account) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) List(ctx context.Context, params pagination.PaginationParams) ([]service.Account, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, accountType, status, search string) ([]service.Account, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListByGroup(ctx context.Context, groupID int64) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListActive(ctx context.Context) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListByPlatform(ctx context.Context, platform string) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) 
UpdateLastUsed(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) BatchUpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) SetError(ctx context.Context, id int64, errorMsg string) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) ClearError(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) SetSchedulable(ctx context.Context, id int64, schedulable bool) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) AutoPauseExpiredAccounts(ctx context.Context, now time.Time) (int64, error) { + return 0, errors.New("not implemented") +} + +func (s *stubAccountRepo) BindGroups(ctx context.Context, accountID int64, groupIDs []int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulable(ctx context.Context) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulableByGroupID(ctx context.Context, groupID int64) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulableByPlatform(ctx context.Context, platform string) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulableByPlatforms(ctx context.Context, platforms []string) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s *stubAccountRepo) ListSchedulableByGroupIDAndPlatforms(ctx context.Context, groupID int64, platforms []string) ([]service.Account, error) { + return nil, errors.New("not implemented") +} + +func (s 
*stubAccountRepo) SetRateLimited(ctx context.Context, id int64, resetAt time.Time) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope service.AntigravityQuotaScope, resetAt time.Time) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) SetOverloaded(ctx context.Context, id int64, until time.Time) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) ClearTempUnschedulable(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) ClearRateLimit(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) ClearModelRateLimits(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) UpdateExtra(ctx context.Context, id int64, updates map[string]any) error { + return errors.New("not implemented") +} + +func (s *stubAccountRepo) BulkUpdate(ctx context.Context, ids []int64, updates service.AccountBulkUpdate) (int64, error) { + s.bulkUpdateIDs = append([]int64{}, ids...) 
+ return int64(len(ids)), nil +} + +type stubProxyRepo struct{} + +func (stubProxyRepo) Create(ctx context.Context, proxy *service.Proxy) error { + return errors.New("not implemented") +} + +func (stubProxyRepo) GetByID(ctx context.Context, id int64) (*service.Proxy, error) { + return nil, service.ErrProxyNotFound +} + +func (stubProxyRepo) Update(ctx context.Context, proxy *service.Proxy) error { + return errors.New("not implemented") +} + +func (stubProxyRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (stubProxyRepo) List(ctx context.Context, params pagination.PaginationParams) ([]service.Proxy, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubProxyRepo) ListWithFilters(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]service.Proxy, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubProxyRepo) ListWithFiltersAndAccountCount(ctx context.Context, params pagination.PaginationParams, protocol, status, search string) ([]service.ProxyWithAccountCount, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubProxyRepo) ListActive(ctx context.Context) ([]service.Proxy, error) { + return nil, errors.New("not implemented") +} + +func (stubProxyRepo) ListActiveWithAccountCount(ctx context.Context) ([]service.ProxyWithAccountCount, error) { + return nil, errors.New("not implemented") +} + +func (stubProxyRepo) ExistsByHostPortAuth(ctx context.Context, host string, port int, username, password string) (bool, error) { + return false, errors.New("not implemented") +} + +func (stubProxyRepo) CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error) { + return 0, errors.New("not implemented") +} + +func (stubProxyRepo) ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]service.ProxyAccountSummary, 
error) { + return nil, errors.New("not implemented") +} + +type stubRedeemCodeRepo struct{} + +func (stubRedeemCodeRepo) Create(ctx context.Context, code *service.RedeemCode) error { + return errors.New("not implemented") +} + +func (stubRedeemCodeRepo) CreateBatch(ctx context.Context, codes []service.RedeemCode) error { + return errors.New("not implemented") +} + +func (stubRedeemCodeRepo) GetByID(ctx context.Context, id int64) (*service.RedeemCode, error) { + return nil, service.ErrRedeemCodeNotFound +} + +func (stubRedeemCodeRepo) GetByCode(ctx context.Context, code string) (*service.RedeemCode, error) { + return nil, service.ErrRedeemCodeNotFound +} + +func (stubRedeemCodeRepo) Update(ctx context.Context, code *service.RedeemCode) error { + return errors.New("not implemented") +} + +func (stubRedeemCodeRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} + +func (stubRedeemCodeRepo) Use(ctx context.Context, id, userID int64) error { + return errors.New("not implemented") +} + +func (stubRedeemCodeRepo) List(ctx context.Context, params pagination.PaginationParams) ([]service.RedeemCode, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubRedeemCodeRepo) ListWithFilters(ctx context.Context, params pagination.PaginationParams, codeType, status, search string) ([]service.RedeemCode, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} + +func (stubRedeemCodeRepo) ListByUser(ctx context.Context, userID int64, limit int) ([]service.RedeemCode, error) { + return nil, errors.New("not implemented") +} + type stubUserSubscriptionRepo struct{} func (stubUserSubscriptionRepo) Create(ctx context.Context, sub *service.UserSubscription) error { @@ -729,12 +1035,12 @@ func (r *stubApiKeyRepo) GetByID(ctx context.Context, id int64) (*service.APIKey return &clone, nil } -func (r *stubApiKeyRepo) GetOwnerID(ctx context.Context, id int64) (int64, 
error) { +func (r *stubApiKeyRepo) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { key, ok := r.byID[id] if !ok { - return 0, service.ErrAPIKeyNotFound + return "", 0, service.ErrAPIKeyNotFound } - return key.UserID, nil + return key.Key, key.UserID, nil } func (r *stubApiKeyRepo) GetByKey(ctx context.Context, key string) (*service.APIKey, error) { @@ -746,6 +1052,10 @@ func (r *stubApiKeyRepo) GetByKey(ctx context.Context, key string) (*service.API return &clone, nil } +func (r *stubApiKeyRepo) GetByKeyForAuth(ctx context.Context, key string) (*service.APIKey, error) { + return r.GetByKey(ctx, key) +} + func (r *stubApiKeyRepo) Update(ctx context.Context, key *service.APIKey) error { if key == nil { return errors.New("nil key") @@ -860,6 +1170,14 @@ func (r *stubApiKeyRepo) CountByGroupID(ctx context.Context, groupID int64) (int return 0, errors.New("not implemented") } +func (r *stubApiKeyRepo) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) { + return nil, errors.New("not implemented") +} + type stubUsageLogRepo struct { userLogs map[int64][]service.UsageLog } @@ -928,11 +1246,11 @@ func (r *stubUsageLogRepo) GetDashboardStats(ctx context.Context) (*usagestats.D return nil, errors.New("not implemented") } -func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID int64) ([]usagestats.TrendDataPoint, error) { +func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) { return nil, errors.New("not implemented") } -func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, 
userID, apiKeyID, accountID int64) ([]usagestats.ModelStat, error) { +func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) { return nil, errors.New("not implemented") } diff --git a/backend/internal/server/http.go b/backend/internal/server/http.go index a8740ecc..52d5c926 100644 --- a/backend/internal/server/http.go +++ b/backend/internal/server/http.go @@ -13,6 +13,7 @@ import ( "github.com/gin-gonic/gin" "github.com/google/wire" + "github.com/redis/go-redis/v9" ) // ProviderSet 提供服务器层的依赖 @@ -30,6 +31,9 @@ func ProvideRouter( apiKeyAuth middleware2.APIKeyAuthMiddleware, apiKeyService *service.APIKeyService, subscriptionService *service.SubscriptionService, + opsService *service.OpsService, + settingService *service.SettingService, + redisClient *redis.Client, ) *gin.Engine { if cfg.Server.Mode == "release" { gin.SetMode(gin.ReleaseMode) @@ -47,7 +51,7 @@ func ProvideRouter( } } - return SetupRouter(r, handlers, jwtAuth, adminAuth, apiKeyAuth, apiKeyService, subscriptionService, cfg) + return SetupRouter(r, handlers, jwtAuth, adminAuth, apiKeyAuth, apiKeyService, subscriptionService, opsService, settingService, cfg, redisClient) } // ProvideHTTPServer 提供 HTTP 服务器 diff --git a/backend/internal/server/middleware/admin_auth.go b/backend/internal/server/middleware/admin_auth.go index e02a7b0a..8f30107c 100644 --- a/backend/internal/server/middleware/admin_auth.go +++ b/backend/internal/server/middleware/admin_auth.go @@ -30,6 +30,20 @@ func adminAuth( settingService *service.SettingService, ) gin.HandlerFunc { return func(c *gin.Context) { + // WebSocket upgrade requests cannot set Authorization headers in browsers. + // For admin WebSocket endpoints (e.g. Ops realtime), allow passing the JWT via + // Sec-WebSocket-Protocol (subprotocol list) using a prefixed token item: + // Sec-WebSocket-Protocol: sub2api-admin, jwt. 
+ if isWebSocketUpgradeRequest(c) { + if token := extractJWTFromWebSocketSubprotocol(c); token != "" { + if !validateJWTForAdmin(c, token, authService, userService) { + return + } + c.Next() + return + } + } + // 检查 x-api-key header(Admin API Key 认证) apiKey := c.GetHeader("x-api-key") if apiKey != "" { @@ -58,6 +72,44 @@ func adminAuth( } } +func isWebSocketUpgradeRequest(c *gin.Context) bool { + if c == nil || c.Request == nil { + return false + } + // RFC6455 handshake uses: + // Connection: Upgrade + // Upgrade: websocket + upgrade := strings.ToLower(strings.TrimSpace(c.GetHeader("Upgrade"))) + if upgrade != "websocket" { + return false + } + connection := strings.ToLower(c.GetHeader("Connection")) + return strings.Contains(connection, "upgrade") +} + +func extractJWTFromWebSocketSubprotocol(c *gin.Context) string { + if c == nil { + return "" + } + raw := strings.TrimSpace(c.GetHeader("Sec-WebSocket-Protocol")) + if raw == "" { + return "" + } + + // The header is a comma-separated list of tokens. We reserve the prefix "jwt." + // for carrying the admin JWT. 
+ for _, part := range strings.Split(raw, ",") { + p := strings.TrimSpace(part) + if strings.HasPrefix(p, "jwt.") { + token := strings.TrimSpace(strings.TrimPrefix(p, "jwt.")) + if token != "" { + return token + } + } + } + return "" +} + // validateAdminAPIKey 验证管理员 API Key func validateAdminAPIKey( c *gin.Context, diff --git a/backend/internal/server/middleware/api_key_auth.go b/backend/internal/server/middleware/api_key_auth.go index 74ff8af3..dff6ba95 100644 --- a/backend/internal/server/middleware/api_key_auth.go +++ b/backend/internal/server/middleware/api_key_auth.go @@ -1,11 +1,14 @@ package middleware import ( + "context" "errors" "log" "strings" "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/gin-gonic/gin" @@ -71,6 +74,17 @@ func apiKeyAuthWithSubscription(apiKeyService *service.APIKeyService, subscripti return } + // 检查 IP 限制(白名单/黑名单) + // 注意:错误信息故意模糊,避免暴露具体的 IP 限制机制 + if len(apiKey.IPWhitelist) > 0 || len(apiKey.IPBlacklist) > 0 { + clientIP := ip.GetClientIP(c) + allowed, _ := ip.CheckIPRestriction(clientIP, apiKey.IPWhitelist, apiKey.IPBlacklist) + if !allowed { + AbortWithError(c, 403, "ACCESS_DENIED", "Access denied") + return + } + } + // 检查关联的用户 if apiKey.User == nil { AbortWithError(c, 401, "USER_NOT_FOUND", "User associated with API key not found") @@ -91,6 +105,7 @@ func apiKeyAuthWithSubscription(apiKeyService *service.APIKeyService, subscripti Concurrency: apiKey.User.Concurrency, }) c.Set(string(ContextKeyUserRole), apiKey.User.Role) + setGroupContext(c, apiKey.Group) c.Next() return } @@ -149,6 +164,7 @@ func apiKeyAuthWithSubscription(apiKeyService *service.APIKeyService, subscripti Concurrency: apiKey.User.Concurrency, }) c.Set(string(ContextKeyUserRole), apiKey.User.Role) + setGroupContext(c, apiKey.Group) c.Next() } @@ -173,3 +189,14 @@ func GetSubscriptionFromContext(c 
*gin.Context) (*service.UserSubscription, bool subscription, ok := value.(*service.UserSubscription) return subscription, ok } + +func setGroupContext(c *gin.Context, group *service.Group) { + if !service.IsGroupContextValid(group) { + return + } + if existing, ok := c.Request.Context().Value(ctxkey.Group).(*service.Group); ok && existing != nil && existing.ID == group.ID && service.IsGroupContextValid(existing) { + return + } + ctx := context.WithValue(c.Request.Context(), ctxkey.Group, group) + c.Request = c.Request.WithContext(ctx) +} diff --git a/backend/internal/server/middleware/api_key_auth_google.go b/backend/internal/server/middleware/api_key_auth_google.go index c5afd7ef..1a0b0dd5 100644 --- a/backend/internal/server/middleware/api_key_auth_google.go +++ b/backend/internal/server/middleware/api_key_auth_google.go @@ -63,6 +63,7 @@ func APIKeyAuthWithSubscriptionGoogle(apiKeyService *service.APIKeyService, subs Concurrency: apiKey.User.Concurrency, }) c.Set(string(ContextKeyUserRole), apiKey.User.Role) + setGroupContext(c, apiKey.Group) c.Next() return } @@ -102,6 +103,7 @@ func APIKeyAuthWithSubscriptionGoogle(apiKeyService *service.APIKeyService, subs Concurrency: apiKey.User.Concurrency, }) c.Set(string(ContextKeyUserRole), apiKey.User.Role) + setGroupContext(c, apiKey.Group) c.Next() } } diff --git a/backend/internal/server/middleware/api_key_auth_google_test.go b/backend/internal/server/middleware/api_key_auth_google_test.go index 0ed5a4a2..6f09469b 100644 --- a/backend/internal/server/middleware/api_key_auth_google_test.go +++ b/backend/internal/server/middleware/api_key_auth_google_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" @@ -26,8 +27,8 @@ func (f fakeAPIKeyRepo) Create(ctx context.Context, key *service.APIKey) error { func (f fakeAPIKeyRepo) 
GetByID(ctx context.Context, id int64) (*service.APIKey, error) { return nil, errors.New("not implemented") } -func (f fakeAPIKeyRepo) GetOwnerID(ctx context.Context, id int64) (int64, error) { - return 0, errors.New("not implemented") +func (f fakeAPIKeyRepo) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { + return "", 0, errors.New("not implemented") } func (f fakeAPIKeyRepo) GetByKey(ctx context.Context, key string) (*service.APIKey, error) { if f.getByKey == nil { @@ -35,6 +36,9 @@ func (f fakeAPIKeyRepo) GetByKey(ctx context.Context, key string) (*service.APIK } return f.getByKey(ctx, key) } +func (f fakeAPIKeyRepo) GetByKeyForAuth(ctx context.Context, key string) (*service.APIKey, error) { + return f.GetByKey(ctx, key) +} func (f fakeAPIKeyRepo) Update(ctx context.Context, key *service.APIKey) error { return errors.New("not implemented") } @@ -65,6 +69,12 @@ func (f fakeAPIKeyRepo) ClearGroupIDByGroupID(ctx context.Context, groupID int64 func (f fakeAPIKeyRepo) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { return 0, errors.New("not implemented") } +func (f fakeAPIKeyRepo) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + return nil, errors.New("not implemented") +} +func (f fakeAPIKeyRepo) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) { + return nil, errors.New("not implemented") +} type googleErrorResponse struct { Error struct { @@ -133,6 +143,70 @@ func TestApiKeyAuthWithSubscriptionGoogle_QueryApiKeyRejected(t *testing.T) { require.Equal(t, "INVALID_ARGUMENT", resp.Error.Status) } +func TestApiKeyAuthWithSubscriptionGoogleSetsGroupContext(t *testing.T) { + gin.SetMode(gin.TestMode) + + group := &service.Group{ + ID: 99, + Name: "g1", + Status: service.StatusActive, + Platform: service.PlatformGemini, + Hydrated: true, + } + user := &service.User{ + ID: 7, + Role: service.RoleUser, + Status: service.StatusActive, + Balance: 10, + Concurrency: 3, + } + apiKey := 
&service.APIKey{ + ID: 100, + UserID: user.ID, + Key: "test-key", + Status: service.StatusActive, + User: user, + Group: group, + } + apiKey.GroupID = &group.ID + + apiKeyService := service.NewAPIKeyService( + fakeAPIKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + if key != apiKey.Key { + return nil, service.ErrAPIKeyNotFound + } + clone := *apiKey + return &clone, nil + }, + }, + nil, + nil, + nil, + nil, + &config.Config{RunMode: config.RunModeSimple}, + ) + + cfg := &config.Config{RunMode: config.RunModeSimple} + r := gin.New() + r.Use(APIKeyAuthWithSubscriptionGoogle(apiKeyService, nil, cfg)) + r.GET("/v1beta/test", func(c *gin.Context) { + groupFromCtx, ok := c.Request.Context().Value(ctxkey.Group).(*service.Group) + if !ok || groupFromCtx == nil || groupFromCtx.ID != group.ID { + c.JSON(http.StatusInternalServerError, gin.H{"ok": false}) + return + } + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("x-api-key", apiKey.Key) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) +} + func TestApiKeyAuthWithSubscriptionGoogle_QueryKeyAllowedOnV1Beta(t *testing.T) { gin.SetMode(gin.TestMode) diff --git a/backend/internal/server/middleware/api_key_auth_test.go b/backend/internal/server/middleware/api_key_auth_test.go index d50fb7b2..84398093 100644 --- a/backend/internal/server/middleware/api_key_auth_test.go +++ b/backend/internal/server/middleware/api_key_auth_test.go @@ -11,6 +11,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/gin-gonic/gin" @@ -25,6 +26,7 @@ func TestSimpleModeBypassesQuotaCheck(t *testing.T) { ID: 42, Name: "sub", Status: service.StatusActive, + Hydrated: true, SubscriptionType: 
service.SubscriptionTypeSubscription, DailyLimitUSD: &limit, } @@ -110,6 +112,129 @@ func TestSimpleModeBypassesQuotaCheck(t *testing.T) { }) } +func TestAPIKeyAuthSetsGroupContext(t *testing.T) { + gin.SetMode(gin.TestMode) + + group := &service.Group{ + ID: 101, + Name: "g1", + Status: service.StatusActive, + Platform: service.PlatformAnthropic, + Hydrated: true, + } + user := &service.User{ + ID: 7, + Role: service.RoleUser, + Status: service.StatusActive, + Balance: 10, + Concurrency: 3, + } + apiKey := &service.APIKey{ + ID: 100, + UserID: user.ID, + Key: "test-key", + Status: service.StatusActive, + User: user, + Group: group, + } + apiKey.GroupID = &group.ID + + apiKeyRepo := &stubApiKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + if key != apiKey.Key { + return nil, service.ErrAPIKeyNotFound + } + clone := *apiKey + return &clone, nil + }, + } + + cfg := &config.Config{RunMode: config.RunModeSimple} + apiKeyService := service.NewAPIKeyService(apiKeyRepo, nil, nil, nil, nil, cfg) + router := gin.New() + router.Use(gin.HandlerFunc(NewAPIKeyAuthMiddleware(apiKeyService, nil, cfg))) + router.GET("/t", func(c *gin.Context) { + groupFromCtx, ok := c.Request.Context().Value(ctxkey.Group).(*service.Group) + if !ok || groupFromCtx == nil || groupFromCtx.ID != group.ID { + c.JSON(http.StatusInternalServerError, gin.H{"ok": false}) + return + } + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/t", nil) + req.Header.Set("x-api-key", apiKey.Key) + router.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) +} + +func TestAPIKeyAuthOverwritesInvalidContextGroup(t *testing.T) { + gin.SetMode(gin.TestMode) + + group := &service.Group{ + ID: 101, + Name: "g1", + Status: service.StatusActive, + Platform: service.PlatformAnthropic, + Hydrated: true, + } + user := &service.User{ + ID: 7, + Role: service.RoleUser, + Status: service.StatusActive, + 
Balance: 10, + Concurrency: 3, + } + apiKey := &service.APIKey{ + ID: 100, + UserID: user.ID, + Key: "test-key", + Status: service.StatusActive, + User: user, + Group: group, + } + apiKey.GroupID = &group.ID + + apiKeyRepo := &stubApiKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + if key != apiKey.Key { + return nil, service.ErrAPIKeyNotFound + } + clone := *apiKey + return &clone, nil + }, + } + + cfg := &config.Config{RunMode: config.RunModeSimple} + apiKeyService := service.NewAPIKeyService(apiKeyRepo, nil, nil, nil, nil, cfg) + router := gin.New() + router.Use(gin.HandlerFunc(NewAPIKeyAuthMiddleware(apiKeyService, nil, cfg))) + + invalidGroup := &service.Group{ + ID: group.ID, + Platform: group.Platform, + Status: group.Status, + } + router.GET("/t", func(c *gin.Context) { + groupFromCtx, ok := c.Request.Context().Value(ctxkey.Group).(*service.Group) + if !ok || groupFromCtx == nil || groupFromCtx.ID != group.ID || !groupFromCtx.Hydrated || groupFromCtx == invalidGroup { + c.JSON(http.StatusInternalServerError, gin.H{"ok": false}) + return + } + c.JSON(http.StatusOK, gin.H{"ok": true}) + }) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/t", nil) + req.Header.Set("x-api-key", apiKey.Key) + req = req.WithContext(context.WithValue(req.Context(), ctxkey.Group, invalidGroup)) + router.ServeHTTP(w, req) + + require.Equal(t, http.StatusOK, w.Code) +} + func newAuthTestRouter(apiKeyService *service.APIKeyService, subscriptionService *service.SubscriptionService, cfg *config.Config) *gin.Engine { router := gin.New() router.Use(gin.HandlerFunc(NewAPIKeyAuthMiddleware(apiKeyService, subscriptionService, cfg))) @@ -131,8 +256,8 @@ func (r *stubApiKeyRepo) GetByID(ctx context.Context, id int64) (*service.APIKey return nil, errors.New("not implemented") } -func (r *stubApiKeyRepo) GetOwnerID(ctx context.Context, id int64) (int64, error) { - return 0, errors.New("not implemented") +func (r 
*stubApiKeyRepo) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { + return "", 0, errors.New("not implemented") } func (r *stubApiKeyRepo) GetByKey(ctx context.Context, key string) (*service.APIKey, error) { @@ -142,6 +267,10 @@ func (r *stubApiKeyRepo) GetByKey(ctx context.Context, key string) (*service.API return nil, errors.New("not implemented") } +func (r *stubApiKeyRepo) GetByKeyForAuth(ctx context.Context, key string) (*service.APIKey, error) { + return r.GetByKey(ctx, key) +} + func (r *stubApiKeyRepo) Update(ctx context.Context, key *service.APIKey) error { return errors.New("not implemented") } @@ -182,6 +311,14 @@ func (r *stubApiKeyRepo) CountByGroupID(ctx context.Context, groupID int64) (int return 0, errors.New("not implemented") } +func (r *stubApiKeyRepo) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (r *stubApiKeyRepo) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) { + return nil, errors.New("not implemented") +} + type stubUserSubscriptionRepo struct { getActive func(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) updateStatus func(ctx context.Context, subscriptionID int64, status string) error diff --git a/backend/internal/server/middleware/client_request_id.go b/backend/internal/server/middleware/client_request_id.go new file mode 100644 index 00000000..d22b6cc5 --- /dev/null +++ b/backend/internal/server/middleware/client_request_id.go @@ -0,0 +1,30 @@ +package middleware + +import ( + "context" + + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +// ClientRequestID ensures every request has a unique client_request_id in request.Context(). +// +// This is used by the Ops monitoring module for end-to-end request correlation. 
+func ClientRequestID() gin.HandlerFunc { + return func(c *gin.Context) { + if c.Request == nil { + c.Next() + return + } + + if v := c.Request.Context().Value(ctxkey.ClientRequestID); v != nil { + c.Next() + return + } + + id := uuid.New().String() + c.Request = c.Request.WithContext(context.WithValue(c.Request.Context(), ctxkey.ClientRequestID, id)) + c.Next() + } +} diff --git a/backend/internal/server/middleware/security_headers.go b/backend/internal/server/middleware/security_headers.go index 9fca0cd3..9ce7f449 100644 --- a/backend/internal/server/middleware/security_headers.go +++ b/backend/internal/server/middleware/security_headers.go @@ -1,12 +1,40 @@ package middleware import ( + "crypto/rand" + "encoding/base64" "strings" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/gin-gonic/gin" ) +const ( + // CSPNonceKey is the context key for storing the CSP nonce + CSPNonceKey = "csp_nonce" + // NonceTemplate is the placeholder in CSP policy for nonce + NonceTemplate = "__CSP_NONCE__" + // CloudflareInsightsDomain is the domain for Cloudflare Web Analytics + CloudflareInsightsDomain = "https://static.cloudflareinsights.com" +) + +// GenerateNonce generates a cryptographically secure random nonce +func GenerateNonce() string { + b := make([]byte, 16) + _, _ = rand.Read(b) + return base64.StdEncoding.EncodeToString(b) +} + +// GetNonceFromContext retrieves the CSP nonce from gin context +func GetNonceFromContext(c *gin.Context) string { + if nonce, exists := c.Get(CSPNonceKey); exists { + if s, ok := nonce.(string); ok { + return s + } + } + return "" +} + // SecurityHeaders sets baseline security headers for all responses. 
func SecurityHeaders(cfg config.CSPConfig) gin.HandlerFunc { policy := strings.TrimSpace(cfg.Policy) @@ -14,13 +42,75 @@ func SecurityHeaders(cfg config.CSPConfig) gin.HandlerFunc { policy = config.DefaultCSPPolicy } + // Enhance policy with required directives (nonce placeholder and Cloudflare Insights) + policy = enhanceCSPPolicy(policy) + return func(c *gin.Context) { c.Header("X-Content-Type-Options", "nosniff") c.Header("X-Frame-Options", "DENY") c.Header("Referrer-Policy", "strict-origin-when-cross-origin") + if cfg.Enabled { - c.Header("Content-Security-Policy", policy) + // Generate nonce for this request + nonce := GenerateNonce() + c.Set(CSPNonceKey, nonce) + + // Replace nonce placeholder in policy + finalPolicy := strings.ReplaceAll(policy, NonceTemplate, "'nonce-"+nonce+"'") + c.Header("Content-Security-Policy", finalPolicy) } c.Next() } } + +// enhanceCSPPolicy ensures the CSP policy includes nonce support and Cloudflare Insights domain. +// This allows the application to work correctly even if the config file has an older CSP policy. +func enhanceCSPPolicy(policy string) string { + // Add nonce placeholder to script-src if not present + if !strings.Contains(policy, NonceTemplate) && !strings.Contains(policy, "'nonce-") { + policy = addToDirective(policy, "script-src", NonceTemplate) + } + + // Add Cloudflare Insights domain to script-src if not present + if !strings.Contains(policy, CloudflareInsightsDomain) { + policy = addToDirective(policy, "script-src", CloudflareInsightsDomain) + } + + return policy +} + +// addToDirective adds a value to a specific CSP directive. +// If the directive doesn't exist, it will be added after default-src. 
+func addToDirective(policy, directive, value string) string { + // Find the directive in the policy + directivePrefix := directive + " " + idx := strings.Index(policy, directivePrefix) + + if idx == -1 { + // Directive not found, add it after default-src or at the beginning + defaultSrcIdx := strings.Index(policy, "default-src ") + if defaultSrcIdx != -1 { + // Find the end of default-src directive (next semicolon) + endIdx := strings.Index(policy[defaultSrcIdx:], ";") + if endIdx != -1 { + insertPos := defaultSrcIdx + endIdx + 1 + // Insert new directive after default-src + return policy[:insertPos] + " " + directive + " 'self' " + value + ";" + policy[insertPos:] + } + } + // Fallback: prepend the directive + return directive + " 'self' " + value + "; " + policy + } + + // Find the end of this directive (next semicolon or end of string) + endIdx := strings.Index(policy[idx:], ";") + + if endIdx == -1 { + // No semicolon found, directive goes to end of string + return policy + " " + value + } + + // Insert value before the semicolon + insertPos := idx + endIdx + return policy[:insertPos] + " " + value + policy[insertPos:] +} diff --git a/backend/internal/server/middleware/security_headers_test.go b/backend/internal/server/middleware/security_headers_test.go new file mode 100644 index 00000000..dc7a87d8 --- /dev/null +++ b/backend/internal/server/middleware/security_headers_test.go @@ -0,0 +1,365 @@ +package middleware + +import ( + "encoding/base64" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func init() { + gin.SetMode(gin.TestMode) +} + +func TestGenerateNonce(t *testing.T) { + t.Run("generates_valid_base64_string", func(t *testing.T) { + nonce := GenerateNonce() + + // Should be valid base64 + decoded, err := base64.StdEncoding.DecodeString(nonce) + require.NoError(t, err) + + // 
Should decode to 16 bytes + assert.Len(t, decoded, 16) + }) + + t.Run("generates_unique_nonces", func(t *testing.T) { + nonces := make(map[string]bool) + for i := 0; i < 100; i++ { + nonce := GenerateNonce() + assert.False(t, nonces[nonce], "nonce should be unique") + nonces[nonce] = true + } + }) + + t.Run("nonce_has_expected_length", func(t *testing.T) { + nonce := GenerateNonce() + // 16 bytes -> 24 chars in base64 (with padding) + assert.Len(t, nonce, 24) + }) +} + +func TestGetNonceFromContext(t *testing.T) { + t.Run("returns_nonce_when_present", func(t *testing.T) { + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + expectedNonce := "test-nonce-123" + c.Set(CSPNonceKey, expectedNonce) + + nonce := GetNonceFromContext(c) + assert.Equal(t, expectedNonce, nonce) + }) + + t.Run("returns_empty_string_when_not_present", func(t *testing.T) { + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + nonce := GetNonceFromContext(c) + assert.Empty(t, nonce) + }) + + t.Run("returns_empty_for_wrong_type", func(t *testing.T) { + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + + // Set a non-string value + c.Set(CSPNonceKey, 12345) + + // Should return empty string for wrong type (safe type assertion) + nonce := GetNonceFromContext(c) + assert.Empty(t, nonce) + }) +} + +func TestSecurityHeaders(t *testing.T) { + t.Run("sets_basic_security_headers", func(t *testing.T) { + cfg := config.CSPConfig{Enabled: false} + middleware := SecurityHeaders(cfg) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/", nil) + + middleware(c) + + assert.Equal(t, "nosniff", w.Header().Get("X-Content-Type-Options")) + assert.Equal(t, "DENY", w.Header().Get("X-Frame-Options")) + assert.Equal(t, "strict-origin-when-cross-origin", w.Header().Get("Referrer-Policy")) + }) + + t.Run("csp_disabled_no_csp_header", func(t *testing.T) { + cfg := config.CSPConfig{Enabled: false} + middleware := 
SecurityHeaders(cfg) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/", nil) + + middleware(c) + + assert.Empty(t, w.Header().Get("Content-Security-Policy")) + }) + + t.Run("csp_enabled_sets_csp_header", func(t *testing.T) { + cfg := config.CSPConfig{ + Enabled: true, + Policy: "default-src 'self'", + } + middleware := SecurityHeaders(cfg) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/", nil) + + middleware(c) + + csp := w.Header().Get("Content-Security-Policy") + assert.NotEmpty(t, csp) + // Policy is auto-enhanced with nonce and Cloudflare Insights domain + assert.Contains(t, csp, "default-src 'self'") + assert.Contains(t, csp, "'nonce-") + assert.Contains(t, csp, CloudflareInsightsDomain) + }) + + t.Run("csp_enabled_with_nonce_placeholder", func(t *testing.T) { + cfg := config.CSPConfig{ + Enabled: true, + Policy: "script-src 'self' __CSP_NONCE__", + } + middleware := SecurityHeaders(cfg) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/", nil) + + middleware(c) + + csp := w.Header().Get("Content-Security-Policy") + assert.NotEmpty(t, csp) + assert.NotContains(t, csp, "__CSP_NONCE__", "placeholder should be replaced") + assert.Contains(t, csp, "'nonce-", "should contain nonce directive") + + // Verify nonce is stored in context + nonce := GetNonceFromContext(c) + assert.NotEmpty(t, nonce) + assert.Contains(t, csp, "'nonce-"+nonce+"'") + }) + + t.Run("uses_default_policy_when_empty", func(t *testing.T) { + cfg := config.CSPConfig{ + Enabled: true, + Policy: "", + } + middleware := SecurityHeaders(cfg) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/", nil) + + middleware(c) + + csp := w.Header().Get("Content-Security-Policy") + assert.NotEmpty(t, csp) + // Default policy should 
contain these elements + assert.Contains(t, csp, "default-src 'self'") + }) + + t.Run("uses_default_policy_when_whitespace_only", func(t *testing.T) { + cfg := config.CSPConfig{ + Enabled: true, + Policy: " \t\n ", + } + middleware := SecurityHeaders(cfg) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/", nil) + + middleware(c) + + csp := w.Header().Get("Content-Security-Policy") + assert.NotEmpty(t, csp) + assert.Contains(t, csp, "default-src 'self'") + }) + + t.Run("multiple_nonce_placeholders_replaced", func(t *testing.T) { + cfg := config.CSPConfig{ + Enabled: true, + Policy: "script-src __CSP_NONCE__; style-src __CSP_NONCE__", + } + middleware := SecurityHeaders(cfg) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/", nil) + + middleware(c) + + csp := w.Header().Get("Content-Security-Policy") + nonce := GetNonceFromContext(c) + + // Count occurrences of the nonce + count := strings.Count(csp, "'nonce-"+nonce+"'") + assert.Equal(t, 2, count, "both placeholders should be replaced with same nonce") + }) + + t.Run("calls_next_handler", func(t *testing.T) { + cfg := config.CSPConfig{Enabled: true, Policy: "default-src 'self'"} + middleware := SecurityHeaders(cfg) + + nextCalled := false + router := gin.New() + router.Use(middleware) + router.GET("/test", func(c *gin.Context) { + nextCalled = true + c.Status(http.StatusOK) + }) + + w := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/test", nil) + router.ServeHTTP(w, req) + + assert.True(t, nextCalled, "next handler should be called") + assert.Equal(t, http.StatusOK, w.Code) + }) + + t.Run("nonce_unique_per_request", func(t *testing.T) { + cfg := config.CSPConfig{ + Enabled: true, + Policy: "script-src __CSP_NONCE__", + } + middleware := SecurityHeaders(cfg) + + nonces := make(map[string]bool) + for i := 0; i < 10; i++ { + w := httptest.NewRecorder() + c, 
_ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/", nil) + + middleware(c) + + nonce := GetNonceFromContext(c) + assert.False(t, nonces[nonce], "nonce should be unique per request") + nonces[nonce] = true + } + }) +} + +func TestCSPNonceKey(t *testing.T) { + t.Run("constant_value", func(t *testing.T) { + assert.Equal(t, "csp_nonce", CSPNonceKey) + }) +} + +func TestNonceTemplate(t *testing.T) { + t.Run("constant_value", func(t *testing.T) { + assert.Equal(t, "__CSP_NONCE__", NonceTemplate) + }) +} + +func TestEnhanceCSPPolicy(t *testing.T) { + t.Run("adds_nonce_placeholder_if_missing", func(t *testing.T) { + policy := "default-src 'self'; script-src 'self'" + enhanced := enhanceCSPPolicy(policy) + + assert.Contains(t, enhanced, NonceTemplate) + assert.Contains(t, enhanced, CloudflareInsightsDomain) + }) + + t.Run("does_not_duplicate_nonce_placeholder", func(t *testing.T) { + policy := "default-src 'self'; script-src 'self' __CSP_NONCE__" + enhanced := enhanceCSPPolicy(policy) + + // Should not duplicate + count := strings.Count(enhanced, NonceTemplate) + assert.Equal(t, 1, count) + }) + + t.Run("does_not_duplicate_cloudflare_domain", func(t *testing.T) { + policy := "default-src 'self'; script-src 'self' https://static.cloudflareinsights.com" + enhanced := enhanceCSPPolicy(policy) + + count := strings.Count(enhanced, CloudflareInsightsDomain) + assert.Equal(t, 1, count) + }) + + t.Run("handles_policy_without_script_src", func(t *testing.T) { + policy := "default-src 'self'" + enhanced := enhanceCSPPolicy(policy) + + assert.Contains(t, enhanced, "script-src") + assert.Contains(t, enhanced, NonceTemplate) + assert.Contains(t, enhanced, CloudflareInsightsDomain) + }) + + t.Run("preserves_existing_nonce", func(t *testing.T) { + policy := "script-src 'self' 'nonce-existing'" + enhanced := enhanceCSPPolicy(policy) + + // Should not add placeholder if nonce already exists + assert.NotContains(t, enhanced, NonceTemplate) + 
assert.Contains(t, enhanced, "'nonce-existing'") + }) +} + +func TestAddToDirective(t *testing.T) { + t.Run("adds_to_existing_directive", func(t *testing.T) { + policy := "script-src 'self'; style-src 'self'" + result := addToDirective(policy, "script-src", "https://example.com") + + assert.Contains(t, result, "script-src 'self' https://example.com") + }) + + t.Run("creates_directive_if_not_exists", func(t *testing.T) { + policy := "default-src 'self'" + result := addToDirective(policy, "script-src", "https://example.com") + + assert.Contains(t, result, "script-src") + assert.Contains(t, result, "https://example.com") + }) + + t.Run("handles_directive_at_end_without_semicolon", func(t *testing.T) { + policy := "default-src 'self'; script-src 'self'" + result := addToDirective(policy, "script-src", "https://example.com") + + assert.Contains(t, result, "https://example.com") + }) + + t.Run("handles_empty_policy", func(t *testing.T) { + policy := "" + result := addToDirective(policy, "script-src", "https://example.com") + + assert.Contains(t, result, "script-src") + assert.Contains(t, result, "https://example.com") + }) +} + +// Benchmark tests +func BenchmarkGenerateNonce(b *testing.B) { + for i := 0; i < b.N; i++ { + GenerateNonce() + } +} + +func BenchmarkSecurityHeadersMiddleware(b *testing.B) { + cfg := config.CSPConfig{ + Enabled: true, + Policy: "script-src 'self' __CSP_NONCE__", + } + middleware := SecurityHeaders(cfg) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/", nil) + middleware(c) + } +} diff --git a/backend/internal/server/router.go b/backend/internal/server/router.go index 15a1b325..cf9015e4 100644 --- a/backend/internal/server/router.go +++ b/backend/internal/server/router.go @@ -1,6 +1,8 @@ package server import ( + "log" + "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/handler" middleware2 
"github.com/Wei-Shaw/sub2api/internal/server/middleware" @@ -9,6 +11,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/web" "github.com/gin-gonic/gin" + "github.com/redis/go-redis/v9" ) // SetupRouter 配置路由器中间件和路由 @@ -20,20 +23,31 @@ func SetupRouter( apiKeyAuth middleware2.APIKeyAuthMiddleware, apiKeyService *service.APIKeyService, subscriptionService *service.SubscriptionService, + opsService *service.OpsService, + settingService *service.SettingService, cfg *config.Config, + redisClient *redis.Client, ) *gin.Engine { // 应用中间件 r.Use(middleware2.Logger()) r.Use(middleware2.CORS(cfg.CORS)) r.Use(middleware2.SecurityHeaders(cfg.Security.CSP)) - // Serve embedded frontend if available + // Serve embedded frontend with settings injection if available if web.HasEmbeddedFrontend() { - r.Use(web.ServeEmbeddedFrontend()) + frontendServer, err := web.NewFrontendServer(settingService) + if err != nil { + log.Printf("Warning: Failed to create frontend server with settings injection: %v, using legacy mode", err) + r.Use(web.ServeEmbeddedFrontend()) + } else { + // Register cache invalidation callback + settingService.SetOnUpdateCallback(frontendServer.InvalidateCache) + r.Use(frontendServer.Middleware()) + } } // 注册路由 - registerRoutes(r, handlers, jwtAuth, adminAuth, apiKeyAuth, apiKeyService, subscriptionService, cfg) + registerRoutes(r, handlers, jwtAuth, adminAuth, apiKeyAuth, apiKeyService, subscriptionService, opsService, cfg, redisClient) return r } @@ -47,7 +61,9 @@ func registerRoutes( apiKeyAuth middleware2.APIKeyAuthMiddleware, apiKeyService *service.APIKeyService, subscriptionService *service.SubscriptionService, + opsService *service.OpsService, cfg *config.Config, + redisClient *redis.Client, ) { // 通用路由(健康检查、状态等) routes.RegisterCommonRoutes(r) @@ -56,8 +72,8 @@ func registerRoutes( v1 := r.Group("/api/v1") // 注册各模块路由 - routes.RegisterAuthRoutes(v1, h, jwtAuth) + routes.RegisterAuthRoutes(v1, h, jwtAuth, redisClient) routes.RegisterUserRoutes(v1, h, jwtAuth) 
routes.RegisterAdminRoutes(v1, h, adminAuth) - routes.RegisterGatewayRoutes(r, h, apiKeyAuth, apiKeyService, subscriptionService, cfg) + routes.RegisterGatewayRoutes(r, h, apiKeyAuth, apiKeyService, subscriptionService, opsService, cfg) } diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index 663c2d02..ff05b32a 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -44,9 +44,15 @@ func RegisterAdminRoutes( // 卡密管理 registerRedeemCodeRoutes(admin, h) + // 优惠码管理 + registerPromoCodeRoutes(admin, h) + // 系统设置 registerSettingsRoutes(admin, h) + // 运维监控(Ops) + registerOpsRoutes(admin, h) + // 系统管理 registerSystemRoutes(admin, h) @@ -61,6 +67,85 @@ func RegisterAdminRoutes( } } +func registerOpsRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + ops := admin.Group("/ops") + { + // Realtime ops signals + ops.GET("/concurrency", h.Admin.Ops.GetConcurrencyStats) + ops.GET("/account-availability", h.Admin.Ops.GetAccountAvailability) + ops.GET("/realtime-traffic", h.Admin.Ops.GetRealtimeTrafficSummary) + + // Alerts (rules + events) + ops.GET("/alert-rules", h.Admin.Ops.ListAlertRules) + ops.POST("/alert-rules", h.Admin.Ops.CreateAlertRule) + ops.PUT("/alert-rules/:id", h.Admin.Ops.UpdateAlertRule) + ops.DELETE("/alert-rules/:id", h.Admin.Ops.DeleteAlertRule) + ops.GET("/alert-events", h.Admin.Ops.ListAlertEvents) + ops.GET("/alert-events/:id", h.Admin.Ops.GetAlertEvent) + ops.PUT("/alert-events/:id/status", h.Admin.Ops.UpdateAlertEventStatus) + ops.POST("/alert-silences", h.Admin.Ops.CreateAlertSilence) + + // Email notification config (DB-backed) + ops.GET("/email-notification/config", h.Admin.Ops.GetEmailNotificationConfig) + ops.PUT("/email-notification/config", h.Admin.Ops.UpdateEmailNotificationConfig) + + // Runtime settings (DB-backed) + runtime := ops.Group("/runtime") + { + runtime.GET("/alert", h.Admin.Ops.GetAlertRuntimeSettings) + runtime.PUT("/alert", 
h.Admin.Ops.UpdateAlertRuntimeSettings) + } + + // Advanced settings (DB-backed) + ops.GET("/advanced-settings", h.Admin.Ops.GetAdvancedSettings) + ops.PUT("/advanced-settings", h.Admin.Ops.UpdateAdvancedSettings) + + // Settings group (DB-backed) + settings := ops.Group("/settings") + { + settings.GET("/metric-thresholds", h.Admin.Ops.GetMetricThresholds) + settings.PUT("/metric-thresholds", h.Admin.Ops.UpdateMetricThresholds) + } + + // WebSocket realtime (QPS/TPS) + ws := ops.Group("/ws") + { + ws.GET("/qps", h.Admin.Ops.QPSWSHandler) + } + + // Error logs (legacy) + ops.GET("/errors", h.Admin.Ops.GetErrorLogs) + ops.GET("/errors/:id", h.Admin.Ops.GetErrorLogByID) + ops.GET("/errors/:id/retries", h.Admin.Ops.ListRetryAttempts) + ops.POST("/errors/:id/retry", h.Admin.Ops.RetryErrorRequest) + ops.PUT("/errors/:id/resolve", h.Admin.Ops.UpdateErrorResolution) + + // Request errors (client-visible failures) + ops.GET("/request-errors", h.Admin.Ops.ListRequestErrors) + ops.GET("/request-errors/:id", h.Admin.Ops.GetRequestError) + ops.GET("/request-errors/:id/upstream-errors", h.Admin.Ops.ListRequestErrorUpstreamErrors) + ops.POST("/request-errors/:id/retry-client", h.Admin.Ops.RetryRequestErrorClient) + ops.POST("/request-errors/:id/upstream-errors/:idx/retry", h.Admin.Ops.RetryRequestErrorUpstreamEvent) + ops.PUT("/request-errors/:id/resolve", h.Admin.Ops.ResolveRequestError) + + // Upstream errors (independent upstream failures) + ops.GET("/upstream-errors", h.Admin.Ops.ListUpstreamErrors) + ops.GET("/upstream-errors/:id", h.Admin.Ops.GetUpstreamError) + ops.POST("/upstream-errors/:id/retry", h.Admin.Ops.RetryUpstreamError) + ops.PUT("/upstream-errors/:id/resolve", h.Admin.Ops.ResolveUpstreamError) + + // Request drilldown (success + error) + ops.GET("/requests", h.Admin.Ops.ListRequestDetails) + + // Dashboard (vNext - raw path for MVP) + ops.GET("/dashboard/overview", h.Admin.Ops.GetDashboardOverview) + ops.GET("/dashboard/throughput-trend", 
h.Admin.Ops.GetDashboardThroughputTrend) + ops.GET("/dashboard/latency-histogram", h.Admin.Ops.GetDashboardLatencyHistogram) + ops.GET("/dashboard/error-trend", h.Admin.Ops.GetDashboardErrorTrend) + ops.GET("/dashboard/error-distribution", h.Admin.Ops.GetDashboardErrorDistribution) + } +} + func registerDashboardRoutes(admin *gin.RouterGroup, h *handler.Handlers) { dashboard := admin.Group("/dashboard") { @@ -72,6 +157,7 @@ func registerDashboardRoutes(admin *gin.RouterGroup, h *handler.Handlers) { dashboard.GET("/users-trend", h.Admin.Dashboard.GetUserUsageTrend) dashboard.POST("/users-usage", h.Admin.Dashboard.GetBatchUsersUsage) dashboard.POST("/api-keys-usage", h.Admin.Dashboard.GetBatchAPIKeysUsage) + dashboard.POST("/aggregation/backfill", h.Admin.Dashboard.BackfillAggregation) } } @@ -183,6 +269,7 @@ func registerProxyRoutes(admin *gin.RouterGroup, h *handler.Handlers) { proxies.POST("/:id/test", h.Admin.Proxy.Test) proxies.GET("/:id/stats", h.Admin.Proxy.GetStats) proxies.GET("/:id/accounts", h.Admin.Proxy.GetProxyAccounts) + proxies.POST("/batch-delete", h.Admin.Proxy.BatchDelete) proxies.POST("/batch", h.Admin.Proxy.BatchCreate) } } @@ -201,6 +288,18 @@ func registerRedeemCodeRoutes(admin *gin.RouterGroup, h *handler.Handlers) { } } +func registerPromoCodeRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + promoCodes := admin.Group("/promo-codes") + { + promoCodes.GET("", h.Admin.Promo.List) + promoCodes.GET("/:id", h.Admin.Promo.GetByID) + promoCodes.POST("", h.Admin.Promo.Create) + promoCodes.PUT("/:id", h.Admin.Promo.Update) + promoCodes.DELETE("/:id", h.Admin.Promo.Delete) + promoCodes.GET("/:id/usages", h.Admin.Promo.GetUsages) + } +} + func registerSettingsRoutes(admin *gin.RouterGroup, h *handler.Handlers) { adminSettings := admin.Group("/settings") { @@ -212,6 +311,9 @@ func registerSettingsRoutes(admin *gin.RouterGroup, h *handler.Handlers) { adminSettings.GET("/admin-api-key", h.Admin.Setting.GetAdminAPIKey) 
adminSettings.POST("/admin-api-key/regenerate", h.Admin.Setting.RegenerateAdminAPIKey) adminSettings.DELETE("/admin-api-key", h.Admin.Setting.DeleteAdminAPIKey) + // 流超时处理配置 + adminSettings.GET("/stream-timeout", h.Admin.Setting.GetStreamTimeoutSettings) + adminSettings.PUT("/stream-timeout", h.Admin.Setting.UpdateStreamTimeoutSettings) } } diff --git a/backend/internal/server/routes/auth.go b/backend/internal/server/routes/auth.go index e61d3939..aa691eba 100644 --- a/backend/internal/server/routes/auth.go +++ b/backend/internal/server/routes/auth.go @@ -1,24 +1,36 @@ package routes import ( + "time" + "github.com/Wei-Shaw/sub2api/internal/handler" - "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/middleware" + servermiddleware "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/gin-gonic/gin" + "github.com/redis/go-redis/v9" ) // RegisterAuthRoutes 注册认证相关路由 func RegisterAuthRoutes( v1 *gin.RouterGroup, h *handler.Handlers, - jwtAuth middleware.JWTAuthMiddleware, + jwtAuth servermiddleware.JWTAuthMiddleware, + redisClient *redis.Client, ) { + // 创建速率限制器 + rateLimiter := middleware.NewRateLimiter(redisClient) + // 公开接口 auth := v1.Group("/auth") { auth.POST("/register", h.Auth.Register) auth.POST("/login", h.Auth.Login) auth.POST("/send-verify-code", h.Auth.SendVerifyCode) + // 优惠码验证接口添加速率限制:每分钟最多 10 次(Redis 故障时 fail-close) + auth.POST("/validate-promo-code", rateLimiter.LimitWithOptions("validate-promo", 10, time.Minute, middleware.RateLimitOptions{ + FailureMode: middleware.RateLimitFailClose, + }), h.Auth.ValidatePromoCode) auth.GET("/oauth/linuxdo/start", h.Auth.LinuxDoOAuthStart) auth.GET("/oauth/linuxdo/callback", h.Auth.LinuxDoOAuthCallback) } diff --git a/backend/internal/server/routes/gateway.go b/backend/internal/server/routes/gateway.go index 0b62185e..bf019ce3 100644 --- a/backend/internal/server/routes/gateway.go +++ b/backend/internal/server/routes/gateway.go @@ -16,13 +16,18 @@ func 
RegisterGatewayRoutes( apiKeyAuth middleware.APIKeyAuthMiddleware, apiKeyService *service.APIKeyService, subscriptionService *service.SubscriptionService, + opsService *service.OpsService, cfg *config.Config, ) { bodyLimit := middleware.RequestBodyLimit(cfg.Gateway.MaxBodySize) + clientRequestID := middleware.ClientRequestID() + opsErrorLogger := handler.OpsErrorLoggerMiddleware(opsService) // API网关(Claude API兼容) gateway := r.Group("/v1") gateway.Use(bodyLimit) + gateway.Use(clientRequestID) + gateway.Use(opsErrorLogger) gateway.Use(gin.HandlerFunc(apiKeyAuth)) { gateway.POST("/messages", h.Gateway.Messages) @@ -36,6 +41,8 @@ func RegisterGatewayRoutes( // Gemini 原生 API 兼容层(Gemini SDK/CLI 直连) gemini := r.Group("/v1beta") gemini.Use(bodyLimit) + gemini.Use(clientRequestID) + gemini.Use(opsErrorLogger) gemini.Use(middleware.APIKeyAuthWithSubscriptionGoogle(apiKeyService, subscriptionService, cfg)) { gemini.GET("/models", h.Gateway.GeminiV1BetaListModels) @@ -45,7 +52,7 @@ func RegisterGatewayRoutes( } // OpenAI Responses API(不带v1前缀的别名) - r.POST("/responses", bodyLimit, gin.HandlerFunc(apiKeyAuth), h.OpenAIGateway.Responses) + r.POST("/responses", bodyLimit, clientRequestID, opsErrorLogger, gin.HandlerFunc(apiKeyAuth), h.OpenAIGateway.Responses) // Antigravity 模型列表 r.GET("/antigravity/models", gin.HandlerFunc(apiKeyAuth), h.Gateway.AntigravityModels) @@ -53,6 +60,8 @@ func RegisterGatewayRoutes( // Antigravity 专用路由(仅使用 antigravity 账户,不混合调度) antigravityV1 := r.Group("/antigravity/v1") antigravityV1.Use(bodyLimit) + antigravityV1.Use(clientRequestID) + antigravityV1.Use(opsErrorLogger) antigravityV1.Use(middleware.ForcePlatform(service.PlatformAntigravity)) antigravityV1.Use(gin.HandlerFunc(apiKeyAuth)) { @@ -64,6 +73,8 @@ func RegisterGatewayRoutes( antigravityV1Beta := r.Group("/antigravity/v1beta") antigravityV1Beta.Use(bodyLimit) + antigravityV1Beta.Use(clientRequestID) + antigravityV1Beta.Use(opsErrorLogger) 
antigravityV1Beta.Use(middleware.ForcePlatform(service.PlatformAntigravity)) antigravityV1Beta.Use(middleware.APIKeyAuthWithSubscriptionGoogle(apiKeyService, subscriptionService, cfg)) { diff --git a/backend/internal/service/account.go b/backend/internal/service/account.go index cfce9bfa..36ba0bcc 100644 --- a/backend/internal/service/account.go +++ b/backend/internal/service/account.go @@ -9,16 +9,19 @@ import ( ) type Account struct { - ID int64 - Name string - Notes *string - Platform string - Type string - Credentials map[string]any - Extra map[string]any - ProxyID *int64 - Concurrency int - Priority int + ID int64 + Name string + Notes *string + Platform string + Type string + Credentials map[string]any + Extra map[string]any + ProxyID *int64 + Concurrency int + Priority int + // RateMultiplier 账号计费倍率(>=0,允许 0 表示该账号计费为 0)。 + // 使用指针用于兼容旧版本调度缓存(Redis)中缺字段的情况:nil 表示按 1.0 处理。 + RateMultiplier *float64 Status string ErrorMessage string LastUsedAt *time.Time @@ -57,6 +60,20 @@ func (a *Account) IsActive() bool { return a.Status == StatusActive } +// BillingRateMultiplier 返回账号计费倍率。 +// - nil 表示未配置/旧缓存缺字段,按 1.0 处理 +// - 允许 0,表示该账号计费为 0 +// - 负数属于非法数据,出于安全考虑按 1.0 处理 +func (a *Account) BillingRateMultiplier() float64 { + if a == nil || a.RateMultiplier == nil { + return 1.0 + } + if *a.RateMultiplier < 0 { + return 1.0 + } + return *a.RateMultiplier +} + func (a *Account) IsSchedulable() bool { if !a.IsActive() || !a.Schedulable { return false @@ -540,3 +557,141 @@ func (a *Account) IsMixedSchedulingEnabled() bool { } return false } + +// WindowCostSchedulability 窗口费用调度状态 +type WindowCostSchedulability int + +const ( + // WindowCostSchedulable 可正常调度 + WindowCostSchedulable WindowCostSchedulability = iota + // WindowCostStickyOnly 仅允许粘性会话 + WindowCostStickyOnly + // WindowCostNotSchedulable 完全不可调度 + WindowCostNotSchedulable +) + +// IsAnthropicOAuthOrSetupToken 判断是否为 Anthropic OAuth 或 SetupToken 类型账号 +// 仅这两类账号支持 5h 窗口额度控制和会话数量控制 +func (a *Account) 
IsAnthropicOAuthOrSetupToken() bool { + return a.Platform == PlatformAnthropic && (a.Type == AccountTypeOAuth || a.Type == AccountTypeSetupToken) +} + +// GetWindowCostLimit 获取 5h 窗口费用阈值(美元) +// 返回 0 表示未启用 +func (a *Account) GetWindowCostLimit() float64 { + if a.Extra == nil { + return 0 + } + if v, ok := a.Extra["window_cost_limit"]; ok { + return parseExtraFloat64(v) + } + return 0 +} + +// GetWindowCostStickyReserve 获取粘性会话预留额度(美元) +// 默认值为 10 +func (a *Account) GetWindowCostStickyReserve() float64 { + if a.Extra == nil { + return 10.0 + } + if v, ok := a.Extra["window_cost_sticky_reserve"]; ok { + val := parseExtraFloat64(v) + if val > 0 { + return val + } + } + return 10.0 +} + +// GetMaxSessions 获取最大并发会话数 +// 返回 0 表示未启用 +func (a *Account) GetMaxSessions() int { + if a.Extra == nil { + return 0 + } + if v, ok := a.Extra["max_sessions"]; ok { + return parseExtraInt(v) + } + return 0 +} + +// GetSessionIdleTimeoutMinutes 获取会话空闲超时分钟数 +// 默认值为 5 分钟 +func (a *Account) GetSessionIdleTimeoutMinutes() int { + if a.Extra == nil { + return 5 + } + if v, ok := a.Extra["session_idle_timeout_minutes"]; ok { + val := parseExtraInt(v) + if val > 0 { + return val + } + } + return 5 +} + +// CheckWindowCostSchedulability 根据当前窗口费用检查调度状态 +// - 费用 < 阈值: WindowCostSchedulable(可正常调度) +// - 费用 >= 阈值 且 < 阈值+预留: WindowCostStickyOnly(仅粘性会话) +// - 费用 >= 阈值+预留: WindowCostNotSchedulable(不可调度) +func (a *Account) CheckWindowCostSchedulability(currentWindowCost float64) WindowCostSchedulability { + limit := a.GetWindowCostLimit() + if limit <= 0 { + return WindowCostSchedulable + } + + if currentWindowCost < limit { + return WindowCostSchedulable + } + + stickyReserve := a.GetWindowCostStickyReserve() + if currentWindowCost < limit+stickyReserve { + return WindowCostStickyOnly + } + + return WindowCostNotSchedulable +} + +// parseExtraFloat64 从 extra 字段解析 float64 值 +func parseExtraFloat64(value any) float64 { + switch v := value.(type) { + case float64: + return v + case float32: + return 
float64(v) + case int: + return float64(v) + case int64: + return float64(v) + case json.Number: + if f, err := v.Float64(); err == nil { + return f + } + case string: + if f, err := strconv.ParseFloat(strings.TrimSpace(v), 64); err == nil { + return f + } + } + return 0 +} + +// parseExtraInt 从 extra 字段解析 int 值 +func parseExtraInt(value any) int { + switch v := value.(type) { + case int: + return v + case int64: + return int(v) + case float64: + return int(v) + case json.Number: + if i, err := v.Int64(); err == nil { + return int(i) + } + case string: + if i, err := strconv.Atoi(strings.TrimSpace(v)); err == nil { + return i + } + } + return 0 +} diff --git a/backend/internal/service/account_billing_rate_multiplier_test.go b/backend/internal/service/account_billing_rate_multiplier_test.go new file mode 100644 index 00000000..731cfa7a --- /dev/null +++ b/backend/internal/service/account_billing_rate_multiplier_test.go @@ -0,0 +1,27 @@ +package service + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAccount_BillingRateMultiplier_DefaultsToOneWhenNil(t *testing.T) { + var a Account + require.NoError(t, json.Unmarshal([]byte(`{"id":1,"name":"acc","status":"active"}`), &a)) + require.Nil(t, a.RateMultiplier) + require.Equal(t, 1.0, a.BillingRateMultiplier()) +} + +func TestAccount_BillingRateMultiplier_AllowsZero(t *testing.T) { + v := 0.0 + a := Account{RateMultiplier: &v} + require.Equal(t, 0.0, a.BillingRateMultiplier()) +} + +func TestAccount_BillingRateMultiplier_NegativeFallsBackToOne(t *testing.T) { + v := -1.0 + a := Account{RateMultiplier: &v} + require.Equal(t, 1.0, a.BillingRateMultiplier()) +} diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go index 93a36c3e..90365d2f 100644 --- a/backend/internal/service/account_service.go +++ b/backend/internal/service/account_service.go @@ -51,11 +51,13 @@ type AccountRepository interface { SetRateLimited(ctx 
context.Context, id int64, resetAt time.Time) error SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error + SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error SetOverloaded(ctx context.Context, id int64, until time.Time) error SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error ClearTempUnschedulable(ctx context.Context, id int64) error ClearRateLimit(ctx context.Context, id int64) error ClearAntigravityQuotaScopes(ctx context.Context, id int64) error + ClearModelRateLimits(ctx context.Context, id int64) error UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error UpdateExtra(ctx context.Context, id int64, updates map[string]any) error BulkUpdate(ctx context.Context, ids []int64, updates AccountBulkUpdate) (int64, error) @@ -64,14 +66,15 @@ type AccountRepository interface { // AccountBulkUpdate describes the fields that can be updated in a bulk operation. // Nil pointers mean "do not change". 
type AccountBulkUpdate struct { - Name *string - ProxyID *int64 - Concurrency *int - Priority *int - Status *string - Schedulable *bool - Credentials map[string]any - Extra map[string]any + Name *string + ProxyID *int64 + Concurrency *int + Priority *int + RateMultiplier *float64 + Status *string + Schedulable *bool + Credentials map[string]any + Extra map[string]any } // CreateAccountRequest 创建账号请求 diff --git a/backend/internal/service/account_service_delete_test.go b/backend/internal/service/account_service_delete_test.go index fe89b47f..e5eabfc6 100644 --- a/backend/internal/service/account_service_delete_test.go +++ b/backend/internal/service/account_service_delete_test.go @@ -147,6 +147,10 @@ func (s *accountRepoStub) SetAntigravityQuotaScopeLimit(ctx context.Context, id panic("unexpected SetAntigravityQuotaScopeLimit call") } +func (s *accountRepoStub) SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error { + panic("unexpected SetModelRateLimit call") +} + func (s *accountRepoStub) SetOverloaded(ctx context.Context, id int64, until time.Time) error { panic("unexpected SetOverloaded call") } @@ -167,6 +171,10 @@ func (s *accountRepoStub) ClearAntigravityQuotaScopes(ctx context.Context, id in panic("unexpected ClearAntigravityQuotaScopes call") } +func (s *accountRepoStub) ClearModelRateLimits(ctx context.Context, id int64) error { + panic("unexpected ClearModelRateLimits call") +} + func (s *accountRepoStub) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { panic("unexpected UpdateSessionWindow call") } diff --git a/backend/internal/service/account_usage_service.go b/backend/internal/service/account_usage_service.go index f1ee43d2..6f012385 100644 --- a/backend/internal/service/account_usage_service.go +++ b/backend/internal/service/account_usage_service.go @@ -32,8 +32,8 @@ type UsageLogRepository interface { // Admin dashboard stats GetDashboardStats(ctx context.Context) 
(*usagestats.DashboardStats, error) - GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID int64) ([]usagestats.TrendDataPoint, error) - GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID int64) ([]usagestats.ModelStat, error) + GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) + GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error) GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.UserUsageTrendPoint, error) GetBatchUserUsageStats(ctx context.Context, userIDs []int64) (map[int64]*usagestats.BatchUserUsageStats, error) @@ -96,10 +96,16 @@ func NewUsageCache() *UsageCache { } // WindowStats 窗口期统计 +// +// cost: 账号口径费用(total_cost * account_rate_multiplier) +// standard_cost: 标准费用(total_cost,不含倍率) +// user_cost: 用户/API Key 口径费用(actual_cost,受分组倍率影响) type WindowStats struct { - Requests int64 `json:"requests"` - Tokens int64 `json:"tokens"` - Cost float64 `json:"cost"` + Requests int64 `json:"requests"` + Tokens int64 `json:"tokens"` + Cost float64 `json:"cost"` + StandardCost float64 `json:"standard_cost"` + UserCost float64 `json:"user_cost"` } // UsageProgress 使用量进度 @@ -266,7 +272,7 @@ func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Accou } dayStart := geminiDailyWindowStart(now) - stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, account.ID) + stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, 
account.ID, 0, nil) if err != nil { return nil, fmt.Errorf("get gemini usage stats failed: %w", err) } @@ -288,7 +294,7 @@ func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Accou // Minute window (RPM) - fixed-window approximation: current minute [truncate(now), truncate(now)+1m) minuteStart := now.Truncate(time.Minute) minuteResetAt := minuteStart.Add(time.Minute) - minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID) + minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID, 0, nil) if err != nil { return nil, fmt.Errorf("get gemini minute usage stats failed: %w", err) } @@ -377,9 +383,11 @@ func (s *AccountUsageService) addWindowStats(ctx context.Context, account *Accou } windowStats = &WindowStats{ - Requests: stats.Requests, - Tokens: stats.Tokens, - Cost: stats.Cost, + Requests: stats.Requests, + Tokens: stats.Tokens, + Cost: stats.Cost, + StandardCost: stats.StandardCost, + UserCost: stats.UserCost, } // 缓存窗口统计(1 分钟) @@ -403,9 +411,11 @@ func (s *AccountUsageService) GetTodayStats(ctx context.Context, accountID int64 } return &WindowStats{ - Requests: stats.Requests, - Tokens: stats.Tokens, - Cost: stats.Cost, + Requests: stats.Requests, + Tokens: stats.Tokens, + Cost: stats.Cost, + StandardCost: stats.StandardCost, + UserCost: stats.UserCost, }, nil } @@ -565,3 +575,9 @@ func buildGeminiUsageProgress(used, limit int64, resetAt time.Time, tokens int64 }, } } + +// GetAccountWindowStats 获取账号在指定时间窗口内的使用统计 +// 用于账号列表页面显示当前窗口费用 +func (s *AccountUsageService) GetAccountWindowStats(ctx context.Context, accountID int64, startTime time.Time) (*usagestats.AccountStats, error) { + return s.usageLogRepo.GetAccountWindowStats(ctx, accountID, startTime) +} diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index f0cb0671..0afa0716 100644 --- a/backend/internal/service/admin_service.go +++ 
b/backend/internal/service/admin_service.go @@ -55,7 +55,8 @@ type AdminService interface { CreateProxy(ctx context.Context, input *CreateProxyInput) (*Proxy, error) UpdateProxy(ctx context.Context, id int64, input *UpdateProxyInput) (*Proxy, error) DeleteProxy(ctx context.Context, id int64) error - GetProxyAccounts(ctx context.Context, proxyID int64, page, pageSize int) ([]Account, int64, error) + BatchDeleteProxies(ctx context.Context, ids []int64) (*ProxyBatchDeleteResult, error) + GetProxyAccounts(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error) CheckProxyExists(ctx context.Context, host string, port int, username, password string) (bool, error) TestProxy(ctx context.Context, id int64) (*ProxyTestResult, error) @@ -106,6 +107,9 @@ type CreateGroupInput struct { ImagePrice4K *float64 ClaudeCodeOnly bool // 仅允许 Claude Code 客户端 FallbackGroupID *int64 // 降级分组 ID + // 模型路由配置(仅 anthropic 平台使用) + ModelRouting map[string][]int64 + ModelRoutingEnabled bool // 是否启用模型路由 } type UpdateGroupInput struct { @@ -125,6 +129,9 @@ type UpdateGroupInput struct { ImagePrice4K *float64 ClaudeCodeOnly *bool // 仅允许 Claude Code 客户端 FallbackGroupID *int64 // 降级分组 ID + // 模型路由配置(仅 anthropic 平台使用) + ModelRouting map[string][]int64 + ModelRoutingEnabled *bool // 是否启用模型路由 } type CreateAccountInput struct { @@ -137,6 +144,7 @@ type CreateAccountInput struct { ProxyID *int64 Concurrency int Priority int + RateMultiplier *float64 // 账号计费倍率(>=0,允许 0) GroupIDs []int64 ExpiresAt *int64 AutoPauseOnExpired *bool @@ -152,8 +160,9 @@ type UpdateAccountInput struct { Credentials map[string]any Extra map[string]any ProxyID *int64 - Concurrency *int // 使用指针区分"未提供"和"设置为0" - Priority *int // 使用指针区分"未提供"和"设置为0" + Concurrency *int // 使用指针区分"未提供"和"设置为0" + Priority *int // 使用指针区分"未提供"和"设置为0" + RateMultiplier *float64 // 账号计费倍率(>=0,允许 0) Status string GroupIDs *[]int64 ExpiresAt *int64 @@ -163,16 +172,17 @@ type UpdateAccountInput struct { // BulkUpdateAccountsInput describes the payload for 
bulk updating accounts. type BulkUpdateAccountsInput struct { - AccountIDs []int64 - Name string - ProxyID *int64 - Concurrency *int - Priority *int - Status string - Schedulable *bool - GroupIDs *[]int64 - Credentials map[string]any - Extra map[string]any + AccountIDs []int64 + Name string + ProxyID *int64 + Concurrency *int + Priority *int + RateMultiplier *float64 // 账号计费倍率(>=0,允许 0) + Status string + Schedulable *bool + GroupIDs *[]int64 + Credentials map[string]any + Extra map[string]any // SkipMixedChannelCheck skips the mixed channel risk check when binding groups. // This should only be set when the caller has explicitly confirmed the risk. SkipMixedChannelCheck bool @@ -187,9 +197,11 @@ type BulkUpdateAccountResult struct { // BulkUpdateAccountsResult is the aggregated response for bulk updates. type BulkUpdateAccountsResult struct { - Success int `json:"success"` - Failed int `json:"failed"` - Results []BulkUpdateAccountResult `json:"results"` + Success int `json:"success"` + Failed int `json:"failed"` + SuccessIDs []int64 `json:"success_ids"` + FailedIDs []int64 `json:"failed_ids"` + Results []BulkUpdateAccountResult `json:"results"` } type CreateProxyInput struct { @@ -219,23 +231,35 @@ type GenerateRedeemCodesInput struct { ValidityDays int // 订阅类型专用:有效天数 } -// ProxyTestResult represents the result of testing a proxy -type ProxyTestResult struct { - Success bool `json:"success"` - Message string `json:"message"` - LatencyMs int64 `json:"latency_ms,omitempty"` - IPAddress string `json:"ip_address,omitempty"` - City string `json:"city,omitempty"` - Region string `json:"region,omitempty"` - Country string `json:"country,omitempty"` +type ProxyBatchDeleteResult struct { + DeletedIDs []int64 `json:"deleted_ids"` + Skipped []ProxyBatchDeleteSkipped `json:"skipped"` } -// ProxyExitInfo represents proxy exit information from ipinfo.io +type ProxyBatchDeleteSkipped struct { + ID int64 `json:"id"` + Reason string `json:"reason"` +} + +// ProxyTestResult 
represents the result of testing a proxy +type ProxyTestResult struct { + Success bool `json:"success"` + Message string `json:"message"` + LatencyMs int64 `json:"latency_ms,omitempty"` + IPAddress string `json:"ip_address,omitempty"` + City string `json:"city,omitempty"` + Region string `json:"region,omitempty"` + Country string `json:"country,omitempty"` + CountryCode string `json:"country_code,omitempty"` +} + +// ProxyExitInfo represents proxy exit information from ip-api.com type ProxyExitInfo struct { - IP string - City string - Region string - Country string + IP string + City string + Region string + Country string + CountryCode string } // ProxyExitInfoProber tests proxy connectivity and retrieves exit information @@ -245,14 +269,16 @@ type ProxyExitInfoProber interface { // adminServiceImpl implements AdminService type adminServiceImpl struct { - userRepo UserRepository - groupRepo GroupRepository - accountRepo AccountRepository - proxyRepo ProxyRepository - apiKeyRepo APIKeyRepository - redeemCodeRepo RedeemCodeRepository - billingCacheService *BillingCacheService - proxyProber ProxyExitInfoProber + userRepo UserRepository + groupRepo GroupRepository + accountRepo AccountRepository + proxyRepo ProxyRepository + apiKeyRepo APIKeyRepository + redeemCodeRepo RedeemCodeRepository + billingCacheService *BillingCacheService + proxyProber ProxyExitInfoProber + proxyLatencyCache ProxyLatencyCache + authCacheInvalidator APIKeyAuthCacheInvalidator } // NewAdminService creates a new AdminService @@ -265,16 +291,20 @@ func NewAdminService( redeemCodeRepo RedeemCodeRepository, billingCacheService *BillingCacheService, proxyProber ProxyExitInfoProber, + proxyLatencyCache ProxyLatencyCache, + authCacheInvalidator APIKeyAuthCacheInvalidator, ) AdminService { return &adminServiceImpl{ - userRepo: userRepo, - groupRepo: groupRepo, - accountRepo: accountRepo, - proxyRepo: proxyRepo, - apiKeyRepo: apiKeyRepo, - redeemCodeRepo: redeemCodeRepo, - billingCacheService: 
billingCacheService, - proxyProber: proxyProber, + userRepo: userRepo, + groupRepo: groupRepo, + accountRepo: accountRepo, + proxyRepo: proxyRepo, + apiKeyRepo: apiKeyRepo, + redeemCodeRepo: redeemCodeRepo, + billingCacheService: billingCacheService, + proxyProber: proxyProber, + proxyLatencyCache: proxyLatencyCache, + authCacheInvalidator: authCacheInvalidator, } } @@ -324,6 +354,8 @@ func (s *adminServiceImpl) UpdateUser(ctx context.Context, id int64, input *Upda } oldConcurrency := user.Concurrency + oldStatus := user.Status + oldRole := user.Role if input.Email != "" { user.Email = input.Email @@ -356,6 +388,11 @@ func (s *adminServiceImpl) UpdateUser(ctx context.Context, id int64, input *Upda if err := s.userRepo.Update(ctx, user); err != nil { return nil, err } + if s.authCacheInvalidator != nil { + if user.Concurrency != oldConcurrency || user.Status != oldStatus || user.Role != oldRole { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, user.ID) + } + } concurrencyDiff := user.Concurrency - oldConcurrency if concurrencyDiff != 0 { @@ -394,6 +431,9 @@ func (s *adminServiceImpl) DeleteUser(ctx context.Context, id int64) error { log.Printf("delete user failed: user_id=%d err=%v", id, err) return err } + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, id) + } return nil } @@ -421,6 +461,10 @@ func (s *adminServiceImpl) UpdateUserBalance(ctx context.Context, userID int64, if err := s.userRepo.Update(ctx, user); err != nil { return nil, err } + balanceDiff := user.Balance - oldBalance + if s.authCacheInvalidator != nil && balanceDiff != 0 { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) + } if s.billingCacheService != nil { go func() { @@ -432,7 +476,6 @@ func (s *adminServiceImpl) UpdateUserBalance(ctx context.Context, userID int64, }() } - balanceDiff := user.Balance - oldBalance if balanceDiff != 0 { code, err := GenerateRedeemCode() if err != nil { @@ -545,6 +588,7 @@ func (s 
*adminServiceImpl) CreateGroup(ctx context.Context, input *CreateGroupIn ImagePrice4K: imagePrice4K, ClaudeCodeOnly: input.ClaudeCodeOnly, FallbackGroupID: input.FallbackGroupID, + ModelRouting: input.ModelRouting, } if err := s.groupRepo.Create(ctx, group); err != nil { return nil, err @@ -577,18 +621,33 @@ func (s *adminServiceImpl) validateFallbackGroup(ctx context.Context, currentGro return fmt.Errorf("cannot set self as fallback group") } - // 检查降级分组是否存在 - fallbackGroup, err := s.groupRepo.GetByID(ctx, fallbackGroupID) - if err != nil { - return fmt.Errorf("fallback group not found: %w", err) - } + visited := map[int64]struct{}{} + nextID := fallbackGroupID + for { + if _, seen := visited[nextID]; seen { + return fmt.Errorf("fallback group cycle detected") + } + visited[nextID] = struct{}{} + if currentGroupID > 0 && nextID == currentGroupID { + return fmt.Errorf("fallback group cycle detected") + } - // 降级分组不能启用 claude_code_only,否则会造成死循环 - if fallbackGroup.ClaudeCodeOnly { - return fmt.Errorf("fallback group cannot have claude_code_only enabled") - } + // 检查降级分组是否存在 + fallbackGroup, err := s.groupRepo.GetByIDLite(ctx, nextID) + if err != nil { + return fmt.Errorf("fallback group not found: %w", err) + } - return nil + // 降级分组不能启用 claude_code_only,否则会造成死循环 + if nextID == fallbackGroupID && fallbackGroup.ClaudeCodeOnly { + return fmt.Errorf("fallback group cannot have claude_code_only enabled") + } + + if fallbackGroup.FallbackGroupID == nil { + return nil + } + nextID = *fallbackGroup.FallbackGroupID + } } func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *UpdateGroupInput) (*Group, error) { @@ -658,13 +717,32 @@ func (s *adminServiceImpl) UpdateGroup(ctx context.Context, id int64, input *Upd } } + // 模型路由配置 + if input.ModelRouting != nil { + group.ModelRouting = input.ModelRouting + } + if input.ModelRoutingEnabled != nil { + group.ModelRoutingEnabled = *input.ModelRoutingEnabled + } + if err := s.groupRepo.Update(ctx, group); err != 
nil { return nil, err } + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByGroupID(ctx, id) + } return group, nil } func (s *adminServiceImpl) DeleteGroup(ctx context.Context, id int64) error { + var groupKeys []string + if s.authCacheInvalidator != nil { + keys, err := s.apiKeyRepo.ListKeysByGroupID(ctx, id) + if err == nil { + groupKeys = keys + } + } + affectedUserIDs, err := s.groupRepo.DeleteCascade(ctx, id) if err != nil { return err @@ -683,6 +761,11 @@ func (s *adminServiceImpl) DeleteGroup(ctx context.Context, id int64) error { } }() } + if s.authCacheInvalidator != nil { + for _, key := range groupKeys { + s.authCacheInvalidator.InvalidateAuthCacheByKey(ctx, key) + } + } return nil } @@ -769,6 +852,12 @@ func (s *adminServiceImpl) CreateAccount(ctx context.Context, input *CreateAccou } else { account.AutoPauseOnExpired = true } + if input.RateMultiplier != nil { + if *input.RateMultiplier < 0 { + return nil, errors.New("rate_multiplier must be >= 0") + } + account.RateMultiplier = input.RateMultiplier + } if err := s.accountRepo.Create(ctx, account); err != nil { return nil, err } @@ -821,6 +910,12 @@ func (s *adminServiceImpl) UpdateAccount(ctx context.Context, id int64, input *U if input.Priority != nil { account.Priority = *input.Priority } + if input.RateMultiplier != nil { + if *input.RateMultiplier < 0 { + return nil, errors.New("rate_multiplier must be >= 0") + } + account.RateMultiplier = input.RateMultiplier + } if input.Status != "" { account.Status = input.Status } @@ -871,7 +966,9 @@ func (s *adminServiceImpl) UpdateAccount(ctx context.Context, id int64, input *U // It merges credentials/extra keys instead of overwriting the whole object. 
func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUpdateAccountsInput) (*BulkUpdateAccountsResult, error) { result := &BulkUpdateAccountsResult{ - Results: make([]BulkUpdateAccountResult, 0, len(input.AccountIDs)), + SuccessIDs: make([]int64, 0, len(input.AccountIDs)), + FailedIDs: make([]int64, 0, len(input.AccountIDs)), + Results: make([]BulkUpdateAccountResult, 0, len(input.AccountIDs)), } if len(input.AccountIDs) == 0 { @@ -892,6 +989,12 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp } } + if input.RateMultiplier != nil { + if *input.RateMultiplier < 0 { + return nil, errors.New("rate_multiplier must be >= 0") + } + } + // Prepare bulk updates for columns and JSONB fields. repoUpdates := AccountBulkUpdate{ Credentials: input.Credentials, @@ -909,6 +1012,9 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp if input.Priority != nil { repoUpdates.Priority = input.Priority } + if input.RateMultiplier != nil { + repoUpdates.RateMultiplier = input.RateMultiplier + } if input.Status != "" { repoUpdates.Status = &input.Status } @@ -935,6 +1041,7 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp entry.Success = false entry.Error = err.Error() result.Failed++ + result.FailedIDs = append(result.FailedIDs, accountID) result.Results = append(result.Results, entry) continue } @@ -944,6 +1051,7 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp entry.Success = false entry.Error = err.Error() result.Failed++ + result.FailedIDs = append(result.FailedIDs, accountID) result.Results = append(result.Results, entry) continue } @@ -953,6 +1061,7 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp entry.Success = false entry.Error = err.Error() result.Failed++ + result.FailedIDs = append(result.FailedIDs, accountID) result.Results = append(result.Results, entry) continue } @@ -960,6 +1069,7 
@@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp entry.Success = true result.Success++ + result.SuccessIDs = append(result.SuccessIDs, accountID) result.Results = append(result.Results, entry) } @@ -1019,6 +1129,7 @@ func (s *adminServiceImpl) ListProxiesWithAccountCount(ctx context.Context, page if err != nil { return nil, 0, err } + s.attachProxyLatency(ctx, proxies) return proxies, result.Total, nil } @@ -1027,7 +1138,12 @@ func (s *adminServiceImpl) GetAllProxies(ctx context.Context) ([]Proxy, error) { } func (s *adminServiceImpl) GetAllProxiesWithAccountCount(ctx context.Context) ([]ProxyWithAccountCount, error) { - return s.proxyRepo.ListActiveWithAccountCount(ctx) + proxies, err := s.proxyRepo.ListActiveWithAccountCount(ctx) + if err != nil { + return nil, err + } + s.attachProxyLatency(ctx, proxies) + return proxies, nil } func (s *adminServiceImpl) GetProxy(ctx context.Context, id int64) (*Proxy, error) { @@ -1047,6 +1163,8 @@ func (s *adminServiceImpl) CreateProxy(ctx context.Context, input *CreateProxyIn if err := s.proxyRepo.Create(ctx, proxy); err != nil { return nil, err } + // Probe latency asynchronously so creation isn't blocked by network timeout. 
+ go s.probeProxyLatency(context.Background(), proxy) return proxy, nil } @@ -1085,12 +1203,53 @@ func (s *adminServiceImpl) UpdateProxy(ctx context.Context, id int64, input *Upd } func (s *adminServiceImpl) DeleteProxy(ctx context.Context, id int64) error { + count, err := s.proxyRepo.CountAccountsByProxyID(ctx, id) + if err != nil { + return err + } + if count > 0 { + return ErrProxyInUse + } return s.proxyRepo.Delete(ctx, id) } -func (s *adminServiceImpl) GetProxyAccounts(ctx context.Context, proxyID int64, page, pageSize int) ([]Account, int64, error) { - // Return mock data for now - would need a dedicated repository method - return []Account{}, 0, nil +func (s *adminServiceImpl) BatchDeleteProxies(ctx context.Context, ids []int64) (*ProxyBatchDeleteResult, error) { + result := &ProxyBatchDeleteResult{} + if len(ids) == 0 { + return result, nil + } + + for _, id := range ids { + count, err := s.proxyRepo.CountAccountsByProxyID(ctx, id) + if err != nil { + result.Skipped = append(result.Skipped, ProxyBatchDeleteSkipped{ + ID: id, + Reason: err.Error(), + }) + continue + } + if count > 0 { + result.Skipped = append(result.Skipped, ProxyBatchDeleteSkipped{ + ID: id, + Reason: ErrProxyInUse.Error(), + }) + continue + } + if err := s.proxyRepo.Delete(ctx, id); err != nil { + result.Skipped = append(result.Skipped, ProxyBatchDeleteSkipped{ + ID: id, + Reason: err.Error(), + }) + continue + } + result.DeletedIDs = append(result.DeletedIDs, id) + } + + return result, nil +} + +func (s *adminServiceImpl) GetProxyAccounts(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error) { + return s.proxyRepo.ListAccountSummariesByProxyID(ctx, proxyID) } func (s *adminServiceImpl) CheckProxyExists(ctx context.Context, host string, port int, username, password string) (bool, error) { @@ -1190,23 +1349,69 @@ func (s *adminServiceImpl) TestProxy(ctx context.Context, id int64) (*ProxyTestR proxyURL := proxy.URL() exitInfo, latencyMs, err := s.proxyProber.ProbeProxy(ctx, 
proxyURL) if err != nil { + s.saveProxyLatency(ctx, id, &ProxyLatencyInfo{ + Success: false, + Message: err.Error(), + UpdatedAt: time.Now(), + }) return &ProxyTestResult{ Success: false, Message: err.Error(), }, nil } + latency := latencyMs + s.saveProxyLatency(ctx, id, &ProxyLatencyInfo{ + Success: true, + LatencyMs: &latency, + Message: "Proxy is accessible", + IPAddress: exitInfo.IP, + Country: exitInfo.Country, + CountryCode: exitInfo.CountryCode, + Region: exitInfo.Region, + City: exitInfo.City, + UpdatedAt: time.Now(), + }) return &ProxyTestResult{ - Success: true, - Message: "Proxy is accessible", - LatencyMs: latencyMs, - IPAddress: exitInfo.IP, - City: exitInfo.City, - Region: exitInfo.Region, - Country: exitInfo.Country, + Success: true, + Message: "Proxy is accessible", + LatencyMs: latencyMs, + IPAddress: exitInfo.IP, + City: exitInfo.City, + Region: exitInfo.Region, + Country: exitInfo.Country, + CountryCode: exitInfo.CountryCode, }, nil } +func (s *adminServiceImpl) probeProxyLatency(ctx context.Context, proxy *Proxy) { + if s.proxyProber == nil || proxy == nil { + return + } + exitInfo, latencyMs, err := s.proxyProber.ProbeProxy(ctx, proxy.URL()) + if err != nil { + s.saveProxyLatency(ctx, proxy.ID, &ProxyLatencyInfo{ + Success: false, + Message: err.Error(), + UpdatedAt: time.Now(), + }) + return + } + + latency := latencyMs + s.saveProxyLatency(ctx, proxy.ID, &ProxyLatencyInfo{ + Success: true, + LatencyMs: &latency, + Message: "Proxy is accessible", + IPAddress: exitInfo.IP, + Country: exitInfo.Country, + CountryCode: exitInfo.CountryCode, + Region: exitInfo.Region, + City: exitInfo.City, + UpdatedAt: time.Now(), + }) +} + // checkMixedChannelRisk 检查分组中是否存在混合渠道(Antigravity + Anthropic) // 如果存在混合,返回错误提示用户确认 func (s *adminServiceImpl) checkMixedChannelRisk(ctx context.Context, currentAccountID int64, currentAccountPlatform string, groupIDs []int64) error { @@ -1256,6 +1461,51 @@ func (s *adminServiceImpl) checkMixedChannelRisk(ctx context.Context, 
currentAcc return nil } +func (s *adminServiceImpl) attachProxyLatency(ctx context.Context, proxies []ProxyWithAccountCount) { + if s.proxyLatencyCache == nil || len(proxies) == 0 { + return + } + + ids := make([]int64, 0, len(proxies)) + for i := range proxies { + ids = append(ids, proxies[i].ID) + } + + latencies, err := s.proxyLatencyCache.GetProxyLatencies(ctx, ids) + if err != nil { + log.Printf("Warning: load proxy latency cache failed: %v", err) + return + } + + for i := range proxies { + info := latencies[proxies[i].ID] + if info == nil { + continue + } + if info.Success { + proxies[i].LatencyStatus = "success" + proxies[i].LatencyMs = info.LatencyMs + } else { + proxies[i].LatencyStatus = "failed" + } + proxies[i].LatencyMessage = info.Message + proxies[i].IPAddress = info.IPAddress + proxies[i].Country = info.Country + proxies[i].CountryCode = info.CountryCode + proxies[i].Region = info.Region + proxies[i].City = info.City + } +} + +func (s *adminServiceImpl) saveProxyLatency(ctx context.Context, proxyID int64, info *ProxyLatencyInfo) { + if s.proxyLatencyCache == nil || info == nil { + return + } + if err := s.proxyLatencyCache.SetProxyLatency(ctx, proxyID, info); err != nil { + log.Printf("Warning: store proxy latency cache failed: %v", err) + } +} + // getAccountPlatform 根据账号 platform 判断混合渠道检查用的平台标识 func getAccountPlatform(accountPlatform string) string { switch strings.ToLower(strings.TrimSpace(accountPlatform)) { diff --git a/backend/internal/service/admin_service_bulk_update_test.go b/backend/internal/service/admin_service_bulk_update_test.go new file mode 100644 index 00000000..662b95fb --- /dev/null +++ b/backend/internal/service/admin_service_bulk_update_test.go @@ -0,0 +1,80 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "testing" + + "github.com/stretchr/testify/require" +) + +type accountRepoStubForBulkUpdate struct { + accountRepoStub + bulkUpdateErr error + bulkUpdateIDs []int64 + bindGroupErrByID map[int64]error 
+} + +func (s *accountRepoStubForBulkUpdate) BulkUpdate(_ context.Context, ids []int64, _ AccountBulkUpdate) (int64, error) { + s.bulkUpdateIDs = append([]int64{}, ids...) + if s.bulkUpdateErr != nil { + return 0, s.bulkUpdateErr + } + return int64(len(ids)), nil +} + +func (s *accountRepoStubForBulkUpdate) BindGroups(_ context.Context, accountID int64, _ []int64) error { + if err, ok := s.bindGroupErrByID[accountID]; ok { + return err + } + return nil +} + +// TestAdminService_BulkUpdateAccounts_AllSuccessIDs 验证批量更新成功时返回 success_ids/failed_ids。 +func TestAdminService_BulkUpdateAccounts_AllSuccessIDs(t *testing.T) { + repo := &accountRepoStubForBulkUpdate{} + svc := &adminServiceImpl{accountRepo: repo} + + schedulable := true + input := &BulkUpdateAccountsInput{ + AccountIDs: []int64{1, 2, 3}, + Schedulable: &schedulable, + } + + result, err := svc.BulkUpdateAccounts(context.Background(), input) + require.NoError(t, err) + require.Equal(t, 3, result.Success) + require.Equal(t, 0, result.Failed) + require.ElementsMatch(t, []int64{1, 2, 3}, result.SuccessIDs) + require.Empty(t, result.FailedIDs) + require.Len(t, result.Results, 3) +} + +// TestAdminService_BulkUpdateAccounts_PartialFailureIDs 验证部分失败时 success_ids/failed_ids 正确。 +func TestAdminService_BulkUpdateAccounts_PartialFailureIDs(t *testing.T) { + repo := &accountRepoStubForBulkUpdate{ + bindGroupErrByID: map[int64]error{ + 2: errors.New("bind failed"), + }, + } + svc := &adminServiceImpl{accountRepo: repo} + + groupIDs := []int64{10} + schedulable := false + input := &BulkUpdateAccountsInput{ + AccountIDs: []int64{1, 2, 3}, + GroupIDs: &groupIDs, + Schedulable: &schedulable, + SkipMixedChannelCheck: true, + } + + result, err := svc.BulkUpdateAccounts(context.Background(), input) + require.NoError(t, err) + require.Equal(t, 2, result.Success) + require.Equal(t, 1, result.Failed) + require.ElementsMatch(t, []int64{1, 3}, result.SuccessIDs) + require.ElementsMatch(t, []int64{2}, result.FailedIDs) + require.Len(t, 
result.Results, 3) +} diff --git a/backend/internal/service/admin_service_delete_test.go b/backend/internal/service/admin_service_delete_test.go index 351f64e8..afa433af 100644 --- a/backend/internal/service/admin_service_delete_test.go +++ b/backend/internal/service/admin_service_delete_test.go @@ -107,6 +107,10 @@ func (s *groupRepoStub) GetByID(ctx context.Context, id int64) (*Group, error) { panic("unexpected GetByID call") } +func (s *groupRepoStub) GetByIDLite(ctx context.Context, id int64) (*Group, error) { + panic("unexpected GetByIDLite call") +} + func (s *groupRepoStub) Update(ctx context.Context, group *Group) error { panic("unexpected Update call") } @@ -149,8 +153,10 @@ func (s *groupRepoStub) DeleteAccountGroupsByGroupID(ctx context.Context, groupI } type proxyRepoStub struct { - deleteErr error - deletedIDs []int64 + deleteErr error + countErr error + accountCount int64 + deletedIDs []int64 } func (s *proxyRepoStub) Create(ctx context.Context, proxy *Proxy) error { @@ -195,7 +201,14 @@ func (s *proxyRepoStub) ExistsByHostPortAuth(ctx context.Context, host string, p } func (s *proxyRepoStub) CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error) { - panic("unexpected CountAccountsByProxyID call") + if s.countErr != nil { + return 0, s.countErr + } + return s.accountCount, nil +} + +func (s *proxyRepoStub) ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error) { + panic("unexpected ListAccountSummariesByProxyID call") } type redeemRepoStub struct { @@ -405,6 +418,15 @@ func TestAdminService_DeleteProxy_Idempotent(t *testing.T) { require.Equal(t, []int64{404}, repo.deletedIDs) } +func TestAdminService_DeleteProxy_InUse(t *testing.T) { + repo := &proxyRepoStub{accountCount: 2} + svc := &adminServiceImpl{proxyRepo: repo} + + err := svc.DeleteProxy(context.Background(), 77) + require.ErrorIs(t, err, ErrProxyInUse) + require.Empty(t, repo.deletedIDs) +} + func TestAdminService_DeleteProxy_Error(t 
*testing.T) { deleteErr := errors.New("delete failed") repo := &proxyRepoStub{deleteErr: deleteErr} diff --git a/backend/internal/service/admin_service_group_test.go b/backend/internal/service/admin_service_group_test.go index 26d6eedf..e0574e2e 100644 --- a/backend/internal/service/admin_service_group_test.go +++ b/backend/internal/service/admin_service_group_test.go @@ -45,6 +45,13 @@ func (s *groupRepoStubForAdmin) GetByID(_ context.Context, _ int64) (*Group, err return s.getByID, nil } +func (s *groupRepoStubForAdmin) GetByIDLite(_ context.Context, _ int64) (*Group, error) { + if s.getErr != nil { + return nil, s.getErr + } + return s.getByID, nil +} + func (s *groupRepoStubForAdmin) Delete(_ context.Context, _ int64) error { panic("unexpected Delete call") } @@ -290,3 +297,84 @@ func TestAdminService_ListGroups_WithSearch(t *testing.T) { require.True(t, *repo.listWithFiltersIsExclusive) }) } + +func TestAdminService_ValidateFallbackGroup_DetectsCycle(t *testing.T) { + groupID := int64(1) + fallbackID := int64(2) + repo := &groupRepoStubForFallbackCycle{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + FallbackGroupID: &fallbackID, + }, + fallbackID: { + ID: fallbackID, + FallbackGroupID: &groupID, + }, + }, + } + svc := &adminServiceImpl{groupRepo: repo} + + err := svc.validateFallbackGroup(context.Background(), groupID, fallbackID) + require.Error(t, err) + require.Contains(t, err.Error(), "fallback group cycle") +} + +type groupRepoStubForFallbackCycle struct { + groups map[int64]*Group +} + +func (s *groupRepoStubForFallbackCycle) Create(_ context.Context, _ *Group) error { + panic("unexpected Create call") +} + +func (s *groupRepoStubForFallbackCycle) Update(_ context.Context, _ *Group) error { + panic("unexpected Update call") +} + +func (s *groupRepoStubForFallbackCycle) GetByID(ctx context.Context, id int64) (*Group, error) { + return s.GetByIDLite(ctx, id) +} + +func (s *groupRepoStubForFallbackCycle) GetByIDLite(_ context.Context, id int64) 
(*Group, error) { + if g, ok := s.groups[id]; ok { + return g, nil + } + return nil, ErrGroupNotFound +} + +func (s *groupRepoStubForFallbackCycle) Delete(_ context.Context, _ int64) error { + panic("unexpected Delete call") +} + +func (s *groupRepoStubForFallbackCycle) DeleteCascade(_ context.Context, _ int64) ([]int64, error) { + panic("unexpected DeleteCascade call") +} + +func (s *groupRepoStubForFallbackCycle) List(_ context.Context, _ pagination.PaginationParams) ([]Group, *pagination.PaginationResult, error) { + panic("unexpected List call") +} + +func (s *groupRepoStubForFallbackCycle) ListWithFilters(_ context.Context, _ pagination.PaginationParams, _, _, _ string, _ *bool) ([]Group, *pagination.PaginationResult, error) { + panic("unexpected ListWithFilters call") +} + +func (s *groupRepoStubForFallbackCycle) ListActive(_ context.Context) ([]Group, error) { + panic("unexpected ListActive call") +} + +func (s *groupRepoStubForFallbackCycle) ListActiveByPlatform(_ context.Context, _ string) ([]Group, error) { + panic("unexpected ListActiveByPlatform call") +} + +func (s *groupRepoStubForFallbackCycle) ExistsByName(_ context.Context, _ string) (bool, error) { + panic("unexpected ExistsByName call") +} + +func (s *groupRepoStubForFallbackCycle) GetAccountCount(_ context.Context, _ int64) (int64, error) { + panic("unexpected GetAccountCount call") +} + +func (s *groupRepoStubForFallbackCycle) DeleteAccountGroupsByGroupID(_ context.Context, _ int64) (int64, error) { + panic("unexpected DeleteAccountGroupsByGroupID call") +} diff --git a/backend/internal/service/admin_service_update_balance_test.go b/backend/internal/service/admin_service_update_balance_test.go new file mode 100644 index 00000000..d3b3c700 --- /dev/null +++ b/backend/internal/service/admin_service_update_balance_test.go @@ -0,0 +1,97 @@ +//go:build unit + +package service + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +type balanceUserRepoStub struct { + 
*userRepoStub + updateErr error + updated []*User +} + +func (s *balanceUserRepoStub) Update(ctx context.Context, user *User) error { + if s.updateErr != nil { + return s.updateErr + } + if user == nil { + return nil + } + clone := *user + s.updated = append(s.updated, &clone) + if s.userRepoStub != nil { + s.userRepoStub.user = &clone + } + return nil +} + +type balanceRedeemRepoStub struct { + *redeemRepoStub + created []*RedeemCode +} + +func (s *balanceRedeemRepoStub) Create(ctx context.Context, code *RedeemCode) error { + if code == nil { + return nil + } + clone := *code + s.created = append(s.created, &clone) + return nil +} + +type authCacheInvalidatorStub struct { + userIDs []int64 + groupIDs []int64 + keys []string +} + +func (s *authCacheInvalidatorStub) InvalidateAuthCacheByKey(ctx context.Context, key string) { + s.keys = append(s.keys, key) +} + +func (s *authCacheInvalidatorStub) InvalidateAuthCacheByUserID(ctx context.Context, userID int64) { + s.userIDs = append(s.userIDs, userID) +} + +func (s *authCacheInvalidatorStub) InvalidateAuthCacheByGroupID(ctx context.Context, groupID int64) { + s.groupIDs = append(s.groupIDs, groupID) +} + +func TestAdminService_UpdateUserBalance_InvalidatesAuthCache(t *testing.T) { + baseRepo := &userRepoStub{user: &User{ID: 7, Balance: 10}} + repo := &balanceUserRepoStub{userRepoStub: baseRepo} + redeemRepo := &balanceRedeemRepoStub{redeemRepoStub: &redeemRepoStub{}} + invalidator := &authCacheInvalidatorStub{} + svc := &adminServiceImpl{ + userRepo: repo, + redeemCodeRepo: redeemRepo, + authCacheInvalidator: invalidator, + } + + _, err := svc.UpdateUserBalance(context.Background(), 7, 5, "add", "") + require.NoError(t, err) + require.Equal(t, []int64{7}, invalidator.userIDs) + require.Len(t, redeemRepo.created, 1) +} + +func TestAdminService_UpdateUserBalance_NoChangeNoInvalidate(t *testing.T) { + baseRepo := &userRepoStub{user: &User{ID: 7, Balance: 10}} + repo := &balanceUserRepoStub{userRepoStub: baseRepo} + 
redeemRepo := &balanceRedeemRepoStub{redeemRepoStub: &redeemRepoStub{}} + invalidator := &authCacheInvalidatorStub{} + svc := &adminServiceImpl{ + userRepo: repo, + redeemCodeRepo: redeemRepo, + authCacheInvalidator: invalidator, + } + + _, err := svc.UpdateUserBalance(context.Background(), 7, 10, "set", "") + require.NoError(t, err) + require.Empty(t, invalidator.userIDs) + require.Empty(t, redeemRepo.created) +} diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index fcdf04f1..60e81158 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -12,6 +12,7 @@ import ( mathrand "math/rand" "net" "net/http" + "os" "strings" "sync/atomic" "time" @@ -28,6 +29,8 @@ const ( antigravityRetryMaxDelay = 16 * time.Second ) +const antigravityScopeRateLimitEnv = "GATEWAY_ANTIGRAVITY_429_SCOPE_LIMIT" + // antigravityRetryLoopParams 重试循环的参数 type antigravityRetryLoopParams struct { ctx context.Context @@ -38,7 +41,9 @@ type antigravityRetryLoopParams struct { action string body []byte quotaScope AntigravityQuotaScope + c *gin.Context httpUpstream HTTPUpstream + settingService *SettingService handleError func(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) } @@ -56,6 +61,17 @@ func antigravityRetryLoop(p antigravityRetryLoopParams) (*antigravityRetryLoopRe var resp *http.Response var usedBaseURL string + logBody := p.settingService != nil && p.settingService.cfg != nil && p.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if p.settingService != nil && p.settingService.cfg != nil && p.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = p.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + getUpstreamDetail := func(body []byte) string { + if !logBody { + return "" + } + return 
truncateString(string(body), maxBytes) + } urlFallbackLoop: for urlIdx, baseURL := range availableURLs { @@ -73,8 +89,22 @@ urlFallbackLoop: return nil, err } + // Capture upstream request body for ops retry of this attempt. + if p.c != nil && len(p.body) > 0 { + p.c.Set(OpsUpstreamRequestBodyKey, string(p.body)) + } + resp, err = p.httpUpstream.Do(upstreamReq, p.proxyURL, p.account.ID, p.account.Concurrency) if err != nil { + safeErr := sanitizeUpstreamErrorMessage(err.Error()) + appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ + Platform: p.account.Platform, + AccountID: p.account.ID, + AccountName: p.account.Name, + UpstreamStatusCode: 0, + Kind: "request_error", + Message: safeErr, + }) if shouldAntigravityFallbackToNextURL(err, 0) && urlIdx < len(availableURLs)-1 { antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) log.Printf("%s URL fallback (connection error): %s -> %s", p.prefix, baseURL, availableURLs[urlIdx+1]) @@ -89,6 +119,7 @@ urlFallbackLoop: continue } log.Printf("%s status=request_failed retries_exhausted error=%v", p.prefix, err) + setOpsUpstreamError(p.c, 0, safeErr, "") return nil, fmt.Errorf("upstream request failed after retries: %w", err) } @@ -99,13 +130,37 @@ urlFallbackLoop: // "Resource has been exhausted" 是 URL 级别限流,切换 URL if isURLLevelRateLimit(respBody) && urlIdx < len(availableURLs)-1 { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ + Platform: p.account.Platform, + AccountID: p.account.ID, + AccountName: p.account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry", + Message: upstreamMsg, + Detail: getUpstreamDetail(respBody), + }) antigravity.DefaultURLAvailability.MarkUnavailable(baseURL) - log.Printf("%s URL fallback (429): %s -> %s", p.prefix, baseURL, availableURLs[urlIdx+1]) + log.Printf("%s URL fallback (HTTP 429): 
%s -> %s body=%s", p.prefix, baseURL, availableURLs[urlIdx+1], truncateForLog(respBody, 200)) continue urlFallbackLoop } // 账户/模型配额限流,重试 3 次(指数退避) if attempt < antigravityMaxRetries { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ + Platform: p.account.Platform, + AccountID: p.account.ID, + AccountName: p.account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry", + Message: upstreamMsg, + Detail: getUpstreamDetail(respBody), + }) log.Printf("%s status=429 retry=%d/%d body=%s", p.prefix, attempt, antigravityMaxRetries, truncateForLog(respBody, 200)) if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { log.Printf("%s status=context_canceled_during_backoff", p.prefix) @@ -131,6 +186,18 @@ urlFallbackLoop: _ = resp.Body.Close() if attempt < antigravityMaxRetries { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + appendOpsUpstreamError(p.c, OpsUpstreamErrorEvent{ + Platform: p.account.Platform, + AccountID: p.account.ID, + AccountName: p.account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry", + Message: upstreamMsg, + Detail: getUpstreamDetail(respBody), + }) log.Printf("%s status=%d retry=%d/%d body=%s", p.prefix, resp.StatusCode, attempt, antigravityMaxRetries, truncateForLog(respBody, 500)) if !sleepAntigravityBackoffWithContext(p.ctx, attempt) { log.Printf("%s status=context_canceled_during_backoff", p.prefix) @@ -679,6 +746,9 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, proxyURL = account.Proxy.URL() } + // Sanitize thinking blocks (clean cache_control and flatten history thinking) + sanitizeThinkingBlocks(&claudeReq) + // 获取转换选项 // Antigravity 上游要求必须包含身份提示词,否则会返回 429 
transformOpts := s.getClaudeTransformOptions(ctx) @@ -690,6 +760,9 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, return nil, fmt.Errorf("transform request: %w", err) } + // Safety net: ensure no cache_control leaked into Gemini request + geminiBody = cleanCacheControlFromGeminiJSON(geminiBody) + // Antigravity 上游只支持流式请求,统一使用 streamGenerateContent // 如果客户端请求非流式,在响应处理阶段会收集完整流式响应后转换返回 action := "streamGenerateContent" @@ -704,7 +777,9 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, action: action, body: geminiBody, quotaScope: quotaScope, + c: c, httpUpstream: s.httpUpstream, + settingService: s.settingService, handleError: s.handleUpstreamError, }) if err != nil { @@ -720,6 +795,28 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, // Antigravity /v1internal 链路在部分场景会对 thought/thinking signature 做严格校验, // 当历史消息携带的 signature 不合法时会直接 400;去除 thinking 后可继续完成请求。 if resp.StatusCode == http.StatusBadRequest && isSignatureRelatedError(respBody) { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "signature_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + // Conservative two-stage fallback: // 1) Disable 
top-level thinking + thinking->text // 2) Only if still signature-related 400: also downgrade tool_use/tool_result to text. @@ -753,6 +850,14 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, } retryResp, retryErr := s.httpUpstream.Do(retryReq, proxyURL, account.ID, account.Concurrency) if retryErr != nil { + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "signature_retry_request_error", + Message: sanitizeUpstreamErrorMessage(retryErr.Error()), + }) log.Printf("Antigravity account %d: signature retry request failed (%s): %v", account.ID, stage.name, retryErr) continue } @@ -766,6 +871,26 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, retryBody, _ := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20)) _ = retryResp.Body.Close() + kind := "signature_retry" + if strings.TrimSpace(stage.name) != "" { + kind = "signature_retry_" + strings.ReplaceAll(stage.name, "+", "_") + } + retryUpstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(retryBody)) + retryUpstreamMsg = sanitizeUpstreamErrorMessage(retryUpstreamMsg) + retryUpstreamDetail := "" + if logBody { + retryUpstreamDetail = truncateString(string(retryBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: retryResp.StatusCode, + UpstreamRequestID: retryResp.Header.Get("x-request-id"), + Kind: kind, + Message: retryUpstreamMsg, + Detail: retryUpstreamDetail, + }) // If this stage fixed the signature issue, we stop; otherwise we may try the next stage. 
if retryResp.StatusCode != http.StatusBadRequest || !isSignatureRelatedError(retryBody) { @@ -793,10 +918,31 @@ func (s *AntigravityGatewayService) Forward(ctx context.Context, c *gin.Context, s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) if s.shouldFailoverUpstreamError(resp.StatusCode) { + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } - return nil, s.writeMappedClaudeError(c, resp.StatusCode, respBody) + return nil, s.writeMappedClaudeError(c, account, resp.StatusCode, resp.Header.Get("x-request-id"), respBody) } } @@ -879,6 +1025,143 @@ func extractAntigravityErrorMessage(body []byte) string { return "" } +// cleanCacheControlFromGeminiJSON removes cache_control from Gemini JSON (emergency fix) +// This should not be needed if transformation is correct, but serves as a safety net +func cleanCacheControlFromGeminiJSON(body []byte) []byte { + // Try a more robust approach: parse and clean + var data map[string]any + if err := json.Unmarshal(body, &data); err != nil { + log.Printf("[Antigravity] Failed to parse Gemini JSON for 
cache_control cleaning: %v", err) + return body + } + + cleaned := removeCacheControlFromAny(data) + if !cleaned { + return body + } + + if result, err := json.Marshal(data); err == nil { + log.Printf("[Antigravity] Successfully cleaned cache_control from Gemini JSON") + return result + } + + return body +} + +// removeCacheControlFromAny recursively removes cache_control fields +func removeCacheControlFromAny(v any) bool { + cleaned := false + + switch val := v.(type) { + case map[string]any: + for k, child := range val { + if k == "cache_control" { + delete(val, k) + cleaned = true + } else if removeCacheControlFromAny(child) { + cleaned = true + } + } + case []any: + for _, item := range val { + if removeCacheControlFromAny(item) { + cleaned = true + } + } + } + + return cleaned +} + +// sanitizeThinkingBlocks cleans cache_control and flattens history thinking blocks +// Thinking blocks do NOT support cache_control field (Anthropic API/Vertex AI requirement) +// Additionally, history thinking blocks are flattened to text to avoid upstream validation errors +func sanitizeThinkingBlocks(req *antigravity.ClaudeRequest) { + if req == nil { + return + } + + log.Printf("[Antigravity] sanitizeThinkingBlocks: processing request with %d messages", len(req.Messages)) + + // Clean system blocks + if len(req.System) > 0 { + var systemBlocks []map[string]any + if err := json.Unmarshal(req.System, &systemBlocks); err == nil { + for i := range systemBlocks { + if blockType, _ := systemBlocks[i]["type"].(string); blockType == "thinking" || systemBlocks[i]["thinking"] != nil { + if removeCacheControlFromAny(systemBlocks[i]) { + log.Printf("[Antigravity] Deep cleaned cache_control from thinking block in system[%d]", i) + } + } + } + // Marshal back + if cleaned, err := json.Marshal(systemBlocks); err == nil { + req.System = cleaned + } + } + } + + // Clean message content blocks and flatten history + lastMsgIdx := len(req.Messages) - 1 + for msgIdx := range req.Messages { + raw 
:= req.Messages[msgIdx].Content + if len(raw) == 0 { + continue + } + + // Try to parse as blocks array + var blocks []map[string]any + if err := json.Unmarshal(raw, &blocks); err != nil { + continue + } + + cleaned := false + for blockIdx := range blocks { + blockType, _ := blocks[blockIdx]["type"].(string) + + // Check for thinking blocks (typed or untyped) + if blockType == "thinking" || blocks[blockIdx]["thinking"] != nil { + // 1. Clean cache_control + if removeCacheControlFromAny(blocks[blockIdx]) { + log.Printf("[Antigravity] Deep cleaned cache_control from thinking block in messages[%d].content[%d]", msgIdx, blockIdx) + cleaned = true + } + + // 2. Flatten to text if it's a history message (not the last one) + if msgIdx < lastMsgIdx { + log.Printf("[Antigravity] Flattening history thinking block to text at messages[%d].content[%d]", msgIdx, blockIdx) + + // Extract thinking content + var textContent string + if t, ok := blocks[blockIdx]["thinking"].(string); ok { + textContent = t + } else { + // Fallback for non-string content (marshal it) + if b, err := json.Marshal(blocks[blockIdx]["thinking"]); err == nil { + textContent = string(b) + } + } + + // Convert to text block + blocks[blockIdx]["type"] = "text" + blocks[blockIdx]["text"] = textContent + delete(blocks[blockIdx], "thinking") + delete(blocks[blockIdx], "signature") + delete(blocks[blockIdx], "cache_control") // Ensure it's gone + cleaned = true + } + } + } + + // Marshal back if modified + if cleaned { + if marshaled, err := json.Marshal(blocks); err == nil { + req.Messages[msgIdx].Content = marshaled + } + } + } +} + // stripThinkingFromClaudeRequest converts thinking blocks to text blocks in a Claude Messages request. // This preserves the thinking content while avoiding signature validation errors. // Note: redacted_thinking blocks are removed because they cannot be converted to text. 
@@ -1184,7 +1467,9 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co action: upstreamAction, body: wrappedBody, quotaScope: quotaScope, + c: c, httpUpstream: s.httpUpstream, + settingService: s.settingService, handleError: s.handleUpstreamError, }) if err != nil { @@ -1234,22 +1519,62 @@ func (s *AntigravityGatewayService) ForwardGemini(ctx context.Context, c *gin.Co s.handleUpstreamError(ctx, prefix, account, resp.StatusCode, resp.Header, respBody, quotaScope) - if s.shouldFailoverUpstreamError(resp.StatusCode) { - return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} - } - - // 解包并返回错误 requestID := resp.Header.Get("x-request-id") if requestID != "" { c.Header("x-request-id", requestID) } - unwrapped, _ := s.unwrapV1InternalResponse(respBody) + + unwrapped, unwrapErr := s.unwrapV1InternalResponse(respBody) + unwrappedForOps := unwrapped + if unwrapErr != nil || len(unwrappedForOps) == 0 { + unwrappedForOps = respBody + } + upstreamMsg := strings.TrimSpace(extractAntigravityErrorMessage(unwrappedForOps)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(unwrappedForOps), maxBytes) + } + + // Always record upstream context for Ops error logs, even when we will failover. 
+ setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + + if s.shouldFailoverUpstreamError(resp.StatusCode) { + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} + } + contentType := resp.Header.Get("Content-Type") if contentType == "" { contentType = "application/json" } - log.Printf("[antigravity-Forward] upstream error status=%d body=%s", resp.StatusCode, truncateForLog(respBody, 500)) - c.Data(resp.StatusCode, contentType, unwrapped) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "http_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + log.Printf("[antigravity-Forward] upstream error status=%d body=%s", resp.StatusCode, truncateForLog(unwrappedForOps, 500)) + c.Data(resp.StatusCode, contentType, unwrappedForOps) return nil, fmt.Errorf("antigravity upstream error: %d", resp.StatusCode) } @@ -1338,9 +1663,15 @@ func sleepAntigravityBackoffWithContext(ctx context.Context, attempt int) bool { } } +func antigravityUseScopeRateLimit() bool { + v := strings.ToLower(strings.TrimSpace(os.Getenv(antigravityScopeRateLimitEnv))) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, prefix string, account *Account, statusCode int, headers http.Header, body []byte, quotaScope AntigravityQuotaScope) { // 429 使用 Gemini 格式解析(从 body 解析重置时间) if statusCode == 429 { + useScopeLimit := antigravityUseScopeRateLimit() && quotaScope != "" resetAt := ParseGeminiRateLimitResetTime(body) if resetAt == nil { // 
解析失败:使用配置的 fallback 时间,直接限流整个账户 @@ -1350,19 +1681,30 @@ func (s *AntigravityGatewayService) handleUpstreamError(ctx context.Context, pre } defaultDur := time.Duration(fallbackMinutes) * time.Minute ra := time.Now().Add(defaultDur) - log.Printf("%s status=429 rate_limited account=%d reset_in=%v (fallback)", prefix, account.ID, defaultDur) - if err := s.accountRepo.SetRateLimited(ctx, account.ID, ra); err != nil { - log.Printf("%s status=429 rate_limit_set_failed account=%d error=%v", prefix, account.ID, err) + if useScopeLimit { + log.Printf("%s status=429 rate_limited scope=%s reset_in=%v (fallback)", prefix, quotaScope, defaultDur) + if err := s.accountRepo.SetAntigravityQuotaScopeLimit(ctx, account.ID, quotaScope, ra); err != nil { + log.Printf("%s status=429 rate_limit_set_failed scope=%s error=%v", prefix, quotaScope, err) + } + } else { + log.Printf("%s status=429 rate_limited account=%d reset_in=%v (fallback)", prefix, account.ID, defaultDur) + if err := s.accountRepo.SetRateLimited(ctx, account.ID, ra); err != nil { + log.Printf("%s status=429 rate_limit_set_failed account=%d error=%v", prefix, account.ID, err) + } } return } resetTime := time.Unix(*resetAt, 0) - log.Printf("%s status=429 rate_limited scope=%s reset_at=%v reset_in=%v", prefix, quotaScope, resetTime.Format("15:04:05"), time.Until(resetTime).Truncate(time.Second)) - if quotaScope == "" { - return - } - if err := s.accountRepo.SetAntigravityQuotaScopeLimit(ctx, account.ID, quotaScope, resetTime); err != nil { - log.Printf("%s status=429 rate_limit_set_failed scope=%s error=%v", prefix, quotaScope, err) + if useScopeLimit { + log.Printf("%s status=429 rate_limited scope=%s reset_at=%v reset_in=%v", prefix, quotaScope, resetTime.Format("15:04:05"), time.Until(resetTime).Truncate(time.Second)) + if err := s.accountRepo.SetAntigravityQuotaScopeLimit(ctx, account.ID, quotaScope, resetTime); err != nil { + log.Printf("%s status=429 rate_limit_set_failed scope=%s error=%v", prefix, quotaScope, err) + 
} + } else { + log.Printf("%s status=429 rate_limited account=%d reset_at=%v reset_in=%v", prefix, account.ID, resetTime.Format("15:04:05"), time.Until(resetTime).Truncate(time.Second)) + if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetTime); err != nil { + log.Printf("%s status=429 rate_limit_set_failed account=%d error=%v", prefix, account.ID, err) + } } return } @@ -1533,6 +1875,7 @@ func (s *AntigravityGatewayService) handleGeminiStreamingResponse(c *gin.Context continue } log.Printf("Stream data interval timeout (antigravity)") + // 注意:此函数没有 account 上下文,无法调用 HandleStreamTimeout sendErrorEvent("stream_timeout") return &antigravityStreamResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout") } @@ -1824,9 +2167,36 @@ func (s *AntigravityGatewayService) writeClaudeError(c *gin.Context, status int, return fmt.Errorf("%s", message) } -func (s *AntigravityGatewayService) writeMappedClaudeError(c *gin.Context, upstreamStatus int, body []byte) error { - // 记录上游错误详情便于调试 - log.Printf("[antigravity-Forward] upstream_error status=%d body=%s", upstreamStatus, string(body)) +func (s *AntigravityGatewayService) writeMappedClaudeError(c *gin.Context, account *Account, upstreamStatus int, upstreamRequestID string, body []byte) error { + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + + logBody := s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBody + maxBytes := 2048 + if s.settingService != nil && s.settingService.cfg != nil && s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes > 0 { + maxBytes = s.settingService.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + } + + upstreamDetail := "" + if logBody { + upstreamDetail = truncateString(string(body), maxBytes) + } + setOpsUpstreamError(c, upstreamStatus, upstreamMsg, upstreamDetail) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: 
account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: upstreamStatus, + UpstreamRequestID: upstreamRequestID, + Kind: "http_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + // 记录上游错误详情便于排障(可选:由配置控制;不回显到客户端) + if logBody { + log.Printf("[antigravity-Forward] upstream_error status=%d body=%s", upstreamStatus, truncateForLog(body, maxBytes)) + } var statusCode int var errType, errMsg string @@ -1862,7 +2232,10 @@ func (s *AntigravityGatewayService) writeMappedClaudeError(c *gin.Context, upstr "type": "error", "error": gin.H{"type": errType, "message": errMsg}, }) - return fmt.Errorf("upstream error: %d", upstreamStatus) + if upstreamMsg == "" { + return fmt.Errorf("upstream error: %d", upstreamStatus) + } + return fmt.Errorf("upstream error: %d message=%s", upstreamStatus, upstreamMsg) } func (s *AntigravityGatewayService) writeGoogleError(c *gin.Context, status int, message string) error { @@ -2189,6 +2562,7 @@ func (s *AntigravityGatewayService) handleClaudeStreamingResponse(c *gin.Context continue } log.Printf("Stream data interval timeout (antigravity)") + // 注意:此函数没有 account 上下文,无法调用 HandleStreamTimeout sendErrorEvent("stream_timeout") return &antigravityStreamResult{usage: convertUsage(nil), firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout") } diff --git a/backend/internal/service/antigravity_quota_scope.go b/backend/internal/service/antigravity_quota_scope.go index e9f7184b..a3b2ec66 100644 --- a/backend/internal/service/antigravity_quota_scope.go +++ b/backend/internal/service/antigravity_quota_scope.go @@ -49,6 +49,9 @@ func (a *Account) IsSchedulableForModel(requestedModel string) bool { if !a.IsSchedulable() { return false } + if a.isModelRateLimited(requestedModel) { + return false + } if a.Platform != PlatformAntigravity { return true } diff --git a/backend/internal/service/antigravity_token_provider.go b/backend/internal/service/antigravity_token_provider.go index 
cbd1bef4..c5dc55db 100644 --- a/backend/internal/service/antigravity_token_provider.go +++ b/backend/internal/service/antigravity_token_provider.go @@ -45,7 +45,7 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account * return "", errors.New("not an antigravity oauth account") } - cacheKey := antigravityTokenCacheKey(account) + cacheKey := AntigravityTokenCacheKey(account) // 1. 先尝试缓存 if p.tokenCache != nil { @@ -121,7 +121,7 @@ func (p *AntigravityTokenProvider) GetAccessToken(ctx context.Context, account * return accessToken, nil } -func antigravityTokenCacheKey(account *Account) string { +func AntigravityTokenCacheKey(account *Account) string { projectID := strings.TrimSpace(account.GetCredential("project_id")) if projectID != "" { return "ag:" + projectID diff --git a/backend/internal/service/api_key.go b/backend/internal/service/api_key.go index 0cf0f4f9..8c692d09 100644 --- a/backend/internal/service/api_key.go +++ b/backend/internal/service/api_key.go @@ -3,16 +3,18 @@ package service import "time" type APIKey struct { - ID int64 - UserID int64 - Key string - Name string - GroupID *int64 - Status string - CreatedAt time.Time - UpdatedAt time.Time - User *User - Group *Group + ID int64 + UserID int64 + Key string + Name string + GroupID *int64 + Status string + IPWhitelist []string + IPBlacklist []string + CreatedAt time.Time + UpdatedAt time.Time + User *User + Group *Group } func (k *APIKey) IsActive() bool { diff --git a/backend/internal/service/api_key_auth_cache.go b/backend/internal/service/api_key_auth_cache.go new file mode 100644 index 00000000..5b476dbc --- /dev/null +++ b/backend/internal/service/api_key_auth_cache.go @@ -0,0 +1,51 @@ +package service + +// APIKeyAuthSnapshot API Key 认证缓存快照(仅包含认证所需字段) +type APIKeyAuthSnapshot struct { + APIKeyID int64 `json:"api_key_id"` + UserID int64 `json:"user_id"` + GroupID *int64 `json:"group_id,omitempty"` + Status string `json:"status"` + IPWhitelist []string 
`json:"ip_whitelist,omitempty"` + IPBlacklist []string `json:"ip_blacklist,omitempty"` + User APIKeyAuthUserSnapshot `json:"user"` + Group *APIKeyAuthGroupSnapshot `json:"group,omitempty"` +} + +// APIKeyAuthUserSnapshot 用户快照 +type APIKeyAuthUserSnapshot struct { + ID int64 `json:"id"` + Status string `json:"status"` + Role string `json:"role"` + Balance float64 `json:"balance"` + Concurrency int `json:"concurrency"` +} + +// APIKeyAuthGroupSnapshot 分组快照 +type APIKeyAuthGroupSnapshot struct { + ID int64 `json:"id"` + Name string `json:"name"` + Platform string `json:"platform"` + Status string `json:"status"` + SubscriptionType string `json:"subscription_type"` + RateMultiplier float64 `json:"rate_multiplier"` + DailyLimitUSD *float64 `json:"daily_limit_usd,omitempty"` + WeeklyLimitUSD *float64 `json:"weekly_limit_usd,omitempty"` + MonthlyLimitUSD *float64 `json:"monthly_limit_usd,omitempty"` + ImagePrice1K *float64 `json:"image_price_1k,omitempty"` + ImagePrice2K *float64 `json:"image_price_2k,omitempty"` + ImagePrice4K *float64 `json:"image_price_4k,omitempty"` + ClaudeCodeOnly bool `json:"claude_code_only"` + FallbackGroupID *int64 `json:"fallback_group_id,omitempty"` + + // Model routing is used by gateway account selection, so it must be part of auth cache snapshot. + // Only anthropic groups use these fields; others may leave them empty. 
+ ModelRouting map[string][]int64 `json:"model_routing,omitempty"` + ModelRoutingEnabled bool `json:"model_routing_enabled"` +} + +// APIKeyAuthCacheEntry 缓存条目,支持负缓存 +type APIKeyAuthCacheEntry struct { + NotFound bool `json:"not_found"` + Snapshot *APIKeyAuthSnapshot `json:"snapshot,omitempty"` +} diff --git a/backend/internal/service/api_key_auth_cache_impl.go b/backend/internal/service/api_key_auth_cache_impl.go new file mode 100644 index 00000000..521f1da5 --- /dev/null +++ b/backend/internal/service/api_key_auth_cache_impl.go @@ -0,0 +1,273 @@ +package service + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "fmt" + "math/rand" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/dgraph-io/ristretto" +) + +type apiKeyAuthCacheConfig struct { + l1Size int + l1TTL time.Duration + l2TTL time.Duration + negativeTTL time.Duration + jitterPercent int + singleflight bool +} + +var ( + jitterRandMu sync.Mutex + // 认证缓存抖动使用独立随机源,避免全局 Seed + jitterRand = rand.New(rand.NewSource(time.Now().UnixNano())) +) + +func newAPIKeyAuthCacheConfig(cfg *config.Config) apiKeyAuthCacheConfig { + if cfg == nil { + return apiKeyAuthCacheConfig{} + } + auth := cfg.APIKeyAuth + return apiKeyAuthCacheConfig{ + l1Size: auth.L1Size, + l1TTL: time.Duration(auth.L1TTLSeconds) * time.Second, + l2TTL: time.Duration(auth.L2TTLSeconds) * time.Second, + negativeTTL: time.Duration(auth.NegativeTTLSeconds) * time.Second, + jitterPercent: auth.JitterPercent, + singleflight: auth.Singleflight, + } +} + +func (c apiKeyAuthCacheConfig) l1Enabled() bool { + return c.l1Size > 0 && c.l1TTL > 0 +} + +func (c apiKeyAuthCacheConfig) l2Enabled() bool { + return c.l2TTL > 0 +} + +func (c apiKeyAuthCacheConfig) negativeEnabled() bool { + return c.negativeTTL > 0 +} + +func (c apiKeyAuthCacheConfig) jitterTTL(ttl time.Duration) time.Duration { + if ttl <= 0 { + return ttl + } + if c.jitterPercent <= 0 { + return ttl + } + percent := c.jitterPercent + if percent > 
100 { + percent = 100 + } + delta := float64(percent) / 100 + jitterRandMu.Lock() + randVal := jitterRand.Float64() + jitterRandMu.Unlock() + factor := 1 - delta + randVal*(2*delta) + if factor <= 0 { + return ttl + } + return time.Duration(float64(ttl) * factor) +} + +func (s *APIKeyService) initAuthCache(cfg *config.Config) { + s.authCfg = newAPIKeyAuthCacheConfig(cfg) + if !s.authCfg.l1Enabled() { + return + } + cache, err := ristretto.NewCache(&ristretto.Config{ + NumCounters: int64(s.authCfg.l1Size) * 10, + MaxCost: int64(s.authCfg.l1Size), + BufferItems: 64, + }) + if err != nil { + return + } + s.authCacheL1 = cache +} + +func (s *APIKeyService) authCacheKey(key string) string { + sum := sha256.Sum256([]byte(key)) + return hex.EncodeToString(sum[:]) +} + +func (s *APIKeyService) getAuthCacheEntry(ctx context.Context, cacheKey string) (*APIKeyAuthCacheEntry, bool) { + if s.authCacheL1 != nil { + if val, ok := s.authCacheL1.Get(cacheKey); ok { + if entry, ok := val.(*APIKeyAuthCacheEntry); ok { + return entry, true + } + } + } + if s.cache == nil || !s.authCfg.l2Enabled() { + return nil, false + } + entry, err := s.cache.GetAuthCache(ctx, cacheKey) + if err != nil { + return nil, false + } + s.setAuthCacheL1(cacheKey, entry) + return entry, true +} + +func (s *APIKeyService) setAuthCacheL1(cacheKey string, entry *APIKeyAuthCacheEntry) { + if s.authCacheL1 == nil || entry == nil { + return + } + ttl := s.authCfg.l1TTL + if entry.NotFound && s.authCfg.negativeTTL > 0 && s.authCfg.negativeTTL < ttl { + ttl = s.authCfg.negativeTTL + } + ttl = s.authCfg.jitterTTL(ttl) + _ = s.authCacheL1.SetWithTTL(cacheKey, entry, 1, ttl) +} + +func (s *APIKeyService) setAuthCacheEntry(ctx context.Context, cacheKey string, entry *APIKeyAuthCacheEntry, ttl time.Duration) { + if entry == nil { + return + } + s.setAuthCacheL1(cacheKey, entry) + if s.cache == nil || !s.authCfg.l2Enabled() { + return + } + _ = s.cache.SetAuthCache(ctx, cacheKey, entry, s.authCfg.jitterTTL(ttl)) +} + 
+func (s *APIKeyService) deleteAuthCache(ctx context.Context, cacheKey string) { + if s.authCacheL1 != nil { + s.authCacheL1.Del(cacheKey) + } + if s.cache == nil { + return + } + _ = s.cache.DeleteAuthCache(ctx, cacheKey) +} + +func (s *APIKeyService) loadAuthCacheEntry(ctx context.Context, key, cacheKey string) (*APIKeyAuthCacheEntry, error) { + apiKey, err := s.apiKeyRepo.GetByKeyForAuth(ctx, key) + if err != nil { + if errors.Is(err, ErrAPIKeyNotFound) { + entry := &APIKeyAuthCacheEntry{NotFound: true} + if s.authCfg.negativeEnabled() { + s.setAuthCacheEntry(ctx, cacheKey, entry, s.authCfg.negativeTTL) + } + return entry, nil + } + return nil, fmt.Errorf("get api key: %w", err) + } + apiKey.Key = key + snapshot := s.snapshotFromAPIKey(apiKey) + if snapshot == nil { + return nil, fmt.Errorf("get api key: %w", ErrAPIKeyNotFound) + } + entry := &APIKeyAuthCacheEntry{Snapshot: snapshot} + s.setAuthCacheEntry(ctx, cacheKey, entry, s.authCfg.l2TTL) + return entry, nil +} + +func (s *APIKeyService) applyAuthCacheEntry(key string, entry *APIKeyAuthCacheEntry) (*APIKey, bool, error) { + if entry == nil { + return nil, false, nil + } + if entry.NotFound { + return nil, true, ErrAPIKeyNotFound + } + if entry.Snapshot == nil { + return nil, false, nil + } + return s.snapshotToAPIKey(key, entry.Snapshot), true, nil +} + +func (s *APIKeyService) snapshotFromAPIKey(apiKey *APIKey) *APIKeyAuthSnapshot { + if apiKey == nil || apiKey.User == nil { + return nil + } + snapshot := &APIKeyAuthSnapshot{ + APIKeyID: apiKey.ID, + UserID: apiKey.UserID, + GroupID: apiKey.GroupID, + Status: apiKey.Status, + IPWhitelist: apiKey.IPWhitelist, + IPBlacklist: apiKey.IPBlacklist, + User: APIKeyAuthUserSnapshot{ + ID: apiKey.User.ID, + Status: apiKey.User.Status, + Role: apiKey.User.Role, + Balance: apiKey.User.Balance, + Concurrency: apiKey.User.Concurrency, + }, + } + if apiKey.Group != nil { + snapshot.Group = &APIKeyAuthGroupSnapshot{ + ID: apiKey.Group.ID, + Name: apiKey.Group.Name, + 
Platform: apiKey.Group.Platform, + Status: apiKey.Group.Status, + SubscriptionType: apiKey.Group.SubscriptionType, + RateMultiplier: apiKey.Group.RateMultiplier, + DailyLimitUSD: apiKey.Group.DailyLimitUSD, + WeeklyLimitUSD: apiKey.Group.WeeklyLimitUSD, + MonthlyLimitUSD: apiKey.Group.MonthlyLimitUSD, + ImagePrice1K: apiKey.Group.ImagePrice1K, + ImagePrice2K: apiKey.Group.ImagePrice2K, + ImagePrice4K: apiKey.Group.ImagePrice4K, + ClaudeCodeOnly: apiKey.Group.ClaudeCodeOnly, + FallbackGroupID: apiKey.Group.FallbackGroupID, + ModelRouting: apiKey.Group.ModelRouting, + ModelRoutingEnabled: apiKey.Group.ModelRoutingEnabled, + } + } + return snapshot +} + +func (s *APIKeyService) snapshotToAPIKey(key string, snapshot *APIKeyAuthSnapshot) *APIKey { + if snapshot == nil { + return nil + } + apiKey := &APIKey{ + ID: snapshot.APIKeyID, + UserID: snapshot.UserID, + GroupID: snapshot.GroupID, + Key: key, + Status: snapshot.Status, + IPWhitelist: snapshot.IPWhitelist, + IPBlacklist: snapshot.IPBlacklist, + User: &User{ + ID: snapshot.User.ID, + Status: snapshot.User.Status, + Role: snapshot.User.Role, + Balance: snapshot.User.Balance, + Concurrency: snapshot.User.Concurrency, + }, + } + if snapshot.Group != nil { + apiKey.Group = &Group{ + ID: snapshot.Group.ID, + Name: snapshot.Group.Name, + Platform: snapshot.Group.Platform, + Status: snapshot.Group.Status, + Hydrated: true, + SubscriptionType: snapshot.Group.SubscriptionType, + RateMultiplier: snapshot.Group.RateMultiplier, + DailyLimitUSD: snapshot.Group.DailyLimitUSD, + WeeklyLimitUSD: snapshot.Group.WeeklyLimitUSD, + MonthlyLimitUSD: snapshot.Group.MonthlyLimitUSD, + ImagePrice1K: snapshot.Group.ImagePrice1K, + ImagePrice2K: snapshot.Group.ImagePrice2K, + ImagePrice4K: snapshot.Group.ImagePrice4K, + ClaudeCodeOnly: snapshot.Group.ClaudeCodeOnly, + FallbackGroupID: snapshot.Group.FallbackGroupID, + ModelRouting: snapshot.Group.ModelRouting, + ModelRoutingEnabled: snapshot.Group.ModelRoutingEnabled, + } + } + return apiKey 
+} diff --git a/backend/internal/service/api_key_auth_cache_invalidate.go b/backend/internal/service/api_key_auth_cache_invalidate.go new file mode 100644 index 00000000..aeb58bcc --- /dev/null +++ b/backend/internal/service/api_key_auth_cache_invalidate.go @@ -0,0 +1,48 @@ +package service + +import "context" + +// InvalidateAuthCacheByKey 清除指定 API Key 的认证缓存 +func (s *APIKeyService) InvalidateAuthCacheByKey(ctx context.Context, key string) { + if key == "" { + return + } + cacheKey := s.authCacheKey(key) + s.deleteAuthCache(ctx, cacheKey) +} + +// InvalidateAuthCacheByUserID 清除用户相关的 API Key 认证缓存 +func (s *APIKeyService) InvalidateAuthCacheByUserID(ctx context.Context, userID int64) { + if userID <= 0 { + return + } + keys, err := s.apiKeyRepo.ListKeysByUserID(ctx, userID) + if err != nil { + return + } + s.deleteAuthCacheByKeys(ctx, keys) +} + +// InvalidateAuthCacheByGroupID 清除分组相关的 API Key 认证缓存 +func (s *APIKeyService) InvalidateAuthCacheByGroupID(ctx context.Context, groupID int64) { + if groupID <= 0 { + return + } + keys, err := s.apiKeyRepo.ListKeysByGroupID(ctx, groupID) + if err != nil { + return + } + s.deleteAuthCacheByKeys(ctx, keys) +} + +func (s *APIKeyService) deleteAuthCacheByKeys(ctx context.Context, keys []string) { + if len(keys) == 0 { + return + } + for _, key := range keys { + if key == "" { + continue + } + s.deleteAuthCache(ctx, s.authCacheKey(key)) + } +} diff --git a/backend/internal/service/api_key_service.go b/backend/internal/service/api_key_service.go index 0ffe8821..ecc570c7 100644 --- a/backend/internal/service/api_key_service.go +++ b/backend/internal/service/api_key_service.go @@ -9,8 +9,11 @@ import ( "github.com/Wei-Shaw/sub2api/internal/config" infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" + "github.com/dgraph-io/ristretto" + "golang.org/x/sync/singleflight" ) 
var ( @@ -20,6 +23,7 @@ var ( ErrAPIKeyTooShort = infraerrors.BadRequest("API_KEY_TOO_SHORT", "api key must be at least 16 characters") ErrAPIKeyInvalidChars = infraerrors.BadRequest("API_KEY_INVALID_CHARS", "api key can only contain letters, numbers, underscores, and hyphens") ErrAPIKeyRateLimited = infraerrors.TooManyRequests("API_KEY_RATE_LIMITED", "too many failed attempts, please try again later") + ErrInvalidIPPattern = infraerrors.BadRequest("INVALID_IP_PATTERN", "invalid IP or CIDR pattern") ) const ( @@ -29,9 +33,11 @@ const ( type APIKeyRepository interface { Create(ctx context.Context, key *APIKey) error GetByID(ctx context.Context, id int64) (*APIKey, error) - // GetOwnerID 仅获取 API Key 的所有者 ID,用于删除前的轻量级权限验证 - GetOwnerID(ctx context.Context, id int64) (int64, error) + // GetKeyAndOwnerID 仅获取 API Key 的 key 与所有者 ID,用于删除等轻量场景 + GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) GetByKey(ctx context.Context, key string) (*APIKey, error) + // GetByKeyForAuth 认证专用查询,返回最小字段集 + GetByKeyForAuth(ctx context.Context, key string) (*APIKey, error) Update(ctx context.Context, key *APIKey) error Delete(ctx context.Context, id int64) error @@ -43,6 +49,8 @@ type APIKeyRepository interface { SearchAPIKeys(ctx context.Context, userID int64, keyword string, limit int) ([]APIKey, error) ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) CountByGroupID(ctx context.Context, groupID int64) (int64, error) + ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) + ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) } // APIKeyCache defines cache operations for API key service @@ -53,20 +61,35 @@ type APIKeyCache interface { IncrementDailyUsage(ctx context.Context, apiKey string) error SetDailyUsageExpiry(ctx context.Context, apiKey string, ttl time.Duration) error + + GetAuthCache(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) + SetAuthCache(ctx context.Context, key string, entry 
*APIKeyAuthCacheEntry, ttl time.Duration) error + DeleteAuthCache(ctx context.Context, key string) error +} + +// APIKeyAuthCacheInvalidator 提供认证缓存失效能力 +type APIKeyAuthCacheInvalidator interface { + InvalidateAuthCacheByKey(ctx context.Context, key string) + InvalidateAuthCacheByUserID(ctx context.Context, userID int64) + InvalidateAuthCacheByGroupID(ctx context.Context, groupID int64) } // CreateAPIKeyRequest 创建API Key请求 type CreateAPIKeyRequest struct { - Name string `json:"name"` - GroupID *int64 `json:"group_id"` - CustomKey *string `json:"custom_key"` // 可选的自定义key + Name string `json:"name"` + GroupID *int64 `json:"group_id"` + CustomKey *string `json:"custom_key"` // 可选的自定义key + IPWhitelist []string `json:"ip_whitelist"` // IP 白名单 + IPBlacklist []string `json:"ip_blacklist"` // IP 黑名单 } // UpdateAPIKeyRequest 更新API Key请求 type UpdateAPIKeyRequest struct { - Name *string `json:"name"` - GroupID *int64 `json:"group_id"` - Status *string `json:"status"` + Name *string `json:"name"` + GroupID *int64 `json:"group_id"` + Status *string `json:"status"` + IPWhitelist []string `json:"ip_whitelist"` // IP 白名单(空数组清空) + IPBlacklist []string `json:"ip_blacklist"` // IP 黑名单(空数组清空) } // APIKeyService API Key服务 @@ -77,6 +100,9 @@ type APIKeyService struct { userSubRepo UserSubscriptionRepository cache APIKeyCache cfg *config.Config + authCacheL1 *ristretto.Cache + authCfg apiKeyAuthCacheConfig + authGroup singleflight.Group } // NewAPIKeyService 创建API Key服务实例 @@ -88,7 +114,7 @@ func NewAPIKeyService( cache APIKeyCache, cfg *config.Config, ) *APIKeyService { - return &APIKeyService{ + svc := &APIKeyService{ apiKeyRepo: apiKeyRepo, userRepo: userRepo, groupRepo: groupRepo, @@ -96,6 +122,8 @@ func NewAPIKeyService( cache: cache, cfg: cfg, } + svc.initAuthCache(cfg) + return svc } // GenerateKey 生成随机API Key @@ -186,6 +214,20 @@ func (s *APIKeyService) Create(ctx context.Context, userID int64, req CreateAPIK return nil, fmt.Errorf("get user: %w", err) } + // 验证 IP 白名单格式 + if 
len(req.IPWhitelist) > 0 { + if invalid := ip.ValidateIPPatterns(req.IPWhitelist); len(invalid) > 0 { + return nil, fmt.Errorf("%w: %v", ErrInvalidIPPattern, invalid) + } + } + + // 验证 IP 黑名单格式 + if len(req.IPBlacklist) > 0 { + if invalid := ip.ValidateIPPatterns(req.IPBlacklist); len(invalid) > 0 { + return nil, fmt.Errorf("%w: %v", ErrInvalidIPPattern, invalid) + } + } + // 验证分组权限(如果指定了分组) if req.GroupID != nil { group, err := s.groupRepo.GetByID(ctx, *req.GroupID) @@ -236,17 +278,21 @@ func (s *APIKeyService) Create(ctx context.Context, userID int64, req CreateAPIK // 创建API Key记录 apiKey := &APIKey{ - UserID: userID, - Key: key, - Name: req.Name, - GroupID: req.GroupID, - Status: StatusActive, + UserID: userID, + Key: key, + Name: req.Name, + GroupID: req.GroupID, + Status: StatusActive, + IPWhitelist: req.IPWhitelist, + IPBlacklist: req.IPBlacklist, } if err := s.apiKeyRepo.Create(ctx, apiKey); err != nil { return nil, fmt.Errorf("create api key: %w", err) } + s.InvalidateAuthCacheByKey(ctx, apiKey.Key) + return apiKey, nil } @@ -282,21 +328,49 @@ func (s *APIKeyService) GetByID(ctx context.Context, id int64) (*APIKey, error) // GetByKey 根据Key字符串获取API Key(用于认证) func (s *APIKeyService) GetByKey(ctx context.Context, key string) (*APIKey, error) { - // 尝试从Redis缓存获取 - cacheKey := fmt.Sprintf("apikey:%s", key) + cacheKey := s.authCacheKey(key) - // 这里可以添加Redis缓存逻辑,暂时直接查询数据库 - apiKey, err := s.apiKeyRepo.GetByKey(ctx, key) + if entry, ok := s.getAuthCacheEntry(ctx, cacheKey); ok { + if apiKey, used, err := s.applyAuthCacheEntry(key, entry); used { + if err != nil { + return nil, fmt.Errorf("get api key: %w", err) + } + return apiKey, nil + } + } + + if s.authCfg.singleflight { + value, err, _ := s.authGroup.Do(cacheKey, func() (any, error) { + return s.loadAuthCacheEntry(ctx, key, cacheKey) + }) + if err != nil { + return nil, err + } + entry, _ := value.(*APIKeyAuthCacheEntry) + if apiKey, used, err := s.applyAuthCacheEntry(key, entry); used { + if err != nil { + 
return nil, fmt.Errorf("get api key: %w", err) + } + return apiKey, nil + } + } else { + entry, err := s.loadAuthCacheEntry(ctx, key, cacheKey) + if err != nil { + return nil, err + } + if apiKey, used, err := s.applyAuthCacheEntry(key, entry); used { + if err != nil { + return nil, fmt.Errorf("get api key: %w", err) + } + return apiKey, nil + } + } + + apiKey, err := s.apiKeyRepo.GetByKeyForAuth(ctx, key) if err != nil { return nil, fmt.Errorf("get api key: %w", err) } - - // 缓存到Redis(可选,TTL设置为5分钟) - if s.cache != nil { - // 这里可以序列化并缓存API Key - _ = cacheKey // 使用变量避免未使用错误 - } - + apiKey.Key = key return apiKey, nil } @@ -312,6 +386,20 @@ func (s *APIKeyService) Update(ctx context.Context, id int64, userID int64, req return nil, ErrInsufficientPerms } + // 验证 IP 白名单格式 + if len(req.IPWhitelist) > 0 { + if invalid := ip.ValidateIPPatterns(req.IPWhitelist); len(invalid) > 0 { + return nil, fmt.Errorf("%w: %v", ErrInvalidIPPattern, invalid) + } + } + + // 验证 IP 黑名单格式 + if len(req.IPBlacklist) > 0 { + if invalid := ip.ValidateIPPatterns(req.IPBlacklist); len(invalid) > 0 { + return nil, fmt.Errorf("%w: %v", ErrInvalidIPPattern, invalid) + } + } + // 更新字段 if req.Name != nil { apiKey.Name = *req.Name @@ -344,19 +432,22 @@ func (s *APIKeyService) Update(ctx context.Context, id int64, userID int64, req } } + // 更新 IP 限制(空数组会清空设置) + apiKey.IPWhitelist = req.IPWhitelist + apiKey.IPBlacklist = req.IPBlacklist + if err := s.apiKeyRepo.Update(ctx, apiKey); err != nil { return nil, fmt.Errorf("update api key: %w", err) } + s.InvalidateAuthCacheByKey(ctx, apiKey.Key) + return apiKey, nil } // Delete 删除API Key -// 优化:使用 GetOwnerID 替代 GetByID 进行权限验证, -// 避免加载完整 APIKey 对象及其关联数据(User、Group),提升删除操作的性能 func (s *APIKeyService) Delete(ctx context.Context, id int64, userID int64) error { - // 仅获取所有者 ID 用于权限验证,而非加载完整对象 - ownerID, err := s.apiKeyRepo.GetOwnerID(ctx, id) + key, ownerID, err := s.apiKeyRepo.GetKeyAndOwnerID(ctx, id) if err != nil { return fmt.Errorf("get api key: %w", err) } 
@@ -366,10 +457,11 @@ func (s *APIKeyService) Delete(ctx context.Context, id int64, userID int64) erro return ErrInsufficientPerms } - // 清除Redis缓存(使用 ownerID 而非 apiKey.UserID) + // 清除Redis缓存(使用 userID 而非 apiKey.UserID) if s.cache != nil { - _ = s.cache.DeleteCreateAttemptCount(ctx, ownerID) + _ = s.cache.DeleteCreateAttemptCount(ctx, userID) } + s.InvalidateAuthCacheByKey(ctx, key) if err := s.apiKeyRepo.Delete(ctx, id); err != nil { return fmt.Errorf("delete api key: %w", err) diff --git a/backend/internal/service/api_key_service_cache_test.go b/backend/internal/service/api_key_service_cache_test.go new file mode 100644 index 00000000..5f2d69c4 --- /dev/null +++ b/backend/internal/service/api_key_service_cache_test.go @@ -0,0 +1,423 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/redis/go-redis/v9" + "github.com/stretchr/testify/require" +) + +type authRepoStub struct { + getByKeyForAuth func(ctx context.Context, key string) (*APIKey, error) + listKeysByUserID func(ctx context.Context, userID int64) ([]string, error) + listKeysByGroupID func(ctx context.Context, groupID int64) ([]string, error) +} + +func (s *authRepoStub) Create(ctx context.Context, key *APIKey) error { + panic("unexpected Create call") +} + +func (s *authRepoStub) GetByID(ctx context.Context, id int64) (*APIKey, error) { + panic("unexpected GetByID call") +} + +func (s *authRepoStub) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { + panic("unexpected GetKeyAndOwnerID call") +} + +func (s *authRepoStub) GetByKey(ctx context.Context, key string) (*APIKey, error) { + panic("unexpected GetByKey call") +} + +func (s *authRepoStub) GetByKeyForAuth(ctx context.Context, key string) (*APIKey, error) { + if s.getByKeyForAuth == nil { + panic("unexpected GetByKeyForAuth call") + } + return 
s.getByKeyForAuth(ctx, key) +} + +func (s *authRepoStub) Update(ctx context.Context, key *APIKey) error { + panic("unexpected Update call") +} + +func (s *authRepoStub) Delete(ctx context.Context, id int64) error { + panic("unexpected Delete call") +} + +func (s *authRepoStub) ListByUserID(ctx context.Context, userID int64, params pagination.PaginationParams) ([]APIKey, *pagination.PaginationResult, error) { + panic("unexpected ListByUserID call") +} + +func (s *authRepoStub) VerifyOwnership(ctx context.Context, userID int64, apiKeyIDs []int64) ([]int64, error) { + panic("unexpected VerifyOwnership call") +} + +func (s *authRepoStub) CountByUserID(ctx context.Context, userID int64) (int64, error) { + panic("unexpected CountByUserID call") +} + +func (s *authRepoStub) ExistsByKey(ctx context.Context, key string) (bool, error) { + panic("unexpected ExistsByKey call") +} + +func (s *authRepoStub) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]APIKey, *pagination.PaginationResult, error) { + panic("unexpected ListByGroupID call") +} + +func (s *authRepoStub) SearchAPIKeys(ctx context.Context, userID int64, keyword string, limit int) ([]APIKey, error) { + panic("unexpected SearchAPIKeys call") +} + +func (s *authRepoStub) ClearGroupIDByGroupID(ctx context.Context, groupID int64) (int64, error) { + panic("unexpected ClearGroupIDByGroupID call") +} + +func (s *authRepoStub) CountByGroupID(ctx context.Context, groupID int64) (int64, error) { + panic("unexpected CountByGroupID call") +} + +func (s *authRepoStub) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + if s.listKeysByUserID == nil { + panic("unexpected ListKeysByUserID call") + } + return s.listKeysByUserID(ctx, userID) +} + +func (s *authRepoStub) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) { + if s.listKeysByGroupID == nil { + panic("unexpected ListKeysByGroupID call") + } + return s.listKeysByGroupID(ctx, groupID) +} + 
+type authCacheStub struct { + getAuthCache func(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) + setAuthKeys []string + deleteAuthKeys []string +} + +func (s *authCacheStub) GetCreateAttemptCount(ctx context.Context, userID int64) (int, error) { + return 0, nil +} + +func (s *authCacheStub) IncrementCreateAttemptCount(ctx context.Context, userID int64) error { + return nil +} + +func (s *authCacheStub) DeleteCreateAttemptCount(ctx context.Context, userID int64) error { + return nil +} + +func (s *authCacheStub) IncrementDailyUsage(ctx context.Context, apiKey string) error { + return nil +} + +func (s *authCacheStub) SetDailyUsageExpiry(ctx context.Context, apiKey string, ttl time.Duration) error { + return nil +} + +func (s *authCacheStub) GetAuthCache(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + if s.getAuthCache == nil { + return nil, redis.Nil + } + return s.getAuthCache(ctx, key) +} + +func (s *authCacheStub) SetAuthCache(ctx context.Context, key string, entry *APIKeyAuthCacheEntry, ttl time.Duration) error { + s.setAuthKeys = append(s.setAuthKeys, key) + return nil +} + +func (s *authCacheStub) DeleteAuthCache(ctx context.Context, key string) error { + s.deleteAuthKeys = append(s.deleteAuthKeys, key) + return nil +} + +func TestAPIKeyService_GetByKey_UsesL2Cache(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + return nil, errors.New("unexpected repo call") + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + NegativeTTLSeconds: 30, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + + groupID := int64(9) + cacheEntry := &APIKeyAuthCacheEntry{ + Snapshot: &APIKeyAuthSnapshot{ + APIKeyID: 1, + UserID: 2, + GroupID: &groupID, + Status: StatusActive, + User: APIKeyAuthUserSnapshot{ + ID: 2, + Status: StatusActive, + Role: RoleUser, + Balance: 10, + Concurrency: 3, 
+ }, + Group: &APIKeyAuthGroupSnapshot{ + ID: groupID, + Name: "g", + Platform: PlatformAnthropic, + Status: StatusActive, + SubscriptionType: SubscriptionTypeStandard, + RateMultiplier: 1, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + "claude-opus-*": {1, 2}, + }, + }, + }, + } + cache.getAuthCache = func(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + return cacheEntry, nil + } + + apiKey, err := svc.GetByKey(context.Background(), "k1") + require.NoError(t, err) + require.Equal(t, int64(1), apiKey.ID) + require.Equal(t, int64(2), apiKey.User.ID) + require.Equal(t, groupID, apiKey.Group.ID) + require.True(t, apiKey.Group.ModelRoutingEnabled) + require.Equal(t, map[string][]int64{"claude-opus-*": {1, 2}}, apiKey.Group.ModelRouting) +} + +func TestAPIKeyService_GetByKey_NegativeCache(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + return nil, errors.New("unexpected repo call") + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + NegativeTTLSeconds: 30, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + cache.getAuthCache = func(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + return &APIKeyAuthCacheEntry{NotFound: true}, nil + } + + _, err := svc.GetByKey(context.Background(), "missing") + require.ErrorIs(t, err, ErrAPIKeyNotFound) +} + +func TestAPIKeyService_GetByKey_CacheMissStoresL2(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + return &APIKey{ + ID: 5, + UserID: 7, + Status: StatusActive, + User: &User{ + ID: 7, + Status: StatusActive, + Role: RoleUser, + Balance: 12, + Concurrency: 2, + }, + }, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + NegativeTTLSeconds: 30, + }, + } + svc := 
NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + cache.getAuthCache = func(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + return nil, redis.Nil + } + + apiKey, err := svc.GetByKey(context.Background(), "k2") + require.NoError(t, err) + require.Equal(t, int64(5), apiKey.ID) + require.Len(t, cache.setAuthKeys, 1) +} + +func TestAPIKeyService_GetByKey_UsesL1Cache(t *testing.T) { + var calls int32 + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + atomic.AddInt32(&calls, 1) + return &APIKey{ + ID: 21, + UserID: 3, + Status: StatusActive, + User: &User{ + ID: 3, + Status: StatusActive, + Role: RoleUser, + Balance: 5, + Concurrency: 2, + }, + }, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L1Size: 1000, + L1TTLSeconds: 60, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + require.NotNil(t, svc.authCacheL1) + + _, err := svc.GetByKey(context.Background(), "k-l1") + require.NoError(t, err) + svc.authCacheL1.Wait() + cacheKey := svc.authCacheKey("k-l1") + _, ok := svc.authCacheL1.Get(cacheKey) + require.True(t, ok) + _, err = svc.GetByKey(context.Background(), "k-l1") + require.NoError(t, err) + require.Equal(t, int32(1), atomic.LoadInt32(&calls)) +} + +func TestAPIKeyService_InvalidateAuthCacheByUserID(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + listKeysByUserID: func(ctx context.Context, userID int64) ([]string, error) { + return []string{"k1", "k2"}, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + NegativeTTLSeconds: 30, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + + svc.InvalidateAuthCacheByUserID(context.Background(), 7) + require.Len(t, cache.deleteAuthKeys, 2) +} + +func TestAPIKeyService_InvalidateAuthCacheByGroupID(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + 
listKeysByGroupID: func(ctx context.Context, groupID int64) ([]string, error) { + return []string{"k1", "k2"}, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + + svc.InvalidateAuthCacheByGroupID(context.Background(), 9) + require.Len(t, cache.deleteAuthKeys, 2) +} + +func TestAPIKeyService_InvalidateAuthCacheByKey(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + listKeysByUserID: func(ctx context.Context, userID int64) ([]string, error) { + return nil, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + + svc.InvalidateAuthCacheByKey(context.Background(), "k1") + require.Len(t, cache.deleteAuthKeys, 1) +} + +func TestAPIKeyService_GetByKey_CachesNegativeOnRepoMiss(t *testing.T) { + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + return nil, ErrAPIKeyNotFound + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + L2TTLSeconds: 60, + NegativeTTLSeconds: 30, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + cache.getAuthCache = func(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + return nil, redis.Nil + } + + _, err := svc.GetByKey(context.Background(), "missing") + require.ErrorIs(t, err, ErrAPIKeyNotFound) + require.Len(t, cache.setAuthKeys, 1) +} + +func TestAPIKeyService_GetByKey_SingleflightCollapses(t *testing.T) { + var calls int32 + cache := &authCacheStub{} + repo := &authRepoStub{ + getByKeyForAuth: func(ctx context.Context, key string) (*APIKey, error) { + atomic.AddInt32(&calls, 1) + time.Sleep(50 * time.Millisecond) + return &APIKey{ + ID: 11, + UserID: 2, + Status: StatusActive, + User: &User{ + ID: 2, + Status: StatusActive, + Role: RoleUser, + 
Balance: 1, + Concurrency: 1, + }, + }, nil + }, + } + cfg := &config.Config{ + APIKeyAuth: config.APIKeyAuthCacheConfig{ + Singleflight: true, + }, + } + svc := NewAPIKeyService(repo, nil, nil, nil, cache, cfg) + + start := make(chan struct{}) + wg := sync.WaitGroup{} + errs := make([]error, 5) + for i := 0; i < 5; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + <-start + _, err := svc.GetByKey(context.Background(), "k1") + errs[idx] = err + }(i) + } + close(start) + wg.Wait() + + for _, err := range errs { + require.NoError(t, err) + } + require.Equal(t, int32(1), atomic.LoadInt32(&calls)) +} diff --git a/backend/internal/service/api_key_service_delete_test.go b/backend/internal/service/api_key_service_delete_test.go index 7d04c5ac..32ae884e 100644 --- a/backend/internal/service/api_key_service_delete_test.go +++ b/backend/internal/service/api_key_service_delete_test.go @@ -20,13 +20,12 @@ import ( // 用于隔离测试 APIKeyService.Delete 方法,避免依赖真实数据库。 // // 设计说明: -// - ownerID: 模拟 GetOwnerID 返回的所有者 ID -// - ownerErr: 模拟 GetOwnerID 返回的错误(如 ErrAPIKeyNotFound) +// - apiKey/getByIDErr: 模拟 GetKeyAndOwnerID 返回的记录与错误 // - deleteErr: 模拟 Delete 返回的错误 // - deletedIDs: 记录被调用删除的 API Key ID,用于断言验证 type apiKeyRepoStub struct { - ownerID int64 // GetOwnerID 的返回值 - ownerErr error // GetOwnerID 的错误返回值 + apiKey *APIKey // GetKeyAndOwnerID 的返回值 + getByIDErr error // GetKeyAndOwnerID 的错误返回值 deleteErr error // Delete 的错误返回值 deletedIDs []int64 // 记录已删除的 API Key ID 列表 } @@ -38,19 +37,34 @@ func (s *apiKeyRepoStub) Create(ctx context.Context, key *APIKey) error { } func (s *apiKeyRepoStub) GetByID(ctx context.Context, id int64) (*APIKey, error) { + if s.getByIDErr != nil { + return nil, s.getByIDErr + } + if s.apiKey != nil { + clone := *s.apiKey + return &clone, nil + } panic("unexpected GetByID call") } -// GetOwnerID 返回预设的所有者 ID 或错误。 -// 这是 Delete 方法调用的第一个仓储方法,用于验证调用者是否为 API Key 的所有者。 -func (s *apiKeyRepoStub) GetOwnerID(ctx context.Context, id int64) (int64, error) { - return 
s.ownerID, s.ownerErr +func (s *apiKeyRepoStub) GetKeyAndOwnerID(ctx context.Context, id int64) (string, int64, error) { + if s.getByIDErr != nil { + return "", 0, s.getByIDErr + } + if s.apiKey != nil { + return s.apiKey.Key, s.apiKey.UserID, nil + } + return "", 0, ErrAPIKeyNotFound } func (s *apiKeyRepoStub) GetByKey(ctx context.Context, key string) (*APIKey, error) { panic("unexpected GetByKey call") } +func (s *apiKeyRepoStub) GetByKeyForAuth(ctx context.Context, key string) (*APIKey, error) { + panic("unexpected GetByKeyForAuth call") +} + func (s *apiKeyRepoStub) Update(ctx context.Context, key *APIKey) error { panic("unexpected Update call") } @@ -96,13 +110,22 @@ func (s *apiKeyRepoStub) CountByGroupID(ctx context.Context, groupID int64) (int panic("unexpected CountByGroupID call") } +func (s *apiKeyRepoStub) ListKeysByUserID(ctx context.Context, userID int64) ([]string, error) { + panic("unexpected ListKeysByUserID call") +} + +func (s *apiKeyRepoStub) ListKeysByGroupID(ctx context.Context, groupID int64) ([]string, error) { + panic("unexpected ListKeysByGroupID call") +} + // apiKeyCacheStub 是 APIKeyCache 接口的测试桩实现。 // 用于验证删除操作时缓存清理逻辑是否被正确调用。 // // 设计说明: // - invalidated: 记录被清除缓存的用户 ID 列表 type apiKeyCacheStub struct { - invalidated []int64 // 记录调用 DeleteCreateAttemptCount 时传入的用户 ID + invalidated []int64 // 记录调用 DeleteCreateAttemptCount 时传入的用户 ID + deleteAuthKeys []string // 记录调用 DeleteAuthCache 时传入的缓存 key } // GetCreateAttemptCount 返回 0,表示用户未超过创建次数限制 @@ -132,15 +155,30 @@ func (s *apiKeyCacheStub) SetDailyUsageExpiry(ctx context.Context, apiKey string return nil } +func (s *apiKeyCacheStub) GetAuthCache(ctx context.Context, key string) (*APIKeyAuthCacheEntry, error) { + return nil, nil +} + +func (s *apiKeyCacheStub) SetAuthCache(ctx context.Context, key string, entry *APIKeyAuthCacheEntry, ttl time.Duration) error { + return nil +} + +func (s *apiKeyCacheStub) DeleteAuthCache(ctx context.Context, key string) error { + s.deleteAuthKeys = 
append(s.deleteAuthKeys, key) + return nil +} + // TestApiKeyService_Delete_OwnerMismatch 测试非所有者尝试删除时返回权限错误。 // 预期行为: -// - GetOwnerID 返回所有者 ID 为 1 +// - GetKeyAndOwnerID 返回所有者 ID 为 1 // - 调用者 userID 为 2(不匹配) // - 返回 ErrInsufficientPerms 错误 // - Delete 方法不被调用 // - 缓存不被清除 func TestApiKeyService_Delete_OwnerMismatch(t *testing.T) { - repo := &apiKeyRepoStub{ownerID: 1} + repo := &apiKeyRepoStub{ + apiKey: &APIKey{ID: 10, UserID: 1, Key: "k"}, + } cache := &apiKeyCacheStub{} svc := &APIKeyService{apiKeyRepo: repo, cache: cache} @@ -148,17 +186,20 @@ func TestApiKeyService_Delete_OwnerMismatch(t *testing.T) { require.ErrorIs(t, err, ErrInsufficientPerms) require.Empty(t, repo.deletedIDs) // 验证删除操作未被调用 require.Empty(t, cache.invalidated) // 验证缓存未被清除 + require.Empty(t, cache.deleteAuthKeys) } // TestApiKeyService_Delete_Success 测试所有者成功删除 API Key 的场景。 // 预期行为: -// - GetOwnerID 返回所有者 ID 为 7 +// - GetKeyAndOwnerID 返回所有者 ID 为 7 // - 调用者 userID 为 7(匹配) // - Delete 成功执行 // - 缓存被正确清除(使用 ownerID) // - 返回 nil 错误 func TestApiKeyService_Delete_Success(t *testing.T) { - repo := &apiKeyRepoStub{ownerID: 7} + repo := &apiKeyRepoStub{ + apiKey: &APIKey{ID: 42, UserID: 7, Key: "k"}, + } cache := &apiKeyCacheStub{} svc := &APIKeyService{apiKeyRepo: repo, cache: cache} @@ -166,16 +207,17 @@ func TestApiKeyService_Delete_Success(t *testing.T) { require.NoError(t, err) require.Equal(t, []int64{42}, repo.deletedIDs) // 验证正确的 API Key 被删除 require.Equal(t, []int64{7}, cache.invalidated) // 验证所有者的缓存被清除 + require.Equal(t, []string{svc.authCacheKey("k")}, cache.deleteAuthKeys) } // TestApiKeyService_Delete_NotFound 测试删除不存在的 API Key 时返回正确的错误。 // 预期行为: -// - GetOwnerID 返回 ErrAPIKeyNotFound 错误 +// - GetKeyAndOwnerID 返回 ErrAPIKeyNotFound 错误 // - 返回 ErrAPIKeyNotFound 错误(被 fmt.Errorf 包装) // - Delete 方法不被调用 // - 缓存不被清除 func TestApiKeyService_Delete_NotFound(t *testing.T) { - repo := &apiKeyRepoStub{ownerErr: ErrAPIKeyNotFound} + repo := &apiKeyRepoStub{getByIDErr: ErrAPIKeyNotFound} cache := 
&apiKeyCacheStub{} svc := &APIKeyService{apiKeyRepo: repo, cache: cache} @@ -183,18 +225,19 @@ func TestApiKeyService_Delete_NotFound(t *testing.T) { require.ErrorIs(t, err, ErrAPIKeyNotFound) require.Empty(t, repo.deletedIDs) require.Empty(t, cache.invalidated) + require.Empty(t, cache.deleteAuthKeys) } // TestApiKeyService_Delete_DeleteFails 测试删除操作失败时的错误处理。 // 预期行为: -// - GetOwnerID 返回正确的所有者 ID +// - GetKeyAndOwnerID 返回正确的所有者 ID // - 所有权验证通过 // - 缓存被清除(在删除之前) // - Delete 被调用但返回错误 // - 返回包含 "delete api key" 的错误信息 func TestApiKeyService_Delete_DeleteFails(t *testing.T) { repo := &apiKeyRepoStub{ - ownerID: 3, + apiKey: &APIKey{ID: 42, UserID: 3, Key: "k"}, deleteErr: errors.New("delete failed"), } cache := &apiKeyCacheStub{} @@ -205,4 +248,5 @@ func TestApiKeyService_Delete_DeleteFails(t *testing.T) { require.ErrorContains(t, err, "delete api key") require.Equal(t, []int64{3}, repo.deletedIDs) // 验证删除操作被调用 require.Equal(t, []int64{3}, cache.invalidated) // 验证缓存已被清除(即使删除失败) + require.Equal(t, []string{svc.authCacheKey("k")}, cache.deleteAuthKeys) } diff --git a/backend/internal/service/auth_cache_invalidation_test.go b/backend/internal/service/auth_cache_invalidation_test.go new file mode 100644 index 00000000..b6e56177 --- /dev/null +++ b/backend/internal/service/auth_cache_invalidation_test.go @@ -0,0 +1,33 @@ +//go:build unit + +package service + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUsageService_InvalidateUsageCaches(t *testing.T) { + invalidator := &authCacheInvalidatorStub{} + svc := &UsageService{authCacheInvalidator: invalidator} + + svc.invalidateUsageCaches(context.Background(), 7, false) + require.Empty(t, invalidator.userIDs) + + svc.invalidateUsageCaches(context.Background(), 7, true) + require.Equal(t, []int64{7}, invalidator.userIDs) +} + +func TestRedeemService_InvalidateRedeemCaches_AuthCache(t *testing.T) { + invalidator := &authCacheInvalidatorStub{} + svc := &RedeemService{authCacheInvalidator: 
invalidator} + + svc.invalidateRedeemCaches(context.Background(), 11, &RedeemCode{Type: RedeemTypeBalance}) + svc.invalidateRedeemCaches(context.Background(), 11, &RedeemCode{Type: RedeemTypeConcurrency}) + groupID := int64(3) + svc.invalidateRedeemCaches(context.Background(), 11, &RedeemCode{Type: RedeemTypeSubscription, GroupID: &groupID}) + + require.Equal(t, []int64{11, 11, 11}, invalidator.userIDs) +} diff --git a/backend/internal/service/auth_service.go b/backend/internal/service/auth_service.go index e232deb3..386b43fc 100644 --- a/backend/internal/service/auth_service.go +++ b/backend/internal/service/auth_service.go @@ -52,6 +52,7 @@ type AuthService struct { emailService *EmailService turnstileService *TurnstileService emailQueueService *EmailQueueService + promoService *PromoService } // NewAuthService 创建认证服务实例 @@ -62,6 +63,7 @@ func NewAuthService( emailService *EmailService, turnstileService *TurnstileService, emailQueueService *EmailQueueService, + promoService *PromoService, ) *AuthService { return &AuthService{ userRepo: userRepo, @@ -70,16 +72,17 @@ func NewAuthService( emailService: emailService, turnstileService: turnstileService, emailQueueService: emailQueueService, + promoService: promoService, } } // Register 用户注册,返回token和用户 func (s *AuthService) Register(ctx context.Context, email, password string) (string, *User, error) { - return s.RegisterWithVerification(ctx, email, password, "") + return s.RegisterWithVerification(ctx, email, password, "", "") } -// RegisterWithVerification 用户注册(支持邮件验证),返回token和用户 -func (s *AuthService) RegisterWithVerification(ctx context.Context, email, password, verifyCode string) (string, *User, error) { +// RegisterWithVerification 用户注册(支持邮件验证和优惠码),返回token和用户 +func (s *AuthService) RegisterWithVerification(ctx context.Context, email, password, verifyCode, promoCode string) (string, *User, error) { // 检查是否开放注册(默认关闭:settingService 未配置时不允许注册) if s.settingService == nil || !s.settingService.IsRegistrationEnabled(ctx) { 
return "", nil, ErrRegDisabled @@ -150,6 +153,19 @@ func (s *AuthService) RegisterWithVerification(ctx context.Context, email, passw return "", nil, ErrServiceUnavailable } + // 应用优惠码(如果提供) + if promoCode != "" && s.promoService != nil { + if err := s.promoService.ApplyPromoCode(ctx, user.ID, promoCode); err != nil { + // 优惠码应用失败不影响注册,只记录日志 + log.Printf("[Auth] Failed to apply promo code for user %d: %v", user.ID, err) + } else { + // 重新获取用户信息以获取更新后的余额 + if updatedUser, err := s.userRepo.GetByID(ctx, user.ID); err == nil { + user = updatedUser + } + } + } + // 生成token token, err := s.GenerateToken(user) if err != nil { @@ -341,7 +357,7 @@ func (s *AuthService) Login(ctx context.Context, email, password string) (string // - 如果邮箱已存在:直接登录(不需要本地密码) // - 如果邮箱不存在:创建新用户并登录 // -// 注意:该函数用于“终端用户登录 Sub2API 本身”的场景(不同于上游账号的 OAuth,例如 OpenAI/Gemini)。 +// 注意:该函数用于 LinuxDo OAuth 登录场景(不同于上游账号的 OAuth,例如 Claude/OpenAI/Gemini)。 // 为了满足现有数据库约束(需要密码哈希),新用户会生成随机密码并进行哈希保存。 func (s *AuthService) LoginOrRegisterOAuth(ctx context.Context, email, username string) (string, *User, error) { email = strings.TrimSpace(email) @@ -360,8 +376,8 @@ func (s *AuthService) LoginOrRegisterOAuth(ctx context.Context, email, username user, err := s.userRepo.GetByEmail(ctx, email) if err != nil { if errors.Is(err, ErrUserNotFound) { - // OAuth 首次登录视为注册。 - if s.settingService != nil && !s.settingService.IsRegistrationEnabled(ctx) { + // OAuth 首次登录视为注册(fail-close:settingService 未配置时不允许注册) + if s.settingService == nil || !s.settingService.IsRegistrationEnabled(ctx) { return "", nil, ErrRegDisabled } diff --git a/backend/internal/service/auth_service_register_test.go b/backend/internal/service/auth_service_register_test.go index ab1f20a0..bc8f6f68 100644 --- a/backend/internal/service/auth_service_register_test.go +++ b/backend/internal/service/auth_service_register_test.go @@ -100,6 +100,7 @@ func newAuthService(repo *userRepoStub, settings map[string]string, emailCache E emailService, nil, nil, + nil, // 
promoService ) } @@ -131,7 +132,7 @@ func TestAuthService_Register_EmailVerifyEnabledButServiceNotConfigured(t *testi }, nil) // 应返回服务不可用错误,而不是允许绕过验证 - _, _, err := service.RegisterWithVerification(context.Background(), "user@test.com", "password", "any-code") + _, _, err := service.RegisterWithVerification(context.Background(), "user@test.com", "password", "any-code", "") require.ErrorIs(t, err, ErrServiceUnavailable) } @@ -143,7 +144,7 @@ func TestAuthService_Register_EmailVerifyRequired(t *testing.T) { SettingKeyEmailVerifyEnabled: "true", }, cache) - _, _, err := service.RegisterWithVerification(context.Background(), "user@test.com", "password", "") + _, _, err := service.RegisterWithVerification(context.Background(), "user@test.com", "password", "", "") require.ErrorIs(t, err, ErrEmailVerifyRequired) } @@ -157,7 +158,7 @@ func TestAuthService_Register_EmailVerifyInvalid(t *testing.T) { SettingKeyEmailVerifyEnabled: "true", }, cache) - _, _, err := service.RegisterWithVerification(context.Background(), "user@test.com", "password", "wrong") + _, _, err := service.RegisterWithVerification(context.Background(), "user@test.com", "password", "wrong", "") require.ErrorIs(t, err, ErrInvalidVerifyCode) require.ErrorContains(t, err, "verify code") } diff --git a/backend/internal/service/claude_token_provider.go b/backend/internal/service/claude_token_provider.go new file mode 100644 index 00000000..c7c6e42d --- /dev/null +++ b/backend/internal/service/claude_token_provider.go @@ -0,0 +1,208 @@ +package service + +import ( + "context" + "errors" + "log/slog" + "strconv" + "strings" + "time" +) + +const ( + claudeTokenRefreshSkew = 3 * time.Minute + claudeTokenCacheSkew = 5 * time.Minute + claudeLockWaitTime = 200 * time.Millisecond +) + +// ClaudeTokenCache Token 缓存接口(复用 GeminiTokenCache 接口定义) +type ClaudeTokenCache = GeminiTokenCache + +// ClaudeTokenProvider 管理 Claude (Anthropic) OAuth 账户的 access_token +type ClaudeTokenProvider struct { + accountRepo AccountRepository 
+ tokenCache ClaudeTokenCache + oauthService *OAuthService +} + +func NewClaudeTokenProvider( + accountRepo AccountRepository, + tokenCache ClaudeTokenCache, + oauthService *OAuthService, +) *ClaudeTokenProvider { + return &ClaudeTokenProvider{ + accountRepo: accountRepo, + tokenCache: tokenCache, + oauthService: oauthService, + } +} + +// GetAccessToken 获取有效的 access_token +func (p *ClaudeTokenProvider) GetAccessToken(ctx context.Context, account *Account) (string, error) { + if account == nil { + return "", errors.New("account is nil") + } + if account.Platform != PlatformAnthropic || account.Type != AccountTypeOAuth { + return "", errors.New("not an anthropic oauth account") + } + + cacheKey := ClaudeTokenCacheKey(account) + + // 1. 先尝试缓存 + if p.tokenCache != nil { + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && strings.TrimSpace(token) != "" { + slog.Debug("claude_token_cache_hit", "account_id", account.ID) + return token, nil + } else if err != nil { + slog.Warn("claude_token_cache_get_failed", "account_id", account.ID, "error", err) + } + } + + slog.Debug("claude_token_cache_miss", "account_id", account.ID) + + // 2. 
如果即将过期则刷新 + expiresAt := account.GetCredentialAsTime("expires_at") + needsRefresh := expiresAt == nil || time.Until(*expiresAt) <= claudeTokenRefreshSkew + refreshFailed := false + if needsRefresh && p.tokenCache != nil { + locked, lockErr := p.tokenCache.AcquireRefreshLock(ctx, cacheKey, 30*time.Second) + if lockErr == nil && locked { + defer func() { _ = p.tokenCache.ReleaseRefreshLock(ctx, cacheKey) }() + + // 拿到锁后再次检查缓存(另一个 worker 可能已刷新) + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && strings.TrimSpace(token) != "" { + return token, nil + } + + // 从数据库获取最新账户信息 + fresh, err := p.accountRepo.GetByID(ctx, account.ID) + if err == nil && fresh != nil { + account = fresh + } + expiresAt = account.GetCredentialAsTime("expires_at") + if expiresAt == nil || time.Until(*expiresAt) <= claudeTokenRefreshSkew { + if p.oauthService == nil { + slog.Warn("claude_oauth_service_not_configured", "account_id", account.ID) + refreshFailed = true // 无法刷新,标记失败 + } else { + tokenInfo, err := p.oauthService.RefreshAccountToken(ctx, account) + if err != nil { + // 刷新失败时记录警告,但不立即返回错误,尝试使用现有 token + slog.Warn("claude_token_refresh_failed", "account_id", account.ID, "error", err) + refreshFailed = true // 刷新失败,标记以使用短 TTL + } else { + // 构建新 credentials,保留原有字段 + newCredentials := make(map[string]any) + for k, v := range account.Credentials { + newCredentials[k] = v + } + newCredentials["access_token"] = tokenInfo.AccessToken + newCredentials["token_type"] = tokenInfo.TokenType + newCredentials["expires_in"] = strconv.FormatInt(tokenInfo.ExpiresIn, 10) + newCredentials["expires_at"] = strconv.FormatInt(tokenInfo.ExpiresAt, 10) + if tokenInfo.RefreshToken != "" { + newCredentials["refresh_token"] = tokenInfo.RefreshToken + } + if tokenInfo.Scope != "" { + newCredentials["scope"] = tokenInfo.Scope + } + account.Credentials = newCredentials + if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { + slog.Error("claude_token_provider_update_failed", 
"account_id", account.ID, "error", updateErr) + } + expiresAt = account.GetCredentialAsTime("expires_at") + } + } + } + } else if lockErr != nil { + // Redis 错误导致无法获取锁,降级为无锁刷新(仅在 token 接近过期时) + slog.Warn("claude_token_lock_failed_degraded_refresh", "account_id", account.ID, "error", lockErr) + + // 检查 ctx 是否已取消 + if ctx.Err() != nil { + return "", ctx.Err() + } + + // 从数据库获取最新账户信息 + if p.accountRepo != nil { + fresh, err := p.accountRepo.GetByID(ctx, account.ID) + if err == nil && fresh != nil { + account = fresh + } + } + expiresAt = account.GetCredentialAsTime("expires_at") + + // 仅在 expires_at 已过期/接近过期时才执行无锁刷新 + if expiresAt == nil || time.Until(*expiresAt) <= claudeTokenRefreshSkew { + if p.oauthService == nil { + slog.Warn("claude_oauth_service_not_configured", "account_id", account.ID) + refreshFailed = true + } else { + tokenInfo, err := p.oauthService.RefreshAccountToken(ctx, account) + if err != nil { + slog.Warn("claude_token_refresh_failed_degraded", "account_id", account.ID, "error", err) + refreshFailed = true + } else { + // 构建新 credentials,保留原有字段 + newCredentials := make(map[string]any) + for k, v := range account.Credentials { + newCredentials[k] = v + } + newCredentials["access_token"] = tokenInfo.AccessToken + newCredentials["token_type"] = tokenInfo.TokenType + newCredentials["expires_in"] = strconv.FormatInt(tokenInfo.ExpiresIn, 10) + newCredentials["expires_at"] = strconv.FormatInt(tokenInfo.ExpiresAt, 10) + if tokenInfo.RefreshToken != "" { + newCredentials["refresh_token"] = tokenInfo.RefreshToken + } + if tokenInfo.Scope != "" { + newCredentials["scope"] = tokenInfo.Scope + } + account.Credentials = newCredentials + if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { + slog.Error("claude_token_provider_update_failed", "account_id", account.ID, "error", updateErr) + } + expiresAt = account.GetCredentialAsTime("expires_at") + } + } + } + } else { + // 锁获取失败(被其他 worker 持有),等待 200ms 后重试读取缓存 + time.Sleep(claudeLockWaitTime) + 
if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && strings.TrimSpace(token) != "" { + slog.Debug("claude_token_cache_hit_after_wait", "account_id", account.ID) + return token, nil + } + } + } + + accessToken := account.GetCredential("access_token") + if strings.TrimSpace(accessToken) == "" { + return "", errors.New("access_token not found in credentials") + } + + // 3. 存入缓存 + if p.tokenCache != nil { + ttl := 30 * time.Minute + if refreshFailed { + // 刷新失败时使用短 TTL,避免失效 token 长时间缓存导致 401 抖动 + ttl = time.Minute + slog.Debug("claude_token_cache_short_ttl", "account_id", account.ID, "reason", "refresh_failed") + } else if expiresAt != nil { + until := time.Until(*expiresAt) + switch { + case until > claudeTokenCacheSkew: + ttl = until - claudeTokenCacheSkew + case until > 0: + ttl = until + default: + ttl = time.Minute + } + } + if err := p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl); err != nil { + slog.Warn("claude_token_cache_set_failed", "account_id", account.ID, "error", err) + } + } + + return accessToken, nil +} diff --git a/backend/internal/service/claude_token_provider_test.go b/backend/internal/service/claude_token_provider_test.go new file mode 100644 index 00000000..3e21f6f4 --- /dev/null +++ b/backend/internal/service/claude_token_provider_test.go @@ -0,0 +1,939 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// claudeTokenCacheStub implements ClaudeTokenCache for testing +type claudeTokenCacheStub struct { + mu sync.Mutex + tokens map[string]string + getErr error + setErr error + deleteErr error + lockAcquired bool + lockErr error + releaseLockErr error + getCalled int32 + setCalled int32 + lockCalled int32 + unlockCalled int32 + simulateLockRace bool +} + +func newClaudeTokenCacheStub() *claudeTokenCacheStub { + return &claudeTokenCacheStub{ + tokens: make(map[string]string), + lockAcquired: true, 
+ } +} + +func (s *claudeTokenCacheStub) GetAccessToken(ctx context.Context, cacheKey string) (string, error) { + atomic.AddInt32(&s.getCalled, 1) + if s.getErr != nil { + return "", s.getErr + } + s.mu.Lock() + defer s.mu.Unlock() + return s.tokens[cacheKey], nil +} + +func (s *claudeTokenCacheStub) SetAccessToken(ctx context.Context, cacheKey string, token string, ttl time.Duration) error { + atomic.AddInt32(&s.setCalled, 1) + if s.setErr != nil { + return s.setErr + } + s.mu.Lock() + defer s.mu.Unlock() + s.tokens[cacheKey] = token + return nil +} + +func (s *claudeTokenCacheStub) DeleteAccessToken(ctx context.Context, cacheKey string) error { + if s.deleteErr != nil { + return s.deleteErr + } + s.mu.Lock() + defer s.mu.Unlock() + delete(s.tokens, cacheKey) + return nil +} + +func (s *claudeTokenCacheStub) AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error) { + atomic.AddInt32(&s.lockCalled, 1) + if s.lockErr != nil { + return false, s.lockErr + } + if s.simulateLockRace { + return false, nil + } + return s.lockAcquired, nil +} + +func (s *claudeTokenCacheStub) ReleaseRefreshLock(ctx context.Context, cacheKey string) error { + atomic.AddInt32(&s.unlockCalled, 1) + return s.releaseLockErr +} + +// claudeAccountRepoStub is a minimal stub implementing only the methods used by ClaudeTokenProvider +type claudeAccountRepoStub struct { + account *Account + getErr error + updateErr error + getCalled int32 + updateCalled int32 +} + +func (r *claudeAccountRepoStub) GetByID(ctx context.Context, id int64) (*Account, error) { + atomic.AddInt32(&r.getCalled, 1) + if r.getErr != nil { + return nil, r.getErr + } + return r.account, nil +} + +func (r *claudeAccountRepoStub) Update(ctx context.Context, account *Account) error { + atomic.AddInt32(&r.updateCalled, 1) + if r.updateErr != nil { + return r.updateErr + } + r.account = account + return nil +} + +// claudeOAuthServiceStub implements OAuthService methods for testing +type 
claudeOAuthServiceStub struct { + tokenInfo *TokenInfo + refreshErr error + refreshCalled int32 +} + +func (s *claudeOAuthServiceStub) RefreshAccountToken(ctx context.Context, account *Account) (*TokenInfo, error) { + atomic.AddInt32(&s.refreshCalled, 1) + if s.refreshErr != nil { + return nil, s.refreshErr + } + return s.tokenInfo, nil +} + +// testClaudeTokenProvider is a test version that uses the stub OAuth service +type testClaudeTokenProvider struct { + accountRepo *claudeAccountRepoStub + tokenCache *claudeTokenCacheStub + oauthService *claudeOAuthServiceStub +} + +func (p *testClaudeTokenProvider) GetAccessToken(ctx context.Context, account *Account) (string, error) { + if account == nil { + return "", errors.New("account is nil") + } + if account.Platform != PlatformAnthropic || account.Type != AccountTypeOAuth { + return "", errors.New("not an anthropic oauth account") + } + + cacheKey := ClaudeTokenCacheKey(account) + + // 1. Check cache + if p.tokenCache != nil { + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && token != "" { + return token, nil + } + } + + // 2. 
Check if refresh needed + expiresAt := account.GetCredentialAsTime("expires_at") + needsRefresh := expiresAt == nil || time.Until(*expiresAt) <= claudeTokenRefreshSkew + refreshFailed := false + if needsRefresh && p.tokenCache != nil { + locked, err := p.tokenCache.AcquireRefreshLock(ctx, cacheKey, 30*time.Second) + if err == nil && locked { + defer func() { _ = p.tokenCache.ReleaseRefreshLock(ctx, cacheKey) }() + + // Check cache again after acquiring lock + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && token != "" { + return token, nil + } + + // Get fresh account from DB + fresh, err := p.accountRepo.GetByID(ctx, account.ID) + if err == nil && fresh != nil { + account = fresh + } + expiresAt = account.GetCredentialAsTime("expires_at") + if expiresAt == nil || time.Until(*expiresAt) <= claudeTokenRefreshSkew { + if p.oauthService == nil { + refreshFailed = true // 无法刷新,标记失败 + } else { + tokenInfo, err := p.oauthService.RefreshAccountToken(ctx, account) + if err != nil { + refreshFailed = true // 刷新失败,标记以使用短 TTL + } else { + // Build new credentials + newCredentials := make(map[string]any) + for k, v := range account.Credentials { + newCredentials[k] = v + } + newCredentials["access_token"] = tokenInfo.AccessToken + newCredentials["token_type"] = tokenInfo.TokenType + newCredentials["expires_at"] = time.Now().Add(time.Duration(tokenInfo.ExpiresIn) * time.Second).Format(time.RFC3339) + if tokenInfo.RefreshToken != "" { + newCredentials["refresh_token"] = tokenInfo.RefreshToken + } + account.Credentials = newCredentials + _ = p.accountRepo.Update(ctx, account) + expiresAt = account.GetCredentialAsTime("expires_at") + } + } + } + } else if p.tokenCache.simulateLockRace { + // Wait and retry cache + time.Sleep(10 * time.Millisecond) + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && token != "" { + return token, nil + } + } + } + + accessToken := account.GetCredential("access_token") + if accessToken == "" { + 
return "", errors.New("access_token not found in credentials") + } + + // 3. Store in cache + if p.tokenCache != nil { + ttl := 30 * time.Minute + if refreshFailed { + ttl = time.Minute // 刷新失败时使用短 TTL + } else if expiresAt != nil { + until := time.Until(*expiresAt) + if until > claudeTokenCacheSkew { + ttl = until - claudeTokenCacheSkew + } else if until > 0 { + ttl = until + } else { + ttl = time.Minute + } + } + _ = p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl) + } + + return accessToken, nil +} + +func TestClaudeTokenProvider_CacheHit(t *testing.T) { + cache := newClaudeTokenCacheStub() + account := &Account{ + ID: 100, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "db-token", + }, + } + cacheKey := ClaudeTokenCacheKey(account) + cache.tokens[cacheKey] = "cached-token" + + provider := NewClaudeTokenProvider(nil, cache, nil) + + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "cached-token", token) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.getCalled)) + require.Equal(t, int32(0), atomic.LoadInt32(&cache.setCalled)) +} + +func TestClaudeTokenProvider_CacheMiss_FromCredentials(t *testing.T) { + cache := newClaudeTokenCacheStub() + // Token expires in far future, no refresh needed + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 101, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "credential-token", + "expires_at": expiresAt, + }, + } + + provider := NewClaudeTokenProvider(nil, cache, nil) + + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "credential-token", token) + + // Should have stored in cache + cacheKey := ClaudeTokenCacheKey(account) + require.Equal(t, "credential-token", cache.tokens[cacheKey]) +} + +func 
TestClaudeTokenProvider_TokenRefresh(t *testing.T) { + cache := newClaudeTokenCacheStub() + accountRepo := &claudeAccountRepoStub{} + oauthService := &claudeOAuthServiceStub{ + tokenInfo: &TokenInfo{ + AccessToken: "refreshed-token", + RefreshToken: "new-refresh-token", + TokenType: "Bearer", + ExpiresIn: 3600, + ExpiresAt: time.Now().Add(time.Hour).Unix(), + }, + } + + // Token expires soon (within refresh skew) + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 102, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-token", + "refresh_token": "old-refresh-token", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + + provider := &testClaudeTokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: oauthService, + } + + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "refreshed-token", token) + require.Equal(t, int32(1), atomic.LoadInt32(&oauthService.refreshCalled)) +} + +func TestClaudeTokenProvider_LockRaceCondition(t *testing.T) { + cache := newClaudeTokenCacheStub() + cache.simulateLockRace = true + accountRepo := &claudeAccountRepoStub{} + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 103, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "race-token", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + + // Simulate another worker already refreshed and cached + cacheKey := ClaudeTokenCacheKey(account) + go func() { + time.Sleep(5 * time.Millisecond) + cache.mu.Lock() + cache.tokens[cacheKey] = "winner-token" + cache.mu.Unlock() + }() + + provider := &testClaudeTokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + } + + token, err := provider.GetAccessToken(context.Background(), account) + 
require.NoError(t, err) + require.NotEmpty(t, token) +} + +func TestClaudeTokenProvider_NilAccount(t *testing.T) { + provider := NewClaudeTokenProvider(nil, nil, nil) + + token, err := provider.GetAccessToken(context.Background(), nil) + require.Error(t, err) + require.Contains(t, err.Error(), "account is nil") + require.Empty(t, token) +} + +func TestClaudeTokenProvider_WrongPlatform(t *testing.T) { + provider := NewClaudeTokenProvider(nil, nil, nil) + account := &Account{ + ID: 104, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + } + + token, err := provider.GetAccessToken(context.Background(), account) + require.Error(t, err) + require.Contains(t, err.Error(), "not an anthropic oauth account") + require.Empty(t, token) +} + +func TestClaudeTokenProvider_WrongAccountType(t *testing.T) { + provider := NewClaudeTokenProvider(nil, nil, nil) + account := &Account{ + ID: 105, + Platform: PlatformAnthropic, + Type: AccountTypeAPIKey, + } + + token, err := provider.GetAccessToken(context.Background(), account) + require.Error(t, err) + require.Contains(t, err.Error(), "not an anthropic oauth account") + require.Empty(t, token) +} + +func TestClaudeTokenProvider_SetupTokenType(t *testing.T) { + provider := NewClaudeTokenProvider(nil, nil, nil) + account := &Account{ + ID: 106, + Platform: PlatformAnthropic, + Type: AccountTypeSetupToken, + } + + token, err := provider.GetAccessToken(context.Background(), account) + require.Error(t, err) + require.Contains(t, err.Error(), "not an anthropic oauth account") + require.Empty(t, token) +} + +func TestClaudeTokenProvider_NilCache(t *testing.T) { + // Token doesn't need refresh + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 107, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "nocache-token", + "expires_at": expiresAt, + }, + } + + provider := NewClaudeTokenProvider(nil, nil, nil) + + token, err := 
provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "nocache-token", token) +} + +func TestClaudeTokenProvider_CacheGetError(t *testing.T) { + cache := newClaudeTokenCacheStub() + cache.getErr = errors.New("redis connection failed") + + // Token doesn't need refresh + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 108, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "fallback-token", + "expires_at": expiresAt, + }, + } + + provider := NewClaudeTokenProvider(nil, cache, nil) + + // Should gracefully degrade and return from credentials + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "fallback-token", token) +} + +func TestClaudeTokenProvider_CacheSetError(t *testing.T) { + cache := newClaudeTokenCacheStub() + cache.setErr = errors.New("redis write failed") + + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 109, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "still-works-token", + "expires_at": expiresAt, + }, + } + + provider := NewClaudeTokenProvider(nil, cache, nil) + + // Should still work even if cache set fails + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "still-works-token", token) +} + +func TestClaudeTokenProvider_MissingAccessToken(t *testing.T) { + cache := newClaudeTokenCacheStub() + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 110, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "expires_at": expiresAt, + // missing access_token + }, + } + + provider := NewClaudeTokenProvider(nil, cache, nil) + + token, err := provider.GetAccessToken(context.Background(), account) + 
require.Error(t, err) + require.Contains(t, err.Error(), "access_token not found") + require.Empty(t, token) +} + +func TestClaudeTokenProvider_RefreshError(t *testing.T) { + cache := newClaudeTokenCacheStub() + accountRepo := &claudeAccountRepoStub{} + oauthService := &claudeOAuthServiceStub{ + refreshErr: errors.New("oauth refresh failed"), + } + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 111, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-token", + "refresh_token": "old-refresh-token", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + + provider := &testClaudeTokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: oauthService, + } + + // Now with fallback behavior, should return existing token even if refresh fails + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "old-token", token) // Fallback to existing token +} + +func TestClaudeTokenProvider_OAuthServiceNotConfigured(t *testing.T) { + cache := newClaudeTokenCacheStub() + accountRepo := &claudeAccountRepoStub{} + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 112, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-token", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + + provider := &testClaudeTokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: nil, // not configured + } + + // Now with fallback behavior, should return existing token even if oauth service not configured + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "old-token", token) // Fallback to existing token +} + +func 
TestClaudeTokenProvider_TTLCalculation(t *testing.T) { + tests := []struct { + name string + expiresIn time.Duration + }{ + { + name: "far_future_expiry", + expiresIn: 1 * time.Hour, + }, + { + name: "medium_expiry", + expiresIn: 10 * time.Minute, + }, + { + name: "near_expiry", + expiresIn: 6 * time.Minute, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cache := newClaudeTokenCacheStub() + expiresAt := time.Now().Add(tt.expiresIn).Format(time.RFC3339) + account := &Account{ + ID: 200, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "test-token", + "expires_at": expiresAt, + }, + } + + provider := NewClaudeTokenProvider(nil, cache, nil) + + _, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + + // Verify token was cached + cacheKey := ClaudeTokenCacheKey(account) + require.Equal(t, "test-token", cache.tokens[cacheKey]) + }) + } +} + +func TestClaudeTokenProvider_AccountRepoGetError(t *testing.T) { + cache := newClaudeTokenCacheStub() + accountRepo := &claudeAccountRepoStub{ + getErr: errors.New("db connection failed"), + } + oauthService := &claudeOAuthServiceStub{ + tokenInfo: &TokenInfo{ + AccessToken: "refreshed-token", + RefreshToken: "new-refresh", + TokenType: "Bearer", + ExpiresIn: 3600, + }, + } + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 113, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-token", + "refresh_token": "old-refresh", + "expires_at": expiresAt, + }, + } + + provider := &testClaudeTokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: oauthService, + } + + // Should still work, just using the passed-in account + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "refreshed-token", token) +} 
+ +func TestClaudeTokenProvider_AccountUpdateError(t *testing.T) { + cache := newClaudeTokenCacheStub() + accountRepo := &claudeAccountRepoStub{ + updateErr: errors.New("db write failed"), + } + oauthService := &claudeOAuthServiceStub{ + tokenInfo: &TokenInfo{ + AccessToken: "refreshed-token", + RefreshToken: "new-refresh", + TokenType: "Bearer", + ExpiresIn: 3600, + }, + } + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 114, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-token", + "refresh_token": "old-refresh", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + + provider := &testClaudeTokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: oauthService, + } + + // Should still return token even if update fails + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "refreshed-token", token) +} + +func TestClaudeTokenProvider_RefreshPreservesExistingCredentials(t *testing.T) { + cache := newClaudeTokenCacheStub() + accountRepo := &claudeAccountRepoStub{} + oauthService := &claudeOAuthServiceStub{ + tokenInfo: &TokenInfo{ + AccessToken: "new-access-token", + RefreshToken: "new-refresh-token", + TokenType: "Bearer", + ExpiresIn: 3600, + }, + } + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 115, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-access-token", + "refresh_token": "old-refresh-token", + "expires_at": expiresAt, + "custom_field": "should-be-preserved", + "organization": "test-org", + }, + } + accountRepo.account = account + + provider := &testClaudeTokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: oauthService, + } + + token, err := 
provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "new-access-token", token) + + // Verify existing fields are preserved + require.Equal(t, "should-be-preserved", accountRepo.account.Credentials["custom_field"]) + require.Equal(t, "test-org", accountRepo.account.Credentials["organization"]) + // Verify new fields are updated + require.Equal(t, "new-access-token", accountRepo.account.Credentials["access_token"]) + require.Equal(t, "new-refresh-token", accountRepo.account.Credentials["refresh_token"]) +} + +func TestClaudeTokenProvider_DoubleCheckCacheAfterLock(t *testing.T) { + cache := newClaudeTokenCacheStub() + accountRepo := &claudeAccountRepoStub{} + oauthService := &claudeOAuthServiceStub{ + tokenInfo: &TokenInfo{ + AccessToken: "refreshed-token", + RefreshToken: "new-refresh", + TokenType: "Bearer", + ExpiresIn: 3600, + }, + } + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 116, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-token", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + cacheKey := ClaudeTokenCacheKey(account) + + // After lock is acquired, cache should have the token (simulating another worker) + go func() { + time.Sleep(5 * time.Millisecond) + cache.mu.Lock() + cache.tokens[cacheKey] = "cached-by-other-worker" + cache.mu.Unlock() + }() + + provider := &testClaudeTokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: oauthService, + } + + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.NotEmpty(t, token) +} + +// Tests for real provider - to increase coverage +func TestClaudeTokenProvider_Real_LockFailedWait(t *testing.T) { + cache := newClaudeTokenCacheStub() + cache.lockAcquired = false // Lock acquisition fails + + // Token expires soon (within refresh skew) 
to trigger lock attempt + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 300, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "fallback-token", + "expires_at": expiresAt, + }, + } + + // Set token in cache after lock wait period (simulate other worker refreshing) + cacheKey := ClaudeTokenCacheKey(account) + go func() { + time.Sleep(100 * time.Millisecond) + cache.mu.Lock() + cache.tokens[cacheKey] = "refreshed-by-other" + cache.mu.Unlock() + }() + + provider := NewClaudeTokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.NotEmpty(t, token) +} + +func TestClaudeTokenProvider_Real_CacheHitAfterWait(t *testing.T) { + cache := newClaudeTokenCacheStub() + cache.lockAcquired = false // Lock acquisition fails + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 301, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "original-token", + "expires_at": expiresAt, + }, + } + + cacheKey := ClaudeTokenCacheKey(account) + // Set token in cache immediately after wait starts + go func() { + time.Sleep(50 * time.Millisecond) + cache.mu.Lock() + cache.tokens[cacheKey] = "winner-token" + cache.mu.Unlock() + }() + + provider := NewClaudeTokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.NotEmpty(t, token) +} + +func TestClaudeTokenProvider_Real_NoExpiresAt(t *testing.T) { + cache := newClaudeTokenCacheStub() + cache.lockAcquired = false // Prevent entering refresh logic + + // Token with nil expires_at (no expiry set) + account := &Account{ + ID: 302, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "no-expiry-token", + }, + } + + 
// After lock wait, return token from credentials + provider := NewClaudeTokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "no-expiry-token", token) +} + +func TestClaudeTokenProvider_Real_WhitespaceToken(t *testing.T) { + cache := newClaudeTokenCacheStub() + cacheKey := "claude:account:303" + cache.tokens[cacheKey] = " " // Whitespace only - should be treated as empty + + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 303, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "real-token", + "expires_at": expiresAt, + }, + } + + provider := NewClaudeTokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "real-token", token) +} + +func TestClaudeTokenProvider_Real_EmptyCredentialToken(t *testing.T) { + cache := newClaudeTokenCacheStub() + + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 304, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": " ", // Whitespace only + "expires_at": expiresAt, + }, + } + + provider := NewClaudeTokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.Error(t, err) + require.Contains(t, err.Error(), "access_token not found") + require.Empty(t, token) +} + +func TestClaudeTokenProvider_Real_LockError(t *testing.T) { + cache := newClaudeTokenCacheStub() + cache.lockErr = errors.New("redis lock failed") + + // Token expires soon (within refresh skew) + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 305, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "fallback-on-lock-error", + "expires_at": 
expiresAt, + }, + } + + provider := NewClaudeTokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "fallback-on-lock-error", token) +} + +func TestClaudeTokenProvider_Real_NilCredentials(t *testing.T) { + cache := newClaudeTokenCacheStub() + + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 306, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "expires_at": expiresAt, + // No access_token + }, + } + + provider := NewClaudeTokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.Error(t, err) + require.Contains(t, err.Error(), "access_token not found") + require.Empty(t, token) +} diff --git a/backend/internal/service/dashboard_aggregation_service.go b/backend/internal/service/dashboard_aggregation_service.go new file mode 100644 index 00000000..da5c0e7d --- /dev/null +++ b/backend/internal/service/dashboard_aggregation_service.go @@ -0,0 +1,258 @@ +package service + +import ( + "context" + "errors" + "log" + "sync/atomic" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" +) + +const ( + defaultDashboardAggregationTimeout = 2 * time.Minute + defaultDashboardAggregationBackfillTimeout = 30 * time.Minute + dashboardAggregationRetentionInterval = 6 * time.Hour +) + +var ( + // ErrDashboardBackfillDisabled 当配置禁用回填时返回。 + ErrDashboardBackfillDisabled = errors.New("仪表盘聚合回填已禁用") + // ErrDashboardBackfillTooLarge 当回填跨度超过限制时返回。 + ErrDashboardBackfillTooLarge = errors.New("回填时间跨度过大") +) + +// DashboardAggregationRepository 定义仪表盘预聚合仓储接口。 +type DashboardAggregationRepository interface { + AggregateRange(ctx context.Context, start, end time.Time) error + GetAggregationWatermark(ctx context.Context) (time.Time, error) + UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error + CleanupAggregates(ctx context.Context, 
hourlyCutoff, dailyCutoff time.Time) error + CleanupUsageLogs(ctx context.Context, cutoff time.Time) error + EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error +} + +// DashboardAggregationService 负责定时聚合与回填。 +type DashboardAggregationService struct { + repo DashboardAggregationRepository + timingWheel *TimingWheelService + cfg config.DashboardAggregationConfig + running int32 + lastRetentionCleanup atomic.Value // time.Time +} + +// NewDashboardAggregationService 创建聚合服务。 +func NewDashboardAggregationService(repo DashboardAggregationRepository, timingWheel *TimingWheelService, cfg *config.Config) *DashboardAggregationService { + var aggCfg config.DashboardAggregationConfig + if cfg != nil { + aggCfg = cfg.DashboardAgg + } + return &DashboardAggregationService{ + repo: repo, + timingWheel: timingWheel, + cfg: aggCfg, + } +} + +// Start 启动定时聚合作业(重启生效配置)。 +func (s *DashboardAggregationService) Start() { + if s == nil || s.repo == nil || s.timingWheel == nil { + return + } + if !s.cfg.Enabled { + log.Printf("[DashboardAggregation] 聚合作业已禁用") + return + } + + interval := time.Duration(s.cfg.IntervalSeconds) * time.Second + if interval <= 0 { + interval = time.Minute + } + + if s.cfg.RecomputeDays > 0 { + go s.recomputeRecentDays() + } + + s.timingWheel.ScheduleRecurring("dashboard:aggregation", interval, func() { + s.runScheduledAggregation() + }) + log.Printf("[DashboardAggregation] 聚合作业启动 (interval=%v, lookback=%ds)", interval, s.cfg.LookbackSeconds) + if !s.cfg.BackfillEnabled { + log.Printf("[DashboardAggregation] 回填已禁用,如需补齐保留窗口以外历史数据请手动回填") + } +} + +// TriggerBackfill 触发回填(异步)。 +func (s *DashboardAggregationService) TriggerBackfill(start, end time.Time) error { + if s == nil || s.repo == nil { + return errors.New("聚合服务未初始化") + } + if !s.cfg.BackfillEnabled { + log.Printf("[DashboardAggregation] 回填被拒绝: backfill_enabled=false") + return ErrDashboardBackfillDisabled + } + if !end.After(start) { + return errors.New("回填时间范围无效") + } + if 
s.cfg.BackfillMaxDays > 0 { + maxRange := time.Duration(s.cfg.BackfillMaxDays) * 24 * time.Hour + if end.Sub(start) > maxRange { + return ErrDashboardBackfillTooLarge + } + } + + go func() { + ctx, cancel := context.WithTimeout(context.Background(), defaultDashboardAggregationBackfillTimeout) + defer cancel() + if err := s.backfillRange(ctx, start, end); err != nil { + log.Printf("[DashboardAggregation] 回填失败: %v", err) + } + }() + return nil +} + +func (s *DashboardAggregationService) recomputeRecentDays() { + days := s.cfg.RecomputeDays + if days <= 0 { + return + } + now := time.Now().UTC() + start := now.AddDate(0, 0, -days) + + ctx, cancel := context.WithTimeout(context.Background(), defaultDashboardAggregationBackfillTimeout) + defer cancel() + if err := s.backfillRange(ctx, start, now); err != nil { + log.Printf("[DashboardAggregation] 启动重算失败: %v", err) + return + } +} + +func (s *DashboardAggregationService) runScheduledAggregation() { + if !atomic.CompareAndSwapInt32(&s.running, 0, 1) { + return + } + defer atomic.StoreInt32(&s.running, 0) + + jobStart := time.Now().UTC() + ctx, cancel := context.WithTimeout(context.Background(), defaultDashboardAggregationTimeout) + defer cancel() + + now := time.Now().UTC() + last, err := s.repo.GetAggregationWatermark(ctx) + if err != nil { + log.Printf("[DashboardAggregation] 读取水位失败: %v", err) + last = time.Unix(0, 0).UTC() + } + + lookback := time.Duration(s.cfg.LookbackSeconds) * time.Second + epoch := time.Unix(0, 0).UTC() + start := last.Add(-lookback) + if !last.After(epoch) { + retentionDays := s.cfg.Retention.UsageLogsDays + if retentionDays <= 0 { + retentionDays = 1 + } + start = truncateToDayUTC(now.AddDate(0, 0, -retentionDays)) + } else if start.After(now) { + start = now.Add(-lookback) + } + + if err := s.aggregateRange(ctx, start, now); err != nil { + log.Printf("[DashboardAggregation] 聚合失败: %v", err) + return + } + + updateErr := s.repo.UpdateAggregationWatermark(ctx, now) + if updateErr != nil { + 
log.Printf("[DashboardAggregation] 更新水位失败: %v", updateErr) + } + log.Printf("[DashboardAggregation] 聚合完成 (start=%s end=%s duration=%s watermark_updated=%t)", + start.Format(time.RFC3339), + now.Format(time.RFC3339), + time.Since(jobStart).String(), + updateErr == nil, + ) + + s.maybeCleanupRetention(ctx, now) +} + +func (s *DashboardAggregationService) backfillRange(ctx context.Context, start, end time.Time) error { + if !atomic.CompareAndSwapInt32(&s.running, 0, 1) { + return errors.New("聚合作业正在运行") + } + defer atomic.StoreInt32(&s.running, 0) + + jobStart := time.Now().UTC() + startUTC := start.UTC() + endUTC := end.UTC() + if !endUTC.After(startUTC) { + return errors.New("回填时间范围无效") + } + + cursor := truncateToDayUTC(startUTC) + for cursor.Before(endUTC) { + windowEnd := cursor.Add(24 * time.Hour) + if windowEnd.After(endUTC) { + windowEnd = endUTC + } + if err := s.aggregateRange(ctx, cursor, windowEnd); err != nil { + return err + } + cursor = windowEnd + } + + updateErr := s.repo.UpdateAggregationWatermark(ctx, endUTC) + if updateErr != nil { + log.Printf("[DashboardAggregation] 更新水位失败: %v", updateErr) + } + log.Printf("[DashboardAggregation] 回填聚合完成 (start=%s end=%s duration=%s watermark_updated=%t)", + startUTC.Format(time.RFC3339), + endUTC.Format(time.RFC3339), + time.Since(jobStart).String(), + updateErr == nil, + ) + + s.maybeCleanupRetention(ctx, endUTC) + return nil +} + +func (s *DashboardAggregationService) aggregateRange(ctx context.Context, start, end time.Time) error { + if !end.After(start) { + return nil + } + if err := s.repo.EnsureUsageLogsPartitions(ctx, end); err != nil { + log.Printf("[DashboardAggregation] 分区检查失败: %v", err) + } + return s.repo.AggregateRange(ctx, start, end) +} + +func (s *DashboardAggregationService) maybeCleanupRetention(ctx context.Context, now time.Time) { + lastAny := s.lastRetentionCleanup.Load() + if lastAny != nil { + if last, ok := lastAny.(time.Time); ok && now.Sub(last) < dashboardAggregationRetentionInterval { + 
return + } + } + + hourlyCutoff := now.AddDate(0, 0, -s.cfg.Retention.HourlyDays) + dailyCutoff := now.AddDate(0, 0, -s.cfg.Retention.DailyDays) + usageCutoff := now.AddDate(0, 0, -s.cfg.Retention.UsageLogsDays) + + aggErr := s.repo.CleanupAggregates(ctx, hourlyCutoff, dailyCutoff) + if aggErr != nil { + log.Printf("[DashboardAggregation] 聚合保留清理失败: %v", aggErr) + } + usageErr := s.repo.CleanupUsageLogs(ctx, usageCutoff) + if usageErr != nil { + log.Printf("[DashboardAggregation] usage_logs 保留清理失败: %v", usageErr) + } + if aggErr == nil && usageErr == nil { + s.lastRetentionCleanup.Store(now) + } +} + +func truncateToDayUTC(t time.Time) time.Time { + t = t.UTC() + return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC) +} diff --git a/backend/internal/service/dashboard_aggregation_service_test.go b/backend/internal/service/dashboard_aggregation_service_test.go new file mode 100644 index 00000000..2fc22105 --- /dev/null +++ b/backend/internal/service/dashboard_aggregation_service_test.go @@ -0,0 +1,106 @@ +package service + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +type dashboardAggregationRepoTestStub struct { + aggregateCalls int + lastStart time.Time + lastEnd time.Time + watermark time.Time + aggregateErr error + cleanupAggregatesErr error + cleanupUsageErr error +} + +func (s *dashboardAggregationRepoTestStub) AggregateRange(ctx context.Context, start, end time.Time) error { + s.aggregateCalls++ + s.lastStart = start + s.lastEnd = end + return s.aggregateErr +} + +func (s *dashboardAggregationRepoTestStub) GetAggregationWatermark(ctx context.Context) (time.Time, error) { + return s.watermark, nil +} + +func (s *dashboardAggregationRepoTestStub) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error { + return nil +} + +func (s *dashboardAggregationRepoTestStub) CleanupAggregates(ctx context.Context, hourlyCutoff, 
dailyCutoff time.Time) error { + return s.cleanupAggregatesErr +} + +func (s *dashboardAggregationRepoTestStub) CleanupUsageLogs(ctx context.Context, cutoff time.Time) error { + return s.cleanupUsageErr +} + +func (s *dashboardAggregationRepoTestStub) EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error { + return nil +} + +func TestDashboardAggregationService_RunScheduledAggregation_EpochUsesRetentionStart(t *testing.T) { + repo := &dashboardAggregationRepoTestStub{watermark: time.Unix(0, 0).UTC()} + svc := &DashboardAggregationService{ + repo: repo, + cfg: config.DashboardAggregationConfig{ + Enabled: true, + IntervalSeconds: 60, + LookbackSeconds: 120, + Retention: config.DashboardAggregationRetentionConfig{ + UsageLogsDays: 1, + HourlyDays: 1, + DailyDays: 1, + }, + }, + } + + svc.runScheduledAggregation() + + require.Equal(t, 1, repo.aggregateCalls) + require.False(t, repo.lastEnd.IsZero()) + require.Equal(t, truncateToDayUTC(repo.lastEnd.AddDate(0, 0, -1)), repo.lastStart) +} + +func TestDashboardAggregationService_CleanupRetentionFailure_DoesNotRecord(t *testing.T) { + repo := &dashboardAggregationRepoTestStub{cleanupAggregatesErr: errors.New("清理失败")} + svc := &DashboardAggregationService{ + repo: repo, + cfg: config.DashboardAggregationConfig{ + Retention: config.DashboardAggregationRetentionConfig{ + UsageLogsDays: 1, + HourlyDays: 1, + DailyDays: 1, + }, + }, + } + + svc.maybeCleanupRetention(context.Background(), time.Now().UTC()) + + require.Nil(t, svc.lastRetentionCleanup.Load()) +} + +func TestDashboardAggregationService_TriggerBackfill_TooLarge(t *testing.T) { + repo := &dashboardAggregationRepoTestStub{} + svc := &DashboardAggregationService{ + repo: repo, + cfg: config.DashboardAggregationConfig{ + BackfillEnabled: true, + BackfillMaxDays: 1, + }, + } + + start := time.Now().AddDate(0, 0, -3) + end := time.Now() + err := svc.TriggerBackfill(start, end) + require.ErrorIs(t, err, ErrDashboardBackfillTooLarge) + require.Equal(t, 0, 
repo.aggregateCalls) +} diff --git a/backend/internal/service/dashboard_service.go b/backend/internal/service/dashboard_service.go index f0b1f2a0..a9811919 100644 --- a/backend/internal/service/dashboard_service.go +++ b/backend/internal/service/dashboard_service.go @@ -2,47 +2,307 @@ package service import ( "context" + "encoding/json" + "errors" "fmt" + "log" + "sync/atomic" "time" + "github.com/Wei-Shaw/sub2api/internal/config" "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" ) -// DashboardService provides aggregated statistics for admin dashboard. -type DashboardService struct { - usageRepo UsageLogRepository +const ( + defaultDashboardStatsFreshTTL = 15 * time.Second + defaultDashboardStatsCacheTTL = 30 * time.Second + defaultDashboardStatsRefreshTimeout = 30 * time.Second +) + +// ErrDashboardStatsCacheMiss 标记仪表盘缓存未命中。 +var ErrDashboardStatsCacheMiss = errors.New("仪表盘缓存未命中") + +// DashboardStatsCache 定义仪表盘统计缓存接口。 +type DashboardStatsCache interface { + GetDashboardStats(ctx context.Context) (string, error) + SetDashboardStats(ctx context.Context, data string, ttl time.Duration) error + DeleteDashboardStats(ctx context.Context) error } -func NewDashboardService(usageRepo UsageLogRepository) *DashboardService { +type dashboardStatsRangeFetcher interface { + GetDashboardStatsWithRange(ctx context.Context, start, end time.Time) (*usagestats.DashboardStats, error) +} + +type dashboardStatsCacheEntry struct { + Stats *usagestats.DashboardStats `json:"stats"` + UpdatedAt int64 `json:"updated_at"` +} + +// DashboardService 提供管理员仪表盘统计服务。 +type DashboardService struct { + usageRepo UsageLogRepository + aggRepo DashboardAggregationRepository + cache DashboardStatsCache + cacheFreshTTL time.Duration + cacheTTL time.Duration + refreshTimeout time.Duration + refreshing int32 + aggEnabled bool + aggInterval time.Duration + aggLookback time.Duration + aggUsageDays int +} + +func NewDashboardService(usageRepo UsageLogRepository, aggRepo DashboardAggregationRepository, 
cache DashboardStatsCache, cfg *config.Config) *DashboardService { + freshTTL := defaultDashboardStatsFreshTTL + cacheTTL := defaultDashboardStatsCacheTTL + refreshTimeout := defaultDashboardStatsRefreshTimeout + aggEnabled := true + aggInterval := time.Minute + aggLookback := 2 * time.Minute + aggUsageDays := 90 + if cfg != nil { + if !cfg.Dashboard.Enabled { + cache = nil + } + if cfg.Dashboard.StatsFreshTTLSeconds > 0 { + freshTTL = time.Duration(cfg.Dashboard.StatsFreshTTLSeconds) * time.Second + } + if cfg.Dashboard.StatsTTLSeconds > 0 { + cacheTTL = time.Duration(cfg.Dashboard.StatsTTLSeconds) * time.Second + } + if cfg.Dashboard.StatsRefreshTimeoutSeconds > 0 { + refreshTimeout = time.Duration(cfg.Dashboard.StatsRefreshTimeoutSeconds) * time.Second + } + aggEnabled = cfg.DashboardAgg.Enabled + if cfg.DashboardAgg.IntervalSeconds > 0 { + aggInterval = time.Duration(cfg.DashboardAgg.IntervalSeconds) * time.Second + } + if cfg.DashboardAgg.LookbackSeconds > 0 { + aggLookback = time.Duration(cfg.DashboardAgg.LookbackSeconds) * time.Second + } + if cfg.DashboardAgg.Retention.UsageLogsDays > 0 { + aggUsageDays = cfg.DashboardAgg.Retention.UsageLogsDays + } + } + if aggRepo == nil { + aggEnabled = false + } return &DashboardService{ - usageRepo: usageRepo, + usageRepo: usageRepo, + aggRepo: aggRepo, + cache: cache, + cacheFreshTTL: freshTTL, + cacheTTL: cacheTTL, + refreshTimeout: refreshTimeout, + aggEnabled: aggEnabled, + aggInterval: aggInterval, + aggLookback: aggLookback, + aggUsageDays: aggUsageDays, } } func (s *DashboardService) GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) { - stats, err := s.usageRepo.GetDashboardStats(ctx) + if s.cache != nil { + cached, fresh, err := s.getCachedDashboardStats(ctx) + if err == nil && cached != nil { + s.refreshAggregationStaleness(cached) + if !fresh { + s.refreshDashboardStatsAsync() + } + return cached, nil + } + if err != nil && !errors.Is(err, ErrDashboardStatsCacheMiss) { + 
log.Printf("[Dashboard] 仪表盘缓存读取失败: %v", err) + } + } + + stats, err := s.refreshDashboardStats(ctx) if err != nil { return nil, fmt.Errorf("get dashboard stats: %w", err) } return stats, nil } -func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID int64) ([]usagestats.TrendDataPoint, error) { - trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID) +func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool) ([]usagestats.TrendDataPoint, error) { + trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream) if err != nil { return nil, fmt.Errorf("get usage trend with filters: %w", err) } return trend, nil } -func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID int64) ([]usagestats.ModelStat, error) { - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, 0) +func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool) ([]usagestats.ModelStat, error) { + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, accountID, groupID, stream) if err != nil { return nil, fmt.Errorf("get model stats with filters: %w", err) } return stats, nil } +func (s *DashboardService) getCachedDashboardStats(ctx context.Context) (*usagestats.DashboardStats, bool, error) { + data, err := s.cache.GetDashboardStats(ctx) + if err != nil { + return nil, false, err + } + + var entry dashboardStatsCacheEntry + if err := json.Unmarshal([]byte(data), &entry); err != nil { + s.evictDashboardStatsCache(err) 
+ return nil, false, ErrDashboardStatsCacheMiss + } + if entry.Stats == nil { + s.evictDashboardStatsCache(errors.New("仪表盘缓存缺少统计数据")) + return nil, false, ErrDashboardStatsCacheMiss + } + + age := time.Since(time.Unix(entry.UpdatedAt, 0)) + return entry.Stats, age <= s.cacheFreshTTL, nil +} + +func (s *DashboardService) refreshDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) { + stats, err := s.fetchDashboardStats(ctx) + if err != nil { + return nil, err + } + s.applyAggregationStatus(ctx, stats) + cacheCtx, cancel := s.cacheOperationContext() + defer cancel() + s.saveDashboardStatsCache(cacheCtx, stats) + return stats, nil +} + +func (s *DashboardService) refreshDashboardStatsAsync() { + if s.cache == nil { + return + } + if !atomic.CompareAndSwapInt32(&s.refreshing, 0, 1) { + return + } + + go func() { + defer atomic.StoreInt32(&s.refreshing, 0) + + ctx, cancel := context.WithTimeout(context.Background(), s.refreshTimeout) + defer cancel() + + stats, err := s.fetchDashboardStats(ctx) + if err != nil { + log.Printf("[Dashboard] 仪表盘缓存异步刷新失败: %v", err) + return + } + s.applyAggregationStatus(ctx, stats) + cacheCtx, cancel := s.cacheOperationContext() + defer cancel() + s.saveDashboardStatsCache(cacheCtx, stats) + }() +} + +func (s *DashboardService) fetchDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) { + if !s.aggEnabled { + if fetcher, ok := s.usageRepo.(dashboardStatsRangeFetcher); ok { + now := time.Now().UTC() + start := truncateToDayUTC(now.AddDate(0, 0, -s.aggUsageDays)) + return fetcher.GetDashboardStatsWithRange(ctx, start, now) + } + } + return s.usageRepo.GetDashboardStats(ctx) +} + +func (s *DashboardService) saveDashboardStatsCache(ctx context.Context, stats *usagestats.DashboardStats) { + if s.cache == nil || stats == nil { + return + } + + entry := dashboardStatsCacheEntry{ + Stats: stats, + UpdatedAt: time.Now().Unix(), + } + data, err := json.Marshal(entry) + if err != nil { + log.Printf("[Dashboard] 
仪表盘缓存序列化失败: %v", err) + return + } + + if err := s.cache.SetDashboardStats(ctx, string(data), s.cacheTTL); err != nil { + log.Printf("[Dashboard] 仪表盘缓存写入失败: %v", err) + } +} + +func (s *DashboardService) evictDashboardStatsCache(reason error) { + if s.cache == nil { + return + } + cacheCtx, cancel := s.cacheOperationContext() + defer cancel() + + if err := s.cache.DeleteDashboardStats(cacheCtx); err != nil { + log.Printf("[Dashboard] 仪表盘缓存清理失败: %v", err) + } + if reason != nil { + log.Printf("[Dashboard] 仪表盘缓存异常,已清理: %v", reason) + } +} + +func (s *DashboardService) cacheOperationContext() (context.Context, context.CancelFunc) { + return context.WithTimeout(context.Background(), s.refreshTimeout) +} + +func (s *DashboardService) applyAggregationStatus(ctx context.Context, stats *usagestats.DashboardStats) { + if stats == nil { + return + } + updatedAt := s.fetchAggregationUpdatedAt(ctx) + stats.StatsUpdatedAt = updatedAt.UTC().Format(time.RFC3339) + stats.StatsStale = s.isAggregationStale(updatedAt, time.Now().UTC()) +} + +func (s *DashboardService) refreshAggregationStaleness(stats *usagestats.DashboardStats) { + if stats == nil { + return + } + updatedAt := parseStatsUpdatedAt(stats.StatsUpdatedAt) + stats.StatsStale = s.isAggregationStale(updatedAt, time.Now().UTC()) +} + +func (s *DashboardService) fetchAggregationUpdatedAt(ctx context.Context) time.Time { + if s.aggRepo == nil { + return time.Unix(0, 0).UTC() + } + updatedAt, err := s.aggRepo.GetAggregationWatermark(ctx) + if err != nil { + log.Printf("[Dashboard] 读取聚合水位失败: %v", err) + return time.Unix(0, 0).UTC() + } + if updatedAt.IsZero() { + return time.Unix(0, 0).UTC() + } + return updatedAt.UTC() +} + +func (s *DashboardService) isAggregationStale(updatedAt, now time.Time) bool { + if !s.aggEnabled { + return true + } + epoch := time.Unix(0, 0).UTC() + if !updatedAt.After(epoch) { + return true + } + threshold := s.aggInterval + s.aggLookback + return now.Sub(updatedAt) > threshold +} + +func 
parseStatsUpdatedAt(raw string) time.Time { + if raw == "" { + return time.Unix(0, 0).UTC() + } + parsed, err := time.Parse(time.RFC3339, raw) + if err != nil { + return time.Unix(0, 0).UTC() + } + return parsed.UTC() +} + func (s *DashboardService) GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error) { trend, err := s.usageRepo.GetAPIKeyUsageTrend(ctx, startTime, endTime, granularity, limit) if err != nil { diff --git a/backend/internal/service/dashboard_service_test.go b/backend/internal/service/dashboard_service_test.go new file mode 100644 index 00000000..db3c78c3 --- /dev/null +++ b/backend/internal/service/dashboard_service_test.go @@ -0,0 +1,387 @@ +package service + +import ( + "context" + "encoding/json" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + "github.com/stretchr/testify/require" +) + +type usageRepoStub struct { + UsageLogRepository + stats *usagestats.DashboardStats + rangeStats *usagestats.DashboardStats + err error + rangeErr error + calls int32 + rangeCalls int32 + rangeStart time.Time + rangeEnd time.Time + onCall chan struct{} +} + +func (s *usageRepoStub) GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) { + atomic.AddInt32(&s.calls, 1) + if s.onCall != nil { + select { + case s.onCall <- struct{}{}: + default: + } + } + if s.err != nil { + return nil, s.err + } + return s.stats, nil +} + +func (s *usageRepoStub) GetDashboardStatsWithRange(ctx context.Context, start, end time.Time) (*usagestats.DashboardStats, error) { + atomic.AddInt32(&s.rangeCalls, 1) + s.rangeStart = start + s.rangeEnd = end + if s.rangeErr != nil { + return nil, s.rangeErr + } + if s.rangeStats != nil { + return s.rangeStats, nil + } + return s.stats, nil +} + +type dashboardCacheStub struct { + get func(ctx context.Context) (string, 
error) + set func(ctx context.Context, data string, ttl time.Duration) error + del func(ctx context.Context) error + getCalls int32 + setCalls int32 + delCalls int32 + lastSetMu sync.Mutex + lastSet string +} + +func (c *dashboardCacheStub) GetDashboardStats(ctx context.Context) (string, error) { + atomic.AddInt32(&c.getCalls, 1) + if c.get != nil { + return c.get(ctx) + } + return "", ErrDashboardStatsCacheMiss +} + +func (c *dashboardCacheStub) SetDashboardStats(ctx context.Context, data string, ttl time.Duration) error { + atomic.AddInt32(&c.setCalls, 1) + c.lastSetMu.Lock() + c.lastSet = data + c.lastSetMu.Unlock() + if c.set != nil { + return c.set(ctx, data, ttl) + } + return nil +} + +func (c *dashboardCacheStub) DeleteDashboardStats(ctx context.Context) error { + atomic.AddInt32(&c.delCalls, 1) + if c.del != nil { + return c.del(ctx) + } + return nil +} + +type dashboardAggregationRepoStub struct { + watermark time.Time + err error +} + +func (s *dashboardAggregationRepoStub) AggregateRange(ctx context.Context, start, end time.Time) error { + return nil +} + +func (s *dashboardAggregationRepoStub) GetAggregationWatermark(ctx context.Context) (time.Time, error) { + if s.err != nil { + return time.Time{}, s.err + } + return s.watermark, nil +} + +func (s *dashboardAggregationRepoStub) UpdateAggregationWatermark(ctx context.Context, aggregatedAt time.Time) error { + return nil +} + +func (s *dashboardAggregationRepoStub) CleanupAggregates(ctx context.Context, hourlyCutoff, dailyCutoff time.Time) error { + return nil +} + +func (s *dashboardAggregationRepoStub) CleanupUsageLogs(ctx context.Context, cutoff time.Time) error { + return nil +} + +func (s *dashboardAggregationRepoStub) EnsureUsageLogsPartitions(ctx context.Context, now time.Time) error { + return nil +} + +func (c *dashboardCacheStub) readLastEntry(t *testing.T) dashboardStatsCacheEntry { + t.Helper() + c.lastSetMu.Lock() + data := c.lastSet + c.lastSetMu.Unlock() + + var entry 
dashboardStatsCacheEntry + err := json.Unmarshal([]byte(data), &entry) + require.NoError(t, err) + return entry +} + +func TestDashboardService_CacheHitFresh(t *testing.T) { + stats := &usagestats.DashboardStats{ + TotalUsers: 10, + StatsUpdatedAt: time.Unix(0, 0).UTC().Format(time.RFC3339), + StatsStale: true, + } + entry := dashboardStatsCacheEntry{ + Stats: stats, + UpdatedAt: time.Now().Unix(), + } + payload, err := json.Marshal(entry) + require.NoError(t, err) + + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return string(payload), nil + }, + } + repo := &usageRepoStub{ + stats: &usagestats.DashboardStats{TotalUsers: 99}, + } + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: true}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, stats, got) + require.Equal(t, int32(0), atomic.LoadInt32(&repo.calls)) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.getCalls)) + require.Equal(t, int32(0), atomic.LoadInt32(&cache.setCalls)) +} + +func TestDashboardService_CacheMiss_StoresCache(t *testing.T) { + stats := &usagestats.DashboardStats{ + TotalUsers: 7, + StatsUpdatedAt: time.Unix(0, 0).UTC().Format(time.RFC3339), + StatsStale: true, + } + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return "", ErrDashboardStatsCacheMiss + }, + } + repo := &usageRepoStub{stats: stats} + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: true}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + got, err := 
svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, stats, got) + require.Equal(t, int32(1), atomic.LoadInt32(&repo.calls)) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.getCalls)) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.setCalls)) + entry := cache.readLastEntry(t) + require.Equal(t, stats, entry.Stats) + require.WithinDuration(t, time.Now(), time.Unix(entry.UpdatedAt, 0), time.Second) +} + +func TestDashboardService_CacheDisabled_SkipsCache(t *testing.T) { + stats := &usagestats.DashboardStats{ + TotalUsers: 3, + StatsUpdatedAt: time.Unix(0, 0).UTC().Format(time.RFC3339), + StatsStale: true, + } + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return "", nil + }, + } + repo := &usageRepoStub{stats: stats} + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: false}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, stats, got) + require.Equal(t, int32(1), atomic.LoadInt32(&repo.calls)) + require.Equal(t, int32(0), atomic.LoadInt32(&cache.getCalls)) + require.Equal(t, int32(0), atomic.LoadInt32(&cache.setCalls)) +} + +func TestDashboardService_CacheHitStale_TriggersAsyncRefresh(t *testing.T) { + staleStats := &usagestats.DashboardStats{ + TotalUsers: 11, + StatsUpdatedAt: time.Unix(0, 0).UTC().Format(time.RFC3339), + StatsStale: true, + } + entry := dashboardStatsCacheEntry{ + Stats: staleStats, + UpdatedAt: time.Now().Add(-defaultDashboardStatsFreshTTL * 2).Unix(), + } + payload, err := json.Marshal(entry) + require.NoError(t, err) + + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return string(payload), nil + }, + } + refreshCh := make(chan struct{}, 1) + 
repo := &usageRepoStub{ + stats: &usagestats.DashboardStats{TotalUsers: 22}, + onCall: refreshCh, + } + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: true}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, staleStats, got) + + select { + case <-refreshCh: + case <-time.After(1 * time.Second): + t.Fatal("等待异步刷新超时") + } + require.Eventually(t, func() bool { + return atomic.LoadInt32(&cache.setCalls) >= 1 + }, 1*time.Second, 10*time.Millisecond) +} + +func TestDashboardService_CacheParseError_EvictsAndRefetches(t *testing.T) { + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return "not-json", nil + }, + } + stats := &usagestats.DashboardStats{TotalUsers: 9} + repo := &usageRepoStub{stats: stats} + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: true}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, stats, got) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.delCalls)) + require.Equal(t, int32(1), atomic.LoadInt32(&repo.calls)) +} + +func TestDashboardService_CacheParseError_RepoFailure(t *testing.T) { + cache := &dashboardCacheStub{ + get: func(ctx context.Context) (string, error) { + return "not-json", nil + }, + } + repo := &usageRepoStub{err: errors.New("db down")} + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: true}, + DashboardAgg: 
config.DashboardAggregationConfig{ + Enabled: true, + }, + } + svc := NewDashboardService(repo, aggRepo, cache, cfg) + + _, err := svc.GetDashboardStats(context.Background()) + require.Error(t, err) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.delCalls)) +} + +func TestDashboardService_StatsUpdatedAtEpochWhenMissing(t *testing.T) { + stats := &usagestats.DashboardStats{} + repo := &usageRepoStub{stats: stats} + aggRepo := &dashboardAggregationRepoStub{watermark: time.Unix(0, 0).UTC()} + cfg := &config.Config{Dashboard: config.DashboardCacheConfig{Enabled: false}} + svc := NewDashboardService(repo, aggRepo, nil, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, "1970-01-01T00:00:00Z", got.StatsUpdatedAt) + require.True(t, got.StatsStale) +} + +func TestDashboardService_StatsStaleFalseWhenFresh(t *testing.T) { + aggNow := time.Now().UTC().Truncate(time.Second) + stats := &usagestats.DashboardStats{} + repo := &usageRepoStub{stats: stats} + aggRepo := &dashboardAggregationRepoStub{watermark: aggNow} + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: false}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: true, + IntervalSeconds: 60, + LookbackSeconds: 120, + }, + } + svc := NewDashboardService(repo, aggRepo, nil, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, aggNow.Format(time.RFC3339), got.StatsUpdatedAt) + require.False(t, got.StatsStale) +} + +func TestDashboardService_AggDisabled_UsesUsageLogsFallback(t *testing.T) { + expected := &usagestats.DashboardStats{TotalUsers: 42} + repo := &usageRepoStub{ + rangeStats: expected, + err: errors.New("should not call aggregated stats"), + } + cfg := &config.Config{ + Dashboard: config.DashboardCacheConfig{Enabled: false}, + DashboardAgg: config.DashboardAggregationConfig{ + Enabled: false, + Retention: config.DashboardAggregationRetentionConfig{ + 
UsageLogsDays: 7, + }, + }, + } + svc := NewDashboardService(repo, nil, nil, cfg) + + got, err := svc.GetDashboardStats(context.Background()) + require.NoError(t, err) + require.Equal(t, int64(42), got.TotalUsers) + require.Equal(t, int32(0), atomic.LoadInt32(&repo.calls)) + require.Equal(t, int32(1), atomic.LoadInt32(&repo.rangeCalls)) + require.False(t, repo.rangeEnd.IsZero()) + require.Equal(t, truncateToDayUTC(repo.rangeEnd.AddDate(0, 0, -7)), repo.rangeStart) +} diff --git a/backend/internal/service/domain_constants.go b/backend/internal/service/domain_constants.go index df34e167..49bb86a7 100644 --- a/backend/internal/service/domain_constants.go +++ b/backend/internal/service/domain_constants.go @@ -38,6 +38,12 @@ const ( RedeemTypeSubscription = "subscription" ) +// PromoCode status constants +const ( + PromoCodeStatusActive = "active" + PromoCodeStatusDisabled = "disabled" +) + // Admin adjustment type constants const ( AdjustmentTypeAdminBalance = "admin_balance" // 管理员调整余额 @@ -57,6 +63,9 @@ const ( SubscriptionStatusSuspended = "suspended" ) +// LinuxDoConnectSyntheticEmailDomain 是 LinuxDo Connect 用户的合成邮箱后缀(RFC 保留域名)。 +const LinuxDoConnectSyntheticEmailDomain = "@linuxdo-connect.invalid" + // Setting keys const ( // 注册设置 @@ -77,6 +86,12 @@ const ( SettingKeyTurnstileSiteKey = "turnstile_site_key" // Turnstile Site Key SettingKeyTurnstileSecretKey = "turnstile_secret_key" // Turnstile Secret Key + // LinuxDo Connect OAuth 登录设置 + SettingKeyLinuxDoConnectEnabled = "linuxdo_connect_enabled" + SettingKeyLinuxDoConnectClientID = "linuxdo_connect_client_id" + SettingKeyLinuxDoConnectClientSecret = "linuxdo_connect_client_secret" + SettingKeyLinuxDoConnectRedirectURL = "linuxdo_connect_redirect_url" + // OEM设置 SettingKeySiteName = "site_name" // 网站名称 SettingKeySiteLogo = "site_logo" // 网站Logo (base64) @@ -84,6 +99,7 @@ const ( SettingKeyAPIBaseURL = "api_base_url" // API端点地址(用于客户端配置和导入) SettingKeyContactInfo = "contact_info" // 客服联系方式 SettingKeyDocURL = "doc_url" 
// 文档链接 + SettingKeyHomeContent = "home_content" // 首页内容(支持 Markdown/HTML,或 URL 作为 iframe src) // 默认配置 SettingKeyDefaultConcurrency = "default_concurrency" // 新用户默认并发量 @@ -106,16 +122,38 @@ const ( SettingKeyEnableIdentityPatch = "enable_identity_patch" SettingKeyIdentityPatchPrompt = "identity_patch_prompt" - // LinuxDo Connect OAuth 登录(终端用户 SSO) - SettingKeyLinuxDoConnectEnabled = "linuxdo_connect_enabled" - SettingKeyLinuxDoConnectClientID = "linuxdo_connect_client_id" - SettingKeyLinuxDoConnectClientSecret = "linuxdo_connect_client_secret" - SettingKeyLinuxDoConnectRedirectURL = "linuxdo_connect_redirect_url" -) + // ========================= + // Ops Monitoring (vNext) + // ========================= -// LinuxDoConnectSyntheticEmailDomain 是 LinuxDo Connect 用户的合成邮箱后缀(RFC 保留域名)。 -// 目的:避免第三方登录返回的用户标识与本地真实邮箱发生碰撞,进而造成账号被接管的风险。 -const LinuxDoConnectSyntheticEmailDomain = "@linuxdo-connect.invalid" + // SettingKeyOpsMonitoringEnabled is a DB-backed soft switch to enable/disable ops module at runtime. + SettingKeyOpsMonitoringEnabled = "ops_monitoring_enabled" + + // SettingKeyOpsRealtimeMonitoringEnabled controls realtime features (e.g. WS/QPS push). + SettingKeyOpsRealtimeMonitoringEnabled = "ops_realtime_monitoring_enabled" + + // SettingKeyOpsQueryModeDefault controls the default query mode for ops dashboard (auto/raw/preagg). + SettingKeyOpsQueryModeDefault = "ops_query_mode_default" + + // SettingKeyOpsEmailNotificationConfig stores JSON config for ops email notifications. + SettingKeyOpsEmailNotificationConfig = "ops_email_notification_config" + + // SettingKeyOpsAlertRuntimeSettings stores JSON config for ops alert evaluator runtime settings. + SettingKeyOpsAlertRuntimeSettings = "ops_alert_runtime_settings" + + // SettingKeyOpsMetricsIntervalSeconds controls the ops metrics collector interval (>=60). 
+ SettingKeyOpsMetricsIntervalSeconds = "ops_metrics_interval_seconds" + + // SettingKeyOpsAdvancedSettings stores JSON config for ops advanced settings (data retention, aggregation). + SettingKeyOpsAdvancedSettings = "ops_advanced_settings" + + // ========================= + // Stream Timeout Handling + // ========================= + + // SettingKeyStreamTimeoutSettings stores JSON config for stream timeout handling. + SettingKeyStreamTimeoutSettings = "stream_timeout_settings" +) // AdminAPIKeyPrefix is the prefix for admin API keys (distinct from user "sk-" keys). const AdminAPIKeyPrefix = "admin-" diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index 0039ac1d..4d17d5e1 100644 --- a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -9,6 +9,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/stretchr/testify/require" ) @@ -23,9 +24,11 @@ type mockAccountRepoForPlatform struct { accounts []Account accountsByID map[int64]*Account listPlatformFunc func(ctx context.Context, platform string) ([]Account, error) + getByIDCalls int } func (m *mockAccountRepoForPlatform) GetByID(ctx context.Context, id int64) (*Account, error) { + m.getByIDCalls++ if acc, ok := m.accountsByID[id]; ok { return acc, nil } @@ -142,6 +145,9 @@ func (m *mockAccountRepoForPlatform) SetRateLimited(ctx context.Context, id int6 func (m *mockAccountRepoForPlatform) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { return nil } +func (m *mockAccountRepoForPlatform) SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error { + return nil +} func (m *mockAccountRepoForPlatform) SetOverloaded(ctx context.Context, id int64, until 
time.Time) error { return nil } @@ -157,6 +163,9 @@ func (m *mockAccountRepoForPlatform) ClearRateLimit(ctx context.Context, id int6 func (m *mockAccountRepoForPlatform) ClearAntigravityQuotaScopes(ctx context.Context, id int64) error { return nil } +func (m *mockAccountRepoForPlatform) ClearModelRateLimits(ctx context.Context, id int64) error { + return nil +} func (m *mockAccountRepoForPlatform) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { return nil } @@ -194,6 +203,56 @@ func (m *mockGatewayCacheForPlatform) RefreshSessionTTL(ctx context.Context, gro return nil } +type mockGroupRepoForGateway struct { + groups map[int64]*Group + getByIDCalls int + getByIDLiteCalls int +} + +func (m *mockGroupRepoForGateway) GetByID(ctx context.Context, id int64) (*Group, error) { + m.getByIDCalls++ + if g, ok := m.groups[id]; ok { + return g, nil + } + return nil, ErrGroupNotFound +} + +func (m *mockGroupRepoForGateway) GetByIDLite(ctx context.Context, id int64) (*Group, error) { + m.getByIDLiteCalls++ + if g, ok := m.groups[id]; ok { + return g, nil + } + return nil, ErrGroupNotFound +} + +func (m *mockGroupRepoForGateway) Create(ctx context.Context, group *Group) error { return nil } +func (m *mockGroupRepoForGateway) Update(ctx context.Context, group *Group) error { return nil } +func (m *mockGroupRepoForGateway) Delete(ctx context.Context, id int64) error { return nil } +func (m *mockGroupRepoForGateway) DeleteCascade(ctx context.Context, id int64) ([]int64, error) { + return nil, nil +} +func (m *mockGroupRepoForGateway) List(ctx context.Context, params pagination.PaginationParams) ([]Group, *pagination.PaginationResult, error) { + return nil, nil, nil +} +func (m *mockGroupRepoForGateway) ListWithFilters(ctx context.Context, params pagination.PaginationParams, platform, status, search string, isExclusive *bool) ([]Group, *pagination.PaginationResult, error) { + return nil, nil, nil +} +func (m *mockGroupRepoForGateway) 
ListActive(ctx context.Context) ([]Group, error) { + return nil, nil +} +func (m *mockGroupRepoForGateway) ListActiveByPlatform(ctx context.Context, platform string) ([]Group, error) { + return nil, nil +} +func (m *mockGroupRepoForGateway) ExistsByName(ctx context.Context, name string) (bool, error) { + return false, nil +} +func (m *mockGroupRepoForGateway) GetAccountCount(ctx context.Context, groupID int64) (int64, error) { + return 0, nil +} +func (m *mockGroupRepoForGateway) DeleteAccountGroupsByGroupID(ctx context.Context, groupID int64) (int64, error) { + return 0, nil +} + func ptr[T any](v T) *T { return &v } @@ -900,6 +959,74 @@ func (m *mockConcurrencyService) GetAccountWaitingCount(ctx context.Context, acc return m.accountWaitCounts[accountID], nil } +type mockConcurrencyCache struct { + acquireAccountCalls int + loadBatchCalls int +} + +func (m *mockConcurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) { + m.acquireAccountCalls++ + return true, nil +} + +func (m *mockConcurrencyCache) ReleaseAccountSlot(ctx context.Context, accountID int64, requestID string) error { + return nil +} + +func (m *mockConcurrencyCache) GetAccountConcurrency(ctx context.Context, accountID int64) (int, error) { + return 0, nil +} + +func (m *mockConcurrencyCache) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { + return true, nil +} + +func (m *mockConcurrencyCache) DecrementAccountWaitCount(ctx context.Context, accountID int64) error { + return nil +} + +func (m *mockConcurrencyCache) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { + return 0, nil +} + +func (m *mockConcurrencyCache) AcquireUserSlot(ctx context.Context, userID int64, maxConcurrency int, requestID string) (bool, error) { + return true, nil +} + +func (m *mockConcurrencyCache) ReleaseUserSlot(ctx context.Context, userID int64, requestID string) error { + return nil 
+} + +func (m *mockConcurrencyCache) GetUserConcurrency(ctx context.Context, userID int64) (int, error) { + return 0, nil +} + +func (m *mockConcurrencyCache) IncrementWaitCount(ctx context.Context, userID int64, maxWait int) (bool, error) { + return true, nil +} + +func (m *mockConcurrencyCache) DecrementWaitCount(ctx context.Context, userID int64) error { + return nil +} + +func (m *mockConcurrencyCache) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { + m.loadBatchCalls++ + result := make(map[int64]*AccountLoadInfo, len(accounts)) + for _, acc := range accounts { + result[acc.ID] = &AccountLoadInfo{ + AccountID: acc.ID, + CurrentConcurrency: 0, + WaitingCount: 0, + LoadRate: 0, + } + } + return result, nil +} + +func (m *mockConcurrencyCache) CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error { + return nil +} + // TestGatewayService_SelectAccountWithLoadAwareness tests load-aware account selection func TestGatewayService_SelectAccountWithLoadAwareness(t *testing.T) { ctx := context.Background() @@ -928,13 +1055,67 @@ func TestGatewayService_SelectAccountWithLoadAwareness(t *testing.T) { concurrencyService: nil, // No concurrency service } - result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, "") require.NoError(t, err) require.NotNil(t, result) require.NotNil(t, result.Account) require.Equal(t, int64(1), result.Account.ID, "应选择优先级最高的账号") }) + t.Run("模型路由-无ConcurrencyService也生效", func(t *testing.T) { + groupID := int64(1) + sessionHash := "sticky" + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5, AccountGroups: []AccountGroup{{GroupID: groupID}}}, + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: 
StatusActive, Schedulable: true, Concurrency: 5, AccountGroups: []AccountGroup{{GroupID: groupID}}}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{sessionHash: 1}, + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: { + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + ModelRoutingEnabled: true, + ModelRouting: map[string][]int64{ + "claude-a": {1}, + "claude-b": {2}, + }, + }, + }, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + cfg: cfg, + concurrencyService: nil, // legacy path + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, &groupID, sessionHash, "claude-b", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "切换到 claude-b 时应按模型路由切换账号") + require.Equal(t, int64(2), cache.sessionBindings[sessionHash], "粘性绑定应更新为路由选择的账号") + }) + t.Run("无ConcurrencyService-降级到传统选择", func(t *testing.T) { repo := &mockAccountRepoForPlatform{ accounts: []Account{ @@ -959,7 +1140,7 @@ func TestGatewayService_SelectAccountWithLoadAwareness(t *testing.T) { concurrencyService: nil, } - result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, "") require.NoError(t, err) require.NotNil(t, result) require.NotNil(t, result.Account) @@ -991,13 +1172,85 @@ func TestGatewayService_SelectAccountWithLoadAwareness(t *testing.T) { } excludedIDs := map[int64]struct{}{1: {}} - result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", excludedIDs) + 
result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", excludedIDs, "") require.NoError(t, err) require.NotNil(t, result) require.NotNil(t, result.Account) require.Equal(t, int64(2), result.Account.ID, "不应选择被排除的账号") }) + t.Run("粘性命中-不调用GetByID", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"sticky": 1}, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{} + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "sticky", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(1), result.Account.ID) + require.Equal(t, 0, repo.getByIDCalls, "粘性命中不应调用GetByID") + require.Equal(t, 0, concurrencyCache.loadBatchCalls, "粘性命中应在负载批量查询前返回") + }) + + t.Run("粘性账号不在候选集-回退负载感知选择", func(t *testing.T) { + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 2, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{ + sessionBindings: map[string]int64{"sticky": 1}, + } + + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = true + + concurrencyCache := &mockConcurrencyCache{} + + svc := &GatewayService{ + 
accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "sticky", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "粘性账号不在候选集时应回退到可用账号") + require.Equal(t, 0, repo.getByIDCalls, "粘性账号缺失不应回退到GetByID") + require.Equal(t, 1, concurrencyCache.loadBatchCalls, "应继续进行负载批量查询") + }) + t.Run("无可用账号-返回错误", func(t *testing.T) { repo := &mockAccountRepoForPlatform{ accounts: []Account{}, @@ -1016,9 +1269,264 @@ func TestGatewayService_SelectAccountWithLoadAwareness(t *testing.T) { concurrencyService: nil, } - result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil) + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, "") require.Error(t, err) require.Nil(t, result) require.Contains(t, err.Error(), "no available accounts") }) + + t.Run("过滤不可调度账号-限流账号被跳过", func(t *testing.T) { + now := time.Now() + resetAt := now.Add(10 * time.Minute) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5, RateLimitResetAt: &resetAt}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + 
require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "应跳过限流账号,选择可用账号") + }) + + t.Run("过滤不可调度账号-过载账号被跳过", func(t *testing.T) { + now := time.Now() + overloadUntil := now.Add(10 * time.Minute) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true, Concurrency: 5, OverloadUntil: &overloadUntil}, + {ID: 2, Platform: PlatformAnthropic, Priority: 2, Status: StatusActive, Schedulable: true, Concurrency: 5}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForPlatform{} + cfg := testConfig() + cfg.Gateway.Scheduling.LoadBatchEnabled = false + + svc := &GatewayService{ + accountRepo: repo, + cache: cache, + cfg: cfg, + concurrencyService: nil, + } + + result, err := svc.SelectAccountWithLoadAwareness(ctx, nil, "", "claude-3-5-sonnet-20241022", nil, "") + require.NoError(t, err) + require.NotNil(t, result) + require.NotNil(t, result.Account) + require.Equal(t, int64(2), result.Account.ID, "应跳过过载账号,选择可用账号") + }) +} + +func TestGatewayService_GroupResolution_ReusesContextGroup(t *testing.T) { + ctx := context.Background() + groupID := int64(42) + group := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + } + ctx = context.WithValue(ctx, ctxkey.Group, group) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{groupID: group}, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cfg: testConfig(), + } + + 
account, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, account) + require.Equal(t, 0, groupRepo.getByIDCalls) + require.Equal(t, 0, groupRepo.getByIDLiteCalls) +} + +func TestGatewayService_GroupResolution_IgnoresInvalidContextGroup(t *testing.T) { + ctx := context.Background() + groupID := int64(42) + ctxGroup := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + } + ctx = context.WithValue(ctx, ctxkey.Group, ctxGroup) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + group := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + } + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{groupID: group}, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cfg: testConfig(), + } + + account, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, account) + require.Equal(t, 0, groupRepo.getByIDCalls) + require.Equal(t, 1, groupRepo.getByIDLiteCalls) +} + +func TestGatewayService_GroupContext_OverwritesInvalidContextGroup(t *testing.T) { + groupID := int64(42) + invalidGroup := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + } + hydratedGroup := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + } + + ctx := context.WithValue(context.Background(), ctxkey.Group, invalidGroup) + svc := &GatewayService{} + ctx = svc.withGroupContext(ctx, hydratedGroup) + + got, ok := ctx.Value(ctxkey.Group).(*Group) + require.True(t, ok) + require.Same(t, 
hydratedGroup, got) +} + +func TestGatewayService_GroupResolution_FallbackUsesLiteOnce(t *testing.T) { + ctx := context.Background() + groupID := int64(10) + fallbackID := int64(11) + group := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + ClaudeCodeOnly: true, + FallbackGroupID: &fallbackID, + Hydrated: true, + } + fallbackGroup := &Group{ + ID: fallbackID, + Platform: PlatformAnthropic, + Status: StatusActive, + Hydrated: true, + } + ctx = context.WithValue(ctx, ctxkey.Group, group) + + repo := &mockAccountRepoForPlatform{ + accounts: []Account{ + {ID: 1, Platform: PlatformAnthropic, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{fallbackID: fallbackGroup}, + } + + svc := &GatewayService{ + accountRepo: repo, + groupRepo: groupRepo, + cfg: testConfig(), + } + + account, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "claude-3-5-sonnet-20241022", nil) + require.NoError(t, err) + require.NotNil(t, account) + require.Equal(t, 0, groupRepo.getByIDCalls) + require.Equal(t, 1, groupRepo.getByIDLiteCalls) +} + +func TestGatewayService_ResolveGatewayGroup_DetectsFallbackCycle(t *testing.T) { + ctx := context.Background() + groupID := int64(10) + fallbackID := int64(11) + + group := &Group{ + ID: groupID, + Platform: PlatformAnthropic, + Status: StatusActive, + ClaudeCodeOnly: true, + FallbackGroupID: &fallbackID, + } + fallbackGroup := &Group{ + ID: fallbackID, + Platform: PlatformAnthropic, + Status: StatusActive, + ClaudeCodeOnly: true, + FallbackGroupID: &groupID, + } + + groupRepo := &mockGroupRepoForGateway{ + groups: map[int64]*Group{ + groupID: group, + fallbackID: fallbackGroup, + }, + } + + svc := &GatewayService{ + groupRepo: groupRepo, + } + + gotGroup, gotID, err := 
svc.resolveGatewayGroup(ctx, &groupID) + require.Error(t, err) + require.Nil(t, gotGroup) + require.Nil(t, gotID) + require.Contains(t, err.Error(), "fallback group cycle") } diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 72343e2c..9cb15e4a 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -13,6 +13,7 @@ import ( "log" mathrand "math/rand" "net/http" + "os" "regexp" "sort" "strings" @@ -39,6 +40,21 @@ const ( maxCacheControlBlocks = 4 // Anthropic API 允许的最大 cache_control 块数量 ) +func (s *GatewayService) debugModelRoutingEnabled() bool { + v := strings.ToLower(strings.TrimSpace(os.Getenv("SUB2API_DEBUG_MODEL_ROUTING"))) + return v == "1" || v == "true" || v == "yes" || v == "on" +} + +func shortSessionHash(sessionHash string) string { + if sessionHash == "" { + return "" + } + if len(sessionHash) <= 8 { + return sessionHash + } + return sessionHash[:8] +} + // sseDataRe matches SSE data lines with optional whitespace after colon. // Some upstream APIs return non-standard "data:" without space (should be "data: "). 
var ( @@ -152,6 +168,7 @@ type GatewayService struct { userSubRepo UserSubscriptionRepository cache GatewayCache cfg *config.Config + schedulerSnapshot *SchedulerSnapshotService billingService *BillingService rateLimitService *RateLimitService billingCacheService *BillingCacheService @@ -159,6 +176,8 @@ type GatewayService struct { httpUpstream HTTPUpstream deferredService *DeferredService concurrencyService *ConcurrencyService + claudeTokenProvider *ClaudeTokenProvider + sessionLimitCache SessionLimitCache // 会话数量限制缓存(仅 Anthropic OAuth/SetupToken) } // NewGatewayService creates a new GatewayService @@ -170,6 +189,7 @@ func NewGatewayService( userSubRepo UserSubscriptionRepository, cache GatewayCache, cfg *config.Config, + schedulerSnapshot *SchedulerSnapshotService, concurrencyService *ConcurrencyService, billingService *BillingService, rateLimitService *RateLimitService, @@ -177,6 +197,8 @@ func NewGatewayService( identityService *IdentityService, httpUpstream HTTPUpstream, deferredService *DeferredService, + claudeTokenProvider *ClaudeTokenProvider, + sessionLimitCache SessionLimitCache, ) *GatewayService { return &GatewayService{ accountRepo: accountRepo, @@ -186,6 +208,7 @@ func NewGatewayService( userSubRepo: userSubRepo, cache: cache, cfg: cfg, + schedulerSnapshot: schedulerSnapshot, concurrencyService: concurrencyService, billingService: billingService, rateLimitService: rateLimitService, @@ -193,6 +216,8 @@ func NewGatewayService( identityService: identityService, httpUpstream: httpUpstream, deferredService: deferredService, + claudeTokenProvider: claudeTokenProvider, + sessionLimitCache: sessionLimitCache, } } @@ -362,27 +387,13 @@ func (s *GatewayService) SelectAccountForModelWithExclusions(ctx context.Context if hasForcePlatform && forcePlatform != "" { platform = forcePlatform } else if groupID != nil { - // 根据分组 platform 决定查询哪种账号 - group, err := s.groupRepo.GetByID(ctx, *groupID) + group, resolvedGroupID, err := s.resolveGatewayGroup(ctx, groupID) if 
err != nil { - return nil, fmt.Errorf("get group failed: %w", err) + return nil, err } + groupID = resolvedGroupID + ctx = s.withGroupContext(ctx, group) platform = group.Platform - - // 检查 Claude Code 客户端限制 - if group.ClaudeCodeOnly { - isClaudeCode := IsClaudeCodeClient(ctx) - if !isClaudeCode { - // 非 Claude Code 客户端,检查是否有降级分组 - if group.FallbackGroupID != nil { - // 使用降级分组重新调度 - fallbackGroupID := *group.FallbackGroupID - return s.SelectAccountForModelWithExclusions(ctx, &fallbackGroupID, sessionHash, requestedModel, excludedIDs) - } - // 无降级分组,拒绝访问 - return nil, ErrClaudeCodeOnly - } - } } else { // 无分组时只使用原生 anthropic 平台 platform = PlatformAnthropic @@ -400,8 +411,12 @@ func (s *GatewayService) SelectAccountForModelWithExclusions(ctx context.Context } // SelectAccountWithLoadAwareness selects account with load-awareness and wait plan. -func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*AccountSelectionResult, error) { +// metadataUserID: 原始 metadata.user_id 字段(用于提取会话 UUID 进行会话数量限制) +func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, metadataUserID string) (*AccountSelectionResult, error) { cfg := s.schedulingConfig() + // 提取会话 UUID(用于会话数量限制) + sessionUUID := extractSessionUUID(metadataUserID) + var stickyAccountID int64 if sessionHash != "" && s.cache != nil { if accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash); err == nil { @@ -410,10 +425,20 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro } // 检查 Claude Code 客户端限制(可能会替换 groupID 为降级分组) - groupID, err := s.checkClaudeCodeRestriction(ctx, groupID) + group, groupID, err := s.checkClaudeCodeRestriction(ctx, groupID) if err != nil { return nil, err } + ctx = s.withGroupContext(ctx, group) + + if 
s.debugModelRoutingEnabled() && requestedModel != "" { + groupPlatform := "" + if group != nil { + groupPlatform = group.Platform + } + log.Printf("[ModelRoutingDebug] select entry: group_id=%v group_platform=%s model=%s session=%s sticky_account=%d load_batch=%v concurrency=%v", + derefGroupID(groupID), groupPlatform, requestedModel, shortSessionHash(sessionHash), stickyAccountID, cfg.LoadBatchEnabled, s.concurrencyService != nil) + } if s.concurrencyService == nil || !cfg.LoadBatchEnabled { account, err := s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs) @@ -453,11 +478,14 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro }, nil } - platform, hasForcePlatform, err := s.resolvePlatform(ctx, groupID) + platform, hasForcePlatform, err := s.resolvePlatform(ctx, groupID, group) if err != nil { return nil, err } preferOAuth := platform == PlatformGemini + if s.debugModelRoutingEnabled() && platform == PlatformAnthropic && requestedModel != "" { + log.Printf("[ModelRoutingDebug] load-aware enabled: group_id=%v model=%s session=%s platform=%s", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), platform) + } accounts, useMixed, err := s.listSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) if err != nil { @@ -475,23 +503,242 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro return excluded } - // ============ Layer 1: 粘性会话优先 ============ - if sessionHash != "" && s.cache != nil { + // 提前构建 accountByID(供 Layer 1 和 Layer 1.5 使用) + accountByID := make(map[int64]*Account, len(accounts)) + for i := range accounts { + accountByID[accounts[i].ID] = &accounts[i] + } + + // 获取模型路由配置(仅 anthropic 平台) + var routingAccountIDs []int64 + if group != nil && requestedModel != "" && group.Platform == PlatformAnthropic { + routingAccountIDs = group.GetRoutingAccountIDs(requestedModel) + if s.debugModelRoutingEnabled() { + 
log.Printf("[ModelRoutingDebug] context group routing: group_id=%d model=%s enabled=%v rules=%d matched_ids=%v session=%s sticky_account=%d", + group.ID, requestedModel, group.ModelRoutingEnabled, len(group.ModelRouting), routingAccountIDs, shortSessionHash(sessionHash), stickyAccountID) + if len(routingAccountIDs) == 0 && group.ModelRoutingEnabled && len(group.ModelRouting) > 0 { + keys := make([]string, 0, len(group.ModelRouting)) + for k := range group.ModelRouting { + keys = append(keys, k) + } + sort.Strings(keys) + const maxKeys = 20 + if len(keys) > maxKeys { + keys = keys[:maxKeys] + } + log.Printf("[ModelRoutingDebug] context group routing miss: group_id=%d model=%s patterns(sample)=%v", group.ID, requestedModel, keys) + } + } + } + + // ============ Layer 1: 模型路由优先选择(优先级高于粘性会话) ============ + if len(routingAccountIDs) > 0 && s.concurrencyService != nil { + // 1. 过滤出路由列表中可调度的账号 + var routingCandidates []*Account + var filteredExcluded, filteredMissing, filteredUnsched, filteredPlatform, filteredModelScope, filteredModelMapping, filteredWindowCost int + for _, routingAccountID := range routingAccountIDs { + if isExcluded(routingAccountID) { + filteredExcluded++ + continue + } + account, ok := accountByID[routingAccountID] + if !ok || !account.IsSchedulable() { + if !ok { + filteredMissing++ + } else { + filteredUnsched++ + } + continue + } + if !s.isAccountAllowedForPlatform(account, platform, useMixed) { + filteredPlatform++ + continue + } + if !account.IsSchedulableForModel(requestedModel) { + filteredModelScope++ + continue + } + if requestedModel != "" && !s.isModelSupportedByAccount(account, requestedModel) { + filteredModelMapping++ + continue + } + // 窗口费用检查(非粘性会话路径) + if !s.isAccountSchedulableForWindowCost(ctx, account, false) { + filteredWindowCost++ + continue + } + routingCandidates = append(routingCandidates, account) + } + + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] routed candidates: group_id=%v model=%s routed=%d 
candidates=%d filtered(excluded=%d missing=%d unsched=%d platform=%d model_scope=%d model_mapping=%d window_cost=%d)", + derefGroupID(groupID), requestedModel, len(routingAccountIDs), len(routingCandidates), + filteredExcluded, filteredMissing, filteredUnsched, filteredPlatform, filteredModelScope, filteredModelMapping, filteredWindowCost) + } + + if len(routingCandidates) > 0 { + // 1.5. 在路由账号范围内检查粘性会话 + if sessionHash != "" && s.cache != nil { + stickyAccountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash) + if err == nil && stickyAccountID > 0 && containsInt64(routingAccountIDs, stickyAccountID) && !isExcluded(stickyAccountID) { + // 粘性账号在路由列表中,优先使用 + if stickyAccount, ok := accountByID[stickyAccountID]; ok { + if stickyAccount.IsSchedulable() && + s.isAccountAllowedForPlatform(stickyAccount, platform, useMixed) && + stickyAccount.IsSchedulableForModel(requestedModel) && + (requestedModel == "" || s.isModelSupportedByAccount(stickyAccount, requestedModel)) && + s.isAccountSchedulableForWindowCost(ctx, stickyAccount, true) { // 粘性会话窗口费用检查 + result, err := s.tryAcquireAccountSlot(ctx, stickyAccountID, stickyAccount.Concurrency) + if err == nil && result.Acquired { + // 会话数量限制检查 + if !s.checkAndRegisterSession(ctx, stickyAccount, sessionUUID) { + result.ReleaseFunc() // 释放槽位 + // 继续到负载感知选择 + } else { + _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL) + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] routed sticky hit: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), stickyAccountID) + } + return &AccountSelectionResult{ + Account: stickyAccount, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + } + + waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, stickyAccountID) + if waitingCount < cfg.StickySessionMaxWaiting { + return &AccountSelectionResult{ + Account: stickyAccount, + 
WaitPlan: &AccountWaitPlan{ + AccountID: stickyAccountID, + MaxConcurrency: stickyAccount.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } + // 粘性账号槽位满且等待队列已满,继续使用负载感知选择 + } + } + } + } + + // 2. 批量获取负载信息 + routingLoads := make([]AccountWithConcurrency, 0, len(routingCandidates)) + for _, acc := range routingCandidates { + routingLoads = append(routingLoads, AccountWithConcurrency{ + ID: acc.ID, + MaxConcurrency: acc.Concurrency, + }) + } + routingLoadMap, _ := s.concurrencyService.GetAccountsLoadBatch(ctx, routingLoads) + + // 3. 按负载感知排序 + type accountWithLoad struct { + account *Account + loadInfo *AccountLoadInfo + } + var routingAvailable []accountWithLoad + for _, acc := range routingCandidates { + loadInfo := routingLoadMap[acc.ID] + if loadInfo == nil { + loadInfo = &AccountLoadInfo{AccountID: acc.ID} + } + if loadInfo.LoadRate < 100 { + routingAvailable = append(routingAvailable, accountWithLoad{account: acc, loadInfo: loadInfo}) + } + } + + if len(routingAvailable) > 0 { + // 排序:优先级 > 负载率 > 最后使用时间 + sort.SliceStable(routingAvailable, func(i, j int) bool { + a, b := routingAvailable[i], routingAvailable[j] + if a.account.Priority != b.account.Priority { + return a.account.Priority < b.account.Priority + } + if a.loadInfo.LoadRate != b.loadInfo.LoadRate { + return a.loadInfo.LoadRate < b.loadInfo.LoadRate + } + switch { + case a.account.LastUsedAt == nil && b.account.LastUsedAt != nil: + return true + case a.account.LastUsedAt != nil && b.account.LastUsedAt == nil: + return false + case a.account.LastUsedAt == nil && b.account.LastUsedAt == nil: + return false + default: + return a.account.LastUsedAt.Before(*b.account.LastUsedAt) + } + }) + + // 4. 
尝试获取槽位 + for _, item := range routingAvailable { + result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency) + if err == nil && result.Acquired { + // 会话数量限制检查 + if !s.checkAndRegisterSession(ctx, item.account, sessionUUID) { + result.ReleaseFunc() // 释放槽位,继续尝试下一个账号 + continue + } + if sessionHash != "" && s.cache != nil { + _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, item.account.ID, stickySessionTTL) + } + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] routed select: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), item.account.ID) + } + return &AccountSelectionResult{ + Account: item.account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + } + + // 5. 所有路由账号槽位满,返回等待计划(选择负载最低的) + acc := routingAvailable[0].account + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] routed wait: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), acc.ID) + } + return &AccountSelectionResult{ + Account: acc, + WaitPlan: &AccountWaitPlan{ + AccountID: acc.ID, + MaxConcurrency: acc.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } + // 路由列表中的账号都不可用(负载率 >= 100),继续到 Layer 2 回退 + log.Printf("[ModelRouting] All routed accounts unavailable for model=%s, falling back to normal selection", requestedModel) + } + } + + // ============ Layer 1.5: 粘性会话(仅在无模型路由配置时生效) ============ + if len(routingAccountIDs) == 0 && sessionHash != "" && s.cache != nil { accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash) if err == nil && accountID > 0 && !isExcluded(accountID) { - account, err := s.accountRepo.GetByID(ctx, accountID) - if err == nil && s.isAccountInGroup(account, groupID) && + account, ok := accountByID[accountID] + if ok && s.isAccountInGroup(account, 
groupID) && s.isAccountAllowedForPlatform(account, platform, useMixed) && account.IsSchedulableForModel(requestedModel) && - (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) && + s.isAccountSchedulableForWindowCost(ctx, account, true) { // 粘性会话窗口费用检查 result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) if err == nil && result.Acquired { - _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL) - return &AccountSelectionResult{ - Account: account, - Acquired: true, - ReleaseFunc: result.ReleaseFunc, - }, nil + // 会话数量限制检查 + if !s.checkAndRegisterSession(ctx, account, sessionUUID) { + result.ReleaseFunc() // 释放槽位,继续到 Layer 2 + } else { + _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL) + return &AccountSelectionResult{ + Account: account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } } waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID) @@ -517,6 +764,12 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro if isExcluded(acc.ID) { continue } + // Scheduler snapshots can be temporarily stale (bucket rebuild is throttled); + // re-check schedulability here so recently rate-limited/overloaded accounts + // are not selected again before the bucket is rebuilt. 
+ if !acc.IsSchedulable() { + continue + } if !s.isAccountAllowedForPlatform(acc, platform, useMixed) { continue } @@ -526,6 +779,10 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) { continue } + // 窗口费用检查(非粘性会话路径) + if !s.isAccountSchedulableForWindowCost(ctx, acc, false) { + continue + } candidates = append(candidates, acc) } @@ -543,7 +800,7 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro loadMap, err := s.concurrencyService.GetAccountsLoadBatch(ctx, accountLoads) if err != nil { - if result, ok := s.tryAcquireByLegacyOrder(ctx, candidates, groupID, sessionHash, preferOAuth); ok { + if result, ok := s.tryAcquireByLegacyOrder(ctx, candidates, groupID, sessionHash, preferOAuth, sessionUUID); ok { return result, nil } } else { @@ -592,6 +849,11 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro for _, item := range available { result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency) if err == nil && result.Acquired { + // 会话数量限制检查 + if !s.checkAndRegisterSession(ctx, item.account, sessionUUID) { + result.ReleaseFunc() // 释放槽位,继续尝试下一个账号 + continue + } if sessionHash != "" && s.cache != nil { _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, item.account.ID, stickySessionTTL) } @@ -621,13 +883,18 @@ func (s *GatewayService) SelectAccountWithLoadAwareness(ctx context.Context, gro return nil, errors.New("no available accounts") } -func (s *GatewayService) tryAcquireByLegacyOrder(ctx context.Context, candidates []*Account, groupID *int64, sessionHash string, preferOAuth bool) (*AccountSelectionResult, bool) { +func (s *GatewayService) tryAcquireByLegacyOrder(ctx context.Context, candidates []*Account, groupID *int64, sessionHash string, preferOAuth bool, sessionUUID string) (*AccountSelectionResult, bool) { ordered := 
append([]*Account(nil), candidates...) sortAccountsByPriorityAndLastUsed(ordered, preferOAuth) for _, acc := range ordered { result, err := s.tryAcquireAccountSlot(ctx, acc.ID, acc.Concurrency) if err == nil && result.Acquired { + // 会话数量限制检查 + if !s.checkAndRegisterSession(ctx, acc, sessionUUID) { + result.ReleaseFunc() // 释放槽位,继续尝试下一个账号 + continue + } if sessionHash != "" && s.cache != nil { _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, acc.ID, stickySessionTTL) } @@ -656,51 +923,123 @@ func (s *GatewayService) schedulingConfig() config.GatewaySchedulingConfig { } } +func (s *GatewayService) withGroupContext(ctx context.Context, group *Group) context.Context { + if !IsGroupContextValid(group) { + return ctx + } + if existing, ok := ctx.Value(ctxkey.Group).(*Group); ok && existing != nil && existing.ID == group.ID && IsGroupContextValid(existing) { + return ctx + } + return context.WithValue(ctx, ctxkey.Group, group) +} + +func (s *GatewayService) groupFromContext(ctx context.Context, groupID int64) *Group { + if group, ok := ctx.Value(ctxkey.Group).(*Group); ok && IsGroupContextValid(group) && group.ID == groupID { + return group + } + return nil +} + +func (s *GatewayService) resolveGroupByID(ctx context.Context, groupID int64) (*Group, error) { + if group := s.groupFromContext(ctx, groupID); group != nil { + return group, nil + } + group, err := s.groupRepo.GetByIDLite(ctx, groupID) + if err != nil { + return nil, fmt.Errorf("get group failed: %w", err) + } + return group, nil +} + +func (s *GatewayService) routingAccountIDsForRequest(ctx context.Context, groupID *int64, requestedModel string, platform string) []int64 { + if groupID == nil || requestedModel == "" || platform != PlatformAnthropic { + return nil + } + group, err := s.resolveGroupByID(ctx, *groupID) + if err != nil || group == nil { + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] resolve group failed: group_id=%v model=%s platform=%s err=%v", 
derefGroupID(groupID), requestedModel, platform, err) + } + return nil + } + // Preserve existing behavior: model routing only applies to anthropic groups. + if group.Platform != PlatformAnthropic { + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] skip: non-anthropic group platform: group_id=%d group_platform=%s model=%s", group.ID, group.Platform, requestedModel) + } + return nil + } + ids := group.GetRoutingAccountIDs(requestedModel) + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] routing lookup: group_id=%d model=%s enabled=%v rules=%d matched_ids=%v", + group.ID, requestedModel, group.ModelRoutingEnabled, len(group.ModelRouting), ids) + } + return ids +} + +func (s *GatewayService) resolveGatewayGroup(ctx context.Context, groupID *int64) (*Group, *int64, error) { + if groupID == nil { + return nil, nil, nil + } + + currentID := *groupID + visited := map[int64]struct{}{} + for { + if _, seen := visited[currentID]; seen { + return nil, nil, fmt.Errorf("fallback group cycle detected") + } + visited[currentID] = struct{}{} + + group, err := s.resolveGroupByID(ctx, currentID) + if err != nil { + return nil, nil, err + } + + if !group.ClaudeCodeOnly || IsClaudeCodeClient(ctx) { + return group, ¤tID, nil + } + + if group.FallbackGroupID == nil { + return nil, nil, ErrClaudeCodeOnly + } + currentID = *group.FallbackGroupID + } +} + // checkClaudeCodeRestriction 检查分组的 Claude Code 客户端限制 // 如果分组启用了 claude_code_only 且请求不是来自 Claude Code 客户端: // - 有降级分组:返回降级分组的 ID // - 无降级分组:返回 ErrClaudeCodeOnly 错误 -func (s *GatewayService) checkClaudeCodeRestriction(ctx context.Context, groupID *int64) (*int64, error) { +func (s *GatewayService) checkClaudeCodeRestriction(ctx context.Context, groupID *int64) (*Group, *int64, error) { if groupID == nil { - return groupID, nil + return nil, groupID, nil } // 强制平台模式不检查 Claude Code 限制 if _, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string); hasForcePlatform { - return groupID, nil + return 
nil, groupID, nil } - group, err := s.groupRepo.GetByID(ctx, *groupID) + group, resolvedID, err := s.resolveGatewayGroup(ctx, groupID) if err != nil { - return nil, fmt.Errorf("get group failed: %w", err) + return nil, nil, err } - if !group.ClaudeCodeOnly { - return groupID, nil - } - - // 分组启用了 Claude Code 限制 - if IsClaudeCodeClient(ctx) { - return groupID, nil - } - - // 非 Claude Code 客户端,检查降级分组 - if group.FallbackGroupID != nil { - return group.FallbackGroupID, nil - } - - return nil, ErrClaudeCodeOnly + return group, resolvedID, nil } -func (s *GatewayService) resolvePlatform(ctx context.Context, groupID *int64) (string, bool, error) { +func (s *GatewayService) resolvePlatform(ctx context.Context, groupID *int64, group *Group) (string, bool, error) { forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string) if hasForcePlatform && forcePlatform != "" { return forcePlatform, true, nil } + if group != nil { + return group.Platform, false, nil + } if groupID != nil { - group, err := s.groupRepo.GetByID(ctx, *groupID) + group, err := s.resolveGroupByID(ctx, *groupID) if err != nil { - return "", false, fmt.Errorf("get group failed: %w", err) + return "", false, err } return group.Platform, false, nil } @@ -708,6 +1047,9 @@ func (s *GatewayService) resolvePlatform(ctx context.Context, groupID *int64) (s } func (s *GatewayService) listSchedulableAccounts(ctx context.Context, groupID *int64, platform string, hasForcePlatform bool) ([]Account, bool, error) { + if s.schedulerSnapshot != nil { + return s.schedulerSnapshot.ListSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) + } useMixed := (platform == PlatformAnthropic || platform == PlatformGemini) && !hasForcePlatform if useMixed { platforms := []string{platform, PlatformAntigravity} @@ -784,6 +1126,114 @@ func (s *GatewayService) tryAcquireAccountSlot(ctx context.Context, accountID in return s.concurrencyService.AcquireAccountSlot(ctx, accountID, maxConcurrency) } +// 
isAccountSchedulableForWindowCost 检查账号是否可根据窗口费用进行调度 +// 仅适用于 Anthropic OAuth/SetupToken 账号 +// 返回 true 表示可调度,false 表示不可调度 +func (s *GatewayService) isAccountSchedulableForWindowCost(ctx context.Context, account *Account, isSticky bool) bool { + // 只检查 Anthropic OAuth/SetupToken 账号 + if !account.IsAnthropicOAuthOrSetupToken() { + return true + } + + limit := account.GetWindowCostLimit() + if limit <= 0 { + return true // 未启用窗口费用限制 + } + + // 尝试从缓存获取窗口费用 + var currentCost float64 + if s.sessionLimitCache != nil { + if cost, hit, err := s.sessionLimitCache.GetWindowCost(ctx, account.ID); err == nil && hit { + currentCost = cost + goto checkSchedulability + } + } + + // 缓存未命中,从数据库查询 + { + var startTime time.Time + if account.SessionWindowStart != nil { + startTime = *account.SessionWindowStart + } else { + startTime = time.Now().Add(-5 * time.Hour) + } + + stats, err := s.usageLogRepo.GetAccountWindowStats(ctx, account.ID, startTime) + if err != nil { + // 失败开放:查询失败时允许调度 + return true + } + + // 使用标准费用(不含账号倍率) + currentCost = stats.StandardCost + + // 设置缓存(忽略错误) + if s.sessionLimitCache != nil { + _ = s.sessionLimitCache.SetWindowCost(ctx, account.ID, currentCost) + } + } + +checkSchedulability: + schedulability := account.CheckWindowCostSchedulability(currentCost) + + switch schedulability { + case WindowCostSchedulable: + return true + case WindowCostStickyOnly: + return isSticky + case WindowCostNotSchedulable: + return false + } + return true +} + +// checkAndRegisterSession 检查并注册会话,用于会话数量限制 +// 仅适用于 Anthropic OAuth/SetupToken 账号 +// 返回 true 表示允许(在限制内或会话已存在),false 表示拒绝(超出限制且是新会话) +func (s *GatewayService) checkAndRegisterSession(ctx context.Context, account *Account, sessionUUID string) bool { + // 只检查 Anthropic OAuth/SetupToken 账号 + if !account.IsAnthropicOAuthOrSetupToken() { + return true + } + + maxSessions := account.GetMaxSessions() + if maxSessions <= 0 || sessionUUID == "" { + return true // 未启用会话限制或无会话ID + } + + if s.sessionLimitCache == nil { + return 
true // 缓存不可用时允许通过 + } + + idleTimeout := time.Duration(account.GetSessionIdleTimeoutMinutes()) * time.Minute + + allowed, err := s.sessionLimitCache.RegisterSession(ctx, account.ID, sessionUUID, maxSessions, idleTimeout) + if err != nil { + // 失败开放:缓存错误时允许通过 + return true + } + return allowed +} + +// extractSessionUUID 从 metadata.user_id 中提取会话 UUID +// 格式: user_{64位hex}_account__session_{uuid} +func extractSessionUUID(metadataUserID string) string { + if metadataUserID == "" { + return "" + } + if match := sessionIDRegex.FindStringSubmatch(metadataUserID); len(match) > 1 { + return match[1] + } + return "" +} + +func (s *GatewayService) getSchedulableAccount(ctx context.Context, accountID int64) (*Account, error) { + if s.schedulerSnapshot != nil { + return s.schedulerSnapshot.GetAccount(ctx, accountID) + } + return s.accountRepo.GetByID(ctx, accountID) +} + func sortAccountsByPriorityAndLastUsed(accounts []*Account, preferOAuth bool) { sort.SliceStable(accounts, func(i, j int) bool { a, b := accounts[i], accounts[j] @@ -859,12 +1309,122 @@ func shuffleWithinPriority(accounts []*Account) { // selectAccountForModelWithPlatform 选择单平台账户(完全隔离) func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, platform string) (*Account, error) { preferOAuth := platform == PlatformGemini + routingAccountIDs := s.routingAccountIDsForRequest(ctx, groupID, requestedModel, platform) + + var accounts []Account + accountsLoaded := false + + // ============ Model Routing (legacy path): apply before sticky session ============ + // When load-awareness is disabled (e.g. concurrency service not configured), we still honor model routing + // so switching model can switch upstream account within the same sticky session. 
+ if len(routingAccountIDs) > 0 { + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] legacy routed begin: group_id=%v model=%s platform=%s session=%s routed_ids=%v", + derefGroupID(groupID), requestedModel, platform, shortSessionHash(sessionHash), routingAccountIDs) + } + // 1) Sticky session only applies if the bound account is within the routing set. + if sessionHash != "" && s.cache != nil { + accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash) + if err == nil && accountID > 0 && containsInt64(routingAccountIDs, accountID) { + if _, excluded := excludedIDs[accountID]; !excluded { + account, err := s.getSchedulableAccount(ctx, accountID) + // 检查账号分组归属和平台匹配(确保粘性会话不会跨分组或跨平台) + if err == nil && s.isAccountInGroup(account, groupID) && account.Platform == platform && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { + log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) + } + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] legacy routed sticky hit: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), accountID) + } + return account, nil + } + } + } + } + + // 2) Select an account from the routed candidates. 
+ forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string) + if hasForcePlatform && forcePlatform == "" { + hasForcePlatform = false + } + var err error + accounts, _, err = s.listSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) + if err != nil { + return nil, fmt.Errorf("query accounts failed: %w", err) + } + accountsLoaded = true + + routingSet := make(map[int64]struct{}, len(routingAccountIDs)) + for _, id := range routingAccountIDs { + if id > 0 { + routingSet[id] = struct{}{} + } + } + + var selected *Account + for i := range accounts { + acc := &accounts[i] + if _, ok := routingSet[acc.ID]; !ok { + continue + } + if _, excluded := excludedIDs[acc.ID]; excluded { + continue + } + // Scheduler snapshots can be temporarily stale; re-check schedulability here to + // avoid selecting accounts that were recently rate-limited/overloaded. + if !acc.IsSchedulable() { + continue + } + if !acc.IsSchedulableForModel(requestedModel) { + continue + } + if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) { + continue + } + if selected == nil { + selected = acc + continue + } + if acc.Priority < selected.Priority { + selected = acc + } else if acc.Priority == selected.Priority { + switch { + case acc.LastUsedAt == nil && selected.LastUsedAt != nil: + selected = acc + case acc.LastUsedAt != nil && selected.LastUsedAt == nil: + // keep selected (never used is preferred) + case acc.LastUsedAt == nil && selected.LastUsedAt == nil: + if preferOAuth && acc.Type != selected.Type && acc.Type == AccountTypeOAuth { + selected = acc + } + default: + if acc.LastUsedAt.Before(*selected.LastUsedAt) { + selected = acc + } + } + } + } + + if selected != nil { + if sessionHash != "" && s.cache != nil { + if err := s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, selected.ID, stickySessionTTL); err != nil { + log.Printf("set session account failed: session=%s account_id=%d err=%v", sessionHash, selected.ID, err) + } 
+ } + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] legacy routed select: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), selected.ID) + } + return selected, nil + } + log.Printf("[ModelRouting] No routed accounts available for model=%s, falling back to normal selection", requestedModel) + } + // 1. 查询粘性会话 if sessionHash != "" && s.cache != nil { accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash) if err == nil && accountID > 0 { if _, excluded := excludedIDs[accountID]; !excluded { - account, err := s.accountRepo.GetByID(ctx, accountID) + account, err := s.getSchedulableAccount(ctx, accountID) // 检查账号分组归属和平台匹配(确保粘性会话不会跨分组或跨平台) if err == nil && s.isAccountInGroup(account, groupID) && account.Platform == platform && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { @@ -877,18 +1437,16 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, } // 2. 
获取可调度账号列表(单平台) - var accounts []Account - var err error - if s.cfg.RunMode == config.RunModeSimple { - // 简易模式:忽略 groupID,查询所有可用账号 - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform) - } else if groupID != nil { - accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, platform) - } else { - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, platform) - } - if err != nil { - return nil, fmt.Errorf("query accounts failed: %w", err) + if !accountsLoaded { + forcePlatform, hasForcePlatform := ctx.Value(ctxkey.ForcePlatform).(string) + if hasForcePlatform && forcePlatform == "" { + hasForcePlatform = false + } + var err error + accounts, _, err = s.listSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) + if err != nil { + return nil, fmt.Errorf("query accounts failed: %w", err) + } } // 3. 按优先级+最久未用选择(考虑模型支持) @@ -898,6 +1456,11 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, if _, excluded := excludedIDs[acc.ID]; excluded { continue } + // Scheduler snapshots can be temporarily stale; re-check schedulability here to + // avoid selecting accounts that were recently rate-limited/overloaded. 
+ if !acc.IsSchedulable() { + continue + } if !acc.IsSchedulableForModel(requestedModel) { continue } @@ -948,15 +1511,123 @@ func (s *GatewayService) selectAccountForModelWithPlatform(ctx context.Context, // selectAccountWithMixedScheduling 选择账户(支持混合调度) // 查询原生平台账户 + 启用 mixed_scheduling 的 antigravity 账户 func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, nativePlatform string) (*Account, error) { - platforms := []string{nativePlatform, PlatformAntigravity} preferOAuth := nativePlatform == PlatformGemini + routingAccountIDs := s.routingAccountIDsForRequest(ctx, groupID, requestedModel, nativePlatform) + + var accounts []Account + accountsLoaded := false + + // ============ Model Routing (legacy path): apply before sticky session ============ + if len(routingAccountIDs) > 0 { + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] legacy mixed routed begin: group_id=%v model=%s platform=%s session=%s routed_ids=%v", + derefGroupID(groupID), requestedModel, nativePlatform, shortSessionHash(sessionHash), routingAccountIDs) + } + // 1) Sticky session only applies if the bound account is within the routing set. 
+ if sessionHash != "" && s.cache != nil { + accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash) + if err == nil && accountID > 0 && containsInt64(routingAccountIDs, accountID) { + if _, excluded := excludedIDs[accountID]; !excluded { + account, err := s.getSchedulableAccount(ctx, accountID) + // 检查账号分组归属和有效性:原生平台直接匹配,antigravity 需要启用混合调度 + if err == nil && s.isAccountInGroup(account, groupID) && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { + if account.Platform == nativePlatform || (account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled()) { + if err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), sessionHash, stickySessionTTL); err != nil { + log.Printf("refresh session ttl failed: session=%s err=%v", sessionHash, err) + } + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] legacy mixed routed sticky hit: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), accountID) + } + return account, nil + } + } + } + } + } + + // 2) Select an account from the routed candidates. + var err error + accounts, _, err = s.listSchedulableAccounts(ctx, groupID, nativePlatform, false) + if err != nil { + return nil, fmt.Errorf("query accounts failed: %w", err) + } + accountsLoaded = true + + routingSet := make(map[int64]struct{}, len(routingAccountIDs)) + for _, id := range routingAccountIDs { + if id > 0 { + routingSet[id] = struct{}{} + } + } + + var selected *Account + for i := range accounts { + acc := &accounts[i] + if _, ok := routingSet[acc.ID]; !ok { + continue + } + if _, excluded := excludedIDs[acc.ID]; excluded { + continue + } + // Scheduler snapshots can be temporarily stale; re-check schedulability here to + // avoid selecting accounts that were recently rate-limited/overloaded. 
+ if !acc.IsSchedulable() { + continue + } + // 过滤:原生平台直接通过,antigravity 需要启用混合调度 + if acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() { + continue + } + if !acc.IsSchedulableForModel(requestedModel) { + continue + } + if requestedModel != "" && !s.isModelSupportedByAccount(acc, requestedModel) { + continue + } + if selected == nil { + selected = acc + continue + } + if acc.Priority < selected.Priority { + selected = acc + } else if acc.Priority == selected.Priority { + switch { + case acc.LastUsedAt == nil && selected.LastUsedAt != nil: + selected = acc + case acc.LastUsedAt != nil && selected.LastUsedAt == nil: + // keep selected (never used is preferred) + case acc.LastUsedAt == nil && selected.LastUsedAt == nil: + if preferOAuth && acc.Platform == PlatformGemini && selected.Platform == PlatformGemini && acc.Type != selected.Type && acc.Type == AccountTypeOAuth { + selected = acc + } + default: + if acc.LastUsedAt.Before(*selected.LastUsedAt) { + selected = acc + } + } + } + } + + if selected != nil { + if sessionHash != "" && s.cache != nil { + if err := s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), sessionHash, selected.ID, stickySessionTTL); err != nil { + log.Printf("set session account failed: session=%s account_id=%d err=%v", sessionHash, selected.ID, err) + } + } + if s.debugModelRoutingEnabled() { + log.Printf("[ModelRoutingDebug] legacy mixed routed select: group_id=%v model=%s session=%s account=%d", derefGroupID(groupID), requestedModel, shortSessionHash(sessionHash), selected.ID) + } + return selected, nil + } + log.Printf("[ModelRouting] No routed accounts available for model=%s, falling back to normal selection", requestedModel) + } // 1. 
查询粘性会话 if sessionHash != "" && s.cache != nil { accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), sessionHash) if err == nil && accountID > 0 { if _, excluded := excludedIDs[accountID]; !excluded { - account, err := s.accountRepo.GetByID(ctx, accountID) + account, err := s.getSchedulableAccount(ctx, accountID) // 检查账号分组归属和有效性:原生平台直接匹配,antigravity 需要启用混合调度 if err == nil && s.isAccountInGroup(account, groupID) && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { if account.Platform == nativePlatform || (account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled()) { @@ -971,15 +1642,12 @@ func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, g } // 2. 获取可调度账号列表 - var accounts []Account - var err error - if groupID != nil { - accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatforms(ctx, *groupID, platforms) - } else { - accounts, err = s.accountRepo.ListSchedulableByPlatforms(ctx, platforms) - } - if err != nil { - return nil, fmt.Errorf("query accounts failed: %w", err) + if !accountsLoaded { + var err error + accounts, _, err = s.listSchedulableAccounts(ctx, groupID, nativePlatform, false) + if err != nil { + return nil, fmt.Errorf("query accounts failed: %w", err) + } } // 3. 按优先级+最久未用选择(考虑模型支持和混合调度) @@ -989,6 +1657,11 @@ func (s *GatewayService) selectAccountWithMixedScheduling(ctx context.Context, g if _, excluded := excludedIDs[acc.ID]; excluded { continue } + // Scheduler snapshots can be temporarily stale; re-check schedulability here to + // avoid selecting accounts that were recently rate-limited/overloaded. 
+ if !acc.IsSchedulable() { + continue + } // 过滤:原生平台直接通过,antigravity 需要启用混合调度 if acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() { continue @@ -1075,6 +1748,16 @@ func (s *GatewayService) GetAccessToken(ctx context.Context, account *Account) ( } func (s *GatewayService) getOAuthToken(ctx context.Context, account *Account) (string, string, error) { + // 对于 Anthropic OAuth 账号,使用 ClaudeTokenProvider 获取缓存的 token + if account.Platform == PlatformAnthropic && account.Type == AccountTypeOAuth && s.claudeTokenProvider != nil { + accessToken, err := s.claudeTokenProvider.GetAccessToken(ctx, account) + if err != nil { + return "", "", err + } + return accessToken, "oauth", nil + } + + // 其他情况(Gemini 有自己的 TokenProvider,setup-token 类型等)直接从账号读取 accessToken := account.GetCredential("access_token") if accessToken == "" { return "", "", errors.New("access_token not found in credentials") @@ -1239,6 +1922,9 @@ func enforceCacheControlLimit(body []byte) []byte { return body } + // 清理 thinking 块中的非法 cache_control(thinking 块不支持该字段) + removeCacheControlFromThinkingBlocks(data) + // 计算当前 cache_control 块数量 count := countCacheControlBlocks(data) if count <= maxCacheControlBlocks { @@ -1266,6 +1952,7 @@ func enforceCacheControlLimit(body []byte) []byte { } // countCacheControlBlocks 统计 system 和 messages 中的 cache_control 块数量 +// 注意:thinking 块不支持 cache_control,统计时跳过 func countCacheControlBlocks(data map[string]any) int { count := 0 @@ -1273,6 +1960,10 @@ func countCacheControlBlocks(data map[string]any) int { if system, ok := data["system"].([]any); ok { for _, item := range system { if m, ok := item.(map[string]any); ok { + // thinking 块不支持 cache_control,跳过 + if blockType, _ := m["type"].(string); blockType == "thinking" { + continue + } if _, has := m["cache_control"]; has { count++ } @@ -1287,6 +1978,10 @@ func countCacheControlBlocks(data map[string]any) int { if content, ok := msgMap["content"].([]any); ok { for _, item := range content { if m, ok := 
item.(map[string]any); ok { + // thinking 块不支持 cache_control,跳过 + if blockType, _ := m["type"].(string); blockType == "thinking" { + continue + } if _, has := m["cache_control"]; has { count++ } @@ -1302,6 +1997,7 @@ func countCacheControlBlocks(data map[string]any) int { // removeCacheControlFromMessages 从 messages 中移除一个 cache_control(从头开始) // 返回 true 表示成功移除,false 表示没有可移除的 +// 注意:跳过 thinking 块(它不支持 cache_control) func removeCacheControlFromMessages(data map[string]any) bool { messages, ok := data["messages"].([]any) if !ok { @@ -1319,6 +2015,10 @@ func removeCacheControlFromMessages(data map[string]any) bool { } for _, item := range content { if m, ok := item.(map[string]any); ok { + // thinking 块不支持 cache_control,跳过 + if blockType, _ := m["type"].(string); blockType == "thinking" { + continue + } if _, has := m["cache_control"]; has { delete(m, "cache_control") return true @@ -1331,6 +2031,7 @@ func removeCacheControlFromMessages(data map[string]any) bool { // removeCacheControlFromSystem 从 system 中移除一个 cache_control(从尾部开始,保护注入的 prompt) // 返回 true 表示成功移除,false 表示没有可移除的 +// 注意:跳过 thinking 块(它不支持 cache_control) func removeCacheControlFromSystem(data map[string]any) bool { system, ok := data["system"].([]any) if !ok { @@ -1340,6 +2041,10 @@ func removeCacheControlFromSystem(data map[string]any) bool { // 从尾部开始移除,保护开头注入的 Claude Code prompt for i := len(system) - 1; i >= 0; i-- { if m, ok := system[i].(map[string]any); ok { + // thinking 块不支持 cache_control,跳过 + if blockType, _ := m["type"].(string); blockType == "thinking" { + continue + } if _, has := m["cache_control"]; has { delete(m, "cache_control") return true @@ -1349,6 +2054,44 @@ func removeCacheControlFromSystem(data map[string]any) bool { return false } +// removeCacheControlFromThinkingBlocks 强制清理所有 thinking 块中的非法 cache_control +// thinking 块不支持 cache_control 字段,这个函数确保所有 thinking 块都不含该字段 +func removeCacheControlFromThinkingBlocks(data map[string]any) { + // 清理 system 中的 thinking 块 + if system, ok := 
data["system"].([]any); ok { + for _, item := range system { + if m, ok := item.(map[string]any); ok { + if blockType, _ := m["type"].(string); blockType == "thinking" { + if _, has := m["cache_control"]; has { + delete(m, "cache_control") + log.Printf("[Warning] Removed illegal cache_control from thinking block in system") + } + } + } + } + } + + // 清理 messages 中的 thinking 块 + if messages, ok := data["messages"].([]any); ok { + for msgIdx, msg := range messages { + if msgMap, ok := msg.(map[string]any); ok { + if content, ok := msgMap["content"].([]any); ok { + for contentIdx, item := range content { + if m, ok := item.(map[string]any); ok { + if blockType, _ := m["type"].(string); blockType == "thinking" { + if _, has := m["cache_control"]; has { + delete(m, "cache_control") + log.Printf("[Warning] Removed illegal cache_control from thinking block in messages[%d].content[%d]", msgIdx, contentIdx) + } + } + } + } + } + } + } + } +} + // Forward 转发请求到Claude API func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *Account, parsed *ParsedRequest) (*ForwardResult, error) { startTime := time.Now() @@ -1402,6 +2145,9 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A for attempt := 1; attempt <= maxRetryAttempts; attempt++ { // 构建上游请求(每次重试需要重新构建,因为请求体需要重新读取) upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, tokenType, reqModel) + // Capture upstream request body for ops retry of this attempt. + c.Set(OpsUpstreamRequestBodyKey, string(body)) + if err != nil { return nil, err } @@ -1412,7 +2158,25 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A if resp != nil && resp.Body != nil { _ = resp.Body.Close() } - return nil, fmt.Errorf("upstream request failed: %w", err) + // Ensure the client receives an error response (handlers assume Forward writes on non-failover errors). 
+ safeErr := sanitizeUpstreamErrorMessage(err.Error()) + setOpsUpstreamError(c, 0, safeErr, "") + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "request_error", + Message: safeErr, + }) + c.JSON(http.StatusBadGateway, gin.H{ + "type": "error", + "error": gin.H{ + "type": "upstream_error", + "message": "Upstream request failed", + }, + }) + return nil, fmt.Errorf("upstream request failed: %s", safeErr) } // 优先检测thinking block签名错误(400)并重试一次 @@ -1422,6 +2186,22 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A _ = resp.Body.Close() if s.isThinkingBlockSignatureError(respBody) { + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "signature_error", + Message: extractUpstreamErrorMessage(respBody), + Detail: func() string { + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + return truncateString(string(respBody), s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes) + } + return "" + }(), + }) + looksLikeToolSignatureError := func(msg string) bool { m := strings.ToLower(msg) return strings.Contains(m, "tool_use") || @@ -1458,6 +2238,21 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A retryRespBody, retryReadErr := io.ReadAll(io.LimitReader(retryResp.Body, 2<<20)) _ = retryResp.Body.Close() if retryReadErr == nil && retryResp.StatusCode == 400 && s.isThinkingBlockSignatureError(retryRespBody) { + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: retryResp.StatusCode, + UpstreamRequestID: retryResp.Header.Get("x-request-id"), + Kind: "signature_retry_thinking", + Message: 
extractUpstreamErrorMessage(retryRespBody), + Detail: func() string { + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + return truncateString(string(retryRespBody), s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes) + } + return "" + }(), + }) msg2 := extractUpstreamErrorMessage(retryRespBody) if looksLikeToolSignatureError(msg2) && time.Since(retryStart) < maxRetryElapsed { log.Printf("Account %d: signature retry still failing and looks tool-related, retrying with tool blocks downgraded", account.ID) @@ -1472,6 +2267,14 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A if retryResp2 != nil && retryResp2.Body != nil { _ = retryResp2.Body.Close() } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "signature_retry_tools_request_error", + Message: sanitizeUpstreamErrorMessage(retryErr2.Error()), + }) log.Printf("Account %d: tool-downgrade signature retry failed: %v", account.ID, retryErr2) } else { log.Printf("Account %d: tool-downgrade signature retry build failed: %v", account.ID, buildErr2) @@ -1521,9 +2324,25 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A break } + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry", + Message: extractUpstreamErrorMessage(respBody), + Detail: func() string { + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + return truncateString(string(respBody), s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes) + } + return "" + }(), + }) log.Printf("Account %d: upstream error %d, retry %d/%d after %v (elapsed=%v/%v)", account.ID, resp.StatusCode, attempt, maxRetryAttempts, delay, 
elapsed, maxRetryElapsed) - _ = resp.Body.Close() if err := sleepWithContext(ctx, delay); err != nil { return nil, err } @@ -1551,7 +2370,26 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A // 处理重试耗尽的情况 if resp.StatusCode >= 400 && s.shouldRetryUpstreamError(account, resp.StatusCode) { if s.shouldFailoverUpstreamError(resp.StatusCode) { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(respBody)) + s.handleRetryExhaustedSideEffects(ctx, resp, account) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry_exhausted_failover", + Message: extractUpstreamErrorMessage(respBody), + Detail: func() string { + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + return truncateString(string(respBody), s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes) + } + return "" + }(), + }) return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } return s.handleRetryExhaustedError(ctx, resp, c, account) @@ -1559,7 +2397,25 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A // 处理可切换账号的错误 if resp.StatusCode >= 400 && s.shouldFailoverUpstreamError(resp.StatusCode) { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(respBody)) + s.handleFailoverSideEffects(ctx, resp, account) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "failover", + Message: extractUpstreamErrorMessage(respBody), + Detail: func() string { + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + return truncateString(string(respBody), 
s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes) + } + return "" + }(), + }) return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } @@ -1576,6 +2432,27 @@ func (s *GatewayService) Forward(ctx context.Context, c *gin.Context, account *A resp.Body = io.NopCloser(bytes.NewReader(respBody)) if s.shouldFailoverOn400(respBody) { + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "failover_on_400", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + if s.cfg.Gateway.LogUpstreamErrorBody { log.Printf( "Account %d: 400 error, attempting failover: %s", @@ -1872,7 +2749,30 @@ func extractUpstreamErrorMessage(body []byte) string { } func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account) (*ForwardResult, error) { - body, _ := io.ReadAll(resp.Body) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + + // Enrich Ops error logs with upstream status + message, and optionally a truncated body snippet. 
+ upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(body), maxBytes) + } + setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "http_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) // 处理上游错误,标记账号状态 shouldDisable := false @@ -1883,24 +2783,33 @@ func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Res return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } + // 记录上游错误响应体摘要便于排障(可选:由配置控制;不回显到客户端) + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "Upstream error %d (account=%d platform=%s type=%s): %s", + resp.StatusCode, + account.ID, + account.Platform, + account.Type, + truncateForLog(body, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } + // 根据状态码返回适当的自定义错误响应(不透传上游详细信息) var errType, errMsg string var statusCode int switch resp.StatusCode { case 400: - // 仅记录上游错误摘要(避免输出请求内容);需要时可通过配置打开 - if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { - log.Printf( - "Upstream 400 error (account=%d platform=%s type=%s): %s", - account.ID, - account.Platform, - account.Type, - truncateForLog(body, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), - ) - } c.Data(http.StatusBadRequest, "application/json", body) - return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) + summary := upstreamMsg + if summary == "" { + summary = truncateForLog(body, 512) + } + if summary == "" { + return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) + } + return nil, fmt.Errorf("upstream error: %d message=%s", resp.StatusCode, summary) case 401: statusCode = http.StatusBadGateway errType = "upstream_error" @@ -1936,11 
+2845,14 @@ func (s *GatewayService) handleErrorResponse(ctx context.Context, resp *http.Res }, }) - return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) + if upstreamMsg == "" { + return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) + } + return nil, fmt.Errorf("upstream error: %d message=%s", resp.StatusCode, upstreamMsg) } func (s *GatewayService) handleRetryExhaustedSideEffects(ctx context.Context, resp *http.Response, account *Account) { - body, _ := io.ReadAll(resp.Body) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) statusCode := resp.StatusCode // OAuth/Setup Token 账号的 403:标记账号异常 @@ -1954,7 +2866,7 @@ func (s *GatewayService) handleRetryExhaustedSideEffects(ctx context.Context, re } func (s *GatewayService) handleFailoverSideEffects(ctx context.Context, resp *http.Response, account *Account) { - body, _ := io.ReadAll(resp.Body) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, body) } @@ -1962,8 +2874,45 @@ func (s *GatewayService) handleFailoverSideEffects(ctx context.Context, resp *ht // OAuth 403:标记账号异常 // API Key 未配置错误码:仅返回错误,不标记账号 func (s *GatewayService) handleRetryExhaustedError(ctx context.Context, resp *http.Response, c *gin.Context, account *Account) (*ForwardResult, error) { + // Capture upstream error body before side-effects consume the stream. 
+ respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(respBody)) + s.handleRetryExhaustedSideEffects(ctx, resp, account) + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "retry_exhausted", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "Upstream error %d retries_exhausted (account=%d platform=%s type=%s): %s", + resp.StatusCode, + account.ID, + account.Platform, + account.Type, + truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } + // 返回统一的重试耗尽错误响应 c.JSON(http.StatusBadGateway, gin.H{ "type": "error", @@ -1973,7 +2922,10 @@ func (s *GatewayService) handleRetryExhaustedError(ctx context.Context, resp *ht }, }) - return nil, fmt.Errorf("upstream error: %d (retries exhausted)", resp.StatusCode) + if upstreamMsg == "" { + return nil, fmt.Errorf("upstream error: %d (retries exhausted)", resp.StatusCode) + } + return nil, fmt.Errorf("upstream error: %d (retries exhausted) message=%s", resp.StatusCode, upstreamMsg) } // streamingResult 流式响应结果 @@ -2154,6 +3106,10 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http return &streamingResult{usage: usage, firstTokenMs: firstTokenMs, clientDisconnect: true}, nil } log.Printf("Stream data interval timeout: account=%d 
model=%s interval=%s", account.ID, originalModel, streamInterval) + // 处理流超时,可能标记账户为临时不可调度或错误状态 + if s.rateLimitService != nil { + s.rateLimitService.HandleStreamTimeout(ctx, account, originalModel) + } sendErrorEvent("stream_timeout") return &streamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout") } @@ -2307,6 +3263,7 @@ type RecordUsageInput struct { Account *Account Subscription *UserSubscription // 可选:订阅信息 UserAgent string // 请求的 User-Agent + IPAddress string // 请求的客户端 IP 地址 } // RecordUsage 记录使用量并扣费(或更新订阅用量) @@ -2366,30 +3323,32 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu if result.ImageSize != "" { imageSize = &result.ImageSize } + accountRateMultiplier := account.BillingRateMultiplier() usageLog := &UsageLog{ - UserID: user.ID, - APIKeyID: apiKey.ID, - AccountID: account.ID, - RequestID: result.RequestID, - Model: result.Model, - InputTokens: result.Usage.InputTokens, - OutputTokens: result.Usage.OutputTokens, - CacheCreationTokens: result.Usage.CacheCreationInputTokens, - CacheReadTokens: result.Usage.CacheReadInputTokens, - InputCost: cost.InputCost, - OutputCost: cost.OutputCost, - CacheCreationCost: cost.CacheCreationCost, - CacheReadCost: cost.CacheReadCost, - TotalCost: cost.TotalCost, - ActualCost: cost.ActualCost, - RateMultiplier: multiplier, - BillingType: billingType, - Stream: result.Stream, - DurationMs: &durationMs, - FirstTokenMs: result.FirstTokenMs, - ImageCount: result.ImageCount, - ImageSize: imageSize, - CreatedAt: time.Now(), + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + RequestID: result.RequestID, + Model: result.Model, + InputTokens: result.Usage.InputTokens, + OutputTokens: result.Usage.OutputTokens, + CacheCreationTokens: result.Usage.CacheCreationInputTokens, + CacheReadTokens: result.Usage.CacheReadInputTokens, + InputCost: cost.InputCost, + OutputCost: cost.OutputCost, + CacheCreationCost: cost.CacheCreationCost, + 
CacheReadCost: cost.CacheReadCost, + TotalCost: cost.TotalCost, + ActualCost: cost.ActualCost, + RateMultiplier: multiplier, + AccountRateMultiplier: &accountRateMultiplier, + BillingType: billingType, + Stream: result.Stream, + DurationMs: &durationMs, + FirstTokenMs: result.FirstTokenMs, + ImageCount: result.ImageCount, + ImageSize: imageSize, + CreatedAt: time.Now(), } // 添加 UserAgent @@ -2397,6 +3356,11 @@ func (s *GatewayService) RecordUsage(ctx context.Context, input *RecordUsageInpu usageLog.UserAgent = &input.UserAgent } + // 添加 IPAddress + if input.IPAddress != "" { + usageLog.IPAddress = &input.IPAddress + } + // 添加分组和订阅关联 if apiKey.GroupID != nil { usageLog.GroupID = apiKey.GroupID @@ -2497,6 +3461,7 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, // 发送请求 resp, err := s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) if err != nil { + setOpsUpstreamError(c, 0, sanitizeUpstreamErrorMessage(err.Error()), "") s.countTokensError(c, http.StatusBadGateway, "upstream_error", "Request failed") return fmt.Errorf("upstream request failed: %w", err) } @@ -2534,6 +3499,18 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, // 标记账号状态(429/529等) s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + // 记录上游错误摘要便于排障(不回显请求内容) if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { log.Printf( @@ -2555,7 +3532,10 @@ func (s *GatewayService) ForwardCountTokens(ctx context.Context, c *gin.Context, 
errMsg = "Service overloaded" } s.countTokensError(c, resp.StatusCode, "upstream_error", errMsg) - return fmt.Errorf("upstream error: %d", resp.StatusCode) + if upstreamMsg == "" { + return fmt.Errorf("upstream error: %d", resp.StatusCode) + } + return fmt.Errorf("upstream error: %d message=%s", resp.StatusCode, upstreamMsg) } // 透传成功响应 diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index 2b500072..75de90f2 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -40,6 +40,7 @@ type GeminiMessagesCompatService struct { accountRepo AccountRepository groupRepo GroupRepository cache GatewayCache + schedulerSnapshot *SchedulerSnapshotService tokenProvider *GeminiTokenProvider rateLimitService *RateLimitService httpUpstream HTTPUpstream @@ -51,6 +52,7 @@ func NewGeminiMessagesCompatService( accountRepo AccountRepository, groupRepo GroupRepository, cache GatewayCache, + schedulerSnapshot *SchedulerSnapshotService, tokenProvider *GeminiTokenProvider, rateLimitService *RateLimitService, httpUpstream HTTPUpstream, @@ -61,6 +63,7 @@ func NewGeminiMessagesCompatService( accountRepo: accountRepo, groupRepo: groupRepo, cache: cache, + schedulerSnapshot: schedulerSnapshot, tokenProvider: tokenProvider, rateLimitService: rateLimitService, httpUpstream: httpUpstream, @@ -86,9 +89,15 @@ func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx co platform = forcePlatform } else if groupID != nil { // 根据分组 platform 决定查询哪种账号 - group, err := s.groupRepo.GetByID(ctx, *groupID) - if err != nil { - return nil, fmt.Errorf("get group failed: %w", err) + var group *Group + if ctxGroup, ok := ctx.Value(ctxkey.Group).(*Group); ok && IsGroupContextValid(ctxGroup) && ctxGroup.ID == *groupID { + group = ctxGroup + } else { + var err error + group, err = s.groupRepo.GetByIDLite(ctx, *groupID) + if err != nil { 
+ return nil, fmt.Errorf("get group failed: %w", err) + } } platform = group.Platform } else { @@ -99,12 +108,6 @@ func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx co // gemini 分组支持混合调度(包含启用了 mixed_scheduling 的 antigravity 账户) // 注意:强制平台模式不走混合调度 useMixedScheduling := platform == PlatformGemini && !hasForcePlatform - var queryPlatforms []string - if useMixedScheduling { - queryPlatforms = []string{PlatformGemini, PlatformAntigravity} - } else { - queryPlatforms = []string{platform} - } cacheKey := "gemini:" + sessionHash @@ -112,7 +115,7 @@ func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx co accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), cacheKey) if err == nil && accountID > 0 { if _, excluded := excludedIDs[accountID]; !excluded { - account, err := s.accountRepo.GetByID(ctx, accountID) + account, err := s.getSchedulableAccount(ctx, accountID) // 检查账号是否有效:原生平台直接匹配,antigravity 需要启用混合调度 if err == nil && account.IsSchedulableForModel(requestedModel) && (requestedModel == "" || s.isModelSupportedByAccount(account, requestedModel)) { valid := false @@ -143,22 +146,16 @@ func (s *GeminiMessagesCompatService) SelectAccountForModelWithExclusions(ctx co } // 查询可调度账户(强制平台模式:优先按分组查找,找不到再查全部) - var accounts []Account - var err error - if groupID != nil { - accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatforms(ctx, *groupID, queryPlatforms) + accounts, err := s.listSchedulableAccountsOnce(ctx, groupID, platform, hasForcePlatform) + if err != nil { + return nil, fmt.Errorf("query accounts failed: %w", err) + } + // 强制平台模式下,分组中找不到账户时回退查询全部 + if len(accounts) == 0 && groupID != nil && hasForcePlatform { + accounts, err = s.listSchedulableAccountsOnce(ctx, nil, platform, hasForcePlatform) if err != nil { return nil, fmt.Errorf("query accounts failed: %w", err) } - // 强制平台模式下,分组中找不到账户时回退查询全部 - if len(accounts) == 0 && hasForcePlatform { - accounts, err = 
s.accountRepo.ListSchedulableByPlatforms(ctx, queryPlatforms) - } - } else { - accounts, err = s.accountRepo.ListSchedulableByPlatforms(ctx, queryPlatforms) - } - if err != nil { - return nil, fmt.Errorf("query accounts failed: %w", err) } var selected *Account @@ -239,6 +236,31 @@ func (s *GeminiMessagesCompatService) GetAntigravityGatewayService() *Antigravit return s.antigravityGatewayService } +func (s *GeminiMessagesCompatService) getSchedulableAccount(ctx context.Context, accountID int64) (*Account, error) { + if s.schedulerSnapshot != nil { + return s.schedulerSnapshot.GetAccount(ctx, accountID) + } + return s.accountRepo.GetByID(ctx, accountID) +} + +func (s *GeminiMessagesCompatService) listSchedulableAccountsOnce(ctx context.Context, groupID *int64, platform string, hasForcePlatform bool) ([]Account, error) { + if s.schedulerSnapshot != nil { + accounts, _, err := s.schedulerSnapshot.ListSchedulableAccounts(ctx, groupID, platform, hasForcePlatform) + return accounts, err + } + + useMixedScheduling := platform == PlatformGemini && !hasForcePlatform + queryPlatforms := []string{platform} + if useMixedScheduling { + queryPlatforms = []string{platform, PlatformAntigravity} + } + + if groupID != nil { + return s.accountRepo.ListSchedulableByGroupIDAndPlatforms(ctx, *groupID, queryPlatforms) + } + return s.accountRepo.ListSchedulableByPlatforms(ctx, queryPlatforms) +} + func (s *GeminiMessagesCompatService) validateUpstreamBaseURL(raw string) (string, error) { if s.cfg != nil && !s.cfg.Security.URLAllowlist.Enabled { normalized, err := urlvalidator.ValidateURLFormat(raw, s.cfg.Security.URLAllowlist.AllowInsecureHTTP) @@ -260,13 +282,7 @@ func (s *GeminiMessagesCompatService) validateUpstreamBaseURL(raw string) (strin // HasAntigravityAccounts 检查是否有可用的 antigravity 账户 func (s *GeminiMessagesCompatService) HasAntigravityAccounts(ctx context.Context, groupID *int64) (bool, error) { - var accounts []Account - var err error - if groupID != nil { - accounts, err = 
s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, PlatformAntigravity) - } else { - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, PlatformAntigravity) - } + accounts, err := s.listSchedulableAccountsOnce(ctx, groupID, PlatformAntigravity, false) if err != nil { return false, err } @@ -282,13 +298,7 @@ func (s *GeminiMessagesCompatService) HasAntigravityAccounts(ctx context.Context // 3) OAuth accounts explicitly marked as ai_studio // 4) Any remaining Gemini accounts (fallback) func (s *GeminiMessagesCompatService) SelectAccountForAIStudioEndpoints(ctx context.Context, groupID *int64) (*Account, error) { - var accounts []Account - var err error - if groupID != nil { - accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, PlatformGemini) - } else { - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, PlatformGemini) - } + accounts, err := s.listSchedulableAccountsOnce(ctx, groupID, PlatformGemini, true) if err != nil { return nil, fmt.Errorf("query accounts failed: %w", err) } @@ -535,14 +545,30 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex } requestIDHeader = idHeader + // Capture upstream request body for ops retry of this attempt. + if c != nil { + // In this code path `body` is already the JSON sent to upstream. 
+ c.Set(OpsUpstreamRequestBodyKey, string(body)) + } + resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) if err != nil { + safeErr := sanitizeUpstreamErrorMessage(err.Error()) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "request_error", + Message: safeErr, + }) if attempt < geminiMaxRetries { log.Printf("Gemini account %d: upstream request failed, retry %d/%d: %v", account.ID, attempt, geminiMaxRetries, err) sleepGeminiBackoff(attempt) continue } - return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries: "+sanitizeUpstreamErrorMessage(err.Error())) + setOpsUpstreamError(c, 0, safeErr, "") + return nil, s.writeClaudeError(c, http.StatusBadGateway, "upstream_error", "Upstream request failed after retries: "+safeErr) } // Special-case: signature/thought_signature validation errors are not transient, but may be fixed by @@ -552,6 +578,31 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex _ = resp.Body.Close() if isGeminiSignatureRelatedError(respBody) { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "signature_error", + Message: upstreamMsg, + 
Detail: upstreamDetail, + }) + var strippedClaudeBody []byte stageName := "" switch signatureRetryStage { @@ -602,6 +653,31 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) } if attempt < geminiMaxRetries { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "retry", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + log.Printf("Gemini account %d: upstream status %d, retry %d/%d", account.ID, resp.StatusCode, attempt, geminiMaxRetries) sleepGeminiBackoff(attempt) continue @@ -627,12 +703,64 @@ func (s *GeminiMessagesCompatService) Forward(ctx context.Context, c *gin.Contex } s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) if tempMatched { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) 
+ } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } if s.shouldFailoverGeminiUpstreamError(resp.StatusCode) { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } - return nil, s.writeGeminiMappedError(c, resp.StatusCode, respBody) + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + return nil, s.writeGeminiMappedError(c, account, resp.StatusCode, upstreamReqID, respBody) } requestID := resp.Header.Get(requestIDHeader) @@ -855,8 +983,23 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. } requestIDHeader = idHeader + // Capture upstream request body for ops retry of this attempt. + if c != nil { + // In this code path `body` is already the JSON sent to upstream. 
+ c.Set(OpsUpstreamRequestBodyKey, string(body)) + } + resp, err = s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) if err != nil { + safeErr := sanitizeUpstreamErrorMessage(err.Error()) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "request_error", + Message: safeErr, + }) if attempt < geminiMaxRetries { log.Printf("Gemini account %d: upstream request failed, retry %d/%d: %v", account.ID, attempt, geminiMaxRetries, err) sleepGeminiBackoff(attempt) @@ -874,7 +1017,8 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. FirstTokenMs: nil, }, nil } - return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries: "+sanitizeUpstreamErrorMessage(err.Error())) + setOpsUpstreamError(c, 0, safeErr, "") + return nil, s.writeGoogleError(c, http.StatusBadGateway, "Upstream request failed after retries: "+safeErr) } if resp.StatusCode >= 400 && s.shouldRetryGeminiUpstreamError(account, resp.StatusCode) { @@ -893,6 +1037,31 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. 
s.handleGeminiUpstreamError(ctx, account, resp.StatusCode, resp.Header, respBody) } if attempt < geminiMaxRetries { + upstreamReqID := resp.Header.Get(requestIDHeader) + if upstreamReqID == "" { + upstreamReqID = resp.Header.Get("x-goog-request-id") + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: upstreamReqID, + Kind: "retry", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + log.Printf("Gemini account %d: upstream status %d, retry %d/%d", account.ID, resp.StatusCode, attempt, geminiMaxRetries) sleepGeminiBackoff(attempt) continue @@ -956,19 +1125,87 @@ func (s *GeminiMessagesCompatService) ForwardNative(ctx context.Context, c *gin. 
} if tempMatched { + evBody := unwrapIfNeeded(isOAuth, respBody) + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(evBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(evBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } if s.shouldFailoverGeminiUpstreamError(resp.StatusCode) { + evBody := unwrapIfNeeded(isOAuth, respBody) + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(evBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(evBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } respBody = unwrapIfNeeded(isOAuth, respBody) + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = 
truncateString(string(respBody), maxBytes) + log.Printf("[Gemini] native upstream error %d: %s", resp.StatusCode, truncateForLog(respBody, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes)) + } + setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: requestID, + Kind: "http_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + contentType := resp.Header.Get("Content-Type") if contentType == "" { contentType = "application/json" } c.Data(resp.StatusCode, contentType, respBody) - return nil, fmt.Errorf("gemini upstream error: %d", resp.StatusCode) + if upstreamMsg == "" { + return nil, fmt.Errorf("gemini upstream error: %d", resp.StatusCode) + } + return nil, fmt.Errorf("gemini upstream error: %d message=%s", resp.StatusCode, upstreamMsg) } var usage *ClaudeUsage @@ -1070,7 +1307,33 @@ func sanitizeUpstreamErrorMessage(msg string) string { return sensitiveQueryParamRegex.ReplaceAllString(msg, `$1***`) } -func (s *GeminiMessagesCompatService) writeGeminiMappedError(c *gin.Context, upstreamStatus int, body []byte) error { +func (s *GeminiMessagesCompatService) writeGeminiMappedError(c *gin.Context, account *Account, upstreamStatus int, upstreamRequestID string, body []byte) error { + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(body), maxBytes) + } + setOpsUpstreamError(c, upstreamStatus, upstreamMsg, upstreamDetail) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + 
UpstreamStatusCode: upstreamStatus, + UpstreamRequestID: upstreamRequestID, + Kind: "http_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf("[Gemini] upstream error %d: %s", upstreamStatus, truncateForLog(body, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes)) + } + var statusCode int var errType, errMsg string @@ -1178,7 +1441,10 @@ func (s *GeminiMessagesCompatService) writeGeminiMappedError(c *gin.Context, ups "type": "error", "error": gin.H{"type": errType, "message": errMsg}, }) - return fmt.Errorf("upstream error: %d", upstreamStatus) + if upstreamMsg == "" { + return fmt.Errorf("upstream error: %d", upstreamStatus) + } + return fmt.Errorf("upstream error: %d message=%s", upstreamStatus, upstreamMsg) } type claudeErrorMapping struct { diff --git a/backend/internal/service/gemini_multiplatform_test.go b/backend/internal/service/gemini_multiplatform_test.go index 15e84040..fc009873 100644 --- a/backend/internal/service/gemini_multiplatform_test.go +++ b/backend/internal/service/gemini_multiplatform_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" "github.com/stretchr/testify/require" ) @@ -127,6 +128,9 @@ func (m *mockAccountRepoForGemini) SetRateLimited(ctx context.Context, id int64, func (m *mockAccountRepoForGemini) SetAntigravityQuotaScopeLimit(ctx context.Context, id int64, scope AntigravityQuotaScope, resetAt time.Time) error { return nil } +func (m *mockAccountRepoForGemini) SetModelRateLimit(ctx context.Context, id int64, scope string, resetAt time.Time) error { + return nil +} func (m *mockAccountRepoForGemini) SetOverloaded(ctx context.Context, id int64, until time.Time) error { return nil } @@ -140,6 +144,9 @@ func (m *mockAccountRepoForGemini) ClearRateLimit(ctx context.Context, id int64) func (m *mockAccountRepoForGemini) ClearAntigravityQuotaScopes(ctx 
context.Context, id int64) error { return nil } +func (m *mockAccountRepoForGemini) ClearModelRateLimits(ctx context.Context, id int64) error { + return nil +} func (m *mockAccountRepoForGemini) UpdateSessionWindow(ctx context.Context, id int64, start, end *time.Time, status string) error { return nil } @@ -155,10 +162,21 @@ var _ AccountRepository = (*mockAccountRepoForGemini)(nil) // mockGroupRepoForGemini Gemini 测试用的 group repo mock type mockGroupRepoForGemini struct { - groups map[int64]*Group + groups map[int64]*Group + getByIDCalls int + getByIDLiteCalls int } func (m *mockGroupRepoForGemini) GetByID(ctx context.Context, id int64) (*Group, error) { + m.getByIDCalls++ + if g, ok := m.groups[id]; ok { + return g, nil + } + return nil, errors.New("group not found") +} + +func (m *mockGroupRepoForGemini) GetByIDLite(ctx context.Context, id int64) (*Group, error) { + m.getByIDLiteCalls++ if g, ok := m.groups[id]; ok { return g, nil } @@ -251,6 +269,77 @@ func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_GeminiP require.Equal(t, PlatformGemini, acc.Platform, "无分组时应只返回 gemini 平台账户") } +func TestGeminiMessagesCompatService_GroupResolution_ReusesContextGroup(t *testing.T) { + ctx := context.Background() + groupID := int64(7) + group := &Group{ + ID: groupID, + Platform: PlatformGemini, + Status: StatusActive, + Hydrated: true, + } + ctx = context.WithValue(ctx, ctxkey.Group, group) + + repo := &mockAccountRepoForGemini{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForGemini{} + groupRepo := &mockGroupRepoForGemini{groups: map[int64]*Group{}} + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, 
&groupID, "", "gemini-2.5-flash", nil) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, 0, groupRepo.getByIDCalls) + require.Equal(t, 0, groupRepo.getByIDLiteCalls) +} + +func TestGeminiMessagesCompatService_GroupResolution_UsesLiteFetch(t *testing.T) { + ctx := context.Background() + groupID := int64(7) + + repo := &mockAccountRepoForGemini{ + accounts: []Account{ + {ID: 1, Platform: PlatformGemini, Priority: 1, Status: StatusActive, Schedulable: true}, + }, + accountsByID: map[int64]*Account{}, + } + for i := range repo.accounts { + repo.accountsByID[repo.accounts[i].ID] = &repo.accounts[i] + } + + cache := &mockGatewayCacheForGemini{} + groupRepo := &mockGroupRepoForGemini{ + groups: map[int64]*Group{ + groupID: {ID: groupID, Platform: PlatformGemini}, + }, + } + + svc := &GeminiMessagesCompatService{ + accountRepo: repo, + groupRepo: groupRepo, + cache: cache, + } + + acc, err := svc.SelectAccountForModelWithExclusions(ctx, &groupID, "", "gemini-2.5-flash", nil) + require.NoError(t, err) + require.NotNil(t, acc) + require.Equal(t, 0, groupRepo.getByIDCalls) + require.Equal(t, 1, groupRepo.getByIDLiteCalls) +} + // TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_AntigravityGroup 测试 antigravity 分组 func TestGeminiMessagesCompatService_SelectAccountForModelWithExclusions_AntigravityGroup(t *testing.T) { ctx := context.Background() diff --git a/backend/internal/service/gemini_token_cache.go b/backend/internal/service/gemini_token_cache.go index d5e64f9a..70f246da 100644 --- a/backend/internal/service/gemini_token_cache.go +++ b/backend/internal/service/gemini_token_cache.go @@ -10,6 +10,7 @@ type GeminiTokenCache interface { // cacheKey should be stable for the token scope; for GeminiCli OAuth we primarily use project_id. 
GetAccessToken(ctx context.Context, cacheKey string) (string, error) SetAccessToken(ctx context.Context, cacheKey string, token string, ttl time.Duration) error + DeleteAccessToken(ctx context.Context, cacheKey string) error AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error) ReleaseRefreshLock(ctx context.Context, cacheKey string) error diff --git a/backend/internal/service/gemini_token_provider.go b/backend/internal/service/gemini_token_provider.go index 0257d19f..f13ae169 100644 --- a/backend/internal/service/gemini_token_provider.go +++ b/backend/internal/service/gemini_token_provider.go @@ -40,7 +40,7 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou return "", errors.New("not a gemini oauth account") } - cacheKey := geminiTokenCacheKey(account) + cacheKey := GeminiTokenCacheKey(account) // 1) Try cache first. if p.tokenCache != nil { @@ -151,10 +151,10 @@ func (p *GeminiTokenProvider) GetAccessToken(ctx context.Context, account *Accou return accessToken, nil } -func geminiTokenCacheKey(account *Account) string { +func GeminiTokenCacheKey(account *Account) string { projectID := strings.TrimSpace(account.GetCredential("project_id")) if projectID != "" { - return projectID + return "gemini:" + projectID } - return "account:" + strconv.FormatInt(account.ID, 10) + return "gemini:account:" + strconv.FormatInt(account.ID, 10) } diff --git a/backend/internal/service/group.go b/backend/internal/service/group.go index 80d89074..d6d1269b 100644 --- a/backend/internal/service/group.go +++ b/backend/internal/service/group.go @@ -1,6 +1,9 @@ package service -import "time" +import ( + "strings" + "time" +) type Group struct { ID int64 @@ -10,6 +13,7 @@ type Group struct { RateMultiplier float64 IsExclusive bool Status string + Hydrated bool // indicates the group was loaded from a trusted repository source SubscriptionType string DailyLimitUSD *float64 @@ -26,6 +30,12 @@ type Group struct { 
ClaudeCodeOnly bool FallbackGroupID *int64 + // 模型路由配置 + // key: 模型匹配模式(支持 * 通配符,如 "claude-opus-*") + // value: 优先账号 ID 列表 + ModelRouting map[string][]int64 + ModelRoutingEnabled bool + CreatedAt time.Time UpdatedAt time.Time @@ -72,3 +82,58 @@ func (g *Group) GetImagePrice(imageSize string) *float64 { return g.ImagePrice2K } } + +// IsGroupContextValid reports whether a group from context has the fields required for routing decisions. +func IsGroupContextValid(group *Group) bool { + if group == nil { + return false + } + if group.ID <= 0 { + return false + } + if !group.Hydrated { + return false + } + if group.Platform == "" || group.Status == "" { + return false + } + return true +} + +// GetRoutingAccountIDs 根据请求模型获取路由账号 ID 列表 +// 返回匹配的优先账号 ID 列表,如果没有匹配规则则返回 nil +func (g *Group) GetRoutingAccountIDs(requestedModel string) []int64 { + if !g.ModelRoutingEnabled || len(g.ModelRouting) == 0 || requestedModel == "" { + return nil + } + + // 1. 精确匹配优先 + if accountIDs, ok := g.ModelRouting[requestedModel]; ok && len(accountIDs) > 0 { + return accountIDs + } + + // 2. 
通配符匹配(前缀匹配) + for pattern, accountIDs := range g.ModelRouting { + if matchModelPattern(pattern, requestedModel) && len(accountIDs) > 0 { + return accountIDs + } + } + + return nil +} + +// matchModelPattern 检查模型是否匹配模式 +// 支持 * 通配符,如 "claude-opus-*" 匹配 "claude-opus-4-20250514" +func matchModelPattern(pattern, model string) bool { + if pattern == model { + return true + } + + // 处理 * 通配符(仅支持末尾通配符) + if strings.HasSuffix(pattern, "*") { + prefix := strings.TrimSuffix(pattern, "*") + return strings.HasPrefix(model, prefix) + } + + return false +} diff --git a/backend/internal/service/group_service.go b/backend/internal/service/group_service.go index a444556f..324f347b 100644 --- a/backend/internal/service/group_service.go +++ b/backend/internal/service/group_service.go @@ -16,6 +16,7 @@ var ( type GroupRepository interface { Create(ctx context.Context, group *Group) error GetByID(ctx context.Context, id int64) (*Group, error) + GetByIDLite(ctx context.Context, id int64) (*Group, error) Update(ctx context.Context, group *Group) error Delete(ctx context.Context, id int64) error DeleteCascade(ctx context.Context, id int64) ([]int64, error) @@ -49,13 +50,15 @@ type UpdateGroupRequest struct { // GroupService 分组管理服务 type GroupService struct { - groupRepo GroupRepository + groupRepo GroupRepository + authCacheInvalidator APIKeyAuthCacheInvalidator } // NewGroupService 创建分组服务实例 -func NewGroupService(groupRepo GroupRepository) *GroupService { +func NewGroupService(groupRepo GroupRepository, authCacheInvalidator APIKeyAuthCacheInvalidator) *GroupService { return &GroupService{ - groupRepo: groupRepo, + groupRepo: groupRepo, + authCacheInvalidator: authCacheInvalidator, } } @@ -154,6 +157,9 @@ func (s *GroupService) Update(ctx context.Context, id int64, req UpdateGroupRequ if err := s.groupRepo.Update(ctx, group); err != nil { return nil, fmt.Errorf("update group: %w", err) } + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByGroupID(ctx, id) + } 
return group, nil } @@ -166,6 +172,9 @@ func (s *GroupService) Delete(ctx context.Context, id int64) error { return fmt.Errorf("get group: %w", err) } + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByGroupID(ctx, id) + } if err := s.groupRepo.Delete(ctx, id); err != nil { return fmt.Errorf("delete group: %w", err) } diff --git a/backend/internal/service/model_rate_limit.go b/backend/internal/service/model_rate_limit.go new file mode 100644 index 00000000..49354a7f --- /dev/null +++ b/backend/internal/service/model_rate_limit.go @@ -0,0 +1,56 @@ +package service + +import ( + "strings" + "time" +) + +const modelRateLimitsKey = "model_rate_limits" +const modelRateLimitScopeClaudeSonnet = "claude_sonnet" + +func resolveModelRateLimitScope(requestedModel string) (string, bool) { + model := strings.ToLower(strings.TrimSpace(requestedModel)) + if model == "" { + return "", false + } + model = strings.TrimPrefix(model, "models/") + if strings.Contains(model, "sonnet") { + return modelRateLimitScopeClaudeSonnet, true + } + return "", false +} + +func (a *Account) isModelRateLimited(requestedModel string) bool { + scope, ok := resolveModelRateLimitScope(requestedModel) + if !ok { + return false + } + resetAt := a.modelRateLimitResetAt(scope) + if resetAt == nil { + return false + } + return time.Now().Before(*resetAt) +} + +func (a *Account) modelRateLimitResetAt(scope string) *time.Time { + if a == nil || a.Extra == nil || scope == "" { + return nil + } + rawLimits, ok := a.Extra[modelRateLimitsKey].(map[string]any) + if !ok { + return nil + } + rawLimit, ok := rawLimits[scope].(map[string]any) + if !ok { + return nil + } + resetAtRaw, ok := rawLimit["rate_limit_reset_at"].(string) + if !ok || strings.TrimSpace(resetAtRaw) == "" { + return nil + } + resetAt, err := time.Parse(time.RFC3339, resetAtRaw) + if err != nil { + return nil + } + return &resetAt +} diff --git a/backend/internal/service/openai_codex_transform.go 
b/backend/internal/service/openai_codex_transform.go new file mode 100644 index 00000000..264bdf95 --- /dev/null +++ b/backend/internal/service/openai_codex_transform.go @@ -0,0 +1,528 @@ +package service + +import ( + _ "embed" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "strings" + "time" +) + +const ( + opencodeCodexHeaderURL = "https://raw.githubusercontent.com/anomalyco/opencode/dev/packages/opencode/src/session/prompt/codex_header.txt" + codexCacheTTL = 15 * time.Minute +) + +//go:embed prompts/codex_cli_instructions.md +var codexCLIInstructions string + +var codexModelMap = map[string]string{ + "gpt-5.1-codex": "gpt-5.1-codex", + "gpt-5.1-codex-low": "gpt-5.1-codex", + "gpt-5.1-codex-medium": "gpt-5.1-codex", + "gpt-5.1-codex-high": "gpt-5.1-codex", + "gpt-5.1-codex-max": "gpt-5.1-codex-max", + "gpt-5.1-codex-max-low": "gpt-5.1-codex-max", + "gpt-5.1-codex-max-medium": "gpt-5.1-codex-max", + "gpt-5.1-codex-max-high": "gpt-5.1-codex-max", + "gpt-5.1-codex-max-xhigh": "gpt-5.1-codex-max", + "gpt-5.2": "gpt-5.2", + "gpt-5.2-none": "gpt-5.2", + "gpt-5.2-low": "gpt-5.2", + "gpt-5.2-medium": "gpt-5.2", + "gpt-5.2-high": "gpt-5.2", + "gpt-5.2-xhigh": "gpt-5.2", + "gpt-5.2-codex": "gpt-5.2-codex", + "gpt-5.2-codex-low": "gpt-5.2-codex", + "gpt-5.2-codex-medium": "gpt-5.2-codex", + "gpt-5.2-codex-high": "gpt-5.2-codex", + "gpt-5.2-codex-xhigh": "gpt-5.2-codex", + "gpt-5.1-codex-mini": "gpt-5.1-codex-mini", + "gpt-5.1-codex-mini-medium": "gpt-5.1-codex-mini", + "gpt-5.1-codex-mini-high": "gpt-5.1-codex-mini", + "gpt-5.1": "gpt-5.1", + "gpt-5.1-none": "gpt-5.1", + "gpt-5.1-low": "gpt-5.1", + "gpt-5.1-medium": "gpt-5.1", + "gpt-5.1-high": "gpt-5.1", + "gpt-5.1-chat-latest": "gpt-5.1", + "gpt-5-codex": "gpt-5.1-codex", + "codex-mini-latest": "gpt-5.1-codex-mini", + "gpt-5-codex-mini": "gpt-5.1-codex-mini", + "gpt-5-codex-mini-medium": "gpt-5.1-codex-mini", + "gpt-5-codex-mini-high": "gpt-5.1-codex-mini", + "gpt-5": "gpt-5.1", + "gpt-5-mini": 
"gpt-5.1", + "gpt-5-nano": "gpt-5.1", +} + +type codexTransformResult struct { + Modified bool + NormalizedModel string + PromptCacheKey string +} + +type opencodeCacheMetadata struct { + ETag string `json:"etag"` + LastFetch string `json:"lastFetch,omitempty"` + LastChecked int64 `json:"lastChecked"` +} + +func applyCodexOAuthTransform(reqBody map[string]any) codexTransformResult { + result := codexTransformResult{} + // 工具续链需求会影响存储策略与 input 过滤逻辑。 + needsToolContinuation := NeedsToolContinuation(reqBody) + + model := "" + if v, ok := reqBody["model"].(string); ok { + model = v + } + normalizedModel := normalizeCodexModel(model) + if normalizedModel != "" { + if model != normalizedModel { + reqBody["model"] = normalizedModel + result.Modified = true + } + result.NormalizedModel = normalizedModel + } + + // OAuth 走 ChatGPT internal API 时,store 必须为 false;显式 true 也会强制覆盖。 + // 避免上游返回 "Store must be set to false"。 + if v, ok := reqBody["store"].(bool); !ok || v { + reqBody["store"] = false + result.Modified = true + } + if v, ok := reqBody["stream"].(bool); !ok || !v { + reqBody["stream"] = true + result.Modified = true + } + + if _, ok := reqBody["max_output_tokens"]; ok { + delete(reqBody, "max_output_tokens") + result.Modified = true + } + if _, ok := reqBody["max_completion_tokens"]; ok { + delete(reqBody, "max_completion_tokens") + result.Modified = true + } + + if normalizeCodexTools(reqBody) { + result.Modified = true + } + + if v, ok := reqBody["prompt_cache_key"].(string); ok { + result.PromptCacheKey = strings.TrimSpace(v) + } + + instructions := strings.TrimSpace(getOpenCodeCodexHeader()) + existingInstructions, _ := reqBody["instructions"].(string) + existingInstructions = strings.TrimSpace(existingInstructions) + + if instructions != "" { + if existingInstructions != instructions { + reqBody["instructions"] = instructions + result.Modified = true + } + } else if existingInstructions == "" { + // 未获取到 opencode 指令时,回退使用 Codex CLI 指令。 + codexInstructions := 
strings.TrimSpace(getCodexCLIInstructions()) + if codexInstructions != "" { + reqBody["instructions"] = codexInstructions + result.Modified = true + } + } + + // 续链场景保留 item_reference 与 id,避免 call_id 上下文丢失。 + if input, ok := reqBody["input"].([]any); ok { + input = filterCodexInput(input, needsToolContinuation) + reqBody["input"] = input + result.Modified = true + } + + return result +} + +func normalizeCodexModel(model string) string { + if model == "" { + return "gpt-5.1" + } + + modelID := model + if strings.Contains(modelID, "/") { + parts := strings.Split(modelID, "/") + modelID = parts[len(parts)-1] + } + + if mapped := getNormalizedCodexModel(modelID); mapped != "" { + return mapped + } + + normalized := strings.ToLower(modelID) + + if strings.Contains(normalized, "gpt-5.2-codex") || strings.Contains(normalized, "gpt 5.2 codex") { + return "gpt-5.2-codex" + } + if strings.Contains(normalized, "gpt-5.2") || strings.Contains(normalized, "gpt 5.2") { + return "gpt-5.2" + } + if strings.Contains(normalized, "gpt-5.1-codex-max") || strings.Contains(normalized, "gpt 5.1 codex max") { + return "gpt-5.1-codex-max" + } + if strings.Contains(normalized, "gpt-5.1-codex-mini") || strings.Contains(normalized, "gpt 5.1 codex mini") { + return "gpt-5.1-codex-mini" + } + if strings.Contains(normalized, "codex-mini-latest") || + strings.Contains(normalized, "gpt-5-codex-mini") || + strings.Contains(normalized, "gpt 5 codex mini") { + return "codex-mini-latest" + } + if strings.Contains(normalized, "gpt-5.1-codex") || strings.Contains(normalized, "gpt 5.1 codex") { + return "gpt-5.1-codex" + } + if strings.Contains(normalized, "gpt-5.1") || strings.Contains(normalized, "gpt 5.1") { + return "gpt-5.1" + } + if strings.Contains(normalized, "codex") { + return "gpt-5.1-codex" + } + if strings.Contains(normalized, "gpt-5") || strings.Contains(normalized, "gpt 5") { + return "gpt-5.1" + } + + return "gpt-5.1" +} + +func getNormalizedCodexModel(modelID string) string { + if modelID 
== "" { + return "" + } + if mapped, ok := codexModelMap[modelID]; ok { + return mapped + } + lower := strings.ToLower(modelID) + for key, value := range codexModelMap { + if strings.ToLower(key) == lower { + return value + } + } + return "" +} + +func getOpenCodeCachedPrompt(url, cacheFileName, metaFileName string) string { + cacheDir := codexCachePath("") + if cacheDir == "" { + return "" + } + cacheFile := filepath.Join(cacheDir, cacheFileName) + metaFile := filepath.Join(cacheDir, metaFileName) + + var cachedContent string + if content, ok := readFile(cacheFile); ok { + cachedContent = content + } + + var meta opencodeCacheMetadata + if loadJSON(metaFile, &meta) && meta.LastChecked > 0 && cachedContent != "" { + if time.Since(time.UnixMilli(meta.LastChecked)) < codexCacheTTL { + return cachedContent + } + } + + content, etag, status, err := fetchWithETag(url, meta.ETag) + if err == nil && status == http.StatusNotModified && cachedContent != "" { + return cachedContent + } + if err == nil && status >= 200 && status < 300 && content != "" { + _ = writeFile(cacheFile, content) + meta = opencodeCacheMetadata{ + ETag: etag, + LastFetch: time.Now().UTC().Format(time.RFC3339), + LastChecked: time.Now().UnixMilli(), + } + _ = writeJSON(metaFile, meta) + return content + } + + return cachedContent +} + +func getOpenCodeCodexHeader() string { + // 优先从 opencode 仓库缓存获取指令。 + opencodeInstructions := getOpenCodeCachedPrompt(opencodeCodexHeaderURL, "opencode-codex-header.txt", "opencode-codex-header-meta.json") + + // 若 opencode 指令可用,直接返回。 + if opencodeInstructions != "" { + return opencodeInstructions + } + + // 否则回退使用本地 Codex CLI 指令。 + return getCodexCLIInstructions() +} + +func getCodexCLIInstructions() string { + return codexCLIInstructions +} + +func GetOpenCodeInstructions() string { + return getOpenCodeCodexHeader() +} + +// GetCodexCLIInstructions 返回内置的 Codex CLI 指令内容。 +func GetCodexCLIInstructions() string { + return getCodexCLIInstructions() +} + +// 
ReplaceWithCodexInstructions 将请求 instructions 替换为内置 Codex 指令(必要时)。 +func ReplaceWithCodexInstructions(reqBody map[string]any) bool { + codexInstructions := strings.TrimSpace(getCodexCLIInstructions()) + if codexInstructions == "" { + return false + } + + existingInstructions, _ := reqBody["instructions"].(string) + if strings.TrimSpace(existingInstructions) != codexInstructions { + reqBody["instructions"] = codexInstructions + return true + } + + return false +} + +// IsInstructionError 判断错误信息是否与指令格式/系统提示相关。 +func IsInstructionError(errorMessage string) bool { + if errorMessage == "" { + return false + } + + lowerMsg := strings.ToLower(errorMessage) + instructionKeywords := []string{ + "instruction", + "instructions", + "system prompt", + "system message", + "invalid prompt", + "prompt format", + } + + for _, keyword := range instructionKeywords { + if strings.Contains(lowerMsg, keyword) { + return true + } + } + + return false +} + +// filterCodexInput 按需过滤 item_reference 与 id。 +// preserveReferences 为 true 时保持引用与 id,以满足续链请求对上下文的依赖。 +func filterCodexInput(input []any, preserveReferences bool) []any { + filtered := make([]any, 0, len(input)) + for _, item := range input { + m, ok := item.(map[string]any) + if !ok { + filtered = append(filtered, item) + continue + } + typ, _ := m["type"].(string) + if typ == "item_reference" { + if !preserveReferences { + continue + } + newItem := make(map[string]any, len(m)) + for key, value := range m { + newItem[key] = value + } + filtered = append(filtered, newItem) + continue + } + + newItem := m + copied := false + // 仅在需要修改字段时创建副本,避免直接改写原始输入。 + ensureCopy := func() { + if copied { + return + } + newItem = make(map[string]any, len(m)) + for key, value := range m { + newItem[key] = value + } + copied = true + } + + if isCodexToolCallItemType(typ) { + if callID, ok := m["call_id"].(string); !ok || strings.TrimSpace(callID) == "" { + if id, ok := m["id"].(string); ok && strings.TrimSpace(id) != "" { + ensureCopy() + 
newItem["call_id"] = id + } + } + } + + if !preserveReferences { + ensureCopy() + delete(newItem, "id") + if !isCodexToolCallItemType(typ) { + delete(newItem, "call_id") + } + } + + filtered = append(filtered, newItem) + } + return filtered +} + +func isCodexToolCallItemType(typ string) bool { + if typ == "" { + return false + } + return strings.HasSuffix(typ, "_call") || strings.HasSuffix(typ, "_call_output") +} + +func normalizeCodexTools(reqBody map[string]any) bool { + rawTools, ok := reqBody["tools"] + if !ok || rawTools == nil { + return false + } + tools, ok := rawTools.([]any) + if !ok { + return false + } + + modified := false + for idx, tool := range tools { + toolMap, ok := tool.(map[string]any) + if !ok { + continue + } + + toolType, _ := toolMap["type"].(string) + if strings.TrimSpace(toolType) != "function" { + continue + } + + function, ok := toolMap["function"].(map[string]any) + if !ok { + continue + } + + if _, ok := toolMap["name"]; !ok { + if name, ok := function["name"].(string); ok && strings.TrimSpace(name) != "" { + toolMap["name"] = name + modified = true + } + } + if _, ok := toolMap["description"]; !ok { + if desc, ok := function["description"].(string); ok && strings.TrimSpace(desc) != "" { + toolMap["description"] = desc + modified = true + } + } + if _, ok := toolMap["parameters"]; !ok { + if params, ok := function["parameters"]; ok { + toolMap["parameters"] = params + modified = true + } + } + if _, ok := toolMap["strict"]; !ok { + if strict, ok := function["strict"]; ok { + toolMap["strict"] = strict + modified = true + } + } + + tools[idx] = toolMap + } + + if modified { + reqBody["tools"] = tools + } + + return modified +} + +func codexCachePath(filename string) string { + home, err := os.UserHomeDir() + if err != nil { + return "" + } + cacheDir := filepath.Join(home, ".opencode", "cache") + if filename == "" { + return cacheDir + } + return filepath.Join(cacheDir, filename) +} + +func readFile(path string) (string, bool) { + if 
path == "" { + return "", false + } + data, err := os.ReadFile(path) + if err != nil { + return "", false + } + return string(data), true +} + +func writeFile(path, content string) error { + if path == "" { + return fmt.Errorf("empty cache path") + } + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + return err + } + return os.WriteFile(path, []byte(content), 0o644) +} + +func loadJSON(path string, target any) bool { + data, err := os.ReadFile(path) + if err != nil { + return false + } + if err := json.Unmarshal(data, target); err != nil { + return false + } + return true +} + +func writeJSON(path string, value any) error { + if path == "" { + return fmt.Errorf("empty json path") + } + if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { + return err + } + data, err := json.Marshal(value) + if err != nil { + return err + } + return os.WriteFile(path, data, 0o644) +} + +func fetchWithETag(url, etag string) (string, string, int, error) { + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return "", "", 0, err + } + req.Header.Set("User-Agent", "sub2api-codex") + if etag != "" { + req.Header.Set("If-None-Match", etag) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", 0, err + } + defer func() { + _ = resp.Body.Close() + }() + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", "", resp.StatusCode, err + } + return string(body), resp.Header.Get("etag"), resp.StatusCode, nil +} diff --git a/backend/internal/service/openai_codex_transform_test.go b/backend/internal/service/openai_codex_transform_test.go new file mode 100644 index 00000000..0ff9485a --- /dev/null +++ b/backend/internal/service/openai_codex_transform_test.go @@ -0,0 +1,167 @@ +package service + +import ( + "encoding/json" + "os" + "path/filepath" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestApplyCodexOAuthTransform_ToolContinuationPreservesInput(t *testing.T) { + // 续链场景:保留 
item_reference 与 id,但不再强制 store=true。 + setupCodexCache(t) + + reqBody := map[string]any{ + "model": "gpt-5.2", + "input": []any{ + map[string]any{"type": "item_reference", "id": "ref1", "text": "x"}, + map[string]any{"type": "function_call_output", "call_id": "call_1", "output": "ok", "id": "o1"}, + }, + "tool_choice": "auto", + } + + applyCodexOAuthTransform(reqBody) + + // 未显式设置 store=true,默认为 false。 + store, ok := reqBody["store"].(bool) + require.True(t, ok) + require.False(t, store) + + input, ok := reqBody["input"].([]any) + require.True(t, ok) + require.Len(t, input, 2) + + // 校验 input[0] 为 map,避免断言失败导致测试中断。 + first, ok := input[0].(map[string]any) + require.True(t, ok) + require.Equal(t, "item_reference", first["type"]) + require.Equal(t, "ref1", first["id"]) + + // 校验 input[1] 为 map,确保后续字段断言安全。 + second, ok := input[1].(map[string]any) + require.True(t, ok) + require.Equal(t, "o1", second["id"]) +} + +func TestApplyCodexOAuthTransform_ExplicitStoreFalsePreserved(t *testing.T) { + // 续链场景:显式 store=false 不再强制为 true,保持 false。 + setupCodexCache(t) + + reqBody := map[string]any{ + "model": "gpt-5.1", + "store": false, + "input": []any{ + map[string]any{"type": "function_call_output", "call_id": "call_1"}, + }, + "tool_choice": "auto", + } + + applyCodexOAuthTransform(reqBody) + + store, ok := reqBody["store"].(bool) + require.True(t, ok) + require.False(t, store) +} + +func TestApplyCodexOAuthTransform_ExplicitStoreTrueForcedFalse(t *testing.T) { + // 显式 store=true 也会强制为 false。 + setupCodexCache(t) + + reqBody := map[string]any{ + "model": "gpt-5.1", + "store": true, + "input": []any{ + map[string]any{"type": "function_call_output", "call_id": "call_1"}, + }, + "tool_choice": "auto", + } + + applyCodexOAuthTransform(reqBody) + + store, ok := reqBody["store"].(bool) + require.True(t, ok) + require.False(t, store) +} + +func TestApplyCodexOAuthTransform_NonContinuationDefaultsStoreFalseAndStripsIDs(t *testing.T) { + // 非续链场景:未设置 store 时默认 false,并移除 input 中的 id。 
+ setupCodexCache(t) + + reqBody := map[string]any{ + "model": "gpt-5.1", + "input": []any{ + map[string]any{"type": "text", "id": "t1", "text": "hi"}, + }, + } + + applyCodexOAuthTransform(reqBody) + + store, ok := reqBody["store"].(bool) + require.True(t, ok) + require.False(t, store) + + input, ok := reqBody["input"].([]any) + require.True(t, ok) + require.Len(t, input, 1) + // 校验 input[0] 为 map,避免类型不匹配触发 errcheck。 + item, ok := input[0].(map[string]any) + require.True(t, ok) + _, hasID := item["id"] + require.False(t, hasID) +} + +func TestFilterCodexInput_RemovesItemReferenceWhenNotPreserved(t *testing.T) { + input := []any{ + map[string]any{"type": "item_reference", "id": "ref1"}, + map[string]any{"type": "text", "id": "t1", "text": "hi"}, + } + + filtered := filterCodexInput(input, false) + require.Len(t, filtered, 1) + // 校验 filtered[0] 为 map,确保字段检查可靠。 + item, ok := filtered[0].(map[string]any) + require.True(t, ok) + require.Equal(t, "text", item["type"]) + _, hasID := item["id"] + require.False(t, hasID) +} + +func TestApplyCodexOAuthTransform_EmptyInput(t *testing.T) { + // 空 input 应保持为空且不触发异常。 + setupCodexCache(t) + + reqBody := map[string]any{ + "model": "gpt-5.1", + "input": []any{}, + } + + applyCodexOAuthTransform(reqBody) + + input, ok := reqBody["input"].([]any) + require.True(t, ok) + require.Len(t, input, 0) +} + +func setupCodexCache(t *testing.T) { + t.Helper() + + // 使用临时 HOME 避免触发网络拉取 header。 + tempDir := t.TempDir() + t.Setenv("HOME", tempDir) + + cacheDir := filepath.Join(tempDir, ".opencode", "cache") + require.NoError(t, os.MkdirAll(cacheDir, 0o755)) + require.NoError(t, os.WriteFile(filepath.Join(cacheDir, "opencode-codex-header.txt"), []byte("header"), 0o644)) + + meta := map[string]any{ + "etag": "", + "lastFetch": time.Now().UTC().Format(time.RFC3339), + "lastChecked": time.Now().UnixMilli(), + } + data, err := json.Marshal(meta) + require.NoError(t, err) + require.NoError(t, os.WriteFile(filepath.Join(cacheDir, 
"opencode-codex-header-meta.json"), data, 0o644)) +} diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 42e98585..c7d94882 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -20,6 +20,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" "github.com/Wei-Shaw/sub2api/internal/util/responseheaders" "github.com/Wei-Shaw/sub2api/internal/util/urlvalidator" "github.com/gin-gonic/gin" @@ -41,6 +42,7 @@ var openaiSSEDataRe = regexp.MustCompile(`^data:\s*`) var openaiAllowedHeaders = map[string]bool{ "accept-language": true, "content-type": true, + "conversation_id": true, "user-agent": true, "originator": true, "session_id": true, @@ -84,12 +86,15 @@ type OpenAIGatewayService struct { userSubRepo UserSubscriptionRepository cache GatewayCache cfg *config.Config + schedulerSnapshot *SchedulerSnapshotService concurrencyService *ConcurrencyService billingService *BillingService rateLimitService *RateLimitService billingCacheService *BillingCacheService httpUpstream HTTPUpstream deferredService *DeferredService + openAITokenProvider *OpenAITokenProvider + toolCorrector *CodexToolCorrector } // NewOpenAIGatewayService creates a new OpenAIGatewayService @@ -100,12 +105,14 @@ func NewOpenAIGatewayService( userSubRepo UserSubscriptionRepository, cache GatewayCache, cfg *config.Config, + schedulerSnapshot *SchedulerSnapshotService, concurrencyService *ConcurrencyService, billingService *BillingService, rateLimitService *RateLimitService, billingCacheService *BillingCacheService, httpUpstream HTTPUpstream, deferredService *DeferredService, + openAITokenProvider *OpenAITokenProvider, ) *OpenAIGatewayService { return &OpenAIGatewayService{ accountRepo: accountRepo, @@ -114,12 +121,15 @@ func NewOpenAIGatewayService( userSubRepo: userSubRepo, cache: cache, cfg: cfg, + 
schedulerSnapshot: schedulerSnapshot, concurrencyService: concurrencyService, billingService: billingService, rateLimitService: rateLimitService, billingCacheService: billingCacheService, httpUpstream: httpUpstream, deferredService: deferredService, + openAITokenProvider: openAITokenProvider, + toolCorrector: NewCodexToolCorrector(), } } @@ -158,7 +168,7 @@ func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.C accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash) if err == nil && accountID > 0 { if _, excluded := excludedIDs[accountID]; !excluded { - account, err := s.accountRepo.GetByID(ctx, accountID) + account, err := s.getSchedulableAccount(ctx, accountID) if err == nil && account.IsSchedulable() && account.IsOpenAI() && (requestedModel == "" || account.IsModelSupported(requestedModel)) { // Refresh sticky session TTL _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), "openai:"+sessionHash, openaiStickySessionTTL) @@ -169,16 +179,7 @@ func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.C } // 2. 
Get schedulable OpenAI accounts - var accounts []Account - var err error - // 简易模式:忽略分组限制,查询所有可用账号 - if s.cfg.RunMode == config.RunModeSimple { - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, PlatformOpenAI) - } else if groupID != nil { - accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, *groupID, PlatformOpenAI) - } else { - accounts, err = s.accountRepo.ListSchedulableByPlatform(ctx, PlatformOpenAI) - } + accounts, err := s.listSchedulableAccounts(ctx, groupID) if err != nil { return nil, fmt.Errorf("query accounts failed: %w", err) } @@ -190,6 +191,11 @@ func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.C if _, excluded := excludedIDs[acc.ID]; excluded { continue } + // Scheduler snapshots can be temporarily stale; re-check schedulability here to + // avoid selecting accounts that were recently rate-limited/overloaded. + if !acc.IsSchedulable() { + continue + } // Check model support if requestedModel != "" && !acc.IsModelSupported(requestedModel) { continue @@ -300,7 +306,7 @@ func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Contex if sessionHash != "" { accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash) if err == nil && accountID > 0 && !isExcluded(accountID) { - account, err := s.accountRepo.GetByID(ctx, accountID) + account, err := s.getSchedulableAccount(ctx, accountID) if err == nil && account.IsSchedulable() && account.IsOpenAI() && (requestedModel == "" || account.IsModelSupported(requestedModel)) { result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) @@ -336,6 +342,12 @@ func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Contex if isExcluded(acc.ID) { continue } + // Scheduler snapshots can be temporarily stale (bucket rebuild is throttled); + // re-check schedulability here so recently rate-limited/overloaded accounts + // are not selected again before the bucket is 
rebuilt. + if !acc.IsSchedulable() { + continue + } if requestedModel != "" && !acc.IsModelSupported(requestedModel) { continue } @@ -445,6 +457,10 @@ func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Contex } func (s *OpenAIGatewayService) listSchedulableAccounts(ctx context.Context, groupID *int64) ([]Account, error) { + if s.schedulerSnapshot != nil { + accounts, _, err := s.schedulerSnapshot.ListSchedulableAccounts(ctx, groupID, PlatformOpenAI, false) + return accounts, err + } var accounts []Account var err error if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple { @@ -467,6 +483,13 @@ func (s *OpenAIGatewayService) tryAcquireAccountSlot(ctx context.Context, accoun return s.concurrencyService.AcquireAccountSlot(ctx, accountID, maxConcurrency) } +func (s *OpenAIGatewayService) getSchedulableAccount(ctx context.Context, accountID int64) (*Account, error) { + if s.schedulerSnapshot != nil { + return s.schedulerSnapshot.GetAccount(ctx, accountID) + } + return s.accountRepo.GetByID(ctx, accountID) +} + func (s *OpenAIGatewayService) schedulingConfig() config.GatewaySchedulingConfig { if s.cfg != nil { return s.cfg.Gateway.Scheduling @@ -485,6 +508,15 @@ func (s *OpenAIGatewayService) schedulingConfig() config.GatewaySchedulingConfig func (s *OpenAIGatewayService) GetAccessToken(ctx context.Context, account *Account) (string, string, error) { switch account.Type { case AccountTypeOAuth: + // 使用 TokenProvider 获取缓存的 token + if s.openAITokenProvider != nil { + accessToken, err := s.openAITokenProvider.GetAccessToken(ctx, account) + if err != nil { + return "", "", err + } + return accessToken, "oauth", nil + } + // 降级:TokenProvider 未配置时直接从账号读取 accessToken := account.GetOpenAIAccessToken() if accessToken == "" { return "", "", errors.New("access_token not found in credentials") @@ -511,7 +543,7 @@ func (s *OpenAIGatewayService) shouldFailoverUpstreamError(statusCode int) bool } func (s *OpenAIGatewayService) handleFailoverSideEffects(ctx 
context.Context, resp *http.Response, account *Account) { - body, _ := io.ReadAll(resp.Body) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, body) } @@ -528,33 +560,97 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco // Extract model and stream from parsed body reqModel, _ := reqBody["model"].(string) reqStream, _ := reqBody["stream"].(bool) + promptCacheKey := "" + if v, ok := reqBody["prompt_cache_key"].(string); ok { + promptCacheKey = strings.TrimSpace(v) + } // Track if body needs re-serialization bodyModified := false originalModel := reqModel - // Apply model mapping + isCodexCLI := openai.IsCodexCLIRequest(c.GetHeader("User-Agent")) + + // 对所有请求执行模型映射(包含 Codex CLI)。 mappedModel := account.GetMappedModel(reqModel) if mappedModel != reqModel { + log.Printf("[OpenAI] Model mapping applied: %s -> %s (account: %s, isCodexCLI: %v)", reqModel, mappedModel, account.Name, isCodexCLI) reqBody["model"] = mappedModel bodyModified = true } - // For OAuth accounts using ChatGPT internal API: - // 1. Add store: false - // 2. 
Normalize input format for Codex API compatibility - if account.Type == AccountTypeOAuth { - reqBody["store"] = false - bodyModified = true - - // Normalize input format: convert AI SDK multi-part content format to simplified format - // AI SDK sends: {"content": [{"type": "input_text", "text": "..."}]} - // Codex API expects: {"content": "..."} - if normalizeInputForCodexAPI(reqBody) { + // 针对所有 OpenAI 账号执行 Codex 模型名规范化,确保上游识别一致。 + if model, ok := reqBody["model"].(string); ok { + normalizedModel := normalizeCodexModel(model) + if normalizedModel != "" && normalizedModel != model { + log.Printf("[OpenAI] Codex model normalization: %s -> %s (account: %s, type: %s, isCodexCLI: %v)", + model, normalizedModel, account.Name, account.Type, isCodexCLI) + reqBody["model"] = normalizedModel + mappedModel = normalizedModel bodyModified = true } } + // 规范化 reasoning.effort 参数(minimal -> none),与上游允许值对齐。 + if reasoning, ok := reqBody["reasoning"].(map[string]any); ok { + if effort, ok := reasoning["effort"].(string); ok && effort == "minimal" { + reasoning["effort"] = "none" + bodyModified = true + log.Printf("[OpenAI] Normalized reasoning.effort: minimal -> none (account: %s)", account.Name) + } + } + + if account.Type == AccountTypeOAuth && !isCodexCLI { + codexResult := applyCodexOAuthTransform(reqBody) + if codexResult.Modified { + bodyModified = true + } + if codexResult.NormalizedModel != "" { + mappedModel = codexResult.NormalizedModel + } + if codexResult.PromptCacheKey != "" { + promptCacheKey = codexResult.PromptCacheKey + } + } + + // Handle max_output_tokens based on platform and account type + if !isCodexCLI { + if maxOutputTokens, hasMaxOutputTokens := reqBody["max_output_tokens"]; hasMaxOutputTokens { + switch account.Platform { + case PlatformOpenAI: + // For OpenAI API Key, remove max_output_tokens (not supported) + // For OpenAI OAuth (Responses API), keep it (supported) + if account.Type == AccountTypeAPIKey { + delete(reqBody, "max_output_tokens") + 
bodyModified = true + } + case PlatformAnthropic: + // For Anthropic (Claude), convert to max_tokens + delete(reqBody, "max_output_tokens") + if _, hasMaxTokens := reqBody["max_tokens"]; !hasMaxTokens { + reqBody["max_tokens"] = maxOutputTokens + } + bodyModified = true + case PlatformGemini: + // For Gemini, remove (will be handled by Gemini-specific transform) + delete(reqBody, "max_output_tokens") + bodyModified = true + default: + // For unknown platforms, remove to be safe + delete(reqBody, "max_output_tokens") + bodyModified = true + } + } + + // Also handle max_completion_tokens (similar logic) + if _, hasMaxCompletionTokens := reqBody["max_completion_tokens"]; hasMaxCompletionTokens { + if account.Type == AccountTypeAPIKey || account.Platform != PlatformOpenAI { + delete(reqBody, "max_completion_tokens") + bodyModified = true + } + } + } + // Re-serialize body only if modified if bodyModified { var err error @@ -571,7 +667,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco } // Build upstream request - upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, reqStream) + upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, reqStream, promptCacheKey, isCodexCLI) if err != nil { return nil, err } @@ -582,16 +678,63 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco proxyURL = account.Proxy.URL() } + // Capture upstream request body for ops retry of this attempt. + if c != nil { + c.Set(OpsUpstreamRequestBodyKey, string(body)) + } + // Send request resp, err := s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) if err != nil { - return nil, fmt.Errorf("upstream request failed: %w", err) + // Ensure the client receives an error response (handlers assume Forward writes on non-failover errors). 
+ safeErr := sanitizeUpstreamErrorMessage(err.Error()) + setOpsUpstreamError(c, 0, safeErr, "") + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "request_error", + Message: safeErr, + }) + c.JSON(http.StatusBadGateway, gin.H{ + "error": gin.H{ + "type": "upstream_error", + "message": "Upstream request failed", + }, + }) + return nil, fmt.Errorf("upstream request failed: %s", safeErr) } defer func() { _ = resp.Body.Close() }() // Handle error response if resp.StatusCode >= 400 { if s.shouldFailoverUpstreamError(resp.StatusCode) { + respBody, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + _ = resp.Body.Close() + resp.Body = io.NopCloser(bytes.NewReader(respBody)) + + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(respBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(respBody), maxBytes) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "failover", + Message: upstreamMsg, + Detail: upstreamDetail, + }) + s.handleFailoverSideEffects(ctx, resp, account) return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } @@ -632,7 +775,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco }, nil } -func (s *OpenAIGatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, token string, isStream bool) (*http.Request, error) { +func (s *OpenAIGatewayService) buildUpstreamRequest(ctx context.Context, c *gin.Context, account *Account, body []byte, 
token string, isStream bool, promptCacheKey string, isCodexCLI bool) (*http.Request, error) { // Determine target URL based on account type var targetURL string switch account.Type { @@ -672,12 +815,6 @@ func (s *OpenAIGatewayService) buildUpstreamRequest(ctx context.Context, c *gin. if chatgptAccountID != "" { req.Header.Set("chatgpt-account-id", chatgptAccountID) } - // Set accept header based on stream mode - if isStream { - req.Header.Set("accept", "text/event-stream") - } else { - req.Header.Set("accept", "application/json") - } } // Whitelist passthrough headers @@ -689,6 +826,19 @@ func (s *OpenAIGatewayService) buildUpstreamRequest(ctx context.Context, c *gin. } } } + if account.Type == AccountTypeOAuth { + req.Header.Set("OpenAI-Beta", "responses=experimental") + if isCodexCLI { + req.Header.Set("originator", "codex_cli_rs") + } else { + req.Header.Set("originator", "opencode") + } + req.Header.Set("accept", "text/event-stream") + if promptCacheKey != "" { + req.Header.Set("conversation_id", promptCacheKey) + req.Header.Set("session_id", promptCacheKey) + } + } // Apply custom User-Agent if configured customUA := account.GetOpenAIUserAgent() @@ -705,17 +855,53 @@ func (s *OpenAIGatewayService) buildUpstreamRequest(ctx context.Context, c *gin. 
} func (s *OpenAIGatewayService) handleErrorResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account) (*OpenAIForwardResult, error) { - body, _ := io.ReadAll(resp.Body) + body, _ := io.ReadAll(io.LimitReader(resp.Body, 2<<20)) + + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(body)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + upstreamDetail := "" + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + maxBytes := s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes + if maxBytes <= 0 { + maxBytes = 2048 + } + upstreamDetail = truncateString(string(body), maxBytes) + } + setOpsUpstreamError(c, resp.StatusCode, upstreamMsg, upstreamDetail) + + if s.cfg != nil && s.cfg.Gateway.LogUpstreamErrorBody { + log.Printf( + "OpenAI upstream error %d (account=%d platform=%s type=%s): %s", + resp.StatusCode, + account.ID, + account.Platform, + account.Type, + truncateForLog(body, s.cfg.Gateway.LogUpstreamErrorBodyMaxBytes), + ) + } // Check custom error codes if !account.ShouldHandleErrorCode(resp.StatusCode) { + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: "http_error", + Message: upstreamMsg, + Detail: upstreamDetail, + }) c.JSON(http.StatusInternalServerError, gin.H{ "error": gin.H{ "type": "upstream_error", "message": "Upstream gateway error", }, }) - return nil, fmt.Errorf("upstream error: %d (not in custom error codes)", resp.StatusCode) + if upstreamMsg == "" { + return nil, fmt.Errorf("upstream error: %d (not in custom error codes)", resp.StatusCode) + } + return nil, fmt.Errorf("upstream error: %d (not in custom error codes) message=%s", resp.StatusCode, upstreamMsg) } // Handle upstream error (mark account status) @@ -723,6 +909,20 @@ func (s *OpenAIGatewayService) handleErrorResponse(ctx context.Context, resp *ht if 
s.rateLimitService != nil { shouldDisable = s.rateLimitService.HandleUpstreamError(ctx, account, resp.StatusCode, resp.Header, body) } + kind := "http_error" + if shouldDisable { + kind = "failover" + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: resp.StatusCode, + UpstreamRequestID: resp.Header.Get("x-request-id"), + Kind: kind, + Message: upstreamMsg, + Detail: upstreamDetail, + }) if shouldDisable { return nil, &UpstreamFailoverError{StatusCode: resp.StatusCode} } @@ -761,7 +961,10 @@ func (s *OpenAIGatewayService) handleErrorResponse(ctx context.Context, resp *ht }, }) - return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) + if upstreamMsg == "" { + return nil, fmt.Errorf("upstream error: %d", resp.StatusCode) + } + return nil, fmt.Errorf("upstream error: %d message=%s", resp.StatusCode, upstreamMsg) } // openaiStreamingResult streaming response result @@ -905,6 +1108,11 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp line = s.replaceModelInSSELine(line, mappedModel, originalModel) } + // Correct Codex tool calls if needed (apply_patch -> edit, etc.) 
+ if correctedData, corrected := s.toolCorrector.CorrectToolCallsInSSEData(data); corrected { + line = "data: " + correctedData + } + // Forward line if _, err := fmt.Fprintf(w, "%s\n", line); err != nil { sendErrorEvent("write_failed") @@ -933,6 +1141,10 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp continue } log.Printf("Stream data interval timeout: account=%d model=%s interval=%s", account.ID, originalModel, streamInterval) + // 处理流超时,可能标记账户为临时不可调度或错误状态 + if s.rateLimitService != nil { + s.rateLimitService.HandleStreamTimeout(ctx, account, originalModel) + } sendErrorEvent("stream_timeout") return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout") @@ -988,6 +1200,20 @@ func (s *OpenAIGatewayService) replaceModelInSSELine(line, fromModel, toModel st return line } +// correctToolCallsInResponseBody 修正响应体中的工具调用 +func (s *OpenAIGatewayService) correctToolCallsInResponseBody(body []byte) []byte { + if len(body) == 0 { + return body + } + + bodyStr := string(body) + corrected, changed := s.toolCorrector.CorrectToolCallsInSSEData(bodyStr) + if changed { + return []byte(corrected) + } + return body +} + func (s *OpenAIGatewayService) parseSSEUsage(data string, usage *OpenAIUsage) { // Parse response.completed event for usage (OpenAI Responses format) var event struct { @@ -1016,6 +1242,13 @@ func (s *OpenAIGatewayService) handleNonStreamingResponse(ctx context.Context, r return nil, err } + if account.Type == AccountTypeOAuth { + bodyLooksLikeSSE := bytes.Contains(body, []byte("data:")) || bytes.Contains(body, []byte("event:")) + if isEventStreamResponse(resp.Header) || bodyLooksLikeSSE { + return s.handleOAuthSSEToJSON(resp, c, body, originalModel, mappedModel) + } + } + // Parse usage var response struct { Usage struct { @@ -1055,6 +1288,112 @@ func (s *OpenAIGatewayService) handleNonStreamingResponse(ctx context.Context, r return usage, nil } +func 
isEventStreamResponse(header http.Header) bool { + contentType := strings.ToLower(header.Get("Content-Type")) + return strings.Contains(contentType, "text/event-stream") +} + +func (s *OpenAIGatewayService) handleOAuthSSEToJSON(resp *http.Response, c *gin.Context, body []byte, originalModel, mappedModel string) (*OpenAIUsage, error) { + bodyText := string(body) + finalResponse, ok := extractCodexFinalResponse(bodyText) + + usage := &OpenAIUsage{} + if ok { + var response struct { + Usage struct { + InputTokens int `json:"input_tokens"` + OutputTokens int `json:"output_tokens"` + InputTokenDetails struct { + CachedTokens int `json:"cached_tokens"` + } `json:"input_tokens_details"` + } `json:"usage"` + } + if err := json.Unmarshal(finalResponse, &response); err == nil { + usage.InputTokens = response.Usage.InputTokens + usage.OutputTokens = response.Usage.OutputTokens + usage.CacheReadInputTokens = response.Usage.InputTokenDetails.CachedTokens + } + body = finalResponse + if originalModel != mappedModel { + body = s.replaceModelInResponseBody(body, mappedModel, originalModel) + } + // Correct tool calls in final response + body = s.correctToolCallsInResponseBody(body) + } else { + usage = s.parseSSEUsageFromBody(bodyText) + if originalModel != mappedModel { + bodyText = s.replaceModelInSSEBody(bodyText, mappedModel, originalModel) + } + body = []byte(bodyText) + } + + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + + contentType := "application/json; charset=utf-8" + if !ok { + contentType = resp.Header.Get("Content-Type") + if contentType == "" { + contentType = "text/event-stream" + } + } + c.Data(resp.StatusCode, contentType, body) + + return usage, nil +} + +func extractCodexFinalResponse(body string) ([]byte, bool) { + lines := strings.Split(body, "\n") + for _, line := range lines { + if !openaiSSEDataRe.MatchString(line) { + continue + } + data := openaiSSEDataRe.ReplaceAllString(line, "") + if data == "" 
|| data == "[DONE]" { + continue + } + var event struct { + Type string `json:"type"` + Response json.RawMessage `json:"response"` + } + if json.Unmarshal([]byte(data), &event) != nil { + continue + } + if event.Type == "response.done" || event.Type == "response.completed" { + if len(event.Response) > 0 { + return event.Response, true + } + } + } + return nil, false +} + +func (s *OpenAIGatewayService) parseSSEUsageFromBody(body string) *OpenAIUsage { + usage := &OpenAIUsage{} + lines := strings.Split(body, "\n") + for _, line := range lines { + if !openaiSSEDataRe.MatchString(line) { + continue + } + data := openaiSSEDataRe.ReplaceAllString(line, "") + if data == "" || data == "[DONE]" { + continue + } + s.parseSSEUsage(data, usage) + } + return usage +} + +func (s *OpenAIGatewayService) replaceModelInSSEBody(body, fromModel, toModel string) string { + lines := strings.Split(body, "\n") + for i, line := range lines { + if !openaiSSEDataRe.MatchString(line) { + continue + } + lines[i] = s.replaceModelInSSELine(line, fromModel, toModel) + } + return strings.Join(lines, "\n") +} + func (s *OpenAIGatewayService) validateUpstreamBaseURL(raw string) (string, error) { if s.cfg != nil && !s.cfg.Security.URLAllowlist.Enabled { normalized, err := urlvalidator.ValidateURLFormat(raw, s.cfg.Security.URLAllowlist.AllowInsecureHTTP) @@ -1094,101 +1433,6 @@ func (s *OpenAIGatewayService) replaceModelInResponseBody(body []byte, fromModel return newBody } -// normalizeInputForCodexAPI converts AI SDK multi-part content format to simplified format -// that the ChatGPT internal Codex API expects. -// -// AI SDK sends content as an array of typed objects: -// -// {"content": [{"type": "input_text", "text": "hello"}]} -// -// ChatGPT Codex API expects content as a simple string: -// -// {"content": "hello"} -// -// This function modifies reqBody in-place and returns true if any modification was made. 
-func normalizeInputForCodexAPI(reqBody map[string]any) bool { - input, ok := reqBody["input"] - if !ok { - return false - } - - // Handle case where input is a simple string (already compatible) - if _, isString := input.(string); isString { - return false - } - - // Handle case where input is an array of messages - inputArray, ok := input.([]any) - if !ok { - return false - } - - modified := false - for _, item := range inputArray { - message, ok := item.(map[string]any) - if !ok { - continue - } - - content, ok := message["content"] - if !ok { - continue - } - - // If content is already a string, no conversion needed - if _, isString := content.(string); isString { - continue - } - - // If content is an array (AI SDK format), convert to string - contentArray, ok := content.([]any) - if !ok { - continue - } - - // Extract text from content array - var textParts []string - for _, part := range contentArray { - partMap, ok := part.(map[string]any) - if !ok { - continue - } - - // Handle different content types - partType, _ := partMap["type"].(string) - switch partType { - case "input_text", "text": - // Extract text from input_text or text type - if text, ok := partMap["text"].(string); ok { - textParts = append(textParts, text) - } - case "input_image", "image": - // For images, we need to preserve the original format - // as ChatGPT Codex API may support images in a different way - // For now, skip image parts (they will be lost in conversion) - // TODO: Consider preserving image data or handling it separately - continue - case "input_file", "file": - // Similar to images, file inputs may need special handling - continue - default: - // For unknown types, try to extract text if available - if text, ok := partMap["text"].(string); ok { - textParts = append(textParts, text) - } - } - } - - // Convert content array to string - if len(textParts) > 0 { - message["content"] = strings.Join(textParts, "\n") - modified = true - } - } - - return modified -} - // 
OpenAIRecordUsageInput input for recording usage type OpenAIRecordUsageInput struct { Result *OpenAIForwardResult @@ -1197,6 +1441,7 @@ type OpenAIRecordUsageInput struct { Account *Account Subscription *UserSubscription UserAgent string // 请求的 User-Agent + IPAddress string // 请求的客户端 IP 地址 } // RecordUsage records usage and deducts balance @@ -1242,28 +1487,30 @@ func (s *OpenAIGatewayService) RecordUsage(ctx context.Context, input *OpenAIRec // Create usage log durationMs := int(result.Duration.Milliseconds()) + accountRateMultiplier := account.BillingRateMultiplier() usageLog := &UsageLog{ - UserID: user.ID, - APIKeyID: apiKey.ID, - AccountID: account.ID, - RequestID: result.RequestID, - Model: result.Model, - InputTokens: actualInputTokens, - OutputTokens: result.Usage.OutputTokens, - CacheCreationTokens: result.Usage.CacheCreationInputTokens, - CacheReadTokens: result.Usage.CacheReadInputTokens, - InputCost: cost.InputCost, - OutputCost: cost.OutputCost, - CacheCreationCost: cost.CacheCreationCost, - CacheReadCost: cost.CacheReadCost, - TotalCost: cost.TotalCost, - ActualCost: cost.ActualCost, - RateMultiplier: multiplier, - BillingType: billingType, - Stream: result.Stream, - DurationMs: &durationMs, - FirstTokenMs: result.FirstTokenMs, - CreatedAt: time.Now(), + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + RequestID: result.RequestID, + Model: result.Model, + InputTokens: actualInputTokens, + OutputTokens: result.Usage.OutputTokens, + CacheCreationTokens: result.Usage.CacheCreationInputTokens, + CacheReadTokens: result.Usage.CacheReadInputTokens, + InputCost: cost.InputCost, + OutputCost: cost.OutputCost, + CacheCreationCost: cost.CacheCreationCost, + CacheReadCost: cost.CacheReadCost, + TotalCost: cost.TotalCost, + ActualCost: cost.ActualCost, + RateMultiplier: multiplier, + AccountRateMultiplier: &accountRateMultiplier, + BillingType: billingType, + Stream: result.Stream, + DurationMs: &durationMs, + FirstTokenMs: result.FirstTokenMs, 
+ CreatedAt: time.Now(), } // 添加 UserAgent @@ -1271,6 +1518,11 @@ func (s *OpenAIGatewayService) RecordUsage(ctx context.Context, input *OpenAIRec usageLog.UserAgent = &input.UserAgent } + // 添加 IPAddress + if input.IPAddress != "" { + usageLog.IPAddress = &input.IPAddress + } + if apiKey.GroupID != nil { usageLog.GroupID = apiKey.GroupID } diff --git a/backend/internal/service/openai_gateway_service_test.go b/backend/internal/service/openai_gateway_service_test.go index 8562d940..42b88b7d 100644 --- a/backend/internal/service/openai_gateway_service_test.go +++ b/backend/internal/service/openai_gateway_service_test.go @@ -3,6 +3,7 @@ package service import ( "bufio" "bytes" + "context" "errors" "io" "net/http" @@ -15,6 +16,129 @@ import ( "github.com/gin-gonic/gin" ) +type stubOpenAIAccountRepo struct { + AccountRepository + accounts []Account +} + +func (r stubOpenAIAccountRepo) ListSchedulableByGroupIDAndPlatform(ctx context.Context, groupID int64, platform string) ([]Account, error) { + return append([]Account(nil), r.accounts...), nil +} + +func (r stubOpenAIAccountRepo) ListSchedulableByPlatform(ctx context.Context, platform string) ([]Account, error) { + return append([]Account(nil), r.accounts...), nil +} + +type stubConcurrencyCache struct { + ConcurrencyCache +} + +func (c stubConcurrencyCache) AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) { + return true, nil +} + +func (c stubConcurrencyCache) ReleaseAccountSlot(ctx context.Context, accountID int64, requestID string) error { + return nil +} + +func (c stubConcurrencyCache) GetAccountsLoadBatch(ctx context.Context, accounts []AccountWithConcurrency) (map[int64]*AccountLoadInfo, error) { + out := make(map[int64]*AccountLoadInfo, len(accounts)) + for _, acc := range accounts { + out[acc.ID] = &AccountLoadInfo{AccountID: acc.ID, LoadRate: 0} + } + return out, nil +} + +func TestOpenAISelectAccountWithLoadAwareness_FiltersUnschedulable(t 
*testing.T) { + now := time.Now() + resetAt := now.Add(10 * time.Minute) + groupID := int64(1) + + rateLimited := Account{ + ID: 1, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 0, + RateLimitResetAt: &resetAt, + } + available := Account{ + ID: 2, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 1, + } + + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{rateLimited, available}}, + concurrencyService: NewConcurrencyService(stubConcurrencyCache{}), + } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "", "gpt-5.2", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.Account == nil { + t.Fatalf("expected selection with account") + } + if selection.Account.ID != available.ID { + t.Fatalf("expected account %d, got %d", available.ID, selection.Account.ID) + } + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + +func TestOpenAISelectAccountWithLoadAwareness_FiltersUnschedulableWhenNoConcurrencyService(t *testing.T) { + now := time.Now() + resetAt := now.Add(10 * time.Minute) + groupID := int64(1) + + rateLimited := Account{ + ID: 1, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 0, + RateLimitResetAt: &resetAt, + } + available := Account{ + ID: 2, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 1, + } + + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{rateLimited, available}}, + // concurrencyService is nil, forcing the non-load-batch selection path. 
+ } + + selection, err := svc.SelectAccountWithLoadAwareness(context.Background(), &groupID, "", "gpt-5.2", nil) + if err != nil { + t.Fatalf("SelectAccountWithLoadAwareness error: %v", err) + } + if selection == nil || selection.Account == nil { + t.Fatalf("expected selection with account") + } + if selection.Account.ID != available.ID { + t.Fatalf("expected account %d, got %d", available.ID, selection.Account.ID) + } + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + func TestOpenAIStreamingTimeout(t *testing.T) { gin.SetMode(gin.TestMode) cfg := &config.Config{ @@ -220,7 +344,7 @@ func TestOpenAIInvalidBaseURLWhenAllowlistDisabled(t *testing.T) { Credentials: map[string]any{"base_url": "://invalid-url"}, } - _, err := svc.buildUpstreamRequest(c.Request.Context(), c, account, []byte("{}"), "token", false) + _, err := svc.buildUpstreamRequest(c.Request.Context(), c, account, []byte("{}"), "token", false, "", false) if err == nil { t.Fatalf("expected error for invalid base_url when allowlist disabled") } diff --git a/backend/internal/service/openai_gateway_service_tool_correction_test.go b/backend/internal/service/openai_gateway_service_tool_correction_test.go new file mode 100644 index 00000000..d4491cfe --- /dev/null +++ b/backend/internal/service/openai_gateway_service_tool_correction_test.go @@ -0,0 +1,133 @@ +package service + +import ( + "strings" + "testing" +) + +// TestOpenAIGatewayService_ToolCorrection 测试 OpenAIGatewayService 中的工具修正集成 +func TestOpenAIGatewayService_ToolCorrection(t *testing.T) { + // 创建一个简单的 service 实例来测试工具修正 + service := &OpenAIGatewayService{ + toolCorrector: NewCodexToolCorrector(), + } + + tests := []struct { + name string + input []byte + expected string + changed bool + }{ + { + name: "correct apply_patch in response body", + input: []byte(`{ + "choices": [{ + "message": { + "tool_calls": [{ + "function": {"name": "apply_patch"} + }] + } + }] + }`), + expected: "edit", + changed: true, + }, + { + name: "correct 
update_plan in response body", + input: []byte(`{ + "tool_calls": [{ + "function": {"name": "update_plan"} + }] + }`), + expected: "todowrite", + changed: true, + }, + { + name: "no change for correct tool name", + input: []byte(`{ + "tool_calls": [{ + "function": {"name": "edit"} + }] + }`), + expected: "edit", + changed: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := service.correctToolCallsInResponseBody(tt.input) + resultStr := string(result) + + // 检查是否包含期望的工具名称 + if !strings.Contains(resultStr, tt.expected) { + t.Errorf("expected result to contain %q, got %q", tt.expected, resultStr) + } + + // 对于预期有变化的情况,验证结果与输入不同 + if tt.changed && string(result) == string(tt.input) { + t.Error("expected result to be different from input, but they are the same") + } + + // 对于预期无变化的情况,验证结果与输入相同 + if !tt.changed && string(result) != string(tt.input) { + t.Error("expected result to be same as input, but they are different") + } + }) + } +} + +// TestOpenAIGatewayService_ToolCorrectorInitialization 测试工具修正器是否正确初始化 +func TestOpenAIGatewayService_ToolCorrectorInitialization(t *testing.T) { + service := &OpenAIGatewayService{ + toolCorrector: NewCodexToolCorrector(), + } + + if service.toolCorrector == nil { + t.Fatal("toolCorrector should not be nil") + } + + // 测试修正器可以正常工作 + data := `{"tool_calls":[{"function":{"name":"apply_patch"}}]}` + corrected, changed := service.toolCorrector.CorrectToolCallsInSSEData(data) + + if !changed { + t.Error("expected tool call to be corrected") + } + + if !strings.Contains(corrected, "edit") { + t.Errorf("expected corrected data to contain 'edit', got %q", corrected) + } +} + +// TestToolCorrectionStats 测试工具修正统计功能 +func TestToolCorrectionStats(t *testing.T) { + service := &OpenAIGatewayService{ + toolCorrector: NewCodexToolCorrector(), + } + + // 执行几次修正 + testData := []string{ + `{"tool_calls":[{"function":{"name":"apply_patch"}}]}`, + `{"tool_calls":[{"function":{"name":"update_plan"}}]}`, + 
`{"tool_calls":[{"function":{"name":"apply_patch"}}]}`, + } + + for _, data := range testData { + service.toolCorrector.CorrectToolCallsInSSEData(data) + } + + stats := service.toolCorrector.GetStats() + + if stats.TotalCorrected != 3 { + t.Errorf("expected 3 corrections, got %d", stats.TotalCorrected) + } + + if stats.CorrectionsByTool["apply_patch->edit"] != 2 { + t.Errorf("expected 2 apply_patch->edit corrections, got %d", stats.CorrectionsByTool["apply_patch->edit"]) + } + + if stats.CorrectionsByTool["update_plan->todowrite"] != 1 { + t.Errorf("expected 1 update_plan->todowrite correction, got %d", stats.CorrectionsByTool["update_plan->todowrite"]) + } +} diff --git a/backend/internal/service/openai_token_provider.go b/backend/internal/service/openai_token_provider.go new file mode 100644 index 00000000..82a0866f --- /dev/null +++ b/backend/internal/service/openai_token_provider.go @@ -0,0 +1,189 @@ +package service + +import ( + "context" + "errors" + "log/slog" + "strings" + "time" +) + +const ( + openAITokenRefreshSkew = 3 * time.Minute + openAITokenCacheSkew = 5 * time.Minute + openAILockWaitTime = 200 * time.Millisecond +) + +// OpenAITokenCache Token 缓存接口(复用 GeminiTokenCache 接口定义) +type OpenAITokenCache = GeminiTokenCache + +// OpenAITokenProvider 管理 OpenAI OAuth 账户的 access_token +type OpenAITokenProvider struct { + accountRepo AccountRepository + tokenCache OpenAITokenCache + openAIOAuthService *OpenAIOAuthService +} + +func NewOpenAITokenProvider( + accountRepo AccountRepository, + tokenCache OpenAITokenCache, + openAIOAuthService *OpenAIOAuthService, +) *OpenAITokenProvider { + return &OpenAITokenProvider{ + accountRepo: accountRepo, + tokenCache: tokenCache, + openAIOAuthService: openAIOAuthService, + } +} + +// GetAccessToken 获取有效的 access_token +func (p *OpenAITokenProvider) GetAccessToken(ctx context.Context, account *Account) (string, error) { + if account == nil { + return "", errors.New("account is nil") + } + if account.Platform != 
PlatformOpenAI || account.Type != AccountTypeOAuth { + return "", errors.New("not an openai oauth account") + } + + cacheKey := OpenAITokenCacheKey(account) + + // 1. 先尝试缓存 + if p.tokenCache != nil { + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && strings.TrimSpace(token) != "" { + slog.Debug("openai_token_cache_hit", "account_id", account.ID) + return token, nil + } else if err != nil { + slog.Warn("openai_token_cache_get_failed", "account_id", account.ID, "error", err) + } + } + + slog.Debug("openai_token_cache_miss", "account_id", account.ID) + + // 2. 如果即将过期则刷新 + expiresAt := account.GetCredentialAsTime("expires_at") + needsRefresh := expiresAt == nil || time.Until(*expiresAt) <= openAITokenRefreshSkew + refreshFailed := false + if needsRefresh && p.tokenCache != nil { + locked, lockErr := p.tokenCache.AcquireRefreshLock(ctx, cacheKey, 30*time.Second) + if lockErr == nil && locked { + defer func() { _ = p.tokenCache.ReleaseRefreshLock(ctx, cacheKey) }() + + // 拿到锁后再次检查缓存(另一个 worker 可能已刷新) + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && strings.TrimSpace(token) != "" { + return token, nil + } + + // 从数据库获取最新账户信息 + fresh, err := p.accountRepo.GetByID(ctx, account.ID) + if err == nil && fresh != nil { + account = fresh + } + expiresAt = account.GetCredentialAsTime("expires_at") + if expiresAt == nil || time.Until(*expiresAt) <= openAITokenRefreshSkew { + if p.openAIOAuthService == nil { + slog.Warn("openai_oauth_service_not_configured", "account_id", account.ID) + refreshFailed = true // 无法刷新,标记失败 + } else { + tokenInfo, err := p.openAIOAuthService.RefreshAccountToken(ctx, account) + if err != nil { + // 刷新失败时记录警告,但不立即返回错误,尝试使用现有 token + slog.Warn("openai_token_refresh_failed", "account_id", account.ID, "error", err) + refreshFailed = true // 刷新失败,标记以使用短 TTL + } else { + newCredentials := p.openAIOAuthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := 
newCredentials[k]; !exists { + newCredentials[k] = v + } + } + account.Credentials = newCredentials + if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { + slog.Error("openai_token_provider_update_failed", "account_id", account.ID, "error", updateErr) + } + expiresAt = account.GetCredentialAsTime("expires_at") + } + } + } + } else if lockErr != nil { + // Redis 错误导致无法获取锁,降级为无锁刷新(仅在 token 接近过期时) + slog.Warn("openai_token_lock_failed_degraded_refresh", "account_id", account.ID, "error", lockErr) + + // 检查 ctx 是否已取消 + if ctx.Err() != nil { + return "", ctx.Err() + } + + // 从数据库获取最新账户信息 + if p.accountRepo != nil { + fresh, err := p.accountRepo.GetByID(ctx, account.ID) + if err == nil && fresh != nil { + account = fresh + } + } + expiresAt = account.GetCredentialAsTime("expires_at") + + // 仅在 expires_at 已过期/接近过期时才执行无锁刷新 + if expiresAt == nil || time.Until(*expiresAt) <= openAITokenRefreshSkew { + if p.openAIOAuthService == nil { + slog.Warn("openai_oauth_service_not_configured", "account_id", account.ID) + refreshFailed = true + } else { + tokenInfo, err := p.openAIOAuthService.RefreshAccountToken(ctx, account) + if err != nil { + slog.Warn("openai_token_refresh_failed_degraded", "account_id", account.ID, "error", err) + refreshFailed = true + } else { + newCredentials := p.openAIOAuthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + account.Credentials = newCredentials + if updateErr := p.accountRepo.Update(ctx, account); updateErr != nil { + slog.Error("openai_token_provider_update_failed", "account_id", account.ID, "error", updateErr) + } + expiresAt = account.GetCredentialAsTime("expires_at") + } + } + } + } else { + // 锁获取失败(被其他 worker 持有),等待 200ms 后重试读取缓存 + time.Sleep(openAILockWaitTime) + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && strings.TrimSpace(token) != "" { + 
slog.Debug("openai_token_cache_hit_after_wait", "account_id", account.ID) + return token, nil + } + } + } + + accessToken := account.GetOpenAIAccessToken() + if strings.TrimSpace(accessToken) == "" { + return "", errors.New("access_token not found in credentials") + } + + // 3. 存入缓存 + if p.tokenCache != nil { + ttl := 30 * time.Minute + if refreshFailed { + // 刷新失败时使用短 TTL,避免失效 token 长时间缓存导致 401 抖动 + ttl = time.Minute + slog.Debug("openai_token_cache_short_ttl", "account_id", account.ID, "reason", "refresh_failed") + } else if expiresAt != nil { + until := time.Until(*expiresAt) + switch { + case until > openAITokenCacheSkew: + ttl = until - openAITokenCacheSkew + case until > 0: + ttl = until + default: + ttl = time.Minute + } + } + if err := p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl); err != nil { + slog.Warn("openai_token_cache_set_failed", "account_id", account.ID, "error", err) + } + } + + return accessToken, nil +} diff --git a/backend/internal/service/openai_token_provider_test.go b/backend/internal/service/openai_token_provider_test.go new file mode 100644 index 00000000..c2e3dbb0 --- /dev/null +++ b/backend/internal/service/openai_token_provider_test.go @@ -0,0 +1,810 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// openAITokenCacheStub implements OpenAITokenCache for testing +type openAITokenCacheStub struct { + mu sync.Mutex + tokens map[string]string + getErr error + setErr error + deleteErr error + lockAcquired bool + lockErr error + releaseLockErr error + getCalled int32 + setCalled int32 + lockCalled int32 + unlockCalled int32 + simulateLockRace bool +} + +func newOpenAITokenCacheStub() *openAITokenCacheStub { + return &openAITokenCacheStub{ + tokens: make(map[string]string), + lockAcquired: true, + } +} + +func (s *openAITokenCacheStub) GetAccessToken(ctx context.Context, cacheKey string) (string, error) { + 
atomic.AddInt32(&s.getCalled, 1) + if s.getErr != nil { + return "", s.getErr + } + s.mu.Lock() + defer s.mu.Unlock() + return s.tokens[cacheKey], nil +} + +func (s *openAITokenCacheStub) SetAccessToken(ctx context.Context, cacheKey string, token string, ttl time.Duration) error { + atomic.AddInt32(&s.setCalled, 1) + if s.setErr != nil { + return s.setErr + } + s.mu.Lock() + defer s.mu.Unlock() + s.tokens[cacheKey] = token + return nil +} + +func (s *openAITokenCacheStub) DeleteAccessToken(ctx context.Context, cacheKey string) error { + if s.deleteErr != nil { + return s.deleteErr + } + s.mu.Lock() + defer s.mu.Unlock() + delete(s.tokens, cacheKey) + return nil +} + +func (s *openAITokenCacheStub) AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error) { + atomic.AddInt32(&s.lockCalled, 1) + if s.lockErr != nil { + return false, s.lockErr + } + if s.simulateLockRace { + return false, nil + } + return s.lockAcquired, nil +} + +func (s *openAITokenCacheStub) ReleaseRefreshLock(ctx context.Context, cacheKey string) error { + atomic.AddInt32(&s.unlockCalled, 1) + return s.releaseLockErr +} + +// openAIAccountRepoStub is a minimal stub implementing only the methods used by OpenAITokenProvider +type openAIAccountRepoStub struct { + account *Account + getErr error + updateErr error + getCalled int32 + updateCalled int32 +} + +func (r *openAIAccountRepoStub) GetByID(ctx context.Context, id int64) (*Account, error) { + atomic.AddInt32(&r.getCalled, 1) + if r.getErr != nil { + return nil, r.getErr + } + return r.account, nil +} + +func (r *openAIAccountRepoStub) Update(ctx context.Context, account *Account) error { + atomic.AddInt32(&r.updateCalled, 1) + if r.updateErr != nil { + return r.updateErr + } + r.account = account + return nil +} + +// openAIOAuthServiceStub implements OpenAIOAuthService methods for testing +type openAIOAuthServiceStub struct { + tokenInfo *OpenAITokenInfo + refreshErr error + refreshCalled int32 +} + +func (s 
*openAIOAuthServiceStub) RefreshAccountToken(ctx context.Context, account *Account) (*OpenAITokenInfo, error) { + atomic.AddInt32(&s.refreshCalled, 1) + if s.refreshErr != nil { + return nil, s.refreshErr + } + return s.tokenInfo, nil +} + +func (s *openAIOAuthServiceStub) BuildAccountCredentials(info *OpenAITokenInfo) map[string]any { + now := time.Now() + return map[string]any{ + "access_token": info.AccessToken, + "refresh_token": info.RefreshToken, + "expires_at": now.Add(time.Duration(info.ExpiresIn) * time.Second).Format(time.RFC3339), + } +} + +func TestOpenAITokenProvider_CacheHit(t *testing.T) { + cache := newOpenAITokenCacheStub() + account := &Account{ + ID: 100, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "db-token", + }, + } + cacheKey := OpenAITokenCacheKey(account) + cache.tokens[cacheKey] = "cached-token" + + provider := NewOpenAITokenProvider(nil, cache, nil) + + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "cached-token", token) + require.Equal(t, int32(1), atomic.LoadInt32(&cache.getCalled)) + require.Equal(t, int32(0), atomic.LoadInt32(&cache.setCalled)) +} + +func TestOpenAITokenProvider_CacheMiss_FromCredentials(t *testing.T) { + cache := newOpenAITokenCacheStub() + // Token expires in far future, no refresh needed + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 101, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "credential-token", + "expires_at": expiresAt, + }, + } + + provider := NewOpenAITokenProvider(nil, cache, nil) + + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "credential-token", token) + + // Should have stored in cache + cacheKey := OpenAITokenCacheKey(account) + require.Equal(t, "credential-token", cache.tokens[cacheKey]) +} + +func 
TestOpenAITokenProvider_TokenRefresh(t *testing.T) { + cache := newOpenAITokenCacheStub() + accountRepo := &openAIAccountRepoStub{} + oauthService := &openAIOAuthServiceStub{ + tokenInfo: &OpenAITokenInfo{ + AccessToken: "refreshed-token", + RefreshToken: "new-refresh-token", + ExpiresIn: 3600, + }, + } + + // Token expires soon (within refresh skew) + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 102, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-token", + "refresh_token": "old-refresh-token", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + + // We need to directly test with the stub - create a custom provider + customProvider := &testOpenAITokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: oauthService, + } + + token, err := customProvider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "refreshed-token", token) + require.Equal(t, int32(1), atomic.LoadInt32(&oauthService.refreshCalled)) +} + +// testOpenAITokenProvider is a test version that uses the stub OAuth service +type testOpenAITokenProvider struct { + accountRepo *openAIAccountRepoStub + tokenCache *openAITokenCacheStub + oauthService *openAIOAuthServiceStub +} + +func (p *testOpenAITokenProvider) GetAccessToken(ctx context.Context, account *Account) (string, error) { + if account == nil { + return "", errors.New("account is nil") + } + if account.Platform != PlatformOpenAI || account.Type != AccountTypeOAuth { + return "", errors.New("not an openai oauth account") + } + + cacheKey := OpenAITokenCacheKey(account) + + // 1. Check cache + if p.tokenCache != nil { + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && token != "" { + return token, nil + } + } + + // 2. 
Check if refresh needed + expiresAt := account.GetCredentialAsTime("expires_at") + needsRefresh := expiresAt == nil || time.Until(*expiresAt) <= openAITokenRefreshSkew + refreshFailed := false + if needsRefresh && p.tokenCache != nil { + locked, err := p.tokenCache.AcquireRefreshLock(ctx, cacheKey, 30*time.Second) + if err == nil && locked { + defer func() { _ = p.tokenCache.ReleaseRefreshLock(ctx, cacheKey) }() + + // Check cache again after acquiring lock + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && token != "" { + return token, nil + } + + // Get fresh account from DB + fresh, err := p.accountRepo.GetByID(ctx, account.ID) + if err == nil && fresh != nil { + account = fresh + } + expiresAt = account.GetCredentialAsTime("expires_at") + if expiresAt == nil || time.Until(*expiresAt) <= openAITokenRefreshSkew { + if p.oauthService == nil { + refreshFailed = true // 无法刷新,标记失败 + } else { + tokenInfo, err := p.oauthService.RefreshAccountToken(ctx, account) + if err != nil { + refreshFailed = true // 刷新失败,标记以使用短 TTL + } else { + newCredentials := p.oauthService.BuildAccountCredentials(tokenInfo) + for k, v := range account.Credentials { + if _, exists := newCredentials[k]; !exists { + newCredentials[k] = v + } + } + account.Credentials = newCredentials + _ = p.accountRepo.Update(ctx, account) + expiresAt = account.GetCredentialAsTime("expires_at") + } + } + } + } else if p.tokenCache.simulateLockRace { + // Wait and retry cache + time.Sleep(10 * time.Millisecond) // Short wait for test + if token, err := p.tokenCache.GetAccessToken(ctx, cacheKey); err == nil && token != "" { + return token, nil + } + } + } + + accessToken := account.GetOpenAIAccessToken() + if accessToken == "" { + return "", errors.New("access_token not found in credentials") + } + + // 3. 
Store in cache + if p.tokenCache != nil { + ttl := 30 * time.Minute + if refreshFailed { + ttl = time.Minute // 刷新失败时使用短 TTL + } else if expiresAt != nil { + until := time.Until(*expiresAt) + if until > openAITokenCacheSkew { + ttl = until - openAITokenCacheSkew + } else if until > 0 { + ttl = until + } else { + ttl = time.Minute + } + } + _ = p.tokenCache.SetAccessToken(ctx, cacheKey, accessToken, ttl) + } + + return accessToken, nil +} + +func TestOpenAITokenProvider_LockRaceCondition(t *testing.T) { + cache := newOpenAITokenCacheStub() + cache.simulateLockRace = true + accountRepo := &openAIAccountRepoStub{} + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 103, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "race-token", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + + // Simulate another worker already refreshed and cached + cacheKey := OpenAITokenCacheKey(account) + go func() { + time.Sleep(5 * time.Millisecond) + cache.mu.Lock() + cache.tokens[cacheKey] = "winner-token" + cache.mu.Unlock() + }() + + provider := &testOpenAITokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + } + + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + // Should get the token set by the "winner" or the original + require.NotEmpty(t, token) +} + +func TestOpenAITokenProvider_NilAccount(t *testing.T) { + provider := NewOpenAITokenProvider(nil, nil, nil) + + token, err := provider.GetAccessToken(context.Background(), nil) + require.Error(t, err) + require.Contains(t, err.Error(), "account is nil") + require.Empty(t, token) +} + +func TestOpenAITokenProvider_WrongPlatform(t *testing.T) { + provider := NewOpenAITokenProvider(nil, nil, nil) + account := &Account{ + ID: 104, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + + token, err := 
provider.GetAccessToken(context.Background(), account) + require.Error(t, err) + require.Contains(t, err.Error(), "not an openai oauth account") + require.Empty(t, token) +} + +func TestOpenAITokenProvider_WrongAccountType(t *testing.T) { + provider := NewOpenAITokenProvider(nil, nil, nil) + account := &Account{ + ID: 105, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + } + + token, err := provider.GetAccessToken(context.Background(), account) + require.Error(t, err) + require.Contains(t, err.Error(), "not an openai oauth account") + require.Empty(t, token) +} + +func TestOpenAITokenProvider_NilCache(t *testing.T) { + // Token doesn't need refresh + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 106, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "nocache-token", + "expires_at": expiresAt, + }, + } + + provider := NewOpenAITokenProvider(nil, nil, nil) + + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "nocache-token", token) +} + +func TestOpenAITokenProvider_CacheGetError(t *testing.T) { + cache := newOpenAITokenCacheStub() + cache.getErr = errors.New("redis connection failed") + + // Token doesn't need refresh + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 107, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "fallback-token", + "expires_at": expiresAt, + }, + } + + provider := NewOpenAITokenProvider(nil, cache, nil) + + // Should gracefully degrade and return from credentials + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "fallback-token", token) +} + +func TestOpenAITokenProvider_CacheSetError(t *testing.T) { + cache := newOpenAITokenCacheStub() + cache.setErr = errors.New("redis write failed") + + expiresAt := 
time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 108, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "still-works-token", + "expires_at": expiresAt, + }, + } + + provider := NewOpenAITokenProvider(nil, cache, nil) + + // Should still work even if cache set fails + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "still-works-token", token) +} + +func TestOpenAITokenProvider_MissingAccessToken(t *testing.T) { + cache := newOpenAITokenCacheStub() + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 109, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "expires_at": expiresAt, + // missing access_token + }, + } + + provider := NewOpenAITokenProvider(nil, cache, nil) + + token, err := provider.GetAccessToken(context.Background(), account) + require.Error(t, err) + require.Contains(t, err.Error(), "access_token not found") + require.Empty(t, token) +} + +func TestOpenAITokenProvider_RefreshError(t *testing.T) { + cache := newOpenAITokenCacheStub() + accountRepo := &openAIAccountRepoStub{} + oauthService := &openAIOAuthServiceStub{ + refreshErr: errors.New("oauth refresh failed"), + } + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 110, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-token", + "refresh_token": "old-refresh-token", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + + provider := &testOpenAITokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: oauthService, + } + + // Now with fallback behavior, should return existing token even if refresh fails + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + 
require.Equal(t, "old-token", token) // Fallback to existing token +} + +func TestOpenAITokenProvider_OAuthServiceNotConfigured(t *testing.T) { + cache := newOpenAITokenCacheStub() + accountRepo := &openAIAccountRepoStub{} + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 111, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-token", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + + provider := &testOpenAITokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: nil, // not configured + } + + // Now with fallback behavior, should return existing token even if oauth service not configured + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "old-token", token) // Fallback to existing token +} + +func TestOpenAITokenProvider_TTLCalculation(t *testing.T) { + tests := []struct { + name string + expiresIn time.Duration + }{ + { + name: "far_future_expiry", + expiresIn: 1 * time.Hour, + }, + { + name: "medium_expiry", + expiresIn: 10 * time.Minute, + }, + { + name: "near_expiry", + expiresIn: 6 * time.Minute, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cache := newOpenAITokenCacheStub() + expiresAt := time.Now().Add(tt.expiresIn).Format(time.RFC3339) + account := &Account{ + ID: 200, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "test-token", + "expires_at": expiresAt, + }, + } + + provider := NewOpenAITokenProvider(nil, cache, nil) + + _, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + + // Verify token was cached + cacheKey := OpenAITokenCacheKey(account) + require.Equal(t, "test-token", cache.tokens[cacheKey]) + }) + } +} + +func TestOpenAITokenProvider_DoubleCheckAfterLock(t *testing.T) { + cache := 
newOpenAITokenCacheStub() + accountRepo := &openAIAccountRepoStub{} + oauthService := &openAIOAuthServiceStub{ + tokenInfo: &OpenAITokenInfo{ + AccessToken: "refreshed-token", + RefreshToken: "new-refresh", + ExpiresIn: 3600, + }, + } + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 112, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "old-token", + "expires_at": expiresAt, + }, + } + accountRepo.account = account + cacheKey := OpenAITokenCacheKey(account) + + // Simulate: first GetAccessToken returns empty, but after lock acquired, cache has token + originalGet := int32(0) + cache.tokens[cacheKey] = "" // Empty initially + + provider := &testOpenAITokenProvider{ + accountRepo: accountRepo, + tokenCache: cache, + oauthService: oauthService, + } + + // In a goroutine, set the cached token after a small delay (simulating race) + go func() { + time.Sleep(5 * time.Millisecond) + cache.mu.Lock() + cache.tokens[cacheKey] = "cached-by-other" + cache.mu.Unlock() + }() + + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + // Should get either the refreshed token or the cached one + require.NotEmpty(t, token) + _ = originalGet // Suppress unused warning +} + +// Tests for real provider - to increase coverage +func TestOpenAITokenProvider_Real_LockFailedWait(t *testing.T) { + cache := newOpenAITokenCacheStub() + cache.lockAcquired = false // Lock acquisition fails + + // Token expires soon (within refresh skew) to trigger lock attempt + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 200, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "fallback-token", + "expires_at": expiresAt, + }, + } + + // Set token in cache after lock wait period (simulate other worker refreshing) + cacheKey := 
OpenAITokenCacheKey(account) + go func() { + time.Sleep(100 * time.Millisecond) + cache.mu.Lock() + cache.tokens[cacheKey] = "refreshed-by-other" + cache.mu.Unlock() + }() + + provider := NewOpenAITokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + // Should get either the fallback token or the refreshed one + require.NotEmpty(t, token) +} + +func TestOpenAITokenProvider_Real_CacheHitAfterWait(t *testing.T) { + cache := newOpenAITokenCacheStub() + cache.lockAcquired = false // Lock acquisition fails + + // Token expires soon + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 201, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "original-token", + "expires_at": expiresAt, + }, + } + + cacheKey := OpenAITokenCacheKey(account) + // Set token in cache immediately after wait starts + go func() { + time.Sleep(50 * time.Millisecond) + cache.mu.Lock() + cache.tokens[cacheKey] = "winner-token" + cache.mu.Unlock() + }() + + provider := NewOpenAITokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.NotEmpty(t, token) +} + +func TestOpenAITokenProvider_Real_ExpiredWithoutRefreshToken(t *testing.T) { + cache := newOpenAITokenCacheStub() + cache.lockAcquired = false // Prevent entering refresh logic + + // Token with nil expires_at (no expiry set) - should use credentials + account := &Account{ + ID: 202, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "no-expiry-token", + }, + } + + provider := NewOpenAITokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + // Without OAuth service, refresh will fail but token should be returned from credentials + require.NoError(t, err) + require.Equal(t, "no-expiry-token", token) +} 
+ +func TestOpenAITokenProvider_Real_WhitespaceToken(t *testing.T) { + cache := newOpenAITokenCacheStub() + cacheKey := "openai:account:203" + cache.tokens[cacheKey] = " " // Whitespace only - should be treated as empty + + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 203, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "real-token", + "expires_at": expiresAt, + }, + } + + provider := NewOpenAITokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "real-token", token) // Should fall back to credentials +} + +func TestOpenAITokenProvider_Real_LockError(t *testing.T) { + cache := newOpenAITokenCacheStub() + cache.lockErr = errors.New("redis lock failed") + + // Token expires soon (within refresh skew) + expiresAt := time.Now().Add(1 * time.Minute).Format(time.RFC3339) + account := &Account{ + ID: 204, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "fallback-on-lock-error", + "expires_at": expiresAt, + }, + } + + provider := NewOpenAITokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, "fallback-on-lock-error", token) +} + +func TestOpenAITokenProvider_Real_WhitespaceCredentialToken(t *testing.T) { + cache := newOpenAITokenCacheStub() + + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 205, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": " ", // Whitespace only + "expires_at": expiresAt, + }, + } + + provider := NewOpenAITokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.Error(t, err) + require.Contains(t, err.Error(), "access_token not found") + require.Empty(t, token) 
+} + +func TestOpenAITokenProvider_Real_NilCredentials(t *testing.T) { + cache := newOpenAITokenCacheStub() + + expiresAt := time.Now().Add(1 * time.Hour).Format(time.RFC3339) + account := &Account{ + ID: 206, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "expires_at": expiresAt, + // No access_token + }, + } + + provider := NewOpenAITokenProvider(nil, cache, nil) + token, err := provider.GetAccessToken(context.Background(), account) + require.Error(t, err) + require.Contains(t, err.Error(), "access_token not found") + require.Empty(t, token) +} diff --git a/backend/internal/service/openai_tool_continuation.go b/backend/internal/service/openai_tool_continuation.go new file mode 100644 index 00000000..e59082b2 --- /dev/null +++ b/backend/internal/service/openai_tool_continuation.go @@ -0,0 +1,213 @@ +package service + +import "strings" + +// NeedsToolContinuation 判定请求是否需要工具调用续链处理。 +// 满足以下任一信号即视为续链:previous_response_id、input 内包含 function_call_output/item_reference、 +// 或显式声明 tools/tool_choice。 +func NeedsToolContinuation(reqBody map[string]any) bool { + if reqBody == nil { + return false + } + if hasNonEmptyString(reqBody["previous_response_id"]) { + return true + } + if hasToolsSignal(reqBody) { + return true + } + if hasToolChoiceSignal(reqBody) { + return true + } + if inputHasType(reqBody, "function_call_output") { + return true + } + if inputHasType(reqBody, "item_reference") { + return true + } + return false +} + +// HasFunctionCallOutput 判断 input 是否包含 function_call_output,用于触发续链校验。 +func HasFunctionCallOutput(reqBody map[string]any) bool { + if reqBody == nil { + return false + } + return inputHasType(reqBody, "function_call_output") +} + +// HasToolCallContext 判断 input 是否包含带 call_id 的 tool_call/function_call, +// 用于判断 function_call_output 是否具备可关联的上下文。 +func HasToolCallContext(reqBody map[string]any) bool { + if reqBody == nil { + return false + } + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + 
for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "tool_call" && itemType != "function_call" { + continue + } + if callID, ok := itemMap["call_id"].(string); ok && strings.TrimSpace(callID) != "" { + return true + } + } + return false +} + +// FunctionCallOutputCallIDs 提取 input 中 function_call_output 的 call_id 集合。 +// 仅返回非空 call_id,用于与 item_reference.id 做匹配校验。 +func FunctionCallOutputCallIDs(reqBody map[string]any) []string { + if reqBody == nil { + return nil + } + input, ok := reqBody["input"].([]any) + if !ok { + return nil + } + ids := make(map[string]struct{}) + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "function_call_output" { + continue + } + if callID, ok := itemMap["call_id"].(string); ok && strings.TrimSpace(callID) != "" { + ids[callID] = struct{}{} + } + } + if len(ids) == 0 { + return nil + } + result := make([]string, 0, len(ids)) + for id := range ids { + result = append(result, id) + } + return result +} + +// HasFunctionCallOutputMissingCallID 判断是否存在缺少 call_id 的 function_call_output。 +func HasFunctionCallOutputMissingCallID(reqBody map[string]any) bool { + if reqBody == nil { + return false + } + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "function_call_output" { + continue + } + callID, _ := itemMap["call_id"].(string) + if strings.TrimSpace(callID) == "" { + return true + } + } + return false +} + +// HasItemReferenceForCallIDs 判断 item_reference.id 是否覆盖所有 call_id。 +// 用于仅依赖引用项完成续链场景的校验。 +func HasItemReferenceForCallIDs(reqBody map[string]any, callIDs []string) bool { + if reqBody == nil || len(callIDs) == 0 { + return false + } + input, ok := 
reqBody["input"].([]any) + if !ok { + return false + } + referenceIDs := make(map[string]struct{}) + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "item_reference" { + continue + } + idValue, _ := itemMap["id"].(string) + idValue = strings.TrimSpace(idValue) + if idValue == "" { + continue + } + referenceIDs[idValue] = struct{}{} + } + if len(referenceIDs) == 0 { + return false + } + for _, callID := range callIDs { + if _, ok := referenceIDs[callID]; !ok { + return false + } + } + return true +} + +// inputHasType 判断 input 中是否存在指定类型的 item。 +func inputHasType(reqBody map[string]any, want string) bool { + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType == want { + return true + } + } + return false +} + +// hasNonEmptyString 判断字段是否为非空字符串。 +func hasNonEmptyString(value any) bool { + stringValue, ok := value.(string) + return ok && strings.TrimSpace(stringValue) != "" +} + +// hasToolsSignal 判断 tools 字段是否显式声明(存在且不为空)。 +func hasToolsSignal(reqBody map[string]any) bool { + raw, exists := reqBody["tools"] + if !exists || raw == nil { + return false + } + if tools, ok := raw.([]any); ok { + return len(tools) > 0 + } + return false +} + +// hasToolChoiceSignal 判断 tool_choice 是否显式声明(非空或非 nil)。 +func hasToolChoiceSignal(reqBody map[string]any) bool { + raw, exists := reqBody["tool_choice"] + if !exists || raw == nil { + return false + } + switch value := raw.(type) { + case string: + return strings.TrimSpace(value) != "" + case map[string]any: + return len(value) > 0 + default: + return false + } +} diff --git a/backend/internal/service/openai_tool_continuation_test.go b/backend/internal/service/openai_tool_continuation_test.go new file mode 100644 index 00000000..fe737ad6 --- /dev/null +++ 
b/backend/internal/service/openai_tool_continuation_test.go @@ -0,0 +1,98 @@ +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNeedsToolContinuationSignals(t *testing.T) { + // 覆盖所有触发续链的信号来源,确保判定逻辑完整。 + cases := []struct { + name string + body map[string]any + want bool + }{ + {name: "nil", body: nil, want: false}, + {name: "previous_response_id", body: map[string]any{"previous_response_id": "resp_1"}, want: true}, + {name: "previous_response_id_blank", body: map[string]any{"previous_response_id": " "}, want: false}, + {name: "function_call_output", body: map[string]any{"input": []any{map[string]any{"type": "function_call_output"}}}, want: true}, + {name: "item_reference", body: map[string]any{"input": []any{map[string]any{"type": "item_reference"}}}, want: true}, + {name: "tools", body: map[string]any{"tools": []any{map[string]any{"type": "function"}}}, want: true}, + {name: "tools_empty", body: map[string]any{"tools": []any{}}, want: false}, + {name: "tools_invalid", body: map[string]any{"tools": "bad"}, want: false}, + {name: "tool_choice", body: map[string]any{"tool_choice": "auto"}, want: true}, + {name: "tool_choice_object", body: map[string]any{"tool_choice": map[string]any{"type": "function"}}, want: true}, + {name: "tool_choice_empty_object", body: map[string]any{"tool_choice": map[string]any{}}, want: false}, + {name: "none", body: map[string]any{"input": []any{map[string]any{"type": "text", "text": "hi"}}}, want: false}, + } + + for _, tt := range cases { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.want, NeedsToolContinuation(tt.body)) + }) + } +} + +func TestHasFunctionCallOutput(t *testing.T) { + // 仅当 input 中存在 function_call_output 才视为续链输出。 + require.False(t, HasFunctionCallOutput(nil)) + require.True(t, HasFunctionCallOutput(map[string]any{ + "input": []any{map[string]any{"type": "function_call_output"}}, + })) + require.False(t, HasFunctionCallOutput(map[string]any{ + "input": "text", + 
})) +} + +func TestHasToolCallContext(t *testing.T) { + // tool_call/function_call 必须包含 call_id,才能作为可关联上下文。 + require.False(t, HasToolCallContext(nil)) + require.True(t, HasToolCallContext(map[string]any{ + "input": []any{map[string]any{"type": "tool_call", "call_id": "call_1"}}, + })) + require.True(t, HasToolCallContext(map[string]any{ + "input": []any{map[string]any{"type": "function_call", "call_id": "call_2"}}, + })) + require.False(t, HasToolCallContext(map[string]any{ + "input": []any{map[string]any{"type": "tool_call"}}, + })) +} + +func TestFunctionCallOutputCallIDs(t *testing.T) { + // 仅提取非空 call_id,去重后返回。 + require.Empty(t, FunctionCallOutputCallIDs(nil)) + callIDs := FunctionCallOutputCallIDs(map[string]any{ + "input": []any{ + map[string]any{"type": "function_call_output", "call_id": "call_1"}, + map[string]any{"type": "function_call_output", "call_id": ""}, + map[string]any{"type": "function_call_output", "call_id": "call_1"}, + }, + }) + require.ElementsMatch(t, []string{"call_1"}, callIDs) +} + +func TestHasFunctionCallOutputMissingCallID(t *testing.T) { + require.False(t, HasFunctionCallOutputMissingCallID(nil)) + require.True(t, HasFunctionCallOutputMissingCallID(map[string]any{ + "input": []any{map[string]any{"type": "function_call_output"}}, + })) + require.False(t, HasFunctionCallOutputMissingCallID(map[string]any{ + "input": []any{map[string]any{"type": "function_call_output", "call_id": "call_1"}}, + })) +} + +func TestHasItemReferenceForCallIDs(t *testing.T) { + // item_reference 需要覆盖所有 call_id 才视为可关联上下文。 + require.False(t, HasItemReferenceForCallIDs(nil, []string{"call_1"})) + require.False(t, HasItemReferenceForCallIDs(map[string]any{}, []string{"call_1"})) + req := map[string]any{ + "input": []any{ + map[string]any{"type": "item_reference", "id": "call_1"}, + map[string]any{"type": "item_reference", "id": "call_2"}, + }, + } + require.True(t, HasItemReferenceForCallIDs(req, []string{"call_1"})) + require.True(t, 
HasItemReferenceForCallIDs(req, []string{"call_1", "call_2"})) + require.False(t, HasItemReferenceForCallIDs(req, []string{"call_1", "call_3"})) +} diff --git a/backend/internal/service/openai_tool_corrector.go b/backend/internal/service/openai_tool_corrector.go new file mode 100644 index 00000000..9c9eab84 --- /dev/null +++ b/backend/internal/service/openai_tool_corrector.go @@ -0,0 +1,307 @@ +package service + +import ( + "encoding/json" + "fmt" + "log" + "sync" +) + +// codexToolNameMapping 定义 Codex 原生工具名称到 OpenCode 工具名称的映射 +var codexToolNameMapping = map[string]string{ + "apply_patch": "edit", + "applyPatch": "edit", + "update_plan": "todowrite", + "updatePlan": "todowrite", + "read_plan": "todoread", + "readPlan": "todoread", + "search_files": "grep", + "searchFiles": "grep", + "list_files": "glob", + "listFiles": "glob", + "read_file": "read", + "readFile": "read", + "write_file": "write", + "writeFile": "write", + "execute_bash": "bash", + "executeBash": "bash", + "exec_bash": "bash", + "execBash": "bash", +} + +// ToolCorrectionStats 记录工具修正的统计信息(导出用于 JSON 序列化) +type ToolCorrectionStats struct { + TotalCorrected int `json:"total_corrected"` + CorrectionsByTool map[string]int `json:"corrections_by_tool"` +} + +// CodexToolCorrector 处理 Codex 工具调用的自动修正 +type CodexToolCorrector struct { + stats ToolCorrectionStats + mu sync.RWMutex +} + +// NewCodexToolCorrector 创建新的工具修正器 +func NewCodexToolCorrector() *CodexToolCorrector { + return &CodexToolCorrector{ + stats: ToolCorrectionStats{ + CorrectionsByTool: make(map[string]int), + }, + } +} + +// CorrectToolCallsInSSEData 修正 SSE 数据中的工具调用 +// 返回修正后的数据和是否进行了修正 +func (c *CodexToolCorrector) CorrectToolCallsInSSEData(data string) (string, bool) { + if data == "" || data == "\n" { + return data, false + } + + // 尝试解析 JSON + var payload map[string]any + if err := json.Unmarshal([]byte(data), &payload); err != nil { + // 不是有效的 JSON,直接返回原数据 + return data, false + } + + corrected := false + + // 处理 tool_calls 数组 + if 
toolCalls, ok := payload["tool_calls"].([]any); ok { + if c.correctToolCallsArray(toolCalls) { + corrected = true + } + } + + // 处理 function_call 对象 + if functionCall, ok := payload["function_call"].(map[string]any); ok { + if c.correctFunctionCall(functionCall) { + corrected = true + } + } + + // 处理 delta.tool_calls + if delta, ok := payload["delta"].(map[string]any); ok { + if toolCalls, ok := delta["tool_calls"].([]any); ok { + if c.correctToolCallsArray(toolCalls) { + corrected = true + } + } + if functionCall, ok := delta["function_call"].(map[string]any); ok { + if c.correctFunctionCall(functionCall) { + corrected = true + } + } + } + + // 处理 choices[].message.tool_calls 和 choices[].delta.tool_calls + if choices, ok := payload["choices"].([]any); ok { + for _, choice := range choices { + if choiceMap, ok := choice.(map[string]any); ok { + // 处理 message 中的工具调用 + if message, ok := choiceMap["message"].(map[string]any); ok { + if toolCalls, ok := message["tool_calls"].([]any); ok { + if c.correctToolCallsArray(toolCalls) { + corrected = true + } + } + if functionCall, ok := message["function_call"].(map[string]any); ok { + if c.correctFunctionCall(functionCall) { + corrected = true + } + } + } + // 处理 delta 中的工具调用 + if delta, ok := choiceMap["delta"].(map[string]any); ok { + if toolCalls, ok := delta["tool_calls"].([]any); ok { + if c.correctToolCallsArray(toolCalls) { + corrected = true + } + } + if functionCall, ok := delta["function_call"].(map[string]any); ok { + if c.correctFunctionCall(functionCall) { + corrected = true + } + } + } + } + } + } + + if !corrected { + return data, false + } + + // 序列化回 JSON + correctedBytes, err := json.Marshal(payload) + if err != nil { + log.Printf("[CodexToolCorrector] Failed to marshal corrected data: %v", err) + return data, false + } + + return string(correctedBytes), true +} + +// correctToolCallsArray 修正工具调用数组中的工具名称 +func (c *CodexToolCorrector) correctToolCallsArray(toolCalls []any) bool { + corrected := false + for 
_, toolCall := range toolCalls { + if toolCallMap, ok := toolCall.(map[string]any); ok { + if function, ok := toolCallMap["function"].(map[string]any); ok { + if c.correctFunctionCall(function) { + corrected = true + } + } + } + } + return corrected +} + +// correctFunctionCall 修正单个函数调用的工具名称和参数 +func (c *CodexToolCorrector) correctFunctionCall(functionCall map[string]any) bool { + name, ok := functionCall["name"].(string) + if !ok || name == "" { + return false + } + + corrected := false + + // 查找并修正工具名称 + if correctName, found := codexToolNameMapping[name]; found { + functionCall["name"] = correctName + c.recordCorrection(name, correctName) + corrected = true + name = correctName // 使用修正后的名称进行参数修正 + } + + // 修正工具参数(基于工具名称) + if c.correctToolParameters(name, functionCall) { + corrected = true + } + + return corrected +} + +// correctToolParameters 修正工具参数以符合 OpenCode 规范 +func (c *CodexToolCorrector) correctToolParameters(toolName string, functionCall map[string]any) bool { + arguments, ok := functionCall["arguments"] + if !ok { + return false + } + + // arguments 可能是字符串(JSON)或已解析的 map + var argsMap map[string]any + switch v := arguments.(type) { + case string: + // 解析 JSON 字符串 + if err := json.Unmarshal([]byte(v), &argsMap); err != nil { + return false + } + case map[string]any: + argsMap = v + default: + return false + } + + corrected := false + + // 根据工具名称应用特定的参数修正规则 + switch toolName { + case "bash": + // 移除 workdir 参数(OpenCode 不支持) + if _, exists := argsMap["workdir"]; exists { + delete(argsMap, "workdir") + corrected = true + log.Printf("[CodexToolCorrector] Removed 'workdir' parameter from bash tool") + } + if _, exists := argsMap["work_dir"]; exists { + delete(argsMap, "work_dir") + corrected = true + log.Printf("[CodexToolCorrector] Removed 'work_dir' parameter from bash tool") + } + + case "edit": + // OpenCode edit 使用 old_string/new_string,Codex 可能使用其他名称 + // 这里可以添加参数名称的映射逻辑 + if _, exists := argsMap["file_path"]; !exists { + if path, exists := 
argsMap["path"]; exists { + argsMap["file_path"] = path + delete(argsMap, "path") + corrected = true + log.Printf("[CodexToolCorrector] Renamed 'path' to 'file_path' in edit tool") + } + } + } + + // 如果修正了参数,需要重新序列化 + if corrected { + if _, wasString := arguments.(string); wasString { + // 原本是字符串,序列化回字符串 + if newArgsJSON, err := json.Marshal(argsMap); err == nil { + functionCall["arguments"] = string(newArgsJSON) + } + } else { + // 原本是 map,直接赋值 + functionCall["arguments"] = argsMap + } + } + + return corrected +} + +// recordCorrection 记录一次工具名称修正 +func (c *CodexToolCorrector) recordCorrection(from, to string) { + c.mu.Lock() + defer c.mu.Unlock() + + c.stats.TotalCorrected++ + key := fmt.Sprintf("%s->%s", from, to) + c.stats.CorrectionsByTool[key]++ + + log.Printf("[CodexToolCorrector] Corrected tool call: %s -> %s (total: %d)", + from, to, c.stats.TotalCorrected) +} + +// GetStats 获取工具修正统计信息 +func (c *CodexToolCorrector) GetStats() ToolCorrectionStats { + c.mu.RLock() + defer c.mu.RUnlock() + + // 返回副本以避免并发问题 + statsCopy := ToolCorrectionStats{ + TotalCorrected: c.stats.TotalCorrected, + CorrectionsByTool: make(map[string]int, len(c.stats.CorrectionsByTool)), + } + for k, v := range c.stats.CorrectionsByTool { + statsCopy.CorrectionsByTool[k] = v + } + + return statsCopy +} + +// ResetStats 重置统计信息 +func (c *CodexToolCorrector) ResetStats() { + c.mu.Lock() + defer c.mu.Unlock() + + c.stats.TotalCorrected = 0 + c.stats.CorrectionsByTool = make(map[string]int) +} + +// CorrectToolName 直接修正工具名称(用于非 SSE 场景) +func CorrectToolName(name string) (string, bool) { + if correctName, found := codexToolNameMapping[name]; found { + return correctName, true + } + return name, false +} + +// GetToolNameMapping 获取工具名称映射表 +func GetToolNameMapping() map[string]string { + // 返回副本以避免外部修改 + mapping := make(map[string]string, len(codexToolNameMapping)) + for k, v := range codexToolNameMapping { + mapping[k] = v + } + return mapping +} diff --git 
a/backend/internal/service/openai_tool_corrector_test.go b/backend/internal/service/openai_tool_corrector_test.go new file mode 100644 index 00000000..3e885b4b --- /dev/null +++ b/backend/internal/service/openai_tool_corrector_test.go @@ -0,0 +1,503 @@ +package service + +import ( + "encoding/json" + "testing" +) + +func TestCorrectToolCallsInSSEData(t *testing.T) { + corrector := NewCodexToolCorrector() + + tests := []struct { + name string + input string + expectCorrected bool + checkFunc func(t *testing.T, result string) + }{ + { + name: "empty string", + input: "", + expectCorrected: false, + }, + { + name: "newline only", + input: "\n", + expectCorrected: false, + }, + { + name: "invalid json", + input: "not a json", + expectCorrected: false, + }, + { + name: "correct apply_patch in tool_calls", + input: `{"tool_calls":[{"function":{"name":"apply_patch","arguments":"{}"}}]}`, + expectCorrected: true, + checkFunc: func(t *testing.T, result string) { + var payload map[string]any + if err := json.Unmarshal([]byte(result), &payload); err != nil { + t.Fatalf("Failed to parse result: %v", err) + } + toolCalls, ok := payload["tool_calls"].([]any) + if !ok || len(toolCalls) == 0 { + t.Fatal("No tool_calls found in result") + } + toolCall, ok := toolCalls[0].(map[string]any) + if !ok { + t.Fatal("Invalid tool_call format") + } + functionCall, ok := toolCall["function"].(map[string]any) + if !ok { + t.Fatal("Invalid function format") + } + if functionCall["name"] != "edit" { + t.Errorf("Expected tool name 'edit', got '%v'", functionCall["name"]) + } + }, + }, + { + name: "correct update_plan in function_call", + input: `{"function_call":{"name":"update_plan","arguments":"{}"}}`, + expectCorrected: true, + checkFunc: func(t *testing.T, result string) { + var payload map[string]any + if err := json.Unmarshal([]byte(result), &payload); err != nil { + t.Fatalf("Failed to parse result: %v", err) + } + functionCall, ok := payload["function_call"].(map[string]any) + if !ok { + 
t.Fatal("Invalid function_call format") + } + if functionCall["name"] != "todowrite" { + t.Errorf("Expected tool name 'todowrite', got '%v'", functionCall["name"]) + } + }, + }, + { + name: "correct search_files in delta.tool_calls", + input: `{"delta":{"tool_calls":[{"function":{"name":"search_files"}}]}}`, + expectCorrected: true, + checkFunc: func(t *testing.T, result string) { + var payload map[string]any + if err := json.Unmarshal([]byte(result), &payload); err != nil { + t.Fatalf("Failed to parse result: %v", err) + } + delta, ok := payload["delta"].(map[string]any) + if !ok { + t.Fatal("Invalid delta format") + } + toolCalls, ok := delta["tool_calls"].([]any) + if !ok || len(toolCalls) == 0 { + t.Fatal("No tool_calls found in delta") + } + toolCall, ok := toolCalls[0].(map[string]any) + if !ok { + t.Fatal("Invalid tool_call format") + } + functionCall, ok := toolCall["function"].(map[string]any) + if !ok { + t.Fatal("Invalid function format") + } + if functionCall["name"] != "grep" { + t.Errorf("Expected tool name 'grep', got '%v'", functionCall["name"]) + } + }, + }, + { + name: "correct list_files in choices.message.tool_calls", + input: `{"choices":[{"message":{"tool_calls":[{"function":{"name":"list_files"}}]}}]}`, + expectCorrected: true, + checkFunc: func(t *testing.T, result string) { + var payload map[string]any + if err := json.Unmarshal([]byte(result), &payload); err != nil { + t.Fatalf("Failed to parse result: %v", err) + } + choices, ok := payload["choices"].([]any) + if !ok || len(choices) == 0 { + t.Fatal("No choices found in result") + } + choice, ok := choices[0].(map[string]any) + if !ok { + t.Fatal("Invalid choice format") + } + message, ok := choice["message"].(map[string]any) + if !ok { + t.Fatal("Invalid message format") + } + toolCalls, ok := message["tool_calls"].([]any) + if !ok || len(toolCalls) == 0 { + t.Fatal("No tool_calls found in message") + } + toolCall, ok := toolCalls[0].(map[string]any) + if !ok { + t.Fatal("Invalid 
tool_call format") + } + functionCall, ok := toolCall["function"].(map[string]any) + if !ok { + t.Fatal("Invalid function format") + } + if functionCall["name"] != "glob" { + t.Errorf("Expected tool name 'glob', got '%v'", functionCall["name"]) + } + }, + }, + { + name: "no correction needed", + input: `{"tool_calls":[{"function":{"name":"read","arguments":"{}"}}]}`, + expectCorrected: false, + }, + { + name: "correct multiple tool calls", + input: `{"tool_calls":[{"function":{"name":"apply_patch"}},{"function":{"name":"read_file"}}]}`, + expectCorrected: true, + checkFunc: func(t *testing.T, result string) { + var payload map[string]any + if err := json.Unmarshal([]byte(result), &payload); err != nil { + t.Fatalf("Failed to parse result: %v", err) + } + toolCalls, ok := payload["tool_calls"].([]any) + if !ok || len(toolCalls) < 2 { + t.Fatal("Expected at least 2 tool_calls") + } + + toolCall1, ok := toolCalls[0].(map[string]any) + if !ok { + t.Fatal("Invalid first tool_call format") + } + func1, ok := toolCall1["function"].(map[string]any) + if !ok { + t.Fatal("Invalid first function format") + } + if func1["name"] != "edit" { + t.Errorf("Expected first tool name 'edit', got '%v'", func1["name"]) + } + + toolCall2, ok := toolCalls[1].(map[string]any) + if !ok { + t.Fatal("Invalid second tool_call format") + } + func2, ok := toolCall2["function"].(map[string]any) + if !ok { + t.Fatal("Invalid second function format") + } + if func2["name"] != "read" { + t.Errorf("Expected second tool name 'read', got '%v'", func2["name"]) + } + }, + }, + { + name: "camelCase format - applyPatch", + input: `{"tool_calls":[{"function":{"name":"applyPatch"}}]}`, + expectCorrected: true, + checkFunc: func(t *testing.T, result string) { + var payload map[string]any + if err := json.Unmarshal([]byte(result), &payload); err != nil { + t.Fatalf("Failed to parse result: %v", err) + } + toolCalls, ok := payload["tool_calls"].([]any) + if !ok || len(toolCalls) == 0 { + t.Fatal("No tool_calls 
found in result") + } + toolCall, ok := toolCalls[0].(map[string]any) + if !ok { + t.Fatal("Invalid tool_call format") + } + functionCall, ok := toolCall["function"].(map[string]any) + if !ok { + t.Fatal("Invalid function format") + } + if functionCall["name"] != "edit" { + t.Errorf("Expected tool name 'edit', got '%v'", functionCall["name"]) + } + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, corrected := corrector.CorrectToolCallsInSSEData(tt.input) + + if corrected != tt.expectCorrected { + t.Errorf("Expected corrected=%v, got %v", tt.expectCorrected, corrected) + } + + if !corrected && result != tt.input { + t.Errorf("Expected unchanged result when not corrected") + } + + if tt.checkFunc != nil { + tt.checkFunc(t, result) + } + }) + } +} + +func TestCorrectToolName(t *testing.T) { + tests := []struct { + input string + expected string + corrected bool + }{ + {"apply_patch", "edit", true}, + {"applyPatch", "edit", true}, + {"update_plan", "todowrite", true}, + {"updatePlan", "todowrite", true}, + {"read_plan", "todoread", true}, + {"readPlan", "todoread", true}, + {"search_files", "grep", true}, + {"searchFiles", "grep", true}, + {"list_files", "glob", true}, + {"listFiles", "glob", true}, + {"read_file", "read", true}, + {"readFile", "read", true}, + {"write_file", "write", true}, + {"writeFile", "write", true}, + {"execute_bash", "bash", true}, + {"executeBash", "bash", true}, + {"exec_bash", "bash", true}, + {"execBash", "bash", true}, + {"unknown_tool", "unknown_tool", false}, + {"read", "read", false}, + {"edit", "edit", false}, + } + + for _, tt := range tests { + t.Run(tt.input, func(t *testing.T) { + result, corrected := CorrectToolName(tt.input) + + if corrected != tt.corrected { + t.Errorf("Expected corrected=%v, got %v", tt.corrected, corrected) + } + + if result != tt.expected { + t.Errorf("Expected '%s', got '%s'", tt.expected, result) + } + }) + } +} + +func TestGetToolNameMapping(t *testing.T) { + 
mapping := GetToolNameMapping() + + expectedMappings := map[string]string{ + "apply_patch": "edit", + "update_plan": "todowrite", + "read_plan": "todoread", + "search_files": "grep", + "list_files": "glob", + } + + for from, to := range expectedMappings { + if mapping[from] != to { + t.Errorf("Expected mapping[%s] = %s, got %s", from, to, mapping[from]) + } + } + + mapping["test_tool"] = "test_value" + newMapping := GetToolNameMapping() + if _, exists := newMapping["test_tool"]; exists { + t.Error("Modifications to returned mapping should not affect original") + } +} + +func TestCorrectorStats(t *testing.T) { + corrector := NewCodexToolCorrector() + + stats := corrector.GetStats() + if stats.TotalCorrected != 0 { + t.Errorf("Expected TotalCorrected=0, got %d", stats.TotalCorrected) + } + if len(stats.CorrectionsByTool) != 0 { + t.Errorf("Expected empty CorrectionsByTool, got length %d", len(stats.CorrectionsByTool)) + } + + corrector.CorrectToolCallsInSSEData(`{"tool_calls":[{"function":{"name":"apply_patch"}}]}`) + corrector.CorrectToolCallsInSSEData(`{"tool_calls":[{"function":{"name":"apply_patch"}}]}`) + corrector.CorrectToolCallsInSSEData(`{"tool_calls":[{"function":{"name":"update_plan"}}]}`) + + stats = corrector.GetStats() + if stats.TotalCorrected != 3 { + t.Errorf("Expected TotalCorrected=3, got %d", stats.TotalCorrected) + } + + if stats.CorrectionsByTool["apply_patch->edit"] != 2 { + t.Errorf("Expected apply_patch->edit count=2, got %d", stats.CorrectionsByTool["apply_patch->edit"]) + } + + if stats.CorrectionsByTool["update_plan->todowrite"] != 1 { + t.Errorf("Expected update_plan->todowrite count=1, got %d", stats.CorrectionsByTool["update_plan->todowrite"]) + } + + corrector.ResetStats() + stats = corrector.GetStats() + if stats.TotalCorrected != 0 { + t.Errorf("Expected TotalCorrected=0 after reset, got %d", stats.TotalCorrected) + } + if len(stats.CorrectionsByTool) != 0 { + t.Errorf("Expected empty CorrectionsByTool after reset, got length %d", 
len(stats.CorrectionsByTool)) + } +} + +func TestComplexSSEData(t *testing.T) { + corrector := NewCodexToolCorrector() + + input := `{ + "id": "chatcmpl-123", + "object": "chat.completion.chunk", + "created": 1234567890, + "model": "gpt-5.1-codex", + "choices": [ + { + "index": 0, + "delta": { + "tool_calls": [ + { + "index": 0, + "function": { + "name": "apply_patch", + "arguments": "{\"file\":\"test.go\"}" + } + } + ] + }, + "finish_reason": null + } + ] + }` + + result, corrected := corrector.CorrectToolCallsInSSEData(input) + + if !corrected { + t.Error("Expected data to be corrected") + } + + var payload map[string]any + if err := json.Unmarshal([]byte(result), &payload); err != nil { + t.Fatalf("Failed to parse result: %v", err) + } + + choices, ok := payload["choices"].([]any) + if !ok || len(choices) == 0 { + t.Fatal("No choices found in result") + } + choice, ok := choices[0].(map[string]any) + if !ok { + t.Fatal("Invalid choice format") + } + delta, ok := choice["delta"].(map[string]any) + if !ok { + t.Fatal("Invalid delta format") + } + toolCalls, ok := delta["tool_calls"].([]any) + if !ok || len(toolCalls) == 0 { + t.Fatal("No tool_calls found in delta") + } + toolCall, ok := toolCalls[0].(map[string]any) + if !ok { + t.Fatal("Invalid tool_call format") + } + function, ok := toolCall["function"].(map[string]any) + if !ok { + t.Fatal("Invalid function format") + } + + if function["name"] != "edit" { + t.Errorf("Expected tool name 'edit', got '%v'", function["name"]) + } +} + +// TestCorrectToolParameters 测试工具参数修正 +func TestCorrectToolParameters(t *testing.T) { + corrector := NewCodexToolCorrector() + + tests := []struct { + name string + input string + expected map[string]bool // key: 期待存在的参数, value: true表示应该存在 + }{ + { + name: "remove workdir from bash tool", + input: `{ + "tool_calls": [{ + "function": { + "name": "bash", + "arguments": "{\"command\":\"ls\",\"workdir\":\"/tmp\"}" + } + }] + }`, + expected: map[string]bool{ + "command": true, + 
"workdir": false, + }, + }, + { + name: "rename path to file_path in edit tool", + input: `{ + "tool_calls": [{ + "function": { + "name": "apply_patch", + "arguments": "{\"path\":\"/foo/bar.go\",\"old_string\":\"old\",\"new_string\":\"new\"}" + } + }] + }`, + expected: map[string]bool{ + "file_path": true, + "path": false, + "old_string": true, + "new_string": true, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + corrected, changed := corrector.CorrectToolCallsInSSEData(tt.input) + if !changed { + t.Error("expected data to be corrected") + } + + // 解析修正后的数据 + var result map[string]any + if err := json.Unmarshal([]byte(corrected), &result); err != nil { + t.Fatalf("failed to parse corrected data: %v", err) + } + + // 检查工具调用 + toolCalls, ok := result["tool_calls"].([]any) + if !ok || len(toolCalls) == 0 { + t.Fatal("no tool_calls found in corrected data") + } + + toolCall, ok := toolCalls[0].(map[string]any) + if !ok { + t.Fatal("invalid tool_call structure") + } + + function, ok := toolCall["function"].(map[string]any) + if !ok { + t.Fatal("no function found in tool_call") + } + + argumentsStr, ok := function["arguments"].(string) + if !ok { + t.Fatal("arguments is not a string") + } + + var args map[string]any + if err := json.Unmarshal([]byte(argumentsStr), &args); err != nil { + t.Fatalf("failed to parse arguments: %v", err) + } + + // 验证期望的参数 + for param, shouldExist := range tt.expected { + _, exists := args[param] + if shouldExist && !exists { + t.Errorf("expected parameter %q to exist, but it doesn't", param) + } + if !shouldExist && exists { + t.Errorf("expected parameter %q to not exist, but it does", param) + } + } + }) + } +} diff --git a/backend/internal/service/ops_account_availability.go b/backend/internal/service/ops_account_availability.go new file mode 100644 index 00000000..da66ec4d --- /dev/null +++ b/backend/internal/service/ops_account_availability.go @@ -0,0 +1,194 @@ +package service + +import ( + "context" 
+ "errors" + "time" +) + +// GetAccountAvailabilityStats returns current account availability stats. +// +// Query-level filtering is intentionally limited to platform/group to match the dashboard scope. +func (s *OpsService) GetAccountAvailabilityStats(ctx context.Context, platformFilter string, groupIDFilter *int64) ( + map[string]*PlatformAvailability, + map[int64]*GroupAvailability, + map[int64]*AccountAvailability, + *time.Time, + error, +) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, nil, nil, nil, err + } + + accounts, err := s.listAllAccountsForOps(ctx, platformFilter) + if err != nil { + return nil, nil, nil, nil, err + } + + if groupIDFilter != nil && *groupIDFilter > 0 { + filtered := make([]Account, 0, len(accounts)) + for _, acc := range accounts { + for _, grp := range acc.Groups { + if grp != nil && grp.ID == *groupIDFilter { + filtered = append(filtered, acc) + break + } + } + } + accounts = filtered + } + + now := time.Now() + collectedAt := now + + platform := make(map[string]*PlatformAvailability) + group := make(map[int64]*GroupAvailability) + account := make(map[int64]*AccountAvailability) + + for _, acc := range accounts { + if acc.ID <= 0 { + continue + } + + isTempUnsched := false + if acc.TempUnschedulableUntil != nil && now.Before(*acc.TempUnschedulableUntil) { + isTempUnsched = true + } + + isRateLimited := acc.RateLimitResetAt != nil && now.Before(*acc.RateLimitResetAt) + isOverloaded := acc.OverloadUntil != nil && now.Before(*acc.OverloadUntil) + hasError := acc.Status == StatusError + + // Normalize exclusive status flags so the UI doesn't show conflicting badges. 
+ if hasError { + isRateLimited = false + isOverloaded = false + } + + isAvailable := acc.Status == StatusActive && acc.Schedulable && !isRateLimited && !isOverloaded && !isTempUnsched + + if acc.Platform != "" { + if _, ok := platform[acc.Platform]; !ok { + platform[acc.Platform] = &PlatformAvailability{ + Platform: acc.Platform, + } + } + p := platform[acc.Platform] + p.TotalAccounts++ + if isAvailable { + p.AvailableCount++ + } + if isRateLimited { + p.RateLimitCount++ + } + if hasError { + p.ErrorCount++ + } + } + + for _, grp := range acc.Groups { + if grp == nil || grp.ID <= 0 { + continue + } + if _, ok := group[grp.ID]; !ok { + group[grp.ID] = &GroupAvailability{ + GroupID: grp.ID, + GroupName: grp.Name, + Platform: grp.Platform, + } + } + g := group[grp.ID] + g.TotalAccounts++ + if isAvailable { + g.AvailableCount++ + } + if isRateLimited { + g.RateLimitCount++ + } + if hasError { + g.ErrorCount++ + } + } + + displayGroupID := int64(0) + displayGroupName := "" + if len(acc.Groups) > 0 && acc.Groups[0] != nil { + displayGroupID = acc.Groups[0].ID + displayGroupName = acc.Groups[0].Name + } + + item := &AccountAvailability{ + AccountID: acc.ID, + AccountName: acc.Name, + Platform: acc.Platform, + GroupID: displayGroupID, + GroupName: displayGroupName, + Status: acc.Status, + + IsAvailable: isAvailable, + IsRateLimited: isRateLimited, + IsOverloaded: isOverloaded, + HasError: hasError, + + ErrorMessage: acc.ErrorMessage, + } + + if isRateLimited && acc.RateLimitResetAt != nil { + item.RateLimitResetAt = acc.RateLimitResetAt + remainingSec := int64(time.Until(*acc.RateLimitResetAt).Seconds()) + if remainingSec > 0 { + item.RateLimitRemainingSec = &remainingSec + } + } + if isOverloaded && acc.OverloadUntil != nil { + item.OverloadUntil = acc.OverloadUntil + remainingSec := int64(time.Until(*acc.OverloadUntil).Seconds()) + if remainingSec > 0 { + item.OverloadRemainingSec = &remainingSec + } + } + if isTempUnsched && acc.TempUnschedulableUntil != nil { + 
item.TempUnschedulableUntil = acc.TempUnschedulableUntil + } + + account[acc.ID] = item + } + + return platform, group, account, &collectedAt, nil +} + +type OpsAccountAvailability struct { + Group *GroupAvailability + Accounts map[int64]*AccountAvailability + CollectedAt *time.Time +} + +func (s *OpsService) GetAccountAvailability(ctx context.Context, platformFilter string, groupIDFilter *int64) (*OpsAccountAvailability, error) { + if s == nil { + return nil, errors.New("ops service is nil") + } + + if s.getAccountAvailability != nil { + return s.getAccountAvailability(ctx, platformFilter, groupIDFilter) + } + + _, groupStats, accountStats, collectedAt, err := s.GetAccountAvailabilityStats(ctx, platformFilter, groupIDFilter) + if err != nil { + return nil, err + } + + var group *GroupAvailability + if groupIDFilter != nil && *groupIDFilter > 0 { + group = groupStats[*groupIDFilter] + } + + if accountStats == nil { + accountStats = map[int64]*AccountAvailability{} + } + + return &OpsAccountAvailability{ + Group: group, + Accounts: accountStats, + CollectedAt: collectedAt, + }, nil +} diff --git a/backend/internal/service/ops_advisory_lock.go b/backend/internal/service/ops_advisory_lock.go new file mode 100644 index 00000000..f7ef4cee --- /dev/null +++ b/backend/internal/service/ops_advisory_lock.go @@ -0,0 +1,46 @@ +package service + +import ( + "context" + "database/sql" + "hash/fnv" + "time" +) + +func hashAdvisoryLockID(key string) int64 { + h := fnv.New64a() + _, _ = h.Write([]byte(key)) + return int64(h.Sum64()) +} + +func tryAcquireDBAdvisoryLock(ctx context.Context, db *sql.DB, lockID int64) (func(), bool) { + if db == nil { + return nil, false + } + if ctx == nil { + ctx = context.Background() + } + + conn, err := db.Conn(ctx) + if err != nil { + return nil, false + } + + acquired := false + if err := conn.QueryRowContext(ctx, "SELECT pg_try_advisory_lock($1)", lockID).Scan(&acquired); err != nil { + _ = conn.Close() + return nil, false + } + if !acquired { 
+ _ = conn.Close() + return nil, false + } + + release := func() { + unlockCtx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _, _ = conn.ExecContext(unlockCtx, "SELECT pg_advisory_unlock($1)", lockID) + _ = conn.Close() + } + return release, true +} diff --git a/backend/internal/service/ops_aggregation_service.go b/backend/internal/service/ops_aggregation_service.go new file mode 100644 index 00000000..972462ec --- /dev/null +++ b/backend/internal/service/ops_aggregation_service.go @@ -0,0 +1,448 @@ +package service + +import ( + "context" + "database/sql" + "errors" + "fmt" + "log" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/google/uuid" + "github.com/redis/go-redis/v9" +) + +const ( + opsAggHourlyJobName = "ops_preaggregation_hourly" + opsAggDailyJobName = "ops_preaggregation_daily" + + opsAggHourlyInterval = 10 * time.Minute + opsAggDailyInterval = 1 * time.Hour + + // Keep in sync with ops retention target (vNext default 30d). + opsAggBackfillWindow = 30 * 24 * time.Hour + + // Recompute overlap to absorb late-arriving rows near boundaries. + opsAggHourlyOverlap = 2 * time.Hour + opsAggDailyOverlap = 48 * time.Hour + + opsAggHourlyChunk = 24 * time.Hour + opsAggDailyChunk = 7 * 24 * time.Hour + + // Delay around boundaries (e.g. 10:00..10:05) to avoid aggregating buckets + // that may still receive late inserts. + opsAggSafeDelay = 5 * time.Minute + + opsAggMaxQueryTimeout = 3 * time.Second + opsAggHourlyTimeout = 5 * time.Minute + opsAggDailyTimeout = 2 * time.Minute + + opsAggHourlyLeaderLockKey = "ops:aggregation:hourly:leader" + opsAggDailyLeaderLockKey = "ops:aggregation:daily:leader" + + opsAggHourlyLeaderLockTTL = 15 * time.Minute + opsAggDailyLeaderLockTTL = 10 * time.Minute +) + +// OpsAggregationService periodically backfills ops_metrics_hourly / ops_metrics_daily +// for stable long-window dashboard queries. 
+// +// It is safe to run in multi-replica deployments when Redis is available (leader lock). +type OpsAggregationService struct { + opsRepo OpsRepository + settingRepo SettingRepository + cfg *config.Config + + db *sql.DB + redisClient *redis.Client + instanceID string + + stopCh chan struct{} + startOnce sync.Once + stopOnce sync.Once + + hourlyMu sync.Mutex + dailyMu sync.Mutex + + skipLogMu sync.Mutex + skipLogAt time.Time +} + +func NewOpsAggregationService( + opsRepo OpsRepository, + settingRepo SettingRepository, + db *sql.DB, + redisClient *redis.Client, + cfg *config.Config, +) *OpsAggregationService { + return &OpsAggregationService{ + opsRepo: opsRepo, + settingRepo: settingRepo, + cfg: cfg, + db: db, + redisClient: redisClient, + instanceID: uuid.NewString(), + } +} + +func (s *OpsAggregationService) Start() { + if s == nil { + return + } + s.startOnce.Do(func() { + if s.stopCh == nil { + s.stopCh = make(chan struct{}) + } + go s.hourlyLoop() + go s.dailyLoop() + }) +} + +func (s *OpsAggregationService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + if s.stopCh != nil { + close(s.stopCh) + } + }) +} + +func (s *OpsAggregationService) hourlyLoop() { + // First run immediately. + s.aggregateHourly() + + ticker := time.NewTicker(opsAggHourlyInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + s.aggregateHourly() + case <-s.stopCh: + return + } + } +} + +func (s *OpsAggregationService) dailyLoop() { + // First run immediately. 
+ s.aggregateDaily() + + ticker := time.NewTicker(opsAggDailyInterval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + s.aggregateDaily() + case <-s.stopCh: + return + } + } +} + +func (s *OpsAggregationService) aggregateHourly() { + if s == nil || s.opsRepo == nil { + return + } + if s.cfg != nil { + if !s.cfg.Ops.Enabled { + return + } + if !s.cfg.Ops.Aggregation.Enabled { + return + } + } + + ctx, cancel := context.WithTimeout(context.Background(), opsAggHourlyTimeout) + defer cancel() + + if !s.isMonitoringEnabled(ctx) { + return + } + + release, ok := s.tryAcquireLeaderLock(ctx, opsAggHourlyLeaderLockKey, opsAggHourlyLeaderLockTTL, "[OpsAggregation][hourly]") + if !ok { + return + } + if release != nil { + defer release() + } + + s.hourlyMu.Lock() + defer s.hourlyMu.Unlock() + + startedAt := time.Now().UTC() + runAt := startedAt + + // Aggregate stable full hours only. + end := utcFloorToHour(time.Now().UTC().Add(-opsAggSafeDelay)) + start := end.Add(-opsAggBackfillWindow) + + // Resume from the latest bucket with overlap. 
+ { + ctxMax, cancelMax := context.WithTimeout(context.Background(), opsAggMaxQueryTimeout) + latest, ok, err := s.opsRepo.GetLatestHourlyBucketStart(ctxMax) + cancelMax() + if err != nil { + log.Printf("[OpsAggregation][hourly] failed to read latest bucket: %v", err) + } else if ok { + candidate := latest.Add(-opsAggHourlyOverlap) + if candidate.After(start) { + start = candidate + } + } + } + + start = utcFloorToHour(start) + if !start.Before(end) { + return + } + + var aggErr error + for cursor := start; cursor.Before(end); cursor = cursor.Add(opsAggHourlyChunk) { + chunkEnd := minTime(cursor.Add(opsAggHourlyChunk), end) + if err := s.opsRepo.UpsertHourlyMetrics(ctx, cursor, chunkEnd); err != nil { + aggErr = err + log.Printf("[OpsAggregation][hourly] upsert failed (%s..%s): %v", cursor.Format(time.RFC3339), chunkEnd.Format(time.RFC3339), err) + break + } + } + + finishedAt := time.Now().UTC() + durationMs := finishedAt.Sub(startedAt).Milliseconds() + dur := durationMs + + if aggErr != nil { + msg := truncateString(aggErr.Error(), 2048) + errAt := finishedAt + hbCtx, hbCancel := context.WithTimeout(context.Background(), 2*time.Second) + defer hbCancel() + _ = s.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{ + JobName: opsAggHourlyJobName, + LastRunAt: &runAt, + LastErrorAt: &errAt, + LastError: &msg, + LastDurationMs: &dur, + }) + return + } + + successAt := finishedAt + hbCtx, hbCancel := context.WithTimeout(context.Background(), 2*time.Second) + defer hbCancel() + result := truncateString(fmt.Sprintf("window=%s..%s", start.Format(time.RFC3339), end.Format(time.RFC3339)), 2048) + _ = s.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{ + JobName: opsAggHourlyJobName, + LastRunAt: &runAt, + LastSuccessAt: &successAt, + LastDurationMs: &dur, + LastResult: &result, + }) +} + +func (s *OpsAggregationService) aggregateDaily() { + if s == nil || s.opsRepo == nil { + return + } + if s.cfg != nil { + if !s.cfg.Ops.Enabled { + return + } + if 
!s.cfg.Ops.Aggregation.Enabled { + return + } + } + + ctx, cancel := context.WithTimeout(context.Background(), opsAggDailyTimeout) + defer cancel() + + if !s.isMonitoringEnabled(ctx) { + return + } + + release, ok := s.tryAcquireLeaderLock(ctx, opsAggDailyLeaderLockKey, opsAggDailyLeaderLockTTL, "[OpsAggregation][daily]") + if !ok { + return + } + if release != nil { + defer release() + } + + s.dailyMu.Lock() + defer s.dailyMu.Unlock() + + startedAt := time.Now().UTC() + runAt := startedAt + + end := utcFloorToDay(time.Now().UTC()) + start := end.Add(-opsAggBackfillWindow) + + { + ctxMax, cancelMax := context.WithTimeout(context.Background(), opsAggMaxQueryTimeout) + latest, ok, err := s.opsRepo.GetLatestDailyBucketDate(ctxMax) + cancelMax() + if err != nil { + log.Printf("[OpsAggregation][daily] failed to read latest bucket: %v", err) + } else if ok { + candidate := latest.Add(-opsAggDailyOverlap) + if candidate.After(start) { + start = candidate + } + } + } + + start = utcFloorToDay(start) + if !start.Before(end) { + return + } + + var aggErr error + for cursor := start; cursor.Before(end); cursor = cursor.Add(opsAggDailyChunk) { + chunkEnd := minTime(cursor.Add(opsAggDailyChunk), end) + if err := s.opsRepo.UpsertDailyMetrics(ctx, cursor, chunkEnd); err != nil { + aggErr = err + log.Printf("[OpsAggregation][daily] upsert failed (%s..%s): %v", cursor.Format("2006-01-02"), chunkEnd.Format("2006-01-02"), err) + break + } + } + + finishedAt := time.Now().UTC() + durationMs := finishedAt.Sub(startedAt).Milliseconds() + dur := durationMs + + if aggErr != nil { + msg := truncateString(aggErr.Error(), 2048) + errAt := finishedAt + hbCtx, hbCancel := context.WithTimeout(context.Background(), 2*time.Second) + defer hbCancel() + _ = s.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{ + JobName: opsAggDailyJobName, + LastRunAt: &runAt, + LastErrorAt: &errAt, + LastError: &msg, + LastDurationMs: &dur, + }) + return + } + + successAt := finishedAt + hbCtx, 
hbCancel := context.WithTimeout(context.Background(), 2*time.Second) + defer hbCancel() + result := truncateString(fmt.Sprintf("window=%s..%s", start.Format(time.RFC3339), end.Format(time.RFC3339)), 2048) + _ = s.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{ + JobName: opsAggDailyJobName, + LastRunAt: &runAt, + LastSuccessAt: &successAt, + LastDurationMs: &dur, + LastResult: &result, + }) +} + +func (s *OpsAggregationService) isMonitoringEnabled(ctx context.Context) bool { + if s == nil { + return false + } + if s.cfg != nil && !s.cfg.Ops.Enabled { + return false + } + if s.settingRepo == nil { + return true + } + if ctx == nil { + ctx = context.Background() + } + + value, err := s.settingRepo.GetValue(ctx, SettingKeyOpsMonitoringEnabled) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + return true + } + return true + } + switch strings.ToLower(strings.TrimSpace(value)) { + case "false", "0", "off", "disabled": + return false + default: + return true + } +} + +var opsAggReleaseScript = redis.NewScript(` +if redis.call("GET", KEYS[1]) == ARGV[1] then + return redis.call("DEL", KEYS[1]) +end +return 0 +`) + +func (s *OpsAggregationService) tryAcquireLeaderLock(ctx context.Context, key string, ttl time.Duration, logPrefix string) (func(), bool) { + if s == nil { + return nil, false + } + if ctx == nil { + ctx = context.Background() + } + + // Prefer Redis leader lock when available (multi-instance), but avoid stampeding + // the DB when Redis is flaky by falling back to a DB advisory lock. 
+ if s.redisClient != nil { + ok, err := s.redisClient.SetNX(ctx, key, s.instanceID, ttl).Result() + if err == nil { + if !ok { + s.maybeLogSkip(logPrefix) + return nil, false + } + release := func() { + ctx2, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _, _ = opsAggReleaseScript.Run(ctx2, s.redisClient, []string{key}, s.instanceID).Result() + } + return release, true + } + // Redis error: fall through to DB advisory lock. + } + + release, ok := tryAcquireDBAdvisoryLock(ctx, s.db, hashAdvisoryLockID(key)) + if !ok { + s.maybeLogSkip(logPrefix) + return nil, false + } + return release, true +} + +func (s *OpsAggregationService) maybeLogSkip(prefix string) { + s.skipLogMu.Lock() + defer s.skipLogMu.Unlock() + + now := time.Now() + if !s.skipLogAt.IsZero() && now.Sub(s.skipLogAt) < time.Minute { + return + } + s.skipLogAt = now + if prefix == "" { + prefix = "[OpsAggregation]" + } + log.Printf("%s leader lock held by another instance; skipping", prefix) +} + +func utcFloorToHour(t time.Time) time.Time { + return t.UTC().Truncate(time.Hour) +} + +func utcFloorToDay(t time.Time) time.Time { + u := t.UTC() + y, m, d := u.Date() + return time.Date(y, m, d, 0, 0, 0, 0, time.UTC) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} diff --git a/backend/internal/service/ops_alert_evaluator_service.go b/backend/internal/service/ops_alert_evaluator_service.go new file mode 100644 index 00000000..7c62e247 --- /dev/null +++ b/backend/internal/service/ops_alert_evaluator_service.go @@ -0,0 +1,944 @@ +package service + +import ( + "context" + "fmt" + "log" + "math" + "strconv" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/google/uuid" + "github.com/redis/go-redis/v9" +) + +const ( + opsAlertEvaluatorJobName = "ops_alert_evaluator" + + opsAlertEvaluatorTimeout = 45 * time.Second + opsAlertEvaluatorLeaderLockKey = "ops:alert:evaluator:leader" + 
opsAlertEvaluatorLeaderLockTTL = 90 * time.Second + opsAlertEvaluatorSkipLogInterval = 1 * time.Minute +) + +var opsAlertEvaluatorReleaseScript = redis.NewScript(` +if redis.call("GET", KEYS[1]) == ARGV[1] then + return redis.call("DEL", KEYS[1]) +end +return 0 +`) + +type OpsAlertEvaluatorService struct { + opsService *OpsService + opsRepo OpsRepository + emailService *EmailService + + redisClient *redis.Client + cfg *config.Config + instanceID string + + stopCh chan struct{} + startOnce sync.Once + stopOnce sync.Once + wg sync.WaitGroup + + mu sync.Mutex + ruleStates map[int64]*opsAlertRuleState + + emailLimiter *slidingWindowLimiter + + skipLogMu sync.Mutex + skipLogAt time.Time + + warnNoRedisOnce sync.Once +} + +type opsAlertRuleState struct { + LastEvaluatedAt time.Time + ConsecutiveBreaches int +} + +func NewOpsAlertEvaluatorService( + opsService *OpsService, + opsRepo OpsRepository, + emailService *EmailService, + redisClient *redis.Client, + cfg *config.Config, +) *OpsAlertEvaluatorService { + return &OpsAlertEvaluatorService{ + opsService: opsService, + opsRepo: opsRepo, + emailService: emailService, + redisClient: redisClient, + cfg: cfg, + instanceID: uuid.NewString(), + ruleStates: map[int64]*opsAlertRuleState{}, + emailLimiter: newSlidingWindowLimiter(0, time.Hour), + } +} + +func (s *OpsAlertEvaluatorService) Start() { + if s == nil { + return + } + s.startOnce.Do(func() { + if s.stopCh == nil { + s.stopCh = make(chan struct{}) + } + go s.run() + }) +} + +func (s *OpsAlertEvaluatorService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + if s.stopCh != nil { + close(s.stopCh) + } + }) + s.wg.Wait() +} + +func (s *OpsAlertEvaluatorService) run() { + s.wg.Add(1) + defer s.wg.Done() + + // Start immediately to produce early feedback in ops dashboard. 
+ timer := time.NewTimer(0) + defer timer.Stop() + + for { + select { + case <-timer.C: + interval := s.getInterval() + s.evaluateOnce(interval) + timer.Reset(interval) + case <-s.stopCh: + return + } + } +} + +func (s *OpsAlertEvaluatorService) getInterval() time.Duration { + // Default. + interval := 60 * time.Second + + if s == nil || s.opsService == nil { + return interval + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + cfg, err := s.opsService.GetOpsAlertRuntimeSettings(ctx) + if err != nil || cfg == nil { + return interval + } + if cfg.EvaluationIntervalSeconds <= 0 { + return interval + } + if cfg.EvaluationIntervalSeconds < 1 { + return interval + } + if cfg.EvaluationIntervalSeconds > int((24 * time.Hour).Seconds()) { + return interval + } + return time.Duration(cfg.EvaluationIntervalSeconds) * time.Second +} + +func (s *OpsAlertEvaluatorService) evaluateOnce(interval time.Duration) { + if s == nil || s.opsRepo == nil { + return + } + if s.cfg != nil && !s.cfg.Ops.Enabled { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), opsAlertEvaluatorTimeout) + defer cancel() + + if s.opsService != nil && !s.opsService.IsMonitoringEnabled(ctx) { + return + } + + runtimeCfg := defaultOpsAlertRuntimeSettings() + if s.opsService != nil { + if loaded, err := s.opsService.GetOpsAlertRuntimeSettings(ctx); err == nil && loaded != nil { + runtimeCfg = loaded + } + } + + release, ok := s.tryAcquireLeaderLock(ctx, runtimeCfg.DistributedLock) + if !ok { + return + } + if release != nil { + defer release() + } + + startedAt := time.Now().UTC() + runAt := startedAt + + rules, err := s.opsRepo.ListAlertRules(ctx) + if err != nil { + s.recordHeartbeatError(runAt, time.Since(startedAt), err) + log.Printf("[OpsAlertEvaluator] list rules failed: %v", err) + return + } + + rulesTotal := len(rules) + rulesEnabled := 0 + rulesEvaluated := 0 + eventsCreated := 0 + eventsResolved := 0 + emailsSent := 0 + + now := 
time.Now().UTC() + safeEnd := now.Truncate(time.Minute) + if safeEnd.IsZero() { + safeEnd = now + } + + systemMetrics, _ := s.opsRepo.GetLatestSystemMetrics(ctx, 1) + + // Cleanup stale state for removed rules. + s.pruneRuleStates(rules) + + for _, rule := range rules { + if rule == nil || !rule.Enabled || rule.ID <= 0 { + continue + } + rulesEnabled++ + + scopePlatform, scopeGroupID, scopeRegion := parseOpsAlertRuleScope(rule.Filters) + + windowMinutes := rule.WindowMinutes + if windowMinutes <= 0 { + windowMinutes = 1 + } + windowStart := safeEnd.Add(-time.Duration(windowMinutes) * time.Minute) + windowEnd := safeEnd + + metricValue, ok := s.computeRuleMetric(ctx, rule, systemMetrics, windowStart, windowEnd, scopePlatform, scopeGroupID) + if !ok { + s.resetRuleState(rule.ID, now) + continue + } + rulesEvaluated++ + + breachedNow := compareMetric(metricValue, rule.Operator, rule.Threshold) + required := requiredSustainedBreaches(rule.SustainedMinutes, interval) + consecutive := s.updateRuleBreaches(rule.ID, now, interval, breachedNow) + + activeEvent, err := s.opsRepo.GetActiveAlertEvent(ctx, rule.ID) + if err != nil { + log.Printf("[OpsAlertEvaluator] get active event failed (rule=%d): %v", rule.ID, err) + continue + } + + if breachedNow && consecutive >= required { + if activeEvent != nil { + continue + } + + // Scoped silencing: if a matching silence exists, skip creating a firing event. 
+ if s.opsService != nil { + platform := strings.TrimSpace(scopePlatform) + region := scopeRegion + if platform != "" { + if ok, err := s.opsService.IsAlertSilenced(ctx, rule.ID, platform, scopeGroupID, region, now); err == nil && ok { + continue + } + } + } + + latestEvent, err := s.opsRepo.GetLatestAlertEvent(ctx, rule.ID) + if err != nil { + log.Printf("[OpsAlertEvaluator] get latest event failed (rule=%d): %v", rule.ID, err) + continue + } + if latestEvent != nil && rule.CooldownMinutes > 0 { + cooldown := time.Duration(rule.CooldownMinutes) * time.Minute + if now.Sub(latestEvent.FiredAt) < cooldown { + continue + } + } + + firedEvent := &OpsAlertEvent{ + RuleID: rule.ID, + Severity: strings.TrimSpace(rule.Severity), + Status: OpsAlertStatusFiring, + Title: fmt.Sprintf("%s: %s", strings.TrimSpace(rule.Severity), strings.TrimSpace(rule.Name)), + Description: buildOpsAlertDescription(rule, metricValue, windowMinutes, scopePlatform, scopeGroupID), + MetricValue: float64Ptr(metricValue), + ThresholdValue: float64Ptr(rule.Threshold), + Dimensions: buildOpsAlertDimensions(scopePlatform, scopeGroupID), + FiredAt: now, + CreatedAt: now, + } + + created, err := s.opsRepo.CreateAlertEvent(ctx, firedEvent) + if err != nil { + log.Printf("[OpsAlertEvaluator] create event failed (rule=%d): %v", rule.ID, err) + continue + } + + eventsCreated++ + if created != nil && created.ID > 0 { + if s.maybeSendAlertEmail(ctx, runtimeCfg, rule, created) { + emailsSent++ + } + } + continue + } + + // Not breached: resolve active event if present. 
+ if activeEvent != nil { + resolvedAt := now + if err := s.opsRepo.UpdateAlertEventStatus(ctx, activeEvent.ID, OpsAlertStatusResolved, &resolvedAt); err != nil { + log.Printf("[OpsAlertEvaluator] resolve event failed (event=%d): %v", activeEvent.ID, err) + } else { + eventsResolved++ + } + } + } + + result := truncateString(fmt.Sprintf("rules=%d enabled=%d evaluated=%d created=%d resolved=%d emails_sent=%d", rulesTotal, rulesEnabled, rulesEvaluated, eventsCreated, eventsResolved, emailsSent), 2048) + s.recordHeartbeatSuccess(runAt, time.Since(startedAt), result) +} + +func (s *OpsAlertEvaluatorService) pruneRuleStates(rules []*OpsAlertRule) { + s.mu.Lock() + defer s.mu.Unlock() + + live := map[int64]struct{}{} + for _, r := range rules { + if r != nil && r.ID > 0 { + live[r.ID] = struct{}{} + } + } + for id := range s.ruleStates { + if _, ok := live[id]; !ok { + delete(s.ruleStates, id) + } + } +} + +func (s *OpsAlertEvaluatorService) resetRuleState(ruleID int64, now time.Time) { + if ruleID <= 0 { + return + } + s.mu.Lock() + defer s.mu.Unlock() + state, ok := s.ruleStates[ruleID] + if !ok { + state = &opsAlertRuleState{} + s.ruleStates[ruleID] = state + } + state.LastEvaluatedAt = now + state.ConsecutiveBreaches = 0 +} + +func (s *OpsAlertEvaluatorService) updateRuleBreaches(ruleID int64, now time.Time, interval time.Duration, breached bool) int { + if ruleID <= 0 { + return 0 + } + s.mu.Lock() + defer s.mu.Unlock() + + state, ok := s.ruleStates[ruleID] + if !ok { + state = &opsAlertRuleState{} + s.ruleStates[ruleID] = state + } + + if !state.LastEvaluatedAt.IsZero() && interval > 0 { + if now.Sub(state.LastEvaluatedAt) > interval*2 { + state.ConsecutiveBreaches = 0 + } + } + + state.LastEvaluatedAt = now + if breached { + state.ConsecutiveBreaches++ + } else { + state.ConsecutiveBreaches = 0 + } + return state.ConsecutiveBreaches +} + +func requiredSustainedBreaches(sustainedMinutes int, interval time.Duration) int { + if sustainedMinutes <= 0 { + return 1 + } 
+ if interval <= 0 { + return sustainedMinutes + } + required := int(math.Ceil(float64(sustainedMinutes*60) / interval.Seconds())) + if required < 1 { + return 1 + } + return required +} + +func parseOpsAlertRuleScope(filters map[string]any) (platform string, groupID *int64, region *string) { + if filters == nil { + return "", nil, nil + } + if v, ok := filters["platform"]; ok { + if s, ok := v.(string); ok { + platform = strings.TrimSpace(s) + } + } + if v, ok := filters["group_id"]; ok { + switch t := v.(type) { + case float64: + if t > 0 { + id := int64(t) + groupID = &id + } + case int64: + if t > 0 { + id := t + groupID = &id + } + case int: + if t > 0 { + id := int64(t) + groupID = &id + } + case string: + n, err := strconv.ParseInt(strings.TrimSpace(t), 10, 64) + if err == nil && n > 0 { + groupID = &n + } + } + } + if v, ok := filters["region"]; ok { + if s, ok := v.(string); ok { + vv := strings.TrimSpace(s) + if vv != "" { + region = &vv + } + } + } + return platform, groupID, region +} + +func (s *OpsAlertEvaluatorService) computeRuleMetric( + ctx context.Context, + rule *OpsAlertRule, + systemMetrics *OpsSystemMetricsSnapshot, + start time.Time, + end time.Time, + platform string, + groupID *int64, +) (float64, bool) { + if rule == nil { + return 0, false + } + switch strings.TrimSpace(rule.MetricType) { + case "cpu_usage_percent": + if systemMetrics != nil && systemMetrics.CPUUsagePercent != nil { + return *systemMetrics.CPUUsagePercent, true + } + return 0, false + case "memory_usage_percent": + if systemMetrics != nil && systemMetrics.MemoryUsagePercent != nil { + return *systemMetrics.MemoryUsagePercent, true + } + return 0, false + case "concurrency_queue_depth": + if systemMetrics != nil && systemMetrics.ConcurrencyQueueDepth != nil { + return float64(*systemMetrics.ConcurrencyQueueDepth), true + } + return 0, false + case "group_available_accounts": + if groupID == nil || *groupID <= 0 { + return 0, false + } + if s == nil || s.opsService == nil 
{ + return 0, false + } + availability, err := s.opsService.GetAccountAvailability(ctx, platform, groupID) + if err != nil || availability == nil { + return 0, false + } + if availability.Group == nil { + return 0, true + } + return float64(availability.Group.AvailableCount), true + case "group_available_ratio": + if groupID == nil || *groupID <= 0 { + return 0, false + } + if s == nil || s.opsService == nil { + return 0, false + } + availability, err := s.opsService.GetAccountAvailability(ctx, platform, groupID) + if err != nil || availability == nil { + return 0, false + } + return computeGroupAvailableRatio(availability.Group), true + case "account_rate_limited_count": + if s == nil || s.opsService == nil { + return 0, false + } + availability, err := s.opsService.GetAccountAvailability(ctx, platform, groupID) + if err != nil || availability == nil { + return 0, false + } + return float64(countAccountsByCondition(availability.Accounts, func(acc *AccountAvailability) bool { + return acc.IsRateLimited + })), true + case "account_error_count": + if s == nil || s.opsService == nil { + return 0, false + } + availability, err := s.opsService.GetAccountAvailability(ctx, platform, groupID) + if err != nil || availability == nil { + return 0, false + } + return float64(countAccountsByCondition(availability.Accounts, func(acc *AccountAvailability) bool { + return acc.HasError && acc.TempUnschedulableUntil == nil + })), true + } + + overview, err := s.opsRepo.GetDashboardOverview(ctx, &OpsDashboardFilter{ + StartTime: start, + EndTime: end, + Platform: platform, + GroupID: groupID, + QueryMode: OpsQueryModeRaw, + }) + if err != nil { + return 0, false + } + if overview == nil { + return 0, false + } + + switch strings.TrimSpace(rule.MetricType) { + case "success_rate": + if overview.RequestCountSLA <= 0 { + return 0, false + } + return overview.SLA * 100, true + case "error_rate": + if overview.RequestCountSLA <= 0 { + return 0, false + } + return overview.ErrorRate * 100, 
true + case "upstream_error_rate": + if overview.RequestCountSLA <= 0 { + return 0, false + } + return overview.UpstreamErrorRate * 100, true + default: + return 0, false + } +} + +func compareMetric(value float64, operator string, threshold float64) bool { + switch strings.TrimSpace(operator) { + case ">": + return value > threshold + case ">=": + return value >= threshold + case "<": + return value < threshold + case "<=": + return value <= threshold + case "==": + return value == threshold + case "!=": + return value != threshold + default: + return false + } +} + +func buildOpsAlertDimensions(platform string, groupID *int64) map[string]any { + dims := map[string]any{} + if strings.TrimSpace(platform) != "" { + dims["platform"] = strings.TrimSpace(platform) + } + if groupID != nil && *groupID > 0 { + dims["group_id"] = *groupID + } + if len(dims) == 0 { + return nil + } + return dims +} + +func buildOpsAlertDescription(rule *OpsAlertRule, value float64, windowMinutes int, platform string, groupID *int64) string { + if rule == nil { + return "" + } + scope := "overall" + if strings.TrimSpace(platform) != "" { + scope = fmt.Sprintf("platform=%s", strings.TrimSpace(platform)) + } + if groupID != nil && *groupID > 0 { + scope = fmt.Sprintf("%s group_id=%d", scope, *groupID) + } + if windowMinutes <= 0 { + windowMinutes = 1 + } + return fmt.Sprintf("%s %s %.2f (current %.2f) over last %dm (%s)", + strings.TrimSpace(rule.MetricType), + strings.TrimSpace(rule.Operator), + rule.Threshold, + value, + windowMinutes, + strings.TrimSpace(scope), + ) +} + +func (s *OpsAlertEvaluatorService) maybeSendAlertEmail(ctx context.Context, runtimeCfg *OpsAlertRuntimeSettings, rule *OpsAlertRule, event *OpsAlertEvent) bool { + if s == nil || s.emailService == nil || s.opsService == nil || event == nil || rule == nil { + return false + } + if event.EmailSent { + return false + } + if !rule.NotifyEmail { + return false + } + + emailCfg, err := 
s.opsService.GetEmailNotificationConfig(ctx) + if err != nil || emailCfg == nil || !emailCfg.Alert.Enabled { + return false + } + + if len(emailCfg.Alert.Recipients) == 0 { + return false + } + if !shouldSendOpsAlertEmailByMinSeverity(strings.TrimSpace(emailCfg.Alert.MinSeverity), strings.TrimSpace(rule.Severity)) { + return false + } + + if runtimeCfg != nil && runtimeCfg.Silencing.Enabled { + if isOpsAlertSilenced(time.Now().UTC(), rule, event, runtimeCfg.Silencing) { + return false + } + } + + // Apply/update rate limiter. + s.emailLimiter.SetLimit(emailCfg.Alert.RateLimitPerHour) + + subject := fmt.Sprintf("[Ops Alert][%s] %s", strings.TrimSpace(rule.Severity), strings.TrimSpace(rule.Name)) + body := buildOpsAlertEmailBody(rule, event) + + anySent := false + for _, to := range emailCfg.Alert.Recipients { + addr := strings.TrimSpace(to) + if addr == "" { + continue + } + if !s.emailLimiter.Allow(time.Now().UTC()) { + continue + } + if err := s.emailService.SendEmail(ctx, addr, subject, body); err != nil { + // Ignore per-recipient failures; continue best-effort. + continue + } + anySent = true + } + + if anySent { + _ = s.opsRepo.UpdateAlertEventEmailSent(context.Background(), event.ID, true) + } + return anySent +} + +func buildOpsAlertEmailBody(rule *OpsAlertRule, event *OpsAlertEvent) string { + if rule == nil || event == nil { + return "" + } + metric := strings.TrimSpace(rule.MetricType) + value := "-" + threshold := fmt.Sprintf("%.2f", rule.Threshold) + if event.MetricValue != nil { + value = fmt.Sprintf("%.2f", *event.MetricValue) + } + if event.ThresholdValue != nil { + threshold = fmt.Sprintf("%.2f", *event.ThresholdValue) + } + return fmt.Sprintf(` +

Ops Alert

+

Rule: %s

+

Severity: %s

+

Status: %s

+

Metric: %s %s %s

+

Fired at: %s

+

Description: %s

+`, + htmlEscape(rule.Name), + htmlEscape(rule.Severity), + htmlEscape(event.Status), + htmlEscape(metric), + htmlEscape(rule.Operator), + htmlEscape(fmt.Sprintf("%s (threshold %s)", value, threshold)), + event.FiredAt.Format(time.RFC3339), + htmlEscape(event.Description), + ) +} + +func shouldSendOpsAlertEmailByMinSeverity(minSeverity string, ruleSeverity string) bool { + minSeverity = strings.ToLower(strings.TrimSpace(minSeverity)) + if minSeverity == "" { + return true + } + + eventLevel := opsEmailSeverityForOps(ruleSeverity) + minLevel := strings.ToLower(minSeverity) + + rank := func(level string) int { + switch level { + case "critical": + return 3 + case "warning": + return 2 + case "info": + return 1 + default: + return 0 + } + } + return rank(eventLevel) >= rank(minLevel) +} + +func opsEmailSeverityForOps(severity string) string { + switch strings.ToUpper(strings.TrimSpace(severity)) { + case "P0": + return "critical" + case "P1": + return "warning" + default: + return "info" + } +} + +func isOpsAlertSilenced(now time.Time, rule *OpsAlertRule, event *OpsAlertEvent, silencing OpsAlertSilencingSettings) bool { + if !silencing.Enabled { + return false + } + if now.IsZero() { + now = time.Now().UTC() + } + if strings.TrimSpace(silencing.GlobalUntilRFC3339) != "" { + if t, err := time.Parse(time.RFC3339, strings.TrimSpace(silencing.GlobalUntilRFC3339)); err == nil { + if now.Before(t) { + return true + } + } + } + + for _, entry := range silencing.Entries { + untilRaw := strings.TrimSpace(entry.UntilRFC3339) + if untilRaw == "" { + continue + } + until, err := time.Parse(time.RFC3339, untilRaw) + if err != nil { + continue + } + if now.After(until) { + continue + } + if entry.RuleID != nil && rule != nil && rule.ID > 0 && *entry.RuleID != rule.ID { + continue + } + if len(entry.Severities) > 0 { + match := false + for _, s := range entry.Severities { + if strings.EqualFold(strings.TrimSpace(s), strings.TrimSpace(event.Severity)) || 
strings.EqualFold(strings.TrimSpace(s), strings.TrimSpace(rule.Severity)) { + match = true + break + } + } + if !match { + continue + } + } + return true + } + + return false +} + +func (s *OpsAlertEvaluatorService) tryAcquireLeaderLock(ctx context.Context, lock OpsDistributedLockSettings) (func(), bool) { + if !lock.Enabled { + return nil, true + } + if s.redisClient == nil { + s.warnNoRedisOnce.Do(func() { + log.Printf("[OpsAlertEvaluator] redis not configured; running without distributed lock") + }) + return nil, true + } + key := strings.TrimSpace(lock.Key) + if key == "" { + key = opsAlertEvaluatorLeaderLockKey + } + ttl := time.Duration(lock.TTLSeconds) * time.Second + if ttl <= 0 { + ttl = opsAlertEvaluatorLeaderLockTTL + } + + ok, err := s.redisClient.SetNX(ctx, key, s.instanceID, ttl).Result() + if err != nil { + // Prefer fail-closed to avoid duplicate evaluators stampeding the DB when Redis is flaky. + // Single-node deployments can disable the distributed lock via runtime settings. 
+ s.warnNoRedisOnce.Do(func() { + log.Printf("[OpsAlertEvaluator] leader lock SetNX failed; skipping this cycle: %v", err) + }) + return nil, false + } + if !ok { + s.maybeLogSkip(key) + return nil, false + } + return func() { + _, _ = opsAlertEvaluatorReleaseScript.Run(ctx, s.redisClient, []string{key}, s.instanceID).Result() + }, true +} + +func (s *OpsAlertEvaluatorService) maybeLogSkip(key string) { + s.skipLogMu.Lock() + defer s.skipLogMu.Unlock() + + now := time.Now() + if !s.skipLogAt.IsZero() && now.Sub(s.skipLogAt) < opsAlertEvaluatorSkipLogInterval { + return + } + s.skipLogAt = now + log.Printf("[OpsAlertEvaluator] leader lock held by another instance; skipping (key=%q)", key) +} + +func (s *OpsAlertEvaluatorService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration, result string) { + if s == nil || s.opsRepo == nil { + return + } + now := time.Now().UTC() + durMs := duration.Milliseconds() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + msg := strings.TrimSpace(result) + if msg == "" { + msg = "ok" + } + msg = truncateString(msg, 2048) + _ = s.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{ + JobName: opsAlertEvaluatorJobName, + LastRunAt: &runAt, + LastSuccessAt: &now, + LastDurationMs: &durMs, + LastResult: &msg, + }) +} + +func (s *OpsAlertEvaluatorService) recordHeartbeatError(runAt time.Time, duration time.Duration, err error) { + if s == nil || s.opsRepo == nil || err == nil { + return + } + now := time.Now().UTC() + durMs := duration.Milliseconds() + msg := truncateString(err.Error(), 2048) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _ = s.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{ + JobName: opsAlertEvaluatorJobName, + LastRunAt: &runAt, + LastErrorAt: &now, + LastError: &msg, + LastDurationMs: &durMs, + }) +} + +func htmlEscape(s string) string { + replacer := strings.NewReplacer( + "&", "&", + "<", "<", + 
">", ">", + `"`, """, + "'", "'", + ) + return replacer.Replace(s) +} + +type slidingWindowLimiter struct { + mu sync.Mutex + limit int + window time.Duration + sent []time.Time +} + +func newSlidingWindowLimiter(limit int, window time.Duration) *slidingWindowLimiter { + if window <= 0 { + window = time.Hour + } + return &slidingWindowLimiter{ + limit: limit, + window: window, + sent: []time.Time{}, + } +} + +func (l *slidingWindowLimiter) SetLimit(limit int) { + l.mu.Lock() + defer l.mu.Unlock() + l.limit = limit +} + +func (l *slidingWindowLimiter) Allow(now time.Time) bool { + l.mu.Lock() + defer l.mu.Unlock() + + if l.limit <= 0 { + return true + } + cutoff := now.Add(-l.window) + keep := l.sent[:0] + for _, t := range l.sent { + if t.After(cutoff) { + keep = append(keep, t) + } + } + l.sent = keep + if len(l.sent) >= l.limit { + return false + } + l.sent = append(l.sent, now) + return true +} + +// computeGroupAvailableRatio returns the available percentage for a group. +// Formula: (AvailableCount / TotalAccounts) * 100. +// Returns 0 when TotalAccounts is 0. +func computeGroupAvailableRatio(group *GroupAvailability) float64 { + if group == nil || group.TotalAccounts <= 0 { + return 0 + } + return (float64(group.AvailableCount) / float64(group.TotalAccounts)) * 100 +} + +// countAccountsByCondition counts accounts that satisfy the given condition. 
+func countAccountsByCondition(accounts map[int64]*AccountAvailability, condition func(*AccountAvailability) bool) int64 { + if len(accounts) == 0 || condition == nil { + return 0 + } + var count int64 + for _, account := range accounts { + if account != nil && condition(account) { + count++ + } + } + return count +} diff --git a/backend/internal/service/ops_alert_evaluator_service_test.go b/backend/internal/service/ops_alert_evaluator_service_test.go new file mode 100644 index 00000000..068ab6bb --- /dev/null +++ b/backend/internal/service/ops_alert_evaluator_service_test.go @@ -0,0 +1,210 @@ +//go:build unit + +package service + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type stubOpsRepo struct { + OpsRepository + overview *OpsDashboardOverview + err error +} + +func (s *stubOpsRepo) GetDashboardOverview(ctx context.Context, filter *OpsDashboardFilter) (*OpsDashboardOverview, error) { + if s.err != nil { + return nil, s.err + } + if s.overview != nil { + return s.overview, nil + } + return &OpsDashboardOverview{}, nil +} + +func TestComputeGroupAvailableRatio(t *testing.T) { + t.Parallel() + + t.Run("正常情况: 10个账号, 8个可用 = 80%", func(t *testing.T) { + t.Parallel() + + got := computeGroupAvailableRatio(&GroupAvailability{ + TotalAccounts: 10, + AvailableCount: 8, + }) + require.InDelta(t, 80.0, got, 0.0001) + }) + + t.Run("边界情况: TotalAccounts = 0 应返回 0", func(t *testing.T) { + t.Parallel() + + got := computeGroupAvailableRatio(&GroupAvailability{ + TotalAccounts: 0, + AvailableCount: 8, + }) + require.Equal(t, 0.0, got) + }) + + t.Run("边界情况: AvailableCount = 0 应返回 0%", func(t *testing.T) { + t.Parallel() + + got := computeGroupAvailableRatio(&GroupAvailability{ + TotalAccounts: 10, + AvailableCount: 0, + }) + require.Equal(t, 0.0, got) + }) +} + +func TestCountAccountsByCondition(t *testing.T) { + t.Parallel() + + t.Run("测试限流账号统计: acc.IsRateLimited", func(t *testing.T) { + t.Parallel() + + accounts := 
map[int64]*AccountAvailability{ + 1: {IsRateLimited: true}, + 2: {IsRateLimited: false}, + 3: {IsRateLimited: true}, + } + + got := countAccountsByCondition(accounts, func(acc *AccountAvailability) bool { + return acc.IsRateLimited + }) + require.Equal(t, int64(2), got) + }) + + t.Run("测试错误账号统计(排除临时不可调度): acc.HasError && acc.TempUnschedulableUntil == nil", func(t *testing.T) { + t.Parallel() + + until := time.Now().UTC().Add(5 * time.Minute) + accounts := map[int64]*AccountAvailability{ + 1: {HasError: true}, + 2: {HasError: true, TempUnschedulableUntil: &until}, + 3: {HasError: false}, + } + + got := countAccountsByCondition(accounts, func(acc *AccountAvailability) bool { + return acc.HasError && acc.TempUnschedulableUntil == nil + }) + require.Equal(t, int64(1), got) + }) + + t.Run("边界情况: 空 map 应返回 0", func(t *testing.T) { + t.Parallel() + + got := countAccountsByCondition(map[int64]*AccountAvailability{}, func(acc *AccountAvailability) bool { + return acc.IsRateLimited + }) + require.Equal(t, int64(0), got) + }) +} + +func TestComputeRuleMetricNewIndicators(t *testing.T) { + t.Parallel() + + groupID := int64(101) + platform := "openai" + + availability := &OpsAccountAvailability{ + Group: &GroupAvailability{ + GroupID: groupID, + TotalAccounts: 10, + AvailableCount: 8, + }, + Accounts: map[int64]*AccountAvailability{ + 1: {IsRateLimited: true}, + 2: {IsRateLimited: true}, + 3: {HasError: true}, + 4: {HasError: true, TempUnschedulableUntil: timePtr(time.Now().UTC().Add(2 * time.Minute))}, + 5: {HasError: false, IsRateLimited: false}, + }, + } + + opsService := &OpsService{ + getAccountAvailability: func(_ context.Context, _ string, _ *int64) (*OpsAccountAvailability, error) { + return availability, nil + }, + } + + svc := &OpsAlertEvaluatorService{ + opsService: opsService, + opsRepo: &stubOpsRepo{overview: &OpsDashboardOverview{}}, + } + + start := time.Now().UTC().Add(-5 * time.Minute) + end := time.Now().UTC() + ctx := context.Background() + + tests := 
[]struct { + name string + metricType string + groupID *int64 + wantValue float64 + wantOK bool + }{ + { + name: "group_available_accounts", + metricType: "group_available_accounts", + groupID: &groupID, + wantValue: 8, + wantOK: true, + }, + { + name: "group_available_ratio", + metricType: "group_available_ratio", + groupID: &groupID, + wantValue: 80.0, + wantOK: true, + }, + { + name: "account_rate_limited_count", + metricType: "account_rate_limited_count", + groupID: nil, + wantValue: 2, + wantOK: true, + }, + { + name: "account_error_count", + metricType: "account_error_count", + groupID: nil, + wantValue: 1, + wantOK: true, + }, + { + name: "group_available_accounts without group_id returns false", + metricType: "group_available_accounts", + groupID: nil, + wantValue: 0, + wantOK: false, + }, + { + name: "group_available_ratio without group_id returns false", + metricType: "group_available_ratio", + groupID: nil, + wantValue: 0, + wantOK: false, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + rule := &OpsAlertRule{ + MetricType: tt.metricType, + } + gotValue, gotOK := svc.computeRuleMetric(ctx, rule, nil, start, end, platform, tt.groupID) + require.Equal(t, tt.wantOK, gotOK) + if !tt.wantOK { + return + } + require.InDelta(t, tt.wantValue, gotValue, 0.0001) + }) + } +} diff --git a/backend/internal/service/ops_alert_models.go b/backend/internal/service/ops_alert_models.go new file mode 100644 index 00000000..a0caa990 --- /dev/null +++ b/backend/internal/service/ops_alert_models.go @@ -0,0 +1,95 @@ +package service + +import "time" + +// Ops alert rule/event models. +// +// NOTE: These are admin-facing DTOs and intentionally keep JSON naming aligned +// with the existing ops dashboard frontend (backup style). 
+ +const ( + OpsAlertStatusFiring = "firing" + OpsAlertStatusResolved = "resolved" + OpsAlertStatusManualResolved = "manual_resolved" +) + +type OpsAlertRule struct { + ID int64 `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + + Enabled bool `json:"enabled"` + Severity string `json:"severity"` + + MetricType string `json:"metric_type"` + Operator string `json:"operator"` + Threshold float64 `json:"threshold"` + + WindowMinutes int `json:"window_minutes"` + SustainedMinutes int `json:"sustained_minutes"` + CooldownMinutes int `json:"cooldown_minutes"` + + NotifyEmail bool `json:"notify_email"` + + Filters map[string]any `json:"filters,omitempty"` + + LastTriggeredAt *time.Time `json:"last_triggered_at,omitempty"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` +} + +type OpsAlertEvent struct { + ID int64 `json:"id"` + RuleID int64 `json:"rule_id"` + Severity string `json:"severity"` + Status string `json:"status"` + + Title string `json:"title"` + Description string `json:"description"` + + MetricValue *float64 `json:"metric_value,omitempty"` + ThresholdValue *float64 `json:"threshold_value,omitempty"` + + Dimensions map[string]any `json:"dimensions,omitempty"` + + FiredAt time.Time `json:"fired_at"` + ResolvedAt *time.Time `json:"resolved_at,omitempty"` + + EmailSent bool `json:"email_sent"` + CreatedAt time.Time `json:"created_at"` +} + +type OpsAlertSilence struct { + ID int64 `json:"id"` + + RuleID int64 `json:"rule_id"` + Platform string `json:"platform"` + GroupID *int64 `json:"group_id,omitempty"` + Region *string `json:"region,omitempty"` + + Until time.Time `json:"until"` + Reason string `json:"reason"` + + CreatedBy *int64 `json:"created_by,omitempty"` + CreatedAt time.Time `json:"created_at"` +} + +type OpsAlertEventFilter struct { + Limit int + + // Cursor pagination (descending by fired_at, then id). + BeforeFiredAt *time.Time + BeforeID *int64 + + // Optional filters. 
+ Status string + Severity string + EmailSent *bool + + StartTime *time.Time + EndTime *time.Time + + // Dimensions filters (best-effort). + Platform string + GroupID *int64 +} diff --git a/backend/internal/service/ops_alerts.go b/backend/internal/service/ops_alerts.go new file mode 100644 index 00000000..b4c09824 --- /dev/null +++ b/backend/internal/service/ops_alerts.go @@ -0,0 +1,232 @@ +package service + +import ( + "context" + "database/sql" + "errors" + "strings" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +func (s *OpsService) ListAlertRules(ctx context.Context) ([]*OpsAlertRule, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return []*OpsAlertRule{}, nil + } + return s.opsRepo.ListAlertRules(ctx) +} + +func (s *OpsService) CreateAlertRule(ctx context.Context, rule *OpsAlertRule) (*OpsAlertRule, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if rule == nil { + return nil, infraerrors.BadRequest("INVALID_RULE", "invalid rule") + } + + created, err := s.opsRepo.CreateAlertRule(ctx, rule) + if err != nil { + return nil, err + } + return created, nil +} + +func (s *OpsService) UpdateAlertRule(ctx context.Context, rule *OpsAlertRule) (*OpsAlertRule, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if rule == nil || rule.ID <= 0 { + return nil, infraerrors.BadRequest("INVALID_RULE", "invalid rule") + } + + updated, err := s.opsRepo.UpdateAlertRule(ctx, rule) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, infraerrors.NotFound("OPS_ALERT_RULE_NOT_FOUND", "alert rule not found") + } + return nil, err + } + 
return updated, nil +} + +func (s *OpsService) DeleteAlertRule(ctx context.Context, id int64) error { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return err + } + if s.opsRepo == nil { + return infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if id <= 0 { + return infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id") + } + if err := s.opsRepo.DeleteAlertRule(ctx, id); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return infraerrors.NotFound("OPS_ALERT_RULE_NOT_FOUND", "alert rule not found") + } + return err + } + return nil +} + +func (s *OpsService) ListAlertEvents(ctx context.Context, filter *OpsAlertEventFilter) ([]*OpsAlertEvent, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return []*OpsAlertEvent{}, nil + } + return s.opsRepo.ListAlertEvents(ctx, filter) +} + +func (s *OpsService) GetAlertEventByID(ctx context.Context, eventID int64) (*OpsAlertEvent, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if eventID <= 0 { + return nil, infraerrors.BadRequest("INVALID_EVENT_ID", "invalid event id") + } + ev, err := s.opsRepo.GetAlertEventByID(ctx, eventID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, infraerrors.NotFound("OPS_ALERT_EVENT_NOT_FOUND", "alert event not found") + } + return nil, err + } + if ev == nil { + return nil, infraerrors.NotFound("OPS_ALERT_EVENT_NOT_FOUND", "alert event not found") + } + return ev, nil +} + +func (s *OpsService) GetActiveAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } 
+ if ruleID <= 0 { + return nil, infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id") + } + return s.opsRepo.GetActiveAlertEvent(ctx, ruleID) +} + +func (s *OpsService) CreateAlertSilence(ctx context.Context, input *OpsAlertSilence) (*OpsAlertSilence, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if input == nil { + return nil, infraerrors.BadRequest("INVALID_SILENCE", "invalid silence") + } + if input.RuleID <= 0 { + return nil, infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id") + } + if strings.TrimSpace(input.Platform) == "" { + return nil, infraerrors.BadRequest("INVALID_PLATFORM", "invalid platform") + } + if input.Until.IsZero() { + return nil, infraerrors.BadRequest("INVALID_UNTIL", "invalid until") + } + + created, err := s.opsRepo.CreateAlertSilence(ctx, input) + if err != nil { + return nil, err + } + return created, nil +} + +func (s *OpsService) IsAlertSilenced(ctx context.Context, ruleID int64, platform string, groupID *int64, region *string, now time.Time) (bool, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return false, err + } + if s.opsRepo == nil { + return false, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if ruleID <= 0 { + return false, infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id") + } + if strings.TrimSpace(platform) == "" { + return false, nil + } + return s.opsRepo.IsAlertSilenced(ctx, ruleID, platform, groupID, region, now) +} + +func (s *OpsService) GetLatestAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if ruleID <= 0 { + return 
nil, infraerrors.BadRequest("INVALID_RULE_ID", "invalid rule id") + } + return s.opsRepo.GetLatestAlertEvent(ctx, ruleID) +} + +func (s *OpsService) CreateAlertEvent(ctx context.Context, event *OpsAlertEvent) (*OpsAlertEvent, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if event == nil { + return nil, infraerrors.BadRequest("INVALID_EVENT", "invalid event") + } + + created, err := s.opsRepo.CreateAlertEvent(ctx, event) + if err != nil { + return nil, err + } + return created, nil +} + +func (s *OpsService) UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return err + } + if s.opsRepo == nil { + return infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if eventID <= 0 { + return infraerrors.BadRequest("INVALID_EVENT_ID", "invalid event id") + } + status = strings.TrimSpace(status) + if status == "" { + return infraerrors.BadRequest("INVALID_STATUS", "invalid status") + } + if status != OpsAlertStatusResolved && status != OpsAlertStatusManualResolved { + return infraerrors.BadRequest("INVALID_STATUS", "invalid status") + } + return s.opsRepo.UpdateAlertEventStatus(ctx, eventID, status, resolvedAt) +} + +func (s *OpsService) UpdateAlertEventEmailSent(ctx context.Context, eventID int64, emailSent bool) error { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return err + } + if s.opsRepo == nil { + return infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if eventID <= 0 { + return infraerrors.BadRequest("INVALID_EVENT_ID", "invalid event id") + } + return s.opsRepo.UpdateAlertEventEmailSent(ctx, eventID, emailSent) +} diff --git a/backend/internal/service/ops_cleanup_service.go 
b/backend/internal/service/ops_cleanup_service.go new file mode 100644 index 00000000..1ade7176 --- /dev/null +++ b/backend/internal/service/ops_cleanup_service.go @@ -0,0 +1,367 @@ +package service + +import ( + "context" + "database/sql" + "fmt" + "log" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/google/uuid" + "github.com/redis/go-redis/v9" + "github.com/robfig/cron/v3" +) + +const ( + opsCleanupJobName = "ops_cleanup" + + opsCleanupLeaderLockKeyDefault = "ops:cleanup:leader" + opsCleanupLeaderLockTTLDefault = 30 * time.Minute +) + +var opsCleanupCronParser = cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) + +var opsCleanupReleaseScript = redis.NewScript(` +if redis.call("GET", KEYS[1]) == ARGV[1] then + return redis.call("DEL", KEYS[1]) +end +return 0 +`) + +// OpsCleanupService periodically deletes old ops data to prevent unbounded DB growth. +// +// - Scheduling: 5-field cron spec (minute hour dom month dow). +// - Multi-instance: best-effort Redis leader lock so only one node runs cleanup. +// - Safety: deletes in batches to avoid long transactions. 
+type OpsCleanupService struct { + opsRepo OpsRepository + db *sql.DB + redisClient *redis.Client + cfg *config.Config + + instanceID string + + cron *cron.Cron + + startOnce sync.Once + stopOnce sync.Once + + warnNoRedisOnce sync.Once +} + +func NewOpsCleanupService( + opsRepo OpsRepository, + db *sql.DB, + redisClient *redis.Client, + cfg *config.Config, +) *OpsCleanupService { + return &OpsCleanupService{ + opsRepo: opsRepo, + db: db, + redisClient: redisClient, + cfg: cfg, + instanceID: uuid.NewString(), + } +} + +func (s *OpsCleanupService) Start() { + if s == nil { + return + } + if s.cfg != nil && !s.cfg.Ops.Enabled { + return + } + if s.cfg != nil && !s.cfg.Ops.Cleanup.Enabled { + log.Printf("[OpsCleanup] not started (disabled)") + return + } + if s.opsRepo == nil || s.db == nil { + log.Printf("[OpsCleanup] not started (missing deps)") + return + } + + s.startOnce.Do(func() { + schedule := "0 2 * * *" + if s.cfg != nil && strings.TrimSpace(s.cfg.Ops.Cleanup.Schedule) != "" { + schedule = strings.TrimSpace(s.cfg.Ops.Cleanup.Schedule) + } + + loc := time.Local + if s.cfg != nil && strings.TrimSpace(s.cfg.Timezone) != "" { + if parsed, err := time.LoadLocation(strings.TrimSpace(s.cfg.Timezone)); err == nil && parsed != nil { + loc = parsed + } + } + + c := cron.New(cron.WithParser(opsCleanupCronParser), cron.WithLocation(loc)) + _, err := c.AddFunc(schedule, func() { s.runScheduled() }) + if err != nil { + log.Printf("[OpsCleanup] not started (invalid schedule=%q): %v", schedule, err) + return + } + s.cron = c + s.cron.Start() + log.Printf("[OpsCleanup] started (schedule=%q tz=%s)", schedule, loc.String()) + }) +} + +func (s *OpsCleanupService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + if s.cron != nil { + ctx := s.cron.Stop() + select { + case <-ctx.Done(): + case <-time.After(3 * time.Second): + log.Printf("[OpsCleanup] cron stop timed out") + } + } + }) +} + +func (s *OpsCleanupService) runScheduled() { + if s == nil || s.db == nil || 
s.opsRepo == nil { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + + release, ok := s.tryAcquireLeaderLock(ctx) + if !ok { + return + } + if release != nil { + defer release() + } + + startedAt := time.Now().UTC() + runAt := startedAt + + counts, err := s.runCleanupOnce(ctx) + if err != nil { + s.recordHeartbeatError(runAt, time.Since(startedAt), err) + log.Printf("[OpsCleanup] cleanup failed: %v", err) + return + } + s.recordHeartbeatSuccess(runAt, time.Since(startedAt), counts) + log.Printf("[OpsCleanup] cleanup complete: %s", counts) +} + +type opsCleanupDeletedCounts struct { + errorLogs int64 + retryAttempts int64 + alertEvents int64 + systemMetrics int64 + hourlyPreagg int64 + dailyPreagg int64 +} + +func (c opsCleanupDeletedCounts) String() string { + return fmt.Sprintf( + "error_logs=%d retry_attempts=%d alert_events=%d system_metrics=%d hourly_preagg=%d daily_preagg=%d", + c.errorLogs, + c.retryAttempts, + c.alertEvents, + c.systemMetrics, + c.hourlyPreagg, + c.dailyPreagg, + ) +} + +func (s *OpsCleanupService) runCleanupOnce(ctx context.Context) (opsCleanupDeletedCounts, error) { + out := opsCleanupDeletedCounts{} + if s == nil || s.db == nil || s.cfg == nil { + return out, nil + } + + batchSize := 5000 + + now := time.Now().UTC() + + // Error-like tables: error logs / retry attempts / alert events. 
+	if days := s.cfg.Ops.Cleanup.ErrorLogRetentionDays; days > 0 {
+		cutoff := now.AddDate(0, 0, -days)
+		n, err := deleteOldRowsByID(ctx, s.db, "ops_error_logs", "created_at", cutoff, batchSize, false)
+		if err != nil {
+			return out, err
+		}
+		out.errorLogs = n
+
+		n, err = deleteOldRowsByID(ctx, s.db, "ops_retry_attempts", "created_at", cutoff, batchSize, false)
+		if err != nil {
+			return out, err
+		}
+		out.retryAttempts = n
+
+		n, err = deleteOldRowsByID(ctx, s.db, "ops_alert_events", "created_at", cutoff, batchSize, false)
+		if err != nil {
+			return out, err
+		}
+		out.alertEvents = n
+	}
+
+	// Minute-level metrics snapshots.
+	if days := s.cfg.Ops.Cleanup.MinuteMetricsRetentionDays; days > 0 {
+		cutoff := now.AddDate(0, 0, -days)
+		n, err := deleteOldRowsByID(ctx, s.db, "ops_system_metrics", "created_at", cutoff, batchSize, false)
+		if err != nil {
+			return out, err
+		}
+		out.systemMetrics = n
+	}
+
+	// Pre-aggregation tables (hourly/daily).
+	if days := s.cfg.Ops.Cleanup.HourlyMetricsRetentionDays; days > 0 {
+		cutoff := now.AddDate(0, 0, -days)
+		n, err := deleteOldRowsByID(ctx, s.db, "ops_metrics_hourly", "bucket_start", cutoff, batchSize, false)
+		if err != nil {
+			return out, err
+		}
+		out.hourlyPreagg = n
+
+		// bucket_date is a DATE column, so the timestamp cutoff must be cast (castCutoffToDate=true).
+		n, err = deleteOldRowsByID(ctx, s.db, "ops_metrics_daily", "bucket_date", cutoff, batchSize, true)
+		if err != nil {
+			return out, err
+		}
+		out.dailyPreagg = n
+	}
+
+	return out, nil
+}
+
+// deleteOldRowsByID deletes rows older than cutoff from table, in id-ordered
+// batches of batchSize, so that a large retention purge never deletes (and
+// locks) an unbounded number of rows in a single statement.
+//
+// castCutoffToDate adds a `::date` cast on the bind parameter for DATE-typed
+// time columns (e.g. ops_metrics_daily.bucket_date).
+//
+// Returns the total number of rows deleted across all batches. A missing
+// table ("relation ... does not exist") is treated as a no-op so partial
+// deployments do not fail the cleanup job.
+func deleteOldRowsByID(
+	ctx context.Context,
+	db *sql.DB,
+	table string,
+	timeColumn string,
+	cutoff time.Time,
+	batchSize int,
+	castCutoffToDate bool,
+) (int64, error) {
+	if db == nil {
+		return 0, nil
+	}
+	if batchSize <= 0 {
+		// Defensive default so a zero/negative config value cannot produce LIMIT 0 (infinite loop).
+		batchSize = 5000
+	}
+
+	// table/timeColumn are internal constants (see callers above), never user input,
+	// so building the identifier part of the SQL with Sprintf is safe here.
+	where := fmt.Sprintf("%s < $1", timeColumn)
+	if castCutoffToDate {
+		where = fmt.Sprintf("%s < $1::date", timeColumn)
+	}
+
+	q := fmt.Sprintf(`
+WITH batch AS (
+	SELECT id FROM %s
+	WHERE %s
+	ORDER BY id
+	LIMIT $2
+)
+DELETE FROM %s
+WHERE id IN (SELECT id FROM batch)
+`, table, where, table)
+
+	// Loop until a batch deletes zero rows (i.e. nothing older than cutoff remains).
+	var total int64
+	for {
+		res, err := db.ExecContext(ctx, q, cutoff, batchSize)
+		if err != nil {
+			// If ops tables aren't present yet (partial deployments), treat as no-op.
+			if strings.Contains(strings.ToLower(err.Error()), "does not exist") && strings.Contains(strings.ToLower(err.Error()), "relation") {
+				return total, nil
+			}
+			return total, err
+		}
+		affected, err := res.RowsAffected()
+		if err != nil {
+			return total, err
+		}
+		total += affected
+		if affected == 0 {
+			break
+		}
+	}
+	return total, nil
+}
+
+// tryAcquireLeaderLock elects a single cleanup leader across instances.
+// NOTE: the returned release func may be nil (e.g. simple run mode below);
+// callers must nil-check before invoking it.
+func (s *OpsCleanupService) tryAcquireLeaderLock(ctx context.Context) (func(), bool) {
+	if s == nil {
+		return nil, false
+	}
+	// In simple run mode, assume single instance.
+	if s.cfg != nil && s.cfg.RunMode == config.RunModeSimple {
+		return nil, true
+	}
+
+	key := opsCleanupLeaderLockKeyDefault
+	ttl := opsCleanupLeaderLockTTLDefault
+
+	// Prefer Redis leader lock when available, but avoid stampeding the DB when Redis is flaky by
+	// falling back to a DB advisory lock.
+	if s.redisClient != nil {
+		ok, err := s.redisClient.SetNX(ctx, key, s.instanceID, ttl).Result()
+		if err == nil {
+			if !ok {
+				return nil, false
+			}
+			// Release via Lua script keyed on instanceID so we only delete our own lock.
+			return func() {
+				_, _ = opsCleanupReleaseScript.Run(ctx, s.redisClient, []string{key}, s.instanceID).Result()
+			}, true
+		}
+		// Redis error: fall back to DB advisory lock.
+		// Warn only once per process lifetime to avoid log spam on every tick.
+		s.warnNoRedisOnce.Do(func() {
+			log.Printf("[OpsCleanup] leader lock SetNX failed; falling back to DB advisory lock: %v", err)
+		})
+	} else {
+		s.warnNoRedisOnce.Do(func() {
+			log.Printf("[OpsCleanup] redis not configured; using DB advisory lock")
+		})
+	}
+
+	release, ok := tryAcquireDBAdvisoryLock(ctx, s.db, hashAdvisoryLockID(key))
+	if !ok {
+		return nil, false
+	}
+	return release, true
+}
+
+// recordHeartbeatSuccess persists a best-effort job heartbeat after a
+// successful run. Uses its own short-timeout context and ignores errors:
+// heartbeat bookkeeping must never affect the cleanup job itself.
+func (s *OpsCleanupService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration, counts opsCleanupDeletedCounts) {
+	if s == nil || s.opsRepo == nil {
+		return
+	}
+	now := time.Now().UTC()
+	durMs := duration.Milliseconds()
+	result := truncateString(counts.String(), 2048)
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+	_ = s.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{
+		JobName:        opsCleanupJobName,
+		LastRunAt:      &runAt,
+		LastSuccessAt:  &now,
+		LastDurationMs: &durMs,
+		LastResult:     &result,
+	})
+}
+
+// recordHeartbeatError persists a best-effort job heartbeat after a failed
+// run (no-op when err is nil). Same fire-and-forget semantics as
+// recordHeartbeatSuccess; the error message is truncated to 2048 chars.
+func (s *OpsCleanupService) recordHeartbeatError(runAt time.Time, duration time.Duration, err error) {
+	if s == nil || s.opsRepo == nil || err == nil {
+		return
+	}
+	now := time.Now().UTC()
+	durMs := duration.Milliseconds()
+	msg := truncateString(err.Error(), 2048)
+	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+	defer cancel()
+	_ = s.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{
+		JobName:        opsCleanupJobName,
+		LastRunAt:      &runAt,
+		LastErrorAt:    &now,
+		LastError:      &msg,
+		LastDurationMs: &durMs,
+	})
+}
diff --git a/backend/internal/service/ops_concurrency.go b/backend/internal/service/ops_concurrency.go
new file mode 100644
index 00000000..c3b7b853
--- /dev/null
+++ b/backend/internal/service/ops_concurrency.go
@@ -0,0 +1,257 @@
+package service
+
+import (
+	"context"
+	"log"
+	"time"
+
+	"github.com/Wei-Shaw/sub2api/internal/pkg/pagination"
+)
+
+const (
+	opsAccountsPageSize          = 100
+	opsConcurrencyBatchChunkSize = 200
+)
+
+// listAllAccountsForOps pages through every account (optionally filtered by
+// platform) for ops-side aggregation. Capped at 10k pages as a runaway guard;
+// when the cap is hit it logs and returns what was collected so far.
+func (s *OpsService) listAllAccountsForOps(ctx context.Context, platformFilter string) ([]Account, error) {
+	if s == nil || s.accountRepo == nil {
+		return []Account{}, nil
+	}
+
+	out := make([]Account, 0, 128)
+	page := 1
+	for {
+		accounts, pageInfo, err := s.accountRepo.ListWithFilters(ctx, pagination.PaginationParams{
+			Page:     page,
+			PageSize: opsAccountsPageSize,
+		}, platformFilter, "", "", "")
+		if err != nil {
+			return nil, err
+		}
+		if len(accounts) == 0 {
+			break
+		}
+
+		out = append(out, accounts...)
+		// Stop when the reported total is reached, or when a short page signals the end.
+		if pageInfo != nil && int64(len(out)) >= pageInfo.Total {
+			break
+		}
+		if len(accounts) < opsAccountsPageSize {
+			break
+		}
+
+		page++
+		if page > 10_000 {
+			log.Printf("[Ops] listAllAccountsForOps: aborting after too many pages (platform=%q)", platformFilter)
+			break
+		}
+	}
+
+	return out, nil
+}
+
+// getAccountsLoadMapBestEffort fetches live concurrency load for the given
+// accounts, chunked to bound the batch size. Chunks that fail are logged and
+// skipped — the caller then sees zero load for those accounts rather than an
+// error — so the ops UI keeps rendering.
+func (s *OpsService) getAccountsLoadMapBestEffort(ctx context.Context, accounts []Account) map[int64]*AccountLoadInfo {
+	if s == nil || s.concurrencyService == nil {
+		return map[int64]*AccountLoadInfo{}
+	}
+	if len(accounts) == 0 {
+		return map[int64]*AccountLoadInfo{}
+	}
+
+	// De-duplicate IDs (and keep the max concurrency to avoid under-reporting).
+	unique := make(map[int64]int, len(accounts))
+	for _, acc := range accounts {
+		if acc.ID <= 0 {
+			continue
+		}
+		if prev, ok := unique[acc.ID]; !ok || acc.Concurrency > prev {
+			unique[acc.ID] = acc.Concurrency
+		}
+	}
+
+	batch := make([]AccountWithConcurrency, 0, len(unique))
+	for id, maxConc := range unique {
+		batch = append(batch, AccountWithConcurrency{
+			ID:             id,
+			MaxConcurrency: maxConc,
+		})
+	}
+
+	out := make(map[int64]*AccountLoadInfo, len(batch))
+	for i := 0; i < len(batch); i += opsConcurrencyBatchChunkSize {
+		end := i + opsConcurrencyBatchChunkSize
+		if end > len(batch) {
+			end = len(batch)
+		}
+		part, err := s.concurrencyService.GetAccountsLoadBatch(ctx, batch[i:end])
+		if err != nil {
+			// Best-effort: return zeros rather than failing the ops UI.
+			log.Printf("[Ops] GetAccountsLoadBatch failed: %v", err)
+			continue
+		}
+		for k, v := range part {
+			out[k] = v
+		}
+	}
+
+	return out
+}
+
+// GetConcurrencyStats returns real-time concurrency usage aggregated by platform/group/account.
+//
+// Optional filters:
+//   - platformFilter: only include accounts in that platform (best-effort reduces DB load)
+//   - groupIDFilter: only include accounts that belong to that group
+func (s *OpsService) GetConcurrencyStats(
+	ctx context.Context,
+	platformFilter string,
+	groupIDFilter *int64,
+) (map[string]*PlatformConcurrencyInfo, map[int64]*GroupConcurrencyInfo, map[int64]*AccountConcurrencyInfo, *time.Time, error) {
+	if err := s.RequireMonitoringEnabled(ctx); err != nil {
+		return nil, nil, nil, nil, err
+	}
+
+	accounts, err := s.listAllAccountsForOps(ctx, platformFilter)
+	if err != nil {
+		return nil, nil, nil, nil, err
+	}
+
+	collectedAt := time.Now()
+	// Load fetch is best-effort: missing entries show as zero usage below.
+	loadMap := s.getAccountsLoadMapBestEffort(ctx, accounts)
+
+	platform := make(map[string]*PlatformConcurrencyInfo)
+	group := make(map[int64]*GroupConcurrencyInfo)
+	account := make(map[int64]*AccountConcurrencyInfo)
+
+	for _, acc := range accounts {
+		if acc.ID <= 0 {
+			continue
+		}
+
+		var matchedGroup *Group
+		if groupIDFilter != nil && *groupIDFilter > 0 {
+			for _, grp := range acc.Groups {
+				if grp == nil || grp.ID <= 0 {
+					continue
+				}
+				if grp.ID == *groupIDFilter {
+					matchedGroup = grp
+					break
+				}
+			}
+			// Group filter provided: skip accounts not in that group.
+			if matchedGroup == nil {
+				continue
+			}
+		}
+
+		load := loadMap[acc.ID]
+		currentInUse := int64(0)
+		waiting := int64(0)
+		if load != nil {
+			currentInUse = int64(load.CurrentConcurrency)
+			waiting = int64(load.WaitingCount)
+		}
+
+		// Account-level view picks one display group (the first group).
+		displayGroupID := int64(0)
+		displayGroupName := ""
+		if matchedGroup != nil {
+			displayGroupID = matchedGroup.ID
+			displayGroupName = matchedGroup.Name
+		} else if len(acc.Groups) > 0 && acc.Groups[0] != nil {
+			displayGroupID = acc.Groups[0].ID
+			displayGroupName = acc.Groups[0].Name
+		}
+
+		if _, ok := account[acc.ID]; !ok {
+			info := &AccountConcurrencyInfo{
+				AccountID:      acc.ID,
+				AccountName:    acc.Name,
+				Platform:       acc.Platform,
+				GroupID:        displayGroupID,
+				GroupName:      displayGroupName,
+				CurrentInUse:   currentInUse,
+				MaxCapacity:    int64(acc.Concurrency),
+				WaitingInQueue: waiting,
+			}
+			if info.MaxCapacity > 0 {
+				info.LoadPercentage = float64(info.CurrentInUse) / float64(info.MaxCapacity) * 100
+			}
+			account[acc.ID] = info
+		}
+
+		// Platform aggregation.
+		if acc.Platform != "" {
+			if _, ok := platform[acc.Platform]; !ok {
+				platform[acc.Platform] = &PlatformConcurrencyInfo{
+					Platform: acc.Platform,
+				}
+			}
+			p := platform[acc.Platform]
+			p.MaxCapacity += int64(acc.Concurrency)
+			p.CurrentInUse += currentInUse
+			p.WaitingInQueue += waiting
+		}
+
+		// Group aggregation (one account may contribute to multiple groups).
+		if matchedGroup != nil {
+			grp := matchedGroup
+			if _, ok := group[grp.ID]; !ok {
+				group[grp.ID] = &GroupConcurrencyInfo{
+					GroupID:   grp.ID,
+					GroupName: grp.Name,
+					Platform:  grp.Platform,
+				}
+			}
+			g := group[grp.ID]
+			if g.GroupName == "" && grp.Name != "" {
+				g.GroupName = grp.Name
+			}
+			if g.Platform != "" && grp.Platform != "" && g.Platform != grp.Platform {
+				// Groups are expected to be platform-scoped. If mismatch is observed, avoid misleading labels.
+				g.Platform = ""
+			}
+			g.MaxCapacity += int64(acc.Concurrency)
+			g.CurrentInUse += currentInUse
+			g.WaitingInQueue += waiting
+		} else {
+			for _, grp := range acc.Groups {
+				if grp == nil || grp.ID <= 0 {
+					continue
+				}
+				if _, ok := group[grp.ID]; !ok {
+					group[grp.ID] = &GroupConcurrencyInfo{
+						GroupID:   grp.ID,
+						GroupName: grp.Name,
+						Platform:  grp.Platform,
+					}
+				}
+				g := group[grp.ID]
+				if g.GroupName == "" && grp.Name != "" {
+					g.GroupName = grp.Name
+				}
+				if g.Platform != "" && grp.Platform != "" && g.Platform != grp.Platform {
+					// Groups are expected to be platform-scoped. If mismatch is observed, avoid misleading labels.
+					g.Platform = ""
+				}
+				g.MaxCapacity += int64(acc.Concurrency)
+				g.CurrentInUse += currentInUse
+				g.WaitingInQueue += waiting
+			}
+		}
+	}
+
+	// Derive load percentages once totals are final.
+	for _, info := range platform {
+		if info.MaxCapacity > 0 {
+			info.LoadPercentage = float64(info.CurrentInUse) / float64(info.MaxCapacity) * 100
+		}
+	}
+	for _, info := range group {
+		if info.MaxCapacity > 0 {
+			info.LoadPercentage = float64(info.CurrentInUse) / float64(info.MaxCapacity) * 100
+		}
+	}
+
+	return platform, group, account, &collectedAt, nil
+}
diff --git a/backend/internal/service/ops_dashboard.go b/backend/internal/service/ops_dashboard.go
new file mode 100644
index 00000000..31822ba8
--- /dev/null
+++ b/backend/internal/service/ops_dashboard.go
@@ -0,0 +1,90 @@
+package service
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"log"
+	"time"
+
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+)
+
+// GetDashboardOverview validates the time-range filter, loads the aggregate
+// dashboard metrics from the repository, then best-effort attaches the latest
+// system metrics snapshot, job heartbeats and a computed health score.
+func (s *OpsService) GetDashboardOverview(ctx context.Context, filter *OpsDashboardFilter) (*OpsDashboardOverview, error) {
+	if err := s.RequireMonitoringEnabled(ctx); err != nil {
+		return nil, err
+	}
+	if s.opsRepo == nil {
+		return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
+	}
+	if filter == nil {
+		return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required")
+	}
+	if filter.StartTime.IsZero() || filter.EndTime.IsZero() {
+		return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required")
+	}
+	if filter.StartTime.After(filter.EndTime) {
+		return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time")
+	}
+
+	// Resolve query mode (requested via query param, or DB default).
+	filter.QueryMode = s.resolveOpsQueryMode(ctx, filter.QueryMode)
+
+	overview, err := s.opsRepo.GetDashboardOverview(ctx, filter)
+	if err != nil {
+		if errors.Is(err, ErrOpsPreaggregatedNotPopulated) {
+			return nil, infraerrors.Conflict("OPS_PREAGG_NOT_READY", "Pre-aggregated ops metrics are not populated yet")
+		}
+		return nil, err
+	}
+
+	// Best-effort system health + jobs; dashboard metrics should still render if these are missing.
+	if metrics, err := s.opsRepo.GetLatestSystemMetrics(ctx, 1); err == nil {
+		// Attach config-derived limits so the UI can show "current / max" for connection pools.
+		// These are best-effort and should never block the dashboard rendering.
+		if s != nil && s.cfg != nil {
+			if s.cfg.Database.MaxOpenConns > 0 {
+				metrics.DBMaxOpenConns = intPtr(s.cfg.Database.MaxOpenConns)
+			}
+			if s.cfg.Redis.PoolSize > 0 {
+				metrics.RedisPoolSize = intPtr(s.cfg.Redis.PoolSize)
+			}
+		}
+		overview.SystemMetrics = metrics
+	} else if err != nil && !errors.Is(err, sql.ErrNoRows) {
+		// sql.ErrNoRows just means no snapshot collected yet — not worth logging.
+		log.Printf("[Ops] GetLatestSystemMetrics failed: %v", err)
+	}
+
+	if heartbeats, err := s.opsRepo.ListJobHeartbeats(ctx); err == nil {
+		overview.JobHeartbeats = heartbeats
+	} else {
+		log.Printf("[Ops] ListJobHeartbeats failed: %v", err)
+	}
+
+	overview.HealthScore = computeDashboardHealthScore(time.Now().UTC(), overview)
+
+	return overview, nil
+}
+
+// resolveOpsQueryMode picks the effective dashboard query mode: an explicit
+// valid request wins; otherwise the DB-stored default is used; in both paths
+// "auto" degrades to raw when pre-aggregated tables are disabled in config.
+func (s *OpsService) resolveOpsQueryMode(ctx context.Context, requested OpsQueryMode) OpsQueryMode {
+	if requested.IsValid() {
+		// Allow "auto" to be disabled via config until preagg is proven stable in production.
+		// Forced `preagg` via query param still works.
+		if requested == OpsQueryModeAuto && s != nil && s.cfg != nil && !s.cfg.Ops.UsePreaggregatedTables {
+			return OpsQueryModeRaw
+		}
+		return requested
+	}
+
+	mode := OpsQueryModeAuto
+	if s != nil && s.settingRepo != nil {
+		if raw, err := s.settingRepo.GetValue(ctx, SettingKeyOpsQueryModeDefault); err == nil {
+			mode = ParseOpsQueryMode(raw)
+		}
+	}
+
+	if mode == OpsQueryModeAuto && s != nil && s.cfg != nil && !s.cfg.Ops.UsePreaggregatedTables {
+		return OpsQueryModeRaw
+	}
+	return mode
+}
diff --git a/backend/internal/service/ops_dashboard_models.go b/backend/internal/service/ops_dashboard_models.go
new file mode 100644
index 00000000..f189031b
--- /dev/null
+++ b/backend/internal/service/ops_dashboard_models.go
@@ -0,0 +1,87 @@
+package service
+
+import "time"
+
+// OpsDashboardFilter is the common time-range/platform/group filter for
+// dashboard queries.
+type OpsDashboardFilter struct {
+	StartTime time.Time
+	EndTime   time.Time
+
+	Platform string
+	GroupID  *int64
+
+	// QueryMode controls whether dashboard queries should use raw logs or pre-aggregated tables.
+	// Expected values: auto/raw/preagg (see OpsQueryMode).
+	QueryMode OpsQueryMode
+}
+
+// OpsRateSummary summarizes a rate (e.g. QPS/TPS) over the selected window.
+type OpsRateSummary struct {
+	Current float64 `json:"current"`
+	Peak    float64 `json:"peak"`
+	Avg     float64 `json:"avg"`
+}
+
+// OpsPercentiles carries latency percentiles in milliseconds; nil means the
+// percentile was not computed for the window.
+type OpsPercentiles struct {
+	P50 *int `json:"p50_ms"`
+	P90 *int `json:"p90_ms"`
+	P95 *int `json:"p95_ms"`
+	P99 *int `json:"p99_ms"`
+	Avg *int `json:"avg_ms"`
+	Max *int `json:"max_ms"`
+}
+
+type OpsDashboardOverview struct {
+	StartTime time.Time `json:"start_time"`
+	EndTime   time.Time `json:"end_time"`
+	Platform  string    `json:"platform"`
+	GroupID   *int64    `json:"group_id"`
+
+	// HealthScore is a backend-computed overall health score (0-100).
+	// It is derived from the monitored metrics in this overview, plus best-effort system metrics/job heartbeats.
+	HealthScore int `json:"health_score"`
+
+	// Latest system-level snapshot (window=1m, global).
+	SystemMetrics *OpsSystemMetricsSnapshot `json:"system_metrics"`
+
+	// Background jobs health (heartbeats).
+	JobHeartbeats []*OpsJobHeartbeat `json:"job_heartbeats"`
+
+	// Aggregate request counters over the selected window.
+	SuccessCount         int64 `json:"success_count"`
+	ErrorCountTotal      int64 `json:"error_count_total"`
+	BusinessLimitedCount int64 `json:"business_limited_count"`
+
+	ErrorCountSLA     int64 `json:"error_count_sla"`
+	RequestCountTotal int64 `json:"request_count_total"`
+	RequestCountSLA   int64 `json:"request_count_sla"`
+
+	TokenConsumed int64 `json:"token_consumed"`
+
+	// Derived rates; upstream 429/529 are tracked separately from other upstream errors.
+	SLA                          float64 `json:"sla"`
+	ErrorRate                    float64 `json:"error_rate"`
+	UpstreamErrorRate            float64 `json:"upstream_error_rate"`
+	UpstreamErrorCountExcl429529 int64   `json:"upstream_error_count_excl_429_529"`
+	Upstream429Count             int64   `json:"upstream_429_count"`
+	Upstream529Count             int64   `json:"upstream_529_count"`
+
+	QPS OpsRateSummary `json:"qps"`
+	TPS OpsRateSummary `json:"tps"`
+
+	Duration OpsPercentiles `json:"duration"`
+	TTFT     OpsPercentiles `json:"ttft"`
+}
+
+// OpsLatencyHistogramBucket is one labeled bucket (e.g. a range string) of the latency histogram.
+type OpsLatencyHistogramBucket struct {
+	Range string `json:"range"`
+	Count int64  `json:"count"`
+}
+
+// OpsLatencyHistogramResponse is a coarse latency distribution histogram (success requests only).
+// It is used by the Ops dashboard to quickly identify tail latency regressions.
+type OpsLatencyHistogramResponse struct {
+	StartTime time.Time `json:"start_time"`
+	EndTime   time.Time `json:"end_time"`
+	Platform  string    `json:"platform"`
+	GroupID   *int64    `json:"group_id"`
+
+	TotalRequests int64                        `json:"total_requests"`
+	Buckets       []*OpsLatencyHistogramBucket `json:"buckets"`
+}
diff --git a/backend/internal/service/ops_errors.go b/backend/internal/service/ops_errors.go
new file mode 100644
index 00000000..76b5ce8b
--- /dev/null
+++ b/backend/internal/service/ops_errors.go
@@ -0,0 +1,45 @@
+package service
+
+import (
+	"context"
+
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+)
+
+// GetErrorTrend returns time-bucketed error counts for the filter window
+// (bucketSeconds per point). Filter validation mirrors GetDashboardOverview;
+// the aggregation itself is implemented in the repository layer.
+func (s *OpsService) GetErrorTrend(ctx context.Context, filter *OpsDashboardFilter, bucketSeconds int) (*OpsErrorTrendResponse, error) {
+	if err := s.RequireMonitoringEnabled(ctx); err != nil {
+		return nil, err
+	}
+	if s.opsRepo == nil {
+		return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
+	}
+	if filter == nil {
+		return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required")
+	}
+	if filter.StartTime.IsZero() || filter.EndTime.IsZero() {
+		return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required")
+	}
+	if filter.StartTime.After(filter.EndTime) {
+		return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time")
+	}
+	return s.opsRepo.GetErrorTrend(ctx, filter, bucketSeconds)
+}
+
+// GetErrorDistribution returns the error breakdown for the filter window
+// (grouping is implemented in the repository layer). Same validation as above.
+func (s *OpsService) GetErrorDistribution(ctx context.Context, filter *OpsDashboardFilter) (*OpsErrorDistributionResponse, error) {
+	if err := s.RequireMonitoringEnabled(ctx); err != nil {
+		return nil, err
+	}
+	if s.opsRepo == nil {
+		return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
+	}
+	if filter == nil {
+		return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required")
+	}
+	if filter.StartTime.IsZero() || filter.EndTime.IsZero() {
+		return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required")
+	}
+	if filter.StartTime.After(filter.EndTime) {
+		return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time")
+	}
+	return s.opsRepo.GetErrorDistribution(ctx, filter)
+}
diff --git a/backend/internal/service/ops_health_score.go b/backend/internal/service/ops_health_score.go
new file mode 100644
index 00000000..5efae870
--- /dev/null
+++ b/backend/internal/service/ops_health_score.go
@@ -0,0 +1,143 @@
+package service
+
+import (
+	"math"
+	"time"
+)
+
+// computeDashboardHealthScore computes a 0-100 health score from the metrics returned by the dashboard overview.
+//
+// Design goals:
+//   - Backend-owned scoring (UI only displays).
+//   - Layered scoring: Business Health (70%) + Infrastructure Health (30%)
+//   - Avoids double-counting (e.g., DB failure affects both infra and business metrics)
+//   - Conservative + stable: penalize clear degradations; avoid overreacting to missing/idle data.
+func computeDashboardHealthScore(now time.Time, overview *OpsDashboardOverview) int {
+	if overview == nil {
+		return 0
+	}
+
+	// Idle/no-data: avoid showing a "bad" score when there is no traffic.
+	// UI can still render a gray/idle state based on QPS + error rate.
+	if overview.RequestCountSLA <= 0 && overview.RequestCountTotal <= 0 && overview.ErrorCountTotal <= 0 {
+		return 100
+	}
+
+	businessHealth := computeBusinessHealth(overview)
+	infraHealth := computeInfraHealth(now, overview)
+
+	// Weighted combination: 70% business + 30% infrastructure
+	score := businessHealth*0.7 + infraHealth*0.3
+	return int(math.Round(clampFloat64(score, 0, 100)))
+}
+
+// computeBusinessHealth calculates business health score (0-100)
+// Components: Error Rate (50%) + TTFT (50%)
+func computeBusinessHealth(overview *OpsDashboardOverview) float64 {
+	// Error rate score: 1% → 100, 10% → 0 (linear)
+	// Combines request errors and upstream errors
+	errorScore := 100.0
+	errorPct := clampFloat64(overview.ErrorRate*100, 0, 100)
+	upstreamPct := clampFloat64(overview.UpstreamErrorRate*100, 0, 100)
+	combinedErrorPct := math.Max(errorPct, upstreamPct) // Use worst case
+	if combinedErrorPct > 1.0 {
+		if combinedErrorPct <= 10.0 {
+			errorScore = (10.0 - combinedErrorPct) / 9.0 * 100
+		} else {
+			errorScore = 0
+		}
+	}
+
+	// TTFT score: 1s → 100, 3s → 0 (linear)
+	// Time to first token is critical for user experience
+	// (missing TTFT data leaves the score at the neutral 100).
+	ttftScore := 100.0
+	if overview.TTFT.P99 != nil {
+		p99 := float64(*overview.TTFT.P99)
+		if p99 > 1000 {
+			if p99 <= 3000 {
+				ttftScore = (3000 - p99) / 2000 * 100
+			} else {
+				ttftScore = 0
+			}
+		}
+	}
+
+	// Weighted combination: 50% error rate + 50% TTFT
+	return errorScore*0.5 + ttftScore*0.5
+}
+
+// computeInfraHealth calculates infrastructure health score (0-100)
+// Components: Storage (40%) + Compute Resources (30%) + Background Jobs (30%)
+func computeInfraHealth(now time.Time, overview *OpsDashboardOverview) float64 {
+	// Storage score: DB critical, Redis less critical
+	storageScore := 100.0
+	if overview.SystemMetrics != nil {
+		if overview.SystemMetrics.DBOK != nil && !*overview.SystemMetrics.DBOK {
+			storageScore = 0 // DB failure is critical
+		} else if overview.SystemMetrics.RedisOK != nil && !*overview.SystemMetrics.RedisOK {
+			storageScore = 50 // Redis failure is degraded but not critical
+		}
+	}
+
+	// Compute resources score: CPU + Memory
+	// CPU penalized above 80%, memory above 85%; both linear down to 0 at 100%.
+	computeScore := 100.0
+	if overview.SystemMetrics != nil {
+		cpuScore := 100.0
+		if overview.SystemMetrics.CPUUsagePercent != nil {
+			cpuPct := clampFloat64(*overview.SystemMetrics.CPUUsagePercent, 0, 100)
+			if cpuPct > 80 {
+				if cpuPct <= 100 {
+					cpuScore = (100 - cpuPct) / 20 * 100
+				} else {
+					cpuScore = 0
+				}
+			}
+		}
+
+		memScore := 100.0
+		if overview.SystemMetrics.MemoryUsagePercent != nil {
+			memPct := clampFloat64(*overview.SystemMetrics.MemoryUsagePercent, 0, 100)
+			if memPct > 85 {
+				if memPct <= 100 {
+					memScore = (100 - memPct) / 15 * 100
+				} else {
+					memScore = 0
+				}
+			}
+		}
+
+		computeScore = (cpuScore + memScore) / 2
+	}
+
+	// Background jobs score
+	// A job is "failed" if its last error is newer than its last success, or
+	// its last success is older than 15 minutes.
+	jobScore := 100.0
+	failedJobs := 0
+	totalJobs := 0
+	for _, hb := range overview.JobHeartbeats {
+		if hb == nil {
+			continue
+		}
+		totalJobs++
+		if hb.LastErrorAt != nil && (hb.LastSuccessAt == nil || hb.LastErrorAt.After(*hb.LastSuccessAt)) {
+			failedJobs++
+		} else if hb.LastSuccessAt != nil && now.Sub(*hb.LastSuccessAt) > 15*time.Minute {
+			failedJobs++
+		}
+	}
+	if totalJobs > 0 && failedJobs > 0 {
+		jobScore = (1 - float64(failedJobs)/float64(totalJobs)) * 100
+	}
+
+	// Weighted combination
+	return storageScore*0.4 + computeScore*0.3 + jobScore*0.3
+}
+
+// clampFloat64 clamps v into [min, max].
+func clampFloat64(v float64, min float64, max float64) float64 {
+	if v < min {
+		return min
+	}
+	if v > max {
+		return max
+	}
+	return v
+}
diff --git a/backend/internal/service/ops_health_score_test.go b/backend/internal/service/ops_health_score_test.go
new file mode 100644
index 00000000..25bfb43d
--- /dev/null
+++ b/backend/internal/service/ops_health_score_test.go
@@ -0,0 +1,442 @@
+//go:build unit
+
+package service
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+// NOTE(review): intPtr/boolPtr/float64Ptr helpers are assumed to be defined
+// elsewhere in this package — verify they exist before relying on this file.
+func TestComputeDashboardHealthScore_IdleReturns100(t *testing.T) {
+	t.Parallel()
+
+	score := computeDashboardHealthScore(time.Now().UTC(), &OpsDashboardOverview{})
+	require.Equal(t, 100, score)
+}
+
+func TestComputeDashboardHealthScore_DegradesOnBadSignals(t *testing.T) {
+	t.Parallel()
+
+	ov := &OpsDashboardOverview{
+		RequestCountTotal: 100,
+		RequestCountSLA:   100,
+		SuccessCount:      90,
+		ErrorCountTotal:   10,
+		ErrorCountSLA:     10,
+
+		SLA:               0.90,
+		ErrorRate:         0.10,
+		UpstreamErrorRate: 0.08,
+
+		Duration: OpsPercentiles{P99: intPtr(20_000)},
+		TTFT:     OpsPercentiles{P99: intPtr(2_000)},
+
+		SystemMetrics: &OpsSystemMetricsSnapshot{
+			DBOK:                  boolPtr(false),
+			RedisOK:               boolPtr(false),
+			CPUUsagePercent:       float64Ptr(98.0),
+			MemoryUsagePercent:    float64Ptr(97.0),
+			DBConnWaiting:         intPtr(3),
+			ConcurrencyQueueDepth: intPtr(10),
+		},
+		JobHeartbeats: []*OpsJobHeartbeat{
+			{
+				JobName:     "job-a",
+				LastErrorAt: timePtr(time.Now().UTC().Add(-1 * time.Minute)),
+				LastError:   stringPtr("boom"),
+			},
+		},
+	}
+
+	score := computeDashboardHealthScore(time.Now().UTC(), ov)
+	require.Less(t, score, 80)
+	require.GreaterOrEqual(t, score, 0)
+}
+
+// Table-driven score-range checks across representative health scenarios.
+func TestComputeDashboardHealthScore_Comprehensive(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name     string
+		overview *OpsDashboardOverview
+		wantMin  int
+		wantMax  int
+	}{
+		{
+			name:     "nil overview returns 0",
+			overview: nil,
+			wantMin:  0,
+			wantMax:  0,
+		},
+		{
+			name: "perfect health",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				RequestCountSLA:   1000,
+				SLA:               1.0,
+				ErrorRate:         0,
+				UpstreamErrorRate: 0,
+				Duration:          OpsPercentiles{P99: intPtr(500)},
+				TTFT:              OpsPercentiles{P99: intPtr(100)},
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(true),
+					RedisOK:            boolPtr(true),
+					CPUUsagePercent:    float64Ptr(30),
+					MemoryUsagePercent: float64Ptr(40),
+				},
+			},
+			wantMin: 100,
+			wantMax: 100,
+		},
+		{
+			name: "good health - SLA 99.8%",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				RequestCountSLA:   1000,
+				SLA:               0.998,
+				ErrorRate:         0.003,
+				UpstreamErrorRate: 0.001,
+				Duration:          OpsPercentiles{P99: intPtr(800)},
+				TTFT:              OpsPercentiles{P99: intPtr(200)},
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(true),
+					RedisOK:            boolPtr(true),
+					CPUUsagePercent:    float64Ptr(50),
+					MemoryUsagePercent: float64Ptr(60),
+				},
+			},
+			wantMin: 95,
+			wantMax: 100,
+		},
+		{
+			name: "medium health - SLA 96%",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				RequestCountSLA:   1000,
+				SLA:               0.96,
+				ErrorRate:         0.02,
+				UpstreamErrorRate: 0.01,
+				Duration:          OpsPercentiles{P99: intPtr(3000)},
+				TTFT:              OpsPercentiles{P99: intPtr(600)},
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(true),
+					RedisOK:            boolPtr(true),
+					CPUUsagePercent:    float64Ptr(70),
+					MemoryUsagePercent: float64Ptr(75),
+				},
+			},
+			wantMin: 96,
+			wantMax: 97,
+		},
+		{
+			name: "DB failure",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				RequestCountSLA:   1000,
+				SLA:               0.995,
+				ErrorRate:         0,
+				UpstreamErrorRate: 0,
+				Duration:          OpsPercentiles{P99: intPtr(500)},
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(false),
+					RedisOK:            boolPtr(true),
+					CPUUsagePercent:    float64Ptr(30),
+					MemoryUsagePercent: float64Ptr(40),
+				},
+			},
+			wantMin: 70,
+			wantMax: 90,
+		},
+		{
+			name: "Redis failure",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				RequestCountSLA:   1000,
+				SLA:               0.995,
+				ErrorRate:         0,
+				UpstreamErrorRate: 0,
+				Duration:          OpsPercentiles{P99: intPtr(500)},
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(true),
+					RedisOK:            boolPtr(false),
+					CPUUsagePercent:    float64Ptr(30),
+					MemoryUsagePercent: float64Ptr(40),
+				},
+			},
+			wantMin: 85,
+			wantMax: 95,
+		},
+		{
+			name: "high CPU usage",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				RequestCountSLA:   1000,
+				SLA:               0.995,
+				ErrorRate:         0,
+				UpstreamErrorRate: 0,
+				Duration:          OpsPercentiles{P99: intPtr(500)},
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(true),
+					RedisOK:            boolPtr(true),
+					CPUUsagePercent:    float64Ptr(95),
+					MemoryUsagePercent: float64Ptr(40),
+				},
+			},
+			wantMin: 85,
+			wantMax: 100,
+		},
+		{
+			name: "combined failures - business degraded + infra healthy",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				RequestCountSLA:   1000,
+				SLA:               0.90,
+				ErrorRate:         0.05,
+				UpstreamErrorRate: 0.02,
+				Duration:          OpsPercentiles{P99: intPtr(10000)},
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(true),
+					RedisOK:            boolPtr(true),
+					CPUUsagePercent:    float64Ptr(20),
+					MemoryUsagePercent: float64Ptr(30),
+				},
+			},
+			wantMin: 84,
+			wantMax: 85,
+		},
+		{
+			name: "combined failures - business healthy + infra degraded",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				RequestCountSLA:   1000,
+				SLA:               0.998,
+				ErrorRate:         0.001,
+				UpstreamErrorRate: 0,
+				Duration:          OpsPercentiles{P99: intPtr(600)},
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(false),
+					RedisOK:            boolPtr(false),
+					CPUUsagePercent:    float64Ptr(95),
+					MemoryUsagePercent: float64Ptr(95),
+				},
+			},
+			wantMin: 70,
+			wantMax: 90,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			score := computeDashboardHealthScore(time.Now().UTC(), tt.overview)
+			require.GreaterOrEqual(t, score, tt.wantMin, "score should be >= %d", tt.wantMin)
+			require.LessOrEqual(t, score, tt.wantMax, "score should be <= %d", tt.wantMax)
+			require.GreaterOrEqual(t, score, 0, "score must be >= 0")
+			require.LessOrEqual(t, score, 100, "score must be <= 100")
+		})
+	}
+}
+
+// Boundary checks for the business-health component in isolation.
+func TestComputeBusinessHealth(t *testing.T) {
+	t.Parallel()
+
+	tests := []struct {
+		name     string
+		overview *OpsDashboardOverview
+		wantMin  float64
+		wantMax  float64
+	}{
+		{
+			name: "perfect metrics",
+			overview: &OpsDashboardOverview{
+				SLA:               1.0,
+				ErrorRate:         0,
+				UpstreamErrorRate: 0,
+				Duration:          OpsPercentiles{P99: intPtr(500)},
+			},
+			wantMin: 100,
+			wantMax: 100,
+		},
+		{
+			name: "SLA boundary 99.5%",
+			overview: &OpsDashboardOverview{
+				SLA:               0.995,
+				ErrorRate:         0,
+				UpstreamErrorRate: 0,
+				Duration:          OpsPercentiles{P99: intPtr(500)},
+			},
+			wantMin: 100,
+			wantMax: 100,
+		},
+		{
+			name: "SLA boundary 95%",
+			overview: &OpsDashboardOverview{
+				SLA:               0.95,
+				ErrorRate:         0,
+				UpstreamErrorRate: 0,
+				Duration:          OpsPercentiles{P99: intPtr(500)},
+			},
+			wantMin: 100,
+			wantMax: 100,
+		},
+		{
+			name: "error rate boundary 1%",
+			overview: &OpsDashboardOverview{
+				SLA:               0.99,
+				ErrorRate:         0.01,
+				UpstreamErrorRate: 0,
+				Duration:          OpsPercentiles{P99: intPtr(500)},
+			},
+			wantMin: 100,
+			wantMax: 100,
+		},
+		{
+			name: "error rate 5%",
+			overview: &OpsDashboardOverview{
+				SLA:               0.95,
+				ErrorRate:         0.05,
+				UpstreamErrorRate: 0,
+				Duration:          OpsPercentiles{P99: intPtr(500)},
+			},
+			wantMin: 77,
+			wantMax: 78,
+		},
+		{
+			name: "TTFT boundary 2s",
+			overview: &OpsDashboardOverview{
+				SLA:               0.99,
+				ErrorRate:         0,
+				UpstreamErrorRate: 0,
+				TTFT:              OpsPercentiles{P99: intPtr(2000)},
+			},
+			wantMin: 75,
+			wantMax: 75,
+		},
+		{
+			name: "upstream error dominates",
+			overview: &OpsDashboardOverview{
+				SLA:               0.995,
+				ErrorRate:         0.001,
+				UpstreamErrorRate: 0.03,
+				Duration:          OpsPercentiles{P99: intPtr(500)},
+			},
+			wantMin: 88,
+			wantMax: 90,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			score := computeBusinessHealth(tt.overview)
+			require.GreaterOrEqual(t, score, tt.wantMin, "score should be >= %.1f", tt.wantMin)
+			require.LessOrEqual(t, score, tt.wantMax, "score should be <= %.1f", tt.wantMax)
+			require.GreaterOrEqual(t, score, 0.0, "score must be >= 0")
+			require.LessOrEqual(t, score, 100.0, "score must be <= 100")
+		})
+	}
+}
+
+// Boundary checks for the infrastructure-health component in isolation.
+func TestComputeInfraHealth(t *testing.T) {
+	t.Parallel()
+
+	now := time.Now().UTC()
+
+	tests := []struct {
+		name     string
+		overview *OpsDashboardOverview
+		wantMin  float64
+		wantMax  float64
+	}{
+		{
+			name: "all infrastructure healthy",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(true),
+					RedisOK:            boolPtr(true),
+					CPUUsagePercent:    float64Ptr(30),
+					MemoryUsagePercent: float64Ptr(40),
+				},
+			},
+			wantMin: 100,
+			wantMax: 100,
+		},
+		{
+			name: "DB down",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(false),
+					RedisOK:            boolPtr(true),
+					CPUUsagePercent:    float64Ptr(30),
+					MemoryUsagePercent: float64Ptr(40),
+				},
+			},
+			wantMin: 50,
+			wantMax: 70,
+		},
+		{
+			name: "Redis down",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(true),
+					RedisOK:            boolPtr(false),
+					CPUUsagePercent:    float64Ptr(30),
+					MemoryUsagePercent: float64Ptr(40),
+				},
+			},
+			wantMin: 80,
+			wantMax: 95,
+		},
+		{
+			name: "CPU at 90%",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(true),
+					RedisOK:            boolPtr(true),
+					CPUUsagePercent:    float64Ptr(90),
+					MemoryUsagePercent: float64Ptr(40),
+				},
+			},
+			wantMin: 85,
+			wantMax: 95,
+		},
+		{
+			name: "failed background job",
+			overview: &OpsDashboardOverview{
+				RequestCountTotal: 1000,
+				SystemMetrics: &OpsSystemMetricsSnapshot{
+					DBOK:               boolPtr(true),
+					RedisOK:            boolPtr(true),
+					CPUUsagePercent:    float64Ptr(30),
+					MemoryUsagePercent: float64Ptr(40),
+				},
+				JobHeartbeats: []*OpsJobHeartbeat{
+					{
+						JobName:     "test-job",
+						LastErrorAt: &now,
+					},
+				},
+			},
+			wantMin: 70,
+			wantMax: 90,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			score := computeInfraHealth(now, tt.overview)
+			require.GreaterOrEqual(t, score, tt.wantMin, "score should be >= %.1f", tt.wantMin)
+			require.LessOrEqual(t, score, tt.wantMax, "score should be <= %.1f", tt.wantMax)
+			require.GreaterOrEqual(t, score, 0.0, "score must be >= 0")
+			require.LessOrEqual(t, score, 100.0, "score must be <= 100")
+		})
+	}
+}
+
+func timePtr(v time.Time) *time.Time { return &v }
+
+func stringPtr(v string) *string { return &v }
diff --git a/backend/internal/service/ops_histograms.go b/backend/internal/service/ops_histograms.go
new file mode 100644
index 00000000..9f5b514f
--- /dev/null
+++ b/backend/internal/service/ops_histograms.go
@@ -0,0 +1,26 @@
+package service
+
+import (
+	"context"
+
+	infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors"
+)
+
+// GetLatencyHistogram returns the coarse latency distribution for the filter
+// window. Validation mirrors the other ops dashboard entry points.
+func (s *OpsService) GetLatencyHistogram(ctx context.Context, filter *OpsDashboardFilter) (*OpsLatencyHistogramResponse, error) {
+	if err := s.RequireMonitoringEnabled(ctx); err != nil {
+		return nil, err
+	}
+	if s.opsRepo == nil {
+		return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available")
+	}
+	if filter == nil {
+		return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required")
+	}
+	if filter.StartTime.IsZero() || filter.EndTime.IsZero() {
+		return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required")
+	}
+	if filter.StartTime.After(filter.EndTime) {
+		return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time")
+	}
+	return s.opsRepo.GetLatencyHistogram(ctx, filter)
+}
diff --git a/backend/internal/service/ops_metrics_collector.go b/backend/internal/service/ops_metrics_collector.go
new file mode 100644
index 00000000..edf32cf2
--- /dev/null
+++ b/backend/internal/service/ops_metrics_collector.go
@@ -0,0 +1,920 @@
+package service
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+	"log"
+	"math"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"github.com/Wei-Shaw/sub2api/internal/config"
+	"github.com/google/uuid"
+	"github.com/redis/go-redis/v9"
+	"github.com/shirou/gopsutil/v4/cpu"
+	"github.com/shirou/gopsutil/v4/mem"
+)
+
+const (
+	opsMetricsCollectorJobName     = "ops_metrics_collector"
+	opsMetricsCollectorMinInterval = 60 * time.Second
+	opsMetricsCollectorMaxInterval = 1 * time.Hour
+
+	opsMetricsCollectorTimeout = 10 * time.Second
+
+	opsMetricsCollectorLeaderLockKey = "ops:metrics:collector:leader"
+	opsMetricsCollectorLeaderLockTTL = 90 * time.Second
+
+	opsMetricsCollectorHeartbeatTimeout = 2 * time.Second
+
+	bytesPerMB = 1024 * 1024
+)
+
+var opsMetricsCollectorAdvisoryLockID = hashAdvisoryLockID(opsMetricsCollectorLeaderLockKey)
+
+// OpsMetricsCollector periodically samples system metrics and persists them
+// via OpsRepository; one leader instance collects at a time (Redis lock with
+// DB advisory-lock fallback, same pattern as the cleanup service).
+type OpsMetricsCollector struct {
+	opsRepo     OpsRepository
+	settingRepo SettingRepository
+	cfg         *config.Config
+
+	accountRepo        AccountRepository
+	concurrencyService *ConcurrencyService
+
+	db          *sql.DB
+	redisClient *redis.Client
+	instanceID  string
+
+	// Previous cgroup CPU sample, used to derive usage deltas between ticks.
+	lastCgroupCPUUsageNanos uint64
+	lastCgroupCPUSampleAt   time.Time
+
+	stopCh    chan struct{}
+	startOnce sync.Once
+	stopOnce  sync.Once
+
+	skipLogMu sync.Mutex
+	skipLogAt time.Time
+}
+
+// NewOpsMetricsCollector wires the collector; instanceID is a fresh UUID used
+// as the leader-lock owner token.
+func NewOpsMetricsCollector(
+	opsRepo OpsRepository,
+	settingRepo SettingRepository,
+	accountRepo AccountRepository,
+	concurrencyService *ConcurrencyService,
+	db *sql.DB,
+	redisClient *redis.Client,
+	cfg *config.Config,
+) *OpsMetricsCollector {
+	return &OpsMetricsCollector{
+		opsRepo:            opsRepo,
+		settingRepo:        settingRepo,
+		cfg:                cfg,
+		accountRepo:        accountRepo,
+		concurrencyService: concurrencyService,
+		db:                 db,
+		redisClient:        redisClient,
+		instanceID:         uuid.NewString(),
+	}
+}
+
+// Start launches the background collection loop; safe to call multiple times
+// (only the first call has effect).
+func (c *OpsMetricsCollector) Start() {
+	if c == nil {
+		return
+	}
+	c.startOnce.Do(func() {
+		if c.stopCh == nil {
+			c.stopCh = make(chan struct{})
+		}
+		go c.run()
+	})
+}
+
+// Stop signals the loop to exit; idempotent via stopOnce.
+func (c *OpsMetricsCollector) Stop() {
+	if c == nil {
+		return
+	}
+	c.stopOnce.Do(func() {
+		if c.stopCh != nil {
+			close(c.stopCh)
+		}
+	})
+}
+
+func (c *OpsMetricsCollector) run() {
+	// First run immediately so the dashboard has data soon after startup.
+ c.collectOnce() + + for { + interval := c.getInterval() + timer := time.NewTimer(interval) + select { + case <-timer.C: + c.collectOnce() + case <-c.stopCh: + timer.Stop() + return + } + } +} + +func (c *OpsMetricsCollector) getInterval() time.Duration { + interval := opsMetricsCollectorMinInterval + + if c.settingRepo == nil { + return interval + } + + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + + raw, err := c.settingRepo.GetValue(ctx, SettingKeyOpsMetricsIntervalSeconds) + if err != nil { + return interval + } + raw = strings.TrimSpace(raw) + if raw == "" { + return interval + } + + seconds, err := strconv.Atoi(raw) + if err != nil { + return interval + } + if seconds < int(opsMetricsCollectorMinInterval.Seconds()) { + seconds = int(opsMetricsCollectorMinInterval.Seconds()) + } + if seconds > int(opsMetricsCollectorMaxInterval.Seconds()) { + seconds = int(opsMetricsCollectorMaxInterval.Seconds()) + } + return time.Duration(seconds) * time.Second +} + +func (c *OpsMetricsCollector) collectOnce() { + if c == nil { + return + } + if c.cfg != nil && !c.cfg.Ops.Enabled { + return + } + if c.opsRepo == nil { + return + } + if c.db == nil { + return + } + + ctx, cancel := context.WithTimeout(context.Background(), opsMetricsCollectorTimeout) + defer cancel() + + if !c.isMonitoringEnabled(ctx) { + return + } + + release, ok := c.tryAcquireLeaderLock(ctx) + if !ok { + return + } + if release != nil { + defer release() + } + + startedAt := time.Now().UTC() + err := c.collectAndPersist(ctx) + finishedAt := time.Now().UTC() + + durationMs := finishedAt.Sub(startedAt).Milliseconds() + dur := durationMs + runAt := startedAt + + if err != nil { + msg := truncateString(err.Error(), 2048) + errAt := finishedAt + hbCtx, hbCancel := context.WithTimeout(context.Background(), opsMetricsCollectorHeartbeatTimeout) + defer hbCancel() + _ = c.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{ + JobName: 
opsMetricsCollectorJobName, + LastRunAt: &runAt, + LastErrorAt: &errAt, + LastError: &msg, + LastDurationMs: &dur, + }) + log.Printf("[OpsMetricsCollector] collect failed: %v", err) + return + } + + successAt := finishedAt + hbCtx, hbCancel := context.WithTimeout(context.Background(), opsMetricsCollectorHeartbeatTimeout) + defer hbCancel() + _ = c.opsRepo.UpsertJobHeartbeat(hbCtx, &OpsUpsertJobHeartbeatInput{ + JobName: opsMetricsCollectorJobName, + LastRunAt: &runAt, + LastSuccessAt: &successAt, + LastDurationMs: &dur, + }) +} + +func (c *OpsMetricsCollector) isMonitoringEnabled(ctx context.Context) bool { + if c == nil { + return false + } + if c.cfg != nil && !c.cfg.Ops.Enabled { + return false + } + if c.settingRepo == nil { + return true + } + if ctx == nil { + ctx = context.Background() + } + + value, err := c.settingRepo.GetValue(ctx, SettingKeyOpsMonitoringEnabled) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + return true + } + // Fail-open: collector should not become a hard dependency. + return true + } + switch strings.ToLower(strings.TrimSpace(value)) { + case "false", "0", "off", "disabled": + return false + default: + return true + } +} + +func (c *OpsMetricsCollector) collectAndPersist(ctx context.Context) error { + if ctx == nil { + ctx = context.Background() + } + + // Align to stable minute boundaries to avoid partial buckets and to maximize cache hits. + now := time.Now().UTC() + windowEnd := now.Truncate(time.Minute) + windowStart := windowEnd.Add(-1 * time.Minute) + + sys, err := c.collectSystemStats(ctx) + if err != nil { + // Continue; system stats are best-effort. 
+ log.Printf("[OpsMetricsCollector] system stats error: %v", err) + } + + dbOK := c.checkDB(ctx) + redisOK := c.checkRedis(ctx) + active, idle := c.dbPoolStats() + redisTotal, redisIdle, redisStatsOK := c.redisPoolStats() + + successCount, tokenConsumed, err := c.queryUsageCounts(ctx, windowStart, windowEnd) + if err != nil { + return fmt.Errorf("query usage counts: %w", err) + } + + duration, ttft, err := c.queryUsageLatency(ctx, windowStart, windowEnd) + if err != nil { + return fmt.Errorf("query usage latency: %w", err) + } + + errorTotal, businessLimited, errorSLA, upstreamExcl, upstream429, upstream529, err := c.queryErrorCounts(ctx, windowStart, windowEnd) + if err != nil { + return fmt.Errorf("query error counts: %w", err) + } + + windowSeconds := windowEnd.Sub(windowStart).Seconds() + if windowSeconds <= 0 { + windowSeconds = 60 + } + requestTotal := successCount + errorTotal + qps := float64(requestTotal) / windowSeconds + tps := float64(tokenConsumed) / windowSeconds + + goroutines := runtime.NumGoroutine() + concurrencyQueueDepth := c.collectConcurrencyQueueDepth(ctx) + + input := &OpsInsertSystemMetricsInput{ + CreatedAt: windowEnd, + WindowMinutes: 1, + + SuccessCount: successCount, + ErrorCountTotal: errorTotal, + BusinessLimitedCount: businessLimited, + ErrorCountSLA: errorSLA, + + UpstreamErrorCountExcl429529: upstreamExcl, + Upstream429Count: upstream429, + Upstream529Count: upstream529, + + TokenConsumed: tokenConsumed, + QPS: float64Ptr(roundTo1DP(qps)), + TPS: float64Ptr(roundTo1DP(tps)), + + DurationP50Ms: duration.p50, + DurationP90Ms: duration.p90, + DurationP95Ms: duration.p95, + DurationP99Ms: duration.p99, + DurationAvgMs: duration.avg, + DurationMaxMs: duration.max, + + TTFTP50Ms: ttft.p50, + TTFTP90Ms: ttft.p90, + TTFTP95Ms: ttft.p95, + TTFTP99Ms: ttft.p99, + TTFTAvgMs: ttft.avg, + TTFTMaxMs: ttft.max, + + CPUUsagePercent: sys.cpuUsagePercent, + MemoryUsedMB: sys.memoryUsedMB, + MemoryTotalMB: sys.memoryTotalMB, + MemoryUsagePercent: 
sys.memoryUsagePercent, + + DBOK: boolPtr(dbOK), + RedisOK: boolPtr(redisOK), + + RedisConnTotal: func() *int { + if !redisStatsOK { + return nil + } + return intPtr(redisTotal) + }(), + RedisConnIdle: func() *int { + if !redisStatsOK { + return nil + } + return intPtr(redisIdle) + }(), + + DBConnActive: intPtr(active), + DBConnIdle: intPtr(idle), + GoroutineCount: intPtr(goroutines), + ConcurrencyQueueDepth: concurrencyQueueDepth, + } + + return c.opsRepo.InsertSystemMetrics(ctx, input) +} + +func (c *OpsMetricsCollector) collectConcurrencyQueueDepth(parentCtx context.Context) *int { + if c == nil || c.accountRepo == nil || c.concurrencyService == nil { + return nil + } + if parentCtx == nil { + parentCtx = context.Background() + } + + // Best-effort: never let concurrency sampling break the metrics collector. + ctx, cancel := context.WithTimeout(parentCtx, 2*time.Second) + defer cancel() + + accounts, err := c.accountRepo.ListSchedulable(ctx) + if err != nil { + return nil + } + if len(accounts) == 0 { + zero := 0 + return &zero + } + + batch := make([]AccountWithConcurrency, 0, len(accounts)) + for _, acc := range accounts { + if acc.ID <= 0 { + continue + } + maxConc := acc.Concurrency + if maxConc < 0 { + maxConc = 0 + } + batch = append(batch, AccountWithConcurrency{ + ID: acc.ID, + MaxConcurrency: maxConc, + }) + } + if len(batch) == 0 { + zero := 0 + return &zero + } + + loadMap, err := c.concurrencyService.GetAccountsLoadBatch(ctx, batch) + if err != nil { + return nil + } + + var total int64 + for _, info := range loadMap { + if info == nil || info.WaitingCount <= 0 { + continue + } + total += int64(info.WaitingCount) + } + if total < 0 { + total = 0 + } + + maxInt := int64(^uint(0) >> 1) + if total > maxInt { + total = maxInt + } + v := int(total) + return &v +} + +type opsCollectedPercentiles struct { + p50 *int + p90 *int + p95 *int + p99 *int + avg *float64 + max *int +} + +func (c *OpsMetricsCollector) queryUsageCounts(ctx context.Context, start, end 
time.Time) (successCount int64, tokenConsumed int64, err error) { + q := ` +SELECT + COALESCE(COUNT(*), 0) AS success_count, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_consumed +FROM usage_logs +WHERE created_at >= $1 AND created_at < $2` + + var tokens sql.NullInt64 + if err := c.db.QueryRowContext(ctx, q, start, end).Scan(&successCount, &tokens); err != nil { + return 0, 0, err + } + if tokens.Valid { + tokenConsumed = tokens.Int64 + } + return successCount, tokenConsumed, nil +} + +func (c *OpsMetricsCollector) queryUsageLatency(ctx context.Context, start, end time.Time) (duration opsCollectedPercentiles, ttft opsCollectedPercentiles, err error) { + { + q := ` +SELECT + percentile_cont(0.50) WITHIN GROUP (ORDER BY duration_ms) AS p50, + percentile_cont(0.90) WITHIN GROUP (ORDER BY duration_ms) AS p90, + percentile_cont(0.95) WITHIN GROUP (ORDER BY duration_ms) AS p95, + percentile_cont(0.99) WITHIN GROUP (ORDER BY duration_ms) AS p99, + AVG(duration_ms) AS avg_ms, + MAX(duration_ms) AS max_ms +FROM usage_logs +WHERE created_at >= $1 AND created_at < $2 + AND duration_ms IS NOT NULL` + + var p50, p90, p95, p99 sql.NullFloat64 + var avg sql.NullFloat64 + var max sql.NullInt64 + if err := c.db.QueryRowContext(ctx, q, start, end).Scan(&p50, &p90, &p95, &p99, &avg, &max); err != nil { + return opsCollectedPercentiles{}, opsCollectedPercentiles{}, err + } + duration.p50 = floatToIntPtr(p50) + duration.p90 = floatToIntPtr(p90) + duration.p95 = floatToIntPtr(p95) + duration.p99 = floatToIntPtr(p99) + if avg.Valid { + v := roundTo1DP(avg.Float64) + duration.avg = &v + } + if max.Valid { + v := int(max.Int64) + duration.max = &v + } + } + + { + q := ` +SELECT + percentile_cont(0.50) WITHIN GROUP (ORDER BY first_token_ms) AS p50, + percentile_cont(0.90) WITHIN GROUP (ORDER BY first_token_ms) AS p90, + percentile_cont(0.95) WITHIN GROUP (ORDER BY first_token_ms) AS p95, + percentile_cont(0.99) WITHIN GROUP (ORDER BY 
first_token_ms) AS p99, + AVG(first_token_ms) AS avg_ms, + MAX(first_token_ms) AS max_ms +FROM usage_logs +WHERE created_at >= $1 AND created_at < $2 + AND first_token_ms IS NOT NULL` + + var p50, p90, p95, p99 sql.NullFloat64 + var avg sql.NullFloat64 + var max sql.NullInt64 + if err := c.db.QueryRowContext(ctx, q, start, end).Scan(&p50, &p90, &p95, &p99, &avg, &max); err != nil { + return opsCollectedPercentiles{}, opsCollectedPercentiles{}, err + } + ttft.p50 = floatToIntPtr(p50) + ttft.p90 = floatToIntPtr(p90) + ttft.p95 = floatToIntPtr(p95) + ttft.p99 = floatToIntPtr(p99) + if avg.Valid { + v := roundTo1DP(avg.Float64) + ttft.avg = &v + } + if max.Valid { + v := int(max.Int64) + ttft.max = &v + } + } + + return duration, ttft, nil +} + +func (c *OpsMetricsCollector) queryErrorCounts(ctx context.Context, start, end time.Time) ( + errorTotal int64, + businessLimited int64, + errorSLA int64, + upstreamExcl429529 int64, + upstream429 int64, + upstream529 int64, + err error, +) { + q := ` +SELECT + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400), 0) AS error_total, + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND is_business_limited), 0) AS business_limited, + COALESCE(COUNT(*) FILTER (WHERE COALESCE(status_code, 0) >= 400 AND NOT is_business_limited), 0) AS error_sla, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) NOT IN (429, 529)), 0) AS upstream_excl, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 429), 0) AS upstream_429, + COALESCE(COUNT(*) FILTER (WHERE error_owner = 'provider' AND NOT is_business_limited AND COALESCE(upstream_status_code, status_code, 0) = 529), 0) AS upstream_529 +FROM ops_error_logs +WHERE created_at >= $1 AND created_at < $2` + + if err := c.db.QueryRowContext(ctx, q, start, end).Scan( + &errorTotal, + &businessLimited, 
+ &errorSLA, + &upstreamExcl429529, + &upstream429, + &upstream529, + ); err != nil { + return 0, 0, 0, 0, 0, 0, err + } + return errorTotal, businessLimited, errorSLA, upstreamExcl429529, upstream429, upstream529, nil +} + +type opsCollectedSystemStats struct { + cpuUsagePercent *float64 + memoryUsedMB *int64 + memoryTotalMB *int64 + memoryUsagePercent *float64 +} + +func (c *OpsMetricsCollector) collectSystemStats(ctx context.Context) (*opsCollectedSystemStats, error) { + out := &opsCollectedSystemStats{} + if ctx == nil { + ctx = context.Background() + } + + sampleAt := time.Now().UTC() + + // Prefer cgroup (container) metrics when available. + if cpuPct := c.tryCgroupCPUPercent(sampleAt); cpuPct != nil { + out.cpuUsagePercent = cpuPct + } + + cgroupUsed, cgroupTotal, cgroupOK := readCgroupMemoryBytes() + if cgroupOK { + usedMB := int64(cgroupUsed / bytesPerMB) + out.memoryUsedMB = &usedMB + if cgroupTotal > 0 { + totalMB := int64(cgroupTotal / bytesPerMB) + out.memoryTotalMB = &totalMB + pct := roundTo1DP(float64(cgroupUsed) / float64(cgroupTotal) * 100) + out.memoryUsagePercent = &pct + } + } + + // Fallback to host metrics if cgroup metrics are unavailable (or incomplete). + if out.cpuUsagePercent == nil { + if cpuPercents, err := cpu.PercentWithContext(ctx, 0, false); err == nil && len(cpuPercents) > 0 { + v := roundTo1DP(cpuPercents[0]) + out.cpuUsagePercent = &v + } + } + + // If total memory isn't available from cgroup (e.g. memory.max = "max"), fill total from host. 
+ if out.memoryUsedMB == nil || out.memoryTotalMB == nil || out.memoryUsagePercent == nil { + if vm, err := mem.VirtualMemoryWithContext(ctx); err == nil && vm != nil { + if out.memoryUsedMB == nil { + usedMB := int64(vm.Used / bytesPerMB) + out.memoryUsedMB = &usedMB + } + if out.memoryTotalMB == nil { + totalMB := int64(vm.Total / bytesPerMB) + out.memoryTotalMB = &totalMB + } + if out.memoryUsagePercent == nil { + if out.memoryUsedMB != nil && out.memoryTotalMB != nil && *out.memoryTotalMB > 0 { + pct := roundTo1DP(float64(*out.memoryUsedMB) / float64(*out.memoryTotalMB) * 100) + out.memoryUsagePercent = &pct + } else { + pct := roundTo1DP(vm.UsedPercent) + out.memoryUsagePercent = &pct + } + } + } + } + + return out, nil +} + +func (c *OpsMetricsCollector) tryCgroupCPUPercent(now time.Time) *float64 { + usageNanos, ok := readCgroupCPUUsageNanos() + if !ok { + return nil + } + + // Initialize baseline sample. + if c.lastCgroupCPUSampleAt.IsZero() { + c.lastCgroupCPUUsageNanos = usageNanos + c.lastCgroupCPUSampleAt = now + return nil + } + + elapsed := now.Sub(c.lastCgroupCPUSampleAt) + if elapsed <= 0 { + c.lastCgroupCPUUsageNanos = usageNanos + c.lastCgroupCPUSampleAt = now + return nil + } + + prev := c.lastCgroupCPUUsageNanos + c.lastCgroupCPUUsageNanos = usageNanos + c.lastCgroupCPUSampleAt = now + + if usageNanos < prev { + // Counter reset (container restarted). + return nil + } + + deltaUsageSec := float64(usageNanos-prev) / 1e9 + elapsedSec := elapsed.Seconds() + if elapsedSec <= 0 { + return nil + } + + cores := readCgroupCPULimitCores() + if cores <= 0 { + // Can't reliably normalize; skip and fall back to gopsutil. + return nil + } + + pct := (deltaUsageSec / (elapsedSec * cores)) * 100 + if pct < 0 { + pct = 0 + } + // Clamp to avoid noise/jitter showing impossible values. 
+ if pct > 100 { + pct = 100 + } + v := roundTo1DP(pct) + return &v +} + +func readCgroupMemoryBytes() (usedBytes uint64, totalBytes uint64, ok bool) { + // cgroup v2 (most common in modern containers) + if used, ok1 := readUintFile("/sys/fs/cgroup/memory.current"); ok1 { + usedBytes = used + rawMax, err := os.ReadFile("/sys/fs/cgroup/memory.max") + if err == nil { + s := strings.TrimSpace(string(rawMax)) + if s != "" && s != "max" { + if v, err := strconv.ParseUint(s, 10, 64); err == nil { + totalBytes = v + } + } + } + return usedBytes, totalBytes, true + } + + // cgroup v1 fallback + if used, ok1 := readUintFile("/sys/fs/cgroup/memory/memory.usage_in_bytes"); ok1 { + usedBytes = used + if limit, ok2 := readUintFile("/sys/fs/cgroup/memory/memory.limit_in_bytes"); ok2 { + // Some environments report a very large number when unlimited. + if limit > 0 && limit < (1<<60) { + totalBytes = limit + } + } + return usedBytes, totalBytes, true + } + + return 0, 0, false +} + +func readCgroupCPUUsageNanos() (usageNanos uint64, ok bool) { + // cgroup v2: cpu.stat has usage_usec + if raw, err := os.ReadFile("/sys/fs/cgroup/cpu.stat"); err == nil { + lines := strings.Split(string(raw), "\n") + for _, line := range lines { + fields := strings.Fields(line) + if len(fields) != 2 { + continue + } + if fields[0] != "usage_usec" { + continue + } + v, err := strconv.ParseUint(fields[1], 10, 64) + if err != nil { + continue + } + return v * 1000, true + } + } + + // cgroup v1: cpuacct.usage is in nanoseconds + if v, ok := readUintFile("/sys/fs/cgroup/cpuacct/cpuacct.usage"); ok { + return v, true + } + + return 0, false +} + +func readCgroupCPULimitCores() float64 { + // cgroup v2: cpu.max => " " or "max " + if raw, err := os.ReadFile("/sys/fs/cgroup/cpu.max"); err == nil { + fields := strings.Fields(string(raw)) + if len(fields) >= 2 && fields[0] != "max" { + quota, err1 := strconv.ParseFloat(fields[0], 64) + period, err2 := strconv.ParseFloat(fields[1], 64) + if err1 == nil && err2 
== nil && quota > 0 && period > 0 { + return quota / period + } + } + } + + // cgroup v1: cpu.cfs_quota_us / cpu.cfs_period_us + quota, okQuota := readIntFile("/sys/fs/cgroup/cpu/cpu.cfs_quota_us") + period, okPeriod := readIntFile("/sys/fs/cgroup/cpu/cpu.cfs_period_us") + if okQuota && okPeriod && quota > 0 && period > 0 { + return float64(quota) / float64(period) + } + + return 0 +} + +func readUintFile(path string) (uint64, bool) { + raw, err := os.ReadFile(path) + if err != nil { + return 0, false + } + s := strings.TrimSpace(string(raw)) + if s == "" { + return 0, false + } + v, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return 0, false + } + return v, true +} + +func readIntFile(path string) (int64, bool) { + raw, err := os.ReadFile(path) + if err != nil { + return 0, false + } + s := strings.TrimSpace(string(raw)) + if s == "" { + return 0, false + } + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0, false + } + return v, true +} + +func (c *OpsMetricsCollector) checkDB(ctx context.Context) bool { + if c == nil || c.db == nil { + return false + } + if ctx == nil { + ctx = context.Background() + } + var one int + if err := c.db.QueryRowContext(ctx, "SELECT 1").Scan(&one); err != nil { + return false + } + return one == 1 +} + +func (c *OpsMetricsCollector) checkRedis(ctx context.Context) bool { + if c == nil || c.redisClient == nil { + return false + } + if ctx == nil { + ctx = context.Background() + } + return c.redisClient.Ping(ctx).Err() == nil +} + +func (c *OpsMetricsCollector) redisPoolStats() (total int, idle int, ok bool) { + if c == nil || c.redisClient == nil { + return 0, 0, false + } + stats := c.redisClient.PoolStats() + if stats == nil { + return 0, 0, false + } + return int(stats.TotalConns), int(stats.IdleConns), true +} + +func (c *OpsMetricsCollector) dbPoolStats() (active int, idle int) { + if c == nil || c.db == nil { + return 0, 0 + } + stats := c.db.Stats() + return stats.InUse, stats.Idle +} + +var 
opsMetricsCollectorReleaseScript = redis.NewScript(` +if redis.call("GET", KEYS[1]) == ARGV[1] then + return redis.call("DEL", KEYS[1]) +end +return 0 +`) + +func (c *OpsMetricsCollector) tryAcquireLeaderLock(ctx context.Context) (func(), bool) { + if c == nil || c.redisClient == nil { + return nil, true + } + if ctx == nil { + ctx = context.Background() + } + + ok, err := c.redisClient.SetNX(ctx, opsMetricsCollectorLeaderLockKey, c.instanceID, opsMetricsCollectorLeaderLockTTL).Result() + if err != nil { + // Prefer fail-closed to avoid stampeding the database when Redis is flaky. + // Fallback to a DB advisory lock when Redis is present but unavailable. + release, ok := tryAcquireDBAdvisoryLock(ctx, c.db, opsMetricsCollectorAdvisoryLockID) + if !ok { + c.maybeLogSkip() + return nil, false + } + return release, true + } + if !ok { + c.maybeLogSkip() + return nil, false + } + + release := func() { + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _, _ = opsMetricsCollectorReleaseScript.Run(ctx, c.redisClient, []string{opsMetricsCollectorLeaderLockKey}, c.instanceID).Result() + } + return release, true +} + +func (c *OpsMetricsCollector) maybeLogSkip() { + c.skipLogMu.Lock() + defer c.skipLogMu.Unlock() + + now := time.Now() + if !c.skipLogAt.IsZero() && now.Sub(c.skipLogAt) < time.Minute { + return + } + c.skipLogAt = now + log.Printf("[OpsMetricsCollector] leader lock held by another instance; skipping") +} + +func floatToIntPtr(v sql.NullFloat64) *int { + if !v.Valid { + return nil + } + n := int(math.Round(v.Float64)) + return &n +} + +func roundTo1DP(v float64) float64 { + return math.Round(v*10) / 10 +} + +func truncateString(s string, max int) string { + if max <= 0 { + return "" + } + if len(s) <= max { + return s + } + cut := s[:max] + for len(cut) > 0 && !utf8.ValidString(cut) { + cut = cut[:len(cut)-1] + } + return cut +} + +func boolPtr(v bool) *bool { + out := v + return &out +} + +func intPtr(v int) *int { + 
out := v + return &out +} + +func float64Ptr(v float64) *float64 { + out := v + return &out +} diff --git a/backend/internal/service/ops_models.go b/backend/internal/service/ops_models.go new file mode 100644 index 00000000..347cd52b --- /dev/null +++ b/backend/internal/service/ops_models.go @@ -0,0 +1,169 @@ +package service + +import "time" + +type OpsErrorLog struct { + ID int64 `json:"id"` + CreatedAt time.Time `json:"created_at"` + + // Standardized classification + // - phase: request|auth|routing|upstream|network|internal + // - owner: client|provider|platform + // - source: client_request|upstream_http|gateway + Phase string `json:"phase"` + Type string `json:"type"` + + Owner string `json:"error_owner"` + Source string `json:"error_source"` + + Severity string `json:"severity"` + + StatusCode int `json:"status_code"` + Platform string `json:"platform"` + Model string `json:"model"` + + IsRetryable bool `json:"is_retryable"` + RetryCount int `json:"retry_count"` + + Resolved bool `json:"resolved"` + ResolvedAt *time.Time `json:"resolved_at"` + ResolvedByUserID *int64 `json:"resolved_by_user_id"` + ResolvedByUserName string `json:"resolved_by_user_name"` + ResolvedRetryID *int64 `json:"resolved_retry_id"` + ResolvedStatusRaw string `json:"-"` + + ClientRequestID string `json:"client_request_id"` + RequestID string `json:"request_id"` + Message string `json:"message"` + + UserID *int64 `json:"user_id"` + UserEmail string `json:"user_email"` + APIKeyID *int64 `json:"api_key_id"` + AccountID *int64 `json:"account_id"` + AccountName string `json:"account_name"` + GroupID *int64 `json:"group_id"` + GroupName string `json:"group_name"` + + ClientIP *string `json:"client_ip"` + RequestPath string `json:"request_path"` + Stream bool `json:"stream"` +} + +type OpsErrorLogDetail struct { + OpsErrorLog + + ErrorBody string `json:"error_body"` + UserAgent string `json:"user_agent"` + + // Upstream context (optional) + UpstreamStatusCode *int 
`json:"upstream_status_code,omitempty"` + UpstreamErrorMessage string `json:"upstream_error_message,omitempty"` + UpstreamErrorDetail string `json:"upstream_error_detail,omitempty"` + UpstreamErrors string `json:"upstream_errors,omitempty"` // JSON array (string) for display/parsing + + // Timings (optional) + AuthLatencyMs *int64 `json:"auth_latency_ms"` + RoutingLatencyMs *int64 `json:"routing_latency_ms"` + UpstreamLatencyMs *int64 `json:"upstream_latency_ms"` + ResponseLatencyMs *int64 `json:"response_latency_ms"` + TimeToFirstTokenMs *int64 `json:"time_to_first_token_ms"` + + // Retry context + RequestBody string `json:"request_body"` + RequestBodyTruncated bool `json:"request_body_truncated"` + RequestBodyBytes *int `json:"request_body_bytes"` + RequestHeaders string `json:"request_headers,omitempty"` + + // vNext metric semantics + IsBusinessLimited bool `json:"is_business_limited"` +} + +type OpsErrorLogFilter struct { + StartTime *time.Time + EndTime *time.Time + + Platform string + GroupID *int64 + AccountID *int64 + + StatusCodes []int + StatusCodesOther bool + Phase string + Owner string + Source string + Resolved *bool + Query string + UserQuery string // Search by user email + + // Optional correlation keys for exact matching. + RequestID string + ClientRequestID string + + // View controls error categorization for list endpoints. 
+ // - errors: show actionable errors (exclude business-limited / 429 / 529) + // - excluded: only show excluded errors + // - all: show everything + View string + + Page int + PageSize int +} + +type OpsErrorLogList struct { + Errors []*OpsErrorLog `json:"errors"` + Total int `json:"total"` + Page int `json:"page"` + PageSize int `json:"page_size"` +} + +type OpsRetryAttempt struct { + ID int64 `json:"id"` + CreatedAt time.Time `json:"created_at"` + + RequestedByUserID int64 `json:"requested_by_user_id"` + SourceErrorID int64 `json:"source_error_id"` + Mode string `json:"mode"` + PinnedAccountID *int64 `json:"pinned_account_id"` + PinnedAccountName string `json:"pinned_account_name"` + + Status string `json:"status"` + StartedAt *time.Time `json:"started_at"` + FinishedAt *time.Time `json:"finished_at"` + DurationMs *int64 `json:"duration_ms"` + + // Persisted execution results (best-effort) + Success *bool `json:"success"` + HTTPStatusCode *int `json:"http_status_code"` + UpstreamRequestID *string `json:"upstream_request_id"` + UsedAccountID *int64 `json:"used_account_id"` + UsedAccountName string `json:"used_account_name"` + ResponsePreview *string `json:"response_preview"` + ResponseTruncated *bool `json:"response_truncated"` + + // Optional correlation + ResultRequestID *string `json:"result_request_id"` + ResultErrorID *int64 `json:"result_error_id"` + + ErrorMessage *string `json:"error_message"` +} + +type OpsRetryResult struct { + AttemptID int64 `json:"attempt_id"` + Mode string `json:"mode"` + Status string `json:"status"` + + PinnedAccountID *int64 `json:"pinned_account_id"` + UsedAccountID *int64 `json:"used_account_id"` + + HTTPStatusCode int `json:"http_status_code"` + UpstreamRequestID string `json:"upstream_request_id"` + + ResponsePreview string `json:"response_preview"` + ResponseTruncated bool `json:"response_truncated"` + + ErrorMessage string `json:"error_message"` + + StartedAt time.Time `json:"started_at"` + FinishedAt time.Time 
`json:"finished_at"` + DurationMs int64 `json:"duration_ms"` +} diff --git a/backend/internal/service/ops_port.go b/backend/internal/service/ops_port.go new file mode 100644 index 00000000..515b47bb --- /dev/null +++ b/backend/internal/service/ops_port.go @@ -0,0 +1,263 @@ +package service + +import ( + "context" + "time" +) + +type OpsRepository interface { + InsertErrorLog(ctx context.Context, input *OpsInsertErrorLogInput) (int64, error) + ListErrorLogs(ctx context.Context, filter *OpsErrorLogFilter) (*OpsErrorLogList, error) + GetErrorLogByID(ctx context.Context, id int64) (*OpsErrorLogDetail, error) + ListRequestDetails(ctx context.Context, filter *OpsRequestDetailFilter) ([]*OpsRequestDetail, int64, error) + + InsertRetryAttempt(ctx context.Context, input *OpsInsertRetryAttemptInput) (int64, error) + UpdateRetryAttempt(ctx context.Context, input *OpsUpdateRetryAttemptInput) error + GetLatestRetryAttemptForError(ctx context.Context, sourceErrorID int64) (*OpsRetryAttempt, error) + ListRetryAttemptsByErrorID(ctx context.Context, sourceErrorID int64, limit int) ([]*OpsRetryAttempt, error) + UpdateErrorResolution(ctx context.Context, errorID int64, resolved bool, resolvedByUserID *int64, resolvedRetryID *int64, resolvedAt *time.Time) error + + // Lightweight window stats (for realtime WS / quick sampling). + GetWindowStats(ctx context.Context, filter *OpsDashboardFilter) (*OpsWindowStats, error) + // Lightweight realtime traffic summary (for the Ops dashboard header card). 
+ GetRealtimeTrafficSummary(ctx context.Context, filter *OpsDashboardFilter) (*OpsRealtimeTrafficSummary, error) + + GetDashboardOverview(ctx context.Context, filter *OpsDashboardFilter) (*OpsDashboardOverview, error) + GetThroughputTrend(ctx context.Context, filter *OpsDashboardFilter, bucketSeconds int) (*OpsThroughputTrendResponse, error) + GetLatencyHistogram(ctx context.Context, filter *OpsDashboardFilter) (*OpsLatencyHistogramResponse, error) + GetErrorTrend(ctx context.Context, filter *OpsDashboardFilter, bucketSeconds int) (*OpsErrorTrendResponse, error) + GetErrorDistribution(ctx context.Context, filter *OpsDashboardFilter) (*OpsErrorDistributionResponse, error) + + InsertSystemMetrics(ctx context.Context, input *OpsInsertSystemMetricsInput) error + GetLatestSystemMetrics(ctx context.Context, windowMinutes int) (*OpsSystemMetricsSnapshot, error) + + UpsertJobHeartbeat(ctx context.Context, input *OpsUpsertJobHeartbeatInput) error + ListJobHeartbeats(ctx context.Context) ([]*OpsJobHeartbeat, error) + + // Alerts (rules + events) + ListAlertRules(ctx context.Context) ([]*OpsAlertRule, error) + CreateAlertRule(ctx context.Context, input *OpsAlertRule) (*OpsAlertRule, error) + UpdateAlertRule(ctx context.Context, input *OpsAlertRule) (*OpsAlertRule, error) + DeleteAlertRule(ctx context.Context, id int64) error + + ListAlertEvents(ctx context.Context, filter *OpsAlertEventFilter) ([]*OpsAlertEvent, error) + GetAlertEventByID(ctx context.Context, eventID int64) (*OpsAlertEvent, error) + GetActiveAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) + GetLatestAlertEvent(ctx context.Context, ruleID int64) (*OpsAlertEvent, error) + CreateAlertEvent(ctx context.Context, event *OpsAlertEvent) (*OpsAlertEvent, error) + UpdateAlertEventStatus(ctx context.Context, eventID int64, status string, resolvedAt *time.Time) error + UpdateAlertEventEmailSent(ctx context.Context, eventID int64, emailSent bool) error + + // Alert silences + CreateAlertSilence(ctx 
context.Context, input *OpsAlertSilence) (*OpsAlertSilence, error) + IsAlertSilenced(ctx context.Context, ruleID int64, platform string, groupID *int64, region *string, now time.Time) (bool, error) + + // Pre-aggregation (hourly/daily) used for long-window dashboard performance. + UpsertHourlyMetrics(ctx context.Context, startTime, endTime time.Time) error + UpsertDailyMetrics(ctx context.Context, startTime, endTime time.Time) error + GetLatestHourlyBucketStart(ctx context.Context) (time.Time, bool, error) + GetLatestDailyBucketDate(ctx context.Context) (time.Time, bool, error) +} + +type OpsInsertErrorLogInput struct { + RequestID string + ClientRequestID string + + UserID *int64 + APIKeyID *int64 + AccountID *int64 + GroupID *int64 + ClientIP *string + + Platform string + Model string + RequestPath string + Stream bool + UserAgent string + + ErrorPhase string + ErrorType string + Severity string + StatusCode int + IsBusinessLimited bool + IsCountTokens bool // 是否为 count_tokens 请求 + + ErrorMessage string + ErrorBody string + + ErrorSource string + ErrorOwner string + + UpstreamStatusCode *int + UpstreamErrorMessage *string + UpstreamErrorDetail *string + // UpstreamErrors captures all upstream error attempts observed during handling this request. + // It is populated during request processing (gin context) and sanitized+serialized by OpsService. + UpstreamErrors []*OpsUpstreamErrorEvent + // UpstreamErrorsJSON is the sanitized JSON string stored into ops_error_logs.upstream_errors. + // It is set by OpsService.RecordError before persisting. 
+ UpstreamErrorsJSON *string + + TimeToFirstTokenMs *int64 + + RequestBodyJSON *string // sanitized json string (not raw bytes) + RequestBodyTruncated bool + RequestBodyBytes *int + RequestHeadersJSON *string // optional json string + + IsRetryable bool + RetryCount int + + CreatedAt time.Time +} + +type OpsInsertRetryAttemptInput struct { + RequestedByUserID int64 + SourceErrorID int64 + Mode string + PinnedAccountID *int64 + + // running|queued etc. + Status string + StartedAt time.Time +} + +type OpsUpdateRetryAttemptInput struct { + ID int64 + + // succeeded|failed + Status string + FinishedAt time.Time + DurationMs int64 + + // Persisted execution results (best-effort) + Success *bool + HTTPStatusCode *int + UpstreamRequestID *string + UsedAccountID *int64 + ResponsePreview *string + ResponseTruncated *bool + + // Optional correlation (legacy fields kept) + ResultRequestID *string + ResultErrorID *int64 + + ErrorMessage *string +} + +type OpsInsertSystemMetricsInput struct { + CreatedAt time.Time + WindowMinutes int + + Platform *string + GroupID *int64 + + SuccessCount int64 + ErrorCountTotal int64 + BusinessLimitedCount int64 + ErrorCountSLA int64 + + UpstreamErrorCountExcl429529 int64 + Upstream429Count int64 + Upstream529Count int64 + + TokenConsumed int64 + + QPS *float64 + TPS *float64 + + DurationP50Ms *int + DurationP90Ms *int + DurationP95Ms *int + DurationP99Ms *int + DurationAvgMs *float64 + DurationMaxMs *int + + TTFTP50Ms *int + TTFTP90Ms *int + TTFTP95Ms *int + TTFTP99Ms *int + TTFTAvgMs *float64 + TTFTMaxMs *int + + CPUUsagePercent *float64 + MemoryUsedMB *int64 + MemoryTotalMB *int64 + MemoryUsagePercent *float64 + + DBOK *bool + RedisOK *bool + + RedisConnTotal *int + RedisConnIdle *int + + DBConnActive *int + DBConnIdle *int + DBConnWaiting *int + + GoroutineCount *int + ConcurrencyQueueDepth *int +} + +type OpsSystemMetricsSnapshot struct { + ID int64 `json:"id"` + CreatedAt time.Time `json:"created_at"` + WindowMinutes int 
`json:"window_minutes"` + + CPUUsagePercent *float64 `json:"cpu_usage_percent"` + MemoryUsedMB *int64 `json:"memory_used_mb"` + MemoryTotalMB *int64 `json:"memory_total_mb"` + MemoryUsagePercent *float64 `json:"memory_usage_percent"` + + DBOK *bool `json:"db_ok"` + RedisOK *bool `json:"redis_ok"` + + // Config-derived limits (best-effort). These are not historical metrics; they help UI render "current vs max". + DBMaxOpenConns *int `json:"db_max_open_conns"` + RedisPoolSize *int `json:"redis_pool_size"` + + RedisConnTotal *int `json:"redis_conn_total"` + RedisConnIdle *int `json:"redis_conn_idle"` + + DBConnActive *int `json:"db_conn_active"` + DBConnIdle *int `json:"db_conn_idle"` + DBConnWaiting *int `json:"db_conn_waiting"` + + GoroutineCount *int `json:"goroutine_count"` + ConcurrencyQueueDepth *int `json:"concurrency_queue_depth"` +} + +type OpsUpsertJobHeartbeatInput struct { + JobName string + + LastRunAt *time.Time + LastSuccessAt *time.Time + LastErrorAt *time.Time + LastError *string + LastDurationMs *int64 + + // LastResult is an optional human-readable summary of the last successful run. 
+ LastResult *string +} + +type OpsJobHeartbeat struct { + JobName string `json:"job_name"` + + LastRunAt *time.Time `json:"last_run_at"` + LastSuccessAt *time.Time `json:"last_success_at"` + LastErrorAt *time.Time `json:"last_error_at"` + LastError *string `json:"last_error"` + LastDurationMs *int64 `json:"last_duration_ms"` + LastResult *string `json:"last_result"` + + UpdatedAt time.Time `json:"updated_at"` +} + +type OpsWindowStats struct { + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + + SuccessCount int64 `json:"success_count"` + ErrorCountTotal int64 `json:"error_count_total"` + TokenConsumed int64 `json:"token_consumed"` +} diff --git a/backend/internal/service/ops_query_mode.go b/backend/internal/service/ops_query_mode.go new file mode 100644 index 00000000..e6fa9c1e --- /dev/null +++ b/backend/internal/service/ops_query_mode.go @@ -0,0 +1,40 @@ +package service + +import ( + "errors" + "strings" +) + +type OpsQueryMode string + +const ( + OpsQueryModeAuto OpsQueryMode = "auto" + OpsQueryModeRaw OpsQueryMode = "raw" + OpsQueryModePreagg OpsQueryMode = "preagg" +) + +// ErrOpsPreaggregatedNotPopulated indicates that raw logs exist for a window, but the +// pre-aggregation tables are not populated yet. This is primarily used to implement +// the forced `preagg` mode UX. 
+var ErrOpsPreaggregatedNotPopulated = errors.New("ops pre-aggregated tables not populated") + +func ParseOpsQueryMode(raw string) OpsQueryMode { + v := strings.ToLower(strings.TrimSpace(raw)) + switch v { + case string(OpsQueryModeRaw): + return OpsQueryModeRaw + case string(OpsQueryModePreagg): + return OpsQueryModePreagg + default: + return OpsQueryModeAuto + } +} + +func (m OpsQueryMode) IsValid() bool { + switch m { + case OpsQueryModeAuto, OpsQueryModeRaw, OpsQueryModePreagg: + return true + default: + return false + } +} diff --git a/backend/internal/service/ops_realtime.go b/backend/internal/service/ops_realtime.go new file mode 100644 index 00000000..479b9482 --- /dev/null +++ b/backend/internal/service/ops_realtime.go @@ -0,0 +1,36 @@ +package service + +import ( + "context" + "errors" + "strings" +) + +// IsRealtimeMonitoringEnabled returns true when realtime ops features are enabled. +// +// This is a soft switch controlled by the DB setting `ops_realtime_monitoring_enabled`, +// and it is also gated by the hard switch/soft switch of overall ops monitoring. +func (s *OpsService) IsRealtimeMonitoringEnabled(ctx context.Context) bool { + if !s.IsMonitoringEnabled(ctx) { + return false + } + if s.settingRepo == nil { + return true + } + + value, err := s.settingRepo.GetValue(ctx, SettingKeyOpsRealtimeMonitoringEnabled) + if err != nil { + // Default enabled when key is missing; fail-open on transient errors. 
+ if errors.Is(err, ErrSettingNotFound) { + return true + } + return true + } + + switch strings.ToLower(strings.TrimSpace(value)) { + case "false", "0", "off", "disabled": + return false + default: + return true + } +} diff --git a/backend/internal/service/ops_realtime_models.go b/backend/internal/service/ops_realtime_models.go new file mode 100644 index 00000000..f7514a24 --- /dev/null +++ b/backend/internal/service/ops_realtime_models.go @@ -0,0 +1,81 @@ +package service + +import "time" + +// PlatformConcurrencyInfo aggregates concurrency usage by platform. +type PlatformConcurrencyInfo struct { + Platform string `json:"platform"` + CurrentInUse int64 `json:"current_in_use"` + MaxCapacity int64 `json:"max_capacity"` + LoadPercentage float64 `json:"load_percentage"` + WaitingInQueue int64 `json:"waiting_in_queue"` +} + +// GroupConcurrencyInfo aggregates concurrency usage by group. +// +// Note: one account can belong to multiple groups; group totals are therefore not additive across groups. +type GroupConcurrencyInfo struct { + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + Platform string `json:"platform"` + CurrentInUse int64 `json:"current_in_use"` + MaxCapacity int64 `json:"max_capacity"` + LoadPercentage float64 `json:"load_percentage"` + WaitingInQueue int64 `json:"waiting_in_queue"` +} + +// AccountConcurrencyInfo represents real-time concurrency usage for a single account. +type AccountConcurrencyInfo struct { + AccountID int64 `json:"account_id"` + AccountName string `json:"account_name"` + Platform string `json:"platform"` + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + CurrentInUse int64 `json:"current_in_use"` + MaxCapacity int64 `json:"max_capacity"` + LoadPercentage float64 `json:"load_percentage"` + WaitingInQueue int64 `json:"waiting_in_queue"` +} + +// PlatformAvailability aggregates account availability by platform. 
+type PlatformAvailability struct { + Platform string `json:"platform"` + TotalAccounts int64 `json:"total_accounts"` + AvailableCount int64 `json:"available_count"` + RateLimitCount int64 `json:"rate_limit_count"` + ErrorCount int64 `json:"error_count"` +} + +// GroupAvailability aggregates account availability by group. +type GroupAvailability struct { + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + Platform string `json:"platform"` + TotalAccounts int64 `json:"total_accounts"` + AvailableCount int64 `json:"available_count"` + RateLimitCount int64 `json:"rate_limit_count"` + ErrorCount int64 `json:"error_count"` +} + +// AccountAvailability represents current availability for a single account. +type AccountAvailability struct { + AccountID int64 `json:"account_id"` + AccountName string `json:"account_name"` + Platform string `json:"platform"` + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + + Status string `json:"status"` + + IsAvailable bool `json:"is_available"` + IsRateLimited bool `json:"is_rate_limited"` + IsOverloaded bool `json:"is_overloaded"` + HasError bool `json:"has_error"` + + RateLimitResetAt *time.Time `json:"rate_limit_reset_at"` + RateLimitRemainingSec *int64 `json:"rate_limit_remaining_sec"` + OverloadUntil *time.Time `json:"overload_until"` + OverloadRemainingSec *int64 `json:"overload_remaining_sec"` + ErrorMessage string `json:"error_message"` + TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until,omitempty"` +} diff --git a/backend/internal/service/ops_realtime_traffic.go b/backend/internal/service/ops_realtime_traffic.go new file mode 100644 index 00000000..458905c5 --- /dev/null +++ b/backend/internal/service/ops_realtime_traffic.go @@ -0,0 +1,36 @@ +package service + +import ( + "context" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +// GetRealtimeTrafficSummary returns QPS/TPS current/peak/avg for the provided window. 
+// This is used by the Ops dashboard "Realtime Traffic" card and is intentionally lightweight. +func (s *OpsService) GetRealtimeTrafficSummary(ctx context.Context, filter *OpsDashboardFilter) (*OpsRealtimeTrafficSummary, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if filter == nil { + return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required") + } + if filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required") + } + if filter.StartTime.After(filter.EndTime) { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time") + } + if filter.EndTime.Sub(filter.StartTime) > time.Hour { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_TOO_LARGE", "invalid time range: max window is 1 hour") + } + + // Realtime traffic summary always uses raw logs (minute granularity peaks). + filter.QueryMode = OpsQueryModeRaw + + return s.opsRepo.GetRealtimeTrafficSummary(ctx, filter) +} diff --git a/backend/internal/service/ops_realtime_traffic_models.go b/backend/internal/service/ops_realtime_traffic_models.go new file mode 100644 index 00000000..e88a890b --- /dev/null +++ b/backend/internal/service/ops_realtime_traffic_models.go @@ -0,0 +1,19 @@ +package service + +import "time" + +// OpsRealtimeTrafficSummary is a lightweight summary used by the Ops dashboard "Realtime Traffic" card. +// It reports QPS/TPS current/peak/avg for the requested time window. +type OpsRealtimeTrafficSummary struct { + // Window is a normalized label (e.g. "1min", "5min", "30min", "1h"). 
+ Window string `json:"window"` + + StartTime time.Time `json:"start_time"` + EndTime time.Time `json:"end_time"` + + Platform string `json:"platform"` + GroupID *int64 `json:"group_id"` + + QPS OpsRateSummary `json:"qps"` + TPS OpsRateSummary `json:"tps"` +} diff --git a/backend/internal/service/ops_request_details.go b/backend/internal/service/ops_request_details.go new file mode 100644 index 00000000..12b9aa1b --- /dev/null +++ b/backend/internal/service/ops_request_details.go @@ -0,0 +1,151 @@ +package service + +import ( + "context" + "time" +) + +type OpsRequestKind string + +const ( + OpsRequestKindSuccess OpsRequestKind = "success" + OpsRequestKindError OpsRequestKind = "error" +) + +// OpsRequestDetail is a request-level view across success (usage_logs) and error (ops_error_logs). +// It powers "request drilldown" UIs without exposing full request bodies for successful requests. +type OpsRequestDetail struct { + Kind OpsRequestKind `json:"kind"` + CreatedAt time.Time `json:"created_at"` + RequestID string `json:"request_id"` + + Platform string `json:"platform,omitempty"` + Model string `json:"model,omitempty"` + + DurationMs *int `json:"duration_ms,omitempty"` + StatusCode *int `json:"status_code,omitempty"` + + // When Kind == "error", ErrorID links to /admin/ops/errors/:id. 
+ ErrorID *int64 `json:"error_id,omitempty"` + + Phase string `json:"phase,omitempty"` + Severity string `json:"severity,omitempty"` + Message string `json:"message,omitempty"` + + UserID *int64 `json:"user_id,omitempty"` + APIKeyID *int64 `json:"api_key_id,omitempty"` + AccountID *int64 `json:"account_id,omitempty"` + GroupID *int64 `json:"group_id,omitempty"` + + Stream bool `json:"stream"` +} + +type OpsRequestDetailFilter struct { + StartTime *time.Time + EndTime *time.Time + + // kind: success|error|all + Kind string + + Platform string + GroupID *int64 + + UserID *int64 + APIKeyID *int64 + AccountID *int64 + + Model string + RequestID string + Query string + + MinDurationMs *int + MaxDurationMs *int + + // Sort: created_at_desc (default) or duration_desc. + Sort string + + Page int + PageSize int +} + +func (f *OpsRequestDetailFilter) Normalize() (page, pageSize int, startTime, endTime time.Time) { + page = 1 + pageSize = 50 + endTime = time.Now() + startTime = endTime.Add(-1 * time.Hour) + + if f == nil { + return page, pageSize, startTime, endTime + } + + if f.Page > 0 { + page = f.Page + } + if f.PageSize > 0 { + pageSize = f.PageSize + } + if pageSize > 100 { + pageSize = 100 + } + + if f.EndTime != nil { + endTime = *f.EndTime + } + if f.StartTime != nil { + startTime = *f.StartTime + } else if f.EndTime != nil { + startTime = endTime.Add(-1 * time.Hour) + } + + if startTime.After(endTime) { + startTime, endTime = endTime, startTime + } + + return page, pageSize, startTime, endTime +} + +type OpsRequestDetailList struct { + Items []*OpsRequestDetail `json:"items"` + Total int64 `json:"total"` + Page int `json:"page"` + PageSize int `json:"page_size"` +} + +func (s *OpsService) ListRequestDetails(ctx context.Context, filter *OpsRequestDetailFilter) (*OpsRequestDetailList, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return &OpsRequestDetailList{ + Items: []*OpsRequestDetail{}, + Total: 
0, + Page: 1, + PageSize: 50, + }, nil + } + + page, pageSize, startTime, endTime := filter.Normalize() + filterCopy := &OpsRequestDetailFilter{} + if filter != nil { + *filterCopy = *filter + } + filterCopy.Page = page + filterCopy.PageSize = pageSize + filterCopy.StartTime = &startTime + filterCopy.EndTime = &endTime + + items, total, err := s.opsRepo.ListRequestDetails(ctx, filterCopy) + if err != nil { + return nil, err + } + if items == nil { + items = []*OpsRequestDetail{} + } + + return &OpsRequestDetailList{ + Items: items, + Total: total, + Page: page, + PageSize: pageSize, + }, nil +} diff --git a/backend/internal/service/ops_retry.go b/backend/internal/service/ops_retry.go new file mode 100644 index 00000000..8d98e43f --- /dev/null +++ b/backend/internal/service/ops_retry.go @@ -0,0 +1,720 @@ +package service + +import ( + "bytes" + "context" + "database/sql" + "encoding/json" + "errors" + "fmt" + "log" + "net/http" + "strings" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/gin-gonic/gin" + "github.com/lib/pq" +) + +const ( + OpsRetryModeClient = "client" + OpsRetryModeUpstream = "upstream" +) + +const ( + opsRetryStatusRunning = "running" + opsRetryStatusSucceeded = "succeeded" + opsRetryStatusFailed = "failed" +) + +const ( + opsRetryTimeout = 60 * time.Second + opsRetryCaptureBytesLimit = 64 * 1024 + opsRetryResponsePreviewMax = 8 * 1024 + opsRetryMinIntervalPerError = 10 * time.Second + opsRetryMaxAccountSwitches = 3 +) + +var opsRetryRequestHeaderAllowlist = map[string]bool{ + "anthropic-beta": true, + "anthropic-version": true, +} + +type opsRetryRequestType string + +const ( + opsRetryTypeMessages opsRetryRequestType = "messages" + opsRetryTypeOpenAI opsRetryRequestType = "openai_responses" + opsRetryTypeGeminiV1B opsRetryRequestType = "gemini_v1beta" +) + +type limitedResponseWriter struct { + header http.Header + wroteHeader bool + + limit int + totalWritten int64 + buf bytes.Buffer +} + +func 
newLimitedResponseWriter(limit int) *limitedResponseWriter { + if limit <= 0 { + limit = 1 + } + return &limitedResponseWriter{ + header: make(http.Header), + limit: limit, + } +} + +func (w *limitedResponseWriter) Header() http.Header { + return w.header +} + +func (w *limitedResponseWriter) WriteHeader(statusCode int) { + if w.wroteHeader { + return + } + w.wroteHeader = true +} + +func (w *limitedResponseWriter) Write(p []byte) (int, error) { + if !w.wroteHeader { + w.WriteHeader(http.StatusOK) + } + w.totalWritten += int64(len(p)) + + if w.buf.Len() < w.limit { + remaining := w.limit - w.buf.Len() + if len(p) > remaining { + _, _ = w.buf.Write(p[:remaining]) + } else { + _, _ = w.buf.Write(p) + } + } + + // Pretend we wrote everything to avoid upstream/client code treating it as an error. + return len(p), nil +} + +func (w *limitedResponseWriter) Flush() {} + +func (w *limitedResponseWriter) bodyBytes() []byte { + return w.buf.Bytes() +} + +func (w *limitedResponseWriter) truncated() bool { + return w.totalWritten > int64(w.limit) +} + +const ( + OpsRetryModeUpstreamEvent = "upstream_event" +) + +func (s *OpsService) RetryError(ctx context.Context, requestedByUserID int64, errorID int64, mode string, pinnedAccountID *int64) (*OpsRetryResult, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + + mode = strings.ToLower(strings.TrimSpace(mode)) + switch mode { + case OpsRetryModeClient, OpsRetryModeUpstream: + default: + return nil, infraerrors.BadRequest("OPS_RETRY_INVALID_MODE", "mode must be client or upstream") + } + + errorLog, err := s.GetErrorLogByID(ctx, errorID) + if err != nil { + return nil, err + } + if errorLog == nil { + return nil, infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found") + } + if strings.TrimSpace(errorLog.RequestBody) == "" { + return nil, 
infraerrors.BadRequest("OPS_RETRY_NO_REQUEST_BODY", "No request body found to retry") + } + + var pinned *int64 + if mode == OpsRetryModeUpstream { + if pinnedAccountID != nil && *pinnedAccountID > 0 { + pinned = pinnedAccountID + } else if errorLog.AccountID != nil && *errorLog.AccountID > 0 { + pinned = errorLog.AccountID + } else { + return nil, infraerrors.BadRequest("OPS_RETRY_PINNED_ACCOUNT_REQUIRED", "pinned_account_id is required for upstream retry") + } + } + + return s.retryWithErrorLog(ctx, requestedByUserID, errorID, mode, mode, pinned, errorLog) +} + +// RetryUpstreamEvent retries a specific upstream attempt captured inside ops_error_logs.upstream_errors. +// idx is 0-based. It always pins the original event account_id. +func (s *OpsService) RetryUpstreamEvent(ctx context.Context, requestedByUserID int64, errorID int64, idx int) (*OpsRetryResult, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if idx < 0 { + return nil, infraerrors.BadRequest("OPS_RETRY_INVALID_UPSTREAM_IDX", "invalid upstream idx") + } + + errorLog, err := s.GetErrorLogByID(ctx, errorID) + if err != nil { + return nil, err + } + if errorLog == nil { + return nil, infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found") + } + + events, err := ParseOpsUpstreamErrors(errorLog.UpstreamErrors) + if err != nil { + return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_EVENTS_INVALID", "invalid upstream_errors") + } + if idx >= len(events) { + return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_IDX_OOB", "upstream idx out of range") + } + ev := events[idx] + if ev == nil { + return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_EVENT_MISSING", "upstream event missing") + } + if ev.AccountID <= 0 { + return nil, infraerrors.BadRequest("OPS_RETRY_PINNED_ACCOUNT_REQUIRED", "account_id is required for 
upstream retry") + } + + upstreamBody := strings.TrimSpace(ev.UpstreamRequestBody) + if upstreamBody == "" { + return nil, infraerrors.BadRequest("OPS_RETRY_UPSTREAM_NO_REQUEST_BODY", "No upstream request body found to retry") + } + + override := *errorLog + override.RequestBody = upstreamBody + pinned := ev.AccountID + + // Persist as upstream_event, execute as upstream pinned retry. + return s.retryWithErrorLog(ctx, requestedByUserID, errorID, OpsRetryModeUpstreamEvent, OpsRetryModeUpstream, &pinned, &override) +} + +func (s *OpsService) retryWithErrorLog(ctx context.Context, requestedByUserID int64, errorID int64, mode string, execMode string, pinnedAccountID *int64, errorLog *OpsErrorLogDetail) (*OpsRetryResult, error) { + latest, err := s.opsRepo.GetLatestRetryAttemptForError(ctx, errorID) + if err != nil && !errors.Is(err, sql.ErrNoRows) { + return nil, infraerrors.InternalServer("OPS_RETRY_LOAD_LATEST_FAILED", "Failed to check retry status").WithCause(err) + } + if latest != nil { + if strings.EqualFold(latest.Status, opsRetryStatusRunning) || strings.EqualFold(latest.Status, "queued") { + return nil, infraerrors.Conflict("OPS_RETRY_IN_PROGRESS", "A retry is already in progress for this error") + } + + lastAttemptAt := latest.CreatedAt + if latest.FinishedAt != nil && !latest.FinishedAt.IsZero() { + lastAttemptAt = *latest.FinishedAt + } else if latest.StartedAt != nil && !latest.StartedAt.IsZero() { + lastAttemptAt = *latest.StartedAt + } + + if time.Since(lastAttemptAt) < opsRetryMinIntervalPerError { + return nil, infraerrors.Conflict("OPS_RETRY_TOO_FREQUENT", "Please wait before retrying this error again") + } + } + + if errorLog == nil || strings.TrimSpace(errorLog.RequestBody) == "" { + return nil, infraerrors.BadRequest("OPS_RETRY_NO_REQUEST_BODY", "No request body found to retry") + } + + var pinned *int64 + if execMode == OpsRetryModeUpstream { + if pinnedAccountID != nil && *pinnedAccountID > 0 { + pinned = pinnedAccountID + } else if 
errorLog.AccountID != nil && *errorLog.AccountID > 0 { + pinned = errorLog.AccountID + } else { + return nil, infraerrors.BadRequest("OPS_RETRY_PINNED_ACCOUNT_REQUIRED", "account_id is required for upstream retry") + } + } + + startedAt := time.Now() + attemptID, err := s.opsRepo.InsertRetryAttempt(ctx, &OpsInsertRetryAttemptInput{ + RequestedByUserID: requestedByUserID, + SourceErrorID: errorID, + Mode: mode, + PinnedAccountID: pinned, + Status: opsRetryStatusRunning, + StartedAt: startedAt, + }) + if err != nil { + var pqErr *pq.Error + if errors.As(err, &pqErr) && string(pqErr.Code) == "23505" { + return nil, infraerrors.Conflict("OPS_RETRY_IN_PROGRESS", "A retry is already in progress for this error") + } + return nil, infraerrors.InternalServer("OPS_RETRY_CREATE_ATTEMPT_FAILED", "Failed to create retry attempt").WithCause(err) + } + + result := &OpsRetryResult{ + AttemptID: attemptID, + Mode: mode, + Status: opsRetryStatusFailed, + PinnedAccountID: pinned, + HTTPStatusCode: 0, + UpstreamRequestID: "", + ResponsePreview: "", + ResponseTruncated: false, + ErrorMessage: "", + StartedAt: startedAt, + } + + execCtx, cancel := context.WithTimeout(ctx, opsRetryTimeout) + defer cancel() + + execRes := s.executeRetry(execCtx, errorLog, execMode, pinned) + + finishedAt := time.Now() + result.FinishedAt = finishedAt + result.DurationMs = finishedAt.Sub(startedAt).Milliseconds() + + if execRes != nil { + result.Status = execRes.status + result.UsedAccountID = execRes.usedAccountID + result.HTTPStatusCode = execRes.httpStatusCode + result.UpstreamRequestID = execRes.upstreamRequestID + result.ResponsePreview = execRes.responsePreview + result.ResponseTruncated = execRes.responseTruncated + result.ErrorMessage = execRes.errorMessage + } + + updateCtx, updateCancel := context.WithTimeout(context.Background(), 3*time.Second) + defer updateCancel() + + var updateErrMsg *string + if strings.TrimSpace(result.ErrorMessage) != "" { + msg := result.ErrorMessage + updateErrMsg = 
&msg + } + // Keep legacy result_request_id empty; use upstream_request_id instead. + var resultRequestID *string + + finalStatus := result.Status + if strings.TrimSpace(finalStatus) == "" { + finalStatus = opsRetryStatusFailed + } + + success := strings.EqualFold(finalStatus, opsRetryStatusSucceeded) + httpStatus := result.HTTPStatusCode + upstreamReqID := result.UpstreamRequestID + usedAccountID := result.UsedAccountID + preview := result.ResponsePreview + truncated := result.ResponseTruncated + + if err := s.opsRepo.UpdateRetryAttempt(updateCtx, &OpsUpdateRetryAttemptInput{ + ID: attemptID, + Status: finalStatus, + FinishedAt: finishedAt, + DurationMs: result.DurationMs, + Success: &success, + HTTPStatusCode: &httpStatus, + UpstreamRequestID: &upstreamReqID, + UsedAccountID: usedAccountID, + ResponsePreview: &preview, + ResponseTruncated: &truncated, + ResultRequestID: resultRequestID, + ErrorMessage: updateErrMsg, + }); err != nil { + log.Printf("[Ops] UpdateRetryAttempt failed: %v", err) + } else if success { + if err := s.opsRepo.UpdateErrorResolution(updateCtx, errorID, true, &requestedByUserID, &attemptID, &finishedAt); err != nil { + log.Printf("[Ops] UpdateErrorResolution failed: %v", err) + } + } + + return result, nil +} + +type opsRetryExecution struct { + status string + + usedAccountID *int64 + httpStatusCode int + upstreamRequestID string + + responsePreview string + responseTruncated bool + + errorMessage string +} + +func (s *OpsService) executeRetry(ctx context.Context, errorLog *OpsErrorLogDetail, mode string, pinnedAccountID *int64) *opsRetryExecution { + if errorLog == nil { + return &opsRetryExecution{ + status: opsRetryStatusFailed, + errorMessage: "missing error log", + } + } + + reqType := detectOpsRetryType(errorLog.RequestPath) + bodyBytes := []byte(errorLog.RequestBody) + + switch reqType { + case opsRetryTypeMessages: + bodyBytes = FilterThinkingBlocksForRetry(bodyBytes) + case opsRetryTypeOpenAI, opsRetryTypeGeminiV1B: + // No-op + } 
+ + switch strings.ToLower(strings.TrimSpace(mode)) { + case OpsRetryModeUpstream: + if pinnedAccountID == nil || *pinnedAccountID <= 0 { + return &opsRetryExecution{ + status: opsRetryStatusFailed, + errorMessage: "pinned_account_id required for upstream retry", + } + } + return s.executePinnedRetry(ctx, reqType, errorLog, bodyBytes, *pinnedAccountID) + case OpsRetryModeClient: + return s.executeClientRetry(ctx, reqType, errorLog, bodyBytes) + default: + return &opsRetryExecution{ + status: opsRetryStatusFailed, + errorMessage: "invalid retry mode", + } + } +} + +func detectOpsRetryType(path string) opsRetryRequestType { + p := strings.ToLower(strings.TrimSpace(path)) + switch { + case strings.Contains(p, "/responses"): + return opsRetryTypeOpenAI + case strings.Contains(p, "/v1beta/"): + return opsRetryTypeGeminiV1B + default: + return opsRetryTypeMessages + } +} + +func (s *OpsService) executePinnedRetry(ctx context.Context, reqType opsRetryRequestType, errorLog *OpsErrorLogDetail, body []byte, pinnedAccountID int64) *opsRetryExecution { + if s.accountRepo == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "account repository not available"} + } + + account, err := s.accountRepo.GetByID(ctx, pinnedAccountID) + if err != nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: fmt.Sprintf("account not found: %v", err)} + } + if account == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "account not found"} + } + if !account.IsSchedulable() { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "account is not schedulable"} + } + if errorLog.GroupID != nil && *errorLog.GroupID > 0 { + if !containsInt64(account.GroupIDs, *errorLog.GroupID) { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "pinned account is not in the same group as the original request"} + } + } + + var release func() + if s.concurrencyService != nil { + acq, err := 
s.concurrencyService.AcquireAccountSlot(ctx, account.ID, account.Concurrency) + if err != nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: fmt.Sprintf("acquire account slot failed: %v", err)} + } + if acq == nil || !acq.Acquired { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "account concurrency limit reached"} + } + release = acq.ReleaseFunc + } + if release != nil { + defer release() + } + + usedID := account.ID + exec := s.executeWithAccount(ctx, reqType, errorLog, body, account) + exec.usedAccountID = &usedID + if exec.status == "" { + exec.status = opsRetryStatusFailed + } + return exec +} + +func (s *OpsService) executeClientRetry(ctx context.Context, reqType opsRetryRequestType, errorLog *OpsErrorLogDetail, body []byte) *opsRetryExecution { + groupID := errorLog.GroupID + if groupID == nil || *groupID <= 0 { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "group_id missing; cannot reselect account"} + } + + model, stream, parsedErr := extractRetryModelAndStream(reqType, errorLog, body) + if parsedErr != nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: parsedErr.Error()} + } + _ = stream + + excluded := make(map[int64]struct{}) + switches := 0 + + for { + if switches >= opsRetryMaxAccountSwitches { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "retry failed after exhausting account failovers"} + } + + selection, selErr := s.selectAccountForRetry(ctx, reqType, groupID, model, excluded) + if selErr != nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: selErr.Error()} + } + if selection == nil || selection.Account == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "no available accounts"} + } + + account := selection.Account + if !selection.Acquired || selection.ReleaseFunc == nil { + excluded[account.ID] = struct{}{} + switches++ + continue + } + + exec := func() 
*opsRetryExecution { + defer selection.ReleaseFunc() + return s.executeWithAccount(ctx, reqType, errorLog, body, account) + }() + + if exec != nil { + if exec.status == opsRetryStatusSucceeded { + usedID := account.ID + exec.usedAccountID = &usedID + return exec + } + // If the gateway services ask for failover, try another account. + if s.isFailoverError(exec.errorMessage) { + excluded[account.ID] = struct{}{} + switches++ + continue + } + usedID := account.ID + exec.usedAccountID = &usedID + return exec + } + + excluded[account.ID] = struct{}{} + switches++ + } +} + +func (s *OpsService) selectAccountForRetry(ctx context.Context, reqType opsRetryRequestType, groupID *int64, model string, excludedIDs map[int64]struct{}) (*AccountSelectionResult, error) { + switch reqType { + case opsRetryTypeOpenAI: + if s.openAIGatewayService == nil { + return nil, fmt.Errorf("openai gateway service not available") + } + return s.openAIGatewayService.SelectAccountWithLoadAwareness(ctx, groupID, "", model, excludedIDs) + case opsRetryTypeGeminiV1B, opsRetryTypeMessages: + if s.gatewayService == nil { + return nil, fmt.Errorf("gateway service not available") + } + return s.gatewayService.SelectAccountWithLoadAwareness(ctx, groupID, "", model, excludedIDs, "") // 重试不使用会话限制 + default: + return nil, fmt.Errorf("unsupported retry type: %s", reqType) + } +} + +func extractRetryModelAndStream(reqType opsRetryRequestType, errorLog *OpsErrorLogDetail, body []byte) (model string, stream bool, err error) { + switch reqType { + case opsRetryTypeMessages: + parsed, parseErr := ParseGatewayRequest(body) + if parseErr != nil { + return "", false, fmt.Errorf("failed to parse messages request body: %w", parseErr) + } + return parsed.Model, parsed.Stream, nil + case opsRetryTypeOpenAI: + var v struct { + Model string `json:"model"` + Stream bool `json:"stream"` + } + if err := json.Unmarshal(body, &v); err != nil { + return "", false, fmt.Errorf("failed to parse openai request body: %w", err) + } + 
return strings.TrimSpace(v.Model), v.Stream, nil + case opsRetryTypeGeminiV1B: + if strings.TrimSpace(errorLog.Model) == "" { + return "", false, fmt.Errorf("missing model for gemini v1beta retry") + } + return strings.TrimSpace(errorLog.Model), errorLog.Stream, nil + default: + return "", false, fmt.Errorf("unsupported retry type: %s", reqType) + } +} + +func (s *OpsService) executeWithAccount(ctx context.Context, reqType opsRetryRequestType, errorLog *OpsErrorLogDetail, body []byte, account *Account) *opsRetryExecution { + if account == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "missing account"} + } + + c, w := newOpsRetryContext(ctx, errorLog) + + var err error + switch reqType { + case opsRetryTypeOpenAI: + if s.openAIGatewayService == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "openai gateway service not available"} + } + _, err = s.openAIGatewayService.Forward(ctx, c, account, body) + case opsRetryTypeGeminiV1B: + if s.geminiCompatService == nil || s.antigravityGatewayService == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "gemini services not available"} + } + modelName := strings.TrimSpace(errorLog.Model) + action := "generateContent" + if errorLog.Stream { + action = "streamGenerateContent" + } + if account.Platform == PlatformAntigravity { + _, err = s.antigravityGatewayService.ForwardGemini(ctx, c, account, modelName, action, errorLog.Stream, body) + } else { + _, err = s.geminiCompatService.ForwardNative(ctx, c, account, modelName, action, errorLog.Stream, body) + } + case opsRetryTypeMessages: + switch account.Platform { + case PlatformAntigravity: + if s.antigravityGatewayService == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "antigravity gateway service not available"} + } + _, err = s.antigravityGatewayService.Forward(ctx, c, account, body) + case PlatformGemini: + if s.geminiCompatService == nil { + return 
&opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "gemini gateway service not available"} + } + _, err = s.geminiCompatService.Forward(ctx, c, account, body) + default: + if s.gatewayService == nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "gateway service not available"} + } + parsedReq, parseErr := ParseGatewayRequest(body) + if parseErr != nil { + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "failed to parse request body"} + } + _, err = s.gatewayService.Forward(ctx, c, account, parsedReq) + } + default: + return &opsRetryExecution{status: opsRetryStatusFailed, errorMessage: "unsupported retry type"} + } + + statusCode := http.StatusOK + if c != nil && c.Writer != nil { + statusCode = c.Writer.Status() + } + + upstreamReqID := extractUpstreamRequestID(c) + preview, truncated := extractResponsePreview(w) + + exec := &opsRetryExecution{ + status: opsRetryStatusFailed, + httpStatusCode: statusCode, + upstreamRequestID: upstreamReqID, + responsePreview: preview, + responseTruncated: truncated, + errorMessage: "", + } + + if err == nil && statusCode < 400 { + exec.status = opsRetryStatusSucceeded + return exec + } + + if err != nil { + exec.errorMessage = err.Error() + } else { + exec.errorMessage = fmt.Sprintf("upstream returned status %d", statusCode) + } + + return exec +} + +func newOpsRetryContext(ctx context.Context, errorLog *OpsErrorLogDetail) (*gin.Context, *limitedResponseWriter) { + w := newLimitedResponseWriter(opsRetryCaptureBytesLimit) + c, _ := gin.CreateTestContext(w) + + path := "/" + if errorLog != nil && strings.TrimSpace(errorLog.RequestPath) != "" { + path = errorLog.RequestPath + } + + req, _ := http.NewRequestWithContext(ctx, http.MethodPost, "http://localhost"+path, bytes.NewReader(nil)) + req.Header.Set("content-type", "application/json") + if errorLog != nil && strings.TrimSpace(errorLog.UserAgent) != "" { + req.Header.Set("user-agent", errorLog.UserAgent) + } + // Restore 
a minimal, whitelisted subset of request headers to improve retry fidelity + // (e.g. anthropic-beta / anthropic-version). Never replay auth credentials. + if errorLog != nil && strings.TrimSpace(errorLog.RequestHeaders) != "" { + var stored map[string]string + if err := json.Unmarshal([]byte(errorLog.RequestHeaders), &stored); err == nil { + for k, v := range stored { + key := strings.TrimSpace(k) + if key == "" { + continue + } + if !opsRetryRequestHeaderAllowlist[strings.ToLower(key)] { + continue + } + val := strings.TrimSpace(v) + if val == "" { + continue + } + req.Header.Set(key, val) + } + } + } + + c.Request = req + return c, w +} + +func extractUpstreamRequestID(c *gin.Context) string { + if c == nil || c.Writer == nil { + return "" + } + h := c.Writer.Header() + if h == nil { + return "" + } + for _, key := range []string{"x-request-id", "X-Request-Id", "X-Request-ID"} { + if v := strings.TrimSpace(h.Get(key)); v != "" { + return v + } + } + return "" +} + +func extractResponsePreview(w *limitedResponseWriter) (preview string, truncated bool) { + if w == nil { + return "", false + } + b := bytes.TrimSpace(w.bodyBytes()) + if len(b) == 0 { + return "", w.truncated() + } + if len(b) > opsRetryResponsePreviewMax { + return string(b[:opsRetryResponsePreviewMax]), true + } + return string(b), w.truncated() +} + +func containsInt64(items []int64, needle int64) bool { + for _, v := range items { + if v == needle { + return true + } + } + return false +} + +func (s *OpsService) isFailoverError(message string) bool { + msg := strings.ToLower(strings.TrimSpace(message)) + if msg == "" { + return false + } + return strings.Contains(msg, "upstream error:") && strings.Contains(msg, "failover") +} diff --git a/backend/internal/service/ops_scheduled_report_service.go b/backend/internal/service/ops_scheduled_report_service.go new file mode 100644 index 00000000..98b2045d --- /dev/null +++ b/backend/internal/service/ops_scheduled_report_service.go @@ -0,0 +1,721 @@ 
+package service + +import ( + "context" + "fmt" + "log" + "strconv" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/google/uuid" + "github.com/redis/go-redis/v9" + "github.com/robfig/cron/v3" +) + +const ( + opsScheduledReportJobName = "ops_scheduled_reports" + + opsScheduledReportLeaderLockKeyDefault = "ops:scheduled_reports:leader" + opsScheduledReportLeaderLockTTLDefault = 5 * time.Minute + + opsScheduledReportLastRunKeyPrefix = "ops:scheduled_reports:last_run:" + + opsScheduledReportTickInterval = 1 * time.Minute +) + +var opsScheduledReportCronParser = cron.NewParser(cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow) + +var opsScheduledReportReleaseScript = redis.NewScript(` +if redis.call("GET", KEYS[1]) == ARGV[1] then + return redis.call("DEL", KEYS[1]) +end +return 0 +`) + +type OpsScheduledReportService struct { + opsService *OpsService + userService *UserService + emailService *EmailService + redisClient *redis.Client + cfg *config.Config + + instanceID string + loc *time.Location + + distributedLockOn bool + warnNoRedisOnce sync.Once + + startOnce sync.Once + stopOnce sync.Once + stopCtx context.Context + stop context.CancelFunc + wg sync.WaitGroup +} + +func NewOpsScheduledReportService( + opsService *OpsService, + userService *UserService, + emailService *EmailService, + redisClient *redis.Client, + cfg *config.Config, +) *OpsScheduledReportService { + lockOn := cfg == nil || strings.TrimSpace(cfg.RunMode) != config.RunModeSimple + + loc := time.Local + if cfg != nil && strings.TrimSpace(cfg.Timezone) != "" { + if parsed, err := time.LoadLocation(strings.TrimSpace(cfg.Timezone)); err == nil && parsed != nil { + loc = parsed + } + } + return &OpsScheduledReportService{ + opsService: opsService, + userService: userService, + emailService: emailService, + redisClient: redisClient, + cfg: cfg, + + instanceID: uuid.NewString(), + loc: loc, + distributedLockOn: lockOn, + warnNoRedisOnce: sync.Once{}, + 
startOnce: sync.Once{}, + stopOnce: sync.Once{}, + stopCtx: nil, + stop: nil, + wg: sync.WaitGroup{}, + } +} + +func (s *OpsScheduledReportService) Start() { + s.StartWithContext(context.Background()) +} + +func (s *OpsScheduledReportService) StartWithContext(ctx context.Context) { + if s == nil { + return + } + if ctx == nil { + ctx = context.Background() + } + if s.cfg != nil && !s.cfg.Ops.Enabled { + return + } + if s.opsService == nil || s.emailService == nil { + return + } + + s.startOnce.Do(func() { + s.stopCtx, s.stop = context.WithCancel(ctx) + s.wg.Add(1) + go s.run() + }) +} + +func (s *OpsScheduledReportService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + if s.stop != nil { + s.stop() + } + }) + s.wg.Wait() +} + +func (s *OpsScheduledReportService) run() { + defer s.wg.Done() + + ticker := time.NewTicker(opsScheduledReportTickInterval) + defer ticker.Stop() + + s.runOnce() + for { + select { + case <-ticker.C: + s.runOnce() + case <-s.stopCtx.Done(): + return + } + } +} + +func (s *OpsScheduledReportService) runOnce() { + if s == nil || s.opsService == nil || s.emailService == nil { + return + } + + startedAt := time.Now().UTC() + runAt := startedAt + + ctx, cancel := context.WithTimeout(s.stopCtx, 60*time.Second) + defer cancel() + + // Respect ops monitoring enabled switch. 
+ if !s.opsService.IsMonitoringEnabled(ctx) { + return + } + + release, ok := s.tryAcquireLeaderLock(ctx) + if !ok { + return + } + if release != nil { + defer release() + } + + now := time.Now() + if s.loc != nil { + now = now.In(s.loc) + } + + reports := s.listScheduledReports(ctx, now) + if len(reports) == 0 { + return + } + + reportsTotal := len(reports) + reportsDue := 0 + sentAttempts := 0 + + for _, report := range reports { + if report == nil || !report.Enabled { + continue + } + if report.NextRunAt.After(now) { + continue + } + reportsDue++ + + attempts, err := s.runReport(ctx, report, now) + if err != nil { + s.recordHeartbeatError(runAt, time.Since(startedAt), err) + return + } + sentAttempts += attempts + } + + result := truncateString(fmt.Sprintf("reports=%d due=%d send_attempts=%d", reportsTotal, reportsDue, sentAttempts), 2048) + s.recordHeartbeatSuccess(runAt, time.Since(startedAt), result) +} + +type opsScheduledReport struct { + Name string + ReportType string + Schedule string + Enabled bool + + TimeRange time.Duration + + Recipients []string + + ErrorDigestMinCount int + AccountHealthErrorRateThreshold float64 + + LastRunAt *time.Time + NextRunAt time.Time +} + +func (s *OpsScheduledReportService) listScheduledReports(ctx context.Context, now time.Time) []*opsScheduledReport { + if s == nil || s.opsService == nil { + return nil + } + if ctx == nil { + ctx = context.Background() + } + + emailCfg, err := s.opsService.GetEmailNotificationConfig(ctx) + if err != nil || emailCfg == nil { + return nil + } + if !emailCfg.Report.Enabled { + return nil + } + + recipients := normalizeEmails(emailCfg.Report.Recipients) + + type reportDef struct { + enabled bool + name string + kind string + timeRange time.Duration + schedule string + } + + defs := []reportDef{ + {enabled: emailCfg.Report.DailySummaryEnabled, name: "日报", kind: "daily_summary", timeRange: 24 * time.Hour, schedule: emailCfg.Report.DailySummarySchedule}, + {enabled: 
emailCfg.Report.WeeklySummaryEnabled, name: "周报", kind: "weekly_summary", timeRange: 7 * 24 * time.Hour, schedule: emailCfg.Report.WeeklySummarySchedule}, + {enabled: emailCfg.Report.ErrorDigestEnabled, name: "错误摘要", kind: "error_digest", timeRange: 24 * time.Hour, schedule: emailCfg.Report.ErrorDigestSchedule}, + {enabled: emailCfg.Report.AccountHealthEnabled, name: "账号健康", kind: "account_health", timeRange: 24 * time.Hour, schedule: emailCfg.Report.AccountHealthSchedule}, + } + + out := make([]*opsScheduledReport, 0, len(defs)) + for _, d := range defs { + if !d.enabled { + continue + } + spec := strings.TrimSpace(d.schedule) + if spec == "" { + continue + } + sched, err := opsScheduledReportCronParser.Parse(spec) + if err != nil { + log.Printf("[OpsScheduledReport] invalid cron spec=%q for report=%s: %v", spec, d.kind, err) + continue + } + + lastRun := s.getLastRunAt(ctx, d.kind) + base := lastRun + if base.IsZero() { + // Allow a schedule matching the current minute to trigger right after startup. + base = now.Add(-1 * time.Minute) + } + next := sched.Next(base) + if next.IsZero() { + continue + } + + var lastRunPtr *time.Time + if !lastRun.IsZero() { + lastCopy := lastRun + lastRunPtr = &lastCopy + } + + out = append(out, &opsScheduledReport{ + Name: d.name, + ReportType: d.kind, + Schedule: spec, + Enabled: true, + + TimeRange: d.timeRange, + + Recipients: recipients, + + ErrorDigestMinCount: emailCfg.Report.ErrorDigestMinCount, + AccountHealthErrorRateThreshold: emailCfg.Report.AccountHealthErrorRateThreshold, + + LastRunAt: lastRunPtr, + NextRunAt: next, + }) + } + + return out +} + +func (s *OpsScheduledReportService) runReport(ctx context.Context, report *opsScheduledReport, now time.Time) (int, error) { + if s == nil || s.opsService == nil || s.emailService == nil || report == nil { + return 0, nil + } + if ctx == nil { + ctx = context.Background() + } + + // Mark as "run" up-front so a broken SMTP config doesn't spam retries every minute. 
+ s.setLastRunAt(ctx, report.ReportType, now) + + content, err := s.generateReportHTML(ctx, report, now) + if err != nil { + return 0, err + } + if strings.TrimSpace(content) == "" { + // Skip sending when the report decides not to emit content (e.g., digest below min count). + return 0, nil + } + + recipients := report.Recipients + if len(recipients) == 0 && s.userService != nil { + admin, err := s.userService.GetFirstAdmin(ctx) + if err == nil && admin != nil && strings.TrimSpace(admin.Email) != "" { + recipients = []string{strings.TrimSpace(admin.Email)} + } + } + if len(recipients) == 0 { + return 0, nil + } + + subject := fmt.Sprintf("[Ops Report] %s", strings.TrimSpace(report.Name)) + + attempts := 0 + for _, to := range recipients { + addr := strings.TrimSpace(to) + if addr == "" { + continue + } + attempts++ + if err := s.emailService.SendEmail(ctx, addr, subject, content); err != nil { + // Ignore per-recipient failures; continue best-effort. + continue + } + } + return attempts, nil +} + +func (s *OpsScheduledReportService) generateReportHTML(ctx context.Context, report *opsScheduledReport, now time.Time) (string, error) { + if s == nil || s.opsService == nil || report == nil { + return "", fmt.Errorf("service not initialized") + } + if report.TimeRange <= 0 { + return "", fmt.Errorf("invalid time range") + } + + end := now.UTC() + start := end.Add(-report.TimeRange) + + switch strings.TrimSpace(report.ReportType) { + case "daily_summary", "weekly_summary": + overview, err := s.opsService.GetDashboardOverview(ctx, &OpsDashboardFilter{ + StartTime: start, + EndTime: end, + Platform: "", + GroupID: nil, + QueryMode: OpsQueryModeAuto, + }) + if err != nil { + // If pre-aggregation isn't ready but the report is requested, fall back to raw. 
+ if strings.TrimSpace(report.ReportType) == "daily_summary" || strings.TrimSpace(report.ReportType) == "weekly_summary" { + overview, err = s.opsService.GetDashboardOverview(ctx, &OpsDashboardFilter{ + StartTime: start, + EndTime: end, + Platform: "", + GroupID: nil, + QueryMode: OpsQueryModeRaw, + }) + } + if err != nil { + return "", err + } + } + return buildOpsSummaryEmailHTML(report.Name, start, end, overview), nil + case "error_digest": + // Lightweight digest: list recent errors (status>=400) and breakdown by type. + startTime := start + endTime := end + filter := &OpsErrorLogFilter{ + StartTime: &startTime, + EndTime: &endTime, + Page: 1, + PageSize: 100, + } + out, err := s.opsService.GetErrorLogs(ctx, filter) + if err != nil { + return "", err + } + if report.ErrorDigestMinCount > 0 && out != nil && out.Total < report.ErrorDigestMinCount { + return "", nil + } + return buildOpsErrorDigestEmailHTML(report.Name, start, end, out), nil + case "account_health": + // Best-effort: use account availability (not error rate yet). + avail, err := s.opsService.GetAccountAvailability(ctx, "", nil) + if err != nil { + return "", err + } + _ = report.AccountHealthErrorRateThreshold // reserved for future per-account error rate report + return buildOpsAccountHealthEmailHTML(report.Name, start, end, avail), nil + default: + return "", fmt.Errorf("unknown report type: %s", report.ReportType) + } +} + +func buildOpsSummaryEmailHTML(title string, start, end time.Time, overview *OpsDashboardOverview) string { + if overview == nil { + return fmt.Sprintf("

%s

No data.

", htmlEscape(title)) + } + + latP50 := "-" + latP99 := "-" + if overview.Duration.P50 != nil { + latP50 = fmt.Sprintf("%dms", *overview.Duration.P50) + } + if overview.Duration.P99 != nil { + latP99 = fmt.Sprintf("%dms", *overview.Duration.P99) + } + + ttftP50 := "-" + ttftP99 := "-" + if overview.TTFT.P50 != nil { + ttftP50 = fmt.Sprintf("%dms", *overview.TTFT.P50) + } + if overview.TTFT.P99 != nil { + ttftP99 = fmt.Sprintf("%dms", *overview.TTFT.P99) + } + + return fmt.Sprintf(` +

%s

+

Period: %s ~ %s (UTC)

+
    +
  • Total Requests: %d
  • +
  • Success: %d
  • +
  • Errors (SLA): %d
  • +
  • Business Limited: %d
  • +
  • SLA: %.2f%%
  • +
  • Error Rate: %.2f%%
  • +
  • Upstream Error Rate (excl 429/529): %.2f%%
  • +
  • Upstream Errors: excl429/529=%d, 429=%d, 529=%d
  • +
  • Latency: p50=%s, p99=%s
  • +
  • TTFT: p50=%s, p99=%s
  • +
  • Tokens: %d
  • +
  • QPS: current=%.1f, peak=%.1f, avg=%.1f
  • +
  • TPS: current=%.1f, peak=%.1f, avg=%.1f
  • +
+`, + htmlEscape(strings.TrimSpace(title)), + htmlEscape(start.UTC().Format(time.RFC3339)), + htmlEscape(end.UTC().Format(time.RFC3339)), + overview.RequestCountTotal, + overview.SuccessCount, + overview.ErrorCountSLA, + overview.BusinessLimitedCount, + overview.SLA*100, + overview.ErrorRate*100, + overview.UpstreamErrorRate*100, + overview.UpstreamErrorCountExcl429529, + overview.Upstream429Count, + overview.Upstream529Count, + htmlEscape(latP50), + htmlEscape(latP99), + htmlEscape(ttftP50), + htmlEscape(ttftP99), + overview.TokenConsumed, + overview.QPS.Current, + overview.QPS.Peak, + overview.QPS.Avg, + overview.TPS.Current, + overview.TPS.Peak, + overview.TPS.Avg, + ) +} + +func buildOpsErrorDigestEmailHTML(title string, start, end time.Time, list *OpsErrorLogList) string { + total := 0 + recent := []*OpsErrorLog{} + if list != nil { + total = list.Total + recent = list.Errors + } + if len(recent) > 10 { + recent = recent[:10] + } + + rows := "" + for _, item := range recent { + if item == nil { + continue + } + rows += fmt.Sprintf( + "%s%s%d%s", + htmlEscape(item.CreatedAt.UTC().Format(time.RFC3339)), + htmlEscape(item.Platform), + item.StatusCode, + htmlEscape(truncateString(item.Message, 180)), + ) + } + if rows == "" { + rows = "No recent errors." + } + + return fmt.Sprintf(` +

%s

+

Period: %s ~ %s (UTC)

+

Total Errors: %d

+

Recent

+ + + %s +
TimePlatformStatusMessage
+`, + htmlEscape(strings.TrimSpace(title)), + htmlEscape(start.UTC().Format(time.RFC3339)), + htmlEscape(end.UTC().Format(time.RFC3339)), + total, + rows, + ) +} + +func buildOpsAccountHealthEmailHTML(title string, start, end time.Time, avail *OpsAccountAvailability) string { + total := 0 + available := 0 + rateLimited := 0 + hasError := 0 + + if avail != nil && avail.Accounts != nil { + for _, a := range avail.Accounts { + if a == nil { + continue + } + total++ + if a.IsAvailable { + available++ + } + if a.IsRateLimited { + rateLimited++ + } + if a.HasError { + hasError++ + } + } + } + + return fmt.Sprintf(` +

%s

+

Period: %s ~ %s (UTC)

+
    +
  • Total Accounts: %d
  • +
  • Available: %d
  • +
  • Rate Limited: %d
  • +
  • Error: %d
  • +
+

Note: This report currently reflects account availability status only.

+`, + htmlEscape(strings.TrimSpace(title)), + htmlEscape(start.UTC().Format(time.RFC3339)), + htmlEscape(end.UTC().Format(time.RFC3339)), + total, + available, + rateLimited, + hasError, + ) +} + +func (s *OpsScheduledReportService) tryAcquireLeaderLock(ctx context.Context) (func(), bool) { + if s == nil || !s.distributedLockOn { + return nil, true + } + if s.redisClient == nil { + s.warnNoRedisOnce.Do(func() { + log.Printf("[OpsScheduledReport] redis not configured; running without distributed lock") + }) + return nil, true + } + if ctx == nil { + ctx = context.Background() + } + + key := opsScheduledReportLeaderLockKeyDefault + ttl := opsScheduledReportLeaderLockTTLDefault + if strings.TrimSpace(key) == "" { + key = "ops:scheduled_reports:leader" + } + if ttl <= 0 { + ttl = 5 * time.Minute + } + + ok, err := s.redisClient.SetNX(ctx, key, s.instanceID, ttl).Result() + if err != nil { + // Prefer fail-closed to avoid duplicate report sends when Redis is flaky. + log.Printf("[OpsScheduledReport] leader lock SetNX failed; skipping this cycle: %v", err) + return nil, false + } + if !ok { + return nil, false + } + return func() { + _, _ = opsScheduledReportReleaseScript.Run(ctx, s.redisClient, []string{key}, s.instanceID).Result() + }, true +} + +func (s *OpsScheduledReportService) getLastRunAt(ctx context.Context, reportType string) time.Time { + if s == nil || s.redisClient == nil { + return time.Time{} + } + kind := strings.TrimSpace(reportType) + if kind == "" { + return time.Time{} + } + key := opsScheduledReportLastRunKeyPrefix + kind + + raw, err := s.redisClient.Get(ctx, key).Result() + if err != nil || strings.TrimSpace(raw) == "" { + return time.Time{} + } + sec, err := strconv.ParseInt(strings.TrimSpace(raw), 10, 64) + if err != nil || sec <= 0 { + return time.Time{} + } + last := time.Unix(sec, 0) + // Cron schedules are interpreted in the configured timezone (s.loc). 
Ensure the base time + // passed into cron.Next() uses the same location; otherwise the job will drift by timezone + // offset (e.g. Asia/Shanghai default would run 8h later after the first execution). + if s.loc != nil { + return last.In(s.loc) + } + return last.UTC() +} + +func (s *OpsScheduledReportService) setLastRunAt(ctx context.Context, reportType string, t time.Time) { + if s == nil || s.redisClient == nil { + return + } + kind := strings.TrimSpace(reportType) + if kind == "" { + return + } + if t.IsZero() { + t = time.Now().UTC() + } + key := opsScheduledReportLastRunKeyPrefix + kind + _ = s.redisClient.Set(ctx, key, strconv.FormatInt(t.UTC().Unix(), 10), 14*24*time.Hour).Err() +} + +func (s *OpsScheduledReportService) recordHeartbeatSuccess(runAt time.Time, duration time.Duration, result string) { + if s == nil || s.opsService == nil || s.opsService.opsRepo == nil { + return + } + now := time.Now().UTC() + durMs := duration.Milliseconds() + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + msg := strings.TrimSpace(result) + if msg == "" { + msg = "ok" + } + msg = truncateString(msg, 2048) + _ = s.opsService.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{ + JobName: opsScheduledReportJobName, + LastRunAt: &runAt, + LastSuccessAt: &now, + LastDurationMs: &durMs, + LastResult: &msg, + }) +} + +func (s *OpsScheduledReportService) recordHeartbeatError(runAt time.Time, duration time.Duration, err error) { + if s == nil || s.opsService == nil || s.opsService.opsRepo == nil || err == nil { + return + } + now := time.Now().UTC() + durMs := duration.Milliseconds() + msg := truncateString(err.Error(), 2048) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + _ = s.opsService.opsRepo.UpsertJobHeartbeat(ctx, &OpsUpsertJobHeartbeatInput{ + JobName: opsScheduledReportJobName, + LastRunAt: &runAt, + LastErrorAt: &now, + LastError: &msg, + LastDurationMs: &durMs, + }) +} + +func 
normalizeEmails(in []string) []string { + if len(in) == 0 { + return nil + } + seen := make(map[string]struct{}, len(in)) + out := make([]string, 0, len(in)) + for _, raw := range in { + addr := strings.ToLower(strings.TrimSpace(raw)) + if addr == "" { + continue + } + if _, ok := seen[addr]; ok { + continue + } + seen[addr] = struct{}{} + out = append(out, addr) + } + return out +} diff --git a/backend/internal/service/ops_service.go b/backend/internal/service/ops_service.go new file mode 100644 index 00000000..abb8ae12 --- /dev/null +++ b/backend/internal/service/ops_service.go @@ -0,0 +1,613 @@ +package service + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "log" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +var ErrOpsDisabled = infraerrors.NotFound("OPS_DISABLED", "Ops monitoring is disabled") + +const ( + opsMaxStoredRequestBodyBytes = 10 * 1024 + opsMaxStoredErrorBodyBytes = 20 * 1024 +) + +// OpsService provides ingestion and query APIs for the Ops monitoring module. +type OpsService struct { + opsRepo OpsRepository + settingRepo SettingRepository + cfg *config.Config + + accountRepo AccountRepository + + // getAccountAvailability is a unit-test hook for overriding account availability lookup. 
+ getAccountAvailability func(ctx context.Context, platformFilter string, groupIDFilter *int64) (*OpsAccountAvailability, error) + + concurrencyService *ConcurrencyService + gatewayService *GatewayService + openAIGatewayService *OpenAIGatewayService + geminiCompatService *GeminiMessagesCompatService + antigravityGatewayService *AntigravityGatewayService +} + +func NewOpsService( + opsRepo OpsRepository, + settingRepo SettingRepository, + cfg *config.Config, + accountRepo AccountRepository, + concurrencyService *ConcurrencyService, + gatewayService *GatewayService, + openAIGatewayService *OpenAIGatewayService, + geminiCompatService *GeminiMessagesCompatService, + antigravityGatewayService *AntigravityGatewayService, +) *OpsService { + return &OpsService{ + opsRepo: opsRepo, + settingRepo: settingRepo, + cfg: cfg, + + accountRepo: accountRepo, + + concurrencyService: concurrencyService, + gatewayService: gatewayService, + openAIGatewayService: openAIGatewayService, + geminiCompatService: geminiCompatService, + antigravityGatewayService: antigravityGatewayService, + } +} + +func (s *OpsService) RequireMonitoringEnabled(ctx context.Context) error { + if s.IsMonitoringEnabled(ctx) { + return nil + } + return ErrOpsDisabled +} + +func (s *OpsService) IsMonitoringEnabled(ctx context.Context) bool { + // Hard switch: disable ops entirely. + if s.cfg != nil && !s.cfg.Ops.Enabled { + return false + } + if s.settingRepo == nil { + return true + } + value, err := s.settingRepo.GetValue(ctx, SettingKeyOpsMonitoringEnabled) + if err != nil { + // Default enabled when key is missing, and fail-open on transient errors + // (ops should never block gateway traffic). 
+ if errors.Is(err, ErrSettingNotFound) { + return true + } + return true + } + switch strings.ToLower(strings.TrimSpace(value)) { + case "false", "0", "off", "disabled": + return false + default: + return true + } +} + +func (s *OpsService) RecordError(ctx context.Context, entry *OpsInsertErrorLogInput, rawRequestBody []byte) error { + if entry == nil { + return nil + } + if !s.IsMonitoringEnabled(ctx) { + return nil + } + if s.opsRepo == nil { + return nil + } + + // Ensure timestamps are always populated. + if entry.CreatedAt.IsZero() { + entry.CreatedAt = time.Now() + } + + // Ensure required fields exist (DB has NOT NULL constraints). + entry.ErrorPhase = strings.TrimSpace(entry.ErrorPhase) + entry.ErrorType = strings.TrimSpace(entry.ErrorType) + if entry.ErrorPhase == "" { + entry.ErrorPhase = "internal" + } + if entry.ErrorType == "" { + entry.ErrorType = "api_error" + } + + // Sanitize + trim request body (errors only). + if len(rawRequestBody) > 0 { + sanitized, truncated, bytesLen := sanitizeAndTrimRequestBody(rawRequestBody, opsMaxStoredRequestBodyBytes) + if sanitized != "" { + entry.RequestBodyJSON = &sanitized + } + entry.RequestBodyTruncated = truncated + entry.RequestBodyBytes = &bytesLen + } + + // Sanitize + truncate error_body to avoid storing sensitive data. + if strings.TrimSpace(entry.ErrorBody) != "" { + sanitized, _ := sanitizeErrorBodyForStorage(entry.ErrorBody, opsMaxStoredErrorBodyBytes) + entry.ErrorBody = sanitized + } + + // Sanitize upstream error context if provided by gateway services. 
+ if entry.UpstreamStatusCode != nil && *entry.UpstreamStatusCode <= 0 { + entry.UpstreamStatusCode = nil + } + if entry.UpstreamErrorMessage != nil { + msg := strings.TrimSpace(*entry.UpstreamErrorMessage) + msg = sanitizeUpstreamErrorMessage(msg) + msg = truncateString(msg, 2048) + if strings.TrimSpace(msg) == "" { + entry.UpstreamErrorMessage = nil + } else { + entry.UpstreamErrorMessage = &msg + } + } + if entry.UpstreamErrorDetail != nil { + detail := strings.TrimSpace(*entry.UpstreamErrorDetail) + if detail == "" { + entry.UpstreamErrorDetail = nil + } else { + sanitized, _ := sanitizeErrorBodyForStorage(detail, opsMaxStoredErrorBodyBytes) + if strings.TrimSpace(sanitized) == "" { + entry.UpstreamErrorDetail = nil + } else { + entry.UpstreamErrorDetail = &sanitized + } + } + } + + // Sanitize + serialize upstream error events list. + if len(entry.UpstreamErrors) > 0 { + const maxEvents = 32 + events := entry.UpstreamErrors + if len(events) > maxEvents { + events = events[len(events)-maxEvents:] + } + + sanitized := make([]*OpsUpstreamErrorEvent, 0, len(events)) + for _, ev := range events { + if ev == nil { + continue + } + out := *ev + + out.Platform = strings.TrimSpace(out.Platform) + out.UpstreamRequestID = truncateString(strings.TrimSpace(out.UpstreamRequestID), 128) + out.Kind = truncateString(strings.TrimSpace(out.Kind), 64) + + if out.AccountID < 0 { + out.AccountID = 0 + } + if out.UpstreamStatusCode < 0 { + out.UpstreamStatusCode = 0 + } + if out.AtUnixMs < 0 { + out.AtUnixMs = 0 + } + + msg := sanitizeUpstreamErrorMessage(strings.TrimSpace(out.Message)) + msg = truncateString(msg, 2048) + out.Message = msg + + detail := strings.TrimSpace(out.Detail) + if detail != "" { + // Keep upstream detail small; request bodies are not stored here, only upstream error payloads. 
+ sanitizedDetail, _ := sanitizeErrorBodyForStorage(detail, opsMaxStoredErrorBodyBytes) + out.Detail = sanitizedDetail + } else { + out.Detail = "" + } + + out.UpstreamRequestBody = strings.TrimSpace(out.UpstreamRequestBody) + if out.UpstreamRequestBody != "" { + // Reuse the same sanitization/trimming strategy as request body storage. + // Keep it small so it is safe to persist in ops_error_logs JSON. + sanitized, truncated, _ := sanitizeAndTrimRequestBody([]byte(out.UpstreamRequestBody), 10*1024) + if sanitized != "" { + out.UpstreamRequestBody = sanitized + if truncated { + out.Kind = strings.TrimSpace(out.Kind) + if out.Kind == "" { + out.Kind = "upstream" + } + out.Kind = out.Kind + ":request_body_truncated" + } + } else { + out.UpstreamRequestBody = "" + } + } + + // Drop fully-empty events (can happen if only status code was known). + if out.UpstreamStatusCode == 0 && out.Message == "" && out.Detail == "" { + continue + } + + evCopy := out + sanitized = append(sanitized, &evCopy) + } + + entry.UpstreamErrorsJSON = marshalOpsUpstreamErrors(sanitized) + entry.UpstreamErrors = nil + } + + if _, err := s.opsRepo.InsertErrorLog(ctx, entry); err != nil { + // Never bubble up to gateway; best-effort logging. 
+ log.Printf("[Ops] RecordError failed: %v", err) + return err + } + return nil +} + +func (s *OpsService) GetErrorLogs(ctx context.Context, filter *OpsErrorLogFilter) (*OpsErrorLogList, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return &OpsErrorLogList{Errors: []*OpsErrorLog{}, Total: 0, Page: 1, PageSize: 20}, nil + } + result, err := s.opsRepo.ListErrorLogs(ctx, filter) + if err != nil { + log.Printf("[Ops] GetErrorLogs failed: %v", err) + return nil, err + } + + return result, nil +} + +func (s *OpsService) GetErrorLogByID(ctx context.Context, id int64) (*OpsErrorLogDetail, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found") + } + detail, err := s.opsRepo.GetErrorLogByID(ctx, id) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return nil, infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found") + } + return nil, infraerrors.InternalServer("OPS_ERROR_LOAD_FAILED", "Failed to load ops error log").WithCause(err) + } + return detail, nil +} + +func (s *OpsService) ListRetryAttemptsByErrorID(ctx context.Context, errorID int64, limit int) ([]*OpsRetryAttempt, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if errorID <= 0 { + return nil, infraerrors.BadRequest("OPS_ERROR_INVALID_ID", "invalid error id") + } + items, err := s.opsRepo.ListRetryAttemptsByErrorID(ctx, errorID, limit) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return []*OpsRetryAttempt{}, nil + } + return nil, infraerrors.InternalServer("OPS_RETRY_LIST_FAILED", "Failed to list retry attempts").WithCause(err) + } + return items, nil +} + +func (s *OpsService) UpdateErrorResolution(ctx 
context.Context, errorID int64, resolved bool, resolvedByUserID *int64, resolvedRetryID *int64) error { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return err + } + if s.opsRepo == nil { + return infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if errorID <= 0 { + return infraerrors.BadRequest("OPS_ERROR_INVALID_ID", "invalid error id") + } + // Best-effort ensure the error exists + if _, err := s.opsRepo.GetErrorLogByID(ctx, errorID); err != nil { + if errors.Is(err, sql.ErrNoRows) { + return infraerrors.NotFound("OPS_ERROR_NOT_FOUND", "ops error log not found") + } + return infraerrors.InternalServer("OPS_ERROR_LOAD_FAILED", "Failed to load ops error log").WithCause(err) + } + return s.opsRepo.UpdateErrorResolution(ctx, errorID, resolved, resolvedByUserID, resolvedRetryID, nil) +} + +func sanitizeAndTrimRequestBody(raw []byte, maxBytes int) (jsonString string, truncated bool, bytesLen int) { + bytesLen = len(raw) + if len(raw) == 0 { + return "", false, 0 + } + + var decoded any + if err := json.Unmarshal(raw, &decoded); err != nil { + // If it's not valid JSON, don't store (retry would not be reliable anyway). + return "", false, bytesLen + } + + decoded = redactSensitiveJSON(decoded) + + encoded, err := json.Marshal(decoded) + if err != nil { + return "", false, bytesLen + } + if len(encoded) <= maxBytes { + return string(encoded), false, bytesLen + } + + // Trim conversation history to keep the most recent context. + if root, ok := decoded.(map[string]any); ok { + if trimmed, ok := trimConversationArrays(root, maxBytes); ok { + encoded2, err2 := json.Marshal(trimmed) + if err2 == nil && len(encoded2) <= maxBytes { + return string(encoded2), true, bytesLen + } + // Fallthrough: keep shrinking. 
+ decoded = trimmed + } + + essential := shrinkToEssentials(root) + encoded3, err3 := json.Marshal(essential) + if err3 == nil && len(encoded3) <= maxBytes { + return string(encoded3), true, bytesLen + } + } + + // Last resort: keep JSON shape but drop big fields. + // This avoids downstream code that expects certain top-level keys from crashing. + if root, ok := decoded.(map[string]any); ok { + placeholder := shallowCopyMap(root) + placeholder["request_body_truncated"] = true + + // Replace potentially huge arrays/strings, but keep the keys present. + for _, k := range []string{"messages", "contents", "input", "prompt"} { + if _, exists := placeholder[k]; exists { + placeholder[k] = []any{} + } + } + for _, k := range []string{"text"} { + if _, exists := placeholder[k]; exists { + placeholder[k] = "" + } + } + + encoded4, err4 := json.Marshal(placeholder) + if err4 == nil { + if len(encoded4) <= maxBytes { + return string(encoded4), true, bytesLen + } + } + } + + // Final fallback: minimal valid JSON. + encoded4, err4 := json.Marshal(map[string]any{"request_body_truncated": true}) + if err4 != nil { + return "", true, bytesLen + } + return string(encoded4), true, bytesLen +} + +func redactSensitiveJSON(v any) any { + switch t := v.(type) { + case map[string]any: + out := make(map[string]any, len(t)) + for k, vv := range t { + if isSensitiveKey(k) { + out[k] = "[REDACTED]" + continue + } + out[k] = redactSensitiveJSON(vv) + } + return out + case []any: + out := make([]any, 0, len(t)) + for _, vv := range t { + out = append(out, redactSensitiveJSON(vv)) + } + return out + default: + return v + } +} + +func isSensitiveKey(key string) bool { + k := strings.ToLower(strings.TrimSpace(key)) + if k == "" { + return false + } + + // Exact matches (common credential fields). 
+ switch k { + case "authorization", + "proxy-authorization", + "x-api-key", + "api_key", + "apikey", + "access_token", + "refresh_token", + "id_token", + "session_token", + "token", + "password", + "passwd", + "passphrase", + "secret", + "client_secret", + "private_key", + "jwt", + "signature", + "accesskeyid", + "secretaccesskey": + return true + } + + // Suffix matches. + for _, suffix := range []string{ + "_secret", + "_token", + "_id_token", + "_session_token", + "_password", + "_passwd", + "_passphrase", + "_key", + "secret_key", + "private_key", + } { + if strings.HasSuffix(k, suffix) { + return true + } + } + + // Substring matches (conservative, but errs on the side of privacy). + for _, sub := range []string{ + "secret", + "token", + "password", + "passwd", + "passphrase", + "privatekey", + "private_key", + "apikey", + "api_key", + "accesskeyid", + "secretaccesskey", + "bearer", + "cookie", + "credential", + "session", + "jwt", + "signature", + } { + if strings.Contains(k, sub) { + return true + } + } + + return false +} + +func trimConversationArrays(root map[string]any, maxBytes int) (map[string]any, bool) { + // Supported: anthropic/openai: messages; gemini: contents. + if out, ok := trimArrayField(root, "messages", maxBytes); ok { + return out, true + } + if out, ok := trimArrayField(root, "contents", maxBytes); ok { + return out, true + } + return root, false +} + +func trimArrayField(root map[string]any, field string, maxBytes int) (map[string]any, bool) { + raw, ok := root[field] + if !ok { + return nil, false + } + arr, ok := raw.([]any) + if !ok || len(arr) == 0 { + return nil, false + } + + // Keep at least the last message/content. Use binary search so we don't marshal O(n) times. + // We are dropping from the *front* of the array (oldest context first). 
+ lo := 0 + hi := len(arr) - 1 // inclusive; hi ensures at least one item remains + + var best map[string]any + found := false + + for lo <= hi { + mid := (lo + hi) / 2 + candidateArr := arr[mid:] + if len(candidateArr) == 0 { + lo = mid + 1 + continue + } + + next := shallowCopyMap(root) + next[field] = candidateArr + encoded, err := json.Marshal(next) + if err != nil { + // If marshal fails, try dropping more. + lo = mid + 1 + continue + } + + if len(encoded) <= maxBytes { + best = next + found = true + // Try to keep more context by dropping fewer items. + hi = mid - 1 + continue + } + + // Need to drop more. + lo = mid + 1 + } + + if found { + return best, true + } + + // Nothing fit (even with only one element); return the smallest slice and let the + // caller fall back to shrinkToEssentials(). + next := shallowCopyMap(root) + next[field] = arr[len(arr)-1:] + return next, true +} + +func shrinkToEssentials(root map[string]any) map[string]any { + out := make(map[string]any) + for _, key := range []string{"model", "stream", "max_tokens", "temperature", "top_p", "top_k"} { + if v, ok := root[key]; ok { + out[key] = v + } + } + + // Keep only the last element of the conversation array. + if v, ok := root["messages"]; ok { + if arr, ok := v.([]any); ok && len(arr) > 0 { + out["messages"] = []any{arr[len(arr)-1]} + } + } + if v, ok := root["contents"]; ok { + if arr, ok := v.([]any); ok && len(arr) > 0 { + out["contents"] = []any{arr[len(arr)-1]} + } + } + return out +} + +func shallowCopyMap(m map[string]any) map[string]any { + out := make(map[string]any, len(m)) + for k, v := range m { + out[k] = v + } + return out +} + +func sanitizeErrorBodyForStorage(raw string, maxBytes int) (sanitized string, truncated bool) { + raw = strings.TrimSpace(raw) + if raw == "" { + return "", false + } + + // Prefer JSON-safe sanitization when possible. 
+ if out, trunc, _ := sanitizeAndTrimRequestBody([]byte(raw), maxBytes); out != "" { + return out, trunc + } + + // Non-JSON: best-effort truncate. + if maxBytes > 0 && len(raw) > maxBytes { + return truncateString(raw, maxBytes), true + } + return raw, false +} diff --git a/backend/internal/service/ops_settings.go b/backend/internal/service/ops_settings.go new file mode 100644 index 00000000..a6a4a0d7 --- /dev/null +++ b/backend/internal/service/ops_settings.go @@ -0,0 +1,562 @@ +package service + +import ( + "context" + "encoding/json" + "errors" + "strings" + "time" +) + +const ( + opsAlertEvaluatorLeaderLockKeyDefault = "ops:alert:evaluator:leader" + opsAlertEvaluatorLeaderLockTTLDefault = 30 * time.Second +) + +// ========================= +// Email notification config +// ========================= + +func (s *OpsService) GetEmailNotificationConfig(ctx context.Context) (*OpsEmailNotificationConfig, error) { + defaultCfg := defaultOpsEmailNotificationConfig() + if s == nil || s.settingRepo == nil { + return defaultCfg, nil + } + if ctx == nil { + ctx = context.Background() + } + + raw, err := s.settingRepo.GetValue(ctx, SettingKeyOpsEmailNotificationConfig) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + // Initialize defaults on first read (best-effort). + if b, mErr := json.Marshal(defaultCfg); mErr == nil { + _ = s.settingRepo.Set(ctx, SettingKeyOpsEmailNotificationConfig, string(b)) + } + return defaultCfg, nil + } + return nil, err + } + + cfg := &OpsEmailNotificationConfig{} + if err := json.Unmarshal([]byte(raw), cfg); err != nil { + // Corrupted JSON should not break ops UI; fall back to defaults. 
+ return defaultCfg, nil + } + normalizeOpsEmailNotificationConfig(cfg) + return cfg, nil +} + +func (s *OpsService) UpdateEmailNotificationConfig(ctx context.Context, req *OpsEmailNotificationConfigUpdateRequest) (*OpsEmailNotificationConfig, error) { + if s == nil || s.settingRepo == nil { + return nil, errors.New("setting repository not initialized") + } + if ctx == nil { + ctx = context.Background() + } + if req == nil { + return nil, errors.New("invalid request") + } + + cfg, err := s.GetEmailNotificationConfig(ctx) + if err != nil { + return nil, err + } + + if req.Alert != nil { + cfg.Alert.Enabled = req.Alert.Enabled + if req.Alert.Recipients != nil { + cfg.Alert.Recipients = req.Alert.Recipients + } + cfg.Alert.MinSeverity = strings.TrimSpace(req.Alert.MinSeverity) + cfg.Alert.RateLimitPerHour = req.Alert.RateLimitPerHour + cfg.Alert.BatchingWindowSeconds = req.Alert.BatchingWindowSeconds + cfg.Alert.IncludeResolvedAlerts = req.Alert.IncludeResolvedAlerts + } + + if req.Report != nil { + cfg.Report.Enabled = req.Report.Enabled + if req.Report.Recipients != nil { + cfg.Report.Recipients = req.Report.Recipients + } + cfg.Report.DailySummaryEnabled = req.Report.DailySummaryEnabled + cfg.Report.DailySummarySchedule = strings.TrimSpace(req.Report.DailySummarySchedule) + cfg.Report.WeeklySummaryEnabled = req.Report.WeeklySummaryEnabled + cfg.Report.WeeklySummarySchedule = strings.TrimSpace(req.Report.WeeklySummarySchedule) + cfg.Report.ErrorDigestEnabled = req.Report.ErrorDigestEnabled + cfg.Report.ErrorDigestSchedule = strings.TrimSpace(req.Report.ErrorDigestSchedule) + cfg.Report.ErrorDigestMinCount = req.Report.ErrorDigestMinCount + cfg.Report.AccountHealthEnabled = req.Report.AccountHealthEnabled + cfg.Report.AccountHealthSchedule = strings.TrimSpace(req.Report.AccountHealthSchedule) + cfg.Report.AccountHealthErrorRateThreshold = req.Report.AccountHealthErrorRateThreshold + } + + if err := validateOpsEmailNotificationConfig(cfg); err != nil { + return nil, 
err + } + + normalizeOpsEmailNotificationConfig(cfg) + raw, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + if err := s.settingRepo.Set(ctx, SettingKeyOpsEmailNotificationConfig, string(raw)); err != nil { + return nil, err + } + return cfg, nil +} + +func defaultOpsEmailNotificationConfig() *OpsEmailNotificationConfig { + return &OpsEmailNotificationConfig{ + Alert: OpsEmailAlertConfig{ + Enabled: true, + Recipients: []string{}, + MinSeverity: "", + RateLimitPerHour: 0, + BatchingWindowSeconds: 0, + IncludeResolvedAlerts: false, + }, + Report: OpsEmailReportConfig{ + Enabled: false, + Recipients: []string{}, + DailySummaryEnabled: false, + DailySummarySchedule: "0 9 * * *", + WeeklySummaryEnabled: false, + WeeklySummarySchedule: "0 9 * * 1", + ErrorDigestEnabled: false, + ErrorDigestSchedule: "0 9 * * *", + ErrorDigestMinCount: 10, + AccountHealthEnabled: false, + AccountHealthSchedule: "0 9 * * *", + AccountHealthErrorRateThreshold: 10.0, + }, + } +} + +func normalizeOpsEmailNotificationConfig(cfg *OpsEmailNotificationConfig) { + if cfg == nil { + return + } + if cfg.Alert.Recipients == nil { + cfg.Alert.Recipients = []string{} + } + if cfg.Report.Recipients == nil { + cfg.Report.Recipients = []string{} + } + + cfg.Alert.MinSeverity = strings.TrimSpace(cfg.Alert.MinSeverity) + cfg.Report.DailySummarySchedule = strings.TrimSpace(cfg.Report.DailySummarySchedule) + cfg.Report.WeeklySummarySchedule = strings.TrimSpace(cfg.Report.WeeklySummarySchedule) + cfg.Report.ErrorDigestSchedule = strings.TrimSpace(cfg.Report.ErrorDigestSchedule) + cfg.Report.AccountHealthSchedule = strings.TrimSpace(cfg.Report.AccountHealthSchedule) + + // Fill missing schedules with defaults to avoid breaking cron logic if clients send empty strings. 
+ if cfg.Report.DailySummarySchedule == "" { + cfg.Report.DailySummarySchedule = "0 9 * * *" + } + if cfg.Report.WeeklySummarySchedule == "" { + cfg.Report.WeeklySummarySchedule = "0 9 * * 1" + } + if cfg.Report.ErrorDigestSchedule == "" { + cfg.Report.ErrorDigestSchedule = "0 9 * * *" + } + if cfg.Report.AccountHealthSchedule == "" { + cfg.Report.AccountHealthSchedule = "0 9 * * *" + } +} + +func validateOpsEmailNotificationConfig(cfg *OpsEmailNotificationConfig) error { + if cfg == nil { + return errors.New("invalid config") + } + + if cfg.Alert.RateLimitPerHour < 0 { + return errors.New("alert.rate_limit_per_hour must be >= 0") + } + if cfg.Alert.BatchingWindowSeconds < 0 { + return errors.New("alert.batching_window_seconds must be >= 0") + } + switch strings.TrimSpace(cfg.Alert.MinSeverity) { + case "", "critical", "warning", "info": + default: + return errors.New("alert.min_severity must be one of: critical, warning, info, or empty") + } + + if cfg.Report.ErrorDigestMinCount < 0 { + return errors.New("report.error_digest_min_count must be >= 0") + } + if cfg.Report.AccountHealthErrorRateThreshold < 0 || cfg.Report.AccountHealthErrorRateThreshold > 100 { + return errors.New("report.account_health_error_rate_threshold must be between 0 and 100") + } + return nil +} + +// ========================= +// Alert runtime settings +// ========================= + +func defaultOpsAlertRuntimeSettings() *OpsAlertRuntimeSettings { + return &OpsAlertRuntimeSettings{ + EvaluationIntervalSeconds: 60, + DistributedLock: OpsDistributedLockSettings{ + Enabled: true, + Key: opsAlertEvaluatorLeaderLockKeyDefault, + TTLSeconds: int(opsAlertEvaluatorLeaderLockTTLDefault.Seconds()), + }, + Silencing: OpsAlertSilencingSettings{ + Enabled: false, + GlobalUntilRFC3339: "", + GlobalReason: "", + Entries: []OpsAlertSilenceEntry{}, + }, + } +} + +func normalizeOpsDistributedLockSettings(s *OpsDistributedLockSettings, defaultKey string, defaultTTLSeconds int) { + if s == nil { + return + } + 
s.Key = strings.TrimSpace(s.Key) + if s.Key == "" { + s.Key = defaultKey + } + if s.TTLSeconds <= 0 { + s.TTLSeconds = defaultTTLSeconds + } +} + +func normalizeOpsAlertSilencingSettings(s *OpsAlertSilencingSettings) { + if s == nil { + return + } + s.GlobalUntilRFC3339 = strings.TrimSpace(s.GlobalUntilRFC3339) + s.GlobalReason = strings.TrimSpace(s.GlobalReason) + if s.Entries == nil { + s.Entries = []OpsAlertSilenceEntry{} + } + for i := range s.Entries { + s.Entries[i].UntilRFC3339 = strings.TrimSpace(s.Entries[i].UntilRFC3339) + s.Entries[i].Reason = strings.TrimSpace(s.Entries[i].Reason) + } +} + +func validateOpsDistributedLockSettings(s OpsDistributedLockSettings) error { + if strings.TrimSpace(s.Key) == "" { + return errors.New("distributed_lock.key is required") + } + if s.TTLSeconds <= 0 || s.TTLSeconds > int((24*time.Hour).Seconds()) { + return errors.New("distributed_lock.ttl_seconds must be between 1 and 86400") + } + return nil +} + +func validateOpsAlertSilencingSettings(s OpsAlertSilencingSettings) error { + parse := func(raw string) error { + if strings.TrimSpace(raw) == "" { + return nil + } + if _, err := time.Parse(time.RFC3339, raw); err != nil { + return errors.New("silencing time must be RFC3339") + } + return nil + } + + if err := parse(s.GlobalUntilRFC3339); err != nil { + return err + } + for _, entry := range s.Entries { + if strings.TrimSpace(entry.UntilRFC3339) == "" { + return errors.New("silencing.entries.until_rfc3339 is required") + } + if _, err := time.Parse(time.RFC3339, entry.UntilRFC3339); err != nil { + return errors.New("silencing.entries.until_rfc3339 must be RFC3339") + } + } + return nil +} + +func (s *OpsService) GetOpsAlertRuntimeSettings(ctx context.Context) (*OpsAlertRuntimeSettings, error) { + defaultCfg := defaultOpsAlertRuntimeSettings() + if s == nil || s.settingRepo == nil { + return defaultCfg, nil + } + if ctx == nil { + ctx = context.Background() + } + + raw, err := s.settingRepo.GetValue(ctx, 
SettingKeyOpsAlertRuntimeSettings) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + if b, mErr := json.Marshal(defaultCfg); mErr == nil { + _ = s.settingRepo.Set(ctx, SettingKeyOpsAlertRuntimeSettings, string(b)) + } + return defaultCfg, nil + } + return nil, err + } + + cfg := &OpsAlertRuntimeSettings{} + if err := json.Unmarshal([]byte(raw), cfg); err != nil { + return defaultCfg, nil + } + + if cfg.EvaluationIntervalSeconds <= 0 { + cfg.EvaluationIntervalSeconds = defaultCfg.EvaluationIntervalSeconds + } + normalizeOpsDistributedLockSettings(&cfg.DistributedLock, opsAlertEvaluatorLeaderLockKeyDefault, defaultCfg.DistributedLock.TTLSeconds) + normalizeOpsAlertSilencingSettings(&cfg.Silencing) + + return cfg, nil +} + +func (s *OpsService) UpdateOpsAlertRuntimeSettings(ctx context.Context, cfg *OpsAlertRuntimeSettings) (*OpsAlertRuntimeSettings, error) { + if s == nil || s.settingRepo == nil { + return nil, errors.New("setting repository not initialized") + } + if ctx == nil { + ctx = context.Background() + } + if cfg == nil { + return nil, errors.New("invalid config") + } + + if cfg.EvaluationIntervalSeconds < 1 || cfg.EvaluationIntervalSeconds > int((24*time.Hour).Seconds()) { + return nil, errors.New("evaluation_interval_seconds must be between 1 and 86400") + } + if cfg.DistributedLock.Enabled { + if err := validateOpsDistributedLockSettings(cfg.DistributedLock); err != nil { + return nil, err + } + } + if cfg.Silencing.Enabled { + if err := validateOpsAlertSilencingSettings(cfg.Silencing); err != nil { + return nil, err + } + } + + defaultCfg := defaultOpsAlertRuntimeSettings() + normalizeOpsDistributedLockSettings(&cfg.DistributedLock, opsAlertEvaluatorLeaderLockKeyDefault, defaultCfg.DistributedLock.TTLSeconds) + normalizeOpsAlertSilencingSettings(&cfg.Silencing) + + raw, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + if err := s.settingRepo.Set(ctx, SettingKeyOpsAlertRuntimeSettings, string(raw)); err != nil { + return nil, 
err + } + + // Return a fresh copy (avoid callers holding pointers into internal slices that may be mutated). + updated := &OpsAlertRuntimeSettings{} + _ = json.Unmarshal(raw, updated) + return updated, nil +} + +// ========================= +// Advanced settings +// ========================= + +func defaultOpsAdvancedSettings() *OpsAdvancedSettings { + return &OpsAdvancedSettings{ + DataRetention: OpsDataRetentionSettings{ + CleanupEnabled: false, + CleanupSchedule: "0 2 * * *", + ErrorLogRetentionDays: 30, + MinuteMetricsRetentionDays: 30, + HourlyMetricsRetentionDays: 30, + }, + Aggregation: OpsAggregationSettings{ + AggregationEnabled: false, + }, + IgnoreCountTokensErrors: false, + IgnoreContextCanceled: true, // Default to true - client disconnects are not errors + IgnoreNoAvailableAccounts: false, // Default to false - this is a real routing issue + AutoRefreshEnabled: false, + AutoRefreshIntervalSec: 30, + } +} + +func normalizeOpsAdvancedSettings(cfg *OpsAdvancedSettings) { + if cfg == nil { + return + } + cfg.DataRetention.CleanupSchedule = strings.TrimSpace(cfg.DataRetention.CleanupSchedule) + if cfg.DataRetention.CleanupSchedule == "" { + cfg.DataRetention.CleanupSchedule = "0 2 * * *" + } + if cfg.DataRetention.ErrorLogRetentionDays <= 0 { + cfg.DataRetention.ErrorLogRetentionDays = 30 + } + if cfg.DataRetention.MinuteMetricsRetentionDays <= 0 { + cfg.DataRetention.MinuteMetricsRetentionDays = 30 + } + if cfg.DataRetention.HourlyMetricsRetentionDays <= 0 { + cfg.DataRetention.HourlyMetricsRetentionDays = 30 + } + // Normalize auto refresh interval (default 30 seconds) + if cfg.AutoRefreshIntervalSec <= 0 { + cfg.AutoRefreshIntervalSec = 30 + } +} + +func validateOpsAdvancedSettings(cfg *OpsAdvancedSettings) error { + if cfg == nil { + return errors.New("invalid config") + } + if cfg.DataRetention.ErrorLogRetentionDays < 1 || cfg.DataRetention.ErrorLogRetentionDays > 365 { + return errors.New("error_log_retention_days must be between 1 and 365") + } + 
if cfg.DataRetention.MinuteMetricsRetentionDays < 1 || cfg.DataRetention.MinuteMetricsRetentionDays > 365 { + return errors.New("minute_metrics_retention_days must be between 1 and 365") + } + if cfg.DataRetention.HourlyMetricsRetentionDays < 1 || cfg.DataRetention.HourlyMetricsRetentionDays > 365 { + return errors.New("hourly_metrics_retention_days must be between 1 and 365") + } + if cfg.AutoRefreshIntervalSec < 15 || cfg.AutoRefreshIntervalSec > 300 { + return errors.New("auto_refresh_interval_seconds must be between 15 and 300") + } + return nil +} + +func (s *OpsService) GetOpsAdvancedSettings(ctx context.Context) (*OpsAdvancedSettings, error) { + defaultCfg := defaultOpsAdvancedSettings() + if s == nil || s.settingRepo == nil { + return defaultCfg, nil + } + if ctx == nil { + ctx = context.Background() + } + + raw, err := s.settingRepo.GetValue(ctx, SettingKeyOpsAdvancedSettings) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + if b, mErr := json.Marshal(defaultCfg); mErr == nil { + _ = s.settingRepo.Set(ctx, SettingKeyOpsAdvancedSettings, string(b)) + } + return defaultCfg, nil + } + return nil, err + } + + cfg := &OpsAdvancedSettings{} + if err := json.Unmarshal([]byte(raw), cfg); err != nil { + return defaultCfg, nil + } + + normalizeOpsAdvancedSettings(cfg) + return cfg, nil +} + +func (s *OpsService) UpdateOpsAdvancedSettings(ctx context.Context, cfg *OpsAdvancedSettings) (*OpsAdvancedSettings, error) { + if s == nil || s.settingRepo == nil { + return nil, errors.New("setting repository not initialized") + } + if ctx == nil { + ctx = context.Background() + } + if cfg == nil { + return nil, errors.New("invalid config") + } + + if err := validateOpsAdvancedSettings(cfg); err != nil { + return nil, err + } + + normalizeOpsAdvancedSettings(cfg) + raw, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + if err := s.settingRepo.Set(ctx, SettingKeyOpsAdvancedSettings, string(raw)); err != nil { + return nil, err + } + + updated := 
&OpsAdvancedSettings{} + _ = json.Unmarshal(raw, updated) + return updated, nil +} + +// ========================= +// Metric thresholds +// ========================= + +const SettingKeyOpsMetricThresholds = "ops_metric_thresholds" + +func defaultOpsMetricThresholds() *OpsMetricThresholds { + slaMin := 99.5 + ttftMax := 500.0 + reqErrMax := 5.0 + upstreamErrMax := 5.0 + return &OpsMetricThresholds{ + SLAPercentMin: &slaMin, + TTFTp99MsMax: &ttftMax, + RequestErrorRatePercentMax: &reqErrMax, + UpstreamErrorRatePercentMax: &upstreamErrMax, + } +} + +func (s *OpsService) GetMetricThresholds(ctx context.Context) (*OpsMetricThresholds, error) { + defaultCfg := defaultOpsMetricThresholds() + if s == nil || s.settingRepo == nil { + return defaultCfg, nil + } + if ctx == nil { + ctx = context.Background() + } + + raw, err := s.settingRepo.GetValue(ctx, SettingKeyOpsMetricThresholds) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + if b, mErr := json.Marshal(defaultCfg); mErr == nil { + _ = s.settingRepo.Set(ctx, SettingKeyOpsMetricThresholds, string(b)) + } + return defaultCfg, nil + } + return nil, err + } + + cfg := &OpsMetricThresholds{} + if err := json.Unmarshal([]byte(raw), cfg); err != nil { + return defaultCfg, nil + } + + return cfg, nil +} + +func (s *OpsService) UpdateMetricThresholds(ctx context.Context, cfg *OpsMetricThresholds) (*OpsMetricThresholds, error) { + if s == nil || s.settingRepo == nil { + return nil, errors.New("setting repository not initialized") + } + if ctx == nil { + ctx = context.Background() + } + if cfg == nil { + return nil, errors.New("invalid config") + } + + // Validate thresholds + if cfg.SLAPercentMin != nil && (*cfg.SLAPercentMin < 0 || *cfg.SLAPercentMin > 100) { + return nil, errors.New("sla_percent_min must be between 0 and 100") + } + if cfg.TTFTp99MsMax != nil && *cfg.TTFTp99MsMax < 0 { + return nil, errors.New("ttft_p99_ms_max must be >= 0") + } + if cfg.RequestErrorRatePercentMax != nil && 
(*cfg.RequestErrorRatePercentMax < 0 || *cfg.RequestErrorRatePercentMax > 100) { + return nil, errors.New("request_error_rate_percent_max must be between 0 and 100") + } + if cfg.UpstreamErrorRatePercentMax != nil && (*cfg.UpstreamErrorRatePercentMax < 0 || *cfg.UpstreamErrorRatePercentMax > 100) { + return nil, errors.New("upstream_error_rate_percent_max must be between 0 and 100") + } + + raw, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + if err := s.settingRepo.Set(ctx, SettingKeyOpsMetricThresholds, string(raw)); err != nil { + return nil, err + } + + updated := &OpsMetricThresholds{} + _ = json.Unmarshal(raw, updated) + return updated, nil +} diff --git a/backend/internal/service/ops_settings_models.go b/backend/internal/service/ops_settings_models.go new file mode 100644 index 00000000..df06f578 --- /dev/null +++ b/backend/internal/service/ops_settings_models.go @@ -0,0 +1,100 @@ +package service + +// Ops settings models stored in DB `settings` table (JSON blobs). 
+ +type OpsEmailNotificationConfig struct { + Alert OpsEmailAlertConfig `json:"alert"` + Report OpsEmailReportConfig `json:"report"` +} + +type OpsEmailAlertConfig struct { + Enabled bool `json:"enabled"` + Recipients []string `json:"recipients"` + MinSeverity string `json:"min_severity"` + RateLimitPerHour int `json:"rate_limit_per_hour"` + BatchingWindowSeconds int `json:"batching_window_seconds"` + IncludeResolvedAlerts bool `json:"include_resolved_alerts"` +} + +type OpsEmailReportConfig struct { + Enabled bool `json:"enabled"` + Recipients []string `json:"recipients"` + DailySummaryEnabled bool `json:"daily_summary_enabled"` + DailySummarySchedule string `json:"daily_summary_schedule"` + WeeklySummaryEnabled bool `json:"weekly_summary_enabled"` + WeeklySummarySchedule string `json:"weekly_summary_schedule"` + ErrorDigestEnabled bool `json:"error_digest_enabled"` + ErrorDigestSchedule string `json:"error_digest_schedule"` + ErrorDigestMinCount int `json:"error_digest_min_count"` + AccountHealthEnabled bool `json:"account_health_enabled"` + AccountHealthSchedule string `json:"account_health_schedule"` + AccountHealthErrorRateThreshold float64 `json:"account_health_error_rate_threshold"` +} + +// OpsEmailNotificationConfigUpdateRequest allows partial updates, while the +// frontend can still send the full config shape. 
+type OpsEmailNotificationConfigUpdateRequest struct { + Alert *OpsEmailAlertConfig `json:"alert"` + Report *OpsEmailReportConfig `json:"report"` +} + +type OpsDistributedLockSettings struct { + Enabled bool `json:"enabled"` + Key string `json:"key"` + TTLSeconds int `json:"ttl_seconds"` +} + +type OpsAlertSilenceEntry struct { + RuleID *int64 `json:"rule_id,omitempty"` + Severities []string `json:"severities,omitempty"` + + UntilRFC3339 string `json:"until_rfc3339"` + Reason string `json:"reason"` +} + +type OpsAlertSilencingSettings struct { + Enabled bool `json:"enabled"` + + GlobalUntilRFC3339 string `json:"global_until_rfc3339"` + GlobalReason string `json:"global_reason"` + + Entries []OpsAlertSilenceEntry `json:"entries,omitempty"` +} + +type OpsMetricThresholds struct { + SLAPercentMin *float64 `json:"sla_percent_min,omitempty"` // SLA低于此值变红 + TTFTp99MsMax *float64 `json:"ttft_p99_ms_max,omitempty"` // TTFT P99高于此值变红 + RequestErrorRatePercentMax *float64 `json:"request_error_rate_percent_max,omitempty"` // 请求错误率高于此值变红 + UpstreamErrorRatePercentMax *float64 `json:"upstream_error_rate_percent_max,omitempty"` // 上游错误率高于此值变红 +} + +type OpsAlertRuntimeSettings struct { + EvaluationIntervalSeconds int `json:"evaluation_interval_seconds"` + + DistributedLock OpsDistributedLockSettings `json:"distributed_lock"` + Silencing OpsAlertSilencingSettings `json:"silencing"` + Thresholds OpsMetricThresholds `json:"thresholds"` // 指标阈值配置 +} + +// OpsAdvancedSettings stores advanced ops configuration (data retention, aggregation). 
+type OpsAdvancedSettings struct { + DataRetention OpsDataRetentionSettings `json:"data_retention"` + Aggregation OpsAggregationSettings `json:"aggregation"` + IgnoreCountTokensErrors bool `json:"ignore_count_tokens_errors"` + IgnoreContextCanceled bool `json:"ignore_context_canceled"` + IgnoreNoAvailableAccounts bool `json:"ignore_no_available_accounts"` + AutoRefreshEnabled bool `json:"auto_refresh_enabled"` + AutoRefreshIntervalSec int `json:"auto_refresh_interval_seconds"` +} + +type OpsDataRetentionSettings struct { + CleanupEnabled bool `json:"cleanup_enabled"` + CleanupSchedule string `json:"cleanup_schedule"` + ErrorLogRetentionDays int `json:"error_log_retention_days"` + MinuteMetricsRetentionDays int `json:"minute_metrics_retention_days"` + HourlyMetricsRetentionDays int `json:"hourly_metrics_retention_days"` +} + +type OpsAggregationSettings struct { + AggregationEnabled bool `json:"aggregation_enabled"` +} diff --git a/backend/internal/service/ops_trend_models.go b/backend/internal/service/ops_trend_models.go new file mode 100644 index 00000000..f6d07c14 --- /dev/null +++ b/backend/internal/service/ops_trend_models.go @@ -0,0 +1,65 @@ +package service + +import "time" + +type OpsThroughputTrendPoint struct { + BucketStart time.Time `json:"bucket_start"` + RequestCount int64 `json:"request_count"` + TokenConsumed int64 `json:"token_consumed"` + QPS float64 `json:"qps"` + TPS float64 `json:"tps"` +} + +type OpsThroughputPlatformBreakdownItem struct { + Platform string `json:"platform"` + RequestCount int64 `json:"request_count"` + TokenConsumed int64 `json:"token_consumed"` +} + +type OpsThroughputGroupBreakdownItem struct { + GroupID int64 `json:"group_id"` + GroupName string `json:"group_name"` + RequestCount int64 `json:"request_count"` + TokenConsumed int64 `json:"token_consumed"` +} + +type OpsThroughputTrendResponse struct { + Bucket string `json:"bucket"` + + Points []*OpsThroughputTrendPoint `json:"points"` + + // Optional drilldown helpers: + // 
- When no platform/group is selected: returns totals by platform. + // - When platform is selected but group is not: returns top groups in that platform. + ByPlatform []*OpsThroughputPlatformBreakdownItem `json:"by_platform,omitempty"` + TopGroups []*OpsThroughputGroupBreakdownItem `json:"top_groups,omitempty"` +} + +type OpsErrorTrendPoint struct { + BucketStart time.Time `json:"bucket_start"` + + ErrorCountTotal int64 `json:"error_count_total"` + BusinessLimitedCount int64 `json:"business_limited_count"` + ErrorCountSLA int64 `json:"error_count_sla"` + + UpstreamErrorCountExcl429529 int64 `json:"upstream_error_count_excl_429_529"` + Upstream429Count int64 `json:"upstream_429_count"` + Upstream529Count int64 `json:"upstream_529_count"` +} + +type OpsErrorTrendResponse struct { + Bucket string `json:"bucket"` + Points []*OpsErrorTrendPoint `json:"points"` +} + +type OpsErrorDistributionItem struct { + StatusCode int `json:"status_code"` + Total int64 `json:"total"` + SLA int64 `json:"sla"` + BusinessLimited int64 `json:"business_limited"` +} + +type OpsErrorDistributionResponse struct { + Total int64 `json:"total"` + Items []*OpsErrorDistributionItem `json:"items"` +} diff --git a/backend/internal/service/ops_trends.go b/backend/internal/service/ops_trends.go new file mode 100644 index 00000000..ec55c6ce --- /dev/null +++ b/backend/internal/service/ops_trends.go @@ -0,0 +1,26 @@ +package service + +import ( + "context" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +func (s *OpsService) GetThroughputTrend(ctx context.Context, filter *OpsDashboardFilter, bucketSeconds int) (*OpsThroughputTrendResponse, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + if filter == nil { + return nil, infraerrors.BadRequest("OPS_FILTER_REQUIRED", "filter is required") + } + if 
filter.StartTime.IsZero() || filter.EndTime.IsZero() { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_REQUIRED", "start_time/end_time are required") + } + if filter.StartTime.After(filter.EndTime) { + return nil, infraerrors.BadRequest("OPS_TIME_RANGE_INVALID", "start_time must be <= end_time") + } + return s.opsRepo.GetThroughputTrend(ctx, filter, bucketSeconds) +} diff --git a/backend/internal/service/ops_upstream_context.go b/backend/internal/service/ops_upstream_context.go new file mode 100644 index 00000000..96bcc9fe --- /dev/null +++ b/backend/internal/service/ops_upstream_context.go @@ -0,0 +1,131 @@ +package service + +import ( + "encoding/json" + "strings" + "time" + + "github.com/gin-gonic/gin" +) + +// Gin context keys used by Ops error logger for capturing upstream error details. +// These keys are set by gateway services and consumed by handler/ops_error_logger.go. +const ( + OpsUpstreamStatusCodeKey = "ops_upstream_status_code" + OpsUpstreamErrorMessageKey = "ops_upstream_error_message" + OpsUpstreamErrorDetailKey = "ops_upstream_error_detail" + OpsUpstreamErrorsKey = "ops_upstream_errors" + + // Best-effort capture of the current upstream request body so ops can + // retry the specific upstream attempt (not just the client request). + // This value is sanitized+trimmed before being persisted. + OpsUpstreamRequestBodyKey = "ops_upstream_request_body" +) + +func setOpsUpstreamError(c *gin.Context, upstreamStatusCode int, upstreamMessage, upstreamDetail string) { + if c == nil { + return + } + if upstreamStatusCode > 0 { + c.Set(OpsUpstreamStatusCodeKey, upstreamStatusCode) + } + if msg := strings.TrimSpace(upstreamMessage); msg != "" { + c.Set(OpsUpstreamErrorMessageKey, msg) + } + if detail := strings.TrimSpace(upstreamDetail); detail != "" { + c.Set(OpsUpstreamErrorDetailKey, detail) + } +} + +// OpsUpstreamErrorEvent describes one upstream error attempt during a single gateway request. 
+// It is stored in ops_error_logs.upstream_errors as a JSON array. +type OpsUpstreamErrorEvent struct { + AtUnixMs int64 `json:"at_unix_ms,omitempty"` + + // Context + Platform string `json:"platform,omitempty"` + AccountID int64 `json:"account_id,omitempty"` + AccountName string `json:"account_name,omitempty"` + + // Outcome + UpstreamStatusCode int `json:"upstream_status_code,omitempty"` + UpstreamRequestID string `json:"upstream_request_id,omitempty"` + + // Best-effort upstream request capture (sanitized+trimmed). + // Required for retrying a specific upstream attempt. + UpstreamRequestBody string `json:"upstream_request_body,omitempty"` + + // Best-effort upstream response capture (sanitized+trimmed). + UpstreamResponseBody string `json:"upstream_response_body,omitempty"` + + // Kind: http_error | request_error | retry_exhausted | failover + Kind string `json:"kind,omitempty"` + + Message string `json:"message,omitempty"` + Detail string `json:"detail,omitempty"` +} + +func appendOpsUpstreamError(c *gin.Context, ev OpsUpstreamErrorEvent) { + if c == nil { + return + } + if ev.AtUnixMs <= 0 { + ev.AtUnixMs = time.Now().UnixMilli() + } + ev.Platform = strings.TrimSpace(ev.Platform) + ev.UpstreamRequestID = strings.TrimSpace(ev.UpstreamRequestID) + ev.UpstreamRequestBody = strings.TrimSpace(ev.UpstreamRequestBody) + ev.UpstreamResponseBody = strings.TrimSpace(ev.UpstreamResponseBody) + ev.Kind = strings.TrimSpace(ev.Kind) + ev.Message = strings.TrimSpace(ev.Message) + ev.Detail = strings.TrimSpace(ev.Detail) + if ev.Message != "" { + ev.Message = sanitizeUpstreamErrorMessage(ev.Message) + } + + // If the caller didn't explicitly pass upstream request body but the gateway + // stored it on the context, attach it so ops can retry this specific attempt. 
+ if ev.UpstreamRequestBody == "" { + if v, ok := c.Get(OpsUpstreamRequestBodyKey); ok { + if s, ok := v.(string); ok { + ev.UpstreamRequestBody = strings.TrimSpace(s) + } + } + } + + var existing []*OpsUpstreamErrorEvent + if v, ok := c.Get(OpsUpstreamErrorsKey); ok { + if arr, ok := v.([]*OpsUpstreamErrorEvent); ok { + existing = arr + } + } + + evCopy := ev + existing = append(existing, &evCopy) + c.Set(OpsUpstreamErrorsKey, existing) +} + +func marshalOpsUpstreamErrors(events []*OpsUpstreamErrorEvent) *string { + if len(events) == 0 { + return nil + } + // Ensure we always store a valid JSON value. + raw, err := json.Marshal(events) + if err != nil || len(raw) == 0 { + return nil + } + s := string(raw) + return &s +} + +func ParseOpsUpstreamErrors(raw string) ([]*OpsUpstreamErrorEvent, error) { + raw = strings.TrimSpace(raw) + if raw == "" { + return []*OpsUpstreamErrorEvent{}, nil + } + var out []*OpsUpstreamErrorEvent + if err := json.Unmarshal([]byte(raw), &out); err != nil { + return nil, err + } + return out, nil +} diff --git a/backend/internal/service/ops_window_stats.go b/backend/internal/service/ops_window_stats.go new file mode 100644 index 00000000..71021d15 --- /dev/null +++ b/backend/internal/service/ops_window_stats.go @@ -0,0 +1,24 @@ +package service + +import ( + "context" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +// GetWindowStats returns lightweight request/token counts for the provided window. +// It is intended for realtime sampling (e.g. WebSocket QPS push) without computing percentiles/peaks. 
+func (s *OpsService) GetWindowStats(ctx context.Context, startTime, endTime time.Time) (*OpsWindowStats, error) { + if err := s.RequireMonitoringEnabled(ctx); err != nil { + return nil, err + } + if s.opsRepo == nil { + return nil, infraerrors.ServiceUnavailable("OPS_REPO_UNAVAILABLE", "Ops repository not available") + } + filter := &OpsDashboardFilter{ + StartTime: startTime, + EndTime: endTime, + } + return s.opsRepo.GetWindowStats(ctx, filter) +} diff --git a/backend/internal/service/promo_code.go b/backend/internal/service/promo_code.go new file mode 100644 index 00000000..94e733a8 --- /dev/null +++ b/backend/internal/service/promo_code.go @@ -0,0 +1,73 @@ +package service + +import ( + "time" +) + +// PromoCode 注册优惠码 +type PromoCode struct { + ID int64 + Code string + BonusAmount float64 + MaxUses int + UsedCount int + Status string + ExpiresAt *time.Time + Notes string + CreatedAt time.Time + UpdatedAt time.Time + + // 关联 + UsageRecords []PromoCodeUsage +} + +// PromoCodeUsage 优惠码使用记录 +type PromoCodeUsage struct { + ID int64 + PromoCodeID int64 + UserID int64 + BonusAmount float64 + UsedAt time.Time + + // 关联 + PromoCode *PromoCode + User *User +} + +// CanUse 检查优惠码是否可用 +func (p *PromoCode) CanUse() bool { + if p.Status != PromoCodeStatusActive { + return false + } + if p.ExpiresAt != nil && time.Now().After(*p.ExpiresAt) { + return false + } + if p.MaxUses > 0 && p.UsedCount >= p.MaxUses { + return false + } + return true +} + +// IsExpired 检查是否已过期 +func (p *PromoCode) IsExpired() bool { + return p.ExpiresAt != nil && time.Now().After(*p.ExpiresAt) +} + +// CreatePromoCodeInput 创建优惠码输入 +type CreatePromoCodeInput struct { + Code string + BonusAmount float64 + MaxUses int + ExpiresAt *time.Time + Notes string +} + +// UpdatePromoCodeInput 更新优惠码输入 +type UpdatePromoCodeInput struct { + Code *string + BonusAmount *float64 + MaxUses *int + Status *string + ExpiresAt *time.Time + Notes *string +} diff --git a/backend/internal/service/promo_code_repository.go 
b/backend/internal/service/promo_code_repository.go new file mode 100644 index 00000000..f55f9a6b --- /dev/null +++ b/backend/internal/service/promo_code_repository.go @@ -0,0 +1,30 @@ +package service + +import ( + "context" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +// PromoCodeRepository 优惠码仓储接口 +type PromoCodeRepository interface { + // 基础 CRUD + Create(ctx context.Context, code *PromoCode) error + GetByID(ctx context.Context, id int64) (*PromoCode, error) + GetByCode(ctx context.Context, code string) (*PromoCode, error) + GetByCodeForUpdate(ctx context.Context, code string) (*PromoCode, error) // 带行锁的查询,用于并发控制 + Update(ctx context.Context, code *PromoCode) error + Delete(ctx context.Context, id int64) error + + // 列表查询 + List(ctx context.Context, params pagination.PaginationParams) ([]PromoCode, *pagination.PaginationResult, error) + ListWithFilters(ctx context.Context, params pagination.PaginationParams, status, search string) ([]PromoCode, *pagination.PaginationResult, error) + + // 使用记录 + CreateUsage(ctx context.Context, usage *PromoCodeUsage) error + GetUsageByPromoCodeAndUser(ctx context.Context, promoCodeID, userID int64) (*PromoCodeUsage, error) + ListUsagesByPromoCode(ctx context.Context, promoCodeID int64, params pagination.PaginationParams) ([]PromoCodeUsage, *pagination.PaginationResult, error) + + // 计数操作 + IncrementUsedCount(ctx context.Context, id int64) error +} diff --git a/backend/internal/service/promo_service.go b/backend/internal/service/promo_service.go new file mode 100644 index 00000000..5ff63bdc --- /dev/null +++ b/backend/internal/service/promo_service.go @@ -0,0 +1,268 @@ +package service + +import ( + "context" + "crypto/rand" + "encoding/hex" + "fmt" + "strings" + "time" + + dbent "github.com/Wei-Shaw/sub2api/ent" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" +) + +var ( + ErrPromoCodeNotFound = infraerrors.NotFound("PROMO_CODE_NOT_FOUND", 
"promo code not found") + ErrPromoCodeExpired = infraerrors.BadRequest("PROMO_CODE_EXPIRED", "promo code has expired") + ErrPromoCodeDisabled = infraerrors.BadRequest("PROMO_CODE_DISABLED", "promo code is disabled") + ErrPromoCodeMaxUsed = infraerrors.BadRequest("PROMO_CODE_MAX_USED", "promo code has reached maximum uses") + ErrPromoCodeAlreadyUsed = infraerrors.Conflict("PROMO_CODE_ALREADY_USED", "you have already used this promo code") + ErrPromoCodeInvalid = infraerrors.BadRequest("PROMO_CODE_INVALID", "invalid promo code") +) + +// PromoService 优惠码服务 +type PromoService struct { + promoRepo PromoCodeRepository + userRepo UserRepository + billingCacheService *BillingCacheService + entClient *dbent.Client + authCacheInvalidator APIKeyAuthCacheInvalidator +} + +// NewPromoService 创建优惠码服务实例 +func NewPromoService( + promoRepo PromoCodeRepository, + userRepo UserRepository, + billingCacheService *BillingCacheService, + entClient *dbent.Client, + authCacheInvalidator APIKeyAuthCacheInvalidator, +) *PromoService { + return &PromoService{ + promoRepo: promoRepo, + userRepo: userRepo, + billingCacheService: billingCacheService, + entClient: entClient, + authCacheInvalidator: authCacheInvalidator, + } +} + +// ValidatePromoCode 验证优惠码(注册前调用) +// 返回 nil, nil 表示空码(不报错) +func (s *PromoService) ValidatePromoCode(ctx context.Context, code string) (*PromoCode, error) { + code = strings.TrimSpace(code) + if code == "" { + return nil, nil // 空码不报错,直接返回 + } + + promoCode, err := s.promoRepo.GetByCode(ctx, code) + if err != nil { + // 保留原始错误类型,不要统一映射为 NotFound + return nil, err + } + + if err := s.validatePromoCodeStatus(promoCode); err != nil { + return nil, err + } + + return promoCode, nil +} + +// validatePromoCodeStatus 验证优惠码状态 +func (s *PromoService) validatePromoCodeStatus(promoCode *PromoCode) error { + if !promoCode.CanUse() { + if promoCode.IsExpired() { + return ErrPromoCodeExpired + } + if promoCode.Status == PromoCodeStatusDisabled { + return ErrPromoCodeDisabled + } + 
if promoCode.MaxUses > 0 && promoCode.UsedCount >= promoCode.MaxUses { + return ErrPromoCodeMaxUsed + } + return ErrPromoCodeInvalid + } + return nil +} + +// ApplyPromoCode 应用优惠码(注册成功后调用) +// 使用事务和行锁确保并发安全 +func (s *PromoService) ApplyPromoCode(ctx context.Context, userID int64, code string) error { + code = strings.TrimSpace(code) + if code == "" { + return nil + } + + // 开启事务 + tx, err := s.entClient.Tx(ctx) + if err != nil { + return fmt.Errorf("begin transaction: %w", err) + } + defer func() { _ = tx.Rollback() }() + + txCtx := dbent.NewTxContext(ctx, tx) + + // 在事务中获取并锁定优惠码记录(FOR UPDATE) + promoCode, err := s.promoRepo.GetByCodeForUpdate(txCtx, code) + if err != nil { + return err + } + + // 在事务中验证优惠码状态 + if err := s.validatePromoCodeStatus(promoCode); err != nil { + return err + } + + // 在事务中检查用户是否已使用过此优惠码 + existing, err := s.promoRepo.GetUsageByPromoCodeAndUser(txCtx, promoCode.ID, userID) + if err != nil { + return fmt.Errorf("check existing usage: %w", err) + } + if existing != nil { + return ErrPromoCodeAlreadyUsed + } + + // 增加用户余额 + if err := s.userRepo.UpdateBalance(txCtx, userID, promoCode.BonusAmount); err != nil { + return fmt.Errorf("update user balance: %w", err) + } + + // 创建使用记录 + usage := &PromoCodeUsage{ + PromoCodeID: promoCode.ID, + UserID: userID, + BonusAmount: promoCode.BonusAmount, + UsedAt: time.Now(), + } + if err := s.promoRepo.CreateUsage(txCtx, usage); err != nil { + return fmt.Errorf("create usage record: %w", err) + } + + // 增加使用次数 + if err := s.promoRepo.IncrementUsedCount(txCtx, promoCode.ID); err != nil { + return fmt.Errorf("increment used count: %w", err) + } + + if err := tx.Commit(); err != nil { + return fmt.Errorf("commit transaction: %w", err) + } + + s.invalidatePromoCaches(ctx, userID, promoCode.BonusAmount) + + // 失效余额缓存 + if s.billingCacheService != nil { + go func() { + cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _ = 
s.billingCacheService.InvalidateUserBalance(cacheCtx, userID) + }() + } + + return nil +} + +func (s *PromoService) invalidatePromoCaches(ctx context.Context, userID int64, bonusAmount float64) { + if bonusAmount == 0 || s.authCacheInvalidator == nil { + return + } + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) +} + +// GenerateRandomCode 生成随机优惠码 +func (s *PromoService) GenerateRandomCode() (string, error) { + bytes := make([]byte, 8) + if _, err := rand.Read(bytes); err != nil { + return "", fmt.Errorf("generate random bytes: %w", err) + } + return strings.ToUpper(hex.EncodeToString(bytes)), nil +} + +// Create 创建优惠码 +func (s *PromoService) Create(ctx context.Context, input *CreatePromoCodeInput) (*PromoCode, error) { + code := strings.TrimSpace(input.Code) + if code == "" { + // 自动生成 + var err error + code, err = s.GenerateRandomCode() + if err != nil { + return nil, err + } + } + + promoCode := &PromoCode{ + Code: strings.ToUpper(code), + BonusAmount: input.BonusAmount, + MaxUses: input.MaxUses, + UsedCount: 0, + Status: PromoCodeStatusActive, + ExpiresAt: input.ExpiresAt, + Notes: input.Notes, + } + + if err := s.promoRepo.Create(ctx, promoCode); err != nil { + return nil, fmt.Errorf("create promo code: %w", err) + } + + return promoCode, nil +} + +// GetByID 根据ID获取优惠码 +func (s *PromoService) GetByID(ctx context.Context, id int64) (*PromoCode, error) { + code, err := s.promoRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + return code, nil +} + +// Update 更新优惠码 +func (s *PromoService) Update(ctx context.Context, id int64, input *UpdatePromoCodeInput) (*PromoCode, error) { + promoCode, err := s.promoRepo.GetByID(ctx, id) + if err != nil { + return nil, err + } + + if input.Code != nil { + promoCode.Code = strings.ToUpper(strings.TrimSpace(*input.Code)) + } + if input.BonusAmount != nil { + promoCode.BonusAmount = *input.BonusAmount + } + if input.MaxUses != nil { + promoCode.MaxUses = *input.MaxUses + } + if input.Status != nil { 
+ promoCode.Status = *input.Status + } + if input.ExpiresAt != nil { + promoCode.ExpiresAt = input.ExpiresAt + } + if input.Notes != nil { + promoCode.Notes = *input.Notes + } + + if err := s.promoRepo.Update(ctx, promoCode); err != nil { + return nil, fmt.Errorf("update promo code: %w", err) + } + + return promoCode, nil +} + +// Delete 删除优惠码 +func (s *PromoService) Delete(ctx context.Context, id int64) error { + if err := s.promoRepo.Delete(ctx, id); err != nil { + return fmt.Errorf("delete promo code: %w", err) + } + return nil +} + +// List 获取优惠码列表 +func (s *PromoService) List(ctx context.Context, params pagination.PaginationParams, status, search string) ([]PromoCode, *pagination.PaginationResult, error) { + return s.promoRepo.ListWithFilters(ctx, params, status, search) +} + +// ListUsages 获取使用记录 +func (s *PromoService) ListUsages(ctx context.Context, promoCodeID int64, params pagination.PaginationParams) ([]PromoCodeUsage, *pagination.PaginationResult, error) { + return s.promoRepo.ListUsagesByPromoCode(ctx, promoCodeID, params) +} diff --git a/backend/internal/service/prompts/codex_cli_instructions.md b/backend/internal/service/prompts/codex_cli_instructions.md new file mode 100644 index 00000000..4886c7ef --- /dev/null +++ b/backend/internal/service/prompts/codex_cli_instructions.md @@ -0,0 +1,275 @@ +You are a coding agent running in the Codex CLI, a terminal-based coding assistant. Codex CLI is an open source project led by OpenAI. You are expected to be precise, safe, and helpful. + +Your capabilities: + +- Receive user prompts and other context provided by the harness, such as files in the workspace. +- Communicate with the user by streaming thinking & responses, and by making & updating plans. +- Emit function calls to run terminal commands and apply patches. Depending on how this specific run is configured, you can request that these function calls be escalated to the user for approval before running. 
More on this in the "Sandbox and approvals" section. + +Within this context, Codex refers to the open-source agentic coding interface (not the old Codex language model built by OpenAI). + +# How you work + +## Personality + +Your default personality and tone is concise, direct, and friendly. You communicate efficiently, always keeping the user clearly informed about ongoing actions without unnecessary detail. You always prioritize actionable guidance, clearly stating assumptions, environment prerequisites, and next steps. Unless explicitly asked, you avoid excessively verbose explanations about your work. + +# AGENTS.md spec +- Repos often contain AGENTS.md files. These files can appear anywhere within the repository. +- These files are a way for humans to give you (the agent) instructions or tips for working within the container. +- Some examples might be: coding conventions, info about how code is organized, or instructions for how to run or test code. +- Instructions in AGENTS.md files: + - The scope of an AGENTS.md file is the entire directory tree rooted at the folder that contains it. + - For every file you touch in the final patch, you must obey instructions in any AGENTS.md file whose scope includes that file. + - Instructions about code style, structure, naming, etc. apply only to code within the AGENTS.md file's scope, unless the file states otherwise. + - More-deeply-nested AGENTS.md files take precedence in the case of conflicting instructions. + - Direct system/developer/user instructions (as part of a prompt) take precedence over AGENTS.md instructions. +- The contents of the AGENTS.md file at the root of the repo and any directories from the CWD up to the root are included with the developer message and don't need to be re-read. When working in a subdirectory of CWD, or a directory outside the CWD, check for any AGENTS.md files that may be applicable. 
+
+## Responsiveness
+
+### Preamble messages
+
+Before making tool calls, send a brief preamble to the user explaining what you’re about to do. When sending preamble messages, follow these principles and examples:
+
+- **Logically group related actions**: if you’re about to run several related commands, describe them together in one preamble rather than sending a separate note for each.
+- **Keep it concise**: be no more than 1-2 sentences, focused on immediate, tangible next steps. (8–12 words for quick updates).
+- **Build on prior context**: if this is not your first tool call, use the preamble message to connect the dots with what’s been done so far and create a sense of momentum and clarity for the user to understand your next actions.
+- **Keep your tone light, friendly and curious**: adding small touches of personality to preambles makes them feel collaborative and engaging.
+- **Exception**: Avoid adding a preamble for every trivial read (e.g., `cat` a single file) unless it’s part of a larger grouped action.
+
+**Examples:**
+
+- “I’ve explored the repo; now checking the API route definitions.”
+- “Next, I’ll patch the config and update the related tests.”
+- “I’m about to scaffold the CLI commands and helper functions.”
+- “Ok cool, so I’ve wrapped my head around the repo. Now digging into the API routes.”
+- “Config’s looking tidy. Next up is patching helpers to keep things in sync.”
+- “Finished poking at the DB gateway. I will now chase down error handling.”
+- “Alright, build pipeline order is interesting. Checking how it reports failures.”
+- “Spotted a clever caching util; now hunting where it gets used.”
+
+## Planning
+
+You have access to an `update_plan` tool which tracks steps and progress and renders them to the user. Using the tool helps demonstrate that you've understood the task and convey how you're approaching it. Plans can help to make complex, ambiguous, or multi-phase work clearer and more collaborative for the user. 
A good plan should break the task into meaningful, logically ordered steps that are easy to verify as you go. + +Note that plans are not for padding out simple work with filler steps or stating the obvious. The content of your plan should not involve doing anything that you aren't capable of doing (i.e. don't try to test things that you can't test). Do not use plans for simple or single-step queries that you can just do or answer immediately. + +Do not repeat the full contents of the plan after an `update_plan` call — the harness already displays it. Instead, summarize the change made and highlight any important context or next step. + +Before running a command, consider whether or not you have completed the previous step, and make sure to mark it as completed before moving on to the next step. It may be the case that you complete all steps in your plan after a single pass of implementation. If this is the case, you can simply mark all the planned steps as completed. Sometimes, you may need to change plans in the middle of a task: call `update_plan` with the updated plan and make sure to provide an `explanation` of the rationale when doing so. + +Use a plan when: + +- The task is non-trivial and will require multiple actions over a long time horizon. +- There are logical phases or dependencies where sequencing matters. +- The work has ambiguity that benefits from outlining high-level goals. +- You want intermediate checkpoints for feedback and validation. +- When the user asked you to do more than one thing in a single prompt +- The user has asked you to use the plan tool (aka "TODOs") +- You generate additional steps while working, and plan to do them before yielding to the user + +### Examples + +**High-quality plans** + +Example 1: + +1. Add CLI entry with file args +2. Parse Markdown via CommonMark library +3. Apply semantic HTML template +4. Handle code blocks, images, links +5. Add error handling for invalid files + +Example 2: + +1. 
Define CSS variables for colors +2. Add toggle with localStorage state +3. Refactor components to use variables +4. Verify all views for readability +5. Add smooth theme-change transition + +Example 3: + +1. Set up Node.js + WebSocket server +2. Add join/leave broadcast events +3. Implement messaging with timestamps +4. Add usernames + mention highlighting +5. Persist messages in lightweight DB +6. Add typing indicators + unread count + +**Low-quality plans** + +Example 1: + +1. Create CLI tool +2. Add Markdown parser +3. Convert to HTML + +Example 2: + +1. Add dark mode toggle +2. Save preference +3. Make styles look good + +Example 3: + +1. Create single-file HTML game +2. Run quick sanity check +3. Summarize usage instructions + +If you need to write a plan, only write high quality plans, not low quality ones. + +## Task execution + +You are a coding agent. Please keep going until the query is completely resolved, before ending your turn and yielding back to the user. Only terminate your turn when you are sure that the problem is solved. Autonomously resolve the query to the best of your ability, using the tools available to you, before coming back to the user. Do NOT guess or make up an answer. + +You MUST adhere to the following criteria when solving queries: + +- Working on the repo(s) in the current environment is allowed, even if they are proprietary. +- Analyzing code for vulnerabilities is allowed. +- Showing user code and tool call details is allowed. +- Use the `apply_patch` tool to edit files (NEVER try `applypatch` or `apply-patch`, only `apply_patch`): {"command":["apply_patch","*** Begin Patch\\n*** Update File: path/to/file.py\\n@@ def example():\\n- pass\\n+ return 123\\n*** End Patch"]} + +If completing the user's task requires writing or modifying files, your code and final answer should follow these coding guidelines, though user instructions (i.e. 
AGENTS.md) may override these guidelines: + +- Fix the problem at the root cause rather than applying surface-level patches, when possible. +- Avoid unneeded complexity in your solution. +- Do not attempt to fix unrelated bugs or broken tests. It is not your responsibility to fix them. (You may mention them to the user in your final message though.) +- Update documentation as necessary. +- Keep changes consistent with the style of the existing codebase. Changes should be minimal and focused on the task. +- Use `git log` and `git blame` to search the history of the codebase if additional context is required. +- NEVER add copyright or license headers unless specifically requested. +- Do not waste tokens by re-reading files after calling `apply_patch` on them. The tool call will fail if it didn't work. The same goes for making folders, deleting folders, etc. +- Do not `git commit` your changes or create new git branches unless explicitly requested. +- Do not add inline comments within code unless explicitly requested. +- Do not use one-letter variable names unless explicitly requested. +- NEVER output inline citations like "【F:README.md†L5-L14】" in your outputs. The CLI is not able to render these so they will just be broken in the UI. Instead, if you output valid filepaths, users will be able to click on them to open the files in their editor. + +## Validating your work + +If the codebase has tests or the ability to build or run, consider using them to verify that your work is complete. + +When testing, your philosophy should be to start as specific as possible to the code you changed so that you can catch issues efficiently, then make your way to broader tests as you build confidence. If there's no test for the code you changed, and if the adjacent patterns in the codebases show that there's a logical place for you to add a test, you may do so. However, do not add tests to codebases with no tests. 
+ +Similarly, once you're confident in correctness, you can suggest or use formatting commands to ensure that your code is well formatted. If there are issues you can iterate up to 3 times to get formatting right, but if you still can't manage it's better to save the user time and present them a correct solution where you call out the formatting in your final message. If the codebase does not have a formatter configured, do not add one. + +For all of testing, running, building, and formatting, do not attempt to fix unrelated bugs. It is not your responsibility to fix them. (You may mention them to the user in your final message though.) + +Be mindful of whether to run validation commands proactively. In the absence of behavioral guidance: + +- When running in non-interactive approval modes like **never** or **on-failure**, proactively run tests, lint and do whatever you need to ensure you've completed the task. +- When working in interactive approval modes like **untrusted**, or **on-request**, hold off on running tests or lint commands until the user is ready for you to finalize your output, because these commands take time to run and slow down iteration. Instead suggest what you want to do next, and let the user confirm first. +- When working on test-related tasks, such as adding tests, fixing tests, or reproducing a bug to verify behavior, you may proactively run tests regardless of approval mode. Use your judgement to decide whether this is a test-related task. + +## Ambition vs. precision + +For tasks that have no prior context (i.e. the user is starting something brand new), you should feel free to be ambitious and demonstrate creativity with your implementation. + +If you're operating in an existing codebase, you should make sure you do exactly what the user asks with surgical precision. Treat the surrounding codebase with respect, and don't overstep (i.e. changing filenames or variables unnecessarily). 
You should balance being sufficiently ambitious and proactive when completing tasks of this nature. + +You should use judicious initiative to decide on the right level of detail and complexity to deliver based on the user's needs. This means showing good judgment that you're capable of doing the right extras without gold-plating. This might be demonstrated by high-value, creative touches when scope of the task is vague; while being surgical and targeted when scope is tightly specified. + +## Sharing progress updates + +For especially longer tasks that you work on (i.e. requiring many tool calls, or a plan with multiple steps), you should provide progress updates back to the user at reasonable intervals. These updates should be structured as a concise sentence or two (no more than 8-10 words long) recapping progress so far in plain language: this update demonstrates your understanding of what needs to be done, progress so far (i.e. files explores, subtasks complete), and where you're going next. + +Before doing large chunks of work that may incur latency as experienced by the user (i.e. writing a new file), you should send a concise message to the user with an update indicating what you're about to do to ensure they know what you're spending time on. Don't start editing or writing large files before informing the user what you are doing and why. + +The messages you send before tool calls should describe what is immediately about to be done next in very concise language. If there was previous work done, this preamble message should also include a note about the work done so far to bring the user along. + +## Presenting your work and final message + +Your final message should read naturally, like an update from a concise teammate. For casual conversation, brainstorming tasks, or quick questions from the user, respond in a friendly, conversational tone. You should ask questions, suggest ideas, and adapt to the user’s style. 
If you've finished a large amount of work, when describing what you've done to the user, you should follow the final answer formatting guidelines to communicate substantive changes. You don't need to add structured formatting for one-word answers, greetings, or purely conversational exchanges. + +You can skip heavy formatting for single, simple actions or confirmations. In these cases, respond in plain sentences with any relevant next step or quick option. Reserve multi-section structured responses for results that need grouping or explanation. + +The user is working on the same computer as you, and has access to your work. As such there's no need to show the full contents of large files you have already written unless the user explicitly asks for them. Similarly, if you've created or modified files using `apply_patch`, there's no need to tell users to "save the file" or "copy the code into a file"—just reference the file path. + +If there's something that you think you could help with as a logical next step, concisely ask the user if they want you to do so. Good examples of this are running tests, committing changes, or building out the next logical component. If there’s something that you couldn't do (even with approval) but that the user might want to do (such as verifying changes by running the app), include those instructions succinctly. + +Brevity is very important as a default. You should be very concise (i.e. no more than 10 lines), but can relax this requirement for tasks where additional detail and comprehensiveness is important for the user's understanding. + +### Final answer structure and style guidelines + +You are producing plain text that will later be styled by the CLI. Follow these rules exactly. Formatting should make results easy to scan, but not feel mechanical. Use judgment to decide how much structure adds value. + +**Section Headers** + +- Use only when they improve clarity — they are not mandatory for every answer. 
+- Choose descriptive names that fit the content +- Keep headers short (1–3 words) and in `**Title Case**`. Always start headers with `**` and end with `**` +- Leave no blank line before the first bullet under a header. +- Section headers should only be used where they genuinely improve scanability; avoid fragmenting the answer. + +**Bullets** + +- Use `-` followed by a space for every bullet. +- Merge related points when possible; avoid a bullet for every trivial detail. +- Keep bullets to one line unless breaking for clarity is unavoidable. +- Group into short lists (4–6 bullets) ordered by importance. +- Use consistent keyword phrasing and formatting across sections. + +**Monospace** + +- Wrap all commands, file paths, env vars, and code identifiers in backticks (`` `...` ``). +- Apply to inline examples and to bullet keywords if the keyword itself is a literal file/command. +- Never mix monospace and bold markers; choose one based on whether it’s a keyword (`**`) or inline code/path (`` ` ``). + +**File References** +When referencing files in your response, make sure to include the relevant start line and always follow the below rules: + * Use inline code to make file paths clickable. + * Each reference should have a stand alone path. Even if it's the same file. + * Accepted: absolute, workspace‑relative, a/ or b/ diff prefixes, or bare filename/suffix. + * Line/column (1‑based, optional): :line[:column] or #Lline[Ccolumn] (column defaults to 1). + * Do not use URIs like file://, vscode://, or https://. + * Do not provide range of lines + * Examples: src/app.ts, src/app.ts:42, b/server/index.js#L10, C:\repo\project\main.rs:12:5 + +**Structure** + +- Place related bullets together; don’t mix unrelated concepts in the same section. +- Order sections from general → specific → supporting info. +- For subsections (e.g., “Binaries” under “Rust Workspace”), introduce with a bolded keyword bullet, then list items under it. 
+- Match structure to complexity: + - Multi-part or detailed results → use clear headers and grouped bullets. + - Simple results → minimal headers, possibly just a short list or paragraph. + +**Tone** + +- Keep the voice collaborative and natural, like a coding partner handing off work. +- Be concise and factual — no filler or conversational commentary and avoid unnecessary repetition +- Use present tense and active voice (e.g., “Runs tests” not “This will run tests”). +- Keep descriptions self-contained; don’t refer to “above” or “below”. +- Use parallel structure in lists for consistency. + +**Don’t** + +- Don’t use literal words “bold” or “monospace” in the content. +- Don’t nest bullets or create deep hierarchies. +- Don’t output ANSI escape codes directly — the CLI renderer applies them. +- Don’t cram unrelated keywords into a single bullet; split for clarity. +- Don’t let keyword lists run long — wrap or reformat for scanability. + +Generally, ensure your final answers adapt their shape and depth to the request. For example, answers to code explanations should have a precise, structured explanation with code references that answer the question directly. For tasks with a simple implementation, lead with the outcome and supplement only with what’s needed for clarity. Larger changes can be presented as a logical walkthrough of your approach, grouping related steps, explaining rationale where it adds value, and highlighting next actions to accelerate the user. Your answers should provide the right level of detail while being easily scannable. + +For casual greetings, acknowledgements, or other one-off conversational messages that are not delivering substantive information or structured results, respond naturally without section headers or bullet formatting. 
+ +# Tool Guidelines + +## Shell commands + +When using the shell, you must adhere to the following guidelines: + +- When searching for text or files, prefer using `rg` or `rg --files` respectively because `rg` is much faster than alternatives like `grep`. (If the `rg` command is not found, then use alternatives.) +- Do not use python scripts to attempt to output larger chunks of a file. + +## `update_plan` + +A tool named `update_plan` is available to you. You can use it to keep an up‑to‑date, step‑by‑step plan for the task. + +To create a new plan, call `update_plan` with a short list of 1‑sentence steps (no more than 5-7 words each) with a `status` for each step (`pending`, `in_progress`, or `completed`). + +When steps have been completed, use `update_plan` to mark each finished step as `completed` and the next step you are working on as `in_progress`. There should always be exactly one `in_progress` step until everything is done. You can mark multiple items as complete in a single `update_plan` call. + +If all steps are complete, ensure you call `update_plan` to mark all steps as `completed`. diff --git a/backend/internal/service/prompts/codex_opencode_bridge.txt b/backend/internal/service/prompts/codex_opencode_bridge.txt new file mode 100644 index 00000000..093aa0f2 --- /dev/null +++ b/backend/internal/service/prompts/codex_opencode_bridge.txt @@ -0,0 +1,122 @@ +# Codex Running in OpenCode + +You are running Codex through OpenCode, an open-source terminal coding assistant. OpenCode provides different tools but follows Codex operating principles. 
+ +## CRITICAL: Tool Replacements + + +❌ APPLY_PATCH DOES NOT EXIST → ✅ USE "edit" INSTEAD +- NEVER use: apply_patch, applyPatch +- ALWAYS use: edit tool for ALL file modifications +- Before modifying files: Verify you're using "edit", NOT "apply_patch" + + + +❌ UPDATE_PLAN DOES NOT EXIST → ✅ USE "todowrite" INSTEAD +- NEVER use: update_plan, updatePlan, read_plan, readPlan +- ALWAYS use: todowrite for task/plan updates, todoread to read plans +- Before plan operations: Verify you're using "todowrite", NOT "update_plan" + + +## Available OpenCode Tools + +**File Operations:** +- `write` - Create new files + - Overwriting existing files requires a prior Read in this session; default to ASCII unless the file already uses Unicode. +- `edit` - Modify existing files (REPLACES apply_patch) + - Requires a prior Read in this session; preserve exact indentation; ensure `oldString` uniquely matches or use `replaceAll`; edit fails if ambiguous or missing. +- `read` - Read file contents + +**Search/Discovery:** +- `grep` - Search file contents (tool, not bash grep); use `include` to filter patterns; set `path` only when not searching workspace root; for cross-file match counts use bash with `rg`. +- `glob` - Find files by pattern; defaults to workspace cwd unless `path` is set. +- `list` - List directories (requires absolute paths) + +**Execution:** +- `bash` - Run shell commands + - No workdir parameter; do not include it in tool calls. + - Always include a short description for the command. + - Do not use cd; use absolute paths in commands. + - Quote paths containing spaces with double quotes. + - Chain multiple commands with ';' or '&&'; avoid newlines. + - Use Grep/Glob tools for searches; only use bash with `rg` when you need counts or advanced features. + - Do not use `ls`/`cat` in bash; use `list`/`read` tools instead. + - For deletions (rm), verify by listing parent dir with `list`. 
+ +**Network:** +- `webfetch` - Fetch web content + - Use fully-formed URLs (http/https; http auto-upgrades to https). + - Always set `format` to one of: text | markdown | html; prefer markdown unless otherwise required. + - Read-only; short cache window. + +**Task Management:** +- `todowrite` - Manage tasks/plans (REPLACES update_plan) +- `todoread` - Read current plan + +## Substitution Rules + +Base instruction says: You MUST use instead: +apply_patch → edit +update_plan → todowrite +read_plan → todoread + +**Path Usage:** Use per-tool conventions to avoid conflicts: +- Tool calls: `read`, `edit`, `write`, `list` require absolute paths. +- Searches: `grep`/`glob` default to the workspace cwd; prefer relative include patterns; set `path` only when a different root is needed. +- Presentation: In assistant messages, show workspace-relative paths; use absolute paths only inside tool calls. +- Tool schema overrides general path preferences—do not convert required absolute paths to relative. + +## Verification Checklist + +Before file/plan modifications: +1. Am I using "edit" NOT "apply_patch"? +2. Am I using "todowrite" NOT "update_plan"? +3. Is this tool in the approved list above? +4. Am I following each tool's path requirements? + +If ANY answer is NO → STOP and correct before proceeding. 
+
+## OpenCode Working Style
+
+**Communication:**
+- Send brief preambles (8-12 words) before tool calls, building on prior context
+- Provide progress updates during longer tasks
+
+**Execution:**
+- Keep working autonomously until query is fully resolved before yielding
+- Don't return to user with partial solutions
+
+**Code Approach:**
+- New projects: Be ambitious and creative
+- Existing codebases: Surgical precision - modify only what's requested unless explicitly instructed to do otherwise
+
+**Testing:**
+- If tests exist: Start specific to your changes, then broader validation
+
+## Advanced Tools
+
+**Task Tool (Sub-Agents):**
+- Use the Task tool (functions.task) to launch sub-agents
+- Check the Task tool description for current agent types and their capabilities
+- Useful for complex analysis, specialized workflows, or tasks requiring isolated context
+- The agent list is dynamically generated - refer to tool schema for available agents
+
+**Parallelization:**
+- When multiple independent tool calls are needed, use multi_tool_use.parallel to run them concurrently.
+- Reserve sequential calls for ordered or data-dependent steps.
+
+**MCP Tools:**
+- Model Context Protocol servers provide additional capabilities
+- MCP tools are prefixed: `mcp__<server>__<tool>`
+- Check your available tools for MCP integrations
+- Use when the tool's functionality matches your task needs
+
+## What Remains from Codex
+
+Sandbox policies, approval mechanisms, final answer formatting, git commit protocols, and file reference formats all follow Codex instructions. In approval policy "never", never request escalations.
+
+## Approvals & Safety
+- Assume workspace-write filesystem, network enabled, approval on-failure unless explicitly stated otherwise.
+- When a command fails due to sandboxing or permissions, retry with escalated permissions if allowed by policy, including a one-line justification. 
+- Treat destructive commands (e.g., `rm`, `git reset --hard`) as requiring explicit user request or approval. +- When uncertain, prefer non-destructive verification first (e.g., confirm file existence with `list`, then delete with `bash`). \ No newline at end of file diff --git a/backend/internal/service/prompts/tool_remap_message.txt b/backend/internal/service/prompts/tool_remap_message.txt new file mode 100644 index 00000000..4ff986e1 --- /dev/null +++ b/backend/internal/service/prompts/tool_remap_message.txt @@ -0,0 +1,63 @@ + + +YOU ARE IN A DIFFERENT ENVIRONMENT. These instructions override ALL previous tool references. + + + + +❌ APPLY_PATCH DOES NOT EXIST → ✅ USE "edit" INSTEAD +- NEVER use: apply_patch, applyPatch +- ALWAYS use: edit tool for ALL file modifications +- Before modifying files: Verify you're using "edit", NOT "apply_patch" + + + +❌ UPDATE_PLAN DOES NOT EXIST → ✅ USE "todowrite" INSTEAD +- NEVER use: update_plan, updatePlan +- ALWAYS use: todowrite for ALL task/plan operations +- Use todoread to read current plan +- Before plan operations: Verify you're using "todowrite", NOT "update_plan" + + + + +File Operations: + • write - Create new files + • edit - Modify existing files (REPLACES apply_patch) + • patch - Apply diff patches + • read - Read file contents + +Search/Discovery: + • grep - Search file contents + • glob - Find files by pattern + • list - List directories (use relative paths) + +Execution: + • bash - Run shell commands + +Network: + • webfetch - Fetch web content + +Task Management: + • todowrite - Manage tasks/plans (REPLACES update_plan) + • todoread - Read current plan + + + +Base instruction says: You MUST use instead: +apply_patch → edit +update_plan → todowrite +read_plan → todoread +absolute paths → relative paths + + + +Before file/plan modifications: +1. Am I using "edit" NOT "apply_patch"? +2. Am I using "todowrite" NOT "update_plan"? +3. Is this tool in the approved list above? +4. Am I using relative paths? 
+ +If ANY answer is NO → STOP and correct before proceeding. + + \ No newline at end of file diff --git a/backend/internal/service/proxy.go b/backend/internal/service/proxy.go index 768e2a0a..7eb7728f 100644 --- a/backend/internal/service/proxy.go +++ b/backend/internal/service/proxy.go @@ -31,5 +31,21 @@ func (p *Proxy) URL() string { type ProxyWithAccountCount struct { Proxy - AccountCount int64 + AccountCount int64 + LatencyMs *int64 + LatencyStatus string + LatencyMessage string + IPAddress string + Country string + CountryCode string + Region string + City string +} + +type ProxyAccountSummary struct { + ID int64 + Name string + Platform string + Type string + Notes *string } diff --git a/backend/internal/service/proxy_latency_cache.go b/backend/internal/service/proxy_latency_cache.go new file mode 100644 index 00000000..4a1cc77b --- /dev/null +++ b/backend/internal/service/proxy_latency_cache.go @@ -0,0 +1,23 @@ +package service + +import ( + "context" + "time" +) + +type ProxyLatencyInfo struct { + Success bool `json:"success"` + LatencyMs *int64 `json:"latency_ms,omitempty"` + Message string `json:"message,omitempty"` + IPAddress string `json:"ip_address,omitempty"` + Country string `json:"country,omitempty"` + CountryCode string `json:"country_code,omitempty"` + Region string `json:"region,omitempty"` + City string `json:"city,omitempty"` + UpdatedAt time.Time `json:"updated_at"` +} + +type ProxyLatencyCache interface { + GetProxyLatencies(ctx context.Context, proxyIDs []int64) (map[int64]*ProxyLatencyInfo, error) + SetProxyLatency(ctx context.Context, proxyID int64, info *ProxyLatencyInfo) error +} diff --git a/backend/internal/service/proxy_service.go b/backend/internal/service/proxy_service.go index 58408d04..a5d897f6 100644 --- a/backend/internal/service/proxy_service.go +++ b/backend/internal/service/proxy_service.go @@ -10,6 +10,7 @@ import ( var ( ErrProxyNotFound = infraerrors.NotFound("PROXY_NOT_FOUND", "proxy not found") + ErrProxyInUse = 
infraerrors.Conflict("PROXY_IN_USE", "proxy is in use by accounts") ) type ProxyRepository interface { @@ -26,6 +27,7 @@ type ProxyRepository interface { ExistsByHostPortAuth(ctx context.Context, host string, port int, username, password string) (bool, error) CountAccountsByProxyID(ctx context.Context, proxyID int64) (int64, error) + ListAccountSummariesByProxyID(ctx context.Context, proxyID int64) ([]ProxyAccountSummary, error) } // CreateProxyRequest 创建代理请求 diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go index f1362646..47a04cf5 100644 --- a/backend/internal/service/ratelimit_service.go +++ b/backend/internal/service/ratelimit_service.go @@ -3,7 +3,7 @@ package service import ( "context" "encoding/json" - "log" + "log/slog" "net/http" "strconv" "strings" @@ -15,13 +15,16 @@ import ( // RateLimitService 处理限流和过载状态管理 type RateLimitService struct { - accountRepo AccountRepository - usageRepo UsageLogRepository - cfg *config.Config - geminiQuotaService *GeminiQuotaService - tempUnschedCache TempUnschedCache - usageCacheMu sync.RWMutex - usageCache map[int64]*geminiUsageCacheEntry + accountRepo AccountRepository + usageRepo UsageLogRepository + cfg *config.Config + geminiQuotaService *GeminiQuotaService + tempUnschedCache TempUnschedCache + timeoutCounterCache TimeoutCounterCache + settingService *SettingService + tokenCacheInvalidator TokenCacheInvalidator + usageCacheMu sync.RWMutex + usageCache map[int64]*geminiUsageCacheEntry } type geminiUsageCacheEntry struct { @@ -44,43 +47,105 @@ func NewRateLimitService(accountRepo AccountRepository, usageRepo UsageLogReposi } } +// SetTimeoutCounterCache 设置超时计数器缓存(可选依赖) +func (s *RateLimitService) SetTimeoutCounterCache(cache TimeoutCounterCache) { + s.timeoutCounterCache = cache +} + +// SetSettingService 设置系统设置服务(可选依赖) +func (s *RateLimitService) SetSettingService(settingService *SettingService) { + s.settingService = settingService +} + +// SetTokenCacheInvalidator 
设置 token 缓存清理器(可选依赖) +func (s *RateLimitService) SetTokenCacheInvalidator(invalidator TokenCacheInvalidator) { + s.tokenCacheInvalidator = invalidator +} + // HandleUpstreamError 处理上游错误响应,标记账号状态 // 返回是否应该停止该账号的调度 func (s *RateLimitService) HandleUpstreamError(ctx context.Context, account *Account, statusCode int, headers http.Header, responseBody []byte) (shouldDisable bool) { // apikey 类型账号:检查自定义错误码配置 // 如果启用且错误码不在列表中,则不处理(不停止调度、不标记限流/过载) + customErrorCodesEnabled := account.IsCustomErrorCodesEnabled() if !account.ShouldHandleErrorCode(statusCode) { - log.Printf("Account %d: error %d skipped (not in custom error codes)", account.ID, statusCode) + slog.Info("account_error_code_skipped", "account_id", account.ID, "status_code", statusCode) return false } - tempMatched := s.tryTempUnschedulable(ctx, account, statusCode, responseBody) + tempMatched := false + if statusCode != 401 { + tempMatched = s.tryTempUnschedulable(ctx, account, statusCode, responseBody) + } + upstreamMsg := strings.TrimSpace(extractUpstreamErrorMessage(responseBody)) + upstreamMsg = sanitizeUpstreamErrorMessage(upstreamMsg) + if upstreamMsg != "" { + upstreamMsg = truncateForLog([]byte(upstreamMsg), 512) + } switch statusCode { case 401: - // 认证失败:停止调度,记录错误 - s.handleAuthError(ctx, account, "Authentication failed (401): invalid or expired credentials") + // 对所有 OAuth 账号在 401 错误时调用缓存失效并强制下次刷新 + if account.Type == AccountTypeOAuth { + // 1. 失效缓存 + if s.tokenCacheInvalidator != nil { + if err := s.tokenCacheInvalidator.InvalidateToken(ctx, account); err != nil { + slog.Warn("oauth_401_invalidate_cache_failed", "account_id", account.ID, "error", err) + } + } + // 2. 
设置 expires_at 为当前时间,强制下次请求刷新 token + if account.Credentials == nil { + account.Credentials = make(map[string]any) + } + account.Credentials["expires_at"] = time.Now().Format(time.RFC3339) + if err := s.accountRepo.Update(ctx, account); err != nil { + slog.Warn("oauth_401_force_refresh_update_failed", "account_id", account.ID, "error", err) + } else { + slog.Info("oauth_401_force_refresh_set", "account_id", account.ID, "platform", account.Platform) + } + } + msg := "Authentication failed (401): invalid or expired credentials" + if upstreamMsg != "" { + msg = "Authentication failed (401): " + upstreamMsg + } + s.handleAuthError(ctx, account, msg) shouldDisable = true case 402: // 支付要求:余额不足或计费问题,停止调度 - s.handleAuthError(ctx, account, "Payment required (402): insufficient balance or billing issue") + msg := "Payment required (402): insufficient balance or billing issue" + if upstreamMsg != "" { + msg = "Payment required (402): " + upstreamMsg + } + s.handleAuthError(ctx, account, msg) shouldDisable = true case 403: // 禁止访问:停止调度,记录错误 - s.handleAuthError(ctx, account, "Access forbidden (403): account may be suspended or lack permissions") + msg := "Access forbidden (403): account may be suspended or lack permissions" + if upstreamMsg != "" { + msg = "Access forbidden (403): " + upstreamMsg + } + s.handleAuthError(ctx, account, msg) shouldDisable = true case 429: - s.handle429(ctx, account, headers) + s.handle429(ctx, account, headers, responseBody) shouldDisable = false case 529: s.handle529(ctx, account) shouldDisable = false default: - // 其他5xx错误:记录但不停止调度 - if statusCode >= 500 { - log.Printf("Account %d received upstream error %d", account.ID, statusCode) + // 自定义错误码启用时:在列表中的错误码都应该停止调度 + if customErrorCodesEnabled { + msg := "Custom error code triggered" + if upstreamMsg != "" { + msg = upstreamMsg + } + s.handleCustomErrorCode(ctx, account, statusCode, msg) + shouldDisable = true + } else if statusCode >= 500 { + // 未启用自定义错误码时:仅记录5xx错误 + 
slog.Warn("account_upstream_error", "account_id", account.ID, "status_code", statusCode) + shouldDisable = false } - shouldDisable = false } if tempMatched { @@ -125,7 +190,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, start := geminiDailyWindowStart(now) totals, ok := s.getGeminiUsageTotals(account.ID, start, now) if !ok { - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID) + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil) if err != nil { return true, err } @@ -150,7 +215,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, // NOTE: // - This is a local precheck to reduce upstream 429s. // - Do NOT mark the account as rate-limited here; rate_limit_reset_at should reflect real upstream 429s. - log.Printf("[Gemini PreCheck] Account %d reached daily quota (%d/%d), skip until %v", account.ID, used, limit, resetAt) + slog.Info("gemini_precheck_daily_quota_reached", "account_id", account.ID, "used", used, "limit", limit, "reset_at", resetAt) return false, nil } } @@ -172,7 +237,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, if limit > 0 { start := now.Truncate(time.Minute) - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID) + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil) if err != nil { return true, err } @@ -193,7 +258,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, if used >= limit { resetAt := start.Add(time.Minute) // Do not persist "rate limited" status from local precheck. See note above. 
- log.Printf("[Gemini PreCheck] Account %d reached minute quota (%d/%d), skip until %v", account.ID, used, limit, resetAt) + slog.Info("gemini_precheck_minute_quota_reached", "account_id", account.ID, "used", used, "limit", limit, "reset_at", resetAt) return false, nil } } @@ -250,22 +315,40 @@ func (s *RateLimitService) GeminiCooldown(ctx context.Context, account *Account) // handleAuthError 处理认证类错误(401/403),停止账号调度 func (s *RateLimitService) handleAuthError(ctx context.Context, account *Account, errorMsg string) { if err := s.accountRepo.SetError(ctx, account.ID, errorMsg); err != nil { - log.Printf("SetError failed for account %d: %v", account.ID, err) + slog.Warn("account_set_error_failed", "account_id", account.ID, "error", err) return } - log.Printf("Account %d disabled due to auth error: %s", account.ID, errorMsg) + slog.Warn("account_disabled_auth_error", "account_id", account.ID, "error", errorMsg) +} + +// handleCustomErrorCode 处理自定义错误码,停止账号调度 +func (s *RateLimitService) handleCustomErrorCode(ctx context.Context, account *Account, statusCode int, errorMsg string) { + msg := "Custom error code " + strconv.Itoa(statusCode) + ": " + errorMsg + if err := s.accountRepo.SetError(ctx, account.ID, msg); err != nil { + slog.Warn("account_set_error_failed", "account_id", account.ID, "status_code", statusCode, "error", err) + return + } + slog.Warn("account_disabled_custom_error", "account_id", account.ID, "status_code", statusCode, "error", errorMsg) } // handle429 处理429限流错误 // 解析响应头获取重置时间,标记账号为限流状态 -func (s *RateLimitService) handle429(ctx context.Context, account *Account, headers http.Header) { +func (s *RateLimitService) handle429(ctx context.Context, account *Account, headers http.Header, responseBody []byte) { // 解析重置时间戳 resetTimestamp := headers.Get("anthropic-ratelimit-unified-reset") if resetTimestamp == "" { // 没有重置时间,使用默认5分钟 resetAt := time.Now().Add(5 * time.Minute) + if s.shouldScopeClaudeSonnetRateLimit(account, responseBody) { + if err := 
s.accountRepo.SetModelRateLimit(ctx, account.ID, modelRateLimitScopeClaudeSonnet, resetAt); err != nil { + slog.Warn("model_rate_limit_set_failed", "account_id", account.ID, "scope", modelRateLimitScopeClaudeSonnet, "error", err) + } else { + slog.Info("account_model_rate_limited", "account_id", account.ID, "scope", modelRateLimitScopeClaudeSonnet, "reset_at", resetAt) + } + return + } if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetAt); err != nil { - log.Printf("SetRateLimited failed for account %d: %v", account.ID, err) + slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err) } return } @@ -273,19 +356,36 @@ func (s *RateLimitService) handle429(ctx context.Context, account *Account, head // 解析Unix时间戳 ts, err := strconv.ParseInt(resetTimestamp, 10, 64) if err != nil { - log.Printf("Parse reset timestamp failed: %v", err) + slog.Warn("rate_limit_reset_parse_failed", "reset_timestamp", resetTimestamp, "error", err) resetAt := time.Now().Add(5 * time.Minute) + if s.shouldScopeClaudeSonnetRateLimit(account, responseBody) { + if err := s.accountRepo.SetModelRateLimit(ctx, account.ID, modelRateLimitScopeClaudeSonnet, resetAt); err != nil { + slog.Warn("model_rate_limit_set_failed", "account_id", account.ID, "scope", modelRateLimitScopeClaudeSonnet, "error", err) + } else { + slog.Info("account_model_rate_limited", "account_id", account.ID, "scope", modelRateLimitScopeClaudeSonnet, "reset_at", resetAt) + } + return + } if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetAt); err != nil { - log.Printf("SetRateLimited failed for account %d: %v", account.ID, err) + slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err) } return } resetAt := time.Unix(ts, 0) + if s.shouldScopeClaudeSonnetRateLimit(account, responseBody) { + if err := s.accountRepo.SetModelRateLimit(ctx, account.ID, modelRateLimitScopeClaudeSonnet, resetAt); err != nil { + slog.Warn("model_rate_limit_set_failed", "account_id", account.ID, "scope", 
modelRateLimitScopeClaudeSonnet, "error", err) + return + } + slog.Info("account_model_rate_limited", "account_id", account.ID, "scope", modelRateLimitScopeClaudeSonnet, "reset_at", resetAt) + return + } + // 标记限流状态 if err := s.accountRepo.SetRateLimited(ctx, account.ID, resetAt); err != nil { - log.Printf("SetRateLimited failed for account %d: %v", account.ID, err) + slog.Warn("rate_limit_set_failed", "account_id", account.ID, "error", err) return } @@ -293,10 +393,21 @@ func (s *RateLimitService) handle429(ctx context.Context, account *Account, head windowEnd := resetAt windowStart := resetAt.Add(-5 * time.Hour) if err := s.accountRepo.UpdateSessionWindow(ctx, account.ID, &windowStart, &windowEnd, "rejected"); err != nil { - log.Printf("UpdateSessionWindow failed for account %d: %v", account.ID, err) + slog.Warn("rate_limit_update_session_window_failed", "account_id", account.ID, "error", err) } - log.Printf("Account %d rate limited until %v", account.ID, resetAt) + slog.Info("account_rate_limited", "account_id", account.ID, "reset_at", resetAt) +} + +func (s *RateLimitService) shouldScopeClaudeSonnetRateLimit(account *Account, responseBody []byte) bool { + if account == nil || account.Platform != PlatformAnthropic { + return false + } + msg := strings.ToLower(strings.TrimSpace(extractUpstreamErrorMessage(responseBody))) + if msg == "" { + return false + } + return strings.Contains(msg, "sonnet") } // handle529 处理529过载错误 @@ -309,11 +420,11 @@ func (s *RateLimitService) handle529(ctx context.Context, account *Account) { until := time.Now().Add(time.Duration(cooldownMinutes) * time.Minute) if err := s.accountRepo.SetOverloaded(ctx, account.ID, until); err != nil { - log.Printf("SetOverloaded failed for account %d: %v", account.ID, err) + slog.Warn("overload_set_failed", "account_id", account.ID, "error", err) return } - log.Printf("Account %d overloaded until %v", account.ID, until) + slog.Info("account_overloaded", "account_id", account.ID, "until", until) } // 
UpdateSessionWindow 从成功响应更新5h窗口状态 @@ -336,17 +447,17 @@ func (s *RateLimitService) UpdateSessionWindow(ctx context.Context, account *Acc end := start.Add(5 * time.Hour) windowStart = &start windowEnd = &end - log.Printf("Account %d: initializing 5h window from %v to %v (status: %s)", account.ID, start, end, status) + slog.Info("account_session_window_initialized", "account_id", account.ID, "window_start", start, "window_end", end, "status", status) } if err := s.accountRepo.UpdateSessionWindow(ctx, account.ID, windowStart, windowEnd, status); err != nil { - log.Printf("UpdateSessionWindow failed for account %d: %v", account.ID, err) + slog.Warn("session_window_update_failed", "account_id", account.ID, "error", err) } // 如果状态为allowed且之前有限流,说明窗口已重置,清除限流状态 if status == "allowed" && account.IsRateLimited() { if err := s.ClearRateLimit(ctx, account.ID); err != nil { - log.Printf("ClearRateLimit failed for account %d: %v", account.ID, err) + slog.Warn("rate_limit_clear_failed", "account_id", account.ID, "error", err) } } } @@ -356,7 +467,10 @@ func (s *RateLimitService) ClearRateLimit(ctx context.Context, accountID int64) if err := s.accountRepo.ClearRateLimit(ctx, accountID); err != nil { return err } - return s.accountRepo.ClearAntigravityQuotaScopes(ctx, accountID) + if err := s.accountRepo.ClearAntigravityQuotaScopes(ctx, accountID); err != nil { + return err + } + return s.accountRepo.ClearModelRateLimits(ctx, accountID) } func (s *RateLimitService) ClearTempUnschedulable(ctx context.Context, accountID int64) error { @@ -365,7 +479,7 @@ func (s *RateLimitService) ClearTempUnschedulable(ctx context.Context, accountID } if s.tempUnschedCache != nil { if err := s.tempUnschedCache.DeleteTempUnsched(ctx, accountID); err != nil { - log.Printf("DeleteTempUnsched failed for account %d: %v", accountID, err) + slog.Warn("temp_unsched_cache_delete_failed", "account_id", accountID, "error", err) } } return nil @@ -412,7 +526,7 @@ func (s *RateLimitService) 
GetTempUnschedStatus(ctx context.Context, accountID i if s.tempUnschedCache != nil { if err := s.tempUnschedCache.SetTempUnsched(ctx, accountID, state); err != nil { - log.Printf("SetTempUnsched failed for account %d: %v", accountID, err) + slog.Warn("temp_unsched_cache_set_failed", "account_id", accountID, "error", err) } } @@ -515,17 +629,17 @@ func (s *RateLimitService) triggerTempUnschedulable(ctx context.Context, account } if err := s.accountRepo.SetTempUnschedulable(ctx, account.ID, until, reason); err != nil { - log.Printf("SetTempUnschedulable failed for account %d: %v", account.ID, err) + slog.Warn("temp_unsched_set_failed", "account_id", account.ID, "error", err) return false } if s.tempUnschedCache != nil { if err := s.tempUnschedCache.SetTempUnsched(ctx, account.ID, state); err != nil { - log.Printf("SetTempUnsched cache failed for account %d: %v", account.ID, err) + slog.Warn("temp_unsched_cache_set_failed", "account_id", account.ID, "error", err) } } - log.Printf("Account %d temp unschedulable until %v (rule %d, code %d)", account.ID, until, ruleIndex, statusCode) + slog.Info("account_temp_unschedulable", "account_id", account.ID, "until", until, "rule_index", ruleIndex, "status_code", statusCode) return true } @@ -538,3 +652,124 @@ func truncateTempUnschedMessage(body []byte, maxBytes int) string { } return strings.TrimSpace(string(body)) } + +// HandleStreamTimeout 处理流数据超时 +// 根据系统设置决定是否标记账户为临时不可调度或错误状态 +// 返回是否应该停止该账号的调度 +func (s *RateLimitService) HandleStreamTimeout(ctx context.Context, account *Account, model string) bool { + if account == nil { + return false + } + + // 获取系统设置 + if s.settingService == nil { + slog.Warn("stream_timeout_setting_service_missing", "account_id", account.ID) + return false + } + + settings, err := s.settingService.GetStreamTimeoutSettings(ctx) + if err != nil { + slog.Warn("stream_timeout_get_settings_failed", "account_id", account.ID, "error", err) + return false + } + + if !settings.Enabled { + return false + } + + 
if settings.Action == StreamTimeoutActionNone { + return false + } + + // 增加超时计数 + var count int64 = 1 + if s.timeoutCounterCache != nil { + count, err = s.timeoutCounterCache.IncrementTimeoutCount(ctx, account.ID, settings.ThresholdWindowMinutes) + if err != nil { + slog.Warn("stream_timeout_increment_count_failed", "account_id", account.ID, "error", err) + // 继续处理,使用 count=1 + count = 1 + } + } + + slog.Info("stream_timeout_count", "account_id", account.ID, "count", count, "threshold", settings.ThresholdCount, "window_minutes", settings.ThresholdWindowMinutes, "model", model) + + // 检查是否达到阈值 + if count < int64(settings.ThresholdCount) { + return false + } + + // 达到阈值,执行相应操作 + switch settings.Action { + case StreamTimeoutActionTempUnsched: + return s.triggerStreamTimeoutTempUnsched(ctx, account, settings, model) + case StreamTimeoutActionError: + return s.triggerStreamTimeoutError(ctx, account, model) + default: + return false + } +} + +// triggerStreamTimeoutTempUnsched 触发流超时临时不可调度 +func (s *RateLimitService) triggerStreamTimeoutTempUnsched(ctx context.Context, account *Account, settings *StreamTimeoutSettings, model string) bool { + now := time.Now() + until := now.Add(time.Duration(settings.TempUnschedMinutes) * time.Minute) + + state := &TempUnschedState{ + UntilUnix: until.Unix(), + TriggeredAtUnix: now.Unix(), + StatusCode: 0, // 超时没有状态码 + MatchedKeyword: "stream_timeout", + RuleIndex: -1, // 表示系统级规则 + ErrorMessage: "Stream data interval timeout for model: " + model, + } + + reason := "" + if raw, err := json.Marshal(state); err == nil { + reason = string(raw) + } + if reason == "" { + reason = state.ErrorMessage + } + + if err := s.accountRepo.SetTempUnschedulable(ctx, account.ID, until, reason); err != nil { + slog.Warn("stream_timeout_set_temp_unsched_failed", "account_id", account.ID, "error", err) + return false + } + + if s.tempUnschedCache != nil { + if err := s.tempUnschedCache.SetTempUnsched(ctx, account.ID, state); err != nil { + 
slog.Warn("stream_timeout_set_temp_unsched_cache_failed", "account_id", account.ID, "error", err) + } + } + + // 重置超时计数 + if s.timeoutCounterCache != nil { + if err := s.timeoutCounterCache.ResetTimeoutCount(ctx, account.ID); err != nil { + slog.Warn("stream_timeout_reset_count_failed", "account_id", account.ID, "error", err) + } + } + + slog.Info("stream_timeout_temp_unschedulable", "account_id", account.ID, "until", until, "model", model) + return true +} + +// triggerStreamTimeoutError 触发流超时错误状态 +func (s *RateLimitService) triggerStreamTimeoutError(ctx context.Context, account *Account, model string) bool { + errorMsg := "Stream data interval timeout (repeated failures) for model: " + model + + if err := s.accountRepo.SetError(ctx, account.ID, errorMsg); err != nil { + slog.Warn("stream_timeout_set_error_failed", "account_id", account.ID, "error", err) + return false + } + + // 重置超时计数 + if s.timeoutCounterCache != nil { + if err := s.timeoutCounterCache.ResetTimeoutCount(ctx, account.ID); err != nil { + slog.Warn("stream_timeout_reset_count_failed", "account_id", account.ID, "error", err) + } + } + + slog.Warn("stream_timeout_account_error", "account_id", account.ID, "model", model) + return true +} diff --git a/backend/internal/service/ratelimit_service_401_test.go b/backend/internal/service/ratelimit_service_401_test.go new file mode 100644 index 00000000..36357a4b --- /dev/null +++ b/backend/internal/service/ratelimit_service_401_test.go @@ -0,0 +1,121 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "net/http" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +type rateLimitAccountRepoStub struct { + mockAccountRepoForGemini + setErrorCalls int + tempCalls int + lastErrorMsg string +} + +func (r *rateLimitAccountRepoStub) SetError(ctx context.Context, id int64, errorMsg string) error { + r.setErrorCalls++ + r.lastErrorMsg = errorMsg + return nil +} + +func (r 
*rateLimitAccountRepoStub) SetTempUnschedulable(ctx context.Context, id int64, until time.Time, reason string) error { + r.tempCalls++ + return nil +} + +type tokenCacheInvalidatorRecorder struct { + accounts []*Account + err error +} + +func (r *tokenCacheInvalidatorRecorder) InvalidateToken(ctx context.Context, account *Account) error { + r.accounts = append(r.accounts, account) + return r.err +} + +func TestRateLimitService_HandleUpstreamError_OAuth401MarksError(t *testing.T) { + tests := []struct { + name string + platform string + }{ + {name: "gemini", platform: PlatformGemini}, + {name: "antigravity", platform: PlatformAntigravity}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + repo := &rateLimitAccountRepoStub{} + invalidator := &tokenCacheInvalidatorRecorder{} + service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + service.SetTokenCacheInvalidator(invalidator) + account := &Account{ + ID: 100, + Platform: tt.platform, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "temp_unschedulable_enabled": true, + "temp_unschedulable_rules": []any{ + map[string]any{ + "error_code": 401, + "keywords": []any{"unauthorized"}, + "duration_minutes": 30, + "description": "custom rule", + }, + }, + }, + } + + shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized")) + + require.True(t, shouldDisable) + require.Equal(t, 1, repo.setErrorCalls) + require.Equal(t, 0, repo.tempCalls) + require.Contains(t, repo.lastErrorMsg, "Authentication failed (401)") + require.Len(t, invalidator.accounts, 1) + }) + } +} + +func TestRateLimitService_HandleUpstreamError_OAuth401InvalidatorError(t *testing.T) { + repo := &rateLimitAccountRepoStub{} + invalidator := &tokenCacheInvalidatorRecorder{err: errors.New("boom")} + service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + service.SetTokenCacheInvalidator(invalidator) + account := &Account{ + ID: 101, + 
Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + + shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized")) + + require.True(t, shouldDisable) + require.Equal(t, 1, repo.setErrorCalls) + require.Len(t, invalidator.accounts, 1) +} + +func TestRateLimitService_HandleUpstreamError_NonOAuth401(t *testing.T) { + repo := &rateLimitAccountRepoStub{} + invalidator := &tokenCacheInvalidatorRecorder{} + service := NewRateLimitService(repo, nil, &config.Config{}, nil, nil) + service.SetTokenCacheInvalidator(invalidator) + account := &Account{ + ID: 102, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + } + + shouldDisable := service.HandleUpstreamError(context.Background(), account, 401, http.Header{}, []byte("unauthorized")) + + require.True(t, shouldDisable) + require.Equal(t, 1, repo.setErrorCalls) + require.Empty(t, invalidator.accounts) +} diff --git a/backend/internal/service/redeem_service.go b/backend/internal/service/redeem_service.go index b6324235..ff52dc47 100644 --- a/backend/internal/service/redeem_service.go +++ b/backend/internal/service/redeem_service.go @@ -68,12 +68,13 @@ type RedeemCodeResponse struct { // RedeemService 兑换码服务 type RedeemService struct { - redeemRepo RedeemCodeRepository - userRepo UserRepository - subscriptionService *SubscriptionService - cache RedeemCache - billingCacheService *BillingCacheService - entClient *dbent.Client + redeemRepo RedeemCodeRepository + userRepo UserRepository + subscriptionService *SubscriptionService + cache RedeemCache + billingCacheService *BillingCacheService + entClient *dbent.Client + authCacheInvalidator APIKeyAuthCacheInvalidator } // NewRedeemService 创建兑换码服务实例 @@ -84,14 +85,16 @@ func NewRedeemService( cache RedeemCache, billingCacheService *BillingCacheService, entClient *dbent.Client, + authCacheInvalidator APIKeyAuthCacheInvalidator, ) *RedeemService { return &RedeemService{ - redeemRepo: redeemRepo, - userRepo: userRepo, - 
subscriptionService: subscriptionService, - cache: cache, - billingCacheService: billingCacheService, - entClient: entClient, + redeemRepo: redeemRepo, + userRepo: userRepo, + subscriptionService: subscriptionService, + cache: cache, + billingCacheService: billingCacheService, + entClient: entClient, + authCacheInvalidator: authCacheInvalidator, } } @@ -324,18 +327,33 @@ func (s *RedeemService) Redeem(ctx context.Context, userID int64, code string) ( // invalidateRedeemCaches 失效兑换相关的缓存 func (s *RedeemService) invalidateRedeemCaches(ctx context.Context, userID int64, redeemCode *RedeemCode) { - if s.billingCacheService == nil { - return - } - switch redeemCode.Type { case RedeemTypeBalance: + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) + } + if s.billingCacheService == nil { + return + } go func() { cacheCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() _ = s.billingCacheService.InvalidateUserBalance(cacheCtx, userID) }() + case RedeemTypeConcurrency: + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) + } + if s.billingCacheService == nil { + return + } case RedeemTypeSubscription: + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) + } + if s.billingCacheService == nil { + return + } if redeemCode.GroupID != nil { groupID := *redeemCode.GroupID go func() { diff --git a/backend/internal/service/scheduler_cache.go b/backend/internal/service/scheduler_cache.go new file mode 100644 index 00000000..f36135e0 --- /dev/null +++ b/backend/internal/service/scheduler_cache.go @@ -0,0 +1,68 @@ +package service + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" +) + +const ( + SchedulerModeSingle = "single" + SchedulerModeMixed = "mixed" + SchedulerModeForced = "forced" +) + +type SchedulerBucket struct { + GroupID int64 + Platform string + Mode string +} + +func (b 
SchedulerBucket) String() string { + return fmt.Sprintf("%d:%s:%s", b.GroupID, b.Platform, b.Mode) +} + +func ParseSchedulerBucket(raw string) (SchedulerBucket, bool) { + parts := strings.Split(raw, ":") + if len(parts) != 3 { + return SchedulerBucket{}, false + } + groupID, err := strconv.ParseInt(parts[0], 10, 64) + if err != nil { + return SchedulerBucket{}, false + } + if parts[1] == "" || parts[2] == "" { + return SchedulerBucket{}, false + } + return SchedulerBucket{ + GroupID: groupID, + Platform: parts[1], + Mode: parts[2], + }, true +} + +// SchedulerCache 负责调度快照与账号快照的缓存读写。 +type SchedulerCache interface { + // GetSnapshot 读取快照并返回命中与否(ready + active + 数据完整)。 + GetSnapshot(ctx context.Context, bucket SchedulerBucket) ([]*Account, bool, error) + // SetSnapshot 写入快照并切换激活版本。 + SetSnapshot(ctx context.Context, bucket SchedulerBucket, accounts []Account) error + // GetAccount 获取单账号快照。 + GetAccount(ctx context.Context, accountID int64) (*Account, error) + // SetAccount 写入单账号快照(包含不可调度状态)。 + SetAccount(ctx context.Context, account *Account) error + // DeleteAccount 删除单账号快照。 + DeleteAccount(ctx context.Context, accountID int64) error + // UpdateLastUsed 批量更新账号的最后使用时间。 + UpdateLastUsed(ctx context.Context, updates map[int64]time.Time) error + // TryLockBucket 尝试获取分桶重建锁。 + TryLockBucket(ctx context.Context, bucket SchedulerBucket, ttl time.Duration) (bool, error) + // ListBuckets 返回已注册的分桶集合。 + ListBuckets(ctx context.Context) ([]SchedulerBucket, error) + // GetOutboxWatermark 读取 outbox 水位。 + GetOutboxWatermark(ctx context.Context) (int64, error) + // SetOutboxWatermark 保存 outbox 水位。 + SetOutboxWatermark(ctx context.Context, id int64) error +} diff --git a/backend/internal/service/scheduler_events.go b/backend/internal/service/scheduler_events.go new file mode 100644 index 00000000..5a3e72ce --- /dev/null +++ b/backend/internal/service/scheduler_events.go @@ -0,0 +1,10 @@ +package service + +const ( + SchedulerOutboxEventAccountChanged = "account_changed" + 
SchedulerOutboxEventAccountGroupsChanged = "account_groups_changed" + SchedulerOutboxEventAccountBulkChanged = "account_bulk_changed" + SchedulerOutboxEventAccountLastUsed = "account_last_used" + SchedulerOutboxEventGroupChanged = "group_changed" + SchedulerOutboxEventFullRebuild = "full_rebuild" +) diff --git a/backend/internal/service/scheduler_outbox.go b/backend/internal/service/scheduler_outbox.go new file mode 100644 index 00000000..32bfcfaa --- /dev/null +++ b/backend/internal/service/scheduler_outbox.go @@ -0,0 +1,21 @@ +package service + +import ( + "context" + "time" +) + +type SchedulerOutboxEvent struct { + ID int64 + EventType string + AccountID *int64 + GroupID *int64 + Payload map[string]any + CreatedAt time.Time +} + +// SchedulerOutboxRepository 提供调度 outbox 的读取接口。 +type SchedulerOutboxRepository interface { + ListAfter(ctx context.Context, afterID int64, limit int) ([]SchedulerOutboxEvent, error) + MaxID(ctx context.Context) (int64, error) +} diff --git a/backend/internal/service/scheduler_snapshot_service.go b/backend/internal/service/scheduler_snapshot_service.go new file mode 100644 index 00000000..b3714ed1 --- /dev/null +++ b/backend/internal/service/scheduler_snapshot_service.go @@ -0,0 +1,786 @@ +package service + +import ( + "context" + "encoding/json" + "errors" + "log" + "strconv" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" +) + +var ( + ErrSchedulerCacheNotReady = errors.New("scheduler cache not ready") + ErrSchedulerFallbackLimited = errors.New("scheduler db fallback limited") +) + +const outboxEventTimeout = 2 * time.Minute + +type SchedulerSnapshotService struct { + cache SchedulerCache + outboxRepo SchedulerOutboxRepository + accountRepo AccountRepository + groupRepo GroupRepository + cfg *config.Config + stopCh chan struct{} + stopOnce sync.Once + wg sync.WaitGroup + fallbackLimit *fallbackLimiter + lagMu sync.Mutex + lagFailures int +} + +func NewSchedulerSnapshotService( + cache SchedulerCache, + outboxRepo 
SchedulerOutboxRepository, + accountRepo AccountRepository, + groupRepo GroupRepository, + cfg *config.Config, +) *SchedulerSnapshotService { + maxQPS := 0 + if cfg != nil { + maxQPS = cfg.Gateway.Scheduling.DbFallbackMaxQPS + } + return &SchedulerSnapshotService{ + cache: cache, + outboxRepo: outboxRepo, + accountRepo: accountRepo, + groupRepo: groupRepo, + cfg: cfg, + stopCh: make(chan struct{}), + fallbackLimit: newFallbackLimiter(maxQPS), + } +} + +func (s *SchedulerSnapshotService) Start() { + if s == nil || s.cache == nil { + return + } + + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.runInitialRebuild() + }() + + interval := s.outboxPollInterval() + if s.outboxRepo != nil && interval > 0 { + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.runOutboxWorker(interval) + }() + } + + fullInterval := s.fullRebuildInterval() + if fullInterval > 0 { + s.wg.Add(1) + go func() { + defer s.wg.Done() + s.runFullRebuildWorker(fullInterval) + }() + } +} + +func (s *SchedulerSnapshotService) Stop() { + if s == nil { + return + } + s.stopOnce.Do(func() { + close(s.stopCh) + }) + s.wg.Wait() +} + +func (s *SchedulerSnapshotService) ListSchedulableAccounts(ctx context.Context, groupID *int64, platform string, hasForcePlatform bool) ([]Account, bool, error) { + useMixed := (platform == PlatformAnthropic || platform == PlatformGemini) && !hasForcePlatform + mode := s.resolveMode(platform, hasForcePlatform) + bucket := s.bucketFor(groupID, platform, mode) + + if s.cache != nil { + cached, hit, err := s.cache.GetSnapshot(ctx, bucket) + if err != nil { + log.Printf("[Scheduler] cache read failed: bucket=%s err=%v", bucket.String(), err) + } else if hit { + return derefAccounts(cached), useMixed, nil + } + } + + if err := s.guardFallback(ctx); err != nil { + return nil, useMixed, err + } + + fallbackCtx, cancel := s.withFallbackTimeout(ctx) + defer cancel() + + accounts, err := s.loadAccountsFromDB(fallbackCtx, bucket, useMixed) + if err != nil { + return nil, useMixed, err + 
} + + if s.cache != nil { + if err := s.cache.SetSnapshot(fallbackCtx, bucket, accounts); err != nil { + log.Printf("[Scheduler] cache write failed: bucket=%s err=%v", bucket.String(), err) + } + } + + return accounts, useMixed, nil +} + +func (s *SchedulerSnapshotService) GetAccount(ctx context.Context, accountID int64) (*Account, error) { + if accountID <= 0 { + return nil, nil + } + if s.cache != nil { + account, err := s.cache.GetAccount(ctx, accountID) + if err != nil { + log.Printf("[Scheduler] account cache read failed: id=%d err=%v", accountID, err) + } else if account != nil { + return account, nil + } + } + + if err := s.guardFallback(ctx); err != nil { + return nil, err + } + fallbackCtx, cancel := s.withFallbackTimeout(ctx) + defer cancel() + return s.accountRepo.GetByID(fallbackCtx, accountID) +} + +func (s *SchedulerSnapshotService) runInitialRebuild() { + if s.cache == nil { + return + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + buckets, err := s.cache.ListBuckets(ctx) + if err != nil { + log.Printf("[Scheduler] list buckets failed: %v", err) + } + if len(buckets) == 0 { + buckets, err = s.defaultBuckets(ctx) + if err != nil { + log.Printf("[Scheduler] default buckets failed: %v", err) + return + } + } + if err := s.rebuildBuckets(ctx, buckets, "startup"); err != nil { + log.Printf("[Scheduler] rebuild startup failed: %v", err) + } +} + +func (s *SchedulerSnapshotService) runOutboxWorker(interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + s.pollOutbox() + for { + select { + case <-ticker.C: + s.pollOutbox() + case <-s.stopCh: + return + } + } +} + +func (s *SchedulerSnapshotService) runFullRebuildWorker(interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + if err := s.triggerFullRebuild("interval"); err != nil { + log.Printf("[Scheduler] full rebuild failed: %v", err) + } + case <-s.stopCh: + 
return + } + } +} + +func (s *SchedulerSnapshotService) pollOutbox() { + if s.outboxRepo == nil || s.cache == nil { + return + } + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + watermark, err := s.cache.GetOutboxWatermark(ctx) + if err != nil { + log.Printf("[Scheduler] outbox watermark read failed: %v", err) + return + } + + events, err := s.outboxRepo.ListAfter(ctx, watermark, 200) + if err != nil { + log.Printf("[Scheduler] outbox poll failed: %v", err) + return + } + if len(events) == 0 { + return + } + + watermarkForCheck := watermark + for _, event := range events { + eventCtx, cancel := context.WithTimeout(context.Background(), outboxEventTimeout) + err := s.handleOutboxEvent(eventCtx, event) + cancel() + if err != nil { + log.Printf("[Scheduler] outbox handle failed: id=%d type=%s err=%v", event.ID, event.EventType, err) + return + } + } + + lastID := events[len(events)-1].ID + if err := s.cache.SetOutboxWatermark(ctx, lastID); err != nil { + log.Printf("[Scheduler] outbox watermark write failed: %v", err) + } else { + watermarkForCheck = lastID + } + + s.checkOutboxLag(ctx, events[0], watermarkForCheck) +} + +func (s *SchedulerSnapshotService) handleOutboxEvent(ctx context.Context, event SchedulerOutboxEvent) error { + switch event.EventType { + case SchedulerOutboxEventAccountLastUsed: + return s.handleLastUsedEvent(ctx, event.Payload) + case SchedulerOutboxEventAccountBulkChanged: + return s.handleBulkAccountEvent(ctx, event.Payload) + case SchedulerOutboxEventAccountGroupsChanged: + return s.handleAccountEvent(ctx, event.AccountID, event.Payload) + case SchedulerOutboxEventAccountChanged: + return s.handleAccountEvent(ctx, event.AccountID, event.Payload) + case SchedulerOutboxEventGroupChanged: + return s.handleGroupEvent(ctx, event.GroupID) + case SchedulerOutboxEventFullRebuild: + return s.triggerFullRebuild("outbox") + default: + return nil + } +} + +func (s *SchedulerSnapshotService) 
handleLastUsedEvent(ctx context.Context, payload map[string]any) error { + if s.cache == nil || payload == nil { + return nil + } + raw, ok := payload["last_used"].(map[string]any) + if !ok || len(raw) == 0 { + return nil + } + updates := make(map[int64]time.Time, len(raw)) + for key, value := range raw { + id, err := strconv.ParseInt(key, 10, 64) + if err != nil || id <= 0 { + continue + } + sec, ok := toInt64(value) + if !ok || sec <= 0 { + continue + } + updates[id] = time.Unix(sec, 0) + } + if len(updates) == 0 { + return nil + } + return s.cache.UpdateLastUsed(ctx, updates) +} + +func (s *SchedulerSnapshotService) handleBulkAccountEvent(ctx context.Context, payload map[string]any) error { + if payload == nil { + return nil + } + ids := parseInt64Slice(payload["account_ids"]) + for _, id := range ids { + if err := s.handleAccountEvent(ctx, &id, payload); err != nil { + return err + } + } + return nil +} + +func (s *SchedulerSnapshotService) handleAccountEvent(ctx context.Context, accountID *int64, payload map[string]any) error { + if accountID == nil || *accountID <= 0 { + return nil + } + if s.accountRepo == nil { + return nil + } + + var groupIDs []int64 + if payload != nil { + groupIDs = parseInt64Slice(payload["group_ids"]) + } + + account, err := s.accountRepo.GetByID(ctx, *accountID) + if err != nil { + if errors.Is(err, ErrAccountNotFound) { + if s.cache != nil { + if err := s.cache.DeleteAccount(ctx, *accountID); err != nil { + return err + } + } + return s.rebuildByGroupIDs(ctx, groupIDs, "account_miss") + } + return err + } + if s.cache != nil { + if err := s.cache.SetAccount(ctx, account); err != nil { + return err + } + } + if len(groupIDs) == 0 { + groupIDs = account.GroupIDs + } + return s.rebuildByAccount(ctx, account, groupIDs, "account_change") +} + +func (s *SchedulerSnapshotService) handleGroupEvent(ctx context.Context, groupID *int64) error { + if groupID == nil || *groupID <= 0 { + return nil + } + groupIDs := []int64{*groupID} + return 
s.rebuildByGroupIDs(ctx, groupIDs, "group_change") +} + +func (s *SchedulerSnapshotService) rebuildByAccount(ctx context.Context, account *Account, groupIDs []int64, reason string) error { + if account == nil { + return nil + } + groupIDs = s.normalizeGroupIDs(groupIDs) + if len(groupIDs) == 0 { + return nil + } + + var firstErr error + if err := s.rebuildBucketsForPlatform(ctx, account.Platform, groupIDs, reason); err != nil && firstErr == nil { + firstErr = err + } + if account.Platform == PlatformAntigravity && account.IsMixedSchedulingEnabled() { + if err := s.rebuildBucketsForPlatform(ctx, PlatformAnthropic, groupIDs, reason); err != nil && firstErr == nil { + firstErr = err + } + if err := s.rebuildBucketsForPlatform(ctx, PlatformGemini, groupIDs, reason); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func (s *SchedulerSnapshotService) rebuildByGroupIDs(ctx context.Context, groupIDs []int64, reason string) error { + groupIDs = s.normalizeGroupIDs(groupIDs) + if len(groupIDs) == 0 { + return nil + } + platforms := []string{PlatformAnthropic, PlatformGemini, PlatformOpenAI, PlatformAntigravity} + var firstErr error + for _, platform := range platforms { + if err := s.rebuildBucketsForPlatform(ctx, platform, groupIDs, reason); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func (s *SchedulerSnapshotService) rebuildBucketsForPlatform(ctx context.Context, platform string, groupIDs []int64, reason string) error { + if platform == "" { + return nil + } + var firstErr error + for _, gid := range groupIDs { + if err := s.rebuildBucket(ctx, SchedulerBucket{GroupID: gid, Platform: platform, Mode: SchedulerModeSingle}, reason); err != nil && firstErr == nil { + firstErr = err + } + if err := s.rebuildBucket(ctx, SchedulerBucket{GroupID: gid, Platform: platform, Mode: SchedulerModeForced}, reason); err != nil && firstErr == nil { + firstErr = err + } + if platform == PlatformAnthropic || platform == 
PlatformGemini { + if err := s.rebuildBucket(ctx, SchedulerBucket{GroupID: gid, Platform: platform, Mode: SchedulerModeMixed}, reason); err != nil && firstErr == nil { + firstErr = err + } + } + } + return firstErr +} + +func (s *SchedulerSnapshotService) rebuildBuckets(ctx context.Context, buckets []SchedulerBucket, reason string) error { + var firstErr error + for _, bucket := range buckets { + if err := s.rebuildBucket(ctx, bucket, reason); err != nil && firstErr == nil { + firstErr = err + } + } + return firstErr +} + +func (s *SchedulerSnapshotService) rebuildBucket(ctx context.Context, bucket SchedulerBucket, reason string) error { + if s.cache == nil { + return ErrSchedulerCacheNotReady + } + ok, err := s.cache.TryLockBucket(ctx, bucket, 30*time.Second) + if err != nil { + return err + } + if !ok { + return nil + } + + rebuildCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + + accounts, err := s.loadAccountsFromDB(rebuildCtx, bucket, bucket.Mode == SchedulerModeMixed) + if err != nil { + log.Printf("[Scheduler] rebuild failed: bucket=%s reason=%s err=%v", bucket.String(), reason, err) + return err + } + if err := s.cache.SetSnapshot(rebuildCtx, bucket, accounts); err != nil { + log.Printf("[Scheduler] rebuild cache failed: bucket=%s reason=%s err=%v", bucket.String(), reason, err) + return err + } + log.Printf("[Scheduler] rebuild ok: bucket=%s reason=%s size=%d", bucket.String(), reason, len(accounts)) + return nil +} + +func (s *SchedulerSnapshotService) triggerFullRebuild(reason string) error { + if s.cache == nil { + return ErrSchedulerCacheNotReady + } + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute) + defer cancel() + + buckets, err := s.cache.ListBuckets(ctx) + if err != nil { + log.Printf("[Scheduler] list buckets failed: %v", err) + return err + } + if len(buckets) == 0 { + buckets, err = s.defaultBuckets(ctx) + if err != nil { + log.Printf("[Scheduler] default buckets failed: %v", err) + return err 
+ } + } + return s.rebuildBuckets(ctx, buckets, reason) +} + +func (s *SchedulerSnapshotService) checkOutboxLag(ctx context.Context, oldest SchedulerOutboxEvent, watermark int64) { + if oldest.CreatedAt.IsZero() || s.cfg == nil { + return + } + + lag := time.Since(oldest.CreatedAt) + if lagSeconds := int(lag.Seconds()); lagSeconds >= s.cfg.Gateway.Scheduling.OutboxLagWarnSeconds && s.cfg.Gateway.Scheduling.OutboxLagWarnSeconds > 0 { + log.Printf("[Scheduler] outbox lag warning: %ds", lagSeconds) + } + + if s.cfg.Gateway.Scheduling.OutboxLagRebuildSeconds > 0 && int(lag.Seconds()) >= s.cfg.Gateway.Scheduling.OutboxLagRebuildSeconds { + s.lagMu.Lock() + s.lagFailures++ + failures := s.lagFailures + s.lagMu.Unlock() + + if failures >= s.cfg.Gateway.Scheduling.OutboxLagRebuildFailures { + log.Printf("[Scheduler] outbox lag rebuild triggered: lag=%s failures=%d", lag, failures) + s.lagMu.Lock() + s.lagFailures = 0 + s.lagMu.Unlock() + if err := s.triggerFullRebuild("outbox_lag"); err != nil { + log.Printf("[Scheduler] outbox lag rebuild failed: %v", err) + } + } + } else { + s.lagMu.Lock() + s.lagFailures = 0 + s.lagMu.Unlock() + } + + threshold := s.cfg.Gateway.Scheduling.OutboxBacklogRebuildRows + if threshold <= 0 || s.outboxRepo == nil { + return + } + maxID, err := s.outboxRepo.MaxID(ctx) + if err != nil { + return + } + if maxID-watermark >= int64(threshold) { + log.Printf("[Scheduler] outbox backlog rebuild triggered: backlog=%d", maxID-watermark) + if err := s.triggerFullRebuild("outbox_backlog"); err != nil { + log.Printf("[Scheduler] outbox backlog rebuild failed: %v", err) + } + } +} + +func (s *SchedulerSnapshotService) loadAccountsFromDB(ctx context.Context, bucket SchedulerBucket, useMixed bool) ([]Account, error) { + if s.accountRepo == nil { + return nil, ErrSchedulerCacheNotReady + } + groupID := bucket.GroupID + if s.isRunModeSimple() { + groupID = 0 + } + + if useMixed { + platforms := []string{bucket.Platform, PlatformAntigravity} + var accounts 
[]Account + var err error + if groupID > 0 { + accounts, err = s.accountRepo.ListSchedulableByGroupIDAndPlatforms(ctx, groupID, platforms) + } else { + accounts, err = s.accountRepo.ListSchedulableByPlatforms(ctx, platforms) + } + if err != nil { + return nil, err + } + filtered := make([]Account, 0, len(accounts)) + for _, acc := range accounts { + if acc.Platform == PlatformAntigravity && !acc.IsMixedSchedulingEnabled() { + continue + } + filtered = append(filtered, acc) + } + return filtered, nil + } + + if groupID > 0 { + return s.accountRepo.ListSchedulableByGroupIDAndPlatform(ctx, groupID, bucket.Platform) + } + return s.accountRepo.ListSchedulableByPlatform(ctx, bucket.Platform) +} + +func (s *SchedulerSnapshotService) bucketFor(groupID *int64, platform string, mode string) SchedulerBucket { + return SchedulerBucket{ + GroupID: s.normalizeGroupID(groupID), + Platform: platform, + Mode: mode, + } +} + +func (s *SchedulerSnapshotService) normalizeGroupID(groupID *int64) int64 { + if s.isRunModeSimple() { + return 0 + } + if groupID == nil || *groupID <= 0 { + return 0 + } + return *groupID +} + +func (s *SchedulerSnapshotService) normalizeGroupIDs(groupIDs []int64) []int64 { + if s.isRunModeSimple() { + return []int64{0} + } + if len(groupIDs) == 0 { + return []int64{0} + } + seen := make(map[int64]struct{}, len(groupIDs)) + out := make([]int64, 0, len(groupIDs)) + for _, id := range groupIDs { + if id <= 0 { + continue + } + if _, ok := seen[id]; ok { + continue + } + seen[id] = struct{}{} + out = append(out, id) + } + if len(out) == 0 { + return []int64{0} + } + return out +} + +func (s *SchedulerSnapshotService) resolveMode(platform string, hasForcePlatform bool) string { + if hasForcePlatform { + return SchedulerModeForced + } + if platform == PlatformAnthropic || platform == PlatformGemini { + return SchedulerModeMixed + } + return SchedulerModeSingle +} + +func (s *SchedulerSnapshotService) guardFallback(ctx context.Context) error { + if s.cfg == nil || 
s.cfg.Gateway.Scheduling.DbFallbackEnabled { + if s.fallbackLimit == nil || s.fallbackLimit.Allow() { + return nil + } + return ErrSchedulerFallbackLimited + } + return ErrSchedulerCacheNotReady +} + +func (s *SchedulerSnapshotService) withFallbackTimeout(ctx context.Context) (context.Context, context.CancelFunc) { + if s.cfg == nil || s.cfg.Gateway.Scheduling.DbFallbackTimeoutSeconds <= 0 { + return context.WithCancel(ctx) + } + timeout := time.Duration(s.cfg.Gateway.Scheduling.DbFallbackTimeoutSeconds) * time.Second + if deadline, ok := ctx.Deadline(); ok { + remaining := time.Until(deadline) + if remaining <= 0 { + return context.WithCancel(ctx) + } + if remaining < timeout { + timeout = remaining + } + } + return context.WithTimeout(ctx, timeout) +} + +func (s *SchedulerSnapshotService) isRunModeSimple() bool { + return s.cfg != nil && s.cfg.RunMode == config.RunModeSimple +} + +func (s *SchedulerSnapshotService) outboxPollInterval() time.Duration { + if s.cfg == nil { + return time.Second + } + sec := s.cfg.Gateway.Scheduling.OutboxPollIntervalSeconds + if sec <= 0 { + return time.Second + } + return time.Duration(sec) * time.Second +} + +func (s *SchedulerSnapshotService) fullRebuildInterval() time.Duration { + if s.cfg == nil { + return 0 + } + sec := s.cfg.Gateway.Scheduling.FullRebuildIntervalSeconds + if sec <= 0 { + return 0 + } + return time.Duration(sec) * time.Second +} + +func (s *SchedulerSnapshotService) defaultBuckets(ctx context.Context) ([]SchedulerBucket, error) { + buckets := make([]SchedulerBucket, 0) + platforms := []string{PlatformAnthropic, PlatformGemini, PlatformOpenAI, PlatformAntigravity} + for _, platform := range platforms { + buckets = append(buckets, SchedulerBucket{GroupID: 0, Platform: platform, Mode: SchedulerModeSingle}) + buckets = append(buckets, SchedulerBucket{GroupID: 0, Platform: platform, Mode: SchedulerModeForced}) + if platform == PlatformAnthropic || platform == PlatformGemini { + buckets = append(buckets, 
SchedulerBucket{GroupID: 0, Platform: platform, Mode: SchedulerModeMixed}) + } + } + + if s.isRunModeSimple() || s.groupRepo == nil { + return dedupeBuckets(buckets), nil + } + + groups, err := s.groupRepo.ListActive(ctx) + if err != nil { + return dedupeBuckets(buckets), nil + } + for _, group := range groups { + if group.Platform == "" { + continue + } + buckets = append(buckets, SchedulerBucket{GroupID: group.ID, Platform: group.Platform, Mode: SchedulerModeSingle}) + buckets = append(buckets, SchedulerBucket{GroupID: group.ID, Platform: group.Platform, Mode: SchedulerModeForced}) + if group.Platform == PlatformAnthropic || group.Platform == PlatformGemini { + buckets = append(buckets, SchedulerBucket{GroupID: group.ID, Platform: group.Platform, Mode: SchedulerModeMixed}) + } + } + return dedupeBuckets(buckets), nil +} + +func dedupeBuckets(in []SchedulerBucket) []SchedulerBucket { + seen := make(map[string]struct{}, len(in)) + out := make([]SchedulerBucket, 0, len(in)) + for _, bucket := range in { + key := bucket.String() + if _, ok := seen[key]; ok { + continue + } + seen[key] = struct{}{} + out = append(out, bucket) + } + return out +} + +func derefAccounts(accounts []*Account) []Account { + if len(accounts) == 0 { + return []Account{} + } + out := make([]Account, 0, len(accounts)) + for _, account := range accounts { + if account == nil { + continue + } + out = append(out, *account) + } + return out +} + +func parseInt64Slice(value any) []int64 { + raw, ok := value.([]any) + if !ok { + return nil + } + out := make([]int64, 0, len(raw)) + for _, item := range raw { + if v, ok := toInt64(item); ok && v > 0 { + out = append(out, v) + } + } + return out +} + +func toInt64(value any) (int64, bool) { + switch v := value.(type) { + case float64: + return int64(v), true + case int64: + return v, true + case int: + return int64(v), true + case json.Number: + parsed, err := strconv.ParseInt(v.String(), 10, 64) + return parsed, err == nil + default: + return 0, false 
+ } +} + +type fallbackLimiter struct { + maxQPS int + mu sync.Mutex + window time.Time + count int +} + +func newFallbackLimiter(maxQPS int) *fallbackLimiter { + if maxQPS <= 0 { + return nil + } + return &fallbackLimiter{ + maxQPS: maxQPS, + window: time.Now(), + } +} + +func (l *fallbackLimiter) Allow() bool { + if l == nil || l.maxQPS <= 0 { + return true + } + l.mu.Lock() + defer l.mu.Unlock() + + now := time.Now() + if now.Sub(l.window) >= time.Second { + l.window = now + l.count = 0 + } + if l.count >= l.maxQPS { + return false + } + l.count++ + return true +} diff --git a/backend/internal/service/session_limit_cache.go b/backend/internal/service/session_limit_cache.go new file mode 100644 index 00000000..f6f0c26a --- /dev/null +++ b/backend/internal/service/session_limit_cache.go @@ -0,0 +1,63 @@ +package service + +import ( + "context" + "time" +) + +// SessionLimitCache 管理账号级别的活跃会话跟踪 +// 用于 Anthropic OAuth/SetupToken 账号的会话数量限制 +// +// Key 格式: session_limit:account:{accountID} +// 数据结构: Sorted Set (member=sessionUUID, score=timestamp) +// +// 会话在空闲超时后自动过期,无需手动清理 +type SessionLimitCache interface { + // RegisterSession 注册会话活动 + // - 如果会话已存在,刷新其时间戳并返回 true + // - 如果会话不存在且活跃会话数 < maxSessions,添加新会话并返回 true + // - 如果会话不存在且活跃会话数 >= maxSessions,返回 false(拒绝) + // + // 参数: + // accountID: 账号 ID + // sessionUUID: 从 metadata.user_id 中提取的会话 UUID + // maxSessions: 最大并发会话数限制 + // idleTimeout: 会话空闲超时时间 + // + // 返回: + // allowed: true 表示允许(在限制内或会话已存在),false 表示拒绝(超出限制且是新会话) + // error: 操作错误 + RegisterSession(ctx context.Context, accountID int64, sessionUUID string, maxSessions int, idleTimeout time.Duration) (allowed bool, err error) + + // RefreshSession 刷新现有会话的时间戳 + // 用于活跃会话保持活动状态 + RefreshSession(ctx context.Context, accountID int64, sessionUUID string, idleTimeout time.Duration) error + + // GetActiveSessionCount 获取当前活跃会话数 + // 返回未过期的会话数量 + GetActiveSessionCount(ctx context.Context, accountID int64) (int, error) + + // GetActiveSessionCountBatch 批量获取多个账号的活跃会话数 + // 
返回 map[accountID]count,查询失败的账号不在 map 中 + GetActiveSessionCountBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) + + // IsSessionActive 检查特定会话是否活跃(未过期) + IsSessionActive(ctx context.Context, accountID int64, sessionUUID string) (bool, error) + + // ========== 5h窗口费用缓存 ========== + // Key 格式: window_cost:account:{accountID} + // 用于缓存账号在当前5h窗口内的标准费用,减少数据库聚合查询压力 + + // GetWindowCost 获取缓存的窗口费用 + // 返回 (cost, true, nil) 如果缓存命中 + // 返回 (0, false, nil) 如果缓存未命中 + // 返回 (0, false, err) 如果发生错误 + GetWindowCost(ctx context.Context, accountID int64) (cost float64, hit bool, err error) + + // SetWindowCost 设置窗口费用缓存 + SetWindowCost(ctx context.Context, accountID int64, cost float64) error + + // GetWindowCostBatch 批量获取窗口费用缓存 + // 返回 map[accountID]cost,缓存未命中的账号不在 map 中 + GetWindowCostBatch(ctx context.Context, accountIDs []int64) (map[int64]float64, error) +} diff --git a/backend/internal/service/setting_service.go b/backend/internal/service/setting_service.go index d25698de..0a7426f8 100644 --- a/backend/internal/service/setting_service.go +++ b/backend/internal/service/setting_service.go @@ -4,6 +4,7 @@ import ( "context" "crypto/rand" "encoding/hex" + "encoding/json" "errors" "fmt" "strconv" @@ -32,6 +33,8 @@ type SettingRepository interface { type SettingService struct { settingRepo SettingRepository cfg *config.Config + onUpdate func() // Callback when settings are updated (for cache invalidation) + version string // Application version } // NewSettingService 创建系统设置服务实例 @@ -65,6 +68,7 @@ func (s *SettingService) GetPublicSettings(ctx context.Context) (*PublicSettings SettingKeyAPIBaseURL, SettingKeyContactInfo, SettingKeyDocURL, + SettingKeyHomeContent, SettingKeyLinuxDoConnectEnabled, } @@ -91,10 +95,62 @@ func (s *SettingService) GetPublicSettings(ctx context.Context) (*PublicSettings APIBaseURL: settings[SettingKeyAPIBaseURL], ContactInfo: settings[SettingKeyContactInfo], DocURL: settings[SettingKeyDocURL], + HomeContent: settings[SettingKeyHomeContent], 
LinuxDoOAuthEnabled: linuxDoEnabled, }, nil } +// SetOnUpdateCallback sets a callback function to be called when settings are updated +// This is used for cache invalidation (e.g., HTML cache in frontend server) +func (s *SettingService) SetOnUpdateCallback(callback func()) { + s.onUpdate = callback +} + +// SetVersion sets the application version for injection into public settings +func (s *SettingService) SetVersion(version string) { + s.version = version +} + +// GetPublicSettingsForInjection returns public settings in a format suitable for HTML injection +// This implements the web.PublicSettingsProvider interface +func (s *SettingService) GetPublicSettingsForInjection(ctx context.Context) (any, error) { + settings, err := s.GetPublicSettings(ctx) + if err != nil { + return nil, err + } + + // Return a struct that matches the frontend's expected format + return &struct { + RegistrationEnabled bool `json:"registration_enabled"` + EmailVerifyEnabled bool `json:"email_verify_enabled"` + TurnstileEnabled bool `json:"turnstile_enabled"` + TurnstileSiteKey string `json:"turnstile_site_key,omitempty"` + SiteName string `json:"site_name"` + SiteLogo string `json:"site_logo,omitempty"` + SiteSubtitle string `json:"site_subtitle,omitempty"` + APIBaseURL string `json:"api_base_url,omitempty"` + ContactInfo string `json:"contact_info,omitempty"` + DocURL string `json:"doc_url,omitempty"` + HomeContent string `json:"home_content,omitempty"` + LinuxDoOAuthEnabled bool `json:"linuxdo_oauth_enabled"` + Version string `json:"version,omitempty"` + }{ + RegistrationEnabled: settings.RegistrationEnabled, + EmailVerifyEnabled: settings.EmailVerifyEnabled, + TurnstileEnabled: settings.TurnstileEnabled, + TurnstileSiteKey: settings.TurnstileSiteKey, + SiteName: settings.SiteName, + SiteLogo: settings.SiteLogo, + SiteSubtitle: settings.SiteSubtitle, + APIBaseURL: settings.APIBaseURL, + ContactInfo: settings.ContactInfo, + DocURL: settings.DocURL, + HomeContent: settings.HomeContent, + 
LinuxDoOAuthEnabled: settings.LinuxDoOAuthEnabled, + Version: s.version, + }, nil +} + // UpdateSettings 更新系统设置 func (s *SettingService) UpdateSettings(ctx context.Context, settings *SystemSettings) error { updates := make(map[string]string) @@ -121,7 +177,7 @@ func (s *SettingService) UpdateSettings(ctx context.Context, settings *SystemSet updates[SettingKeyTurnstileSecretKey] = settings.TurnstileSecretKey } - // LinuxDo Connect OAuth 登录(终端用户 SSO) + // LinuxDo Connect OAuth 登录 updates[SettingKeyLinuxDoConnectEnabled] = strconv.FormatBool(settings.LinuxDoConnectEnabled) updates[SettingKeyLinuxDoConnectClientID] = settings.LinuxDoConnectClientID updates[SettingKeyLinuxDoConnectRedirectURL] = settings.LinuxDoConnectRedirectURL @@ -136,6 +192,7 @@ func (s *SettingService) UpdateSettings(ctx context.Context, settings *SystemSet updates[SettingKeyAPIBaseURL] = settings.APIBaseURL updates[SettingKeyContactInfo] = settings.ContactInfo updates[SettingKeyDocURL] = settings.DocURL + updates[SettingKeyHomeContent] = settings.HomeContent // 默认配置 updates[SettingKeyDefaultConcurrency] = strconv.Itoa(settings.DefaultConcurrency) @@ -152,7 +209,19 @@ func (s *SettingService) UpdateSettings(ctx context.Context, settings *SystemSet updates[SettingKeyEnableIdentityPatch] = strconv.FormatBool(settings.EnableIdentityPatch) updates[SettingKeyIdentityPatchPrompt] = settings.IdentityPatchPrompt - return s.settingRepo.SetMultiple(ctx, updates) + // Ops monitoring (vNext) + updates[SettingKeyOpsMonitoringEnabled] = strconv.FormatBool(settings.OpsMonitoringEnabled) + updates[SettingKeyOpsRealtimeMonitoringEnabled] = strconv.FormatBool(settings.OpsRealtimeMonitoringEnabled) + updates[SettingKeyOpsQueryModeDefault] = string(ParseOpsQueryMode(settings.OpsQueryModeDefault)) + if settings.OpsMetricsIntervalSeconds > 0 { + updates[SettingKeyOpsMetricsIntervalSeconds] = strconv.Itoa(settings.OpsMetricsIntervalSeconds) + } + + err := s.settingRepo.SetMultiple(ctx, updates) + if err == nil && 
s.onUpdate != nil { + s.onUpdate() // Invalidate cache after settings update + } + return err } // IsRegistrationEnabled 检查是否开放注册 @@ -238,6 +307,12 @@ func (s *SettingService) InitializeDefaultSettings(ctx context.Context) error { // Identity patch defaults SettingKeyEnableIdentityPatch: "true", SettingKeyIdentityPatchPrompt: "", + + // Ops monitoring defaults (vNext) + SettingKeyOpsMonitoringEnabled: "true", + SettingKeyOpsRealtimeMonitoringEnabled: "true", + SettingKeyOpsQueryModeDefault: "auto", + SettingKeyOpsMetricsIntervalSeconds: "60", } return s.settingRepo.SetMultiple(ctx, defaults) @@ -263,6 +338,7 @@ func (s *SettingService) parseSettings(settings map[string]string) *SystemSettin APIBaseURL: settings[SettingKeyAPIBaseURL], ContactInfo: settings[SettingKeyContactInfo], DocURL: settings[SettingKeyDocURL], + HomeContent: settings[SettingKeyHomeContent], } // 解析整数类型 @@ -336,100 +412,33 @@ func (s *SettingService) parseSettings(settings map[string]string) *SystemSettin } result.IdentityPatchPrompt = settings[SettingKeyIdentityPatchPrompt] + // Ops monitoring settings (default: enabled, fail-open) + result.OpsMonitoringEnabled = !isFalseSettingValue(settings[SettingKeyOpsMonitoringEnabled]) + result.OpsRealtimeMonitoringEnabled = !isFalseSettingValue(settings[SettingKeyOpsRealtimeMonitoringEnabled]) + result.OpsQueryModeDefault = string(ParseOpsQueryMode(settings[SettingKeyOpsQueryModeDefault])) + result.OpsMetricsIntervalSeconds = 60 + if raw := strings.TrimSpace(settings[SettingKeyOpsMetricsIntervalSeconds]); raw != "" { + if v, err := strconv.Atoi(raw); err == nil { + if v < 60 { + v = 60 + } + if v > 3600 { + v = 3600 + } + result.OpsMetricsIntervalSeconds = v + } + } + return result } -// GetLinuxDoConnectOAuthConfig 返回用于登录的“最终生效” LinuxDo Connect 配置。 -// -// 优先级: -// - 若对应系统设置键存在,则覆盖 config.yaml/env 的值 -// - 否则回退到 config.yaml/env 的值 -func (s *SettingService) GetLinuxDoConnectOAuthConfig(ctx context.Context) (config.LinuxDoConnectConfig, error) { - if s == 
nil || s.cfg == nil { - return config.LinuxDoConnectConfig{}, infraerrors.ServiceUnavailable("CONFIG_NOT_READY", "config not loaded") - } - - effective := s.cfg.LinuxDo - - keys := []string{ - SettingKeyLinuxDoConnectEnabled, - SettingKeyLinuxDoConnectClientID, - SettingKeyLinuxDoConnectClientSecret, - SettingKeyLinuxDoConnectRedirectURL, - } - settings, err := s.settingRepo.GetMultiple(ctx, keys) - if err != nil { - return config.LinuxDoConnectConfig{}, fmt.Errorf("get linuxdo connect settings: %w", err) - } - - if raw, ok := settings[SettingKeyLinuxDoConnectEnabled]; ok { - effective.Enabled = raw == "true" - } - if v, ok := settings[SettingKeyLinuxDoConnectClientID]; ok && strings.TrimSpace(v) != "" { - effective.ClientID = strings.TrimSpace(v) - } - if v, ok := settings[SettingKeyLinuxDoConnectClientSecret]; ok && strings.TrimSpace(v) != "" { - effective.ClientSecret = strings.TrimSpace(v) - } - if v, ok := settings[SettingKeyLinuxDoConnectRedirectURL]; ok && strings.TrimSpace(v) != "" { - effective.RedirectURL = strings.TrimSpace(v) - } - - if !effective.Enabled { - return config.LinuxDoConnectConfig{}, infraerrors.NotFound("OAUTH_DISABLED", "oauth login is disabled") - } - - // 基础健壮性校验(避免把用户重定向到一个必然失败或不安全的 OAuth 流程里)。 - if strings.TrimSpace(effective.ClientID) == "" { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth client id not configured") - } - if strings.TrimSpace(effective.AuthorizeURL) == "" { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth authorize url not configured") - } - if strings.TrimSpace(effective.TokenURL) == "" { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth token url not configured") - } - if strings.TrimSpace(effective.UserInfoURL) == "" { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth userinfo url not configured") - } - if 
strings.TrimSpace(effective.RedirectURL) == "" { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth redirect url not configured") - } - if strings.TrimSpace(effective.FrontendRedirectURL) == "" { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth frontend redirect url not configured") - } - - if err := config.ValidateAbsoluteHTTPURL(effective.AuthorizeURL); err != nil { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth authorize url invalid") - } - if err := config.ValidateAbsoluteHTTPURL(effective.TokenURL); err != nil { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth token url invalid") - } - if err := config.ValidateAbsoluteHTTPURL(effective.UserInfoURL); err != nil { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth userinfo url invalid") - } - if err := config.ValidateAbsoluteHTTPURL(effective.RedirectURL); err != nil { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth redirect url invalid") - } - if err := config.ValidateFrontendRedirectURL(effective.FrontendRedirectURL); err != nil { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth frontend redirect url invalid") - } - - method := strings.ToLower(strings.TrimSpace(effective.TokenAuthMethod)) - switch method { - case "", "client_secret_post", "client_secret_basic": - if strings.TrimSpace(effective.ClientSecret) == "" { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth client secret not configured") - } - case "none": - if !effective.UsePKCE { - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth pkce must be enabled when token_auth_method=none") - } +func isFalseSettingValue(value string) 
bool { + switch strings.ToLower(strings.TrimSpace(value)) { + case "false", "0", "off", "disabled": + return true default: - return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth token_auth_method invalid") + return false } - - return effective, nil } // getStringOrDefault 获取字符串值或默认值 @@ -574,3 +583,177 @@ func (s *SettingService) GetFallbackModel(ctx context.Context, platform string) } return value } + +// GetLinuxDoConnectOAuthConfig 返回用于登录的"最终生效" LinuxDo Connect 配置。 +// +// 优先级: +// - 若对应系统设置键存在,则覆盖 config.yaml/env 的值 +// - 否则回退到 config.yaml/env 的值 +func (s *SettingService) GetLinuxDoConnectOAuthConfig(ctx context.Context) (config.LinuxDoConnectConfig, error) { + if s == nil || s.cfg == nil { + return config.LinuxDoConnectConfig{}, infraerrors.ServiceUnavailable("CONFIG_NOT_READY", "config not loaded") + } + + effective := s.cfg.LinuxDo + + keys := []string{ + SettingKeyLinuxDoConnectEnabled, + SettingKeyLinuxDoConnectClientID, + SettingKeyLinuxDoConnectClientSecret, + SettingKeyLinuxDoConnectRedirectURL, + } + settings, err := s.settingRepo.GetMultiple(ctx, keys) + if err != nil { + return config.LinuxDoConnectConfig{}, fmt.Errorf("get linuxdo connect settings: %w", err) + } + + if raw, ok := settings[SettingKeyLinuxDoConnectEnabled]; ok { + effective.Enabled = raw == "true" + } + if v, ok := settings[SettingKeyLinuxDoConnectClientID]; ok && strings.TrimSpace(v) != "" { + effective.ClientID = strings.TrimSpace(v) + } + if v, ok := settings[SettingKeyLinuxDoConnectClientSecret]; ok && strings.TrimSpace(v) != "" { + effective.ClientSecret = strings.TrimSpace(v) + } + if v, ok := settings[SettingKeyLinuxDoConnectRedirectURL]; ok && strings.TrimSpace(v) != "" { + effective.RedirectURL = strings.TrimSpace(v) + } + + if !effective.Enabled { + return config.LinuxDoConnectConfig{}, infraerrors.NotFound("OAUTH_DISABLED", "oauth login is disabled") + } + + // 基础健壮性校验(避免把用户重定向到一个必然失败或不安全的 OAuth 流程里)。 + if 
strings.TrimSpace(effective.ClientID) == "" { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth client id not configured") + } + if strings.TrimSpace(effective.AuthorizeURL) == "" { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth authorize url not configured") + } + if strings.TrimSpace(effective.TokenURL) == "" { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth token url not configured") + } + if strings.TrimSpace(effective.UserInfoURL) == "" { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth userinfo url not configured") + } + if strings.TrimSpace(effective.RedirectURL) == "" { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth redirect url not configured") + } + if strings.TrimSpace(effective.FrontendRedirectURL) == "" { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth frontend redirect url not configured") + } + + if err := config.ValidateAbsoluteHTTPURL(effective.AuthorizeURL); err != nil { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth authorize url invalid") + } + if err := config.ValidateAbsoluteHTTPURL(effective.TokenURL); err != nil { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth token url invalid") + } + if err := config.ValidateAbsoluteHTTPURL(effective.UserInfoURL); err != nil { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth userinfo url invalid") + } + if err := config.ValidateAbsoluteHTTPURL(effective.RedirectURL); err != nil { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth redirect url invalid") + } + if err := 
config.ValidateFrontendRedirectURL(effective.FrontendRedirectURL); err != nil { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth frontend redirect url invalid") + } + + method := strings.ToLower(strings.TrimSpace(effective.TokenAuthMethod)) + switch method { + case "", "client_secret_post", "client_secret_basic": + if strings.TrimSpace(effective.ClientSecret) == "" { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth client secret not configured") + } + case "none": + if !effective.UsePKCE { + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth pkce must be enabled when token_auth_method=none") + } + default: + return config.LinuxDoConnectConfig{}, infraerrors.InternalServer("OAUTH_CONFIG_INVALID", "oauth token_auth_method invalid") + } + + return effective, nil +} + +// GetStreamTimeoutSettings 获取流超时处理配置 +func (s *SettingService) GetStreamTimeoutSettings(ctx context.Context) (*StreamTimeoutSettings, error) { + value, err := s.settingRepo.GetValue(ctx, SettingKeyStreamTimeoutSettings) + if err != nil { + if errors.Is(err, ErrSettingNotFound) { + return DefaultStreamTimeoutSettings(), nil + } + return nil, fmt.Errorf("get stream timeout settings: %w", err) + } + if value == "" { + return DefaultStreamTimeoutSettings(), nil + } + + var settings StreamTimeoutSettings + if err := json.Unmarshal([]byte(value), &settings); err != nil { + return DefaultStreamTimeoutSettings(), nil + } + + // 验证并修正配置值 + if settings.TempUnschedMinutes < 1 { + settings.TempUnschedMinutes = 1 + } + if settings.TempUnschedMinutes > 60 { + settings.TempUnschedMinutes = 60 + } + if settings.ThresholdCount < 1 { + settings.ThresholdCount = 1 + } + if settings.ThresholdCount > 10 { + settings.ThresholdCount = 10 + } + if settings.ThresholdWindowMinutes < 1 { + settings.ThresholdWindowMinutes = 1 + } + if settings.ThresholdWindowMinutes > 60 { + 
settings.ThresholdWindowMinutes = 60 + } + + // 验证 action + switch settings.Action { + case StreamTimeoutActionTempUnsched, StreamTimeoutActionError, StreamTimeoutActionNone: + // valid + default: + settings.Action = StreamTimeoutActionTempUnsched + } + + return &settings, nil +} + +// SetStreamTimeoutSettings 设置流超时处理配置 +func (s *SettingService) SetStreamTimeoutSettings(ctx context.Context, settings *StreamTimeoutSettings) error { + if settings == nil { + return fmt.Errorf("settings cannot be nil") + } + + // 验证配置值 + if settings.TempUnschedMinutes < 1 || settings.TempUnschedMinutes > 60 { + return fmt.Errorf("temp_unsched_minutes must be between 1-60") + } + if settings.ThresholdCount < 1 || settings.ThresholdCount > 10 { + return fmt.Errorf("threshold_count must be between 1-10") + } + if settings.ThresholdWindowMinutes < 1 || settings.ThresholdWindowMinutes > 60 { + return fmt.Errorf("threshold_window_minutes must be between 1-60") + } + + switch settings.Action { + case StreamTimeoutActionTempUnsched, StreamTimeoutActionError, StreamTimeoutActionNone: + // valid + default: + return fmt.Errorf("invalid action: %s", settings.Action) + } + + data, err := json.Marshal(settings) + if err != nil { + return fmt.Errorf("marshal stream timeout settings: %w", err) + } + + return s.settingRepo.Set(ctx, SettingKeyStreamTimeoutSettings, string(data)) +} diff --git a/backend/internal/service/settings_view.go b/backend/internal/service/settings_view.go index 26051418..e4ee2826 100644 --- a/backend/internal/service/settings_view.go +++ b/backend/internal/service/settings_view.go @@ -18,7 +18,7 @@ type SystemSettings struct { TurnstileSecretKey string TurnstileSecretKeyConfigured bool - // LinuxDo Connect OAuth 登录(终端用户 SSO) + // LinuxDo Connect OAuth 登录 LinuxDoConnectEnabled bool LinuxDoConnectClientID string LinuxDoConnectClientSecret string @@ -31,6 +31,7 @@ type SystemSettings struct { APIBaseURL string ContactInfo string DocURL string + HomeContent string DefaultConcurrency 
int DefaultBalance float64 @@ -45,6 +46,12 @@ type SystemSettings struct { // Identity patch configuration (Claude -> Gemini) EnableIdentityPatch bool `json:"enable_identity_patch"` IdentityPatchPrompt string `json:"identity_patch_prompt"` + + // Ops monitoring (vNext) + OpsMonitoringEnabled bool + OpsRealtimeMonitoringEnabled bool + OpsQueryModeDefault string + OpsMetricsIntervalSeconds int } type PublicSettings struct { @@ -58,6 +65,39 @@ type PublicSettings struct { APIBaseURL string ContactInfo string DocURL string + HomeContent string LinuxDoOAuthEnabled bool Version string } + +// StreamTimeoutSettings 流超时处理配置(仅控制超时后的处理方式,超时判定由网关配置控制) +type StreamTimeoutSettings struct { + // Enabled 是否启用流超时处理 + Enabled bool `json:"enabled"` + // Action 超时后的处理方式: "temp_unsched" | "error" | "none" + Action string `json:"action"` + // TempUnschedMinutes 临时不可调度持续时间(分钟) + TempUnschedMinutes int `json:"temp_unsched_minutes"` + // ThresholdCount 触发阈值次数(累计多少次超时才触发) + ThresholdCount int `json:"threshold_count"` + // ThresholdWindowMinutes 阈值窗口时间(分钟) + ThresholdWindowMinutes int `json:"threshold_window_minutes"` +} + +// StreamTimeoutAction 流超时处理方式常量 +const ( + StreamTimeoutActionTempUnsched = "temp_unsched" // 临时不可调度 + StreamTimeoutActionError = "error" // 标记为错误状态 + StreamTimeoutActionNone = "none" // 不处理 +) + +// DefaultStreamTimeoutSettings 返回默认的流超时配置 +func DefaultStreamTimeoutSettings() *StreamTimeoutSettings { + return &StreamTimeoutSettings{ + Enabled: false, + Action: StreamTimeoutActionTempUnsched, + TempUnschedMinutes: 5, + ThresholdCount: 3, + ThresholdWindowMinutes: 10, + } +} diff --git a/backend/internal/service/temp_unsched.go b/backend/internal/service/temp_unsched.go index fcb5025e..3871b72b 100644 --- a/backend/internal/service/temp_unsched.go +++ b/backend/internal/service/temp_unsched.go @@ -2,6 +2,7 @@ package service import ( "context" + "time" ) // TempUnschedState 临时不可调度状态 @@ -20,3 +21,16 @@ type TempUnschedCache interface { GetTempUnsched(ctx context.Context, 
accountID int64) (*TempUnschedState, error) DeleteTempUnsched(ctx context.Context, accountID int64) error } + +// TimeoutCounterCache 超时计数器缓存接口 +type TimeoutCounterCache interface { + // IncrementTimeoutCount 增加账户的超时计数,返回当前计数值 + // windowMinutes 是计数窗口时间(分钟),超过此时间计数器会自动重置 + IncrementTimeoutCount(ctx context.Context, accountID int64, windowMinutes int) (int64, error) + // GetTimeoutCount 获取账户当前的超时计数 + GetTimeoutCount(ctx context.Context, accountID int64) (int64, error) + // ResetTimeoutCount 重置账户的超时计数 + ResetTimeoutCount(ctx context.Context, accountID int64) error + // GetTimeoutCountTTL 获取计数器剩余过期时间 + GetTimeoutCountTTL(ctx context.Context, accountID int64) (time.Duration, error) +} diff --git a/backend/internal/service/timing_wheel_service.go b/backend/internal/service/timing_wheel_service.go index c4e64e33..5a2dea75 100644 --- a/backend/internal/service/timing_wheel_service.go +++ b/backend/internal/service/timing_wheel_service.go @@ -1,6 +1,7 @@ package service import ( + "fmt" "log" "sync" "time" @@ -8,6 +9,8 @@ import ( "github.com/zeromicro/go-zero/core/collection" ) +var newTimingWheel = collection.NewTimingWheel + // TimingWheelService wraps go-zero's TimingWheel for task scheduling type TimingWheelService struct { tw *collection.TimingWheel @@ -15,18 +18,18 @@ type TimingWheelService struct { } // NewTimingWheelService creates a new TimingWheelService instance -func NewTimingWheelService() *TimingWheelService { +func NewTimingWheelService() (*TimingWheelService, error) { // 1 second tick, 3600 slots = supports up to 1 hour delay // execute function: runs func() type tasks - tw, err := collection.NewTimingWheel(1*time.Second, 3600, func(key, value any) { + tw, err := newTimingWheel(1*time.Second, 3600, func(key, value any) { if fn, ok := value.(func()); ok { fn() } }) if err != nil { - panic(err) + return nil, fmt.Errorf("创建 timing wheel 失败: %w", err) } - return &TimingWheelService{tw: tw} + return &TimingWheelService{tw: tw}, nil } // Start starts the timing 
wheel diff --git a/backend/internal/service/timing_wheel_service_test.go b/backend/internal/service/timing_wheel_service_test.go new file mode 100644 index 00000000..cd0bffb7 --- /dev/null +++ b/backend/internal/service/timing_wheel_service_test.go @@ -0,0 +1,146 @@ +package service + +import ( + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/zeromicro/go-zero/core/collection" +) + +func TestNewTimingWheelService_InitFail_NoPanicAndReturnError(t *testing.T) { + original := newTimingWheel + t.Cleanup(func() { newTimingWheel = original }) + + newTimingWheel = func(_ time.Duration, _ int, _ collection.Execute) (*collection.TimingWheel, error) { + return nil, errors.New("boom") + } + + svc, err := NewTimingWheelService() + if err == nil { + t.Fatalf("期望返回 error,但得到 nil") + } + if svc != nil { + t.Fatalf("期望返回 nil svc,但得到非空") + } +} + +func TestNewTimingWheelService_Success(t *testing.T) { + svc, err := NewTimingWheelService() + if err != nil { + t.Fatalf("期望 err 为 nil,但得到: %v", err) + } + if svc == nil { + t.Fatalf("期望 svc 非空,但得到 nil") + } + svc.Stop() +} + +func TestNewTimingWheelService_ExecuteCallbackRunsFunc(t *testing.T) { + original := newTimingWheel + t.Cleanup(func() { newTimingWheel = original }) + + var captured collection.Execute + newTimingWheel = func(interval time.Duration, numSlots int, execute collection.Execute) (*collection.TimingWheel, error) { + captured = execute + return original(interval, numSlots, execute) + } + + svc, err := NewTimingWheelService() + if err != nil { + t.Fatalf("期望 err 为 nil,但得到: %v", err) + } + if captured == nil { + t.Fatalf("期望 captured 非空,但得到 nil") + } + + called := false + captured("k", func() { called = true }) + if !called { + t.Fatalf("期望 execute 回调触发传入函数执行") + } + + svc.Stop() +} + +func TestTimingWheelService_Schedule_ExecutesOnce(t *testing.T) { + original := newTimingWheel + t.Cleanup(func() { newTimingWheel = original }) + + newTimingWheel = func(_ time.Duration, _ int, execute collection.Execute) 
(*collection.TimingWheel, error) { + return original(10*time.Millisecond, 128, execute) + } + + svc, err := NewTimingWheelService() + if err != nil { + t.Fatalf("期望 err 为 nil,但得到: %v", err) + } + defer svc.Stop() + + ch := make(chan struct{}, 1) + svc.Schedule("once", 30*time.Millisecond, func() { ch <- struct{}{} }) + + select { + case <-ch: + case <-time.After(500 * time.Millisecond): + t.Fatalf("等待任务执行超时") + } + + select { + case <-ch: + t.Fatalf("任务不应重复执行") + case <-time.After(80 * time.Millisecond): + } +} + +func TestTimingWheelService_Cancel_PreventsExecution(t *testing.T) { + original := newTimingWheel + t.Cleanup(func() { newTimingWheel = original }) + + newTimingWheel = func(_ time.Duration, _ int, execute collection.Execute) (*collection.TimingWheel, error) { + return original(10*time.Millisecond, 128, execute) + } + + svc, err := NewTimingWheelService() + if err != nil { + t.Fatalf("期望 err 为 nil,但得到: %v", err) + } + defer svc.Stop() + + ch := make(chan struct{}, 1) + svc.Schedule("cancel", 80*time.Millisecond, func() { ch <- struct{}{} }) + svc.Cancel("cancel") + + select { + case <-ch: + t.Fatalf("任务已取消,不应执行") + case <-time.After(200 * time.Millisecond): + } +} + +func TestTimingWheelService_ScheduleRecurring_ExecutesMultipleTimes(t *testing.T) { + original := newTimingWheel + t.Cleanup(func() { newTimingWheel = original }) + + newTimingWheel = func(_ time.Duration, _ int, execute collection.Execute) (*collection.TimingWheel, error) { + return original(10*time.Millisecond, 128, execute) + } + + svc, err := NewTimingWheelService() + if err != nil { + t.Fatalf("期望 err 为 nil,但得到: %v", err) + } + defer svc.Stop() + + var count int32 + svc.ScheduleRecurring("rec", 30*time.Millisecond, func() { atomic.AddInt32(&count, 1) }) + + deadline := time.Now().Add(500 * time.Millisecond) + for atomic.LoadInt32(&count) < 2 && time.Now().Before(deadline) { + time.Sleep(10 * time.Millisecond) + } + if atomic.LoadInt32(&count) < 2 { + t.Fatalf("期望周期任务至少执行 2 次,但只执行了 %d 次", 
atomic.LoadInt32(&count)) + } +} diff --git a/backend/internal/service/token_cache_invalidator.go b/backend/internal/service/token_cache_invalidator.go new file mode 100644 index 00000000..1117d2f1 --- /dev/null +++ b/backend/internal/service/token_cache_invalidator.go @@ -0,0 +1,41 @@ +package service + +import "context" + +type TokenCacheInvalidator interface { + InvalidateToken(ctx context.Context, account *Account) error +} + +type CompositeTokenCacheInvalidator struct { + cache GeminiTokenCache // 统一使用一个缓存接口,通过缓存键前缀区分平台 +} + +func NewCompositeTokenCacheInvalidator(cache GeminiTokenCache) *CompositeTokenCacheInvalidator { + return &CompositeTokenCacheInvalidator{ + cache: cache, + } +} + +func (c *CompositeTokenCacheInvalidator) InvalidateToken(ctx context.Context, account *Account) error { + if c == nil || c.cache == nil || account == nil { + return nil + } + if account.Type != AccountTypeOAuth { + return nil + } + + var cacheKey string + switch account.Platform { + case PlatformGemini: + cacheKey = GeminiTokenCacheKey(account) + case PlatformAntigravity: + cacheKey = AntigravityTokenCacheKey(account) + case PlatformOpenAI: + cacheKey = OpenAITokenCacheKey(account) + case PlatformAnthropic: + cacheKey = ClaudeTokenCacheKey(account) + default: + return nil + } + return c.cache.DeleteAccessToken(ctx, cacheKey) +} diff --git a/backend/internal/service/token_cache_invalidator_test.go b/backend/internal/service/token_cache_invalidator_test.go new file mode 100644 index 00000000..30d208ce --- /dev/null +++ b/backend/internal/service/token_cache_invalidator_test.go @@ -0,0 +1,268 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type geminiTokenCacheStub struct { + deletedKeys []string + deleteErr error +} + +func (s *geminiTokenCacheStub) GetAccessToken(ctx context.Context, cacheKey string) (string, error) { + return "", nil +} + +func (s *geminiTokenCacheStub) 
SetAccessToken(ctx context.Context, cacheKey string, token string, ttl time.Duration) error { + return nil +} + +func (s *geminiTokenCacheStub) DeleteAccessToken(ctx context.Context, cacheKey string) error { + s.deletedKeys = append(s.deletedKeys, cacheKey) + return s.deleteErr +} + +func (s *geminiTokenCacheStub) AcquireRefreshLock(ctx context.Context, cacheKey string, ttl time.Duration) (bool, error) { + return true, nil +} + +func (s *geminiTokenCacheStub) ReleaseRefreshLock(ctx context.Context, cacheKey string) error { + return nil +} + +func TestCompositeTokenCacheInvalidator_Gemini(t *testing.T) { + cache := &geminiTokenCacheStub{} + invalidator := NewCompositeTokenCacheInvalidator(cache) + account := &Account{ + ID: 10, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "project_id": "project-x", + }, + } + + err := invalidator.InvalidateToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, []string{"gemini:project-x"}, cache.deletedKeys) +} + +func TestCompositeTokenCacheInvalidator_Antigravity(t *testing.T) { + cache := &geminiTokenCacheStub{} + invalidator := NewCompositeTokenCacheInvalidator(cache) + account := &Account{ + ID: 99, + Platform: PlatformAntigravity, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "project_id": "ag-project", + }, + } + + err := invalidator.InvalidateToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, []string{"ag:ag-project"}, cache.deletedKeys) +} + +func TestCompositeTokenCacheInvalidator_OpenAI(t *testing.T) { + cache := &geminiTokenCacheStub{} + invalidator := NewCompositeTokenCacheInvalidator(cache) + account := &Account{ + ID: 500, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "openai-token", + }, + } + + err := invalidator.InvalidateToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, []string{"openai:account:500"}, 
cache.deletedKeys) +} + +func TestCompositeTokenCacheInvalidator_Claude(t *testing.T) { + cache := &geminiTokenCacheStub{} + invalidator := NewCompositeTokenCacheInvalidator(cache) + account := &Account{ + ID: 600, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Credentials: map[string]any{ + "access_token": "claude-token", + }, + } + + err := invalidator.InvalidateToken(context.Background(), account) + require.NoError(t, err) + require.Equal(t, []string{"claude:account:600"}, cache.deletedKeys) +} + +func TestCompositeTokenCacheInvalidator_SkipNonOAuth(t *testing.T) { + cache := &geminiTokenCacheStub{} + invalidator := NewCompositeTokenCacheInvalidator(cache) + + tests := []struct { + name string + account *Account + }{ + { + name: "gemini_api_key", + account: &Account{ + ID: 1, + Platform: PlatformGemini, + Type: AccountTypeAPIKey, + }, + }, + { + name: "openai_api_key", + account: &Account{ + ID: 2, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + }, + }, + { + name: "claude_api_key", + account: &Account{ + ID: 3, + Platform: PlatformAnthropic, + Type: AccountTypeAPIKey, + }, + }, + { + name: "claude_setup_token", + account: &Account{ + ID: 4, + Platform: PlatformAnthropic, + Type: AccountTypeSetupToken, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cache.deletedKeys = nil + err := invalidator.InvalidateToken(context.Background(), tt.account) + require.NoError(t, err) + require.Empty(t, cache.deletedKeys) + }) + } +} + +func TestCompositeTokenCacheInvalidator_SkipUnsupportedPlatform(t *testing.T) { + cache := &geminiTokenCacheStub{} + invalidator := NewCompositeTokenCacheInvalidator(cache) + account := &Account{ + ID: 100, + Platform: "unknown-platform", + Type: AccountTypeOAuth, + } + + err := invalidator.InvalidateToken(context.Background(), account) + require.NoError(t, err) + require.Empty(t, cache.deletedKeys) +} + +func TestCompositeTokenCacheInvalidator_NilCache(t *testing.T) { + invalidator := 
NewCompositeTokenCacheInvalidator(nil) + account := &Account{ + ID: 2, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + + err := invalidator.InvalidateToken(context.Background(), account) + require.NoError(t, err) +} + +func TestCompositeTokenCacheInvalidator_NilAccount(t *testing.T) { + cache := &geminiTokenCacheStub{} + invalidator := NewCompositeTokenCacheInvalidator(cache) + + err := invalidator.InvalidateToken(context.Background(), nil) + require.NoError(t, err) + require.Empty(t, cache.deletedKeys) +} + +func TestCompositeTokenCacheInvalidator_NilInvalidator(t *testing.T) { + var invalidator *CompositeTokenCacheInvalidator + account := &Account{ + ID: 5, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + + err := invalidator.InvalidateToken(context.Background(), account) + require.NoError(t, err) +} + +func TestCompositeTokenCacheInvalidator_DeleteError(t *testing.T) { + expectedErr := errors.New("redis connection failed") + cache := &geminiTokenCacheStub{deleteErr: expectedErr} + invalidator := NewCompositeTokenCacheInvalidator(cache) + + tests := []struct { + name string + account *Account + }{ + { + name: "openai_delete_error", + account: &Account{ + ID: 700, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + }, + }, + { + name: "claude_delete_error", + account: &Account{ + ID: 800, + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := invalidator.InvalidateToken(context.Background(), tt.account) + require.Error(t, err) + require.Equal(t, expectedErr, err) + }) + } +} + +func TestCompositeTokenCacheInvalidator_AllPlatformsIntegration(t *testing.T) { + // 测试所有平台的缓存键生成和删除 + cache := &geminiTokenCacheStub{} + invalidator := NewCompositeTokenCacheInvalidator(cache) + + accounts := []*Account{ + {ID: 1, Platform: PlatformGemini, Type: AccountTypeOAuth, Credentials: map[string]any{"project_id": "gemini-proj"}}, + {ID: 2, Platform: 
PlatformAntigravity, Type: AccountTypeOAuth, Credentials: map[string]any{"project_id": "ag-proj"}}, + {ID: 3, Platform: PlatformOpenAI, Type: AccountTypeOAuth}, + {ID: 4, Platform: PlatformAnthropic, Type: AccountTypeOAuth}, + } + + expectedKeys := []string{ + "gemini:gemini-proj", + "ag:ag-proj", + "openai:account:3", + "claude:account:4", + } + + for _, acc := range accounts { + err := invalidator.InvalidateToken(context.Background(), acc) + require.NoError(t, err) + } + + require.Equal(t, expectedKeys, cache.deletedKeys) +} diff --git a/backend/internal/service/token_cache_key.go b/backend/internal/service/token_cache_key.go new file mode 100644 index 00000000..df0c025e --- /dev/null +++ b/backend/internal/service/token_cache_key.go @@ -0,0 +1,15 @@ +package service + +import "strconv" + +// OpenAITokenCacheKey 生成 OpenAI OAuth 账号的缓存键 +// 格式: "openai:account:{account_id}" +func OpenAITokenCacheKey(account *Account) string { + return "openai:account:" + strconv.FormatInt(account.ID, 10) +} + +// ClaudeTokenCacheKey 生成 Claude (Anthropic) OAuth 账号的缓存键 +// 格式: "claude:account:{account_id}" +func ClaudeTokenCacheKey(account *Account) string { + return "claude:account:" + strconv.FormatInt(account.ID, 10) +} diff --git a/backend/internal/service/token_cache_key_test.go b/backend/internal/service/token_cache_key_test.go new file mode 100644 index 00000000..6215eeaf --- /dev/null +++ b/backend/internal/service/token_cache_key_test.go @@ -0,0 +1,259 @@ +//go:build unit + +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGeminiTokenCacheKey(t *testing.T) { + tests := []struct { + name string + account *Account + expected string + }{ + { + name: "with_project_id", + account: &Account{ + ID: 100, + Credentials: map[string]any{ + "project_id": "my-project-123", + }, + }, + expected: "gemini:my-project-123", + }, + { + name: "project_id_with_whitespace", + account: &Account{ + ID: 101, + Credentials: map[string]any{ + 
"project_id": " project-with-spaces ", + }, + }, + expected: "gemini:project-with-spaces", + }, + { + name: "empty_project_id_fallback_to_account_id", + account: &Account{ + ID: 102, + Credentials: map[string]any{ + "project_id": "", + }, + }, + expected: "gemini:account:102", + }, + { + name: "whitespace_only_project_id_fallback_to_account_id", + account: &Account{ + ID: 103, + Credentials: map[string]any{ + "project_id": " ", + }, + }, + expected: "gemini:account:103", + }, + { + name: "no_project_id_key_fallback_to_account_id", + account: &Account{ + ID: 104, + Credentials: map[string]any{}, + }, + expected: "gemini:account:104", + }, + { + name: "nil_credentials_fallback_to_account_id", + account: &Account{ + ID: 105, + Credentials: nil, + }, + expected: "gemini:account:105", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := GeminiTokenCacheKey(tt.account) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestAntigravityTokenCacheKey(t *testing.T) { + tests := []struct { + name string + account *Account + expected string + }{ + { + name: "with_project_id", + account: &Account{ + ID: 200, + Credentials: map[string]any{ + "project_id": "ag-project-456", + }, + }, + expected: "ag:ag-project-456", + }, + { + name: "project_id_with_whitespace", + account: &Account{ + ID: 201, + Credentials: map[string]any{ + "project_id": " ag-project-spaces ", + }, + }, + expected: "ag:ag-project-spaces", + }, + { + name: "empty_project_id_fallback_to_account_id", + account: &Account{ + ID: 202, + Credentials: map[string]any{ + "project_id": "", + }, + }, + expected: "ag:account:202", + }, + { + name: "whitespace_only_project_id_fallback_to_account_id", + account: &Account{ + ID: 203, + Credentials: map[string]any{ + "project_id": " ", + }, + }, + expected: "ag:account:203", + }, + { + name: "no_project_id_key_fallback_to_account_id", + account: &Account{ + ID: 204, + Credentials: map[string]any{}, + }, + expected: "ag:account:204", 
+ }, + { + name: "nil_credentials_fallback_to_account_id", + account: &Account{ + ID: 205, + Credentials: nil, + }, + expected: "ag:account:205", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := AntigravityTokenCacheKey(tt.account) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestOpenAITokenCacheKey(t *testing.T) { + tests := []struct { + name string + account *Account + expected string + }{ + { + name: "basic_account", + account: &Account{ + ID: 300, + }, + expected: "openai:account:300", + }, + { + name: "account_with_credentials", + account: &Account{ + ID: 301, + Credentials: map[string]any{ + "access_token": "test-token", + }, + }, + expected: "openai:account:301", + }, + { + name: "account_id_zero", + account: &Account{ + ID: 0, + }, + expected: "openai:account:0", + }, + { + name: "large_account_id", + account: &Account{ + ID: 9999999999, + }, + expected: "openai:account:9999999999", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := OpenAITokenCacheKey(tt.account) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestClaudeTokenCacheKey(t *testing.T) { + tests := []struct { + name string + account *Account + expected string + }{ + { + name: "basic_account", + account: &Account{ + ID: 400, + }, + expected: "claude:account:400", + }, + { + name: "account_with_credentials", + account: &Account{ + ID: 401, + Credentials: map[string]any{ + "access_token": "claude-token", + }, + }, + expected: "claude:account:401", + }, + { + name: "account_id_zero", + account: &Account{ + ID: 0, + }, + expected: "claude:account:0", + }, + { + name: "large_account_id", + account: &Account{ + ID: 9999999999, + }, + expected: "claude:account:9999999999", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ClaudeTokenCacheKey(tt.account) + require.Equal(t, tt.expected, result) + }) + } +} + +func TestCacheKeyUniqueness(t *testing.T) { + // 
确保不同平台的缓存键不会冲突 + account := &Account{ID: 123} + + openaiKey := OpenAITokenCacheKey(account) + claudeKey := ClaudeTokenCacheKey(account) + + require.NotEqual(t, openaiKey, claudeKey, "OpenAI and Claude cache keys should be different") + require.Contains(t, openaiKey, "openai:") + require.Contains(t, claudeKey, "claude:") +} diff --git a/backend/internal/service/token_refresh_service.go b/backend/internal/service/token_refresh_service.go index 4ae4bec8..02e7d445 100644 --- a/backend/internal/service/token_refresh_service.go +++ b/backend/internal/service/token_refresh_service.go @@ -14,9 +14,10 @@ import ( // TokenRefreshService OAuth token自动刷新服务 // 定期检查并刷新即将过期的token type TokenRefreshService struct { - accountRepo AccountRepository - refreshers []TokenRefresher - cfg *config.TokenRefreshConfig + accountRepo AccountRepository + refreshers []TokenRefresher + cfg *config.TokenRefreshConfig + cacheInvalidator TokenCacheInvalidator stopCh chan struct{} wg sync.WaitGroup @@ -29,12 +30,14 @@ func NewTokenRefreshService( openaiOAuthService *OpenAIOAuthService, geminiOAuthService *GeminiOAuthService, antigravityOAuthService *AntigravityOAuthService, + cacheInvalidator TokenCacheInvalidator, cfg *config.Config, ) *TokenRefreshService { s := &TokenRefreshService{ - accountRepo: accountRepo, - cfg: &cfg.TokenRefresh, - stopCh: make(chan struct{}), + accountRepo: accountRepo, + cfg: &cfg.TokenRefresh, + cacheInvalidator: cacheInvalidator, + stopCh: make(chan struct{}), } // 注册平台特定的刷新器 @@ -183,6 +186,14 @@ func (s *TokenRefreshService) refreshWithRetry(ctx context.Context, account *Acc log.Printf("[TokenRefresh] Account %d: cleared missing_project_id error", account.ID) } } + // 对所有 OAuth 账号调用缓存失效(InvalidateToken 内部根据平台判断是否需要处理) + if s.cacheInvalidator != nil && account.Type == AccountTypeOAuth { + if err := s.cacheInvalidator.InvalidateToken(ctx, account); err != nil { + log.Printf("[TokenRefresh] Failed to invalidate token cache for account %d: %v", account.ID, err) + } else { + 
log.Printf("[TokenRefresh] Token cache invalidated for account %d", account.ID) + } + } return nil } diff --git a/backend/internal/service/token_refresh_service_test.go b/backend/internal/service/token_refresh_service_test.go new file mode 100644 index 00000000..d23a0bb6 --- /dev/null +++ b/backend/internal/service/token_refresh_service_test.go @@ -0,0 +1,361 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +type tokenRefreshAccountRepo struct { + mockAccountRepoForGemini + updateCalls int + setErrorCalls int + lastAccount *Account + updateErr error +} + +func (r *tokenRefreshAccountRepo) Update(ctx context.Context, account *Account) error { + r.updateCalls++ + r.lastAccount = account + return r.updateErr +} + +func (r *tokenRefreshAccountRepo) SetError(ctx context.Context, id int64, errorMsg string) error { + r.setErrorCalls++ + return nil +} + +type tokenCacheInvalidatorStub struct { + calls int + err error +} + +func (s *tokenCacheInvalidatorStub) InvalidateToken(ctx context.Context, account *Account) error { + s.calls++ + return s.err +} + +type tokenRefresherStub struct { + credentials map[string]any + err error +} + +func (r *tokenRefresherStub) CanRefresh(account *Account) bool { + return true +} + +func (r *tokenRefresherStub) NeedsRefresh(account *Account, refreshWindowDuration time.Duration) bool { + return true +} + +func (r *tokenRefresherStub) Refresh(ctx context.Context, account *Account) (map[string]any, error) { + if r.err != nil { + return nil, r.err + } + return r.credentials, nil +} + +func TestTokenRefreshService_RefreshWithRetry_InvalidatesCache(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, 
nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 5, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "new-token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 1, invalidator.calls) + require.Equal(t, "new-token", account.GetCredential("access_token")) +} + +func TestTokenRefreshService_RefreshWithRetry_InvalidatorErrorIgnored(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{err: errors.New("invalidate failed")} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 6, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 1, invalidator.calls) +} + +func TestTokenRefreshService_RefreshWithRetry_NilInvalidator(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, nil, cfg) + account := &Account{ + ID: 7, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) +} + +// TestTokenRefreshService_RefreshWithRetry_Antigravity 测试 
Antigravity 平台的缓存失效 +func TestTokenRefreshService_RefreshWithRetry_Antigravity(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 8, + Platform: PlatformAntigravity, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "ag-token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 1, invalidator.calls) // Antigravity 也应触发缓存失效 +} + +// TestTokenRefreshService_RefreshWithRetry_NonOAuthAccount 测试非 OAuth 账号不触发缓存失效 +func TestTokenRefreshService_RefreshWithRetry_NonOAuthAccount(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 9, + Platform: PlatformGemini, + Type: AccountTypeAPIKey, // 非 OAuth + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 0, invalidator.calls) // 非 OAuth 不触发缓存失效 +} + +// TestTokenRefreshService_RefreshWithRetry_OtherPlatformOAuth 测试所有 OAuth 平台都触发缓存失效 +func TestTokenRefreshService_RefreshWithRetry_OtherPlatformOAuth(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + 
RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 10, + Platform: PlatformOpenAI, // OpenAI OAuth 账户 + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.NoError(t, err) + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 1, invalidator.calls) // 所有 OAuth 账户刷新后触发缓存失效 +} + +// TestTokenRefreshService_RefreshWithRetry_UpdateFailed 测试更新失败的情况 +func TestTokenRefreshService_RefreshWithRetry_UpdateFailed(t *testing.T) { + repo := &tokenRefreshAccountRepo{updateErr: errors.New("update failed")} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 11, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + credentials: map[string]any{ + "access_token": "token", + }, + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to save credentials") + require.Equal(t, 1, repo.updateCalls) + require.Equal(t, 0, invalidator.calls) // 更新失败时不应触发缓存失效 +} + +// TestTokenRefreshService_RefreshWithRetry_RefreshFailed 测试刷新失败的情况 +func TestTokenRefreshService_RefreshWithRetry_RefreshFailed(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 2, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 12, + Platform: PlatformGemini, + Type: AccountTypeOAuth, + } + 
refresher := &tokenRefresherStub{ + err: errors.New("refresh failed"), + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.Error(t, err) + require.Equal(t, 0, repo.updateCalls) // 刷新失败不应更新 + require.Equal(t, 0, invalidator.calls) // 刷新失败不应触发缓存失效 + require.Equal(t, 1, repo.setErrorCalls) // 应设置错误状态 +} + +// TestTokenRefreshService_RefreshWithRetry_AntigravityRefreshFailed 测试 Antigravity 刷新失败不设置错误状态 +func TestTokenRefreshService_RefreshWithRetry_AntigravityRefreshFailed(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 1, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 13, + Platform: PlatformAntigravity, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + err: errors.New("network error"), // 可重试错误 + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + require.Error(t, err) + require.Equal(t, 0, repo.updateCalls) + require.Equal(t, 0, invalidator.calls) + require.Equal(t, 0, repo.setErrorCalls) // Antigravity 可重试错误不设置错误状态 +} + +// TestTokenRefreshService_RefreshWithRetry_AntigravityNonRetryableError 测试 Antigravity 不可重试错误 +func TestTokenRefreshService_RefreshWithRetry_AntigravityNonRetryableError(t *testing.T) { + repo := &tokenRefreshAccountRepo{} + invalidator := &tokenCacheInvalidatorStub{} + cfg := &config.Config{ + TokenRefresh: config.TokenRefreshConfig{ + MaxRetries: 3, + RetryBackoffSeconds: 0, + }, + } + service := NewTokenRefreshService(repo, nil, nil, nil, nil, invalidator, cfg) + account := &Account{ + ID: 14, + Platform: PlatformAntigravity, + Type: AccountTypeOAuth, + } + refresher := &tokenRefresherStub{ + err: errors.New("invalid_grant: token revoked"), // 不可重试错误 + } + + err := service.refreshWithRetry(context.Background(), account, refresher) + 
require.Error(t, err) + require.Equal(t, 0, repo.updateCalls) + require.Equal(t, 0, invalidator.calls) + require.Equal(t, 1, repo.setErrorCalls) // 不可重试错误应设置错误状态 +} + +// TestIsNonRetryableRefreshError 测试不可重试错误判断 +func TestIsNonRetryableRefreshError(t *testing.T) { + tests := []struct { + name string + err error + expected bool + }{ + {name: "nil_error", err: nil, expected: false}, + {name: "network_error", err: errors.New("network timeout"), expected: false}, + {name: "invalid_grant", err: errors.New("invalid_grant"), expected: true}, + {name: "invalid_client", err: errors.New("invalid_client"), expected: true}, + {name: "unauthorized_client", err: errors.New("unauthorized_client"), expected: true}, + {name: "access_denied", err: errors.New("access_denied"), expected: true}, + {name: "invalid_grant_with_desc", err: errors.New("Error: invalid_grant - token revoked"), expected: true}, + {name: "case_insensitive", err: errors.New("INVALID_GRANT"), expected: true}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := isNonRetryableRefreshError(tt.err) + require.Equal(t, tt.expected, result) + }) + } +} diff --git a/backend/internal/service/usage_log.go b/backend/internal/service/usage_log.go index 9ecb7098..3b0e934f 100644 --- a/backend/internal/service/usage_log.go +++ b/backend/internal/service/usage_log.go @@ -33,12 +33,15 @@ type UsageLog struct { TotalCost float64 ActualCost float64 RateMultiplier float64 + // AccountRateMultiplier 账号计费倍率快照(nil 表示历史数据,按 1.0 处理) + AccountRateMultiplier *float64 BillingType int8 Stream bool DurationMs *int FirstTokenMs *int UserAgent *string + IPAddress *string // 图片生成字段 ImageCount int diff --git a/backend/internal/service/usage_service.go b/backend/internal/service/usage_service.go index 10a294ae..aa0a5b87 100644 --- a/backend/internal/service/usage_service.go +++ b/backend/internal/service/usage_service.go @@ -54,17 +54,19 @@ type UsageStats struct { // UsageService 使用统计服务 type UsageService struct 
{ - usageRepo UsageLogRepository - userRepo UserRepository - entClient *dbent.Client + usageRepo UsageLogRepository + userRepo UserRepository + entClient *dbent.Client + authCacheInvalidator APIKeyAuthCacheInvalidator } // NewUsageService 创建使用统计服务实例 -func NewUsageService(usageRepo UsageLogRepository, userRepo UserRepository, entClient *dbent.Client) *UsageService { +func NewUsageService(usageRepo UsageLogRepository, userRepo UserRepository, entClient *dbent.Client, authCacheInvalidator APIKeyAuthCacheInvalidator) *UsageService { return &UsageService{ - usageRepo: usageRepo, - userRepo: userRepo, - entClient: entClient, + usageRepo: usageRepo, + userRepo: userRepo, + entClient: entClient, + authCacheInvalidator: authCacheInvalidator, } } @@ -118,10 +120,12 @@ func (s *UsageService) Create(ctx context.Context, req CreateUsageLogRequest) (* } // 扣除用户余额 + balanceUpdated := false if inserted && req.ActualCost > 0 { if err := s.userRepo.UpdateBalance(txCtx, req.UserID, -req.ActualCost); err != nil { return nil, fmt.Errorf("update user balance: %w", err) } + balanceUpdated = true } if tx != nil { @@ -130,9 +134,18 @@ func (s *UsageService) Create(ctx context.Context, req CreateUsageLogRequest) (* } } + s.invalidateUsageCaches(ctx, req.UserID, balanceUpdated) + return usageLog, nil } +func (s *UsageService) invalidateUsageCaches(ctx context.Context, userID int64, balanceUpdated bool) { + if !balanceUpdated || s.authCacheInvalidator == nil { + return + } + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) +} + // GetByID 根据ID获取使用日志 func (s *UsageService) GetByID(ctx context.Context, id int64) (*UsageLog, error) { log, err := s.usageRepo.GetByID(ctx, id) diff --git a/backend/internal/service/user_service.go b/backend/internal/service/user_service.go index 08fa40b5..1734914a 100644 --- a/backend/internal/service/user_service.go +++ b/backend/internal/service/user_service.go @@ -55,13 +55,15 @@ type ChangePasswordRequest struct { // UserService 用户服务 type 
UserService struct { - userRepo UserRepository + userRepo UserRepository + authCacheInvalidator APIKeyAuthCacheInvalidator } // NewUserService 创建用户服务实例 -func NewUserService(userRepo UserRepository) *UserService { +func NewUserService(userRepo UserRepository, authCacheInvalidator APIKeyAuthCacheInvalidator) *UserService { return &UserService{ - userRepo: userRepo, + userRepo: userRepo, + authCacheInvalidator: authCacheInvalidator, } } @@ -89,6 +91,7 @@ func (s *UserService) UpdateProfile(ctx context.Context, userID int64, req Updat if err != nil { return nil, fmt.Errorf("get user: %w", err) } + oldConcurrency := user.Concurrency // 更新字段 if req.Email != nil { @@ -114,6 +117,9 @@ func (s *UserService) UpdateProfile(ctx context.Context, userID int64, req Updat if err := s.userRepo.Update(ctx, user); err != nil { return nil, fmt.Errorf("update user: %w", err) } + if s.authCacheInvalidator != nil && user.Concurrency != oldConcurrency { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) + } return user, nil } @@ -169,6 +175,9 @@ func (s *UserService) UpdateBalance(ctx context.Context, userID int64, amount fl if err := s.userRepo.UpdateBalance(ctx, userID, amount); err != nil { return fmt.Errorf("update balance: %w", err) } + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) + } return nil } @@ -177,6 +186,9 @@ func (s *UserService) UpdateConcurrency(ctx context.Context, userID int64, concu if err := s.userRepo.UpdateConcurrency(ctx, userID, concurrency); err != nil { return fmt.Errorf("update concurrency: %w", err) } + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) + } return nil } @@ -192,12 +204,18 @@ func (s *UserService) UpdateStatus(ctx context.Context, userID int64, status str if err := s.userRepo.Update(ctx, user); err != nil { return fmt.Errorf("update user: %w", err) } + if s.authCacheInvalidator != nil { + 
s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) + } return nil } // Delete 删除用户(管理员功能) func (s *UserService) Delete(ctx context.Context, userID int64) error { + if s.authCacheInvalidator != nil { + s.authCacheInvalidator.InvalidateAuthCacheByUserID(ctx, userID) + } if err := s.userRepo.Delete(ctx, userID); err != nil { return fmt.Errorf("delete user: %w", err) } diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index cb73409b..acc0a5fb 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -1,10 +1,12 @@ package service import ( + "database/sql" "time" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/google/wire" + "github.com/redis/go-redis/v9" ) // BuildInfo contains build information @@ -40,9 +42,17 @@ func ProvideTokenRefreshService( openaiOAuthService *OpenAIOAuthService, geminiOAuthService *GeminiOAuthService, antigravityOAuthService *AntigravityOAuthService, + cacheInvalidator TokenCacheInvalidator, cfg *config.Config, ) *TokenRefreshService { - svc := NewTokenRefreshService(accountRepo, oauthService, openaiOAuthService, geminiOAuthService, antigravityOAuthService, cfg) + svc := NewTokenRefreshService(accountRepo, oauthService, openaiOAuthService, geminiOAuthService, antigravityOAuthService, cacheInvalidator, cfg) + svc.Start() + return svc +} + +// ProvideDashboardAggregationService 创建并启动仪表盘聚合服务 +func ProvideDashboardAggregationService(repo DashboardAggregationRepository, timingWheel *TimingWheelService, cfg *config.Config) *DashboardAggregationService { + svc := NewDashboardAggregationService(repo, timingWheel, cfg) svc.Start() return svc } @@ -55,10 +65,13 @@ func ProvideAccountExpiryService(accountRepo AccountRepository) *AccountExpirySe } // ProvideTimingWheelService creates and starts TimingWheelService -func ProvideTimingWheelService() *TimingWheelService { - svc := NewTimingWheelService() +func ProvideTimingWheelService() (*TimingWheelService, error) { + svc, err 
:= NewTimingWheelService() + if err != nil { + return nil, err + } svc.Start() - return svc + return svc, nil } // ProvideDeferredService creates and starts DeferredService @@ -77,16 +90,120 @@ func ProvideConcurrencyService(cache ConcurrencyCache, accountRepo AccountReposi return svc } +// ProvideSchedulerSnapshotService creates and starts SchedulerSnapshotService. +func ProvideSchedulerSnapshotService( + cache SchedulerCache, + outboxRepo SchedulerOutboxRepository, + accountRepo AccountRepository, + groupRepo GroupRepository, + cfg *config.Config, +) *SchedulerSnapshotService { + svc := NewSchedulerSnapshotService(cache, outboxRepo, accountRepo, groupRepo, cfg) + svc.Start() + return svc +} + +// ProvideRateLimitService creates RateLimitService with optional dependencies. +func ProvideRateLimitService( + accountRepo AccountRepository, + usageRepo UsageLogRepository, + cfg *config.Config, + geminiQuotaService *GeminiQuotaService, + tempUnschedCache TempUnschedCache, + timeoutCounterCache TimeoutCounterCache, + settingService *SettingService, + tokenCacheInvalidator TokenCacheInvalidator, +) *RateLimitService { + svc := NewRateLimitService(accountRepo, usageRepo, cfg, geminiQuotaService, tempUnschedCache) + svc.SetTimeoutCounterCache(timeoutCounterCache) + svc.SetSettingService(settingService) + svc.SetTokenCacheInvalidator(tokenCacheInvalidator) + return svc +} + +// ProvideOpsMetricsCollector creates and starts OpsMetricsCollector. +func ProvideOpsMetricsCollector( + opsRepo OpsRepository, + settingRepo SettingRepository, + accountRepo AccountRepository, + concurrencyService *ConcurrencyService, + db *sql.DB, + redisClient *redis.Client, + cfg *config.Config, +) *OpsMetricsCollector { + collector := NewOpsMetricsCollector(opsRepo, settingRepo, accountRepo, concurrencyService, db, redisClient, cfg) + collector.Start() + return collector +} + +// ProvideOpsAggregationService creates and starts OpsAggregationService (hourly/daily pre-aggregation). 
+func ProvideOpsAggregationService( + opsRepo OpsRepository, + settingRepo SettingRepository, + db *sql.DB, + redisClient *redis.Client, + cfg *config.Config, +) *OpsAggregationService { + svc := NewOpsAggregationService(opsRepo, settingRepo, db, redisClient, cfg) + svc.Start() + return svc +} + +// ProvideOpsAlertEvaluatorService creates and starts OpsAlertEvaluatorService. +func ProvideOpsAlertEvaluatorService( + opsService *OpsService, + opsRepo OpsRepository, + emailService *EmailService, + redisClient *redis.Client, + cfg *config.Config, +) *OpsAlertEvaluatorService { + svc := NewOpsAlertEvaluatorService(opsService, opsRepo, emailService, redisClient, cfg) + svc.Start() + return svc +} + +// ProvideOpsCleanupService creates and starts OpsCleanupService (cron scheduled). +func ProvideOpsCleanupService( + opsRepo OpsRepository, + db *sql.DB, + redisClient *redis.Client, + cfg *config.Config, +) *OpsCleanupService { + svc := NewOpsCleanupService(opsRepo, db, redisClient, cfg) + svc.Start() + return svc +} + +// ProvideOpsScheduledReportService creates and starts OpsScheduledReportService. 
+func ProvideOpsScheduledReportService( + opsService *OpsService, + userService *UserService, + emailService *EmailService, + redisClient *redis.Client, + cfg *config.Config, +) *OpsScheduledReportService { + svc := NewOpsScheduledReportService(opsService, userService, emailService, redisClient, cfg) + svc.Start() + return svc +} + +// ProvideAPIKeyAuthCacheInvalidator 提供 API Key 认证缓存失效能力 +func ProvideAPIKeyAuthCacheInvalidator(apiKeyService *APIKeyService) APIKeyAuthCacheInvalidator { + return apiKeyService +} + // ProviderSet is the Wire provider set for all services var ProviderSet = wire.NewSet( // Core services NewAuthService, NewUserService, NewAPIKeyService, + ProvideAPIKeyAuthCacheInvalidator, NewGroupService, NewAccountService, NewProxyService, NewRedeemService, + NewPromoService, NewUsageService, NewDashboardService, ProvidePricingService, @@ -99,26 +216,38 @@ var ProviderSet = wire.NewSet( NewOpenAIOAuthService, NewGeminiOAuthService, NewGeminiQuotaService, + NewCompositeTokenCacheInvalidator, + wire.Bind(new(TokenCacheInvalidator), new(*CompositeTokenCacheInvalidator)), NewAntigravityOAuthService, NewGeminiTokenProvider, NewGeminiMessagesCompatService, NewAntigravityTokenProvider, + NewOpenAITokenProvider, + NewClaudeTokenProvider, NewAntigravityGatewayService, - NewRateLimitService, + ProvideRateLimitService, NewAccountUsageService, NewAccountTestService, NewSettingService, + NewOpsService, + ProvideOpsMetricsCollector, + ProvideOpsAggregationService, + ProvideOpsAlertEvaluatorService, + ProvideOpsCleanupService, + ProvideOpsScheduledReportService, NewEmailService, ProvideEmailQueueService, NewTurnstileService, NewSubscriptionService, ProvideConcurrencyService, + ProvideSchedulerSnapshotService, NewIdentityService, NewCRSSyncService, ProvideUpdateService, ProvideTokenRefreshService, ProvideAccountExpiryService, ProvideTimingWheelService, + ProvideDashboardAggregationService, ProvideDeferredService, NewAntigravityQuotaFetcher, NewUserAttributeService, 
diff --git a/backend/internal/service/wire_test.go b/backend/internal/service/wire_test.go new file mode 100644 index 00000000..5f7866f6 --- /dev/null +++ b/backend/internal/service/wire_test.go @@ -0,0 +1,37 @@ +package service + +import ( + "errors" + "testing" + "time" + + "github.com/zeromicro/go-zero/core/collection" +) + +func TestProvideTimingWheelService_ReturnsError(t *testing.T) { + original := newTimingWheel + t.Cleanup(func() { newTimingWheel = original }) + + newTimingWheel = func(_ time.Duration, _ int, _ collection.Execute) (*collection.TimingWheel, error) { + return nil, errors.New("boom") + } + + svc, err := ProvideTimingWheelService() + if err == nil { + t.Fatalf("期望返回 error,但得到 nil") + } + if svc != nil { + t.Fatalf("期望返回 nil svc,但得到非空") + } +} + +func TestProvideTimingWheelService_Success(t *testing.T) { + svc, err := ProvideTimingWheelService() + if err != nil { + t.Fatalf("期望 err 为 nil,但得到: %v", err) + } + if svc == nil { + t.Fatalf("期望 svc 非空,但得到 nil") + } + svc.Stop() +} diff --git a/backend/internal/web/embed_off.go b/backend/internal/web/embed_off.go index 60a42bd3..346c31e9 100644 --- a/backend/internal/web/embed_off.go +++ b/backend/internal/web/embed_off.go @@ -4,11 +4,38 @@ package web import ( + "context" + "errors" "net/http" "github.com/gin-gonic/gin" ) +// PublicSettingsProvider is an interface to fetch public settings +// This stub is needed for compilation when frontend is not embedded +type PublicSettingsProvider interface { + GetPublicSettingsForInjection(ctx context.Context) (any, error) +} + +// FrontendServer is a stub for non-embed builds +type FrontendServer struct{} + +// NewFrontendServer returns an error when frontend is not embedded +func NewFrontendServer(settingsProvider PublicSettingsProvider) (*FrontendServer, error) { + return nil, errors.New("frontend not embedded") +} + +// InvalidateCache is a no-op for non-embed builds +func (s *FrontendServer) InvalidateCache() {} + +// Middleware returns a handler that 
returns 404 for non-embed builds +func (s *FrontendServer) Middleware() gin.HandlerFunc { + return func(c *gin.Context) { + c.String(http.StatusNotFound, "Frontend not embedded. Build with -tags embed to include frontend.") + c.Abort() + } +} + func ServeEmbeddedFrontend() gin.HandlerFunc { return func(c *gin.Context) { c.String(http.StatusNotFound, "Frontend not embedded. Build with -tags embed to include frontend.") diff --git a/backend/internal/web/embed_on.go b/backend/internal/web/embed_on.go index 0ee8d614..7f37d59c 100644 --- a/backend/internal/web/embed_on.go +++ b/backend/internal/web/embed_on.go @@ -3,18 +3,199 @@ package web import ( + "bytes" + "context" "embed" + "encoding/json" "io" "io/fs" "net/http" "strings" + "time" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/gin-gonic/gin" ) +const ( + // NonceHTMLPlaceholder is the placeholder for nonce in HTML script tags + NonceHTMLPlaceholder = "__CSP_NONCE_VALUE__" +) + //go:embed all:dist var frontendFS embed.FS +// PublicSettingsProvider is an interface to fetch public settings +type PublicSettingsProvider interface { + GetPublicSettingsForInjection(ctx context.Context) (any, error) +} + +// FrontendServer serves the embedded frontend with settings injection +type FrontendServer struct { + distFS fs.FS + fileServer http.Handler + baseHTML []byte + cache *HTMLCache + settings PublicSettingsProvider +} + +// NewFrontendServer creates a new frontend server with settings injection +func NewFrontendServer(settingsProvider PublicSettingsProvider) (*FrontendServer, error) { + distFS, err := fs.Sub(frontendFS, "dist") + if err != nil { + return nil, err + } + + // Read base HTML once + file, err := distFS.Open("index.html") + if err != nil { + return nil, err + } + defer func() { _ = file.Close() }() + + baseHTML, err := io.ReadAll(file) + if err != nil { + return nil, err + } + + cache := NewHTMLCache() + cache.SetBaseHTML(baseHTML) + + return &FrontendServer{ + distFS: distFS, + 
fileServer: http.FileServer(http.FS(distFS)), + baseHTML: baseHTML, + cache: cache, + settings: settingsProvider, + }, nil +} + +// InvalidateCache invalidates the HTML cache (call when settings change) +func (s *FrontendServer) InvalidateCache() { + if s != nil && s.cache != nil { + s.cache.Invalidate() + } +} + +// Middleware returns the Gin middleware handler +func (s *FrontendServer) Middleware() gin.HandlerFunc { + return func(c *gin.Context) { + path := c.Request.URL.Path + + // Skip API routes + if strings.HasPrefix(path, "/api/") || + strings.HasPrefix(path, "/v1/") || + strings.HasPrefix(path, "/v1beta/") || + strings.HasPrefix(path, "/antigravity/") || + strings.HasPrefix(path, "/setup/") || + path == "/health" || + path == "/responses" { + c.Next() + return + } + + cleanPath := strings.TrimPrefix(path, "/") + if cleanPath == "" { + cleanPath = "index.html" + } + + // For index.html or SPA routes, serve with injected settings + if cleanPath == "index.html" || !s.fileExists(cleanPath) { + s.serveIndexHTML(c) + return + } + + // Serve static files normally + s.fileServer.ServeHTTP(c.Writer, c.Request) + c.Abort() + } +} + +func (s *FrontendServer) fileExists(path string) bool { + file, err := s.distFS.Open(path) + if err != nil { + return false + } + _ = file.Close() + return true +} + +func (s *FrontendServer) serveIndexHTML(c *gin.Context) { + // Get nonce from context (generated by SecurityHeaders middleware) + nonce := middleware.GetNonceFromContext(c) + + // Check cache first + cached := s.cache.Get() + if cached != nil { + // Check If-None-Match for 304 response + if match := c.GetHeader("If-None-Match"); match == cached.ETag { + c.Status(http.StatusNotModified) + c.Abort() + return + } + + // Replace nonce placeholder with actual nonce before serving + content := replaceNoncePlaceholder(cached.Content, nonce) + + c.Header("ETag", cached.ETag) + c.Header("Cache-Control", "no-cache") // Must revalidate + c.Data(http.StatusOK, "text/html; 
charset=utf-8", content) + c.Abort() + return + } + + // Cache miss - fetch settings and render + ctx, cancel := context.WithTimeout(c.Request.Context(), 2*time.Second) + defer cancel() + + settings, err := s.settings.GetPublicSettingsForInjection(ctx) + if err != nil { + // Fallback: serve without injection + c.Data(http.StatusOK, "text/html; charset=utf-8", s.baseHTML) + c.Abort() + return + } + + settingsJSON, err := json.Marshal(settings) + if err != nil { + // Fallback: serve without injection + c.Data(http.StatusOK, "text/html; charset=utf-8", s.baseHTML) + c.Abort() + return + } + + rendered := s.injectSettings(settingsJSON) + s.cache.Set(rendered, settingsJSON) + + // Replace nonce placeholder with actual nonce before serving + content := replaceNoncePlaceholder(rendered, nonce) + + cached = s.cache.Get() + if cached != nil { + c.Header("ETag", cached.ETag) + } + c.Header("Cache-Control", "no-cache") + c.Data(http.StatusOK, "text/html; charset=utf-8", content) + c.Abort() +} + +func (s *FrontendServer) injectSettings(settingsJSON []byte) []byte { + // Create the script tag to inject with nonce placeholder + // The placeholder will be replaced with actual nonce at request time + script := []byte(``) + + // Inject before + headClose := []byte("") + return bytes.Replace(s.baseHTML, headClose, append(script, headClose...), 1) +} + +// replaceNoncePlaceholder replaces the nonce placeholder with actual nonce value +func replaceNoncePlaceholder(html []byte, nonce string) []byte { + return bytes.ReplaceAll(html, []byte(NonceHTMLPlaceholder), []byte(nonce)) +} + +// ServeEmbeddedFrontend returns a middleware for serving embedded frontend +// This is the legacy function for backward compatibility when no settings provider is available func ServeEmbeddedFrontend() gin.HandlerFunc { distFS, err := fs.Sub(frontendFS, "dist") if err != nil { diff --git a/backend/internal/web/embed_test.go b/backend/internal/web/embed_test.go new file mode 100644 index 00000000..50f5a323 
--- /dev/null +++ b/backend/internal/web/embed_test.go @@ -0,0 +1,660 @@ +//go:build embed + +package web + +import ( + "bytes" + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func init() { + gin.SetMode(gin.TestMode) +} + +func TestReplaceNoncePlaceholder(t *testing.T) { + t.Run("replaces_single_placeholder", func(t *testing.T) { + html := []byte(``) + nonce := "abc123xyz" + + result := replaceNoncePlaceholder(html, nonce) + + expected := `` + assert.Equal(t, expected, string(result)) + }) + + t.Run("replaces_multiple_placeholders", func(t *testing.T) { + html := []byte(``) + nonce := "nonce123" + + result := replaceNoncePlaceholder(html, nonce) + + assert.Equal(t, 2, strings.Count(string(result), `nonce="nonce123"`)) + assert.NotContains(t, string(result), NonceHTMLPlaceholder) + }) + + t.Run("handles_empty_nonce", func(t *testing.T) { + html := []byte(``) + nonce := "" + + result := replaceNoncePlaceholder(html, nonce) + + assert.Equal(t, ``, string(result)) + }) + + t.Run("no_placeholder_returns_unchanged", func(t *testing.T) { + html := []byte(``) + nonce := "abc123" + + result := replaceNoncePlaceholder(html, nonce) + + assert.Equal(t, string(html), string(result)) + }) + + t.Run("handles_empty_html", func(t *testing.T) { + html := []byte(``) + nonce := "abc123" + + result := replaceNoncePlaceholder(html, nonce) + + assert.Empty(t, result) + }) +} + +func TestNonceHTMLPlaceholder(t *testing.T) { + t.Run("constant_value", func(t *testing.T) { + assert.Equal(t, "__CSP_NONCE_VALUE__", NonceHTMLPlaceholder) + }) +} + +// mockSettingsProvider implements PublicSettingsProvider for testing +type mockSettingsProvider struct { + settings any + err error + called int +} + +func (m *mockSettingsProvider) GetPublicSettingsForInjection(ctx context.Context) (any, error) { + 
m.called++ + return m.settings, m.err +} + +func TestFrontendServer_InjectSettings(t *testing.T) { + t.Run("injects_settings_with_nonce_placeholder", func(t *testing.T) { + provider := &mockSettingsProvider{ + settings: map[string]string{"key": "value"}, + } + + server, err := NewFrontendServer(provider) + require.NoError(t, err) + + settingsJSON := []byte(`{"test":"data"}`) + result := server.injectSettings(settingsJSON) + + // Should contain the script with nonce placeholder + assert.Contains(t, string(result), ``) + }) + + t.Run("injects_before_head_close", func(t *testing.T) { + provider := &mockSettingsProvider{ + settings: map[string]string{"key": "value"}, + } + + server, err := NewFrontendServer(provider) + require.NoError(t, err) + + settingsJSON := []byte(`{}`) + result := server.injectSettings(settingsJSON) + + // Script should be injected before + headCloseIndex := bytes.Index(result, []byte("")) + scriptIndex := bytes.Index(result, []byte(``) + nonce := "abcdefghijklmnop123456==" + + b.ResetTimer() + for i := 0; i < b.N; i++ { + replaceNoncePlaceholder(html, nonce) + } +} + +func BenchmarkFrontendServerServeIndexHTML(b *testing.B) { + provider := &mockSettingsProvider{ + settings: map[string]string{"test": "value"}, + } + + server, _ := NewFrontendServer(provider) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodGet, "/", nil) + c.Set(middleware.CSPNonceKey, "test-nonce") + + server.serveIndexHTML(c) + } +} diff --git a/backend/internal/web/html_cache.go b/backend/internal/web/html_cache.go new file mode 100644 index 00000000..28269c89 --- /dev/null +++ b/backend/internal/web/html_cache.go @@ -0,0 +1,77 @@ +//go:build embed + +package web + +import ( + "crypto/sha256" + "encoding/hex" + "sync" +) + +// HTMLCache manages the cached index.html with injected settings +type HTMLCache struct { + mu sync.RWMutex + cachedHTML []byte + etag string + 
baseHTMLHash string // Hash of the original index.html (immutable after build) + settingsVersion uint64 // Incremented when settings change +} + +// CachedHTML represents the cache state +type CachedHTML struct { + Content []byte + ETag string +} + +// NewHTMLCache creates a new HTML cache instance +func NewHTMLCache() *HTMLCache { + return &HTMLCache{} +} + +// SetBaseHTML initializes the cache with the base HTML template +func (c *HTMLCache) SetBaseHTML(baseHTML []byte) { + c.mu.Lock() + defer c.mu.Unlock() + + hash := sha256.Sum256(baseHTML) + c.baseHTMLHash = hex.EncodeToString(hash[:8]) // First 8 bytes for brevity +} + +// Invalidate marks the cache as stale +func (c *HTMLCache) Invalidate() { + c.mu.Lock() + defer c.mu.Unlock() + + c.settingsVersion++ + c.cachedHTML = nil + c.etag = "" +} + +// Get returns the cached HTML or nil if cache is stale +func (c *HTMLCache) Get() *CachedHTML { + c.mu.RLock() + defer c.mu.RUnlock() + + if c.cachedHTML == nil { + return nil + } + return &CachedHTML{ + Content: c.cachedHTML, + ETag: c.etag, + } +} + +// Set updates the cache with new rendered HTML +func (c *HTMLCache) Set(html []byte, settingsJSON []byte) { + c.mu.Lock() + defer c.mu.Unlock() + + c.cachedHTML = html + c.etag = c.generateETag(settingsJSON) +} + +// generateETag creates an ETag from base HTML hash + settings hash +func (c *HTMLCache) generateETag(settingsJSON []byte) string { + settingsHash := sha256.Sum256(settingsJSON) + return `"` + c.baseHTMLHash + "-" + hex.EncodeToString(settingsHash[:8]) + `"` +} diff --git a/backend/migrations/031_add_ip_address.sql b/backend/migrations/031_add_ip_address.sql new file mode 100644 index 00000000..7f557830 --- /dev/null +++ b/backend/migrations/031_add_ip_address.sql @@ -0,0 +1,5 @@ +-- Add IP address field to usage_logs table for request tracking (admin-only visibility) +ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS ip_address VARCHAR(45); + +-- Create index for IP address queries +CREATE INDEX IF NOT EXISTS 
idx_usage_logs_ip_address ON usage_logs(ip_address); diff --git a/backend/migrations/032_add_api_key_ip_restriction.sql b/backend/migrations/032_add_api_key_ip_restriction.sql new file mode 100644 index 00000000..2dfe2c92 --- /dev/null +++ b/backend/migrations/032_add_api_key_ip_restriction.sql @@ -0,0 +1,9 @@ +-- Add IP restriction fields to api_keys table +-- ip_whitelist: JSON array of allowed IPs/CIDRs (if set, only these IPs can use the key) +-- ip_blacklist: JSON array of blocked IPs/CIDRs (these IPs are always blocked) + +ALTER TABLE api_keys ADD COLUMN IF NOT EXISTS ip_whitelist JSONB DEFAULT NULL; +ALTER TABLE api_keys ADD COLUMN IF NOT EXISTS ip_blacklist JSONB DEFAULT NULL; + +COMMENT ON COLUMN api_keys.ip_whitelist IS 'JSON array of allowed IPs/CIDRs, e.g. ["192.168.1.100", "10.0.0.0/8"]'; +COMMENT ON COLUMN api_keys.ip_blacklist IS 'JSON array of blocked IPs/CIDRs, e.g. ["1.2.3.4", "5.6.0.0/16"]'; diff --git a/backend/migrations/033_add_promo_codes.sql b/backend/migrations/033_add_promo_codes.sql new file mode 100644 index 00000000..7f6ae9a0 --- /dev/null +++ b/backend/migrations/033_add_promo_codes.sql @@ -0,0 +1,34 @@ +-- 创建注册优惠码表 +CREATE TABLE IF NOT EXISTS promo_codes ( + id BIGSERIAL PRIMARY KEY, + code VARCHAR(32) NOT NULL UNIQUE, + bonus_amount DECIMAL(20,8) NOT NULL DEFAULT 0, + max_uses INT NOT NULL DEFAULT 0, + used_count INT NOT NULL DEFAULT 0, + status VARCHAR(20) NOT NULL DEFAULT 'active', + expires_at TIMESTAMPTZ DEFAULT NULL, + notes TEXT DEFAULT NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- 创建优惠码使用记录表 +CREATE TABLE IF NOT EXISTS promo_code_usages ( + id BIGSERIAL PRIMARY KEY, + promo_code_id BIGINT NOT NULL REFERENCES promo_codes(id) ON DELETE CASCADE, + user_id BIGINT NOT NULL REFERENCES users(id) ON DELETE CASCADE, + bonus_amount DECIMAL(20,8) NOT NULL, + used_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + UNIQUE(promo_code_id, user_id) +); + +-- 索引 +CREATE INDEX IF NOT 
EXISTS idx_promo_codes_status ON promo_codes(status); +CREATE INDEX IF NOT EXISTS idx_promo_codes_expires_at ON promo_codes(expires_at); +CREATE INDEX IF NOT EXISTS idx_promo_code_usages_promo_code_id ON promo_code_usages(promo_code_id); +CREATE INDEX IF NOT EXISTS idx_promo_code_usages_user_id ON promo_code_usages(user_id); + +COMMENT ON TABLE promo_codes IS '注册优惠码'; +COMMENT ON TABLE promo_code_usages IS '优惠码使用记录'; +COMMENT ON COLUMN promo_codes.max_uses IS '最大使用次数,0表示无限制'; +COMMENT ON COLUMN promo_codes.status IS '状态: active, disabled'; diff --git a/backend/migrations/033_ops_monitoring_vnext.sql b/backend/migrations/033_ops_monitoring_vnext.sql new file mode 100644 index 00000000..a18c061d --- /dev/null +++ b/backend/migrations/033_ops_monitoring_vnext.sql @@ -0,0 +1,717 @@ +-- Ops Monitoring (vNext): squashed migration (030) +-- +-- This repository originally planned Ops vNext as migrations 030-036: +-- 030 drop legacy ops tables +-- 031 core schema +-- 032 pre-aggregation tables +-- 033 indexes + optional extensions +-- 034 add avg/max to preagg +-- 035 add notify_email to alert rules +-- 036 seed default alert rules +-- +-- Since these migrations have NOT been applied to any environment yet, we squash them +-- into a single 030 migration for easier review and a cleaner migration history. +-- +-- Notes: +-- - This is intentionally destructive for ops_* data (error logs / metrics / alerts). +-- - It is idempotent (DROP/CREATE/ALTER IF EXISTS/IF NOT EXISTS), but will wipe ops_* data if re-run. 
+ +-- ===================================================================== +-- 030_ops_drop_legacy_ops_tables.sql +-- ===================================================================== + +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- Legacy pre-aggregation tables (from 026 and/or previous branches) +DROP TABLE IF EXISTS ops_metrics_daily CASCADE; +DROP TABLE IF EXISTS ops_metrics_hourly CASCADE; + +-- Core ops tables that may exist in some deployments / branches +DROP TABLE IF EXISTS ops_system_metrics CASCADE; +DROP TABLE IF EXISTS ops_error_logs CASCADE; +DROP TABLE IF EXISTS ops_alert_events CASCADE; +DROP TABLE IF EXISTS ops_alert_rules CASCADE; +DROP TABLE IF EXISTS ops_job_heartbeats CASCADE; +DROP TABLE IF EXISTS ops_retry_attempts CASCADE; + +-- Optional legacy tables (best-effort cleanup) +DROP TABLE IF EXISTS ops_scheduled_reports CASCADE; +DROP TABLE IF EXISTS ops_group_availability_configs CASCADE; +DROP TABLE IF EXISTS ops_group_availability_events CASCADE; + +-- Optional legacy views/indexes +DROP VIEW IF EXISTS ops_latest_metrics CASCADE; + +-- ===================================================================== +-- 031_ops_core_schema.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): core schema (errors / retries / metrics / jobs / alerts) +-- +-- Design goals: +-- - Support global filtering (time/platform/group) across all ops modules. +-- - Persist enough context for two retry modes (client retry / pinned upstream retry). +-- - Make ops background jobs observable via job heartbeats. +-- - Keep schema stable and indexes targeted (high-write tables). +-- +-- Notes: +-- - This migration is idempotent. +-- - ops_* tables intentionally avoid strict foreign keys to reduce write amplification/locks. 
+ +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- ============================================ +-- 1) ops_error_logs: error log details (high-write) +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_error_logs ( + id BIGSERIAL PRIMARY KEY, + + -- Correlation / identities + request_id VARCHAR(64), + client_request_id VARCHAR(64), + user_id BIGINT, + api_key_id BIGINT, + account_id BIGINT, + group_id BIGINT, + client_ip inet, + + -- Dimensions for global filtering + platform VARCHAR(32), + + -- Request metadata + model VARCHAR(100), + request_path VARCHAR(256), + stream BOOLEAN NOT NULL DEFAULT false, + user_agent TEXT, + + -- Core error classification + error_phase VARCHAR(32) NOT NULL, + error_type VARCHAR(64) NOT NULL, + severity VARCHAR(8) NOT NULL DEFAULT 'P2', + status_code INT, + + -- vNext metric semantics + is_business_limited BOOLEAN NOT NULL DEFAULT false, + + -- Error details (sanitized/truncated at ingest time) + error_message TEXT, + error_body TEXT, + + -- Provider/upstream details (optional; useful for trends & account health) + error_source VARCHAR(64), + error_owner VARCHAR(32), + account_status VARCHAR(50), + upstream_status_code INT, + upstream_error_message TEXT, + upstream_error_detail TEXT, + provider_error_code VARCHAR(64), + provider_error_type VARCHAR(64), + network_error_type VARCHAR(50), + retry_after_seconds INT, + + -- Timings (ms) - optional + duration_ms INT, + time_to_first_token_ms BIGINT, + auth_latency_ms BIGINT, + routing_latency_ms BIGINT, + upstream_latency_ms BIGINT, + response_latency_ms BIGINT, + + -- Retry context (only stored for error requests) + request_body JSONB, + request_headers JSONB, + request_body_truncated BOOLEAN NOT NULL DEFAULT false, + request_body_bytes INT, + + -- Retryability flags (best-effort classification) + is_retryable BOOLEAN NOT NULL DEFAULT false, + retry_count INT NOT NULL DEFAULT 0, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() 
+); + +COMMENT ON TABLE ops_error_logs IS 'Ops error logs (vNext). Stores sanitized error details and request_body for retries (errors only).'; + +-- ============================================ +-- 2) ops_retry_attempts: audit log for retries +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_retry_attempts ( + id BIGSERIAL PRIMARY KEY, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + + requested_by_user_id BIGINT, + source_error_id BIGINT, + + -- client|upstream + mode VARCHAR(16) NOT NULL, + pinned_account_id BIGINT, + + -- queued|running|succeeded|failed + status VARCHAR(16) NOT NULL DEFAULT 'queued', + started_at TIMESTAMPTZ, + finished_at TIMESTAMPTZ, + duration_ms BIGINT, + + -- Optional result correlation + result_request_id VARCHAR(64), + result_error_id BIGINT, + result_usage_request_id VARCHAR(64), + + error_message TEXT +); + +COMMENT ON TABLE ops_retry_attempts IS 'Audit table for ops retries (client retry / pinned upstream retry).'; + +-- ============================================ +-- 3) ops_system_metrics: system + request window snapshots +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_system_metrics ( + id BIGSERIAL PRIMARY KEY, + + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + window_minutes INT NOT NULL DEFAULT 1, + + -- Optional dimensions (only if collector chooses to write per-dimension snapshots) + platform VARCHAR(32), + group_id BIGINT, + + -- Core counts + success_count BIGINT NOT NULL DEFAULT 0, + error_count_total BIGINT NOT NULL DEFAULT 0, + business_limited_count BIGINT NOT NULL DEFAULT 0, + error_count_sla BIGINT NOT NULL DEFAULT 0, + + upstream_error_count_excl_429_529 BIGINT NOT NULL DEFAULT 0, + upstream_429_count BIGINT NOT NULL DEFAULT 0, + upstream_529_count BIGINT NOT NULL DEFAULT 0, + + token_consumed BIGINT NOT NULL DEFAULT 0, + + -- Rates + qps DOUBLE PRECISION, + tps DOUBLE PRECISION, + + -- Duration percentiles (ms) - success requests + 
duration_p50_ms INT, + duration_p90_ms INT, + duration_p95_ms INT, + duration_p99_ms INT, + duration_avg_ms DOUBLE PRECISION, + duration_max_ms INT, + + -- TTFT percentiles (ms) - success requests (streaming) + ttft_p50_ms INT, + ttft_p90_ms INT, + ttft_p95_ms INT, + ttft_p99_ms INT, + ttft_avg_ms DOUBLE PRECISION, + ttft_max_ms INT, + + -- System resources + cpu_usage_percent DOUBLE PRECISION, + memory_used_mb BIGINT, + memory_total_mb BIGINT, + memory_usage_percent DOUBLE PRECISION, + + -- Dependency health (best-effort) + db_ok BOOLEAN, + redis_ok BOOLEAN, + + -- DB pool & runtime + db_conn_active INT, + db_conn_idle INT, + db_conn_waiting INT, + goroutine_count INT, + + -- Queue / concurrency + concurrency_queue_depth INT +); + +COMMENT ON TABLE ops_system_metrics IS 'Ops system/request metrics snapshots (vNext). Used for dashboard overview and realtime rates.'; + +-- ============================================ +-- 4) ops_job_heartbeats: background jobs health +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_job_heartbeats ( + job_name VARCHAR(64) PRIMARY KEY, + + last_run_at TIMESTAMPTZ, + last_success_at TIMESTAMPTZ, + last_error_at TIMESTAMPTZ, + last_error TEXT, + last_duration_ms BIGINT, + + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +COMMENT ON TABLE ops_job_heartbeats IS 'Ops background jobs heartbeats (vNext).'; + +-- ============================================ +-- 5) ops_alert_rules / ops_alert_events +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_alert_rules ( + id BIGSERIAL PRIMARY KEY, + + name VARCHAR(128) NOT NULL, + description TEXT, + enabled BOOLEAN NOT NULL DEFAULT true, + + severity VARCHAR(16) NOT NULL DEFAULT 'warning', + + -- Metric definition + -- Metric definition + metric_type VARCHAR(64) NOT NULL, + operator VARCHAR(8) NOT NULL, + threshold DOUBLE PRECISION NOT NULL, + + window_minutes INT NOT NULL DEFAULT 5, + sustained_minutes INT NOT NULL DEFAULT 5, + 
cooldown_minutes INT NOT NULL DEFAULT 10, + + -- Optional scoping: platform/group filters etc. + filters JSONB, + + last_triggered_at TIMESTAMPTZ, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_alert_rules_name_unique + ON ops_alert_rules (name); + +CREATE INDEX IF NOT EXISTS idx_ops_alert_rules_enabled + ON ops_alert_rules (enabled); + +CREATE TABLE IF NOT EXISTS ops_alert_events ( + id BIGSERIAL PRIMARY KEY, + + rule_id BIGINT, + severity VARCHAR(16) NOT NULL, + status VARCHAR(16) NOT NULL DEFAULT 'firing', + + title VARCHAR(200), + description TEXT, + + metric_value DOUBLE PRECISION, + threshold_value DOUBLE PRECISION, + dimensions JSONB, + + fired_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + resolved_at TIMESTAMPTZ, + + email_sent BOOLEAN NOT NULL DEFAULT false, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_ops_alert_events_rule_status + ON ops_alert_events (rule_id, status); + +CREATE INDEX IF NOT EXISTS idx_ops_alert_events_fired_at + ON ops_alert_events (fired_at DESC); + +-- ===================================================================== +-- 032_ops_preaggregation_tables.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): pre-aggregation tables +-- +-- Purpose: +-- - Provide stable query performance for 1–24h windows (and beyond), avoiding expensive +-- percentile_cont scans on raw logs for every dashboard refresh. +-- - Support global filter dimensions: overall / platform / group. +-- +-- Design note: +-- - We keep a single table with nullable platform/group_id, and enforce uniqueness via a +-- COALESCE-based unique index (because UNIQUE with NULLs allows duplicates in Postgres). 
+ +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- ============================================ +-- 1) ops_metrics_hourly +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_metrics_hourly ( + id BIGSERIAL PRIMARY KEY, + + bucket_start TIMESTAMPTZ NOT NULL, + platform VARCHAR(32), + group_id BIGINT, + + success_count BIGINT NOT NULL DEFAULT 0, + error_count_total BIGINT NOT NULL DEFAULT 0, + business_limited_count BIGINT NOT NULL DEFAULT 0, + error_count_sla BIGINT NOT NULL DEFAULT 0, + + upstream_error_count_excl_429_529 BIGINT NOT NULL DEFAULT 0, + upstream_429_count BIGINT NOT NULL DEFAULT 0, + upstream_529_count BIGINT NOT NULL DEFAULT 0, + + token_consumed BIGINT NOT NULL DEFAULT 0, + + -- Duration percentiles (ms) + duration_p50_ms INT, + duration_p90_ms INT, + duration_p95_ms INT, + duration_p99_ms INT, + + -- TTFT percentiles (ms) + ttft_p50_ms INT, + ttft_p90_ms INT, + ttft_p95_ms INT, + ttft_p99_ms INT, + + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +-- Uniqueness across three “dimension modes” (overall / platform / group). +-- Postgres UNIQUE treats NULLs as distinct, so we enforce uniqueness via COALESCE. 
+CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_metrics_hourly_unique_dim + ON ops_metrics_hourly ( + bucket_start, + COALESCE(platform, ''), + COALESCE(group_id, 0) + ); + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_bucket + ON ops_metrics_hourly (bucket_start DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_platform_bucket + ON ops_metrics_hourly (platform, bucket_start DESC) + WHERE platform IS NOT NULL AND platform <> '' AND group_id IS NULL; + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_hourly_group_bucket + ON ops_metrics_hourly (group_id, bucket_start DESC) + WHERE group_id IS NOT NULL AND group_id <> 0; + +COMMENT ON TABLE ops_metrics_hourly IS 'vNext hourly pre-aggregated ops metrics (overall/platform/group).'; + +-- ============================================ +-- 2) ops_metrics_daily (optional; for longer windows) +-- ============================================ + +CREATE TABLE IF NOT EXISTS ops_metrics_daily ( + id BIGSERIAL PRIMARY KEY, + + bucket_date DATE NOT NULL, + platform VARCHAR(32), + group_id BIGINT, + + success_count BIGINT NOT NULL DEFAULT 0, + error_count_total BIGINT NOT NULL DEFAULT 0, + business_limited_count BIGINT NOT NULL DEFAULT 0, + error_count_sla BIGINT NOT NULL DEFAULT 0, + + upstream_error_count_excl_429_529 BIGINT NOT NULL DEFAULT 0, + upstream_429_count BIGINT NOT NULL DEFAULT 0, + upstream_529_count BIGINT NOT NULL DEFAULT 0, + + token_consumed BIGINT NOT NULL DEFAULT 0, + + duration_p50_ms INT, + duration_p90_ms INT, + duration_p95_ms INT, + duration_p99_ms INT, + + ttft_p50_ms INT, + ttft_p90_ms INT, + ttft_p95_ms INT, + ttft_p99_ms INT, + + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_metrics_daily_unique_dim + ON ops_metrics_daily ( + bucket_date, + COALESCE(platform, ''), + COALESCE(group_id, 0) + ); + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_daily_bucket + ON ops_metrics_daily (bucket_date DESC); + +CREATE 
INDEX IF NOT EXISTS idx_ops_metrics_daily_platform_bucket + ON ops_metrics_daily (platform, bucket_date DESC) + WHERE platform IS NOT NULL AND platform <> '' AND group_id IS NULL; + +CREATE INDEX IF NOT EXISTS idx_ops_metrics_daily_group_bucket + ON ops_metrics_daily (group_id, bucket_date DESC) + WHERE group_id IS NOT NULL AND group_id <> 0; + +COMMENT ON TABLE ops_metrics_daily IS 'vNext daily pre-aggregated ops metrics (overall/platform/group).'; + +-- ===================================================================== +-- 033_ops_indexes_and_extensions.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): indexes and optional extensions +-- +-- This migration intentionally keeps "optional" objects (like pg_trgm) best-effort, +-- so environments without extension privileges won't fail the whole migration chain. + +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- ============================================ +-- 1) Core btree indexes (always safe) +-- ============================================ + +-- ops_error_logs +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_created_at + ON ops_error_logs (created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_platform_time + ON ops_error_logs (platform, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_group_time + ON ops_error_logs (group_id, created_at DESC) + WHERE group_id IS NOT NULL; + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_account_time + ON ops_error_logs (account_id, created_at DESC) + WHERE account_id IS NOT NULL; + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_status_time + ON ops_error_logs (status_code, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_phase_time + ON ops_error_logs (error_phase, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_type_time + ON ops_error_logs (error_type, created_at DESC); + +CREATE INDEX IF NOT EXISTS 
idx_ops_error_logs_request_id + ON ops_error_logs (request_id); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_client_request_id + ON ops_error_logs (client_request_id); + +-- ops_system_metrics +CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_created_at + ON ops_system_metrics (created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_window_time + ON ops_system_metrics (window_minutes, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_platform_time + ON ops_system_metrics (platform, created_at DESC) + WHERE platform IS NOT NULL AND platform <> '' AND group_id IS NULL; + +CREATE INDEX IF NOT EXISTS idx_ops_system_metrics_group_time + ON ops_system_metrics (group_id, created_at DESC) + WHERE group_id IS NOT NULL; + +-- ops_retry_attempts +CREATE INDEX IF NOT EXISTS idx_ops_retry_attempts_created_at + ON ops_retry_attempts (created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_retry_attempts_source_error + ON ops_retry_attempts (source_error_id, created_at DESC) + WHERE source_error_id IS NOT NULL; + +-- Prevent concurrent retries for the same ops_error_logs row (race-free, multi-instance safe). +CREATE UNIQUE INDEX IF NOT EXISTS idx_ops_retry_attempts_unique_active + ON ops_retry_attempts (source_error_id) + WHERE source_error_id IS NOT NULL AND status IN ('queued', 'running'); + +-- ============================================ +-- 2) Optional: pg_trgm + trigram indexes for fuzzy search +-- ============================================ + +DO $$ +BEGIN + BEGIN + CREATE EXTENSION IF NOT EXISTS pg_trgm; + EXCEPTION WHEN OTHERS THEN + -- Missing privileges or extension package should not block migrations. 
+ RAISE NOTICE 'pg_trgm extension not created: %', SQLERRM; + END; + + IF EXISTS (SELECT 1 FROM pg_extension WHERE extname = 'pg_trgm') THEN + -- request_id / client_request_id fuzzy search + EXECUTE 'CREATE INDEX IF NOT EXISTS idx_ops_error_logs_request_id_trgm + ON ops_error_logs USING gin (request_id gin_trgm_ops)'; + EXECUTE 'CREATE INDEX IF NOT EXISTS idx_ops_error_logs_client_request_id_trgm + ON ops_error_logs USING gin (client_request_id gin_trgm_ops)'; + + -- error_message fuzzy search + EXECUTE 'CREATE INDEX IF NOT EXISTS idx_ops_error_logs_error_message_trgm + ON ops_error_logs USING gin (error_message gin_trgm_ops)'; + END IF; +END $$; + +-- ===================================================================== +-- 034_ops_preaggregation_add_avg_max.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): extend pre-aggregation tables with avg/max latency fields +-- +-- Why: +-- - The dashboard overview returns avg/max for duration/TTFT. +-- - Hourly/daily pre-aggregation tables originally stored only p50/p90/p95/p99, which makes +-- it impossible to answer avg/max in preagg mode without falling back to raw scans. +-- +-- This migration is idempotent and safe to run multiple times. +-- +-- NOTE: We keep the existing p50/p90/p95/p99 columns as-is; these are still used for +-- approximate long-window summaries. 
+ +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- Hourly table +ALTER TABLE ops_metrics_hourly + ADD COLUMN IF NOT EXISTS duration_avg_ms DOUBLE PRECISION, + ADD COLUMN IF NOT EXISTS duration_max_ms INT, + ADD COLUMN IF NOT EXISTS ttft_avg_ms DOUBLE PRECISION, + ADD COLUMN IF NOT EXISTS ttft_max_ms INT; + +-- Daily table +ALTER TABLE ops_metrics_daily + ADD COLUMN IF NOT EXISTS duration_avg_ms DOUBLE PRECISION, + ADD COLUMN IF NOT EXISTS duration_max_ms INT, + ADD COLUMN IF NOT EXISTS ttft_avg_ms DOUBLE PRECISION, + ADD COLUMN IF NOT EXISTS ttft_max_ms INT; + +-- ===================================================================== +-- 035_ops_alert_rules_notify_email.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): alert rule notify settings +-- +-- Adds notify_email flag to ops_alert_rules to keep UI parity with the backup Ops dashboard. +-- Migration is idempotent. + +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +ALTER TABLE ops_alert_rules + ADD COLUMN IF NOT EXISTS notify_email BOOLEAN NOT NULL DEFAULT true; + +-- ===================================================================== +-- 036_ops_seed_default_alert_rules.sql +-- ===================================================================== + +-- Ops Monitoring (vNext): seed default alert rules (idempotent) +-- +-- Goal: +-- - Provide "out of the box" alert rules so the Ops dashboard can immediately show alert events. +-- - Keep inserts idempotent via ON CONFLICT (name) DO NOTHING. +-- +-- Notes: +-- - Thresholds are intentionally conservative defaults and should be tuned per deployment. +-- - Metric semantics follow vNext: +-- - success_rate / error_rate are based on SLA-scope counts (exclude is_business_limited). +-- - upstream_error_rate excludes 429/529. 
+ +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- 1) High error rate (P1) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + '错误率过高', + '当错误率超过 5% 且持续 5 分钟时触发告警', + true, 'error_rate', '>', 5.0, 5, 5, 'P1', true, 20, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 2) Low success rate (P0) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + '成功率过低', + '当成功率低于 95% 且持续 5 分钟时触发告警(服务可用性下降)', + true, 'success_rate', '<', 95.0, 5, 5, 'P0', true, 15, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 3) P99 latency too high (P2) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + 'P99延迟过高', + '当 P99 延迟超过 3000ms 且持续 10 分钟时触发告警', + true, 'p99_latency_ms', '>', 3000.0, 5, 10, 'P2', true, 30, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 4) P95 latency too high (P2) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + 'P95延迟过高', + '当 P95 延迟超过 2000ms 且持续 10 分钟时触发告警', + true, 'p95_latency_ms', '>', 2000.0, 5, 10, 'P2', true, 30, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 5) CPU usage too high (P2) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + 'CPU使用率过高', + '当 CPU 使用率超过 85% 且持续 10 分钟时触发告警', + true, 'cpu_usage_percent', '>', 85.0, 
5, 10, 'P2', true, 30, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 6) Memory usage too high (P1) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + '内存使用率过高', + '当内存使用率超过 90% 且持续 10 分钟时触发告警(可能导致 OOM)', + true, 'memory_usage_percent', '>', 90.0, 5, 10, 'P1', true, 20, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 7) Concurrency queue buildup (P1) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + '并发队列积压', + '当并发队列深度超过 100 且持续 5 分钟时触发告警(系统处理能力不足)', + true, 'concurrency_queue_depth', '>', 100.0, 5, 5, 'P1', true, 20, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- 8) Extremely high error rate (P0) +INSERT INTO ops_alert_rules ( + name, description, enabled, metric_type, operator, threshold, + window_minutes, sustained_minutes, severity, notify_email, cooldown_minutes, + created_at, updated_at +) VALUES ( + '错误率极高', + '当错误率超过 20% 且持续 1 分钟时触发告警(服务严重异常)', + true, 'error_rate', '>', 20.0, 1, 1, 'P0', true, 15, NOW(), NOW() +) ON CONFLICT (name) DO NOTHING; + +-- Ops Monitoring vNext: add Redis pool stats fields to system metrics snapshots. +-- This migration is intentionally idempotent. 
+ +ALTER TABLE ops_system_metrics + ADD COLUMN IF NOT EXISTS redis_conn_total INT, + ADD COLUMN IF NOT EXISTS redis_conn_idle INT; + +COMMENT ON COLUMN ops_system_metrics.redis_conn_total IS 'Redis pool total connections (go-redis PoolStats.TotalConns).'; +COMMENT ON COLUMN ops_system_metrics.redis_conn_idle IS 'Redis pool idle connections (go-redis PoolStats.IdleConns).'; diff --git a/backend/migrations/034_ops_upstream_error_events.sql b/backend/migrations/034_ops_upstream_error_events.sql new file mode 100644 index 00000000..f8bfa5e2 --- /dev/null +++ b/backend/migrations/034_ops_upstream_error_events.sql @@ -0,0 +1,9 @@ +-- Add upstream error events list (JSONB) to ops_error_logs for per-request correlation. +-- +-- This is intentionally idempotent. + +ALTER TABLE ops_error_logs + ADD COLUMN IF NOT EXISTS upstream_errors JSONB; + +COMMENT ON COLUMN ops_error_logs.upstream_errors IS + 'Sanitized upstream error events list (JSON array), correlated per gateway request (request_id/client_request_id); used for per-request upstream debugging.'; diff --git a/backend/migrations/034_usage_dashboard_aggregation_tables.sql b/backend/migrations/034_usage_dashboard_aggregation_tables.sql new file mode 100644 index 00000000..64b383d4 --- /dev/null +++ b/backend/migrations/034_usage_dashboard_aggregation_tables.sql @@ -0,0 +1,77 @@ +-- Usage dashboard aggregation tables (hourly/daily) + active-user dedup + watermark. +-- These tables support Admin Dashboard statistics without full-table scans on usage_logs. + +-- Hourly aggregates (UTC buckets). 
+CREATE TABLE IF NOT EXISTS usage_dashboard_hourly ( + bucket_start TIMESTAMPTZ PRIMARY KEY, + total_requests BIGINT NOT NULL DEFAULT 0, + input_tokens BIGINT NOT NULL DEFAULT 0, + output_tokens BIGINT NOT NULL DEFAULT 0, + cache_creation_tokens BIGINT NOT NULL DEFAULT 0, + cache_read_tokens BIGINT NOT NULL DEFAULT 0, + total_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, + actual_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, + total_duration_ms BIGINT NOT NULL DEFAULT 0, + active_users BIGINT NOT NULL DEFAULT 0, + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_usage_dashboard_hourly_bucket_start + ON usage_dashboard_hourly (bucket_start DESC); + +COMMENT ON TABLE usage_dashboard_hourly IS 'Pre-aggregated hourly usage metrics for admin dashboard (UTC buckets).'; +COMMENT ON COLUMN usage_dashboard_hourly.bucket_start IS 'UTC start timestamp of the hour bucket.'; +COMMENT ON COLUMN usage_dashboard_hourly.computed_at IS 'When the hourly row was last computed/refreshed.'; + +-- Daily aggregates (UTC dates). 
+CREATE TABLE IF NOT EXISTS usage_dashboard_daily ( + bucket_date DATE PRIMARY KEY, + total_requests BIGINT NOT NULL DEFAULT 0, + input_tokens BIGINT NOT NULL DEFAULT 0, + output_tokens BIGINT NOT NULL DEFAULT 0, + cache_creation_tokens BIGINT NOT NULL DEFAULT 0, + cache_read_tokens BIGINT NOT NULL DEFAULT 0, + total_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, + actual_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, + total_duration_ms BIGINT NOT NULL DEFAULT 0, + active_users BIGINT NOT NULL DEFAULT 0, + computed_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_usage_dashboard_daily_bucket_date + ON usage_dashboard_daily (bucket_date DESC); + +COMMENT ON TABLE usage_dashboard_daily IS 'Pre-aggregated daily usage metrics for admin dashboard (UTC dates).'; +COMMENT ON COLUMN usage_dashboard_daily.bucket_date IS 'UTC date of the day bucket.'; +COMMENT ON COLUMN usage_dashboard_daily.computed_at IS 'When the daily row was last computed/refreshed.'; + +-- Hourly active user dedup table. +CREATE TABLE IF NOT EXISTS usage_dashboard_hourly_users ( + bucket_start TIMESTAMPTZ NOT NULL, + user_id BIGINT NOT NULL, + PRIMARY KEY (bucket_start, user_id) +); + +CREATE INDEX IF NOT EXISTS idx_usage_dashboard_hourly_users_bucket_start + ON usage_dashboard_hourly_users (bucket_start); + +-- Daily active user dedup table. +CREATE TABLE IF NOT EXISTS usage_dashboard_daily_users ( + bucket_date DATE NOT NULL, + user_id BIGINT NOT NULL, + PRIMARY KEY (bucket_date, user_id) +); + +CREATE INDEX IF NOT EXISTS idx_usage_dashboard_daily_users_bucket_date + ON usage_dashboard_daily_users (bucket_date); + +-- Aggregation watermark table (single row). 
+CREATE TABLE IF NOT EXISTS usage_dashboard_aggregation_watermark ( + id INT PRIMARY KEY, + last_aggregated_at TIMESTAMPTZ NOT NULL DEFAULT TIMESTAMPTZ '1970-01-01 00:00:00+00', + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +INSERT INTO usage_dashboard_aggregation_watermark (id) +VALUES (1) +ON CONFLICT (id) DO NOTHING; diff --git a/backend/migrations/035_usage_logs_partitioning.sql b/backend/migrations/035_usage_logs_partitioning.sql new file mode 100644 index 00000000..e25a105e --- /dev/null +++ b/backend/migrations/035_usage_logs_partitioning.sql @@ -0,0 +1,54 @@ +-- usage_logs monthly partition bootstrap. +-- Only creates partitions when usage_logs is already partitioned. +-- Converting usage_logs to a partitioned table requires a manual migration plan. + +DO $$ +DECLARE + is_partitioned BOOLEAN := FALSE; + has_data BOOLEAN := FALSE; + month_start DATE; + prev_month DATE; + next_month DATE; +BEGIN + SELECT EXISTS( + SELECT 1 + FROM pg_partitioned_table pt + JOIN pg_class c ON c.oid = pt.partrelid + WHERE c.relname = 'usage_logs' + ) INTO is_partitioned; + + IF NOT is_partitioned THEN + SELECT EXISTS(SELECT 1 FROM usage_logs LIMIT 1) INTO has_data; + IF NOT has_data THEN + -- Automatic conversion is intentionally skipped; see manual migration plan. 
+      RAISE NOTICE 'usage_logs is not partitioned; skip automatic partitioning';
+    END IF;
+  END IF;
+
+  IF is_partitioned THEN
+    month_start := date_trunc('month', now() AT TIME ZONE 'UTC')::date;
+    prev_month := (month_start - INTERVAL '1 month')::date;
+    next_month := (month_start + INTERVAL '1 month')::date;
+
+    EXECUTE format(
+      'CREATE TABLE IF NOT EXISTS usage_logs_%s PARTITION OF usage_logs FOR VALUES FROM (%L) TO (%L)',
+      to_char(prev_month, 'YYYYMM'),
+      prev_month,
+      month_start
+    );
+
+    EXECUTE format(
+      'CREATE TABLE IF NOT EXISTS usage_logs_%s PARTITION OF usage_logs FOR VALUES FROM (%L) TO (%L)',
+      to_char(month_start, 'YYYYMM'),
+      month_start,
+      next_month
+    );
+
+    EXECUTE format(
+      'CREATE TABLE IF NOT EXISTS usage_logs_%s PARTITION OF usage_logs FOR VALUES FROM (%L) TO (%L)',
+      to_char(next_month, 'YYYYMM'),
+      next_month,
+      (next_month + INTERVAL '1 month')::date
+    );
+  END IF;
+END $$;
diff --git a/backend/migrations/036_ops_error_logs_add_is_count_tokens.sql b/backend/migrations/036_ops_error_logs_add_is_count_tokens.sql
new file mode 100644
index 00000000..dedb1154
--- /dev/null
+++ b/backend/migrations/036_ops_error_logs_add_is_count_tokens.sql
@@ -0,0 +1,16 @@
+-- Migration: 添加 is_count_tokens 字段到 ops_error_logs 表
+-- Purpose: 标记 count_tokens 请求的错误,以便在统计和告警中根据配置动态过滤
+-- Author: System
+-- Date: 2026-01-12
+
+-- Add is_count_tokens column to ops_error_logs table (IF NOT EXISTS keeps the migration idempotent)
+ALTER TABLE ops_error_logs
+ADD COLUMN IF NOT EXISTS is_count_tokens BOOLEAN NOT NULL DEFAULT FALSE;
+
+-- Add comment
+COMMENT ON COLUMN ops_error_logs.is_count_tokens IS '是否为 count_tokens 请求的错误(用于统计过滤)';
+
+-- Create index for filtering (optional, improves query performance)
+CREATE INDEX IF NOT EXISTS idx_ops_error_logs_is_count_tokens
+ON ops_error_logs(is_count_tokens)
+WHERE is_count_tokens = TRUE;
diff --git a/backend/migrations/036_scheduler_outbox.sql b/backend/migrations/036_scheduler_outbox.sql
new file mode 100644
index 00000000..a548841c
--- /dev/null
+++ 
b/backend/migrations/036_scheduler_outbox.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS scheduler_outbox ( + id BIGSERIAL PRIMARY KEY, + event_type TEXT NOT NULL, + account_id BIGINT NULL, + group_id BIGINT NULL, + payload JSONB NULL, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_scheduler_outbox_created_at ON scheduler_outbox (created_at); diff --git a/backend/migrations/037_add_account_rate_multiplier.sql b/backend/migrations/037_add_account_rate_multiplier.sql new file mode 100644 index 00000000..06f5b090 --- /dev/null +++ b/backend/migrations/037_add_account_rate_multiplier.sql @@ -0,0 +1,14 @@ +-- Add account billing rate multiplier and per-usage snapshot. +-- +-- accounts.rate_multiplier: 账号计费倍率(>=0,允许 0 表示该账号计费为 0)。 +-- usage_logs.account_rate_multiplier: 每条 usage log 的账号倍率快照,用于实现 +-- “倍率调整仅影响之后请求”,并支持同一天分段倍率加权统计。 +-- +-- 注意:usage_logs.account_rate_multiplier 不做回填、不设置 NOT NULL。 +-- 老数据为 NULL 时,统计口径按 1.0 处理(COALESCE)。 + +ALTER TABLE IF EXISTS accounts + ADD COLUMN IF NOT EXISTS rate_multiplier DECIMAL(10,4) NOT NULL DEFAULT 1.0; + +ALTER TABLE IF EXISTS usage_logs + ADD COLUMN IF NOT EXISTS account_rate_multiplier DECIMAL(10,4); diff --git a/backend/migrations/037_ops_alert_silences.sql b/backend/migrations/037_ops_alert_silences.sql new file mode 100644 index 00000000..95b61a09 --- /dev/null +++ b/backend/migrations/037_ops_alert_silences.sql @@ -0,0 +1,28 @@ +-- +goose Up +-- +goose StatementBegin +-- Ops alert silences: scoped (rule_id + platform + group_id + region) + +CREATE TABLE IF NOT EXISTS ops_alert_silences ( + id BIGSERIAL PRIMARY KEY, + + rule_id BIGINT NOT NULL, + platform VARCHAR(64) NOT NULL, + group_id BIGINT, + region VARCHAR(64), + + until TIMESTAMPTZ NOT NULL, + reason TEXT, + + created_by BIGINT, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_ops_alert_silences_lookup + ON ops_alert_silences (rule_id, platform, group_id, region, until); + +-- +goose 
StatementEnd + +-- +goose Down +-- +goose StatementBegin +DROP TABLE IF EXISTS ops_alert_silences; +-- +goose StatementEnd diff --git a/backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql b/backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql new file mode 100644 index 00000000..adaacf1c --- /dev/null +++ b/backend/migrations/038_ops_errors_resolution_retry_results_and_standardize_classification.sql @@ -0,0 +1,111 @@ +-- Add resolution tracking to ops_error_logs, persist retry results, and standardize error classification enums. +-- +-- This migration is intentionally idempotent. + +SET LOCAL lock_timeout = '5s'; +SET LOCAL statement_timeout = '10min'; + +-- ============================================ +-- 1) ops_error_logs: resolution fields +-- ============================================ + +ALTER TABLE ops_error_logs + ADD COLUMN IF NOT EXISTS resolved BOOLEAN NOT NULL DEFAULT false; + +ALTER TABLE ops_error_logs + ADD COLUMN IF NOT EXISTS resolved_at TIMESTAMPTZ; + +ALTER TABLE ops_error_logs + ADD COLUMN IF NOT EXISTS resolved_by_user_id BIGINT; + +ALTER TABLE ops_error_logs + ADD COLUMN IF NOT EXISTS resolved_retry_id BIGINT; + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_resolved_time + ON ops_error_logs (resolved, created_at DESC); + +CREATE INDEX IF NOT EXISTS idx_ops_error_logs_unresolved_time + ON ops_error_logs (created_at DESC) + WHERE resolved = false; + +-- ============================================ +-- 2) ops_retry_attempts: persist execution results +-- ============================================ + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS success BOOLEAN; + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS http_status_code INT; + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS upstream_request_id VARCHAR(128); + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS used_account_id BIGINT; + +ALTER TABLE 
ops_retry_attempts + ADD COLUMN IF NOT EXISTS response_preview TEXT; + +ALTER TABLE ops_retry_attempts + ADD COLUMN IF NOT EXISTS response_truncated BOOLEAN NOT NULL DEFAULT false; + +CREATE INDEX IF NOT EXISTS idx_ops_retry_attempts_success_time + ON ops_retry_attempts (success, created_at DESC); + +-- Backfill best-effort fields for existing rows. +UPDATE ops_retry_attempts +SET success = (LOWER(COALESCE(status, '')) = 'succeeded') +WHERE success IS NULL; + +UPDATE ops_retry_attempts +SET upstream_request_id = result_request_id +WHERE upstream_request_id IS NULL AND result_request_id IS NOT NULL; + +-- ============================================ +-- 3) Standardize classification enums in ops_error_logs +-- +-- New enums: +-- error_phase: request|auth|routing|upstream|network|internal +-- error_owner: client|provider|platform +-- error_source: client_request|upstream_http|gateway +-- ============================================ + +-- Owner: legacy sub2api => platform. +UPDATE ops_error_logs +SET error_owner = 'platform' +WHERE LOWER(COALESCE(error_owner, '')) = 'sub2api'; + +-- Owner: normalize empty/null to platform (best-effort). +UPDATE ops_error_logs +SET error_owner = 'platform' +WHERE COALESCE(TRIM(error_owner), '') = ''; + +-- Phase: map legacy phases. +UPDATE ops_error_logs +SET error_phase = CASE + WHEN COALESCE(TRIM(error_phase), '') = '' THEN 'internal' + WHEN LOWER(error_phase) IN ('billing', 'concurrency', 'response') THEN 'request' + WHEN LOWER(error_phase) IN ('scheduling') THEN 'routing' + WHEN LOWER(error_phase) IN ('request', 'auth', 'routing', 'upstream', 'network', 'internal') THEN LOWER(error_phase) + ELSE 'internal' +END; + +-- Source: map legacy sources. 
+UPDATE ops_error_logs +SET error_source = CASE + WHEN COALESCE(TRIM(error_source), '') = '' THEN 'gateway' + WHEN LOWER(error_source) IN ('billing', 'concurrency') THEN 'client_request' + WHEN LOWER(error_source) IN ('upstream_http') THEN 'upstream_http' + WHEN LOWER(error_source) IN ('upstream_network') THEN 'gateway' + WHEN LOWER(error_source) IN ('internal') THEN 'gateway' + WHEN LOWER(error_source) IN ('client_request', 'upstream_http', 'gateway') THEN LOWER(error_source) + ELSE 'gateway' +END; + +-- Auto-resolve recovered upstream errors (client status < 400). +UPDATE ops_error_logs +SET + resolved = true, + resolved_at = COALESCE(resolved_at, created_at) +WHERE resolved = false AND COALESCE(status_code, 0) > 0 AND COALESCE(status_code, 0) < 400; diff --git a/backend/migrations/039_ops_job_heartbeats_add_last_result.sql b/backend/migrations/039_ops_job_heartbeats_add_last_result.sql new file mode 100644 index 00000000..7d6dc743 --- /dev/null +++ b/backend/migrations/039_ops_job_heartbeats_add_last_result.sql @@ -0,0 +1,6 @@ +-- Add last_result to ops_job_heartbeats for UI job details. 
+
+ALTER TABLE IF EXISTS ops_job_heartbeats
+  ADD COLUMN IF NOT EXISTS last_result TEXT;
+
+COMMENT ON COLUMN ops_job_heartbeats.last_result IS 'Last successful run result summary (human readable).';
diff --git a/backend/migrations/040_add_group_model_routing.sql b/backend/migrations/040_add_group_model_routing.sql
new file mode 100644
index 00000000..303fcb2a
--- /dev/null
+++ b/backend/migrations/040_add_group_model_routing.sql
@@ -0,0 +1,11 @@
+-- 040_add_group_model_routing.sql
+-- 添加分组级别的模型路由配置功能
+
+-- 添加 model_routing 字段:模型路由配置(JSONB 格式)
+-- 格式: {"model_pattern": [account_id1, account_id2], ...}
+-- 例如: {"claude-opus-*": [1, 2], "claude-sonnet-*": [3, 4, 5]}
+ALTER TABLE groups
+ADD COLUMN IF NOT EXISTS model_routing JSONB DEFAULT '{}';
+
+-- 添加字段注释
+COMMENT ON COLUMN groups.model_routing IS '模型路由配置:{"model_pattern": [account_id1, account_id2], ...},支持通配符匹配';
diff --git a/backend/migrations/041_add_model_routing_enabled.sql b/backend/migrations/041_add_model_routing_enabled.sql
new file mode 100644
index 00000000..8691cf1f
--- /dev/null
+++ b/backend/migrations/041_add_model_routing_enabled.sql
@@ -0,0 +1,2 @@
+-- Add model_routing_enabled field to groups table (IF NOT EXISTS keeps the migration idempotent, matching 040)
+ALTER TABLE groups ADD COLUMN IF NOT EXISTS model_routing_enabled BOOLEAN NOT NULL DEFAULT false;
diff --git a/config.yaml b/config.yaml
index 54b591f3..424ce9eb 100644
--- a/config.yaml
+++ b/config.yaml
@@ -159,7 +159,7 @@ gateway:
   max_line_size: 41943040
   # Log upstream error response body summary (safe/truncated; does not log request content)
   # 记录上游错误响应体摘要(安全/截断;不记录请求内容)
-  log_upstream_error_body: false
+  log_upstream_error_body: true
   # Max bytes to log from upstream error body
   # 记录上游错误响应体的最大字节数
   log_upstream_error_body_max_bytes: 2048
@@ -170,6 +170,87 @@ gateway:
   # Allow failover on selected 400 errors (default: off)
   # 允许在特定 400 错误时进行故障转移(默认:关闭)
   failover_on_400: false
+# =============================================================================
+# API Key Auth Cache Configuration
+# API Key 认证缓存配置
+# 
============================================================================= +api_key_auth_cache: + # L1 cache size (entries), in-process LRU/TTL cache + # L1 缓存容量(条目数),进程内 LRU/TTL 缓存 + l1_size: 65535 + # L1 cache TTL (seconds) + # L1 缓存 TTL(秒) + l1_ttl_seconds: 15 + # L2 cache TTL (seconds), stored in Redis + # L2 缓存 TTL(秒),Redis 中存储 + l2_ttl_seconds: 300 + # Negative cache TTL (seconds) + # 负缓存 TTL(秒) + negative_ttl_seconds: 30 + # TTL jitter percent (0-100) + # TTL 抖动百分比(0-100) + jitter_percent: 10 + # Enable singleflight for cache misses + # 缓存未命中时启用 singleflight 合并回源 + singleflight: true + +# ============================================================================= +# Dashboard Cache Configuration +# 仪表盘缓存配置 +# ============================================================================= +dashboard_cache: + # Enable dashboard cache + # 启用仪表盘缓存 + enabled: true + # Redis key prefix for multi-environment isolation + # Redis key 前缀,用于多环境隔离 + key_prefix: "sub2api:" + # Fresh TTL (seconds); within this window cached stats are considered fresh + # 新鲜阈值(秒);命中后处于该窗口视为新鲜数据 + stats_fresh_ttl_seconds: 15 + # Cache TTL (seconds) stored in Redis + # Redis 缓存 TTL(秒) + stats_ttl_seconds: 30 + # Async refresh timeout (seconds) + # 异步刷新超时(秒) + stats_refresh_timeout_seconds: 30 + +# ============================================================================= +# Dashboard Aggregation Configuration +# 仪表盘预聚合配置(重启生效) +# ============================================================================= +dashboard_aggregation: + # Enable aggregation job + # 启用聚合作业 + enabled: true + # Refresh interval (seconds) + # 刷新间隔(秒) + interval_seconds: 60 + # Lookback window (seconds) for late-arriving data + # 回看窗口(秒),处理迟到数据 + lookback_seconds: 120 + # Allow manual backfill + # 允许手动回填 + backfill_enabled: false + # Backfill max range (days) + # 回填最大跨度(天) + backfill_max_days: 31 + # Recompute recent N days on startup + # 启动时重算最近 N 天 + recompute_days: 2 + # Retention windows (days) + # 保留窗口(天) + 
retention: + # Raw usage_logs retention + # 原始 usage_logs 保留天数 + usage_logs_days: 90 + # Hourly aggregation retention + # 小时聚合保留天数 + hourly_days: 180 + # Daily aggregation retention + # 日聚合保留天数 + daily_days: 730 + # ============================================================================= # Concurrency Wait Configuration # 并发等待配置 @@ -221,6 +302,41 @@ redis: # 数据库编号(0-15) db: 0 +# ============================================================================= +# Ops Monitoring (Optional) +# 运维监控 (可选) +# ============================================================================= +ops: + # Hard switch: disable all ops background jobs and APIs when false + # 硬开关:为 false 时禁用所有 Ops 后台任务与接口 + enabled: true + + # Prefer pre-aggregated tables (ops_metrics_hourly/ops_metrics_daily) for long-window dashboard queries. + # 优先使用预聚合表(用于长时间窗口查询性能) + use_preaggregated_tables: false + + # Data cleanup configuration + # 数据清理配置(vNext 默认统一保留 30 天) + cleanup: + enabled: true + # Cron expression (minute hour dom month dow), e.g. 
"0 2 * * *" = daily at 2 AM + # Cron 表达式(分 时 日 月 周),例如 "0 2 * * *" = 每天凌晨 2 点 + schedule: "0 2 * * *" + error_log_retention_days: 30 + minute_metrics_retention_days: 30 + hourly_metrics_retention_days: 30 + + # Pre-aggregation configuration + # 预聚合任务配置 + aggregation: + enabled: true + + # OpsMetricsCollector Redis cache (reduces duplicate expensive window aggregation in multi-replica deployments) + # 指标采集 Redis 缓存(多副本部署时减少重复计算) + metrics_collector_cache: + enabled: true + ttl: 65s + # ============================================================================= # JWT Configuration # JWT 配置 diff --git a/deploy/.env.example b/deploy/.env.example index bd8abc5c..f21a3c62 100644 --- a/deploy/.env.example +++ b/deploy/.env.example @@ -69,6 +69,76 @@ JWT_EXPIRE_HOUR=24 # Leave unset to use default ./config.yaml #CONFIG_FILE=./config.yaml +# ----------------------------------------------------------------------------- +# Rate Limiting (Optional) +# 速率限制(可选) +# ----------------------------------------------------------------------------- +# Cooldown time (in minutes) when upstream returns 529 (overloaded) +# 上游返回 529(过载)时的冷却时间(分钟) +RATE_LIMIT_OVERLOAD_COOLDOWN_MINUTES=10 + +# ----------------------------------------------------------------------------- +# Gateway Scheduling (Optional) +# 调度缓存与受控回源配置(缓存就绪且命中时不读 DB) +# ----------------------------------------------------------------------------- +# 粘性会话最大排队长度 +GATEWAY_SCHEDULING_STICKY_SESSION_MAX_WAITING=3 +# 粘性会话等待超时(时间段,例如 45s) +GATEWAY_SCHEDULING_STICKY_SESSION_WAIT_TIMEOUT=120s +# 兜底排队等待超时(时间段,例如 30s) +GATEWAY_SCHEDULING_FALLBACK_WAIT_TIMEOUT=30s +# 兜底最大排队长度 +GATEWAY_SCHEDULING_FALLBACK_MAX_WAITING=100 +# 启用调度批量负载计算 +GATEWAY_SCHEDULING_LOAD_BATCH_ENABLED=true +# 并发槽位清理周期(时间段,例如 30s) +GATEWAY_SCHEDULING_SLOT_CLEANUP_INTERVAL=30s +# 是否允许受控回源到 DB(默认 true,保持现有行为) +GATEWAY_SCHEDULING_DB_FALLBACK_ENABLED=true +# 受控回源超时(秒),0 表示不额外收紧超时 +GATEWAY_SCHEDULING_DB_FALLBACK_TIMEOUT_SECONDS=0 +# 受控回源限流(实例级 QPS),0 表示不限制 
+GATEWAY_SCHEDULING_DB_FALLBACK_MAX_QPS=0 +# outbox 轮询周期(秒) +GATEWAY_SCHEDULING_OUTBOX_POLL_INTERVAL_SECONDS=1 +# outbox 滞后告警阈值(秒) +GATEWAY_SCHEDULING_OUTBOX_LAG_WARN_SECONDS=5 +# outbox 触发强制重建阈值(秒) +GATEWAY_SCHEDULING_OUTBOX_LAG_REBUILD_SECONDS=10 +# outbox 连续滞后触发次数 +GATEWAY_SCHEDULING_OUTBOX_LAG_REBUILD_FAILURES=3 +# outbox 积压触发重建阈值(行数) +GATEWAY_SCHEDULING_OUTBOX_BACKLOG_REBUILD_ROWS=10000 +# 全量重建周期(秒) +GATEWAY_SCHEDULING_FULL_REBUILD_INTERVAL_SECONDS=300 + +# ----------------------------------------------------------------------------- +# Dashboard Aggregation (Optional) +# ----------------------------------------------------------------------------- +# Enable aggregation job +# 启用仪表盘预聚合 +DASHBOARD_AGGREGATION_ENABLED=true +# Refresh interval (seconds) +# 刷新间隔(秒) +DASHBOARD_AGGREGATION_INTERVAL_SECONDS=60 +# Lookback window (seconds) +# 回看窗口(秒) +DASHBOARD_AGGREGATION_LOOKBACK_SECONDS=120 +# Allow manual backfill +# 允许手动回填 +DASHBOARD_AGGREGATION_BACKFILL_ENABLED=false +# Backfill max range (days) +# 回填最大跨度(天) +DASHBOARD_AGGREGATION_BACKFILL_MAX_DAYS=31 +# Recompute recent N days on startup +# 启动时重算最近 N 天 +DASHBOARD_AGGREGATION_RECOMPUTE_DAYS=2 +# Retention windows (days) +# 保留窗口(天) +DASHBOARD_AGGREGATION_RETENTION_USAGE_LOGS_DAYS=90 +DASHBOARD_AGGREGATION_RETENTION_HOURLY_DAYS=180 +DASHBOARD_AGGREGATION_RETENTION_DAILY_DAYS=730 + # ----------------------------------------------------------------------------- # Security Configuration # ----------------------------------------------------------------------------- @@ -124,6 +194,15 @@ GEMINI_OAUTH_SCOPES= # GEMINI_QUOTA_POLICY={"tiers":{"LEGACY":{"pro_rpd":50,"flash_rpd":1500,"cooldown_minutes":30},"PRO":{"pro_rpd":1500,"flash_rpd":4000,"cooldown_minutes":5},"ULTRA":{"pro_rpd":2000,"flash_rpd":0,"cooldown_minutes":5}}} GEMINI_QUOTA_POLICY= +# ----------------------------------------------------------------------------- +# Ops Monitoring Configuration (运维监控配置) +# 
----------------------------------------------------------------------------- +# Enable ops monitoring features (background jobs and APIs) +# 是否启用运维监控功能(后台任务和接口) +# Set to false to hide ops menu in sidebar and disable all ops features +# 设置为 false 可在左侧栏隐藏运维监控菜单并禁用所有运维监控功能 +OPS_ENABLED=true + # ----------------------------------------------------------------------------- # Update Configuration (在线更新配置) # ----------------------------------------------------------------------------- diff --git a/deploy/Caddyfile b/deploy/Caddyfile index 3aeef51a..e5636213 100644 --- a/deploy/Caddyfile +++ b/deploy/Caddyfile @@ -33,6 +33,22 @@ # 修改为你的域名 example.com { + # ========================================================================= + # 静态资源长期缓存(高优先级,放在最前面) + # 带 hash 的文件可以永久缓存,浏览器和 CDN 都会缓存 + # ========================================================================= + @static { + path /assets/* + path /logo.png + path /favicon.ico + } + header @static { + Cache-Control "public, max-age=31536000, immutable" + # 移除可能干扰缓存的头 + -Pragma + -Expires + } + # ========================================================================= # TLS 安全配置 # ========================================================================= diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 87ff3148..9e85d1ff 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -97,7 +97,9 @@ security: enabled: true # Default CSP policy (override if you host assets on other domains) # 默认 CSP 策略(如果静态资源托管在其他域名,请自行覆盖) - policy: "default-src 'self'; script-src 'self' https://challenges.cloudflare.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https:; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' https:; frame-src https://challenges.cloudflare.com; frame-ancestors 'none'; base-uri 'self'; form-action 'self'" + # Note: __CSP_NONCE__ will be replaced with 'nonce-xxx' at request time for inline script security + # 
注意:__CSP_NONCE__ 会在请求时被替换为 'nonce-xxx',用于内联脚本安全 + policy: "default-src 'self'; script-src 'self' __CSP_NONCE__ https://challenges.cloudflare.com https://static.cloudflareinsights.com; style-src 'self' 'unsafe-inline' https://fonts.googleapis.com; img-src 'self' data: https:; font-src 'self' data: https://fonts.gstatic.com; connect-src 'self' https:; frame-src https://challenges.cloudflare.com; frame-ancestors 'none'; base-uri 'self'; form-action 'self'" proxy_probe: # Allow skipping TLS verification for proxy probe (debug only) # 允许代理探测时跳过 TLS 证书验证(仅用于调试) @@ -159,7 +161,7 @@ gateway: max_line_size: 41943040 # Log upstream error response body summary (safe/truncated; does not log request content) # 记录上游错误响应体摘要(安全/截断;不记录请求内容) - log_upstream_error_body: false + log_upstream_error_body: true # Max bytes to log from upstream error body # 记录上游错误响应体的最大字节数 log_upstream_error_body_max_bytes: 2048 @@ -169,6 +171,126 @@ gateway: # Allow failover on selected 400 errors (default: off) # 允许在特定 400 错误时进行故障转移(默认:关闭) failover_on_400: false + # Scheduling configuration + # 调度配置 + scheduling: + # Sticky session max waiting queue size + # 粘性会话最大排队长度 + sticky_session_max_waiting: 3 + # Sticky session wait timeout (duration) + # 粘性会话等待超时(时间段) + sticky_session_wait_timeout: 120s + # Fallback wait timeout (duration) + # 兜底排队等待超时(时间段) + fallback_wait_timeout: 30s + # Fallback max waiting queue size + # 兜底最大排队长度 + fallback_max_waiting: 100 + # Enable batch load calculation for scheduling + # 启用调度批量负载计算 + load_batch_enabled: true + # Slot cleanup interval (duration) + # 并发槽位清理周期(时间段) + slot_cleanup_interval: 30s + # 是否允许受控回源到 DB(默认 true,保持现有行为) + db_fallback_enabled: true + # 受控回源超时(秒),0 表示不额外收紧超时 + db_fallback_timeout_seconds: 0 + # 受控回源限流(实例级 QPS),0 表示不限制 + db_fallback_max_qps: 0 + # outbox 轮询周期(秒) + outbox_poll_interval_seconds: 1 + # outbox 滞后告警阈值(秒) + outbox_lag_warn_seconds: 5 + # outbox 触发强制重建阈值(秒) + outbox_lag_rebuild_seconds: 10 + # outbox 连续滞后触发次数 + outbox_lag_rebuild_failures: 3 + 
# outbox 积压触发重建阈值(行数) + outbox_backlog_rebuild_rows: 10000 + # 全量重建周期(秒),0 表示禁用 + full_rebuild_interval_seconds: 300 + +# ============================================================================= +# API Key Auth Cache Configuration +# API Key 认证缓存配置 +# ============================================================================= +api_key_auth_cache: + # L1 cache size (entries), in-process LRU/TTL cache + # L1 缓存容量(条目数),进程内 LRU/TTL 缓存 + l1_size: 65535 + # L1 cache TTL (seconds) + # L1 缓存 TTL(秒) + l1_ttl_seconds: 15 + # L2 cache TTL (seconds), stored in Redis + # L2 缓存 TTL(秒),Redis 中存储 + l2_ttl_seconds: 300 + # Negative cache TTL (seconds) + # 负缓存 TTL(秒) + negative_ttl_seconds: 30 + # TTL jitter percent (0-100) + # TTL 抖动百分比(0-100) + jitter_percent: 10 + # Enable singleflight for cache misses + # 缓存未命中时启用 singleflight 合并回源 + singleflight: true + +# ============================================================================= +# Dashboard Cache Configuration +# 仪表盘缓存配置 +# ============================================================================= +dashboard_cache: + # Enable dashboard cache + # 启用仪表盘缓存 + enabled: true + # Redis key prefix for multi-environment isolation + # Redis key 前缀,用于多环境隔离 + key_prefix: "sub2api:" + # Fresh TTL (seconds); within this window cached stats are considered fresh + # 新鲜阈值(秒);命中后处于该窗口视为新鲜数据 + stats_fresh_ttl_seconds: 15 + # Cache TTL (seconds) stored in Redis + # Redis 缓存 TTL(秒) + stats_ttl_seconds: 30 + # Async refresh timeout (seconds) + # 异步刷新超时(秒) + stats_refresh_timeout_seconds: 30 + +# ============================================================================= +# Dashboard Aggregation Configuration +# 仪表盘预聚合配置(重启生效) +# ============================================================================= +dashboard_aggregation: + # Enable aggregation job + # 启用聚合作业 + enabled: true + # Refresh interval (seconds) + # 刷新间隔(秒) + interval_seconds: 60 + # Lookback window (seconds) for late-arriving data + # 回看窗口(秒),处理迟到数据 + 
lookback_seconds: 120 + # Allow manual backfill + # 允许手动回填 + backfill_enabled: false + # Backfill max range (days) + # 回填最大跨度(天) + backfill_max_days: 31 + # Recompute recent N days on startup + # 启动时重算最近 N 天 + recompute_days: 2 + # Retention windows (days) + # 保留窗口(天) + retention: + # Raw usage_logs retention + # 原始 usage_logs 保留天数 + usage_logs_days: 90 + # Hourly aggregation retention + # 小时聚合保留天数 + hourly_days: 180 + # Daily aggregation retention + # 日聚合保留天数 + daily_days: 730 # ============================================================================= # Concurrency Wait Configuration @@ -221,6 +343,19 @@ redis: # 数据库编号(0-15) db: 0 +# ============================================================================= +# Ops Monitoring (Optional) +# 运维监控 (可选) +# ============================================================================= +ops: + # Enable ops monitoring features (background jobs and APIs) + # 是否启用运维监控功能(后台任务和接口) + # Set to false to hide ops menu in sidebar and disable all ops features + # 设置为 false 可在左侧栏隐藏运维监控菜单并禁用所有运维监控功能 + # Other detailed settings (cleanup, aggregation, etc.) are configured in ops settings dialog + # 其他详细设置(数据清理、预聚合等)在运维监控设置对话框中配置 + enabled: true + # ============================================================================= # JWT Configuration # JWT 配置 diff --git a/deploy/docker-compose.standalone.yml b/deploy/docker-compose.standalone.yml new file mode 100644 index 00000000..1bf247c7 --- /dev/null +++ b/deploy/docker-compose.standalone.yml @@ -0,0 +1,93 @@ +# ============================================================================= +# Sub2API Docker Compose - Standalone Configuration +# ============================================================================= +# This configuration runs only the Sub2API application. +# PostgreSQL and Redis must be provided externally. +# +# Usage: +# 1. Copy .env.example to .env and configure database/redis connection +# 2. docker-compose -f docker-compose.standalone.yml up -d +# 3. 
Access: http://localhost:8080 +# ============================================================================= + +services: + sub2api: + image: weishaw/sub2api:latest + container_name: sub2api + restart: unless-stopped + ulimits: + nofile: + soft: 100000 + hard: 100000 + ports: + - "${BIND_HOST:-0.0.0.0}:${SERVER_PORT:-8080}:8080" + volumes: + - sub2api_data:/app/data + extra_hosts: + - "host.docker.internal:host-gateway" + environment: + # ======================================================================= + # Auto Setup + # ======================================================================= + - AUTO_SETUP=true + + # ======================================================================= + # Server Configuration + # ======================================================================= + - SERVER_HOST=0.0.0.0 + - SERVER_PORT=8080 + - SERVER_MODE=${SERVER_MODE:-release} + - RUN_MODE=${RUN_MODE:-standard} + + # ======================================================================= + # Database Configuration (PostgreSQL) - Required + # ======================================================================= + - DATABASE_HOST=${DATABASE_HOST:?DATABASE_HOST is required} + - DATABASE_PORT=${DATABASE_PORT:-5432} + - DATABASE_USER=${DATABASE_USER:-sub2api} + - DATABASE_PASSWORD=${DATABASE_PASSWORD:?DATABASE_PASSWORD is required} + - DATABASE_DBNAME=${DATABASE_DBNAME:-sub2api} + - DATABASE_SSLMODE=${DATABASE_SSLMODE:-disable} + + # ======================================================================= + # Redis Configuration - Required + # ======================================================================= + - REDIS_HOST=${REDIS_HOST:?REDIS_HOST is required} + - REDIS_PORT=${REDIS_PORT:-6379} + - REDIS_PASSWORD=${REDIS_PASSWORD:-} + - REDIS_DB=${REDIS_DB:-0} + + # ======================================================================= + # Admin Account (auto-created on first run) + # ======================================================================= + - 
ADMIN_EMAIL=${ADMIN_EMAIL:-admin@sub2api.local} + - ADMIN_PASSWORD=${ADMIN_PASSWORD:-} + + # ======================================================================= + # JWT Configuration + # ======================================================================= + - JWT_SECRET=${JWT_SECRET:-} + - JWT_EXPIRE_HOUR=${JWT_EXPIRE_HOUR:-24} + + # ======================================================================= + # Timezone Configuration + # ======================================================================= + - TZ=${TZ:-Asia/Shanghai} + + # ======================================================================= + # Gemini OAuth Configuration (optional) + # ======================================================================= + - GEMINI_OAUTH_CLIENT_ID=${GEMINI_OAUTH_CLIENT_ID:-} + - GEMINI_OAUTH_CLIENT_SECRET=${GEMINI_OAUTH_CLIENT_SECRET:-} + - GEMINI_OAUTH_SCOPES=${GEMINI_OAUTH_SCOPES:-} + - GEMINI_QUOTA_POLICY=${GEMINI_QUOTA_POLICY:-} + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8080/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 30s + +volumes: + sub2api_data: + driver: local diff --git a/frontend/.eslintignore b/frontend/.eslintignore new file mode 100644 index 00000000..d8682246 --- /dev/null +++ b/frontend/.eslintignore @@ -0,0 +1,14 @@ +# 忽略编译后的文件 +vite.config.js +vite.config.d.ts + +# 忽略依赖 +node_modules/ + +# 忽略构建输出 +dist/ +../backend/internal/web/dist/ + +# 忽略缓存 +.cache/ +.vite/ diff --git a/frontend/package-lock.json b/frontend/package-lock.json new file mode 100644 index 00000000..e6c6144e --- /dev/null +++ b/frontend/package-lock.json @@ -0,0 +1,6954 @@ +{ + "name": "sub2api-frontend", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "sub2api-frontend", + "version": "1.0.0", + "dependencies": { + "@lobehub/icons": "^4.0.2", + "@vueuse/core": "^10.7.0", + "axios": "^1.6.2", + "chart.js": "^4.4.1", + "driver.js": "^1.4.0", + "file-saver": "^2.0.5", + "pinia": 
"^2.1.7", + "vue": "^3.4.0", + "vue-chartjs": "^5.3.0", + "vue-i18n": "^9.14.5", + "vue-router": "^4.2.5", + "xlsx": "^0.18.5" + }, + "devDependencies": { + "@types/file-saver": "^2.0.7", + "@types/mdx": "^2.0.13", + "@types/node": "^20.10.5", + "@typescript-eslint/eslint-plugin": "^7.18.0", + "@typescript-eslint/parser": "^7.18.0", + "@vitejs/plugin-vue": "^5.2.3", + "@vitest/coverage-v8": "^2.1.9", + "@vue/test-utils": "^2.4.6", + "autoprefixer": "^10.4.16", + "eslint": "^8.57.0", + "eslint-plugin-vue": "^9.25.0", + "jsdom": "^24.1.3", + "postcss": "^8.4.32", + "tailwindcss": "^3.4.0", + "typescript": "~5.6.0", + "vite": "^5.0.10", + "vite-plugin-checker": "^0.9.1", + "vitest": "^2.1.9", + "vue-tsc": "^2.2.0" + } + }, + "node_modules/@alloc/quick-lru": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/@alloc/quick-lru/-/quick-lru-5.2.0.tgz", + "integrity": "sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@ant-design/cssinjs": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@ant-design/cssinjs/-/cssinjs-2.0.2.tgz", + "integrity": "sha512-7KDVIigtqlamOLtJ0hbjECX/sDGDaJXsM/KHala8I/1E4lpl9RAO585kbVvh/k1rIrFAV6JeGkXmdWyYj9XvuA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.11.1", + "@emotion/hash": "^0.8.0", + "@emotion/unitless": "^0.7.5", + 
"@rc-component/util": "^1.4.0", + "clsx": "^2.1.1", + "csstype": "^3.1.3", + "stylis": "^4.3.4" + }, + "peerDependencies": { + "react": ">=16.0.0", + "react-dom": ">=16.0.0" + } + }, + "node_modules/@asamuzakjp/css-color": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/@asamuzakjp/css-color/-/css-color-3.2.0.tgz", + "integrity": "sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@csstools/css-calc": "^2.1.3", + "@csstools/css-color-parser": "^3.0.9", + "@csstools/css-parser-algorithms": "^3.0.4", + "@csstools/css-tokenizer": "^3.0.3", + "lru-cache": "^10.4.3" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/code-frame/-/code-frame-7.27.1.tgz", + "integrity": "sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==", + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.27.1", + "js-tokens": "^4.0.0", + "picocolors": "^1.1.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/generator": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.28.5.tgz", + "integrity": "sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@babel/types": "^7.28.5", + "@jridgewell/gen-mapping": "^0.3.12", + "@jridgewell/trace-mapping": "^0.3.28", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-globals": { + "version": "7.28.0", + "resolved": "https://registry.npmjs.org/@babel/helper-globals/-/helper-globals-7.28.0.tgz", + "integrity": "sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + 
"node_modules/@babel/helper-module-imports": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-module-imports/-/helper-module-imports-7.27.1.tgz", + "integrity": "sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==", + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.27.1", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.27.1", + "resolved": "https://registry.npmjs.org/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz", + "integrity": "sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz", + "integrity": "sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.28.5.tgz", + "integrity": "sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==", + "license": "MIT", + "dependencies": { + "@babel/types": "^7.28.5" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/runtime": { + "version": "7.28.4", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.28.4.tgz", + "integrity": "sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==", + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/template": { + "version": "7.27.2", + "resolved": 
"https://registry.npmjs.org/@babel/template/-/template-7.27.2.tgz", + "integrity": "sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/parser": "^7.27.2", + "@babel/types": "^7.27.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.28.5.tgz", + "integrity": "sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "@babel/generator": "^7.28.5", + "@babel/helper-globals": "^7.28.0", + "@babel/parser": "^7.28.5", + "@babel/template": "^7.27.2", + "@babel/types": "^7.28.5", + "debug": "^4.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.28.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.28.5.tgz", + "integrity": "sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==", + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.27.1", + "@babel/helper-validator-identifier": "^7.28.5" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@bcoe/v8-coverage": { + "version": "0.2.3", + "resolved": "https://registry.npmjs.org/@bcoe/v8-coverage/-/v8-coverage-0.2.3.tgz", + "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@csstools/color-helpers": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/@csstools/color-helpers/-/color-helpers-5.1.0.tgz", + "integrity": "sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": 
"https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT-0", + "engines": { + "node": ">=18" + } + }, + "node_modules/@csstools/css-calc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@csstools/css-calc/-/css-calc-2.1.4.tgz", + "integrity": "sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-color-parser": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@csstools/css-color-parser/-/css-color-parser-3.1.0.tgz", + "integrity": "sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "dependencies": { + "@csstools/color-helpers": "^5.1.0", + "@csstools/css-calc": "^2.1.4" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-parser-algorithms": "^3.0.5", + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-parser-algorithms": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@csstools/css-parser-algorithms/-/css-parser-algorithms-3.0.5.tgz", + "integrity": "sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": 
"opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "@csstools/css-tokenizer": "^3.0.4" + } + }, + "node_modules/@csstools/css-tokenizer": { + "version": "3.0.4", + "resolved": "https://registry.npmjs.org/@csstools/css-tokenizer/-/css-tokenizer-3.0.4.tgz", + "integrity": "sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/csstools" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/csstools" + } + ], + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/@emotion/babel-plugin": { + "version": "11.13.5", + "resolved": "https://registry.npmjs.org/@emotion/babel-plugin/-/babel-plugin-11.13.5.tgz", + "integrity": "sha512-pxHCpT2ex+0q+HH91/zsdHkw/lXd468DIN2zvfvLtPKLLMo6gQj7oLObq8PhkrxOZb/gGCq03S3Z7PDhS8pduQ==", + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.16.7", + "@babel/runtime": "^7.18.3", + "@emotion/hash": "^0.9.2", + "@emotion/memoize": "^0.9.0", + "@emotion/serialize": "^1.3.3", + "babel-plugin-macros": "^3.1.0", + "convert-source-map": "^1.5.0", + "escape-string-regexp": "^4.0.0", + "find-root": "^1.1.0", + "source-map": "^0.5.7", + "stylis": "4.2.0" + } + }, + "node_modules/@emotion/babel-plugin/node_modules/@emotion/hash": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz", + "integrity": "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==", + "license": "MIT" + }, + "node_modules/@emotion/babel-plugin/node_modules/stylis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==", + "license": "MIT" + }, + 
"node_modules/@emotion/cache": { + "version": "11.14.0", + "resolved": "https://registry.npmjs.org/@emotion/cache/-/cache-11.14.0.tgz", + "integrity": "sha512-L/B1lc/TViYk4DcpGxtAVbx0ZyiKM5ktoIyafGkH6zg/tj+mA+NE//aPYKG0k8kCHSHVJrpLpcAlOBEXQ3SavA==", + "license": "MIT", + "dependencies": { + "@emotion/memoize": "^0.9.0", + "@emotion/sheet": "^1.4.0", + "@emotion/utils": "^1.4.2", + "@emotion/weak-memoize": "^0.4.0", + "stylis": "4.2.0" + } + }, + "node_modules/@emotion/cache/node_modules/stylis": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.2.0.tgz", + "integrity": "sha512-Orov6g6BB1sDfYgzWfTHDOxamtX1bE/zo104Dh9e6fqJ3PooipYyfJ0pUmrZO2wAvO8YbEyeFrkV91XTsGMSrw==", + "license": "MIT" + }, + "node_modules/@emotion/css": { + "version": "11.13.5", + "resolved": "https://registry.npmjs.org/@emotion/css/-/css-11.13.5.tgz", + "integrity": "sha512-wQdD0Xhkn3Qy2VNcIzbLP9MR8TafI0MJb7BEAXKp+w4+XqErksWR4OXomuDzPsN4InLdGhVe6EYcn2ZIUCpB8w==", + "license": "MIT", + "dependencies": { + "@emotion/babel-plugin": "^11.13.5", + "@emotion/cache": "^11.13.5", + "@emotion/serialize": "^1.3.3", + "@emotion/sheet": "^1.4.0", + "@emotion/utils": "^1.4.2" + } + }, + "node_modules/@emotion/hash": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.8.0.tgz", + "integrity": "sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow==", + "license": "MIT" + }, + "node_modules/@emotion/memoize": { + "version": "0.9.0", + "resolved": "https://registry.npmjs.org/@emotion/memoize/-/memoize-0.9.0.tgz", + "integrity": "sha512-30FAj7/EoJ5mwVPOWhAyCX+FPfMDrVecJAM+Iw9NRoSl4BBAQeqj4cApHHUXOVvIPgLVDsCFoz/hGD+5QQD1GQ==", + "license": "MIT" + }, + "node_modules/@emotion/react": { + "version": "11.14.0", + "resolved": "https://registry.npmjs.org/@emotion/react/-/react-11.14.0.tgz", + "integrity": "sha512-O000MLDBDdk/EohJPFUqvnp4qnHeYkVP5B0xEG0D/L7cOKP9kefu2DXn8dj74cQfsEzUqh+sr1RzFqiL1o+PpA==", + 
"license": "MIT", + "dependencies": { + "@babel/runtime": "^7.18.3", + "@emotion/babel-plugin": "^11.13.5", + "@emotion/cache": "^11.14.0", + "@emotion/serialize": "^1.3.3", + "@emotion/use-insertion-effect-with-fallbacks": "^1.2.0", + "@emotion/utils": "^1.4.2", + "@emotion/weak-memoize": "^0.4.0", + "hoist-non-react-statics": "^3.3.1" + }, + "peerDependencies": { + "react": ">=16.8.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + } + } + }, + "node_modules/@emotion/serialize": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@emotion/serialize/-/serialize-1.3.3.tgz", + "integrity": "sha512-EISGqt7sSNWHGI76hC7x1CksiXPahbxEOrC5RjmFRJTqLyEK9/9hZvBbiYn70dw4wuwMKiEMCUlR6ZXTSWQqxA==", + "license": "MIT", + "dependencies": { + "@emotion/hash": "^0.9.2", + "@emotion/memoize": "^0.9.0", + "@emotion/unitless": "^0.10.0", + "@emotion/utils": "^1.4.2", + "csstype": "^3.0.2" + } + }, + "node_modules/@emotion/serialize/node_modules/@emotion/hash": { + "version": "0.9.2", + "resolved": "https://registry.npmjs.org/@emotion/hash/-/hash-0.9.2.tgz", + "integrity": "sha512-MyqliTZGuOm3+5ZRSaaBGP3USLw6+EGykkwZns2EPC5g8jJ4z9OrdZY9apkl3+UP9+sdz76YYkwCKP5gh8iY3g==", + "license": "MIT" + }, + "node_modules/@emotion/serialize/node_modules/@emotion/unitless": { + "version": "0.10.0", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.10.0.tgz", + "integrity": "sha512-dFoMUuQA20zvtVTuxZww6OHoJYgrzfKM1t52mVySDJnMSEa08ruEvdYQbhvyu6soU+NeLVd3yKfTfT0NeV6qGg==", + "license": "MIT" + }, + "node_modules/@emotion/sheet": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@emotion/sheet/-/sheet-1.4.0.tgz", + "integrity": "sha512-fTBW9/8r2w3dXWYM4HCB1Rdp8NLibOw2+XELH5m5+AkWiL/KqYX6dc0kKYlaYyKjrQ6ds33MCdMPEwgs2z1rqg==", + "license": "MIT" + }, + "node_modules/@emotion/unitless": { + "version": "0.7.5", + "resolved": "https://registry.npmjs.org/@emotion/unitless/-/unitless-0.7.5.tgz", + "integrity": 
"sha512-OWORNpfjMsSSUBVrRBVGECkhWcULOAJz9ZW8uK9qgxD+87M7jHRcvh/A96XXNhXTLmKcoYSQtBEX7lHMO7YRwg==", + "license": "MIT" + }, + "node_modules/@emotion/use-insertion-effect-with-fallbacks": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@emotion/use-insertion-effect-with-fallbacks/-/use-insertion-effect-with-fallbacks-1.2.0.tgz", + "integrity": "sha512-yJMtVdH59sxi/aVJBpk9FQq+OR8ll5GT8oWd57UpeaKEVGab41JWaCFA7FRLoMLloOZF/c/wsPoe+bfGmRKgDg==", + "license": "MIT", + "peerDependencies": { + "react": ">=16.8.0" + } + }, + "node_modules/@emotion/utils": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/@emotion/utils/-/utils-1.4.2.tgz", + "integrity": "sha512-3vLclRofFziIa3J2wDh9jjbkUz9qk5Vi3IZ/FSTKViB0k+ef0fPV7dYrUIugbgupYDx7v9ud/SjrtEP8Y4xLoA==", + "license": "MIT" + }, + "node_modules/@emotion/weak-memoize": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/@emotion/weak-memoize/-/weak-memoize-0.4.0.tgz", + "integrity": "sha512-snKqtPW01tN0ui7yu9rGv69aJXr/a/Ywvl11sUjNtEcRc+ng/mQriFL0wLXMef74iHa/EkftbDzU9F8iFbH+zg==", + "license": "MIT" + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + 
"resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + 
"node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": 
"sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": 
"https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.9.1.tgz", + "integrity": "sha512-phrYmNiYppR7znFEdqgfWHXR6NCkZEK7hwWDHZUjit/2/U0r6XvkDl0SYnoM51Hq7FhCGdLDT6zxCCOY1hexsQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.12.2", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.12.2.tgz", + "integrity": "sha512-EriSTlt5OC9/7SXkRSCAhfSxxoSUgBm33OH+IkwbdpgoqsSsUg7y3uh+IICI/Qg4BBWr3U2i39RpmycbxMq4ew==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.1.tgz", + "integrity": "sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.13.0", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz", + "integrity": "sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==", + "deprecated": "Use @eslint/config-array instead", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.3", + "debug": "^4.3.1", + 
"minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@intlify/core-base": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/@intlify/core-base/-/core-base-9.14.5.tgz", + "integrity": 
"sha512-5ah5FqZG4pOoHjkvs8mjtv+gPKYU0zCISaYNjBNNqYiaITxW8ZtVih3GS/oTOqN8d9/mDLyrjD46GBApNxmlsA==", + "license": "MIT", + "dependencies": { + "@intlify/message-compiler": "9.14.5", + "@intlify/shared": "9.14.5" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/message-compiler": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/@intlify/message-compiler/-/message-compiler-9.14.5.tgz", + "integrity": "sha512-IHzgEu61/YIpQV5Pc3aRWScDcnFKWvQA9kigcINcCBXN8mbW+vk9SK+lDxA6STzKQsVJxUPg9ACC52pKKo3SVQ==", + "license": "MIT", + "dependencies": { + "@intlify/shared": "9.14.5", + "source-map-js": "^1.0.2" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@intlify/shared": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/@intlify/shared/-/shared-9.14.5.tgz", + "integrity": "sha512-9gB+E53BYuAEMhbCAxVgG38EZrk59sxBtv3jSizNL2hEWlgjBjAw1AwpLHtNaeda12pe6W20OGEa0TwuMSRbyQ==", + "license": "MIT", + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + } + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": 
"sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@istanbuljs/schema": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@istanbuljs/schema/-/schema-0.1.3.tgz", + "integrity": "sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.13", + "resolved": "https://registry.npmjs.org/@jridgewell/gen-mapping/-/gen-mapping-0.3.13.tgz", + "integrity": "sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.0", + "@jridgewell/trace-mapping": "^0.3.24" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.5", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": 
"sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.31", + "resolved": "https://registry.npmjs.org/@jridgewell/trace-mapping/-/trace-mapping-0.3.31.tgz", + "integrity": "sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==", + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@kurkle/color": { + "version": "0.3.4", + "resolved": "https://registry.npmjs.org/@kurkle/color/-/color-0.3.4.tgz", + "integrity": "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==", + "license": "MIT" + }, + "node_modules/@lobehub/icons": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@lobehub/icons/-/icons-4.0.2.tgz", + "integrity": "sha512-mYFEXXt7Z8iY8yLP5cDVctUPqlZUHWi5qzQCJiC646p7uiXhtpn93sRab/5pey+CYDh6BbRU6lhwiURu/SU5IA==", + "license": "MIT", + "workspaces": [ + "packages/*" + ], + "dependencies": { + "antd-style": "^4.1.0", + "lucide-react": "^0.469.0", + "polished": "^4.3.1" + }, + "peerDependencies": { + "@lobehub/ui": "^4.3.3", + "antd": "^6.1.1", + "react": "^19.0.0", + "react-dom": "^19.0.0" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": 
"sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@one-ini/wasm": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@one-ini/wasm/-/wasm-0.1.1.tgz", + "integrity": "sha512-XuySG1E38YScSJoMlqovLru4KTUNSjgVTIjyh7qMX6aNN5HY5Ct5LhRJdxO79JtTzKfzV/bnWpz+zquYrISsvw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@rc-component/util": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@rc-component/util/-/util-1.7.0.tgz", + "integrity": "sha512-tIvIGj4Vl6fsZFvWSkYw9sAfiCKUXMyhVz6kpKyZbwyZyRPqv2vxYZROdaO1VB4gqTNvUZFXh6i3APUiterw5g==", + "license": "MIT", + "dependencies": { + "is-mobile": "^5.0.0", + "react-is": "^18.2.0" + }, + "peerDependencies": { + "react": ">=18.0.0", + "react-dom": ">=18.0.0" + } + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.55.1.tgz", + "integrity": "sha512-9R0DM/ykwfGIlNu6+2U09ga0WXeZ9MRC2Ter8jnz8415VbuIykVuc6bhdrbORFZANDmTDvq26mJrEVTl8TdnDg==", + "cpu": [ + "arm" + ], + "dev": true, + 
"license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.55.1.tgz", + "integrity": "sha512-eFZCb1YUqhTysgW3sj/55du5cG57S7UTNtdMjCW7LwVcj3dTTcowCsC8p7uBdzKsZYa8J7IDE8lhMI+HX1vQvg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.55.1.tgz", + "integrity": "sha512-p3grE2PHcQm2e8PSGZdzIhCKbMCw/xi9XvMPErPhwO17vxtvCN5FEA2mSLgmKlCjHGMQTP6phuQTYWUnKewwGg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.55.1.tgz", + "integrity": "sha512-rDUjG25C9qoTm+e02Esi+aqTKSBYwVTaoS1wxcN47/Luqef57Vgp96xNANwt5npq9GDxsH7kXxNkJVEsWEOEaQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-freebsd-arm64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.55.1.tgz", + "integrity": "sha512-+JiU7Jbp5cdxekIgdte0jfcu5oqw4GCKr6i3PJTlXTCU5H5Fvtkpbs4XJHRmWNXF+hKmn4v7ogI5OQPaupJgOg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-freebsd-x64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.55.1.tgz", + "integrity": "sha512-V5xC1tOVWtLLmr3YUk2f6EJK4qksksOYiz/TCsFHu/R+woubcLWdC9nZQmwjOAbmExBIVKsm1/wKmEy4z4u4Bw==", + "cpu": [ + "x64" + ], + "dev": true, 
+ "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.55.1.tgz", + "integrity": "sha512-Rn3n+FUk2J5VWx+ywrG/HGPTD9jXNbicRtTM11e/uorplArnXZYsVifnPPqNNP5BsO3roI4n8332ukpY/zN7rQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.55.1.tgz", + "integrity": "sha512-grPNWydeKtc1aEdrJDWk4opD7nFtQbMmV7769hiAaYyUKCT1faPRm2av8CX1YJsZ4TLAZcg9gTR1KvEzoLjXkg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.55.1.tgz", + "integrity": "sha512-a59mwd1k6x8tXKcUxSyISiquLwB5pX+fJW9TkWU46lCqD/GRDe9uDN31jrMmVP3feI3mhAdvcCClhV8V5MhJFQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.55.1.tgz", + "integrity": "sha512-puS1MEgWX5GsHSoiAsF0TYrpomdvkaXm0CofIMG5uVkP6IBV+ZO9xhC5YEN49nsgYo1DuuMquF9+7EDBVYu4uA==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.55.1.tgz", + "integrity": 
"sha512-r3Wv40in+lTsULSb6nnoudVbARdOwb2u5fpeoOAZjFLznp6tDU8kd+GTHmJoqZ9lt6/Sys33KdIHUaQihFcu7g==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-loong64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.55.1.tgz", + "integrity": "sha512-MR8c0+UxAlB22Fq4R+aQSPBayvYa3+9DrwG/i1TKQXFYEaoW3B5b/rkSRIypcZDdWjWnpcvxbNaAJDcSbJU3Lw==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.55.1.tgz", + "integrity": "sha512-3KhoECe1BRlSYpMTeVrD4sh2Pw2xgt4jzNSZIIPLFEsnQn9gAnZagW9+VqDqAHgm1Xc77LzJOo2LdigS5qZ+gw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-ppc64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.55.1.tgz", + "integrity": "sha512-ziR1OuZx0vdYZZ30vueNZTg73alF59DicYrPViG0NEgDVN8/Jl87zkAPu4u6VjZST2llgEUjaiNl9JM6HH1Vdw==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.55.1.tgz", + "integrity": "sha512-uW0Y12ih2XJRERZ4jAfKamTyIHVMPQnTZcQjme2HMVDAHY4amf5u414OqNYC+x+LzRdRcnIG1YodLrrtA8xsxw==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-musl": { + "version": "4.55.1", + "resolved": 
"https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.55.1.tgz", + "integrity": "sha512-u9yZ0jUkOED1BFrqu3BwMQoixvGHGZ+JhJNkNKY/hyoEgOwlqKb62qu+7UjbPSHYjiVy8kKJHvXKv5coH4wDeg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.55.1.tgz", + "integrity": "sha512-/0PenBCmqM4ZUd0190j7J0UsQ/1nsi735iPRakO8iPciE7BQ495Y6msPzaOmvx0/pn+eJVVlZrNrSh4WSYLxNg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.55.1.tgz", + "integrity": "sha512-a8G4wiQxQG2BAvo+gU6XrReRRqj+pLS2NGXKm8io19goR+K8lw269eTrPkSdDTALwMmJp4th2Uh0D8J9bEV1vg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.55.1.tgz", + "integrity": "sha512-bD+zjpFrMpP/hqkfEcnjXWHMw5BIghGisOKPj+2NaNDuVT+8Ds4mPf3XcPHuat1tz89WRL+1wbcxKY3WSbiT7w==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-openbsd-x64": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.55.1.tgz", + "integrity": "sha512-eLXw0dOiqE4QmvikfQ6yjgkg/xDM+MdU9YJuP4ySTibXU0oAvnEWXt7UDJmD4UkYialMfOGFPJnIHSe/kdzPxg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ] + }, + "node_modules/@rollup/rollup-openharmony-arm64": { + 
"version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.55.1.tgz", + "integrity": "sha512-xzm44KgEP11te3S2HCSyYf5zIzWmx3n8HDCc7EE59+lTcswEWNpvMLfd9uJvVX8LCg9QWG67Xt75AuHn4vgsXw==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openharmony" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.55.1.tgz", + "integrity": "sha512-yR6Bl3tMC/gBok5cz/Qi0xYnVbIxGx5Fcf/ca0eB6/6JwOY+SRUcJfI0OpeTpPls7f194as62thCt/2BjxYN8g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.55.1.tgz", + "integrity": "sha512-3fZBidchE0eY0oFZBnekYCfg+5wAB0mbpCBuofh5mZuzIU/4jIVkbESmd2dOsFNS78b53CYv3OAtwqkZZmU5nA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-gnu": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.55.1.tgz", + "integrity": "sha512-xGGY5pXj69IxKb4yv/POoocPy/qmEGhimy/FoTpTSVju3FYXUQQMFCaZZXJVidsmGxRioZAwpThl/4zX41gRKg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.55.1.tgz", + "integrity": "sha512-SPEpaL6DX4rmcXtnhdrQYgzQ5W2uW3SCJch88lB2zImhJRhIIK44fkUrgIV/Q8yUNfw5oyZ5vkeQsZLhCb06lw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + 
"node_modules/@types/estree": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", + "integrity": "sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/file-saver": { + "version": "2.0.7", + "resolved": "https://registry.npmjs.org/@types/file-saver/-/file-saver-2.0.7.tgz", + "integrity": "sha512-dNKVfHd/jk0SkR/exKGj2ggkB45MAkzvWCaqLUUgkyjITkGNzH8H+yUwr+BLJUBjZOe9w8X3wgmXhZDRg1ED6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/mdx": { + "version": "2.0.13", + "resolved": "https://registry.npmjs.org/@types/mdx/-/mdx-2.0.13.tgz", + "integrity": "sha512-+OWZQfAYyio6YkJb3HLxDrvnx6SWWDbC0zVPfBRzUk0/nqoDyf6dNxQi3eArPe8rJ473nobTMQ/8Zk+LxJ+Yuw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "20.19.27", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.19.27.tgz", + "integrity": "sha512-N2clP5pJhB2YnZJ3PIHFk5RkygRX5WO/5f0WC08tp0wd+sv0rsJk3MqWn3CbNmT2J505a5336jaQj4ph1AdMug==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/parse-json": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/@types/parse-json/-/parse-json-4.0.2.tgz", + "integrity": "sha512-dISoDXWWQwUquiKsyZ4Ng+HX2KsPL7LyHKHQwgGFEA3IaKac4Obd+h2a/a6waisAoepJlBcx9paWqjA8/HVjCw==", + "license": "MIT" + }, + "node_modules/@types/web-bluetooth": { + "version": "0.0.20", + "resolved": "https://registry.npmjs.org/@types/web-bluetooth/-/web-bluetooth-0.0.20.tgz", + "integrity": "sha512-g9gZnnXVq7gM7v3tJCWV/qw7w+KeOlSHAhgF9RytFyifW6AF61hdT2ucrYhPq9hLs5JIryeupHV3qGk95dH9ow==", + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-7.18.0.tgz", + "integrity": 
"sha512-94EQTWZ40mzBc42ATNIBimBEDltSJ9RQHCC8vc/PDbxi4k8dVwUAv4o98dk50M1zB+JGFxp43FP7f8+FP8R6Sw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.10.0", + "@typescript-eslint/scope-manager": "7.18.0", + "@typescript-eslint/type-utils": "7.18.0", + "@typescript-eslint/utils": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0", + "graphemer": "^1.4.0", + "ignore": "^5.3.1", + "natural-compare": "^1.4.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^7.0.0", + "eslint": "^8.56.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-7.18.0.tgz", + "integrity": "sha512-4Z+L8I2OqhZV8qA132M4wNL30ypZGYOQVBfMgxDH/K5UX0PNqTu1c6za9ST5r9+tavvHiTWmBnKzpCJ/GlVFtg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/scope-manager": "7.18.0", + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/typescript-estree": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-7.18.0.tgz", + "integrity": "sha512-jjhdIE/FPF2B7Z1uzc6i3oWKbGcHb87Qw7AWj6jmEqNOfDFbJWtjt/XfwCpvNkpGWlcJaog5vTR+VV8+w9JflA==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@typescript-eslint/types": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-7.18.0.tgz", + "integrity": "sha512-XL0FJXuCLaDuX2sYqZUUSOJ2sG5/i1AAze+axqmLnSkNEVMVYLF+cbwlB2w8D1tinFuSikHmFta+P+HOofrLeA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "7.18.0", + "@typescript-eslint/utils": "7.18.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-7.18.0.tgz", + "integrity": "sha512-iZqi+Ds1y4EDYUtlOOC+aUmxnE9xS/yCigkjA7XpTKV6nCBd3Hp/PRGGmdwnfkV2ThMyYldP1wRpm/id99spTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-7.18.0.tgz", + "integrity": "sha512-aP1v/BSPnnyhMHts8cf1qQ6Q1IFwwRvAQGRvBFkWlo3/lH29OXA3Pts+c10nxRxIBrDnoMqzhgdwVe5f2D6OzA==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/visitor-keys": "7.18.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + 
"is-glob": "^4.0.3", + "minimatch": "^9.0.4", + "semver": "^7.6.0", + "ts-api-utils": "^1.3.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-7.18.0.tgz", + "integrity": "sha512-kK0/rNa2j74XuHVcoCZxdFBMF+aq/vH83CXAOHieC+2Gis4mF8jJXT5eAfyD3K0sAxtPuwxaIOIOvhwzVDt/kw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@typescript-eslint/scope-manager": "7.18.0", + "@typescript-eslint/types": "7.18.0", + "@typescript-eslint/typescript-estree": "7.18.0" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^8.56.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "7.18.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-7.18.0.tgz", + "integrity": "sha512-cDF0/Gf81QpY3xYyJKDV14Zwdmid5+uuENhjH2EqFaF0ni+yAyq/LzMaIJdhNJXZI7uLzwIlA+V7oWoyn6Curg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "7.18.0", + "eslint-visitor-keys": "^3.4.3" + }, + "engines": { + "node": "^18.18.0 || >=20.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.3.0.tgz", + "integrity": "sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==", + "dev": true, + "license": "ISC" + }, + 
"node_modules/@vitejs/plugin-vue": { + "version": "5.2.4", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-5.2.4.tgz", + "integrity": "sha512-7Yx/SXSOcQq5HiiV3orevHUFn+pmMB4cgbEkDYgnkUWb0WfeQ/wa2yFv6D5ICiCQOVpjA7vYDXrC7AGO8yjDHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "peerDependencies": { + "vite": "^5.0.0 || ^6.0.0", + "vue": "^3.2.25" + } + }, + "node_modules/@vitest/coverage-v8": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/coverage-v8/-/coverage-v8-2.1.9.tgz", + "integrity": "sha512-Z2cOr0ksM00MpEfyVE8KXIYPEcBFxdbLSs56L8PO0QQMxt/6bDj45uQfxoc96v05KW3clk7vvgP0qfDit9DmfQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.3.0", + "@bcoe/v8-coverage": "^0.2.3", + "debug": "^4.3.7", + "istanbul-lib-coverage": "^3.2.2", + "istanbul-lib-report": "^3.0.1", + "istanbul-lib-source-maps": "^5.0.6", + "istanbul-reports": "^3.1.7", + "magic-string": "^0.30.12", + "magicast": "^0.3.5", + "std-env": "^3.8.0", + "test-exclude": "^7.0.1", + "tinyrainbow": "^1.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@vitest/browser": "2.1.9", + "vitest": "2.1.9" + }, + "peerDependenciesMeta": { + "@vitest/browser": { + "optional": true + } + } + }, + "node_modules/@vitest/expect": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-2.1.9.tgz", + "integrity": "sha512-UJCIkTBenHeKT1TTlKMJWy1laZewsRIzYighyYiJKZreqtdxSos/S1t+ktRMQWu2CKqaarrkeszJx1cgC5tGZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "2.1.9", + "@vitest/utils": "2.1.9", + "chai": "^5.1.2", + "tinyrainbow": "^1.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/mocker": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/mocker/-/mocker-2.1.9.tgz", + "integrity": 
"sha512-tVL6uJgoUdi6icpxmdrn5YNo3g3Dxv+IHJBr0GXHaEdTcw3F+cPKnsXFhli6nO+f/6SDKPHEK1UN+k+TQv0Ehg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/spy": "2.1.9", + "estree-walker": "^3.0.3", + "magic-string": "^0.30.12" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "msw": "^2.4.9", + "vite": "^5.0.0" + }, + "peerDependenciesMeta": { + "msw": { + "optional": true + }, + "vite": { + "optional": true + } + } + }, + "node_modules/@vitest/mocker/node_modules/estree-walker": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-3.0.3.tgz", + "integrity": "sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "^1.0.0" + } + }, + "node_modules/@vitest/pretty-format": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/pretty-format/-/pretty-format-2.1.9.tgz", + "integrity": "sha512-KhRIdGV2U9HOUzxfiHmY8IFHTdqtOhIzCpd8WRdJiE7D/HUcZVD0EgQCVjm+Q9gkUXWgBvMmTtZgIG48wq7sOQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyrainbow": "^1.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/runner": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-2.1.9.tgz", + "integrity": "sha512-ZXSSqTFIrzduD63btIfEyOmNcBmQvgOVsPNPe0jYtESiXkhd8u2erDLnMxmGrDCwHCCHE7hxwRDCT3pt0esT4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/utils": "2.1.9", + "pathe": "^1.1.2" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/snapshot": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-2.1.9.tgz", + "integrity": "sha512-oBO82rEjsxLNJincVhLhaxxZdEtV0EFHMK5Kmx5sJ6H9L183dHECjiefOAdnqpIgT5eZwT04PoggUnW88vOBNQ==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "@vitest/pretty-format": "2.1.9", + "magic-string": "^0.30.12", + "pathe": "^1.1.2" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/spy": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-2.1.9.tgz", + "integrity": "sha512-E1B35FwzXXTs9FHNK6bDszs7mtydNi5MIfUWpceJ8Xbfb1gBMscAnwLbEu+B44ed6W3XjL9/ehLPHR1fkf1KLQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "tinyspy": "^3.0.2" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@vitest/utils": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-2.1.9.tgz", + "integrity": "sha512-v0psaMSkNJ3A2NMrUEHFRzJtDPFn+/VWZ5WxImB21T9fjucJRmS7xCS3ppEnARb9y11OAzaD+P2Ps+b+BGX5iQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/pretty-format": "2.1.9", + "loupe": "^3.1.2", + "tinyrainbow": "^1.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@volar/language-core": { + "version": "2.4.15", + "resolved": "https://registry.npmjs.org/@volar/language-core/-/language-core-2.4.15.tgz", + "integrity": "sha512-3VHw+QZU0ZG9IuQmzT68IyN4hZNd9GchGPhbD9+pa8CVv7rnoOZwo7T8weIbrRmihqy3ATpdfXFnqRrfPVK6CA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/source-map": "2.4.15" + } + }, + "node_modules/@volar/source-map": { + "version": "2.4.15", + "resolved": "https://registry.npmjs.org/@volar/source-map/-/source-map-2.4.15.tgz", + "integrity": "sha512-CPbMWlUN6hVZJYGcU/GSoHu4EnCHiLaXI9n8c9la6RaI9W5JHX+NqG+GSQcB0JdC2FIBLdZJwGsfKyBB71VlTg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@volar/typescript": { + "version": "2.4.15", + "resolved": "https://registry.npmjs.org/@volar/typescript/-/typescript-2.4.15.tgz", + "integrity": "sha512-2aZ8i0cqPGjXb4BhkMsPYDkkuc2ZQ6yOpqwAuNwUoncELqoy5fRgOQtLR9gB0g902iS0NAkvpIzs27geVyVdPg==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "@volar/language-core": "2.4.15", + "path-browserify": "^1.0.1", + "vscode-uri": "^3.0.8" + } + }, + "node_modules/@vue/compiler-core": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.5.26.tgz", + "integrity": "sha512-vXyI5GMfuoBCnv5ucIT7jhHKl55Y477yxP6fc4eUswjP8FG3FFVFd41eNDArR+Uk3QKn2Z85NavjaxLxOC19/w==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@vue/shared": "3.5.26", + "entities": "^7.0.0", + "estree-walker": "^2.0.2", + "source-map-js": "^1.2.1" + } + }, + "node_modules/@vue/compiler-dom": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.5.26.tgz", + "integrity": "sha512-y1Tcd3eXs834QjswshSilCBnKGeQjQXB6PqFn/1nxcQw4pmG42G8lwz+FZPAZAby6gZeHSt/8LMPfZ4Rb+Bd/A==", + "license": "MIT", + "dependencies": { + "@vue/compiler-core": "3.5.26", + "@vue/shared": "3.5.26" + } + }, + "node_modules/@vue/compiler-sfc": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.5.26.tgz", + "integrity": "sha512-egp69qDTSEZcf4bGOSsprUr4xI73wfrY5oRs6GSgXFTiHrWj4Y3X5Ydtip9QMqiCMCPVwLglB9GBxXtTadJ3mA==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.28.5", + "@vue/compiler-core": "3.5.26", + "@vue/compiler-dom": "3.5.26", + "@vue/compiler-ssr": "3.5.26", + "@vue/shared": "3.5.26", + "estree-walker": "^2.0.2", + "magic-string": "^0.30.21", + "postcss": "^8.5.6", + "source-map-js": "^1.2.1" + } + }, + "node_modules/@vue/compiler-ssr": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.5.26.tgz", + "integrity": "sha512-lZT9/Y0nSIRUPVvapFJEVDbEXruZh2IYHMk2zTtEgJSlP5gVOqeWXH54xDKAaFS4rTnDeDBQUYDtxKyoW9FwDw==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.5.26", + "@vue/shared": "3.5.26" + } + }, + "node_modules/@vue/compiler-vue2": { + "version": "2.7.16", + "resolved": 
"https://registry.npmjs.org/@vue/compiler-vue2/-/compiler-vue2-2.7.16.tgz", + "integrity": "sha512-qYC3Psj9S/mfu9uVi5WvNZIzq+xnXMhOwbTFKKDD7b1lhpnn71jXSFdTQ+WsIEk0ONCd7VV2IMm7ONl6tbQ86A==", + "dev": true, + "license": "MIT", + "dependencies": { + "de-indent": "^1.0.2", + "he": "^1.2.0" + } + }, + "node_modules/@vue/devtools-api": { + "version": "6.6.4", + "resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-6.6.4.tgz", + "integrity": "sha512-sGhTPMuXqZ1rVOk32RylztWkfXTRhuS7vgAKv0zjqk8gbsHkJ7xfFf+jbySxt7tWObEJwyKaHMikV/WGDiQm8g==", + "license": "MIT" + }, + "node_modules/@vue/language-core": { + "version": "2.2.12", + "resolved": "https://registry.npmjs.org/@vue/language-core/-/language-core-2.2.12.tgz", + "integrity": "sha512-IsGljWbKGU1MZpBPN+BvPAdr55YPkj2nB/TBNGNC32Vy2qLG25DYu/NBN2vNtZqdRbTRjaoYrahLrToim2NanA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/language-core": "2.4.15", + "@vue/compiler-dom": "^3.5.0", + "@vue/compiler-vue2": "^2.7.16", + "@vue/shared": "^3.5.0", + "alien-signals": "^1.0.3", + "minimatch": "^9.0.3", + "muggle-string": "^0.4.1", + "path-browserify": "^1.0.1" + }, + "peerDependencies": { + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@vue/reactivity": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.5.26.tgz", + "integrity": "sha512-9EnYB1/DIiUYYnzlnUBgwU32NNvLp/nhxLXeWRhHUEeWNTn1ECxX8aGO7RTXeX6PPcxe3LLuNBFoJbV4QZ+CFQ==", + "license": "MIT", + "dependencies": { + "@vue/shared": "3.5.26" + } + }, + "node_modules/@vue/runtime-core": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.5.26.tgz", + "integrity": "sha512-xJWM9KH1kd201w5DvMDOwDHYhrdPTrAatn56oB/LRG4plEQeZRQLw0Bpwih9KYoqmzaxF0OKSn6swzYi84e1/Q==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.5.26", + "@vue/shared": "3.5.26" + } + }, + 
"node_modules/@vue/runtime-dom": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.5.26.tgz", + "integrity": "sha512-XLLd/+4sPC2ZkN/6+V4O4gjJu6kSDbHAChvsyWgm1oGbdSO3efvGYnm25yCjtFm/K7rrSDvSfPDgN1pHgS4VNQ==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.5.26", + "@vue/runtime-core": "3.5.26", + "@vue/shared": "3.5.26", + "csstype": "^3.2.3" + } + }, + "node_modules/@vue/server-renderer": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.5.26.tgz", + "integrity": "sha512-TYKLXmrwWKSodyVuO1WAubucd+1XlLg4set0YoV+Hu8Lo79mp/YMwWV5mC5FgtsDxX3qo1ONrxFaTP1OQgy1uA==", + "license": "MIT", + "dependencies": { + "@vue/compiler-ssr": "3.5.26", + "@vue/shared": "3.5.26" + }, + "peerDependencies": { + "vue": "3.5.26" + } + }, + "node_modules/@vue/shared": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.5.26.tgz", + "integrity": "sha512-7Z6/y3uFI5PRoKeorTOSXKcDj0MSasfNNltcslbFrPpcw6aXRUALq4IfJlaTRspiWIUOEZbrpM+iQGmCOiWe4A==", + "license": "MIT" + }, + "node_modules/@vue/test-utils": { + "version": "2.4.6", + "resolved": "https://registry.npmjs.org/@vue/test-utils/-/test-utils-2.4.6.tgz", + "integrity": "sha512-FMxEjOpYNYiFe0GkaHsnJPXFHxQ6m4t8vI/ElPGpMWxZKpmRvQ33OIrvRXemy6yha03RxhOlQuy+gZMC3CQSow==", + "dev": true, + "license": "MIT", + "dependencies": { + "js-beautify": "^1.14.9", + "vue-component-type-helpers": "^2.0.0" + } + }, + "node_modules/@vueuse/core": { + "version": "10.11.1", + "resolved": "https://registry.npmjs.org/@vueuse/core/-/core-10.11.1.tgz", + "integrity": "sha512-guoy26JQktXPcz+0n3GukWIy/JDNKti9v6VEMu6kV2sYBsWuGiTU8OWdg+ADfUbHg3/3DlqySDe7JmdHrktiww==", + "license": "MIT", + "dependencies": { + "@types/web-bluetooth": "^0.0.20", + "@vueuse/metadata": "10.11.1", + "@vueuse/shared": "10.11.1", + "vue-demi": ">=0.14.8" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + 
"node_modules/@vueuse/metadata": { + "version": "10.11.1", + "resolved": "https://registry.npmjs.org/@vueuse/metadata/-/metadata-10.11.1.tgz", + "integrity": "sha512-IGa5FXd003Ug1qAZmyE8wF3sJ81xGLSqTqtQ6jaVfkeZ4i5kS2mwQF61yhVqojRnenVew5PldLyRgvdl4YYuSw==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/@vueuse/shared": { + "version": "10.11.1", + "resolved": "https://registry.npmjs.org/@vueuse/shared/-/shared-10.11.1.tgz", + "integrity": "sha512-LHpC8711VFZlDaYUXEBbFBCQ7GS3dVU9mjOhhMhXP6txTV4EhYQg/KGnQuvt/sPAtoUKq7VVUnL6mVtFoL42sA==", + "license": "MIT", + "dependencies": { + "vue-demi": ">=0.14.8" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/abbrev": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/abbrev/-/abbrev-2.0.0.tgz", + "integrity": "sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/acorn": { + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/adler-32": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/adler-32/-/adler-32-1.3.1.tgz", + "integrity": 
"sha512-ynZ4w/nUUv5rrsR8UUGoe1VC9hZj6V5hU9Qw1HlMDJGEJw5S7TfTErWTjMys6M7vr0YWcPqs3qAr4ss0nDfP+A==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/agent-base": { + "version": "7.1.4", + "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", + "integrity": "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/alien-signals": { + "version": "1.0.13", + "resolved": "https://registry.npmjs.org/alien-signals/-/alien-signals-1.0.13.tgz", + "integrity": "sha512-OGj9yyTnJEttvzhTUWuscOvtqxq5vrhF7vL9oS0xJ2mK0ItPYP1/y+vCFebfxoEyAz0++1AIwJ5CMr+Fk3nDmg==", + "dev": true, + "license": "MIT" + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + 
"funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/antd-style": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/antd-style/-/antd-style-4.1.0.tgz", + "integrity": "sha512-vnPBGg0OVlSz90KRYZhxd89aZiOImTiesF+9MQqN8jsLGZUQTjbP04X9jTdEfsztKUuMbBWg/RmB/wHTakbtMQ==", + "license": "MIT", + "dependencies": { + "@ant-design/cssinjs": "^2.0.0", + "@babel/runtime": "^7.24.1", + "@emotion/cache": "^11.11.0", + "@emotion/css": "^11.11.2", + "@emotion/react": "^11.11.4", + "@emotion/serialize": "^1.1.3", + "@emotion/utils": "^1.2.1", + "use-merge-value": "^1.2.0" + }, + "peerDependencies": { + "antd": ">=6.0.0", + "react": ">=18" + } + }, + "node_modules/any-promise": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/any-promise/-/any-promise-1.3.0.tgz", + "integrity": "sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==", + "dev": true, + "license": "MIT" + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": "sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/arg": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/arg/-/arg-5.0.2.tgz", + "integrity": "sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==", + "dev": true, + "license": "MIT" + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "dev": true, + "license": "Python-2.0" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": 
"https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/assertion-error": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/assertion-error/-/assertion-error-2.0.1.tgz", + "integrity": "sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/autoprefixer": { + "version": "10.4.23", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-10.4.23.tgz", + "integrity": "sha512-YYTXSFulfwytnjAPlw8QHncHJmlvFKtczb8InXaAx9Q0LbfDnfEYDE55omerIJKihhmU61Ft+cAOSzQVaBUmeA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/autoprefixer" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "browserslist": "^4.28.1", + "caniuse-lite": "^1.0.30001760", + "fraction.js": "^5.3.4", + "picocolors": "^1.1.1", + "postcss-value-parser": "^4.2.0" + }, + "bin": { + "autoprefixer": "bin/autoprefixer" + }, + "engines": { + "node": "^10 || ^12 || >=14" + }, + "peerDependencies": { + "postcss": "^8.1.0" + } + }, + "node_modules/axios": { + "version": "1.13.2", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz", + "integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==", + "license": 
"MIT", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.4", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/babel-plugin-macros": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/babel-plugin-macros/-/babel-plugin-macros-3.1.0.tgz", + "integrity": "sha512-Cg7TFGpIr01vOQNODXOOaGz2NpCU5gl8x1qJFbb6hbZxR7XrcE2vtbAsTAbJ7/xwJtUuJEw8K8Zr/AE0LHlesg==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.12.5", + "cosmiconfig": "^7.0.0", + "resolve": "^1.19.0" + }, + "engines": { + "node": ">=10", + "npm": ">=6" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/baseline-browser-mapping": { + "version": "2.9.14", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.14.tgz", + "integrity": "sha512-B0xUquLkiGLgHhpPBqvl7GWegWBUNuujQ6kXd/r1U38ElPT6Ok8KZ8e+FpUGEc2ZoRQUzq/aUnaKFc/svWUGSg==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "baseline-browser-mapping": "dist/cli.js" + } + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", + "dev": true, + "license": "ISC" + }, + "node_modules/brace-expansion": { + 
"version": "2.0.2", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz", + "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserslist": { + "version": "4.28.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.28.1.tgz", + "integrity": "sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "baseline-browser-mapping": "^2.9.0", + "caniuse-lite": "^1.0.30001759", + "electron-to-chromium": "^1.5.263", + "node-releases": "^2.0.27", + "update-browserslist-db": "^1.2.0" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/cac": { + "version": "6.7.14", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.7.14.tgz", + "integrity": "sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": 
"https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/camelcase-css": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/camelcase-css/-/camelcase-css-2.0.1.tgz", + "integrity": "sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001763", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001763.tgz", + "integrity": "sha512-mh/dGtq56uN98LlNX9qdbKnzINhX0QzhiWBFEkFfsFO4QyCvL8YegrJAazCwXIeqkIob8BlZPGM3xdnY+sgmvQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/cfb": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/cfb/-/cfb-1.2.2.tgz", + "integrity": "sha512-KfdUZsSOw19/ObEWasvBP/Ac4reZvAGauZhs6S/gqNhXhI7cKwvlH7ulj+dOEYnca4bm4SGo8C1bTAQvnTjgQA==", + "license": "Apache-2.0", + "dependencies": { + "adler-32": "~1.3.0", + "crc-32": "~1.2.0" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/chai": { + "version": "5.3.3", + "resolved": 
"https://registry.npmjs.org/chai/-/chai-5.3.3.tgz", + "integrity": "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "assertion-error": "^2.0.1", + "check-error": "^2.1.1", + "deep-eql": "^5.0.1", + "loupe": "^3.1.0", + "pathval": "^2.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chart.js": { + "version": "4.5.1", + "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.5.1.tgz", + "integrity": "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==", + "license": "MIT", + "dependencies": { + "@kurkle/color": "^0.3.0" + }, + "engines": { + "pnpm": ">=8" + } + }, + "node_modules/check-error": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/check-error/-/check-error-2.1.3.tgz", + "integrity": "sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 16" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": 
"~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/clsx": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", + "integrity": "sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/codepage": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/codepage/-/codepage-1.15.0.tgz", + "integrity": "sha512-3g6NUTPd/YtuuGrhMnOMRjFc+LJw/bnMp3+0r/Wcz3IXUuCosKRJvMphm5+Q+bvTVGcJJuRvVLuYba+WojaFaA==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": 
"sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-4.1.1.tgz", + "integrity": "sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/config-chain": { + "version": "1.1.13", + "resolved": "https://registry.npmjs.org/config-chain/-/config-chain-1.1.13.tgz", + "integrity": "sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ini": "^1.3.4", + "proto-list": "~1.2.1" + } + }, + "node_modules/convert-source-map": { + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-1.9.0.tgz", + "integrity": "sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==", + "license": "MIT" + }, + "node_modules/cosmiconfig": { + "version": "7.1.0", + "resolved": "https://registry.npmjs.org/cosmiconfig/-/cosmiconfig-7.1.0.tgz", + "integrity": "sha512-AdmX6xUzdNASswsFtmwSt7Vj8po9IuqXm0UXz7QKPuEUmPB4XyjGfaAr2PSuELMwkRMVH1EpIkX5bTZGRB3eCA==", + "license": "MIT", + "dependencies": { + "@types/parse-json": "^4.0.0", + "import-fresh": "^3.2.1", + "parse-json": "^5.0.0", + "path-type": "^4.0.0", + "yaml": "^1.10.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/crc-32": { + "version": "1.2.2", + 
"resolved": "https://registry.npmjs.org/crc-32/-/crc-32-1.2.2.tgz", + "integrity": "sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==", + "license": "Apache-2.0", + "bin": { + "crc32": "bin/crc32.njs" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/cssstyle": { + "version": "4.6.0", + "resolved": "https://registry.npmjs.org/cssstyle/-/cssstyle-4.6.0.tgz", + "integrity": "sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@asamuzakjp/css-color": "^3.2.0", + "rrweb-cssom": "^0.8.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/cssstyle/node_modules/rrweb-cssom": { + "version": "0.8.0", + "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.8.0.tgz", + "integrity": "sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==", + "dev": true, + "license": "MIT" + }, + "node_modules/csstype": { + "version": "3.2.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.2.3.tgz", + "integrity": 
"sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==", + "license": "MIT" + }, + "node_modules/data-urls": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/data-urls/-/data-urls-5.0.0.tgz", + "integrity": "sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^14.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/de-indent": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/de-indent/-/de-indent-1.0.2.tgz", + "integrity": "sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==", + "dev": true, + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.4.3", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.4.3.tgz", + "integrity": "sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==", + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/decimal.js": { + "version": "10.6.0", + "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.6.0.tgz", + "integrity": "sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==", + "dev": true, + "license": "MIT" + }, + "node_modules/deep-eql": { + "version": "5.0.2", + "resolved": "https://registry.npmjs.org/deep-eql/-/deep-eql-5.0.2.tgz", + "integrity": "sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": 
"sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/didyoumean": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/didyoumean/-/didyoumean-1.2.2.tgz", + "integrity": "sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dlv": { + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/dlv/-/dlv-1.1.3.tgz", + "integrity": "sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==", + "dev": true, + "license": "MIT" + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/driver.js": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/driver.js/-/driver.js-1.4.0.tgz", + "integrity": "sha512-Gm64jm6PmcU+si21sQhBrTAM1JvUrR0QhNmjkprNLxohOBzul9+pNHXgQaT9lW84gwg9GMLB3NZGuGolsz5uew==", + 
"license": "MIT" + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/editorconfig": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/editorconfig/-/editorconfig-1.0.4.tgz", + "integrity": "sha512-L9Qe08KWTlqYMVvMcTIvMAdl1cDUubzRNYL+WfA4bLDMHe4nemKkpmYzkznE1FwLKu0EEmy6obgQKzMJrg4x9Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@one-ini/wasm": "0.1.1", + "commander": "^10.0.0", + "minimatch": "9.0.1", + "semver": "^7.5.3" + }, + "bin": { + "editorconfig": "bin/editorconfig" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/editorconfig/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://registry.npmjs.org/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + } + }, + "node_modules/editorconfig/node_modules/minimatch": { + "version": "9.0.1", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.1.tgz", + "integrity": "sha512-0jWhJpD/MdhPXwPuiRkCbfYfSKp2qnn2eOc279qI7f+osl/l+prKSrvhg157zSYvx/1nmgn2NqdT6k2Z7zSH9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, 
+ "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.267", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.267.tgz", + "integrity": "sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==", + "dev": true, + "license": "ISC" + }, + "node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, + "node_modules/entities": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-7.0.0.tgz", + "integrity": "sha512-FDWG5cmEYf2Z00IkYRhbFrwIwvdFKH07uV8dvNy0omp/Qb1xcyCWp2UDtcwJF4QZZvk0sLudP6/hAu42TaqVhQ==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/error-ex": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/error-ex/-/error-ex-1.3.4.tgz", + "integrity": "sha512-sqQamAnR14VgCr1A618A3sGrygcpK+HEbenA/HiEAkkUwcZIIB/tgWqHFxWgOyDh4nB4JCRimh79dR5Ywc9MDQ==", + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 
0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT" + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + 
"@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", + "integrity": "sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.1", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.1.tgz", + "integrity": "sha512-ypowyDxpVSYpkXr9WPv2PAZCtNip1Mv5KTW0SCurXv/9iOpcrH9PaqUElksqEB6pChqHGDRCFTyrZlGhnLNGiA==", + "deprecated": "This version is no longer supported. 
Please see https://eslint.org/version-support for other options.", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.1", + "@humanwhocodes/config-array": "^0.13.0", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-plugin-vue": { + "version": "9.33.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-vue/-/eslint-plugin-vue-9.33.0.tgz", + "integrity": "sha512-174lJKuNsuDIlLpjeXc5E2Tss8P44uIimAfGD0b90k0NoirJqpG7stLuU9Vp/9ioTOrQdWVREc4mRd1BD+CvGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "globals": "^13.24.0", + "natural-compare": "^1.4.0", + "nth-check": "^2.1.1", + "postcss-selector-parser": "^6.0.15", + "semver": "^7.6.3", + "vue-eslint-parser": "^9.4.3", + "xml-name-validator": "^4.0.0" + }, + 
"engines": { + "node": "^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.2.0 || ^7.0.0 || ^8.0.0 || ^9.0.0" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + 
"integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.7.0.tgz", + "integrity": "sha512-Ap6G0WQwcU/LHsvLwON1fAQX9Zp0A2Y6Y/cJBl9r/JbW90Zyg4/zbG6zzKa2OTALELarYHmKu0GhpM5EO+7T0g==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT" + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": 
true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/expect-type": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/expect-type/-/expect-type-1.3.0.tgz", + "integrity": "sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": 
"https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.20.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.20.1.tgz", + "integrity": "sha512-GGToxJ/w1x32s/D2EKND7kTil4n8OVk/9mycTc4VDza13lOvpUZTGX3mFSCtV9ksdGBVzvsyAVLM6mHFThxXxw==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/fdir": { + "version": "6.5.0", + "resolved": "https://registry.npmjs.org/fdir/-/fdir-6.5.0.tgz", + "integrity": "sha512-tIbYtZbucOs0BRGqPJkshJUYdL+SDH7dVM8gjy+ERp3WAUjLEFJE+02kanyHtwjWOnwrKYBiwAmM0p4kLJAnXg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "picomatch": "^3 || ^4" + }, + "peerDependenciesMeta": { + "picomatch": { + "optional": true + } + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/file-saver": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/file-saver/-/file-saver-2.0.5.tgz", + "integrity": "sha512-P9bmyZ3h/PRG+Nzga+rbdI4OEpNDzAVyy74uVO9ATgzLK6VtAsYybF/+TOCvrc0MO793d6+42lLyZTw7/ArVzA==", + "license": "MIT" + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + 
"engines": { + "node": ">=8" + } + }, + "node_modules/find-root": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/find-root/-/find-root-1.1.0.tgz", + "integrity": "sha512-NKfW6bec6GfKc0SGx1e07QZY9PE99u0Bft/0rzSD5k3sO/vwkVUpDUKVm5Gpp5Ue3YfShPFTX2070tDs5kB9Ng==", + "license": "MIT" + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz", + "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==", + "dev": true, + "license": "ISC" + }, + "node_modules/follow-redirects": { + "version": "1.15.11", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.11.tgz", + "integrity": "sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + 
"node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/form-data": { + "version": "4.0.5", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.5.tgz", + "integrity": "sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "hasown": "^2.0.2", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/frac": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/frac/-/frac-1.1.2.tgz", + "integrity": "sha512-w/XBfkibaTl3YDqASwfDUqkna4Z2p9cFSr1aHDt0WoMTECnRfBOv2WArlZILlqgWlmdIlALXGpM2AOhEk5W3IA==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/fraction.js": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-5.3.4.tgz", + "integrity": "sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/rawify" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + 
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": 
"sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.12", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz", + "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + 
}, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", + "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + 
"version": "1.0.2", + "resolved": "https://registry.npmjs.org/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "license": "BSD-3-Clause", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/hoist-non-react-statics/node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==", + "license": "MIT" + }, + "node_modules/html-encoding-sniffer": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/html-encoding-sniffer/-/html-encoding-sniffer-4.0.0.tgz", + "integrity": "sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==", + 
"dev": true, + "license": "MIT", + "dependencies": { + "whatwg-encoding": "^3.1.1" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/html-escaper": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/html-escaper/-/html-escaper-2.0.2.tgz", + "integrity": "sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==", + "dev": true, + "license": "MIT" + }, + "node_modules/http-proxy-agent": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/http-proxy-agent/-/http-proxy-agent-7.0.2.tgz", + "integrity": "sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.0", + "debug": "^4.3.4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/https-proxy-agent": { + "version": "7.0.6", + "resolved": "https://registry.npmjs.org/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz", + "integrity": "sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^7.1.2", + "debug": "4" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/import-fresh": { + "version": "3.3.1", + 
"resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.1.tgz", + "integrity": "sha512-TR3KfrTZTYLPB6jUjfx6MF9WcWrHL9su5TObK4ZkYgBdWKPOFoSoQIdEuTuR82pmtxH2spWG9h6etwfr1pLBqQ==", + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "license": "MIT" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://registry.npmjs.org/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": 
"https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-mobile": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-mobile/-/is-mobile-5.0.0.tgz", + "integrity": "sha512-Tz/yndySvLAEXh+Uk8liFCxOwVH6YutuR74utvOcu7I9Di+DwM0mtdPVZNaVvvBUM2OXxne/NhOs1zAO7riusQ==", + "license": "MIT" + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-potential-custom-element-name": { + "version": "1.0.1", + "resolved": 
"https://registry.npmjs.org/is-potential-custom-element-name/-/is-potential-custom-element-name-1.0.1.tgz", + "integrity": "sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/istanbul-lib-coverage": { + "version": "3.2.2", + "resolved": "https://registry.npmjs.org/istanbul-lib-coverage/-/istanbul-lib-coverage-3.2.2.tgz", + "integrity": "sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=8" + } + }, + "node_modules/istanbul-lib-report": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/istanbul-lib-report/-/istanbul-lib-report-3.0.1.tgz", + "integrity": "sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "istanbul-lib-coverage": "^3.0.0", + "make-dir": "^4.0.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-lib-source-maps": { + "version": "5.0.6", + "resolved": "https://registry.npmjs.org/istanbul-lib-source-maps/-/istanbul-lib-source-maps-5.0.6.tgz", + "integrity": "sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.23", + "debug": "^4.1.1", + "istanbul-lib-coverage": "^3.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/istanbul-reports": { + "version": "3.2.0", + "resolved": 
"https://registry.npmjs.org/istanbul-reports/-/istanbul-reports-3.2.0.tgz", + "integrity": "sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "html-escaper": "^2.0.0", + "istanbul-lib-report": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jiti": { + "version": "1.21.7", + "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.7.tgz", + "integrity": "sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==", + "dev": true, + "license": "MIT", + "bin": { + "jiti": "bin/jiti.js" + } + }, + "node_modules/js-beautify": { + "version": "1.15.4", + "resolved": "https://registry.npmjs.org/js-beautify/-/js-beautify-1.15.4.tgz", + "integrity": "sha512-9/KXeZUKKJwqCXUdBxFJ3vPh467OCckSBmYDwSK/EtV090K+iMJ7zx2S3HLVDIWFQdqMIsZWbnaGiba18aWhaA==", + "dev": true, + "license": "MIT", + "dependencies": { + "config-chain": "^1.1.13", + "editorconfig": "^1.0.4", + "glob": "^10.4.2", + "js-cookie": "^3.0.5", + "nopt": "^7.2.1" + }, + "bin": { + "css-beautify": "js/bin/css-beautify.js", + "html-beautify": "js/bin/html-beautify.js", + "js-beautify": "js/bin/js-beautify.js" + }, + "engines": { + "node": ">=14" + } + }, + "node_modules/js-beautify/node_modules/glob": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": 
"sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/js-cookie": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/js-cookie/-/js-cookie-3.0.5.tgz", + "integrity": "sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.1.tgz", + "integrity": "sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsdom": { + "version": "24.1.3", + "resolved": "https://registry.npmjs.org/jsdom/-/jsdom-24.1.3.tgz", + "integrity": "sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssstyle": "^4.0.1", + "data-urls": "^5.0.0", + "decimal.js": "^10.4.3", + "form-data": "^4.0.0", + "html-encoding-sniffer": "^4.0.0", + "http-proxy-agent": "^7.0.2", + "https-proxy-agent": "^7.0.5", + "is-potential-custom-element-name": "^1.0.1", + "nwsapi": "^2.2.12", + "parse5": "^7.1.2", + "rrweb-cssom": 
"^0.7.1", + "saxes": "^6.0.0", + "symbol-tree": "^3.2.4", + "tough-cookie": "^4.1.4", + "w3c-xmlserializer": "^5.0.0", + "webidl-conversions": "^7.0.0", + "whatwg-encoding": "^3.1.1", + "whatwg-mimetype": "^4.0.0", + "whatwg-url": "^14.0.0", + "ws": "^8.18.0", + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": ">=18" + }, + "peerDependencies": { + "canvas": "^2.11.2" + }, + "peerDependenciesMeta": { + "canvas": { + "optional": true + } + } + }, + "node_modules/jsdom/node_modules/xml-name-validator": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", + "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": 
"sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/lilconfig": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.3.tgz", + "integrity": "sha512-/vlFKAoH5Cgt3Ie+JLhRbwOsCQePABiU3tJ1egGvyQ+33R/vcwM2Zl2QR/LzjsBeItPt3oSVXapn+m4nQDvpzw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antonk52" + } + }, + "node_modules/lines-and-columns": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "license": "MIT" + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": 
"https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/loupe": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/loupe/-/loupe-3.2.1.tgz", + "integrity": "sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/lucide-react": { + "version": "0.469.0", + "resolved": "https://registry.npmjs.org/lucide-react/-/lucide-react-0.469.0.tgz", + "integrity": "sha512-28vvUnnKQ/dBwiCQtwJw7QauYnE7yd2Cyp4tTTJpvglX4EMpbflcdBgrgToX2j71B3YvugK/NH3BGUk+E/p/Fw==", + "license": "ISC", + "peerDependencies": { + "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" + } + }, + "node_modules/magic-string": { + "version": "0.30.21", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz", + 
"integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.5.5" + } + }, + "node_modules/magicast": { + "version": "0.3.5", + "resolved": "https://registry.npmjs.org/magicast/-/magicast-0.3.5.tgz", + "integrity": "sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.25.4", + "@babel/types": "^7.25.4", + "source-map-js": "^1.2.0" + } + }, + "node_modules/make-dir": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-4.0.0.tgz", + "integrity": "sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==", + "dev": true, + "license": "MIT", + "dependencies": { + "semver": "^7.5.3" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": 
"^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "license": "MIT" + }, + "node_modules/muggle-string": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/muggle-string/-/muggle-string-0.4.1.tgz", + "integrity": "sha512-VNTrAak/KhO2i8dqqnqnAHOa3cYBwXEZe9h+D5h/1ZqFSTEFHdM65lR7RoIqq3tBBYavsOXV84NoHXZ0AkPyqQ==", + 
"dev": true, + "license": "MIT" + }, + "node_modules/mz": { + "version": "2.7.0", + "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", + "integrity": "sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0", + "object-assign": "^4.0.1", + "thenify-all": "^1.0.0" + } + }, + "node_modules/nanoid": { + "version": "3.3.11", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.11.tgz", + "integrity": "sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/node-releases": { + "version": "2.0.27", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", + "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/nopt": { + "version": "7.2.1", + "resolved": "https://registry.npmjs.org/nopt/-/nopt-7.2.1.tgz", + "integrity": "sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w==", + "dev": true, + "license": "ISC", + "dependencies": { + "abbrev": "^2.0.0" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": 
"https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-run-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-6.0.0.tgz", + "integrity": "sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^4.0.0", + "unicorn-magic": "^0.3.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/npm-run-path/node_modules/path-key": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-4.0.0.tgz", + "integrity": "sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/nwsapi": { + "version": "2.2.23", + "resolved": "https://registry.npmjs.org/nwsapi/-/nwsapi-2.2.23.tgz", + "integrity": "sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": 
"sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-hash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/object-hash/-/object-hash-3.0.0.tgz", + "integrity": "sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, 
+ "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": "sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parse5": { + "version": "7.3.0", + "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.3.0.tgz", + "integrity": "sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "entities": "^6.0.0" + }, + "funding": { + "url": "https://github.com/inikulin/parse5?sponsor=1" + } + }, + "node_modules/parse5/node_modules/entities": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/entities/-/entities-6.0.1.tgz", + "integrity": 
"sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/path-browserify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", + "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "license": "MIT" + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": 
"sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/pathe": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", + "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/pathval": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pathval/-/pathval-2.0.1.tgz", + "integrity": "sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.16" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": 
"https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pinia": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/pinia/-/pinia-2.3.1.tgz", + "integrity": "sha512-khUlZSwt9xXCaTbbxFYBKDc/bWAGWJjOgvxETwkTN7KRm66EeT1ZdZj6i2ceh9sP2Pzqsbc704r2yngBrxBVug==", + "license": "MIT", + "dependencies": { + "@vue/devtools-api": "^6.6.3", + "vue-demi": "^0.14.10" + }, + "funding": { + "url": "https://github.com/sponsors/posva" + }, + "peerDependencies": { + "typescript": ">=4.4.4", + "vue": "^2.7.0 || ^3.5.11" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/pirates": { + "version": "4.0.7", + "resolved": "https://registry.npmjs.org/pirates/-/pirates-4.0.7.tgz", + "integrity": "sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/polished": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/polished/-/polished-4.3.1.tgz", + "integrity": "sha512-OBatVyC/N7SCW/FaDHrSd+vn0o5cS855TOmYi4OkdWUMSJCET/xip//ch8xGUvtr3i44X9LVyWwQlRMTN3pwSA==", + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.17.8" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/postcss": { + "version": "8.5.6", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", + "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + 
"dependencies": { + "nanoid": "^3.3.11", + "picocolors": "^1.1.1", + "source-map-js": "^1.2.1" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-import": { + "version": "15.1.0", + "resolved": "https://registry.npmjs.org/postcss-import/-/postcss-import-15.1.0.tgz", + "integrity": "sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==", + "dev": true, + "license": "MIT", + "dependencies": { + "postcss-value-parser": "^4.0.0", + "read-cache": "^1.0.0", + "resolve": "^1.1.7" + }, + "engines": { + "node": ">=14.0.0" + }, + "peerDependencies": { + "postcss": "^8.0.0" + } + }, + "node_modules/postcss-js": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/postcss-js/-/postcss-js-4.1.0.tgz", + "integrity": "sha512-oIAOTqgIo7q2EOwbhb8UalYePMvYoIeRY2YKntdpFQXNosSu3vLrniGgmH9OKs/qAkfoj5oB3le/7mINW1LCfw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "camelcase-css": "^2.0.1" + }, + "engines": { + "node": "^12 || ^14 || >= 16" + }, + "peerDependencies": { + "postcss": "^8.4.21" + } + }, + "node_modules/postcss-load-config": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/postcss-load-config/-/postcss-load-config-6.0.1.tgz", + "integrity": "sha512-oPtTM4oerL+UXmx+93ytZVN82RrlY/wPUV8IeDxFrzIjXOLF1pN+EmKPLbubvKHT2HC20xXsCAH2Z+CKV6Oz/g==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "lilconfig": "^3.1.1" + }, + "engines": { + "node": ">= 18" + }, + "peerDependencies": { + "jiti": ">=1.21.0", + "postcss": ">=8.0.9", + "tsx": "^4.8.1", + "yaml": "^2.4.2" + }, + "peerDependenciesMeta": { + "jiti": { + 
"optional": true + }, + "postcss": { + "optional": true + }, + "tsx": { + "optional": true + }, + "yaml": { + "optional": true + } + } + }, + "node_modules/postcss-nested": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/postcss-nested/-/postcss-nested-6.2.0.tgz", + "integrity": "sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "postcss-selector-parser": "^6.1.1" + }, + "engines": { + "node": ">=12.0" + }, + "peerDependencies": { + "postcss": "^8.2.14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.2.tgz", + "integrity": "sha512-Q8qQfPiZ+THO/3ZrOrO0cJJKfpYCagtMUkXbnEfmgUjwXg6z/WBeOyS9APBBPCTSiDV+s4SwQGu8yFsiMRIudg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/postcss-value-parser": { + "version": "4.2.0", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz", + "integrity": "sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/proto-list": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/proto-list/-/proto-list-1.2.4.tgz", + "integrity": 
"sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==", + "dev": true, + "license": "ISC" + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/psl": { + "version": "1.15.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.15.0.tgz", + "integrity": "sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "funding": { + "url": "https://github.com/sponsors/lupomontero" + } + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/react-is": { + "version": 
"18.3.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz", + "integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==", + "license": "MIT" + }, + "node_modules/read-cache": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/read-cache/-/read-cache-1.0.0.tgz", + "integrity": "sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==", + "dev": true, + "license": "MIT", + "dependencies": { + "pify": "^2.3.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/resolve": { + "version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.1", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": 
"sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rollup": { + "version": "4.55.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.55.1.tgz", + "integrity": "sha512-wDv/Ht1BNHB4upNbK74s9usvl7hObDnvVzknxqY/E/O3X6rW1U1rV1aENEfJ54eFZDTNo7zv1f5N4edCluH7+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.8" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.55.1", + "@rollup/rollup-android-arm64": "4.55.1", + "@rollup/rollup-darwin-arm64": "4.55.1", + "@rollup/rollup-darwin-x64": "4.55.1", + "@rollup/rollup-freebsd-arm64": "4.55.1", + "@rollup/rollup-freebsd-x64": "4.55.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.55.1", + "@rollup/rollup-linux-arm-musleabihf": "4.55.1", + "@rollup/rollup-linux-arm64-gnu": "4.55.1", + "@rollup/rollup-linux-arm64-musl": "4.55.1", + "@rollup/rollup-linux-loong64-gnu": "4.55.1", + "@rollup/rollup-linux-loong64-musl": "4.55.1", + 
"@rollup/rollup-linux-ppc64-gnu": "4.55.1", + "@rollup/rollup-linux-ppc64-musl": "4.55.1", + "@rollup/rollup-linux-riscv64-gnu": "4.55.1", + "@rollup/rollup-linux-riscv64-musl": "4.55.1", + "@rollup/rollup-linux-s390x-gnu": "4.55.1", + "@rollup/rollup-linux-x64-gnu": "4.55.1", + "@rollup/rollup-linux-x64-musl": "4.55.1", + "@rollup/rollup-openbsd-x64": "4.55.1", + "@rollup/rollup-openharmony-arm64": "4.55.1", + "@rollup/rollup-win32-arm64-msvc": "4.55.1", + "@rollup/rollup-win32-ia32-msvc": "4.55.1", + "@rollup/rollup-win32-x64-gnu": "4.55.1", + "@rollup/rollup-win32-x64-msvc": "4.55.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/rrweb-cssom": { + "version": "0.7.1", + "resolved": "https://registry.npmjs.org/rrweb-cssom/-/rrweb-cssom-0.7.1.tgz", + "integrity": "sha512-TrEMa7JGdVm0UThDJSx7ddw5nVm3UJS9o9CCIZ72B1vSyEZoziDqBYP3XIoi/12lKrJR8rE3jeFHMok2F/Mnsg==", + "dev": true, + "license": "MIT" + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "dev": true, + "license": "MIT" + }, + "node_modules/saxes": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/saxes/-/saxes-6.0.0.tgz", + "integrity": 
"sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==", + "dev": true, + "license": "ISC", + "dependencies": { + "xmlchars": "^2.2.0" + }, + "engines": { + "node": ">=v12.22.7" + } + }, + "node_modules/semver": { + "version": "7.7.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.3.tgz", + "integrity": "sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/siginfo": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/siginfo/-/siginfo-2.0.0.tgz", + "integrity": "sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==", + "dev": true, + "license": "ISC" + }, + "node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/slash": { + 
"version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map": { + "version": "0.5.7", + "resolved": "https://registry.npmjs.org/source-map/-/source-map-0.5.7.tgz", + "integrity": "sha512-LbrmJOMUSdEVxIKvdcJzQC+nQhe8FUZQTXQy6+I75skNgn3OoQ0DZA8YnFa7gp8tqtL3KPf1kmo0R5DoApeSGQ==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/source-map-js": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.1.tgz", + "integrity": "sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ssf": { + "version": "0.11.2", + "resolved": "https://registry.npmjs.org/ssf/-/ssf-0.11.2.tgz", + "integrity": "sha512-+idbmIXoYET47hH+d7dfm2epdOMUDjqcB4648sTZ+t2JwoyBFL/insLfB/racrDmsKB3diwsDA696pZMieAC5g==", + "license": "Apache-2.0", + "dependencies": { + "frac": "~1.1.2" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/stackback": { + "version": "0.0.2", + "resolved": "https://registry.npmjs.org/stackback/-/stackback-0.0.2.tgz", + "integrity": "sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==", + "dev": true, + "license": "MIT" + }, + "node_modules/std-env": { + "version": "3.10.0", + "resolved": "https://registry.npmjs.org/std-env/-/std-env-3.10.0.tgz", + "integrity": "sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==", + "dev": true, + "license": "MIT" + }, + "node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", + "integrity": 
"sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/string-width/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/string-width/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/stylis": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/stylis/-/stylis-4.3.6.tgz", + "integrity": "sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==", + "license": "MIT" + }, + "node_modules/sucrase": { + "version": "3.35.1", + "resolved": "https://registry.npmjs.org/sucrase/-/sucrase-3.35.1.tgz", + "integrity": "sha512-DhuTmvZWux4H1UOnWMB3sk0sbaCVOoQZjv8u1rDoTV0HTdGem9hkAZtl4JZy8P2z4Bg0nT+YMeOFyVr4zcG5Tw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.2", + "commander": "^4.0.0", + "lines-and-columns": "^1.1.6", + "mz": "^2.7.0", + "pirates": "^4.0.1", + "tinyglobby": "^0.2.11", + "ts-interface-checker": "^0.1.9" + }, 
+ "bin": { + "sucrase": "bin/sucrase", + "sucrase-node": "bin/sucrase-node" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/symbol-tree": { + "version": "3.2.4", + "resolved": "https://registry.npmjs.org/symbol-tree/-/symbol-tree-3.2.4.tgz", + "integrity": "sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==", + "dev": true, + "license": "MIT" + }, + "node_modules/tailwindcss": { + "version": "3.4.19", + "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-3.4.19.tgz", + "integrity": "sha512-3ofp+LL8E+pK/JuPLPggVAIaEuhvIz4qNcf3nA1Xn2o/7fb7s/TYpHhwGDv1ZU3PkBluUVaF8PyCHcm48cKLWQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@alloc/quick-lru": "^5.2.0", + "arg": "^5.0.2", + "chokidar": "^3.6.0", + "didyoumean": "^1.2.2", + "dlv": "^1.1.3", + "fast-glob": "^3.3.2", + "glob-parent": "^6.0.2", + "is-glob": "^4.0.3", + "jiti": "^1.21.7", + "lilconfig": "^3.1.3", + "micromatch": "^4.0.8", + "normalize-path": "^3.0.0", + "object-hash": "^3.0.0", + "picocolors": "^1.1.1", + "postcss": "^8.4.47", + "postcss-import": "^15.1.0", + "postcss-js": "^4.0.1", + 
"postcss-load-config": "^4.0.2 || ^5.0 || ^6.0", + "postcss-nested": "^6.2.0", + "postcss-selector-parser": "^6.1.2", + "resolve": "^1.22.8", + "sucrase": "^3.35.0" + }, + "bin": { + "tailwind": "lib/cli.js", + "tailwindcss": "lib/cli.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/test-exclude": { + "version": "7.0.1", + "resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-7.0.1.tgz", + "integrity": "sha512-pFYqmTw68LXVjeWJMST4+borgQP2AyMNbg1BpZh9LbyhUeNkeaPF9gzfPGUAnSMV3qPYdWUwDIjjCLiSDOl7vg==", + "dev": true, + "license": "ISC", + "dependencies": { + "@istanbuljs/schema": "^0.1.2", + "glob": "^10.4.1", + "minimatch": "^9.0.4" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/test-exclude/node_modules/glob": { + "version": "10.5.0", + "resolved": "https://registry.npmjs.org/glob/-/glob-10.5.0.tgz", + "integrity": "sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/thenify": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", + "integrity": "sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==", + "dev": true, + "license": "MIT", + "dependencies": { + "any-promise": "^1.0.0" + } + }, + "node_modules/thenify-all": { + "version": "1.6.0", + "resolved": 
"https://registry.npmjs.org/thenify-all/-/thenify-all-1.6.0.tgz", + "integrity": "sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "thenify": ">= 3.1.0 < 4" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/tiny-invariant": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/tiny-invariant/-/tiny-invariant-1.3.3.tgz", + "integrity": "sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinybench": { + "version": "2.9.0", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.9.0.tgz", + "integrity": "sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyexec": { + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/tinyexec/-/tinyexec-0.3.2.tgz", + "integrity": "sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/tinyglobby": { + "version": "0.2.15", + "resolved": "https://registry.npmjs.org/tinyglobby/-/tinyglobby-0.2.15.tgz", + "integrity": "sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "fdir": "^6.5.0", + "picomatch": "^4.0.3" + }, + "engines": { + "node": ">=12.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/SuperchupuDev" + } + }, + "node_modules/tinyglobby/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": 
"https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/tinypool": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-1.1.1.tgz", + "integrity": "sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^18.0.0 || >=20.0.0" + } + }, + "node_modules/tinyrainbow": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/tinyrainbow/-/tinyrainbow-1.2.0.tgz", + "integrity": "sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/tinyspy": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-3.0.2.tgz", + "integrity": "sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tough-cookie": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-4.1.4.tgz", + "integrity": "sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "psl": "^1.1.33", + "punycode": "^2.1.1", + "universalify": "^0.2.0", + "url-parse": "^1.5.3" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/tr46": { + "version": "5.1.1", + "resolved": "https://registry.npmjs.org/tr46/-/tr46-5.1.1.tgz", + "integrity": 
"sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==", + "dev": true, + "license": "MIT", + "dependencies": { + "punycode": "^2.3.1" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/ts-api-utils": { + "version": "1.4.3", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.3.tgz", + "integrity": "sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/ts-interface-checker": { + "version": "0.1.13", + "resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz", + "integrity": "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.6.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.6.3.tgz", + "integrity": "sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw==", + "dev": true, + "license": "Apache-2.0", + 
"bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/unicorn-magic": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/unicorn-magic/-/unicorn-magic-0.3.0.tgz", + "integrity": "sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/universalify": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-0.2.0.tgz", + "integrity": "sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.2.3.tgz", + "integrity": "sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": 
"https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, + "node_modules/use-merge-value": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/use-merge-value/-/use-merge-value-1.2.0.tgz", + "integrity": "sha512-DXgG0kkgJN45TcyoXL49vJnn55LehnrmoHc7MbKi+QDBvr8dsesqws8UlyIWGHMR+JXgxc1nvY+jDGMlycsUcw==", + "license": "MIT", + "peerDependencies": { + "react": ">= 16.x" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "5.4.21", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.4.21.tgz", + "integrity": "sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.43", + "rollup": "^4.20.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "sass-embedded": "*", + "stylus": 
"*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "sass-embedded": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vite-node": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-2.1.9.tgz", + "integrity": "sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==", + "dev": true, + "license": "MIT", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.3.7", + "es-module-lexer": "^1.5.4", + "pathe": "^1.1.2", + "vite": "^5.0.0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/vite-plugin-checker": { + "version": "0.9.3", + "resolved": "https://registry.npmjs.org/vite-plugin-checker/-/vite-plugin-checker-0.9.3.tgz", + "integrity": "sha512-Tf7QBjeBtG7q11zG0lvoF38/2AVUzzhMNu+Wk+mcsJ00Rk/FpJ4rmUviVJpzWkagbU13cGXvKpt7CMiqtxVTbQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.27.1", + "chokidar": "^4.0.3", + "npm-run-path": "^6.0.0", + "picocolors": "^1.1.1", + "picomatch": "^4.0.2", + "strip-ansi": "^7.1.0", + "tiny-invariant": "^1.3.3", + "tinyglobby": "^0.2.13", + "vscode-uri": "^3.1.0" + }, + "engines": { + "node": ">=14.16" + }, + "peerDependencies": { + "@biomejs/biome": ">=1.7", + "eslint": ">=7", + "meow": "^13.2.0", + "optionator": "^0.9.4", + "stylelint": ">=16", + "typescript": "*", + "vite": ">=2.0.0", + "vls": "*", + "vti": "*", + "vue-tsc": "~2.2.10" + }, + "peerDependenciesMeta": { + "@biomejs/biome": { + "optional": true + }, + "eslint": { + "optional": true + }, + "meow": { + "optional": true + }, + 
"optionator": { + "optional": true + }, + "stylelint": { + "optional": true + }, + "typescript": { + "optional": true + }, + "vls": { + "optional": true + }, + "vti": { + "optional": true + }, + "vue-tsc": { + "optional": true + } + } + }, + "node_modules/vite-plugin-checker/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/vite-plugin-checker/node_modules/chokidar": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-4.0.3.tgz", + "integrity": "sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "readdirp": "^4.0.1" + }, + "engines": { + "node": ">= 14.16.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + } + }, + "node_modules/vite-plugin-checker/node_modules/picomatch": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz", + "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/vite-plugin-checker/node_modules/readdirp": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-4.1.2.tgz", + "integrity": "sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 14.18.0" + }, + "funding": { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + }, 
+ "node_modules/vite-plugin-checker/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/vitest": { + "version": "2.1.9", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-2.1.9.tgz", + "integrity": "sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@vitest/expect": "2.1.9", + "@vitest/mocker": "2.1.9", + "@vitest/pretty-format": "^2.1.9", + "@vitest/runner": "2.1.9", + "@vitest/snapshot": "2.1.9", + "@vitest/spy": "2.1.9", + "@vitest/utils": "2.1.9", + "chai": "^5.1.2", + "debug": "^4.3.7", + "expect-type": "^1.1.0", + "magic-string": "^0.30.12", + "pathe": "^1.1.2", + "std-env": "^3.8.0", + "tinybench": "^2.9.0", + "tinyexec": "^0.3.1", + "tinypool": "^1.0.1", + "tinyrainbow": "^1.2.0", + "vite": "^5.0.0", + "vite-node": "2.1.9", + "why-is-node-running": "^2.3.0" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/node": "^18.0.0 || >=20.0.0", + "@vitest/browser": "2.1.9", + "@vitest/ui": "2.1.9", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/vscode-uri": { + "version": "3.1.0", + 
"resolved": "https://registry.npmjs.org/vscode-uri/-/vscode-uri-3.1.0.tgz", + "integrity": "sha512-/BpdSx+yCQGnCvecbyXdxHDkuk55/G3xwnC0GqY4gmQ3j+A+g8kzzgB4Nk/SINjqn6+waqw3EgbVF2QKExkRxQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/vue": { + "version": "3.5.26", + "resolved": "https://registry.npmjs.org/vue/-/vue-3.5.26.tgz", + "integrity": "sha512-SJ/NTccVyAoNUJmkM9KUqPcYlY+u8OVL1X5EW9RIs3ch5H2uERxyyIUI4MRxVCSOiEcupX9xNGde1tL9ZKpimA==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.5.26", + "@vue/compiler-sfc": "3.5.26", + "@vue/runtime-dom": "3.5.26", + "@vue/server-renderer": "3.5.26", + "@vue/shared": "3.5.26" + }, + "peerDependencies": { + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/vue-chartjs": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/vue-chartjs/-/vue-chartjs-5.3.3.tgz", + "integrity": "sha512-jqxtL8KZ6YJ5NTv6XzrzLS7osyegOi28UGNZW0h9OkDL7Sh1396ht4Dorh04aKrl2LiSalQ84WtqiG0RIJb0tA==", + "license": "MIT", + "peerDependencies": { + "chart.js": "^4.1.1", + "vue": "^3.0.0-0 || ^2.7.0" + } + }, + "node_modules/vue-component-type-helpers": { + "version": "2.2.12", + "resolved": "https://registry.npmjs.org/vue-component-type-helpers/-/vue-component-type-helpers-2.2.12.tgz", + "integrity": "sha512-YbGqHZ5/eW4SnkPNR44mKVc6ZKQoRs/Rux1sxC6rdwXb4qpbOSYfDr9DsTHolOTGmIKgM9j141mZbBeg05R1pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/vue-demi": { + "version": "0.14.10", + "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.14.10.tgz", + "integrity": "sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + "vue-demi-fix": "bin/vue-demi-fix.js", + "vue-demi-switch": "bin/vue-demi-switch.js" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + }, + "peerDependencies": { + 
"@vue/composition-api": "^1.0.0-rc.1", + "vue": "^3.0.0-0 || ^2.6.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + } + } + }, + "node_modules/vue-eslint-parser": { + "version": "9.4.3", + "resolved": "https://registry.npmjs.org/vue-eslint-parser/-/vue-eslint-parser-9.4.3.tgz", + "integrity": "sha512-2rYRLWlIpaiN8xbPiDyXZXRgLGOtWxERV7ND5fFAv5qo1D2N9Fu9MNajBNc6o13lZ+24DAWCkQCvj4klgmcITg==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.3.4", + "eslint-scope": "^7.1.1", + "eslint-visitor-keys": "^3.3.0", + "espree": "^9.3.1", + "esquery": "^1.4.0", + "lodash": "^4.17.21", + "semver": "^7.3.6" + }, + "engines": { + "node": "^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" + }, + "peerDependencies": { + "eslint": ">=6.0.0" + } + }, + "node_modules/vue-i18n": { + "version": "9.14.5", + "resolved": "https://registry.npmjs.org/vue-i18n/-/vue-i18n-9.14.5.tgz", + "integrity": "sha512-0jQ9Em3ymWngyiIkj0+c/k7WgaPO+TNzjKSNq9BvBQaKJECqn9cd9fL4tkDhB5G1QBskGl9YxxbDAhgbFtpe2g==", + "deprecated": "v9 and v10 no longer supported. please migrate to v11. 
about maintenance status, see https://vue-i18n.intlify.dev/guide/maintenance.html", + "license": "MIT", + "dependencies": { + "@intlify/core-base": "9.14.5", + "@intlify/shared": "9.14.5", + "@vue/devtools-api": "^6.5.0" + }, + "engines": { + "node": ">= 16" + }, + "funding": { + "url": "https://github.com/sponsors/kazupon" + }, + "peerDependencies": { + "vue": "^3.0.0" + } + }, + "node_modules/vue-router": { + "version": "4.6.4", + "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-4.6.4.tgz", + "integrity": "sha512-Hz9q5sa33Yhduglwz6g9skT8OBPii+4bFn88w6J+J4MfEo4KRRpmiNG/hHHkdbRFlLBOqxN8y8gf2Fb0MTUgVg==", + "license": "MIT", + "dependencies": { + "@vue/devtools-api": "^6.6.4" + }, + "funding": { + "url": "https://github.com/sponsors/posva" + }, + "peerDependencies": { + "vue": "^3.5.0" + } + }, + "node_modules/vue-tsc": { + "version": "2.2.12", + "resolved": "https://registry.npmjs.org/vue-tsc/-/vue-tsc-2.2.12.tgz", + "integrity": "sha512-P7OP77b2h/Pmk+lZdJ0YWs+5tJ6J2+uOQPo7tlBnY44QqQSPYvS0qVT4wqDJgwrZaLe47etJLLQRFia71GYITw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/typescript": "2.4.15", + "@vue/language-core": "2.2.12" + }, + "bin": { + "vue-tsc": "bin/vue-tsc.js" + }, + "peerDependencies": { + "typescript": ">=5.0.0" + } + }, + "node_modules/w3c-xmlserializer": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/w3c-xmlserializer/-/w3c-xmlserializer-5.0.0.tgz", + "integrity": "sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==", + "dev": true, + "license": "MIT", + "dependencies": { + "xml-name-validator": "^5.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/w3c-xmlserializer/node_modules/xml-name-validator": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-5.0.0.tgz", + "integrity": "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==", + "dev": 
true, + "license": "Apache-2.0", + "engines": { + "node": ">=18" + } + }, + "node_modules/webidl-conversions": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-7.0.0.tgz", + "integrity": "sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=12" + } + }, + "node_modules/whatwg-encoding": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", + "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", + "deprecated": "Use @exodus/bytes instead for a more spec-conformant and faster implementation", + "dev": true, + "license": "MIT", + "dependencies": { + "iconv-lite": "0.6.3" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/whatwg-mimetype": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", + "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + } + }, + "node_modules/whatwg-url": { + "version": "14.2.0", + "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-14.2.0.tgz", + "integrity": "sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==", + "dev": true, + "license": "MIT", + "dependencies": { + "tr46": "^5.1.0", + "webidl-conversions": "^7.0.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": 
"bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/why-is-node-running": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/why-is-node-running/-/why-is-node-running-2.3.0.tgz", + "integrity": "sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==", + "dev": true, + "license": "MIT", + "dependencies": { + "siginfo": "^2.0.0", + "stackback": "0.0.2" + }, + "bin": { + "why-is-node-running": "cli.js" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wmf": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wmf/-/wmf-1.0.2.tgz", + "integrity": "sha512-/p9K7bEh0Dj6WbXg4JG0xvLQmIadrner1bi45VMJTfnbVHsc7yIajZyoSoK60/dtVBs12Fm6WkUI5/3WAVsNMw==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/word": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/word/-/word-0.3.0.tgz", + "integrity": "sha512-OELeY0Q61OXpdUfTp+oweA/vtLVg5VDOXh+3he3PNzLGG/y0oylSOC1xRVj0+l4vQ3tj/bB1HVHv1ocXkQceFA==", + "license": "Apache-2.0", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + 
"version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrap-ansi-cjs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi-cjs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-regex": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-6.2.2.tgz", + "integrity": "sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "6.2.3", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.3.tgz", + "integrity": "sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==", + "dev": true, + "license": "MIT", + "engines": { + 
"node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/strip-ansi": { + "version": "7.1.2", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", + "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/ws": { + "version": "8.19.0", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.19.0.tgz", + "integrity": "sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + "bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/xlsx": { + "version": "0.18.5", + "resolved": "https://registry.npmjs.org/xlsx/-/xlsx-0.18.5.tgz", + "integrity": "sha512-dmg3LCjBPHZnQp5/F/+nnTa+miPJxUXB6vtk42YjBBKayDNagxGEeIdWApkYPOf3Z3pm3k62Knjzp7lMeTEtFQ==", + "license": "Apache-2.0", + "dependencies": { + "adler-32": "~1.3.0", + "cfb": "~1.2.1", + "codepage": "~1.15.0", + "crc-32": "~1.2.1", + "ssf": "~0.11.2", + "wmf": "~1.0.1", + "word": "~0.3.0" + }, + "bin": { + "xlsx": "bin/xlsx.njs" + }, + "engines": { + "node": ">=0.8" + } + }, + "node_modules/xml-name-validator": { + "version": "4.0.0", + "resolved": 
"https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz", + "integrity": "sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12" + } + }, + "node_modules/xmlchars": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/xmlchars/-/xmlchars-2.2.0.tgz", + "integrity": "sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==", + "dev": true, + "license": "MIT" + }, + "node_modules/yaml": { + "version": "1.10.2", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-1.10.2.tgz", + "integrity": "sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==", + "license": "ISC", + "engines": { + "node": ">= 6" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/frontend/package.json b/frontend/package.json index 2a85f585..c984cd96 100644 --- a/frontend/package.json +++ b/frontend/package.json @@ -9,7 +9,10 @@ "preview": "vite preview", "lint": "eslint . --ext .vue,.js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts --fix", "lint:check": "eslint . 
--ext .vue,.js,.jsx,.cjs,.mjs,.ts,.tsx,.cts,.mts", - "typecheck": "vue-tsc --noEmit" + "typecheck": "vue-tsc --noEmit", + "test": "vitest", + "test:run": "vitest run", + "test:coverage": "vitest run --coverage" }, "dependencies": { "@lobehub/icons": "^4.0.2", @@ -29,17 +32,21 @@ "@types/file-saver": "^2.0.7", "@types/mdx": "^2.0.13", "@types/node": "^20.10.5", - "@vitejs/plugin-vue": "^5.2.3", - "autoprefixer": "^10.4.16", "@typescript-eslint/eslint-plugin": "^7.18.0", "@typescript-eslint/parser": "^7.18.0", + "@vitejs/plugin-vue": "^5.2.3", + "@vitest/coverage-v8": "^2.1.9", + "@vue/test-utils": "^2.4.6", + "autoprefixer": "^10.4.16", "eslint": "^8.57.0", "eslint-plugin-vue": "^9.25.0", + "jsdom": "^24.1.3", "postcss": "^8.4.32", "tailwindcss": "^3.4.0", "typescript": "~5.6.0", "vite": "^5.0.10", "vite-plugin-checker": "^0.9.1", + "vitest": "^2.1.9", "vue-tsc": "^2.2.0" } } diff --git a/frontend/pnpm-lock.yaml b/frontend/pnpm-lock.yaml index c295165d..1a808176 100644 --- a/frontend/pnpm-lock.yaml +++ b/frontend/pnpm-lock.yaml @@ -63,6 +63,12 @@ importers: '@vitejs/plugin-vue': specifier: ^5.2.3 version: 5.2.4(vite@5.4.21(@types/node@20.19.27))(vue@3.5.26(typescript@5.6.3)) + '@vitest/coverage-v8': + specifier: ^2.1.9 + version: 2.1.9(vitest@2.1.9(@types/node@20.19.27)(jsdom@24.1.3)) + '@vue/test-utils': + specifier: ^2.4.6 + version: 2.4.6 autoprefixer: specifier: ^10.4.16 version: 10.4.23(postcss@8.5.6) @@ -72,6 +78,9 @@ importers: eslint-plugin-vue: specifier: ^9.25.0 version: 9.33.0(eslint@8.57.1) + jsdom: + specifier: ^24.1.3 + version: 24.1.3 postcss: specifier: ^8.4.32 version: 8.5.6 @@ -87,6 +96,9 @@ importers: vite-plugin-checker: specifier: ^0.9.1 version: 0.9.3(eslint@8.57.1)(optionator@0.9.4)(typescript@5.6.3)(vite@5.4.21(@types/node@20.19.27))(vue-tsc@2.2.12(typescript@5.6.3)) + vitest: + specifier: ^2.1.9 + version: 2.1.9(@types/node@20.19.27)(jsdom@24.1.3) vue-tsc: specifier: ^2.2.0 version: 2.2.12(typescript@5.6.3) @@ -97,6 +109,10 @@ packages: 
resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} engines: {node: '>=10'} + '@ampproject/remapping@2.3.0': + resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} + engines: {node: '>=6.0.0'} + '@ant-design/colors@8.0.0': resolution: {integrity: sha512-6YzkKCw30EI/E9kHOIXsQDHmMvTllT8STzjMb4K2qzit33RW2pqCJP0sk+hidBntXxE+Vz4n1+RvCTfBw6OErw==} @@ -135,6 +151,9 @@ packages: '@antfu/install-pkg@1.1.0': resolution: {integrity: sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==} + '@asamuzakjp/css-color@3.2.0': + resolution: {integrity: sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==} + '@babel/code-frame@7.27.1': resolution: {integrity: sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==} engines: {node: '>=6.9.0'} @@ -201,6 +220,9 @@ packages: '@types/react': optional: true + '@bcoe/v8-coverage@0.2.3': + resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==} + '@braintree/sanitize-url@7.1.1': resolution: {integrity: sha512-i1L7noDNxtFyL5DmZafWy1wRVhGehQmzZaz1HiN5e7iylJMSZR7ekOV7NsIqa5qBldlLrsKv4HbgFUVlQrz8Mw==} @@ -219,6 +241,34 @@ packages: '@chevrotain/utils@11.0.3': resolution: {integrity: sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==} + '@csstools/color-helpers@5.1.0': + resolution: {integrity: sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==} + engines: {node: '>=18'} + + '@csstools/css-calc@2.1.4': + resolution: {integrity: sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-parser-algorithms': ^3.0.5 + 
'@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-color-parser@3.1.0': + resolution: {integrity: sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-parser-algorithms': ^3.0.5 + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-parser-algorithms@3.0.5': + resolution: {integrity: sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-tokenizer@3.0.4': + resolution: {integrity: sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==} + engines: {node: '>=18'} + '@dnd-kit/accessibility@3.1.1': resolution: {integrity: sha512-2P+YgaXF+gRsIihwwY1gCsQSYnu9Zyj2py8kY5fFvUM1qm2WA2u639R6YNVfU4GWr+ZM5mqEsfHZZLoRONbemw==} peerDependencies: @@ -523,6 +573,14 @@ packages: resolution: {integrity: sha512-9gB+E53BYuAEMhbCAxVgG38EZrk59sxBtv3jSizNL2hEWlgjBjAw1AwpLHtNaeda12pe6W20OGEa0TwuMSRbyQ==} engines: {node: '>= 16'} + '@isaacs/cliui@8.0.2': + resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} + engines: {node: '>=12'} + + '@istanbuljs/schema@0.1.3': + resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==} + engines: {node: '>=8'} + '@jridgewell/gen-mapping@0.3.13': resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==} @@ -596,6 +654,13 @@ packages: resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} engines: {node: '>= 8'} + '@one-ini/wasm@0.1.1': + resolution: {integrity: sha512-XuySG1E38YScSJoMlqovLru4KTUNSjgVTIjyh7qMX6aNN5HY5Ct5LhRJdxO79JtTzKfzV/bnWpz+zquYrISsvw==} + + '@pkgjs/parseargs@0.11.0': + resolution: 
{integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} + engines: {node: '>=14'} + '@primer/octicons@19.21.1': resolution: {integrity: sha512-7tgtBkCNcg75YJnckinzvES+uxysYQCe+CHSEnzr3VYgxttzKRvfmrnVogl3aEuHCQP4xhiE9k2lFDhYwGtTzQ==} @@ -1505,6 +1570,44 @@ packages: vite: ^5.0.0 || ^6.0.0 vue: ^3.2.25 + '@vitest/coverage-v8@2.1.9': + resolution: {integrity: sha512-Z2cOr0ksM00MpEfyVE8KXIYPEcBFxdbLSs56L8PO0QQMxt/6bDj45uQfxoc96v05KW3clk7vvgP0qfDit9DmfQ==} + peerDependencies: + '@vitest/browser': 2.1.9 + vitest: 2.1.9 + peerDependenciesMeta: + '@vitest/browser': + optional: true + + '@vitest/expect@2.1.9': + resolution: {integrity: sha512-UJCIkTBenHeKT1TTlKMJWy1laZewsRIzYighyYiJKZreqtdxSos/S1t+ktRMQWu2CKqaarrkeszJx1cgC5tGZw==} + + '@vitest/mocker@2.1.9': + resolution: {integrity: sha512-tVL6uJgoUdi6icpxmdrn5YNo3g3Dxv+IHJBr0GXHaEdTcw3F+cPKnsXFhli6nO+f/6SDKPHEK1UN+k+TQv0Ehg==} + peerDependencies: + msw: ^2.4.9 + vite: ^5.0.0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + + '@vitest/pretty-format@2.1.9': + resolution: {integrity: sha512-KhRIdGV2U9HOUzxfiHmY8IFHTdqtOhIzCpd8WRdJiE7D/HUcZVD0EgQCVjm+Q9gkUXWgBvMmTtZgIG48wq7sOQ==} + + '@vitest/runner@2.1.9': + resolution: {integrity: sha512-ZXSSqTFIrzduD63btIfEyOmNcBmQvgOVsPNPe0jYtESiXkhd8u2erDLnMxmGrDCwHCCHE7hxwRDCT3pt0esT4g==} + + '@vitest/snapshot@2.1.9': + resolution: {integrity: sha512-oBO82rEjsxLNJincVhLhaxxZdEtV0EFHMK5Kmx5sJ6H9L183dHECjiefOAdnqpIgT5eZwT04PoggUnW88vOBNQ==} + + '@vitest/spy@2.1.9': + resolution: {integrity: sha512-E1B35FwzXXTs9FHNK6bDszs7mtydNi5MIfUWpceJ8Xbfb1gBMscAnwLbEu+B44ed6W3XjL9/ehLPHR1fkf1KLQ==} + + '@vitest/utils@2.1.9': + resolution: {integrity: sha512-v0psaMSkNJ3A2NMrUEHFRzJtDPFn+/VWZ5WxImB21T9fjucJRmS7xCS3ppEnARb9y11OAzaD+P2Ps+b+BGX5iQ==} + '@volar/language-core@2.4.15': resolution: {integrity: sha512-3VHw+QZU0ZG9IuQmzT68IyN4hZNd9GchGPhbD9+pa8CVv7rnoOZwo7T8weIbrRmihqy3ATpdfXFnqRrfPVK6CA==} @@ -1557,6 +1660,9 @@ 
packages: '@vue/shared@3.5.26': resolution: {integrity: sha512-7Z6/y3uFI5PRoKeorTOSXKcDj0MSasfNNltcslbFrPpcw6aXRUALq4IfJlaTRspiWIUOEZbrpM+iQGmCOiWe4A==} + '@vue/test-utils@2.4.6': + resolution: {integrity: sha512-FMxEjOpYNYiFe0GkaHsnJPXFHxQ6m4t8vI/ElPGpMWxZKpmRvQ33OIrvRXemy6yha03RxhOlQuy+gZMC3CQSow==} + '@vueuse/core@10.11.1': resolution: {integrity: sha512-guoy26JQktXPcz+0n3GukWIy/JDNKti9v6VEMu6kV2sYBsWuGiTU8OWdg+ADfUbHg3/3DlqySDe7JmdHrktiww==} @@ -1566,6 +1672,10 @@ packages: '@vueuse/shared@10.11.1': resolution: {integrity: sha512-LHpC8711VFZlDaYUXEBbFBCQ7GS3dVU9mjOhhMhXP6txTV4EhYQg/KGnQuvt/sPAtoUKq7VVUnL6mVtFoL42sA==} + abbrev@2.0.0: + resolution: {integrity: sha512-6/mh1E2u2YgEsCHdY0Yx5oW+61gZU+1vXaoiHHrpKeuRNNgFvS+/jrwHiQhB5apAf5oB7UB7E19ol2R2LKH8hQ==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + acorn-jsx@5.3.2: resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} peerDependencies: @@ -1580,6 +1690,10 @@ packages: resolution: {integrity: sha512-ynZ4w/nUUv5rrsR8UUGoe1VC9hZj6V5hU9Qw1HlMDJGEJw5S7TfTErWTjMys6M7vr0YWcPqs3qAr4ss0nDfP+A==} engines: {node: '>=0.8'} + agent-base@7.1.4: + resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} + engines: {node: '>= 14'} + ahooks@3.9.6: resolution: {integrity: sha512-Mr7f05swd5SmKlR9SZo5U6M0LsL4ErweLzpdgXjA1JPmnZ78Vr6wzx0jUtvoxrcqGKYnX0Yjc02iEASVxHFPjQ==} peerDependencies: @@ -1604,6 +1718,10 @@ packages: resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} engines: {node: '>=8'} + ansi-styles@6.2.3: + resolution: {integrity: sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==} + engines: {node: '>=12'} + antd-style@4.1.0: resolution: {integrity: sha512-vnPBGg0OVlSz90KRYZhxd89aZiOImTiesF+9MQqN8jsLGZUQTjbP04X9jTdEfsztKUuMbBWg/RmB/wHTakbtMQ==} peerDependencies: @@ -1633,6 
+1751,10 @@ packages: resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} engines: {node: '>=8'} + assertion-error@2.0.1: + resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} + engines: {node: '>=12'} + assign-symbols@1.0.0: resolution: {integrity: sha512-Q+JC7Whu8HhmTdBph/Tq59IoRtoy6KAm5zzPv00WdujX82lbAL8K7WVjne7vdCsAmbF4AYaDOPyO3k0kl8qIrw==} engines: {node: '>=0.10.0'} @@ -1694,6 +1816,10 @@ packages: engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} hasBin: true + cac@6.7.14: + resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} + engines: {node: '>=8'} + call-bind-apply-helpers@1.0.2: resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} engines: {node: '>= 0.4'} @@ -1716,6 +1842,10 @@ packages: resolution: {integrity: sha512-KfdUZsSOw19/ObEWasvBP/Ac4reZvAGauZhs6S/gqNhXhI7cKwvlH7ulj+dOEYnca4bm4SGo8C1bTAQvnTjgQA==} engines: {node: '>=0.8'} + chai@5.3.3: + resolution: {integrity: sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==} + engines: {node: '>=18'} + chalk@4.1.2: resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} engines: {node: '>=10'} @@ -1736,6 +1866,10 @@ packages: resolution: {integrity: sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==} engines: {pnpm: '>=8'} + check-error@2.1.3: + resolution: {integrity: sha512-PAJdDJusoxnwm1VwW07VWwUN1sl7smmC3OKggvndJFadxxDRyFJBX/ggnu/KE4kQAB7a3Dp8f/YXC1FlUprWmA==} + engines: {node: '>= 16'} + chevrotain-allstar@0.3.1: resolution: {integrity: sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==} peerDependencies: @@ -1793,6 
+1927,10 @@ packages: comma-separated-tokens@2.0.3: resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==} + commander@10.0.1: + resolution: {integrity: sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==} + engines: {node: '>=14'} + commander@4.1.1: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} engines: {node: '>= 6'} @@ -1814,6 +1952,9 @@ packages: confbox@0.1.8: resolution: {integrity: sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==} + config-chain@1.1.13: + resolution: {integrity: sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==} + convert-source-map@1.9.0: resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} @@ -1841,6 +1982,10 @@ packages: engines: {node: '>=4'} hasBin: true + cssstyle@4.6.0: + resolution: {integrity: sha512-2z+rWdzbbSZv6/rhtvzvqeZQHrBaqgogqt85sqFNbabZOuFbCVFb8kPeEtZjiKkbrm395irpNKiYeFeLiQnFPg==} + engines: {node: '>=18'} + csstype@3.2.3: resolution: {integrity: sha512-z1HGKcYy2xA8AGQfwrn0PAy+PB7X/GSj3UVJW9qKyn43xWa+gl5nXmU4qqLMRzWVLFC8KusUX8T/0kCiOYpAIQ==} @@ -2000,6 +2145,10 @@ packages: dagre-d3-es@7.0.13: resolution: {integrity: sha512-efEhnxpSuwpYOKRm/L5KbqoZmNNukHa/Flty4Wp62JRvgH2ojwVgPgdYyr4twpieZnyRDdIH7PY2mopX26+j2Q==} + data-urls@5.0.0: + resolution: {integrity: sha512-ZYP5VBHshaDAiVZxjbRVcFJpc+4xGgT0bK3vzy1HLN8jTO975HEbuYzZJcHoQEY5K1a0z8YayJkyVETa08eNTg==} + engines: {node: '>=18'} + dayjs@1.11.19: resolution: {integrity: sha512-t5EcLVS6QPBNqM2z8fakk/NKel+Xzshgt8FFKAn+qwlD1pzZWxh0nVCrvFK7ZDb6XucZeF9z8C7CBWTRIVApAw==} @@ -2015,6 +2164,9 @@ packages: supports-color: optional: true + decimal.js@10.6.0: + resolution: {integrity: 
sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==} + decode-named-character-reference@1.2.0: resolution: {integrity: sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==} @@ -2022,6 +2174,10 @@ packages: resolution: {integrity: sha512-+8VxcR21HhTy8nOt6jf20w0c9CADrw1O8d+VZ/YzzCt4bJ3uBjw+D1q2osAB8RnpwwaeYBxy0HyKQxD5JBMuuQ==} engines: {node: '>=14.16'} + deep-eql@5.0.2: + resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==} + engines: {node: '>=6'} + deep-is@0.1.4: resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} @@ -2063,6 +2219,14 @@ packages: resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} engines: {node: '>= 0.4'} + eastasianwidth@0.2.0: + resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} + + editorconfig@1.0.4: + resolution: {integrity: sha512-L9Qe08KWTlqYMVvMcTIvMAdl1cDUubzRNYL+WfA4bLDMHe4nemKkpmYzkznE1FwLKu0EEmy6obgQKzMJrg4x9Q==} + engines: {node: '>=14'} + hasBin: true + electron-to-chromium@1.5.267: resolution: {integrity: sha512-0Drusm6MVRXSOJpGbaSVgcQsuB4hEkMpHXaVstcPmhu5LIedxs1xNK/nIxmQIU/RPC0+1/o0AVZfBTkTNJOdUw==} @@ -2072,6 +2236,12 @@ packages: emoji-regex@10.6.0: resolution: {integrity: sha512-toUI84YS5YmxW219erniWD0CIVOo46xGKColeNQRgOzDorgBi1v4D71/OFzgD9GO2UGKIv1C3Sp8DAn0+j5w7A==} + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + emoji-regex@9.2.2: + resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==} + entities@6.0.1: resolution: {integrity: 
sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==} engines: {node: '>=0.12'} @@ -2091,6 +2261,9 @@ packages: resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} engines: {node: '>= 0.4'} + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + es-object-atoms@1.1.1: resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} engines: {node: '>= 0.4'} @@ -2189,6 +2362,10 @@ packages: resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} engines: {node: '>=0.10.0'} + expect-type@1.3.0: + resolution: {integrity: sha512-knvyeauYhqjOYvQ66MznSMs83wmHrCycNEN6Ao+2AeYEfxUIkuiVxdEa1qlGEPK+We3n0THiDciYSsCcgW/DoA==} + engines: {node: '>=12.0.0'} + extend-shallow@2.0.1: resolution: {integrity: sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==} engines: {node: '>=0.10.0'} @@ -2271,6 +2448,10 @@ packages: resolution: {integrity: sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==} engines: {node: '>=0.10.0'} + foreground-child@3.3.1: + resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==} + engines: {node: '>=14'} + form-data@4.0.5: resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} engines: {node: '>= 6'} @@ -2334,6 +2515,10 @@ packages: resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} engines: {node: '>=10.13.0'} + glob@10.5.0: + resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==} + 
hasBin: true + glob@7.2.3: resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} deprecated: Glob versions prior to v9 are no longer supported @@ -2421,12 +2606,27 @@ packages: hoist-non-react-statics@3.3.2: resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} + html-encoding-sniffer@4.0.0: + resolution: {integrity: sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==} + engines: {node: '>=18'} + + html-escaper@2.0.2: + resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==} + html-url-attributes@3.0.1: resolution: {integrity: sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==} html-void-elements@3.0.0: resolution: {integrity: sha512-bEqo66MRXsUGxWHV5IP0PUiAWwoEjba4VCzg0LjFJBpchPaTfyfCKTG6bc5F8ucKec3q5y6qOdGyYTSBEvhCrg==} + http-proxy-agent@7.0.2: + resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} + engines: {node: '>= 14'} + + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + iconv-lite@0.6.3: resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} engines: {node: '>=0.10.0'} @@ -2453,6 +2653,9 @@ packages: inherits@2.0.4: resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + ini@1.3.8: + resolution: {integrity: sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==} + inline-style-parser@0.2.7: resolution: {integrity: sha512-Nb2ctOyNR8DqQoR0OwRG95uNWIC0C1lCgf5Naz5H6Ji72KZ8OcFZLz2P5sNgwlyoJ8Yif11oMuYs5pBQa86csA==} 
@@ -2499,6 +2702,10 @@ packages: resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} engines: {node: '>=0.10.0'} + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + is-glob@4.0.3: resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} engines: {node: '>=0.10.0'} @@ -2525,6 +2732,9 @@ packages: resolution: {integrity: sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==} engines: {node: '>=0.10.0'} + is-potential-custom-element-name@1.0.1: + resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==} + isexe@2.0.0: resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} @@ -2532,10 +2742,34 @@ packages: resolution: {integrity: sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==} engines: {node: '>=0.10.0'} + istanbul-lib-coverage@3.2.2: + resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==} + engines: {node: '>=8'} + + istanbul-lib-report@3.0.1: + resolution: {integrity: sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==} + engines: {node: '>=10'} + + istanbul-lib-source-maps@5.0.6: + resolution: {integrity: sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==} + engines: {node: '>=10'} + + istanbul-reports@3.2.0: + resolution: {integrity: sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==} + engines: {node: '>=8'} + + jackspeak@3.4.3: + resolution: {integrity: 
sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} + jiti@1.21.7: resolution: {integrity: sha512-/imKNG4EbWNrVjoNC/1H5/9GFy+tqjGBHCaSsN+P2RnPqjsLmv6UD3Ej+Kj8nBWaRAwyk7kK5ZUc+OEatnTR3A==} hasBin: true + js-beautify@1.15.4: + resolution: {integrity: sha512-9/KXeZUKKJwqCXUdBxFJ3vPh467OCckSBmYDwSK/EtV090K+iMJ7zx2S3HLVDIWFQdqMIsZWbnaGiba18aWhaA==} + engines: {node: '>=14'} + hasBin: true + js-cookie@3.0.5: resolution: {integrity: sha512-cEiJEAEoIbWfCZYKWhVwFuvPX1gETRYPw6LlaTKoxD3s2AkXzkCjnp6h0V77ozyqj0jakteJ4YqDJT830+lVGw==} engines: {node: '>=14'} @@ -2547,6 +2781,15 @@ packages: resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} hasBin: true + jsdom@24.1.3: + resolution: {integrity: sha512-MyL55p3Ut3cXbeBEG7Hcv0mVM8pp8PBNWxRqchZnSfAiES1v1mRnMeFfaHWIPULpwsYfvO+ZmMZz5tGCnjzDUQ==} + engines: {node: '>=18'} + peerDependencies: + canvas: ^2.11.2 + peerDependenciesMeta: + canvas: + optional: true + jsesc@3.1.0: resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==} engines: {node: '>=6'} @@ -2636,6 +2879,12 @@ packages: resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==} hasBin: true + loupe@3.2.1: + resolution: {integrity: sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==} + + lru-cache@10.4.3: + resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} + lucide-react@0.469.0: resolution: {integrity: sha512-28vvUnnKQ/dBwiCQtwJw7QauYnE7yd2Cyp4tTTJpvglX4EMpbflcdBgrgToX2j71B3YvugK/NH3BGUk+E/p/Fw==} peerDependencies: @@ -2649,6 +2898,13 @@ packages: magic-string@0.30.21: resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + magicast@0.3.5: + resolution: 
{integrity: sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==} + + make-dir@4.0.0: + resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==} + engines: {node: '>=10'} + markdown-extensions@2.0.0: resolution: {integrity: sha512-o5vL7aDWatOTX8LzaS1WMoaoxIiLRQJuIKKe2wAw6IeULDHaqbiqiggmx+pKvZDb1Sj+pE46Sn1T7lCqfFtg1Q==} engines: {node: '>=16'} @@ -2877,10 +3133,18 @@ packages: minimatch@3.1.2: resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + minimatch@9.0.1: + resolution: {integrity: sha512-0jWhJpD/MdhPXwPuiRkCbfYfSKp2qnn2eOc279qI7f+osl/l+prKSrvhg157zSYvx/1nmgn2NqdT6k2Z7zSH9w==} + engines: {node: '>=16 || 14 >=14.17'} + minimatch@9.0.5: resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} engines: {node: '>=16 || 14 >=14.17'} + minipass@7.1.2: + resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} + engines: {node: '>=16 || 14 >=14.17'} + mixin-deep@1.3.2: resolution: {integrity: sha512-WRoDn//mXBiJ1H40rqa3vH0toePwSsGb45iInWlTySa+Uu4k3tYUSxa2v1KqAiLtvlrSzaExqS1gtk96A9zvEA==} engines: {node: '>=0.10.0'} @@ -2928,6 +3192,11 @@ packages: node-releases@2.0.27: resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==} + nopt@7.2.1: + resolution: {integrity: sha512-taM24ViiimT/XntxbPyJQzCG+p4EKOpgD3mxFwW38mGjVUrfERQOeY4EDHjdnptttfHuHQXFx+lTP08Q+mLa/w==} + engines: {node: ^14.17.0 || ^16.13.0 || >=18.0.0} + hasBin: true + normalize-path@3.0.0: resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} engines: {node: '>=0.10.0'} @@ -2942,6 +3211,9 @@ packages: numeral@2.0.6: resolution: {integrity: 
sha512-qaKRmtYPZ5qdw4jWJD6bxEf1FJEqllJrwxCLIm0sQU/A7v2/czigzOb+C2uSiFsa9lBUzeH7M1oK+Q+OLxL3kA==} + nwsapi@2.2.23: + resolution: {integrity: sha512-7wfH4sLbt4M0gCDzGE6vzQBo0bfTKjU7Sfpqy/7gs1qBfYz2vEJH6vXcBKpO3+6Yu1telwd0t9HpyOoLEQQbIQ==} + object-assign@4.1.1: resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} engines: {node: '>=0.10.0'} @@ -2975,6 +3247,9 @@ packages: resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} engines: {node: '>=10'} + package-json-from-dist@1.0.1: + resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==} + package-manager-detector@1.6.0: resolution: {integrity: sha512-61A5ThoTiDG/C8s8UMZwSorAGwMJ0ERVGj2OjoW5pAalsNOg15+iQiPzrLJ4jhZ1HJzmC2PIHT2oEiH3R5fzNA==} @@ -3017,13 +3292,24 @@ packages: path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + path-scurry@1.11.1: + resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==} + engines: {node: '>=16 || 14 >=14.18'} + path-type@4.0.0: resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} engines: {node: '>=8'} + pathe@1.1.2: + resolution: {integrity: sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==} + pathe@2.0.3: resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} + pathval@2.0.1: + resolution: {integrity: sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==} + engines: {node: '>= 14.16'} + picocolors@1.1.1: resolution: {integrity: 
sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} @@ -3122,9 +3408,15 @@ packages: property-information@7.1.0: resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==} + proto-list@1.2.4: + resolution: {integrity: sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==} + proxy-from-env@1.1.0: resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + psl@1.15.0: + resolution: {integrity: sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==} + punycode@2.3.1: resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} engines: {node: '>=6'} @@ -3133,6 +3425,9 @@ packages: resolution: {integrity: sha512-5fBfMOcDi5SA9qj5jZhWAcTtDfKF5WFdd2uD9nVNlbxVv1baq65aALy6qofpNEGELHvisjjasxQp7BlM9gvMzw==} engines: {node: '>=18'} + querystringify@2.2.0: + resolution: {integrity: sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==} + queue-microtask@1.2.3: resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} @@ -3369,6 +3664,9 @@ packages: remark-stringify@11.0.0: resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + requires-port@1.0.0: + resolution: {integrity: sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==} + reselect@5.1.1: resolution: {integrity: sha512-K/BG6eIky/SBpzfHZv/dd+9JBFiS4SWV7FIujVyJRux6e45+73RaUHXLmIR1f7WOMaQ0U1km6qwklRQxpJJY0w==} @@ -3404,6 +3702,12 @@ packages: roughjs@4.6.6: resolution: {integrity: sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==} + rrweb-cssom@0.7.1: + 
resolution: {integrity: sha512-TrEMa7JGdVm0UThDJSx7ddw5nVm3UJS9o9CCIZ72B1vSyEZoziDqBYP3XIoi/12lKrJR8rE3jeFHMok2F/Mnsg==} + + rrweb-cssom@0.8.0: + resolution: {integrity: sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==} + run-parallel@1.2.0: resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} @@ -3413,6 +3717,10 @@ packages: safer-buffer@2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + saxes@6.0.0: + resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==} + engines: {node: '>=v12.22.7'} + scheduler@0.27.0: resolution: {integrity: sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==} @@ -3457,6 +3765,13 @@ packages: shiki@3.20.0: resolution: {integrity: sha512-kgCOlsnyWb+p0WU+01RjkCH+eBVsjL1jOwUYWv0YDWkM2/A46+LDKVs5yZCUXjJG6bj4ndFoAg5iLIIue6dulg==} + siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + + signal-exit@4.1.0: + resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} + engines: {node: '>=14'} + slash@3.0.0: resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} engines: {node: '>=8'} @@ -3488,9 +3803,23 @@ packages: resolution: {integrity: sha512-+idbmIXoYET47hH+d7dfm2epdOMUDjqcB4648sTZ+t2JwoyBFL/insLfB/racrDmsKB3diwsDA696pZMieAC5g==} engines: {node: '>=0.8'} + stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + 
string-convert@0.2.1: resolution: {integrity: sha512-u/1tdPl4yQnPBjnVrmdLo9gtuLvELKsAoRapekWggdiQNvvvum+jYF329d84NAa660KQw7pB2n36KrIKVoXa3A==} + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + string-width@5.1.2: + resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==} + engines: {node: '>=12'} + stringify-entities@4.0.4: resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==} @@ -3536,6 +3865,9 @@ packages: peerDependencies: react: ^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + symbol-tree@3.2.4: + resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} + tabbable@6.4.0: resolution: {integrity: sha512-05PUHKSNE8ou2dwIxTngl4EzcnsCDZGJ/iCLtDflR/SHB/ny14rXc+qU5P4mG9JkusiV7EivzY9Mhm55AzAvCg==} @@ -3544,6 +3876,10 @@ packages: engines: {node: '>=14.0.0'} hasBin: true + test-exclude@7.0.1: + resolution: {integrity: sha512-pFYqmTw68LXVjeWJMST4+borgQP2AyMNbg1BpZh9LbyhUeNkeaPF9gzfPGUAnSMV3qPYdWUwDIjjCLiSDOl7vg==} + engines: {node: '>=18'} + text-table@0.2.0: resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} @@ -3561,6 +3897,12 @@ packages: tiny-invariant@1.3.3: resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} + tinybench@2.9.0: + resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + + tinyexec@0.3.2: + resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} + tinyexec@1.0.2: resolution: {integrity: 
sha512-W/KYk+NFhkmsYpuHq5JykngiOCnxeVL8v8dFnqxSD8qEEdRfXk1SDM6JzNqcERbcGYj9tMrDQBYV9cjgnunFIg==} engines: {node: '>=18'} @@ -3569,6 +3911,18 @@ packages: resolution: {integrity: sha512-j2Zq4NyQYG5XMST4cbs02Ak8iJUdxRM0XI5QyxXuZOzKOINmWurp3smXu3y5wDcJrptwpSjgXHzIQxR0omXljQ==} engines: {node: '>=12.0.0'} + tinypool@1.1.1: + resolution: {integrity: sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==} + engines: {node: ^18.0.0 || >=20.0.0} + + tinyrainbow@1.2.0: + resolution: {integrity: sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==} + engines: {node: '>=14.0.0'} + + tinyspy@3.0.2: + resolution: {integrity: sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==} + engines: {node: '>=14.0.0'} + to-regex-range@5.0.1: resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} engines: {node: '>=8.0'} @@ -3576,6 +3930,14 @@ packages: to-vfile@8.0.0: resolution: {integrity: sha512-IcmH1xB5576MJc9qcfEC/m/nQCFt3fzMHz45sSlgJyTWjRbKW1HAkJpuf3DgE57YzIlZcwcBZA5ENQbBo4aLkg==} + tough-cookie@4.1.4: + resolution: {integrity: sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==} + engines: {node: '>=6'} + + tr46@5.1.1: + resolution: {integrity: sha512-hdF5ZgjTqgAntKkklYw0R03MG2x/bSzTtkxmIRw/sTNV8YXsCJ1tfLAX23lhxhHJlEf3CRCOCGGWw3vI3GaSPw==} + engines: {node: '>=18'} + trim-lines@3.0.1: resolution: {integrity: sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==} @@ -3655,6 +4017,10 @@ packages: unist-util-visit@5.0.0: resolution: {integrity: sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==} + universalify@0.2.0: + resolution: {integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==} + engines: {node: '>= 4.0.0'} 
+ update-browserslist-db@1.2.3: resolution: {integrity: sha512-Js0m9cx+qOgDxo0eMiFGEueWztz+d4+M3rGlmKPT+T4IS/jP4ylw3Nwpu6cpTTP8R1MAC1kF4VbdLt3ARf209w==} hasBin: true @@ -3668,6 +4034,9 @@ packages: resolution: {integrity: sha512-n2huDr9h9yzd6exQVnH/jU5mr+Pfx08LRXXZhkLLetAMESRj+anQsTAh940iMrIetKAmry9coFuZQ2jY8/p3WA==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + url-parse@1.5.10: + resolution: {integrity: sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==} + use-merge-value@1.2.0: resolution: {integrity: sha512-DXgG0kkgJN45TcyoXL49vJnn55LehnrmoHc7MbKi+QDBvr8dsesqws8UlyIWGHMR+JXgxc1nvY+jDGMlycsUcw==} peerDependencies: @@ -3701,6 +4070,11 @@ packages: vfile@6.0.3: resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} + vite-node@2.1.9: + resolution: {integrity: sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + vite-plugin-checker@0.9.3: resolution: {integrity: sha512-Tf7QBjeBtG7q11zG0lvoF38/2AVUzzhMNu+Wk+mcsJ00Rk/FpJ4rmUviVJpzWkagbU13cGXvKpt7CMiqtxVTbQ==} engines: {node: '>=14.16'} @@ -3766,6 +4140,31 @@ packages: terser: optional: true + vitest@2.1.9: + resolution: {integrity: sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@types/node': ^18.0.0 || >=20.0.0 + '@vitest/browser': 2.1.9 + '@vitest/ui': 2.1.9 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@types/node': + optional: true + '@vitest/browser': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + vscode-jsonrpc@8.2.0: resolution: {integrity: sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==} 
engines: {node: '>=14.0.0'} @@ -3795,6 +4194,9 @@ packages: chart.js: ^4.1.1 vue: ^3.0.0-0 || ^2.7.0 + vue-component-type-helpers@2.2.12: + resolution: {integrity: sha512-YbGqHZ5/eW4SnkPNR44mKVc6ZKQoRs/Rux1sxC6rdwXb4qpbOSYfDr9DsTHolOTGmIKgM9j141mZbBeg05R1pw==} + vue-demi@0.14.10: resolution: {integrity: sha512-nMZBOwuzabUO0nLgIcc6rycZEebF6eeUfaiQx9+WSk8e29IbLvPU9feI6tqW4kTo3hvoYAJkMh8n8D0fuISphg==} engines: {node: '>=12'} @@ -3837,14 +4239,40 @@ packages: typescript: optional: true + w3c-xmlserializer@5.0.0: + resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==} + engines: {node: '>=18'} + web-namespaces@2.0.1: resolution: {integrity: sha512-bKr1DkiNa2krS7qxNtdrtHAmzuYGFQLiQ13TsorsdT6ULTkPLKuu5+GsFpDlg6JFjUTwX2DyhMPG2be8uPrqsQ==} + webidl-conversions@7.0.0: + resolution: {integrity: sha512-VwddBukDzu71offAQR975unBIGqfKZpM+8ZX6ySk8nYhVoo5CYaZyzt3YBvYtRtO+aoGlqxPg/B87NGVZ/fu6g==} + engines: {node: '>=12'} + + whatwg-encoding@3.1.1: + resolution: {integrity: sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==} + engines: {node: '>=18'} + deprecated: Use @exodus/bytes instead for a more spec-conformant and faster implementation + + whatwg-mimetype@4.0.0: + resolution: {integrity: sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==} + engines: {node: '>=18'} + + whatwg-url@14.2.0: + resolution: {integrity: sha512-De72GdQZzNTUBBChsXueQUnPKDkg/5A5zp7pFDuQAj5UFoENpiACU0wlCvzpAGnTkj++ihpKwKyYewn/XNUbKw==} + engines: {node: '>=18'} + which@2.0.2: resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} engines: {node: '>= 8'} hasBin: true + why-is-node-running@2.3.0: + resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} + engines: {node: '>=8'} + hasBin: true + wmf@1.0.2: resolution: 
{integrity: sha512-/p9K7bEh0Dj6WbXg4JG0xvLQmIadrner1bi45VMJTfnbVHsc7yIajZyoSoK60/dtVBs12Fm6WkUI5/3WAVsNMw==} engines: {node: '>=0.8'} @@ -3857,9 +4285,29 @@ packages: resolution: {integrity: sha512-OELeY0Q61OXpdUfTp+oweA/vtLVg5VDOXh+3he3PNzLGG/y0oylSOC1xRVj0+l4vQ3tj/bB1HVHv1ocXkQceFA==} engines: {node: '>=0.8'} + wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + + wrap-ansi@8.1.0: + resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==} + engines: {node: '>=12'} + wrappy@1.0.2: resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + ws@8.19.0: + resolution: {integrity: sha512-blAT2mjOEIi0ZzruJfIhb3nps74PRWTCz1IjglWEEpQl5XS/UNama6u2/rjFkDDouqr4L67ry+1aGIALViWjDg==} + engines: {node: '>=10.0.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: '>=5.0.2' + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + xlsx@0.18.5: resolution: {integrity: sha512-dmg3LCjBPHZnQp5/F/+nnTa+miPJxUXB6vtk42YjBBKayDNagxGEeIdWApkYPOf3Z3pm3k62Knjzp7lMeTEtFQ==} engines: {node: '>=0.8'} @@ -3869,6 +4317,13 @@ packages: resolution: {integrity: sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==} engines: {node: '>=12'} + xml-name-validator@5.0.0: + resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==} + engines: {node: '>=18'} + + xmlchars@2.2.0: + resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} + yaml@1.10.2: resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} engines: {node: '>= 6'} @@ -3893,6 +4348,11 @@ snapshots: '@alloc/quick-lru@5.2.0': {} + 
'@ampproject/remapping@2.3.0': + dependencies: + '@jridgewell/gen-mapping': 0.3.13 + '@jridgewell/trace-mapping': 0.3.31 + '@ant-design/colors@8.0.0': dependencies: '@ant-design/fast-color': 3.0.0 @@ -3944,6 +4404,14 @@ snapshots: package-manager-detector: 1.6.0 tinyexec: 1.0.2 + '@asamuzakjp/css-color@3.2.0': + dependencies: + '@csstools/css-calc': 2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-color-parser': 3.1.0(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + lru-cache: 10.4.3 + '@babel/code-frame@7.27.1': dependencies: '@babel/helper-validator-identifier': 7.28.5 @@ -4025,6 +4493,8 @@ snapshots: optionalDependencies: '@types/react': 19.2.7 + '@bcoe/v8-coverage@0.2.3': {} + '@braintree/sanitize-url@7.1.1': {} '@chevrotain/cst-dts-gen@11.0.3': @@ -4044,6 +4514,26 @@ snapshots: '@chevrotain/utils@11.0.3': {} + '@csstools/color-helpers@5.1.0': {} + + '@csstools/css-calc@2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + + '@csstools/css-color-parser@3.1.0(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/color-helpers': 5.1.0 + '@csstools/css-calc': 2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + + '@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/css-tokenizer': 3.0.4 + + '@csstools/css-tokenizer@3.0.4': {} + 
'@dnd-kit/accessibility@3.1.1(react@19.2.3)': dependencies: react: 19.2.3 @@ -4320,6 +4810,17 @@ snapshots: '@intlify/shared@9.14.5': {} + '@isaacs/cliui@8.0.2': + dependencies: + string-width: 5.1.2 + string-width-cjs: string-width@4.2.3 + strip-ansi: 7.1.2 + strip-ansi-cjs: strip-ansi@6.0.1 + wrap-ansi: 8.1.0 + wrap-ansi-cjs: wrap-ansi@7.0.0 + + '@istanbuljs/schema@0.1.3': {} + '@jridgewell/gen-mapping@0.3.13': dependencies: '@jridgewell/sourcemap-codec': 1.5.5 @@ -4505,6 +5006,11 @@ snapshots: '@nodelib/fs.scandir': 2.1.5 fastq: 1.20.1 + '@one-ini/wasm@0.1.1': {} + + '@pkgjs/parseargs@0.11.0': + optional: true + '@primer/octicons@19.21.1': dependencies: object-assign: 4.1.1 @@ -5439,6 +5945,64 @@ snapshots: vite: 5.4.21(@types/node@20.19.27) vue: 3.5.26(typescript@5.6.3) + '@vitest/coverage-v8@2.1.9(vitest@2.1.9(@types/node@20.19.27)(jsdom@24.1.3))': + dependencies: + '@ampproject/remapping': 2.3.0 + '@bcoe/v8-coverage': 0.2.3 + debug: 4.4.3 + istanbul-lib-coverage: 3.2.2 + istanbul-lib-report: 3.0.1 + istanbul-lib-source-maps: 5.0.6 + istanbul-reports: 3.2.0 + magic-string: 0.30.21 + magicast: 0.3.5 + std-env: 3.10.0 + test-exclude: 7.0.1 + tinyrainbow: 1.2.0 + vitest: 2.1.9(@types/node@20.19.27)(jsdom@24.1.3) + transitivePeerDependencies: + - supports-color + + '@vitest/expect@2.1.9': + dependencies: + '@vitest/spy': 2.1.9 + '@vitest/utils': 2.1.9 + chai: 5.3.3 + tinyrainbow: 1.2.0 + + '@vitest/mocker@2.1.9(vite@5.4.21(@types/node@20.19.27))': + dependencies: + '@vitest/spy': 2.1.9 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 5.4.21(@types/node@20.19.27) + + '@vitest/pretty-format@2.1.9': + dependencies: + tinyrainbow: 1.2.0 + + '@vitest/runner@2.1.9': + dependencies: + '@vitest/utils': 2.1.9 + pathe: 1.1.2 + + '@vitest/snapshot@2.1.9': + dependencies: + '@vitest/pretty-format': 2.1.9 + magic-string: 0.30.21 + pathe: 1.1.2 + + '@vitest/spy@2.1.9': + dependencies: + tinyspy: 3.0.2 + + '@vitest/utils@2.1.9': + dependencies: + 
'@vitest/pretty-format': 2.1.9 + loupe: 3.2.1 + tinyrainbow: 1.2.0 + '@volar/language-core@2.4.15': dependencies: '@volar/source-map': 2.4.15 @@ -5525,6 +6089,11 @@ snapshots: '@vue/shared@3.5.26': {} + '@vue/test-utils@2.4.6': + dependencies: + js-beautify: 1.15.4 + vue-component-type-helpers: 2.2.12 + '@vueuse/core@10.11.1(vue@3.5.26(typescript@5.6.3))': dependencies: '@types/web-bluetooth': 0.0.20 @@ -5544,6 +6113,8 @@ snapshots: - '@vue/composition-api' - vue + abbrev@2.0.0: {} + acorn-jsx@5.3.2(acorn@8.15.0): dependencies: acorn: 8.15.0 @@ -5552,6 +6123,8 @@ snapshots: adler-32@1.3.1: {} + agent-base@7.1.4: {} + ahooks@3.9.6(react-dom@19.2.3(react@19.2.3))(react@19.2.3): dependencies: '@babel/runtime': 7.28.4 @@ -5584,6 +6157,8 @@ snapshots: dependencies: color-convert: 2.0.1 + ansi-styles@6.2.3: {} + antd-style@4.1.0(@types/react@19.2.7)(antd@6.1.3(react-dom@19.2.3(react@19.2.3))(react@19.2.3))(react-dom@19.2.3(react@19.2.3))(react@19.2.3): dependencies: '@ant-design/cssinjs': 2.0.1(react-dom@19.2.3(react@19.2.3))(react@19.2.3) @@ -5671,6 +6246,8 @@ snapshots: array-union@2.1.0: {} + assertion-error@2.0.1: {} + assign-symbols@1.0.0: {} astring@1.9.0: {} @@ -5733,6 +6310,8 @@ snapshots: node-releases: 2.0.27 update-browserslist-db: 1.2.3(browserslist@4.28.1) + cac@6.7.14: {} + call-bind-apply-helpers@1.0.2: dependencies: es-errors: 1.3.0 @@ -5751,6 +6330,14 @@ snapshots: adler-32: 1.3.1 crc-32: 1.2.2 + chai@5.3.3: + dependencies: + assertion-error: 2.0.1 + check-error: 2.1.3 + deep-eql: 5.0.2 + loupe: 3.2.1 + pathval: 2.0.1 + chalk@4.1.2: dependencies: ansi-styles: 4.3.0 @@ -5768,6 +6355,8 @@ snapshots: dependencies: '@kurkle/color': 0.3.4 + check-error@2.1.3: {} + chevrotain-allstar@0.3.1(chevrotain@11.0.3): dependencies: chevrotain: 11.0.3 @@ -5828,6 +6417,8 @@ snapshots: comma-separated-tokens@2.0.3: {} + commander@10.0.1: {} + commander@4.1.1: {} commander@7.2.0: {} @@ -5840,6 +6431,11 @@ snapshots: confbox@0.1.8: {} + config-chain@1.1.13: + dependencies: 
+ ini: 1.3.8 + proto-list: 1.2.4 + convert-source-map@1.9.0: {} cose-base@1.0.3: @@ -5868,6 +6464,11 @@ snapshots: cssesc@3.0.0: {} + cssstyle@4.6.0: + dependencies: + '@asamuzakjp/css-color': 3.2.0 + rrweb-cssom: 0.8.0 + csstype@3.2.3: {} cytoscape-cose-bilkent@4.1.0(cytoscape@3.33.1): @@ -6054,6 +6655,11 @@ snapshots: d3: 7.9.0 lodash-es: 4.17.22 + data-urls@5.0.0: + dependencies: + whatwg-mimetype: 4.0.0 + whatwg-url: 14.2.0 + dayjs@1.11.19: {} de-indent@1.0.2: {} @@ -6062,12 +6668,16 @@ snapshots: dependencies: ms: 2.1.3 + decimal.js@10.6.0: {} + decode-named-character-reference@1.2.0: dependencies: character-entities: 2.0.2 decode-uri-component@0.4.1: {} + deep-eql@5.0.2: {} + deep-is@0.1.4: {} delaunator@5.0.1: @@ -6106,12 +6716,25 @@ snapshots: es-errors: 1.3.0 gopd: 1.2.0 + eastasianwidth@0.2.0: {} + + editorconfig@1.0.4: + dependencies: + '@one-ini/wasm': 0.1.1 + commander: 10.0.1 + minimatch: 9.0.1 + semver: 7.7.3 + electron-to-chromium@1.5.267: {} emoji-mart@5.6.0: {} emoji-regex@10.6.0: {} + emoji-regex@8.0.0: {} + + emoji-regex@9.2.2: {} + entities@6.0.1: {} entities@7.0.0: {} @@ -6124,6 +6747,8 @@ snapshots: es-errors@1.3.0: {} + es-module-lexer@1.7.0: {} + es-object-atoms@1.1.1: dependencies: es-errors: 1.3.0 @@ -6300,6 +6925,8 @@ snapshots: esutils@2.0.3: {} + expect-type@1.3.0: {} + extend-shallow@2.0.1: dependencies: is-extendable: 0.1.1 @@ -6368,6 +6995,11 @@ snapshots: for-in@1.0.2: {} + foreground-child@3.3.1: + dependencies: + cross-spawn: 7.0.6 + signal-exit: 4.1.0 + form-data@4.0.5: dependencies: asynckit: 0.4.0 @@ -6431,6 +7063,15 @@ snapshots: dependencies: is-glob: 4.0.3 + glob@10.5.0: + dependencies: + foreground-child: 3.3.1 + jackspeak: 3.4.3 + minimatch: 9.0.5 + minipass: 7.1.2 + package-json-from-dist: 1.0.1 + path-scurry: 1.11.1 + glob@7.2.3: dependencies: fs.realpath: 1.0.0 @@ -6618,10 +7259,30 @@ snapshots: dependencies: react-is: 16.13.1 + html-encoding-sniffer@4.0.0: + dependencies: + whatwg-encoding: 3.1.1 + + 
html-escaper@2.0.2: {} + html-url-attributes@3.0.1: {} html-void-elements@3.0.0: {} + http-proxy-agent@7.0.2: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + iconv-lite@0.6.3: dependencies: safer-buffer: 2.1.2 @@ -6644,6 +7305,8 @@ snapshots: inherits@2.0.4: {} + ini@1.3.8: {} + inline-style-parser@0.2.7: {} internmap@1.0.1: {} @@ -6679,6 +7342,8 @@ snapshots: is-extglob@2.1.1: {} + is-fullwidth-code-point@3.0.0: {} + is-glob@4.0.3: dependencies: is-extglob: 2.1.1 @@ -6697,12 +7362,49 @@ snapshots: dependencies: isobject: 3.0.1 + is-potential-custom-element-name@1.0.1: {} + isexe@2.0.0: {} isobject@3.0.1: {} + istanbul-lib-coverage@3.2.2: {} + + istanbul-lib-report@3.0.1: + dependencies: + istanbul-lib-coverage: 3.2.2 + make-dir: 4.0.0 + supports-color: 7.2.0 + + istanbul-lib-source-maps@5.0.6: + dependencies: + '@jridgewell/trace-mapping': 0.3.31 + debug: 4.4.3 + istanbul-lib-coverage: 3.2.2 + transitivePeerDependencies: + - supports-color + + istanbul-reports@3.2.0: + dependencies: + html-escaper: 2.0.2 + istanbul-lib-report: 3.0.1 + + jackspeak@3.4.3: + dependencies: + '@isaacs/cliui': 8.0.2 + optionalDependencies: + '@pkgjs/parseargs': 0.11.0 + jiti@1.21.7: {} + js-beautify@1.15.4: + dependencies: + config-chain: 1.1.13 + editorconfig: 1.0.4 + glob: 10.5.0 + js-cookie: 3.0.5 + nopt: 7.2.1 + js-cookie@3.0.5: {} js-tokens@4.0.0: {} @@ -6711,6 +7413,34 @@ snapshots: dependencies: argparse: 2.0.1 + jsdom@24.1.3: + dependencies: + cssstyle: 4.6.0 + data-urls: 5.0.0 + decimal.js: 10.6.0 + form-data: 4.0.5 + html-encoding-sniffer: 4.0.0 + http-proxy-agent: 7.0.2 + https-proxy-agent: 7.0.6 + is-potential-custom-element-name: 1.0.1 + nwsapi: 2.2.23 + parse5: 7.3.0 + rrweb-cssom: 0.7.1 + saxes: 6.0.0 + symbol-tree: 3.2.4 + tough-cookie: 4.1.4 + w3c-xmlserializer: 5.0.0 + webidl-conversions: 
7.0.0 + whatwg-encoding: 3.1.1 + whatwg-mimetype: 4.0.0 + whatwg-url: 14.2.0 + ws: 8.19.0 + xml-name-validator: 5.0.0 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + jsesc@3.1.0: {} json-buffer@3.0.1: {} @@ -6809,6 +7539,10 @@ snapshots: dependencies: js-tokens: 4.0.0 + loupe@3.2.1: {} + + lru-cache@10.4.3: {} + lucide-react@0.469.0(react@19.2.3): dependencies: react: 19.2.3 @@ -6821,6 +7555,16 @@ snapshots: dependencies: '@jridgewell/sourcemap-codec': 1.5.5 + magicast@0.3.5: + dependencies: + '@babel/parser': 7.28.5 + '@babel/types': 7.28.5 + source-map-js: 1.2.1 + + make-dir@4.0.0: + dependencies: + semver: 7.7.3 + markdown-extensions@2.0.0: {} markdown-table@3.0.4: {} @@ -7351,10 +8095,16 @@ snapshots: dependencies: brace-expansion: 1.1.12 + minimatch@9.0.1: + dependencies: + brace-expansion: 2.0.2 + minimatch@9.0.5: dependencies: brace-expansion: 2.0.2 + minipass@7.1.2: {} + mixin-deep@1.3.2: dependencies: for-in: 1.0.2 @@ -7398,6 +8148,10 @@ snapshots: node-releases@2.0.27: {} + nopt@7.2.1: + dependencies: + abbrev: 2.0.0 + normalize-path@3.0.0: {} npm-run-path@6.0.0: @@ -7411,6 +8165,8 @@ snapshots: numeral@2.0.6: {} + nwsapi@2.2.23: {} + object-assign@4.1.1: {} object-hash@3.0.0: {} @@ -7446,6 +8202,8 @@ snapshots: dependencies: p-limit: 3.1.0 + package-json-from-dist@1.0.1: {} + package-manager-detector@1.6.0: {} parent-module@1.0.1: @@ -7487,10 +8245,19 @@ snapshots: path-parse@1.0.7: {} + path-scurry@1.11.1: + dependencies: + lru-cache: 10.4.3 + minipass: 7.1.2 + path-type@4.0.0: {} + pathe@1.1.2: {} + pathe@2.0.3: {} + pathval@2.0.1: {} + picocolors@1.1.1: {} picomatch@2.3.1: {} @@ -7575,8 +8342,14 @@ snapshots: property-information@7.1.0: {} + proto-list@1.2.4: {} + proxy-from-env@1.1.0: {} + psl@1.15.0: + dependencies: + punycode: 2.3.1 + punycode@2.3.1: {} query-string@9.3.1: @@ -7585,6 +8358,8 @@ snapshots: filter-obj: 5.1.0 split-on-first: 3.0.0 + querystringify@2.2.0: {} + queue-microtask@1.2.3: {} 
rc-collapse@4.0.0(react-dom@19.2.3(react@19.2.3))(react@19.2.3): @@ -7928,6 +8703,8 @@ snapshots: mdast-util-to-markdown: 2.1.2 unified: 11.0.5 + requires-port@1.0.0: {} + reselect@5.1.1: {} resize-observer-polyfill@1.5.1: {} @@ -7983,6 +8760,10 @@ snapshots: points-on-curve: 0.2.0 points-on-path: 0.2.1 + rrweb-cssom@0.7.1: {} + + rrweb-cssom@0.8.0: {} + run-parallel@1.2.0: dependencies: queue-microtask: 1.2.3 @@ -7991,6 +8772,10 @@ snapshots: safer-buffer@2.1.2: {} + saxes@6.0.0: + dependencies: + xmlchars: 2.2.0 + scheduler@0.27.0: {} screenfull@5.2.0: {} @@ -8034,6 +8819,10 @@ snapshots: '@shikijs/vscode-textmate': 10.0.2 '@types/hast': 3.0.4 + siginfo@2.0.0: {} + + signal-exit@4.1.0: {} + slash@3.0.0: {} source-map-js@1.2.1: {} @@ -8054,8 +8843,24 @@ snapshots: dependencies: frac: 1.1.2 + stackback@0.0.2: {} + + std-env@3.10.0: {} + string-convert@0.2.1: {} + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + string-width@5.1.2: + dependencies: + eastasianwidth: 0.2.0 + emoji-regex: 9.2.2 + strip-ansi: 7.1.2 + stringify-entities@4.0.4: dependencies: character-entities-html4: 2.1.0 @@ -8105,6 +8910,8 @@ snapshots: react: 19.2.3 use-sync-external-store: 1.6.0(react@19.2.3) + symbol-tree@3.2.4: {} + tabbable@6.4.0: {} tailwindcss@3.4.19: @@ -8135,6 +8942,12 @@ snapshots: - tsx - yaml + test-exclude@7.0.1: + dependencies: + '@istanbuljs/schema': 0.1.3 + glob: 10.5.0 + minimatch: 9.0.5 + text-table@0.2.0: {} thenify-all@1.6.0: @@ -8149,6 +8962,10 @@ snapshots: tiny-invariant@1.3.3: {} + tinybench@2.9.0: {} + + tinyexec@0.3.2: {} + tinyexec@1.0.2: {} tinyglobby@0.2.15: @@ -8156,6 +8973,12 @@ snapshots: fdir: 6.5.0(picomatch@4.0.3) picomatch: 4.0.3 + tinypool@1.1.1: {} + + tinyrainbow@1.2.0: {} + + tinyspy@3.0.2: {} + to-regex-range@5.0.1: dependencies: is-number: 7.0.0 @@ -8164,6 +8987,17 @@ snapshots: dependencies: vfile: 6.0.3 + tough-cookie@4.1.4: + dependencies: + psl: 1.15.0 + punycode: 2.3.1 + 
universalify: 0.2.0 + url-parse: 1.5.10 + + tr46@5.1.1: + dependencies: + punycode: 2.3.1 + trim-lines@3.0.1: {} trough@2.2.0: {} @@ -8243,6 +9077,8 @@ snapshots: unist-util-is: 6.0.1 unist-util-visit-parents: 6.0.2 + universalify@0.2.0: {} + update-browserslist-db@1.2.3(browserslist@4.28.1): dependencies: browserslist: 4.28.1 @@ -8255,6 +9091,11 @@ snapshots: url-join@5.0.0: {} + url-parse@1.5.10: + dependencies: + querystringify: 2.2.0 + requires-port: 1.0.0 + use-merge-value@1.2.0(react@19.2.3): dependencies: react: 19.2.3 @@ -8286,6 +9127,24 @@ snapshots: '@types/unist': 3.0.3 vfile-message: 4.0.3 + vite-node@2.1.9(@types/node@20.19.27): + dependencies: + cac: 6.7.14 + debug: 4.4.3 + es-module-lexer: 1.7.0 + pathe: 1.1.2 + vite: 5.4.21(@types/node@20.19.27) + transitivePeerDependencies: + - '@types/node' + - less + - lightningcss + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + vite-plugin-checker@0.9.3(eslint@8.57.1)(optionator@0.9.4)(typescript@5.6.3)(vite@5.4.21(@types/node@20.19.27))(vue-tsc@2.2.12(typescript@5.6.3)): dependencies: '@babel/code-frame': 7.27.1 @@ -8313,6 +9172,42 @@ snapshots: '@types/node': 20.19.27 fsevents: 2.3.3 + vitest@2.1.9(@types/node@20.19.27)(jsdom@24.1.3): + dependencies: + '@vitest/expect': 2.1.9 + '@vitest/mocker': 2.1.9(vite@5.4.21(@types/node@20.19.27)) + '@vitest/pretty-format': 2.1.9 + '@vitest/runner': 2.1.9 + '@vitest/snapshot': 2.1.9 + '@vitest/spy': 2.1.9 + '@vitest/utils': 2.1.9 + chai: 5.3.3 + debug: 4.4.3 + expect-type: 1.3.0 + magic-string: 0.30.21 + pathe: 1.1.2 + std-env: 3.10.0 + tinybench: 2.9.0 + tinyexec: 0.3.2 + tinypool: 1.1.1 + tinyrainbow: 1.2.0 + vite: 5.4.21(@types/node@20.19.27) + vite-node: 2.1.9(@types/node@20.19.27) + why-is-node-running: 2.3.0 + optionalDependencies: + '@types/node': 20.19.27 + jsdom: 24.1.3 + transitivePeerDependencies: + - less + - lightningcss + - msw + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + 
vscode-jsonrpc@8.2.0: {} vscode-languageserver-protocol@3.17.5: @@ -8337,6 +9232,8 @@ snapshots: chart.js: 4.5.1 vue: 3.5.26(typescript@5.6.3) + vue-component-type-helpers@2.2.12: {} + vue-demi@0.14.10(vue@3.5.26(typescript@5.6.3)): dependencies: vue: 3.5.26(typescript@5.6.3) @@ -8382,20 +9279,56 @@ snapshots: optionalDependencies: typescript: 5.6.3 + w3c-xmlserializer@5.0.0: + dependencies: + xml-name-validator: 5.0.0 + web-namespaces@2.0.1: {} + webidl-conversions@7.0.0: {} + + whatwg-encoding@3.1.1: + dependencies: + iconv-lite: 0.6.3 + + whatwg-mimetype@4.0.0: {} + + whatwg-url@14.2.0: + dependencies: + tr46: 5.1.1 + webidl-conversions: 7.0.0 + which@2.0.2: dependencies: isexe: 2.0.0 + why-is-node-running@2.3.0: + dependencies: + siginfo: 2.0.0 + stackback: 0.0.2 + wmf@1.0.2: {} word-wrap@1.2.5: {} word@0.3.0: {} + wrap-ansi@7.0.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + + wrap-ansi@8.1.0: + dependencies: + ansi-styles: 6.2.3 + string-width: 5.1.2 + strip-ansi: 7.1.2 + wrappy@1.0.2: {} + ws@8.19.0: {} + xlsx@0.18.5: dependencies: adler-32: 1.3.1 @@ -8408,6 +9341,10 @@ snapshots: xml-name-validator@4.0.0: {} + xml-name-validator@5.0.0: {} + + xmlchars@2.2.0: {} + yaml@1.10.2: {} yocto-queue@0.1.0: {} diff --git a/frontend/src/App.vue b/frontend/src/App.vue index 8bae7b74..7b847c1b 100644 --- a/frontend/src/App.vue +++ b/frontend/src/App.vue @@ -2,6 +2,7 @@ import { RouterView, useRouter, useRoute } from 'vue-router' import { onMounted, watch } from 'vue' import Toast from '@/components/common/Toast.vue' +import NavigationProgress from '@/components/common/NavigationProgress.vue' import { useAppStore, useAuthStore, useSubscriptionStore } from '@/stores' import { getSetupStatus } from '@/api/setup' @@ -84,6 +85,7 @@ onMounted(async () => { diff --git a/frontend/src/__tests__/integration/navigation.spec.ts b/frontend/src/__tests__/integration/navigation.spec.ts new file mode 100644 index 00000000..54e3e1c0 --- /dev/null +++ 
b/frontend/src/__tests__/integration/navigation.spec.ts @@ -0,0 +1,478 @@ +/** + * 导航集成测试 + * 测试完整的页面导航流程、预加载和错误恢复机制 + */ +import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest' +import { createRouter, createWebHistory, type Router } from 'vue-router' +import { createPinia, setActivePinia } from 'pinia' +import { mount, flushPromises } from '@vue/test-utils' +import { defineComponent, h, nextTick } from 'vue' +import { useNavigationLoadingState, _resetNavigationLoadingInstance } from '@/composables/useNavigationLoading' +import { useRoutePrefetch } from '@/composables/useRoutePrefetch' + +// Mock 视图组件 +const MockDashboard = defineComponent({ + name: 'MockDashboard', + render() { + return h('div', { class: 'dashboard' }, 'Dashboard') + } +}) + +const MockKeys = defineComponent({ + name: 'MockKeys', + render() { + return h('div', { class: 'keys' }, 'Keys') + } +}) + +const MockUsage = defineComponent({ + name: 'MockUsage', + render() { + return h('div', { class: 'usage' }, 'Usage') + } +}) + +// Mock stores +vi.mock('@/stores/auth', () => ({ + useAuthStore: () => ({ + isAuthenticated: true, + isAdmin: false, + isSimpleMode: false, + checkAuth: vi.fn() + }) +})) + +vi.mock('@/stores/app', () => ({ + useAppStore: () => ({ + siteName: 'Test Site' + }) +})) + +// 创建测试路由 +function createTestRouter(): Router { + return createRouter({ + history: createWebHistory(), + routes: [ + { + path: '/', + redirect: '/dashboard' + }, + { + path: '/dashboard', + name: 'Dashboard', + component: MockDashboard, + meta: { requiresAuth: true, title: 'Dashboard' } + }, + { + path: '/keys', + name: 'Keys', + component: MockKeys, + meta: { requiresAuth: true, title: 'Keys' } + }, + { + path: '/usage', + name: 'Usage', + component: MockUsage, + meta: { requiresAuth: true, title: 'Usage' } + } + ] + }) +} + +// 测试用 App 组件 +const TestApp = defineComponent({ + name: 'TestApp', + setup() { + return () => h('div', { id: 'app' }, [h('router-view')]) + } +}) + +describe('Navigation 
Integration Tests', () => { + let router: Router + let originalRequestIdleCallback: typeof window.requestIdleCallback + let originalCancelIdleCallback: typeof window.cancelIdleCallback + + beforeEach(() => { + // 设置 Pinia + setActivePinia(createPinia()) + + // 重置导航加载状态 + _resetNavigationLoadingInstance() + + // 创建新的路由实例 + router = createTestRouter() + + // Mock requestIdleCallback + originalRequestIdleCallback = window.requestIdleCallback + originalCancelIdleCallback = window.cancelIdleCallback + + vi.stubGlobal('requestIdleCallback', (cb: IdleRequestCallback) => { + const id = setTimeout(() => cb({ didTimeout: false, timeRemaining: () => 50 }), 0) + return id + }) + vi.stubGlobal('cancelIdleCallback', (id: number) => clearTimeout(id)) + }) + + afterEach(() => { + vi.restoreAllMocks() + window.requestIdleCallback = originalRequestIdleCallback + window.cancelIdleCallback = originalCancelIdleCallback + }) + + describe('完整页面导航流程', () => { + it('导航时应该触发加载状态变化', async () => { + const navigationLoading = useNavigationLoadingState() + + // 初始状态 + expect(navigationLoading.isLoading.value).toBe(false) + + // 挂载应用 + const wrapper = mount(TestApp, { + global: { + plugins: [router] + } + }) + + // 等待路由初始化 + await router.isReady() + await flushPromises() + + // 导航到 /dashboard + await router.push('/dashboard') + await flushPromises() + await nextTick() + + // 导航结束后状态应该重置 + expect(navigationLoading.isLoading.value).toBe(false) + + wrapper.unmount() + }) + + it('导航到新页面应该正确渲染组件', async () => { + const wrapper = mount(TestApp, { + global: { + plugins: [router] + } + }) + + await router.isReady() + await router.push('/dashboard') + await flushPromises() + await nextTick() + + // 检查当前路由 + expect(router.currentRoute.value.path).toBe('/dashboard') + + wrapper.unmount() + }) + + it('连续快速导航应该正确处理路由状态', async () => { + const wrapper = mount(TestApp, { + global: { + plugins: [router] + } + }) + + await router.isReady() + await router.push('/dashboard') + + // 快速连续导航 + router.push('/keys') + 
router.push('/usage') + router.push('/dashboard') + + await flushPromises() + await nextTick() + + // 应该最终停在 /dashboard + expect(router.currentRoute.value.path).toBe('/dashboard') + + wrapper.unmount() + }) + }) + + describe('路由预加载', () => { + it('导航后应该触发相关路由预加载', async () => { + const routePrefetch = useRoutePrefetch() + const triggerSpy = vi.spyOn(routePrefetch, 'triggerPrefetch') + + // 设置 afterEach 守卫 + router.afterEach((to) => { + routePrefetch.triggerPrefetch(to) + }) + + const wrapper = mount(TestApp, { + global: { + plugins: [router] + } + }) + + await router.isReady() + await router.push('/dashboard') + await flushPromises() + + // 应该触发预加载 + expect(triggerSpy).toHaveBeenCalled() + + wrapper.unmount() + }) + + it('已预加载的路由不应重复预加载', async () => { + const routePrefetch = useRoutePrefetch() + + const wrapper = mount(TestApp, { + global: { + plugins: [router] + } + }) + + await router.isReady() + await router.push('/dashboard') + await flushPromises() + + // 手动触发预加载 + routePrefetch.triggerPrefetch(router.currentRoute.value) + await new Promise((resolve) => setTimeout(resolve, 100)) + + const prefetchedCount = routePrefetch.prefetchedRoutes.value.size + + // 再次触发相同路由预加载 + routePrefetch.triggerPrefetch(router.currentRoute.value) + await new Promise((resolve) => setTimeout(resolve, 100)) + + // 预加载数量不应增加 + expect(routePrefetch.prefetchedRoutes.value.size).toBe(prefetchedCount) + + wrapper.unmount() + }) + + it('路由变化时应取消之前的预加载任务', async () => { + const routePrefetch = useRoutePrefetch() + + const wrapper = mount(TestApp, { + global: { + plugins: [router] + } + }) + + await router.isReady() + + // 触发预加载 + routePrefetch.triggerPrefetch(router.currentRoute.value) + + // 立即导航到新路由(这会在内部调用 cancelPendingPrefetch) + routePrefetch.triggerPrefetch({ path: '/keys' } as any) + + // 由于 triggerPrefetch 内部调用 cancelPendingPrefetch,检查是否有预加载被正确管理 + expect(routePrefetch.prefetchedRoutes.value.size).toBeLessThanOrEqual(2) + + wrapper.unmount() + }) + }) + + describe('Chunk 加载错误恢复', () 
=> { + it('chunk 加载失败应该被正确捕获', async () => { + const errorHandler = vi.fn() + + // 创建带错误处理的路由 + const errorRouter = createRouter({ + history: createWebHistory(), + routes: [ + { + path: '/dashboard', + name: 'Dashboard', + component: MockDashboard + }, + { + path: '/error-page', + name: 'ErrorPage', + // 模拟加载失败的组件 + component: () => Promise.reject(new Error('Failed to fetch dynamically imported module')) + } + ] + }) + + errorRouter.onError(errorHandler) + + const wrapper = mount(TestApp, { + global: { + plugins: [errorRouter] + } + }) + + await errorRouter.isReady() + await errorRouter.push('/dashboard') + await flushPromises() + + // 尝试导航到会失败的页面 + try { + await errorRouter.push('/error-page') + } catch { + // 预期会失败 + } + + await flushPromises() + + // 错误处理器应该被调用 + expect(errorHandler).toHaveBeenCalled() + + wrapper.unmount() + }) + + it('chunk 加载错误应该包含正确的错误信息', async () => { + let capturedError: Error | null = null + + const errorRouter = createRouter({ + history: createWebHistory(), + routes: [ + { + path: '/dashboard', + name: 'Dashboard', + component: MockDashboard + }, + { + path: '/chunk-error', + name: 'ChunkError', + component: () => { + const error = new Error('Loading chunk failed') + error.name = 'ChunkLoadError' + return Promise.reject(error) + } + } + ] + }) + + errorRouter.onError((error) => { + capturedError = error + }) + + const wrapper = mount(TestApp, { + global: { + plugins: [errorRouter] + } + }) + + await errorRouter.isReady() + + try { + await errorRouter.push('/chunk-error') + } catch { + // 预期会失败 + } + + await flushPromises() + + expect(capturedError).not.toBeNull() + expect(capturedError!.name).toBe('ChunkLoadError') + + wrapper.unmount() + }) + }) + + describe('导航状态管理', () => { + it('导航开始时 isLoading 应该变为 true', async () => { + const navigationLoading = useNavigationLoadingState() + + // 创建一个延迟加载的组件来模拟真实场景 + const DelayedComponent = defineComponent({ + name: 'DelayedComponent', + async setup() { + await new Promise((resolve) => 
setTimeout(resolve, 50)) + return () => h('div', 'Delayed') + } + }) + + const delayRouter = createRouter({ + history: createWebHistory(), + routes: [ + { + path: '/dashboard', + name: 'Dashboard', + component: MockDashboard + }, + { + path: '/delayed', + name: 'Delayed', + component: DelayedComponent + } + ] + }) + + // 设置导航守卫 + delayRouter.beforeEach(() => { + navigationLoading.startNavigation() + }) + + delayRouter.afterEach(() => { + navigationLoading.endNavigation() + }) + + const wrapper = mount(TestApp, { + global: { + plugins: [delayRouter] + } + }) + + await delayRouter.isReady() + await delayRouter.push('/dashboard') + await flushPromises() + + // 导航结束后 isLoading 应该为 false + expect(navigationLoading.isLoading.value).toBe(false) + + wrapper.unmount() + }) + + it('导航取消时应该正确重置状态', async () => { + const navigationLoading = useNavigationLoadingState() + + const testRouter = createRouter({ + history: createWebHistory(), + routes: [ + { + path: '/dashboard', + name: 'Dashboard', + component: MockDashboard + }, + { + path: '/keys', + name: 'Keys', + component: MockKeys, + beforeEnter: (_to, _from, next) => { + // 模拟导航取消 + next(false) + } + } + ] + }) + + testRouter.beforeEach(() => { + navigationLoading.startNavigation() + }) + + testRouter.afterEach(() => { + navigationLoading.endNavigation() + }) + + const wrapper = mount(TestApp, { + global: { + plugins: [testRouter] + } + }) + + await testRouter.isReady() + await testRouter.push('/dashboard') + await flushPromises() + + // 尝试导航到被取消的路由 + await testRouter.push('/keys').catch(() => {}) + await flushPromises() + + // 导航被取消后,状态应该被重置 + // 注意:由于 afterEach 仍然会被调用,isLoading 应该为 false + expect(navigationLoading.isLoading.value).toBe(false) + + wrapper.unmount() + }) + }) +}) diff --git a/frontend/src/__tests__/setup.ts b/frontend/src/__tests__/setup.ts new file mode 100644 index 00000000..decb2a37 --- /dev/null +++ b/frontend/src/__tests__/setup.ts @@ -0,0 +1,45 @@ +/** + * Vitest 测试环境设置 + * 提供全局 mock 和测试工具 + */ 
+import { config } from '@vue/test-utils' +import { vi } from 'vitest' + +// Mock requestIdleCallback (Safari < 15 不支持) +if (typeof globalThis.requestIdleCallback === 'undefined') { + globalThis.requestIdleCallback = ((callback: IdleRequestCallback) => { + return window.setTimeout(() => callback({ didTimeout: false, timeRemaining: () => 50 }), 1) + }) as unknown as typeof requestIdleCallback +} + +if (typeof globalThis.cancelIdleCallback === 'undefined') { + globalThis.cancelIdleCallback = ((id: number) => { + window.clearTimeout(id) + }) as unknown as typeof cancelIdleCallback +} + +// Mock IntersectionObserver +class MockIntersectionObserver { + observe = vi.fn() + disconnect = vi.fn() + unobserve = vi.fn() +} + +globalThis.IntersectionObserver = MockIntersectionObserver as unknown as typeof IntersectionObserver + +// Mock ResizeObserver +class MockResizeObserver { + observe = vi.fn() + disconnect = vi.fn() + unobserve = vi.fn() +} + +globalThis.ResizeObserver = MockResizeObserver as unknown as typeof ResizeObserver + +// Vue Test Utils 全局配置 +config.global.stubs = { + // 可以在这里添加全局 stub +} + +// 设置全局测试超时 +vi.setConfig({ testTimeout: 10000 }) diff --git a/frontend/src/api/admin/accounts.ts b/frontend/src/api/admin/accounts.ts index 4e1f6cd3..54d0ad94 100644 --- a/frontend/src/api/admin/accounts.ts +++ b/frontend/src/api/admin/accounts.ts @@ -275,11 +275,15 @@ export async function bulkUpdate( ): Promise<{ success: number failed: number + success_ids?: number[] + failed_ids?: number[] results: Array<{ account_id: number; success: boolean; error?: string }> -}> { + }> { const { data } = await apiClient.post<{ success: number failed: number + success_ids?: number[] + failed_ids?: number[] results: Array<{ account_id: number; success: boolean; error?: string }> }>('/admin/accounts/bulk-update', { account_ids: accountIds, diff --git a/frontend/src/api/admin/dashboard.ts b/frontend/src/api/admin/dashboard.ts index 83e56c0e..9b338788 100644 --- 
a/frontend/src/api/admin/dashboard.ts +++ b/frontend/src/api/admin/dashboard.ts @@ -46,6 +46,10 @@ export interface TrendParams { granularity?: 'day' | 'hour' user_id?: number api_key_id?: number + model?: string + account_id?: number + group_id?: number + stream?: boolean } export interface TrendResponse { @@ -70,6 +74,10 @@ export interface ModelStatsParams { end_date?: string user_id?: number api_key_id?: number + model?: string + account_id?: number + group_id?: number + stream?: boolean } export interface ModelStatsResponse { diff --git a/frontend/src/api/admin/index.ts b/frontend/src/api/admin/index.ts index ea12f6d2..e86f6348 100644 --- a/frontend/src/api/admin/index.ts +++ b/frontend/src/api/admin/index.ts @@ -9,6 +9,7 @@ import groupsAPI from './groups' import accountsAPI from './accounts' import proxiesAPI from './proxies' import redeemAPI from './redeem' +import promoAPI from './promo' import settingsAPI from './settings' import systemAPI from './system' import subscriptionsAPI from './subscriptions' @@ -16,6 +17,7 @@ import usageAPI from './usage' import geminiAPI from './gemini' import antigravityAPI from './antigravity' import userAttributesAPI from './userAttributes' +import opsAPI from './ops' /** * Unified admin API object for convenient access @@ -27,13 +29,15 @@ export const adminAPI = { accounts: accountsAPI, proxies: proxiesAPI, redeem: redeemAPI, + promo: promoAPI, settings: settingsAPI, system: systemAPI, subscriptions: subscriptionsAPI, usage: usageAPI, gemini: geminiAPI, antigravity: antigravityAPI, - userAttributes: userAttributesAPI + userAttributes: userAttributesAPI, + ops: opsAPI } export { @@ -43,13 +47,15 @@ export { accountsAPI, proxiesAPI, redeemAPI, + promoAPI, settingsAPI, systemAPI, subscriptionsAPI, usageAPI, geminiAPI, antigravityAPI, - userAttributesAPI + userAttributesAPI, + opsAPI } export default adminAPI diff --git a/frontend/src/api/admin/ops.ts b/frontend/src/api/admin/ops.ts new file mode 100644 index 
00000000..6e048436 --- /dev/null +++ b/frontend/src/api/admin/ops.ts @@ -0,0 +1,1210 @@ +/** + * Admin Ops API endpoints (vNext) + * - Error logs list/detail + retry (client/upstream) + * - Dashboard overview (raw path) + */ + +import { apiClient } from '../client' +import type { PaginatedResponse } from '@/types' + +export type OpsRetryMode = 'client' | 'upstream' +export type OpsQueryMode = 'auto' | 'raw' | 'preagg' + +export interface OpsRequestOptions { + signal?: AbortSignal +} + +export interface OpsRetryRequest { + mode: OpsRetryMode + pinned_account_id?: number + force?: boolean +} + +export interface OpsRetryAttempt { + id: number + created_at: string + requested_by_user_id: number + source_error_id: number + mode: string + pinned_account_id?: number | null + pinned_account_name?: string + + status: string + started_at?: string | null + finished_at?: string | null + duration_ms?: number | null + + success?: boolean | null + http_status_code?: number | null + upstream_request_id?: string | null + used_account_id?: number | null + used_account_name?: string + response_preview?: string | null + response_truncated?: boolean | null + + result_request_id?: string | null + result_error_id?: number | null + error_message?: string | null +} + +export type OpsUpstreamErrorEvent = { + at_unix_ms?: number + platform?: string + account_id?: number + account_name?: string + upstream_status_code?: number + upstream_request_id?: string + upstream_request_body?: string + kind?: string + message?: string + detail?: string +} + +export interface OpsRetryResult { + attempt_id: number + mode: OpsRetryMode + status: 'running' | 'succeeded' | 'failed' | string + + pinned_account_id?: number | null + used_account_id?: number | null + + http_status_code: number + upstream_request_id: string + + response_preview: string + response_truncated: boolean + + error_message: string + + started_at: string + finished_at: string + duration_ms: number +} + +export interface 
OpsDashboardOverview { + start_time: string + end_time: string + platform: string + group_id?: number | null + + health_score?: number + + system_metrics?: OpsSystemMetricsSnapshot | null + job_heartbeats?: OpsJobHeartbeat[] | null + + success_count: number + error_count_total: number + business_limited_count: number + error_count_sla: number + request_count_total: number + request_count_sla: number + + token_consumed: number + + sla: number + error_rate: number + upstream_error_rate: number + upstream_error_count_excl_429_529: number + upstream_429_count: number + upstream_529_count: number + + qps: { + current: number + peak: number + avg: number + } + tps: { + current: number + peak: number + avg: number + } + + duration: OpsPercentiles + ttft: OpsPercentiles +} + +export interface OpsPercentiles { + p50_ms?: number | null + p90_ms?: number | null + p95_ms?: number | null + p99_ms?: number | null + avg_ms?: number | null + max_ms?: number | null +} + +export interface OpsThroughputTrendPoint { + bucket_start: string + request_count: number + token_consumed: number + qps: number + tps: number +} + +export interface OpsThroughputPlatformBreakdownItem { + platform: string + request_count: number + token_consumed: number +} + +export interface OpsThroughputGroupBreakdownItem { + group_id: number + group_name: string + request_count: number + token_consumed: number +} + +export interface OpsThroughputTrendResponse { + bucket: string + points: OpsThroughputTrendPoint[] + by_platform?: OpsThroughputPlatformBreakdownItem[] + top_groups?: OpsThroughputGroupBreakdownItem[] +} + +export type OpsRequestKind = 'success' | 'error' +export type OpsRequestDetailsKind = OpsRequestKind | 'all' +export type OpsRequestDetailsSort = 'created_at_desc' | 'duration_desc' + +export interface OpsRequestDetail { + kind: OpsRequestKind + created_at: string + request_id: string + + platform?: string + model?: string + duration_ms?: number | null + status_code?: number | null + + error_id?: 
number | null + phase?: string + severity?: string + message?: string + + user_id?: number | null + api_key_id?: number | null + account_id?: number | null + group_id?: number | null + + stream?: boolean +} + +export interface OpsRequestDetailsParams { + time_range?: '5m' | '30m' | '1h' | '6h' | '24h' + start_time?: string + end_time?: string + + kind?: OpsRequestDetailsKind + + platform?: string + group_id?: number | null + + user_id?: number + api_key_id?: number + account_id?: number + + model?: string + request_id?: string + q?: string + + min_duration_ms?: number + max_duration_ms?: number + + sort?: OpsRequestDetailsSort + + page?: number + page_size?: number +} + +export type OpsRequestDetailsResponse = PaginatedResponse + +export interface OpsLatencyHistogramBucket { + range: string + count: number +} + +export interface OpsLatencyHistogramResponse { + start_time: string + end_time: string + platform: string + group_id?: number | null + + total_requests: number + buckets: OpsLatencyHistogramBucket[] +} + +export interface OpsErrorTrendPoint { + bucket_start: string + error_count_total: number + business_limited_count: number + error_count_sla: number + upstream_error_count_excl_429_529: number + upstream_429_count: number + upstream_529_count: number +} + +export interface OpsErrorTrendResponse { + bucket: string + points: OpsErrorTrendPoint[] +} + +export interface OpsErrorDistributionItem { + status_code: number + total: number + sla: number + business_limited: number +} + +export interface OpsErrorDistributionResponse { + total: number + items: OpsErrorDistributionItem[] +} + +export interface OpsSystemMetricsSnapshot { + id: number + created_at: string + window_minutes: number + + cpu_usage_percent?: number | null + memory_used_mb?: number | null + memory_total_mb?: number | null + memory_usage_percent?: number | null + + db_ok?: boolean | null + redis_ok?: boolean | null + + // Config-derived limits (best-effort) for rendering "current vs max". 
+ db_max_open_conns?: number | null + redis_pool_size?: number | null + + redis_conn_total?: number | null + redis_conn_idle?: number | null + + db_conn_active?: number | null + db_conn_idle?: number | null + db_conn_waiting?: number | null + + goroutine_count?: number | null + concurrency_queue_depth?: number | null +} + +export interface OpsJobHeartbeat { + job_name: string + last_run_at?: string | null + last_success_at?: string | null + last_error_at?: string | null + last_error?: string | null + last_duration_ms?: number | null + last_result?: string | null + updated_at: string +} + +export interface PlatformConcurrencyInfo { + platform: string + current_in_use: number + max_capacity: number + load_percentage: number + waiting_in_queue: number +} + +export interface GroupConcurrencyInfo { + group_id: number + group_name: string + platform: string + current_in_use: number + max_capacity: number + load_percentage: number + waiting_in_queue: number +} + +export interface AccountConcurrencyInfo { + account_id: number + account_name?: string + platform: string + group_id: number + group_name: string + current_in_use: number + max_capacity: number + load_percentage: number + waiting_in_queue: number +} + +export interface OpsConcurrencyStatsResponse { + enabled: boolean + platform: Record + group: Record + account: Record + timestamp?: string +} + +export async function getConcurrencyStats(platform?: string, groupId?: number | null): Promise { + const params: Record = {} + if (platform) { + params.platform = platform + } + if (typeof groupId === 'number' && groupId > 0) { + params.group_id = groupId + } + + const { data } = await apiClient.get('/admin/ops/concurrency', { params }) + return data +} + +export interface PlatformAvailability { + platform: string + total_accounts: number + available_count: number + rate_limit_count: number + error_count: number +} + +export interface GroupAvailability { + group_id: number + group_name: string + platform: string + 
total_accounts: number + available_count: number + rate_limit_count: number + error_count: number +} + +export interface AccountAvailability { + account_id: number + account_name: string + platform: string + group_id: number + group_name: string + status: string + is_available: boolean + is_rate_limited: boolean + rate_limit_reset_at?: string + rate_limit_remaining_sec?: number + is_overloaded: boolean + overload_until?: string + overload_remaining_sec?: number + has_error: boolean + error_message?: string +} + +export interface OpsAccountAvailabilityStatsResponse { + enabled: boolean + platform: Record + group: Record + account: Record + timestamp?: string +} + +export async function getAccountAvailabilityStats(platform?: string, groupId?: number | null): Promise { + const params: Record = {} + if (platform) { + params.platform = platform + } + if (typeof groupId === 'number' && groupId > 0) { + params.group_id = groupId + } + const { data } = await apiClient.get('/admin/ops/account-availability', { params }) + return data +} + +export interface OpsRateSummary { + current: number + peak: number + avg: number +} + +export interface OpsRealtimeTrafficSummary { + window: string + start_time: string + end_time: string + platform: string + group_id?: number | null + qps: OpsRateSummary + tps: OpsRateSummary +} + +export interface OpsRealtimeTrafficSummaryResponse { + enabled: boolean + summary: OpsRealtimeTrafficSummary | null + timestamp?: string +} + +export async function getRealtimeTrafficSummary( + window: string, + platform?: string, + groupId?: number | null +): Promise { + const params: Record = { window } + if (platform) { + params.platform = platform + } + if (typeof groupId === 'number' && groupId > 0) { + params.group_id = groupId + } + + const { data } = await apiClient.get('/admin/ops/realtime-traffic', { params }) + return data +} + +/** + * Subscribe to realtime QPS updates via WebSocket. 
+ * + * Note: browsers cannot set Authorization headers for WebSockets. + * We authenticate via Sec-WebSocket-Protocol using a prefixed token item: + * ["sub2api-admin", "jwt."] + */ +export interface SubscribeQPSOptions { + token?: string | null + onOpen?: () => void + onClose?: (event: CloseEvent) => void + onError?: (event: Event) => void + /** + * Called when the server closes with an application close code that indicates + * reconnecting is not useful (e.g. feature flag disabled). + */ + onFatalClose?: (event: CloseEvent) => void + /** + * More granular status updates for UI (connecting/reconnecting/offline/etc). + */ + onStatusChange?: (status: OpsWSStatus) => void + /** + * Called when a reconnect is scheduled (helps display "retry in Xs"). + */ + onReconnectScheduled?: (info: { attempt: number, delayMs: number }) => void + wsBaseUrl?: string + /** + * Maximum reconnect attempts. Defaults to Infinity to keep the dashboard live. + * Set to 0 to disable reconnect. + */ + maxReconnectAttempts?: number + reconnectBaseDelayMs?: number + reconnectMaxDelayMs?: number + /** + * Stale connection detection (heartbeat-by-observation). + * If no messages are received within this window, the socket is closed to trigger a reconnect. + * Set to 0 to disable. + */ + staleTimeoutMs?: number + /** + * How often to check staleness. Only used when `staleTimeoutMs > 0`. + */ + staleCheckIntervalMs?: number +} + +export type OpsWSStatus = 'connecting' | 'connected' | 'reconnecting' | 'offline' | 'closed' + +export const OPS_WS_CLOSE_CODES = { + REALTIME_DISABLED: 4001 +} as const + +const OPS_WS_BASE_PROTOCOL = 'sub2api-admin' + +export function subscribeQPS(onMessage: (data: any) => void, options: SubscribeQPSOptions = {}): () => void { + let ws: WebSocket | null = null + let reconnectAttempts = 0 + const maxReconnectAttempts = Number.isFinite(options.maxReconnectAttempts as number) + ? 
(options.maxReconnectAttempts as number) + : Infinity + const baseDelayMs = options.reconnectBaseDelayMs ?? 1000 + const maxDelayMs = options.reconnectMaxDelayMs ?? 30000 + let reconnectTimer: ReturnType | null = null + let shouldReconnect = true + let isConnecting = false + let hasConnectedOnce = false + let lastMessageAt = 0 + const staleTimeoutMs = options.staleTimeoutMs ?? 120_000 + const staleCheckIntervalMs = options.staleCheckIntervalMs ?? 30_000 + let staleTimer: ReturnType | null = null + + const setStatus = (status: OpsWSStatus) => { + options.onStatusChange?.(status) + } + + const clearReconnectTimer = () => { + if (reconnectTimer) { + clearTimeout(reconnectTimer) + reconnectTimer = null + } + } + + const clearStaleTimer = () => { + if (staleTimer) { + clearInterval(staleTimer) + staleTimer = null + } + } + + const startStaleTimer = () => { + clearStaleTimer() + if (!staleTimeoutMs || staleTimeoutMs <= 0) return + staleTimer = setInterval(() => { + if (!shouldReconnect) return + if (!ws || ws.readyState !== WebSocket.OPEN) return + if (!lastMessageAt) return + const ageMs = Date.now() - lastMessageAt + if (ageMs > staleTimeoutMs) { + // Treat as a half-open connection; closing triggers the normal reconnect path. + ws.close() + } + }, staleCheckIntervalMs) + } + + const scheduleReconnect = () => { + if (!shouldReconnect) return + if (hasConnectedOnce && reconnectAttempts >= maxReconnectAttempts) return + + // If we're offline, wait for the browser to come back online. 
+ if (typeof navigator !== 'undefined' && 'onLine' in navigator && !navigator.onLine) { + setStatus('offline') + return + } + + const expDelay = baseDelayMs * Math.pow(2, reconnectAttempts) + const delay = Math.min(expDelay, maxDelayMs) + const jitter = Math.floor(Math.random() * 250) + clearReconnectTimer() + reconnectTimer = setTimeout(() => { + reconnectAttempts++ + connect() + }, delay + jitter) + options.onReconnectScheduled?.({ attempt: reconnectAttempts + 1, delayMs: delay + jitter }) + } + + const handleOnline = () => { + if (!shouldReconnect) return + if (ws && (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING)) return + connect() + } + + const handleOffline = () => { + setStatus('offline') + } + + const connect = () => { + if (!shouldReconnect) return + if (isConnecting) return + if (ws && (ws.readyState === WebSocket.OPEN || ws.readyState === WebSocket.CONNECTING)) return + if (hasConnectedOnce && reconnectAttempts >= maxReconnectAttempts) return + + isConnecting = true + setStatus(hasConnectedOnce ? 'reconnecting' : 'connecting') + const protocol = window.location.protocol === 'https:' ? 'wss:' : 'ws:' + const wsBaseUrl = options.wsBaseUrl || import.meta.env.VITE_WS_BASE_URL || window.location.host + const wsURL = new URL(`${protocol}//${wsBaseUrl}/api/v1/admin/ops/ws/qps`) + + // Do NOT put admin JWT in the URL query string (it can leak via access logs, proxies, etc). + // Browsers cannot set Authorization headers for WebSockets, so we pass the token via + // Sec-WebSocket-Protocol (subprotocol list): ["sub2api-admin", "jwt."]. + const rawToken = String(options.token ?? localStorage.getItem('auth_token') ?? 
'').trim() + const protocols: string[] = [OPS_WS_BASE_PROTOCOL] + if (rawToken) protocols.push(`jwt.${rawToken}`) + + ws = new WebSocket(wsURL.toString(), protocols) + + ws.onopen = () => { + reconnectAttempts = 0 + isConnecting = false + hasConnectedOnce = true + clearReconnectTimer() + lastMessageAt = Date.now() + startStaleTimer() + setStatus('connected') + options.onOpen?.() + } + + ws.onmessage = (e) => { + try { + const data = JSON.parse(e.data) + lastMessageAt = Date.now() + onMessage(data) + } catch (err) { + console.warn('[OpsWS] Failed to parse message:', err) + } + } + + ws.onerror = (error) => { + console.error('[OpsWS] Connection error:', error) + options.onError?.(error) + } + + ws.onclose = (event) => { + isConnecting = false + options.onClose?.(event) + clearStaleTimer() + ws = null + + // If the server explicitly tells us to stop reconnecting, honor it. + if (event && typeof event.code === 'number' && event.code === OPS_WS_CLOSE_CODES.REALTIME_DISABLED) { + shouldReconnect = false + clearReconnectTimer() + setStatus('closed') + options.onFatalClose?.(event) + return + } + + scheduleReconnect() + } + } + + window.addEventListener('online', handleOnline) + window.addEventListener('offline', handleOffline) + connect() + + return () => { + shouldReconnect = false + window.removeEventListener('online', handleOnline) + window.removeEventListener('offline', handleOffline) + clearReconnectTimer() + clearStaleTimer() + if (ws) ws.close() + ws = null + setStatus('closed') + } +} + +export type OpsSeverity = string +export type OpsPhase = string + +export type AlertSeverity = 'critical' | 'warning' | 'info' +export type ThresholdMode = 'count' | 'percentage' | 'both' +export type MetricType = + | 'success_rate' + | 'error_rate' + | 'upstream_error_rate' + | 'cpu_usage_percent' + | 'memory_usage_percent' + | 'concurrency_queue_depth' + | 'group_available_accounts' + | 'group_available_ratio' + | 'group_rate_limit_ratio' + | 'account_rate_limited_count' + | 
'account_error_count' + | 'account_error_ratio' + | 'overload_account_count' +export type Operator = '>' | '>=' | '<' | '<=' | '==' | '!=' + +export interface AlertRule { + id?: number + name: string + description?: string + enabled: boolean + metric_type: MetricType + operator: Operator + threshold: number + window_minutes: number + sustained_minutes: number + severity: OpsSeverity + cooldown_minutes: number + notify_email: boolean + filters?: Record + created_at?: string + updated_at?: string + last_triggered_at?: string | null +} + +export interface AlertEvent { + id: number + rule_id: number + severity: OpsSeverity | string + status: 'firing' | 'resolved' | 'manual_resolved' | string + title?: string + description?: string + metric_value?: number + threshold_value?: number + dimensions?: Record + fired_at: string + resolved_at?: string | null + email_sent: boolean + created_at: string +} + +export interface EmailNotificationConfig { + alert: { + enabled: boolean + recipients: string[] + min_severity: AlertSeverity | '' + rate_limit_per_hour: number + batching_window_seconds: number + include_resolved_alerts: boolean + } + report: { + enabled: boolean + recipients: string[] + daily_summary_enabled: boolean + daily_summary_schedule: string + weekly_summary_enabled: boolean + weekly_summary_schedule: string + error_digest_enabled: boolean + error_digest_schedule: string + error_digest_min_count: number + account_health_enabled: boolean + account_health_schedule: string + account_health_error_rate_threshold: number + } +} + +export interface OpsMetricThresholds { + sla_percent_min?: number | null // SLA低于此值变红 + ttft_p99_ms_max?: number | null // TTFT P99高于此值变红 + request_error_rate_percent_max?: number | null // 请求错误率高于此值变红 + upstream_error_rate_percent_max?: number | null // 上游错误率高于此值变红 +} + +export interface OpsDistributedLockSettings { + enabled: boolean + key: string + ttl_seconds: number +} + +export interface OpsAlertRuntimeSettings { + 
evaluation_interval_seconds: number + distributed_lock: OpsDistributedLockSettings + silencing: { + enabled: boolean + global_until_rfc3339: string + global_reason: string + entries?: Array<{ + rule_id?: number + severities?: Array + until_rfc3339: string + reason: string + }> + } + thresholds: OpsMetricThresholds // 指标阈值配置 +} + +export interface OpsAdvancedSettings { + data_retention: OpsDataRetentionSettings + aggregation: OpsAggregationSettings + ignore_count_tokens_errors: boolean + ignore_context_canceled: boolean + ignore_no_available_accounts: boolean + auto_refresh_enabled: boolean + auto_refresh_interval_seconds: number +} + +export interface OpsDataRetentionSettings { + cleanup_enabled: boolean + cleanup_schedule: string + error_log_retention_days: number + minute_metrics_retention_days: number + hourly_metrics_retention_days: number +} + +export interface OpsAggregationSettings { + aggregation_enabled: boolean +} + +export interface OpsErrorLog { + id: number + created_at: string + + // Standardized classification + phase: OpsPhase + type: string + error_owner: 'client' | 'provider' | 'platform' | string + error_source: 'client_request' | 'upstream_http' | 'gateway' | string + + severity: OpsSeverity + status_code: number + platform: string + model: string + + is_retryable: boolean + retry_count: number + + resolved: boolean + resolved_at?: string | null + resolved_by_user_id?: number | null + resolved_retry_id?: number | null + + client_request_id: string + request_id: string + message: string + + user_id?: number | null + user_email: string + api_key_id?: number | null + account_id?: number | null + account_name: string + group_id?: number | null + group_name: string + + client_ip?: string | null + request_path?: string + stream?: boolean +} + +export interface OpsErrorDetail extends OpsErrorLog { + error_body: string + user_agent: string + + // Upstream context (optional; enriched by gateway services) + upstream_status_code?: number | null + 
upstream_error_message?: string + upstream_error_detail?: string + upstream_errors?: string + + auth_latency_ms?: number | null + routing_latency_ms?: number | null + upstream_latency_ms?: number | null + response_latency_ms?: number | null + time_to_first_token_ms?: number | null + + request_body: string + request_body_truncated: boolean + request_body_bytes?: number | null + + is_business_limited: boolean +} + +export type OpsErrorLogsResponse = PaginatedResponse + +export async function getDashboardOverview( + params: { + time_range?: '5m' | '30m' | '1h' | '6h' | '24h' + start_time?: string + end_time?: string + platform?: string + group_id?: number | null + mode?: OpsQueryMode + }, + options: OpsRequestOptions = {} +): Promise { + const { data } = await apiClient.get('/admin/ops/dashboard/overview', { + params, + signal: options.signal + }) + return data +} + +export async function getThroughputTrend( + params: { + time_range?: '5m' | '30m' | '1h' | '6h' | '24h' + start_time?: string + end_time?: string + platform?: string + group_id?: number | null + mode?: OpsQueryMode + }, + options: OpsRequestOptions = {} +): Promise { + const { data } = await apiClient.get('/admin/ops/dashboard/throughput-trend', { + params, + signal: options.signal + }) + return data +} + +export async function getLatencyHistogram( + params: { + time_range?: '5m' | '30m' | '1h' | '6h' | '24h' + start_time?: string + end_time?: string + platform?: string + group_id?: number | null + mode?: OpsQueryMode + }, + options: OpsRequestOptions = {} +): Promise { + const { data } = await apiClient.get('/admin/ops/dashboard/latency-histogram', { + params, + signal: options.signal + }) + return data +} + +export async function getErrorTrend( + params: { + time_range?: '5m' | '30m' | '1h' | '6h' | '24h' + start_time?: string + end_time?: string + platform?: string + group_id?: number | null + mode?: OpsQueryMode + }, + options: OpsRequestOptions = {} +): Promise { + const { data } = await 
apiClient.get('/admin/ops/dashboard/error-trend', { + params, + signal: options.signal + }) + return data +} + +export async function getErrorDistribution( + params: { + time_range?: '5m' | '30m' | '1h' | '6h' | '24h' + start_time?: string + end_time?: string + platform?: string + group_id?: number | null + mode?: OpsQueryMode + }, + options: OpsRequestOptions = {} +): Promise { + const { data } = await apiClient.get('/admin/ops/dashboard/error-distribution', { + params, + signal: options.signal + }) + return data +} + +export type OpsErrorListView = 'errors' | 'excluded' | 'all' + +export type OpsErrorListQueryParams = { + page?: number + page_size?: number + time_range?: string + start_time?: string + end_time?: string + platform?: string + group_id?: number | null + account_id?: number | null + + phase?: string + error_owner?: string + error_source?: string + resolved?: string + view?: OpsErrorListView + + q?: string + status_codes?: string + status_codes_other?: string +} + +// Legacy unified endpoints +export async function listErrorLogs(params: OpsErrorListQueryParams): Promise { + const { data } = await apiClient.get('/admin/ops/errors', { params }) + return data +} + +export async function getErrorLogDetail(id: number): Promise { + const { data } = await apiClient.get(`/admin/ops/errors/${id}`) + return data +} + +export async function retryErrorRequest(id: number, req: OpsRetryRequest): Promise { + const { data } = await apiClient.post(`/admin/ops/errors/${id}/retry`, req) + return data +} + +export async function listRetryAttempts(errorId: number, limit = 50): Promise { + const { data } = await apiClient.get(`/admin/ops/errors/${errorId}/retries`, { params: { limit } }) + return data +} + +export async function updateErrorResolved(errorId: number, resolved: boolean): Promise { + await apiClient.put(`/admin/ops/errors/${errorId}/resolve`, { resolved }) +} + +// New split endpoints +export async function listRequestErrors(params: OpsErrorListQueryParams): 
Promise { + const { data } = await apiClient.get('/admin/ops/request-errors', { params }) + return data +} + +export async function listUpstreamErrors(params: OpsErrorListQueryParams): Promise { + const { data } = await apiClient.get('/admin/ops/upstream-errors', { params }) + return data +} + +export async function getRequestErrorDetail(id: number): Promise { + const { data } = await apiClient.get(`/admin/ops/request-errors/${id}`) + return data +} + +export async function getUpstreamErrorDetail(id: number): Promise { + const { data } = await apiClient.get(`/admin/ops/upstream-errors/${id}`) + return data +} + +export async function retryRequestErrorClient(id: number): Promise { + const { data } = await apiClient.post(`/admin/ops/request-errors/${id}/retry-client`, {}) + return data +} + +export async function retryRequestErrorUpstreamEvent(id: number, idx: number): Promise { + const { data } = await apiClient.post(`/admin/ops/request-errors/${id}/upstream-errors/${idx}/retry`, {}) + return data +} + +export async function retryUpstreamError(id: number): Promise { + const { data } = await apiClient.post(`/admin/ops/upstream-errors/${id}/retry`, {}) + return data +} + +export async function updateRequestErrorResolved(errorId: number, resolved: boolean): Promise { + await apiClient.put(`/admin/ops/request-errors/${errorId}/resolve`, { resolved }) +} + +export async function updateUpstreamErrorResolved(errorId: number, resolved: boolean): Promise { + await apiClient.put(`/admin/ops/upstream-errors/${errorId}/resolve`, { resolved }) +} + +export async function listRequestErrorUpstreamErrors( + id: number, + params: OpsErrorListQueryParams = {}, + options: { include_detail?: boolean } = {} +): Promise> { + const query: Record = { ...params } + if (options.include_detail) query.include_detail = '1' + const { data } = await apiClient.get>(`/admin/ops/request-errors/${id}/upstream-errors`, { params: query }) + return data +} + +export async function 
listRequestDetails(params: OpsRequestDetailsParams): Promise { + const { data } = await apiClient.get('/admin/ops/requests', { params }) + return data +} + +// Alert rules +export async function listAlertRules(): Promise { + const { data } = await apiClient.get('/admin/ops/alert-rules') + return data +} + +export async function createAlertRule(rule: AlertRule): Promise { + const { data } = await apiClient.post('/admin/ops/alert-rules', rule) + return data +} + +export async function updateAlertRule(id: number, rule: Partial): Promise { + const { data } = await apiClient.put(`/admin/ops/alert-rules/${id}`, rule) + return data +} + +export async function deleteAlertRule(id: number): Promise { + await apiClient.delete(`/admin/ops/alert-rules/${id}`) +} + +export interface AlertEventsQuery { + limit?: number + status?: string + severity?: string + email_sent?: boolean + time_range?: string + start_time?: string + end_time?: string + before_fired_at?: string + before_id?: number + platform?: string + group_id?: number +} + +export async function listAlertEvents(params: AlertEventsQuery = {}): Promise { + const { data } = await apiClient.get('/admin/ops/alert-events', { params }) + return data +} + +export async function getAlertEvent(id: number): Promise { + const { data } = await apiClient.get(`/admin/ops/alert-events/${id}`) + return data +} + +export async function updateAlertEventStatus(id: number, status: 'resolved' | 'manual_resolved'): Promise { + await apiClient.put(`/admin/ops/alert-events/${id}/status`, { status }) +} + +export async function createAlertSilence(payload: { + rule_id: number + platform: string + group_id?: number | null + region?: string | null + until: string + reason?: string +}): Promise { + await apiClient.post('/admin/ops/alert-silences', payload) +} + +// Email notification config +export async function getEmailNotificationConfig(): Promise { + const { data } = await apiClient.get('/admin/ops/email-notification/config') + return data +} + 
+export async function updateEmailNotificationConfig(config: EmailNotificationConfig): Promise { + const { data } = await apiClient.put('/admin/ops/email-notification/config', config) + return data +} + +// Runtime settings (DB-backed) +export async function getAlertRuntimeSettings(): Promise { + const { data } = await apiClient.get('/admin/ops/runtime/alert') + return data +} + +export async function updateAlertRuntimeSettings(config: OpsAlertRuntimeSettings): Promise { + const { data } = await apiClient.put('/admin/ops/runtime/alert', config) + return data +} + +// Advanced settings (DB-backed) +export async function getAdvancedSettings(): Promise { + const { data } = await apiClient.get('/admin/ops/advanced-settings') + return data +} + +export async function updateAdvancedSettings(config: OpsAdvancedSettings): Promise { + const { data } = await apiClient.put('/admin/ops/advanced-settings', config) + return data +} + +// ==================== Metric Thresholds ==================== + +async function getMetricThresholds(): Promise { + const { data } = await apiClient.get('/admin/ops/settings/metric-thresholds') + return data +} + +async function updateMetricThresholds(thresholds: OpsMetricThresholds): Promise { + await apiClient.put('/admin/ops/settings/metric-thresholds', thresholds) +} + +export const opsAPI = { + getDashboardOverview, + getThroughputTrend, + getLatencyHistogram, + getErrorTrend, + getErrorDistribution, + getConcurrencyStats, + getAccountAvailabilityStats, + getRealtimeTrafficSummary, + subscribeQPS, + + // Legacy unified endpoints + listErrorLogs, + getErrorLogDetail, + retryErrorRequest, + listRetryAttempts, + updateErrorResolved, + + // New split endpoints + listRequestErrors, + listUpstreamErrors, + getRequestErrorDetail, + getUpstreamErrorDetail, + retryRequestErrorClient, + retryRequestErrorUpstreamEvent, + retryUpstreamError, + updateRequestErrorResolved, + updateUpstreamErrorResolved, + listRequestErrorUpstreamErrors, + + 
listRequestDetails, + listAlertRules, + createAlertRule, + updateAlertRule, + deleteAlertRule, + listAlertEvents, + getAlertEvent, + updateAlertEventStatus, + createAlertSilence, + getEmailNotificationConfig, + updateEmailNotificationConfig, + getAlertRuntimeSettings, + updateAlertRuntimeSettings, + getAdvancedSettings, + updateAdvancedSettings, + getMetricThresholds, + updateMetricThresholds +} + +export default opsAPI diff --git a/frontend/src/api/admin/promo.ts b/frontend/src/api/admin/promo.ts new file mode 100644 index 00000000..6a8c4559 --- /dev/null +++ b/frontend/src/api/admin/promo.ts @@ -0,0 +1,69 @@ +/** + * Admin Promo Codes API endpoints + */ + +import { apiClient } from '../client' +import type { + PromoCode, + PromoCodeUsage, + CreatePromoCodeRequest, + UpdatePromoCodeRequest, + BasePaginationResponse +} from '@/types' + +export async function list( + page: number = 1, + pageSize: number = 20, + filters?: { + status?: string + search?: string + } +): Promise> { + const { data } = await apiClient.get>('/admin/promo-codes', { + params: { page, page_size: pageSize, ...filters } + }) + return data +} + +export async function getById(id: number): Promise { + const { data } = await apiClient.get(`/admin/promo-codes/${id}`) + return data +} + +export async function create(request: CreatePromoCodeRequest): Promise { + const { data } = await apiClient.post('/admin/promo-codes', request) + return data +} + +export async function update(id: number, request: UpdatePromoCodeRequest): Promise { + const { data } = await apiClient.put(`/admin/promo-codes/${id}`, request) + return data +} + +export async function deleteCode(id: number): Promise<{ message: string }> { + const { data } = await apiClient.delete<{ message: string }>(`/admin/promo-codes/${id}`) + return data +} + +export async function getUsages( + id: number, + page: number = 1, + pageSize: number = 20 +): Promise> { + const { data } = await apiClient.get>( + `/admin/promo-codes/${id}/usages`, + { 
params: { page, page_size: pageSize } } + ) + return data +} + +const promoAPI = { + list, + getById, + create, + update, + delete: deleteCode, + getUsages +} + +export default promoAPI diff --git a/frontend/src/api/admin/proxies.ts b/frontend/src/api/admin/proxies.ts index fe20a205..1af2ea39 100644 --- a/frontend/src/api/admin/proxies.ts +++ b/frontend/src/api/admin/proxies.ts @@ -4,7 +4,13 @@ */ import { apiClient } from '../client' -import type { Proxy, CreateProxyRequest, UpdateProxyRequest, PaginatedResponse } from '@/types' +import type { + Proxy, + ProxyAccountSummary, + CreateProxyRequest, + UpdateProxyRequest, + PaginatedResponse +} from '@/types' /** * List all proxies with pagination @@ -120,6 +126,7 @@ export async function testProxy(id: number): Promise<{ city?: string region?: string country?: string + country_code?: string }> { const { data } = await apiClient.post<{ success: boolean @@ -129,6 +136,7 @@ export async function testProxy(id: number): Promise<{ city?: string region?: string country?: string + country_code?: string }>(`/admin/proxies/${id}/test`) return data } @@ -160,8 +168,8 @@ export async function getStats(id: number): Promise<{ * @param id - Proxy ID * @returns List of accounts using the proxy */ -export async function getProxyAccounts(id: number): Promise> { - const { data } = await apiClient.get>(`/admin/proxies/${id}/accounts`) +export async function getProxyAccounts(id: number): Promise { + const { data } = await apiClient.get(`/admin/proxies/${id}/accounts`) return data } @@ -189,6 +197,17 @@ export async function batchCreate( return data } +export async function batchDelete(ids: number[]): Promise<{ + deleted_ids: number[] + skipped: Array<{ id: number; reason: string }> +}> { + const { data } = await apiClient.post<{ + deleted_ids: number[] + skipped: Array<{ id: number; reason: string }> + }>('/admin/proxies/batch-delete', { ids }) + return data +} + export const proxiesAPI = { list, getAll, @@ -201,7 +220,8 @@ export const 
proxiesAPI = { testProxy, getStats, getProxyAccounts, - batchCreate + batchCreate, + batchDelete } export default proxiesAPI diff --git a/frontend/src/api/admin/settings.ts b/frontend/src/api/admin/settings.ts index 2f6991e7..fc72be8d 100644 --- a/frontend/src/api/admin/settings.ts +++ b/frontend/src/api/admin/settings.ts @@ -22,6 +22,7 @@ export interface SystemSettings { api_base_url: string contact_info: string doc_url: string + home_content: string // SMTP settings smtp_host: string smtp_port: number @@ -34,14 +35,29 @@ export interface SystemSettings { turnstile_enabled: boolean turnstile_site_key: string turnstile_secret_key_configured: boolean - // LinuxDo Connect OAuth 登录(终端用户 SSO) + + // LinuxDo Connect OAuth settings linuxdo_connect_enabled: boolean linuxdo_connect_client_id: string linuxdo_connect_client_secret_configured: boolean linuxdo_connect_redirect_url: string + + // Model fallback configuration + enable_model_fallback: boolean + fallback_model_anthropic: string + fallback_model_openai: string + fallback_model_gemini: string + fallback_model_antigravity: string + // Identity patch configuration (Claude -> Gemini) enable_identity_patch: boolean identity_patch_prompt: string + + // Ops Monitoring (vNext) + ops_monitoring_enabled: boolean + ops_realtime_monitoring_enabled: boolean + ops_query_mode_default: 'auto' | 'raw' | 'preagg' | string + ops_metrics_interval_seconds: number } export interface UpdateSettingsRequest { @@ -55,6 +71,7 @@ export interface UpdateSettingsRequest { api_base_url?: string contact_info?: string doc_url?: string + home_content?: string smtp_host?: string smtp_port?: number smtp_username?: string @@ -69,8 +86,17 @@ export interface UpdateSettingsRequest { linuxdo_connect_client_id?: string linuxdo_connect_client_secret?: string linuxdo_connect_redirect_url?: string + enable_model_fallback?: boolean + fallback_model_anthropic?: string + fallback_model_openai?: string + fallback_model_gemini?: string + 
fallback_model_antigravity?: string enable_identity_patch?: boolean identity_patch_prompt?: string + ops_monitoring_enabled?: boolean + ops_realtime_monitoring_enabled?: boolean + ops_query_mode_default?: 'auto' | 'raw' | 'preagg' | string + ops_metrics_interval_seconds?: number } /** @@ -175,6 +201,41 @@ export async function deleteAdminApiKey(): Promise<{ message: string }> { return data } +/** + * Stream timeout settings interface + */ +export interface StreamTimeoutSettings { + enabled: boolean + action: 'temp_unsched' | 'error' | 'none' + temp_unsched_minutes: number + threshold_count: number + threshold_window_minutes: number +} + +/** + * Get stream timeout settings + * @returns Stream timeout settings + */ +export async function getStreamTimeoutSettings(): Promise { + const { data } = await apiClient.get('/admin/settings/stream-timeout') + return data +} + +/** + * Update stream timeout settings + * @param settings - Stream timeout settings to update + * @returns Updated settings + */ +export async function updateStreamTimeoutSettings( + settings: StreamTimeoutSettings +): Promise { + const { data } = await apiClient.put( + '/admin/settings/stream-timeout', + settings + ) + return data +} + export const settingsAPI = { getSettings, updateSettings, @@ -182,7 +243,9 @@ export const settingsAPI = { sendTestEmail, getAdminApiKey, regenerateAdminApiKey, - deleteAdminApiKey + deleteAdminApiKey, + getStreamTimeoutSettings, + updateStreamTimeoutSettings } export default settingsAPI diff --git a/frontend/src/api/admin/usage.ts b/frontend/src/api/admin/usage.ts index 4712dafd..dd85fc24 100644 --- a/frontend/src/api/admin/usage.ts +++ b/frontend/src/api/admin/usage.ts @@ -16,6 +16,7 @@ export interface AdminUsageStatsResponse { total_tokens: number total_cost: number total_actual_cost: number + total_account_cost?: number average_duration_ms: number } @@ -64,7 +65,6 @@ export async function getStats(params: { group_id?: number model?: string stream?: boolean - 
billing_type?: number period?: string start_date?: string end_date?: string diff --git a/frontend/src/api/auth.ts b/frontend/src/api/auth.ts index 9c5379f2..fddc23ef 100644 --- a/frontend/src/api/auth.ts +++ b/frontend/src/api/auth.ts @@ -113,6 +113,26 @@ export async function sendVerifyCode( return data } +/** + * Validate promo code response + */ +export interface ValidatePromoCodeResponse { + valid: boolean + bonus_amount?: number + error_code?: string + message?: string +} + +/** + * Validate promo code (public endpoint, no auth required) + * @param code - Promo code to validate + * @returns Validation result with bonus amount if valid + */ +export async function validatePromoCode(code: string): Promise { + const { data } = await apiClient.post('/auth/validate-promo-code', { code }) + return data +} + export const authAPI = { login, register, @@ -123,7 +143,8 @@ export const authAPI = { getAuthToken, clearAuthToken, getPublicSettings, - sendVerifyCode + sendVerifyCode, + validatePromoCode } export default authAPI diff --git a/frontend/src/api/client.ts b/frontend/src/api/client.ts index 4e53069a..3827498b 100644 --- a/frontend/src/api/client.ts +++ b/frontend/src/api/client.ts @@ -80,9 +80,45 @@ apiClient.interceptors.response.use( return response }, (error: AxiosError>) => { + // Request cancellation: keep the original axios cancellation error so callers can ignore it. + // Otherwise we'd misclassify it as a generic "network error". + if (error.code === 'ERR_CANCELED' || axios.isCancel(error)) { + return Promise.reject(error) + } + // Handle common errors if (error.response) { const { status, data } = error.response + const url = String(error.config?.url || '') + + // Validate `data` shape to avoid HTML error pages breaking our error handling. + const apiData = (typeof data === 'object' && data !== null ? 
data : {}) as Record + + // Ops monitoring disabled: treat as feature-flagged 404, and proactively redirect away + // from ops pages to avoid broken UI states. + if (status === 404 && apiData.message === 'Ops monitoring is disabled') { + try { + localStorage.setItem('ops_monitoring_enabled_cached', 'false') + } catch { + // ignore localStorage failures + } + try { + window.dispatchEvent(new CustomEvent('ops-monitoring-disabled')) + } catch { + // ignore event failures + } + + if (window.location.pathname.startsWith('/admin/ops')) { + window.location.href = '/admin/settings' + } + + return Promise.reject({ + status, + code: 'OPS_DISABLED', + message: apiData.message || error.message, + url + }) + } // 401: Unauthorized - clear token and redirect to login if (status === 401) { @@ -113,8 +149,8 @@ apiClient.interceptors.response.use( // Return structured error return Promise.reject({ status, - code: data?.code, - message: data?.message || error.message + code: apiData.code, + message: apiData.message || apiData.detail || error.message }) } diff --git a/frontend/src/api/keys.ts b/frontend/src/api/keys.ts index caa339e4..cdae1359 100644 --- a/frontend/src/api/keys.ts +++ b/frontend/src/api/keys.ts @@ -42,12 +42,16 @@ export async function getById(id: number): Promise { * @param name - Key name * @param groupId - Optional group ID * @param customKey - Optional custom key value + * @param ipWhitelist - Optional IP whitelist + * @param ipBlacklist - Optional IP blacklist * @returns Created API key */ export async function create( name: string, groupId?: number | null, - customKey?: string + customKey?: string, + ipWhitelist?: string[], + ipBlacklist?: string[] ): Promise { const payload: CreateApiKeyRequest = { name } if (groupId !== undefined) { @@ -56,6 +60,12 @@ export async function create( if (customKey) { payload.custom_key = customKey } + if (ipWhitelist && ipWhitelist.length > 0) { + payload.ip_whitelist = ipWhitelist + } + if (ipBlacklist && ipBlacklist.length > 
0) { + payload.ip_blacklist = ipBlacklist + } const { data } = await apiClient.post('/keys', payload) return data diff --git a/frontend/src/components/account/AccountCapacityCell.vue b/frontend/src/components/account/AccountCapacityCell.vue new file mode 100644 index 00000000..ae338aca --- /dev/null +++ b/frontend/src/components/account/AccountCapacityCell.vue @@ -0,0 +1,199 @@ + + + diff --git a/frontend/src/components/account/AccountGroupsCell.vue b/frontend/src/components/account/AccountGroupsCell.vue new file mode 100644 index 00000000..512383a5 --- /dev/null +++ b/frontend/src/components/account/AccountGroupsCell.vue @@ -0,0 +1,158 @@ + + + diff --git a/frontend/src/components/account/AccountStatsModal.vue b/frontend/src/components/account/AccountStatsModal.vue index 92016699..7968fa8d 100644 --- a/frontend/src/components/account/AccountStatsModal.vue +++ b/frontend/src/components/account/AccountStatsModal.vue @@ -73,11 +73,12 @@

{{ t('admin.accounts.stats.accumulatedCost') }} - ({{ t('admin.accounts.stats.standardCost') }}: ${{ + + ({{ t('usage.userBilled') }}: ${{ formatCost(stats.summary.total_user_cost) }} · + {{ t('admin.accounts.stats.standardCost') }}: ${{ formatCost(stats.summary.total_standard_cost) - }}) + }}) +

@@ -121,12 +122,15 @@

${{ formatCost(stats.summary.avg_daily_cost) }}

-

+

{{ t('admin.accounts.stats.basedOnActualDays', { days: stats.summary.actual_days_used }) }} + + ({{ t('usage.userBilled') }}: ${{ formatCost(stats.summary.avg_daily_user_cost) }}) +

@@ -189,13 +193,17 @@
- {{ - t('admin.accounts.stats.cost') - }} + {{ t('usage.accountBilled') }} ${{ formatCost(stats.summary.today?.cost || 0) }}
+
+ {{ t('usage.userBilled') }} + ${{ formatCost(stats.summary.today?.user_cost || 0) }} +
{{ t('admin.accounts.stats.requests') @@ -240,13 +248,17 @@ }}
- {{ - t('admin.accounts.stats.cost') - }} + {{ t('usage.accountBilled') }} ${{ formatCost(stats.summary.highest_cost_day?.cost || 0) }}
+
+ {{ t('usage.userBilled') }} + ${{ formatCost(stats.summary.highest_cost_day?.user_cost || 0) }} +
{{ t('admin.accounts.stats.requests') @@ -291,13 +303,17 @@ }}
- {{ - t('admin.accounts.stats.cost') - }} + {{ t('usage.accountBilled') }} ${{ formatCost(stats.summary.highest_request_day?.cost || 0) }}
+
+ {{ t('usage.userBilled') }} + ${{ formatCost(stats.summary.highest_request_day?.user_cost || 0) }} +
@@ -397,13 +413,17 @@ }}
- {{ - t('admin.accounts.stats.todayCost') - }} + {{ t('usage.accountBilled') }} ${{ formatCost(stats.summary.today?.cost || 0) }}
+
+ {{ t('usage.userBilled') }} + ${{ formatCost(stats.summary.today?.user_cost || 0) }} +
@@ -517,14 +537,24 @@ const trendChartData = computed(() => { labels: stats.value.history.map((h) => h.label), datasets: [ { - label: t('admin.accounts.stats.cost') + ' (USD)', - data: stats.value.history.map((h) => h.cost), + label: t('usage.accountBilled') + ' (USD)', + data: stats.value.history.map((h) => h.actual_cost), borderColor: '#3b82f6', backgroundColor: 'rgba(59, 130, 246, 0.1)', fill: true, tension: 0.3, yAxisID: 'y' }, + { + label: t('usage.userBilled') + ' (USD)', + data: stats.value.history.map((h) => h.user_cost), + borderColor: '#10b981', + backgroundColor: 'rgba(16, 185, 129, 0.08)', + fill: false, + tension: 0.3, + borderDash: [5, 5], + yAxisID: 'y' + }, { label: t('admin.accounts.stats.requests'), data: stats.value.history.map((h) => h.requests), @@ -602,7 +632,7 @@ const lineChartOptions = computed(() => ({ }, title: { display: true, - text: t('admin.accounts.stats.cost') + ' (USD)', + text: t('usage.accountBilled') + ' (USD)', color: '#3b82f6', font: { size: 11 diff --git a/frontend/src/components/account/AccountTodayStatsCell.vue b/frontend/src/components/account/AccountTodayStatsCell.vue index b8bbc618..a920f314 100644 --- a/frontend/src/components/account/AccountTodayStatsCell.vue +++ b/frontend/src/components/account/AccountTodayStatsCell.vue @@ -32,15 +32,20 @@ formatTokens(stats.tokens) }} - +
- {{ t('admin.accounts.stats.cost') }}: + {{ t('usage.accountBilled') }}: {{ formatCurrency(stats.cost) }}
+ +
+ {{ t('usage.userBilled') }}: + {{ + formatCurrency(stats.user_cost) + }} +
diff --git a/frontend/src/components/account/BulkEditAccountModal.vue b/frontend/src/components/account/BulkEditAccountModal.vue index 51ad32d1..fb776e96 100644 --- a/frontend/src/components/account/BulkEditAccountModal.vue +++ b/frontend/src/components/account/BulkEditAccountModal.vue @@ -459,7 +459,7 @@ -
+
+
+
+ + +
+ +

{{ t('admin.accounts.billingRateMultiplierHint') }}

+
@@ -655,6 +685,7 @@ const enableInterceptWarmup = ref(false) const enableProxy = ref(false) const enableConcurrency = ref(false) const enablePriority = ref(false) +const enableRateMultiplier = ref(false) const enableStatus = ref(false) const enableGroups = ref(false) @@ -670,6 +701,7 @@ const interceptWarmupRequests = ref(false) const proxyId = ref(null) const concurrency = ref(1) const priority = ref(1) +const rateMultiplier = ref(1) const status = ref<'active' | 'inactive'>('active') const groupIds = ref([]) @@ -778,6 +810,16 @@ const addPresetMapping = (from: string, to: string) => { const toggleErrorCode = (code: number) => { const index = selectedErrorCodes.value.indexOf(code) if (index === -1) { + // Adding code - check for 429/529 warning + if (code === 429) { + if (!confirm(t('admin.accounts.customErrorCodes429Warning'))) { + return + } + } else if (code === 529) { + if (!confirm(t('admin.accounts.customErrorCodes529Warning'))) { + return + } + } selectedErrorCodes.value.push(code) } else { selectedErrorCodes.value.splice(index, 1) @@ -794,6 +836,16 @@ const addCustomErrorCode = () => { appStore.showInfo(t('admin.accounts.errorCodeExists')) return } + // Check for 429/529 warning + if (code === 429) { + if (!confirm(t('admin.accounts.customErrorCodes429Warning'))) { + return + } + } else if (code === 529) { + if (!confirm(t('admin.accounts.customErrorCodes529Warning'))) { + return + } + } selectedErrorCodes.value.push(code) customErrorCodeInput.value = null } @@ -843,6 +895,10 @@ const buildUpdatePayload = (): Record | null => { updates.priority = priority.value } + if (enableRateMultiplier.value) { + updates.rate_multiplier = rateMultiplier.value + } + if (enableStatus.value) { updates.status = status.value } @@ -903,6 +959,7 @@ const handleSubmit = async () => { enableProxy.value || enableConcurrency.value || enablePriority.value || + enableRateMultiplier.value || enableStatus.value || enableGroups.value @@ -957,6 +1014,7 @@ watch( enableProxy.value = 
false enableConcurrency.value = false enablePriority.value = false + enableRateMultiplier.value = false enableStatus.value = false enableGroups.value = false @@ -971,6 +1029,7 @@ watch( proxyId.value = null concurrency.value = 1 priority.value = 1 + rateMultiplier.value = 1 status.value = 'active' groupIds.value = [] } diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 5833632b..c81de00e 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -1196,7 +1196,7 @@
-
+
@@ -1212,6 +1212,11 @@ />

{{ t('admin.accounts.priorityHint') }}

+
+ + +

{{ t('admin.accounts.billingRateMultiplierHint') }}

+
@@ -1832,6 +1837,7 @@ const form = reactive({ proxy_id: null as number | null, concurrency: 10, priority: 1, + rate_multiplier: 1, group_ids: [] as number[], expires_at: null as number | null }) @@ -1976,6 +1982,16 @@ const addPresetMapping = (from: string, to: string) => { const toggleErrorCode = (code: number) => { const index = selectedErrorCodes.value.indexOf(code) if (index === -1) { + // Adding code - check for 429/529 warning + if (code === 429) { + if (!confirm(t('admin.accounts.customErrorCodes429Warning'))) { + return + } + } else if (code === 529) { + if (!confirm(t('admin.accounts.customErrorCodes529Warning'))) { + return + } + } selectedErrorCodes.value.push(code) } else { selectedErrorCodes.value.splice(index, 1) @@ -1993,6 +2009,16 @@ const addCustomErrorCode = () => { appStore.showInfo(t('admin.accounts.errorCodeExists')) return } + // Check for 429/529 warning + if (code === 429) { + if (!confirm(t('admin.accounts.customErrorCodes429Warning'))) { + return + } + } else if (code === 529) { + if (!confirm(t('admin.accounts.customErrorCodes529Warning'))) { + return + } + } selectedErrorCodes.value.push(code) customErrorCodeInput.value = null } @@ -2099,6 +2125,7 @@ const resetForm = () => { form.proxy_id = null form.concurrency = 10 form.priority = 1 + form.rate_multiplier = 1 form.group_ids = [] form.expires_at = null accountCategory.value = 'oauth-based' @@ -2252,6 +2279,7 @@ const createAccountAndFinish = async ( proxy_id: form.proxy_id, concurrency: form.concurrency, priority: form.priority, + rate_multiplier: form.rate_multiplier, group_ids: form.group_ids, expires_at: form.expires_at, auto_pause_on_expired: autoPauseOnExpired.value @@ -2462,6 +2490,7 @@ const handleCookieAuth = async (sessionKey: string) => { await adminAPI.accounts.create({ name: accountName, + notes: form.notes, platform: form.platform, type: addMethod.value, // Use addMethod as type: 'oauth' or 'setup-token' credentials, @@ -2469,6 +2498,9 @@ const handleCookieAuth = async 
(sessionKey: string) => { proxy_id: form.proxy_id, concurrency: form.concurrency, priority: form.priority, + rate_multiplier: form.rate_multiplier, + group_ids: form.group_ids, + expires_at: form.expires_at, auto_pause_on_expired: autoPauseOnExpired.value }) diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index 3b36cfbf..d27364f1 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -549,7 +549,7 @@
-
+
@@ -564,6 +564,11 @@ data-tour="account-form-priority" />
+
+ + +

{{ t('admin.accounts.billingRateMultiplierHint') }}

+
@@ -599,6 +604,136 @@
+ +
+
+

{{ t('admin.accounts.quotaControl.title') }}

+

+ {{ t('admin.accounts.quotaControl.hint') }} +

+
+ + +
+
+
+ +

+ {{ t('admin.accounts.quotaControl.windowCost.hint') }} +

+
+ +
+ +
+
+ +
+ $ + +
+

{{ t('admin.accounts.quotaControl.windowCost.limitHint') }}

+
+
+ +
+ $ + +
+

{{ t('admin.accounts.quotaControl.windowCost.stickyReserveHint') }}

+
+
+
+ + +
+
+
+ +

+ {{ t('admin.accounts.quotaControl.sessionLimit.hint') }} +

+
+ +
+ +
+
+ + +

{{ t('admin.accounts.quotaControl.sessionLimit.maxSessionsHint') }}

+
+
+ +
+ + {{ t('common.minutes') }} +
+

{{ t('admin.accounts.quotaControl.sessionLimit.idleTimeoutHint') }}

+
+
+
+
+
@@ -762,6 +897,14 @@ const mixedScheduling = ref(false) // For antigravity accounts: enable mixed sch const tempUnschedEnabled = ref(false) const tempUnschedRules = ref([]) +// Quota control state (Anthropic OAuth/SetupToken only) +const windowCostEnabled = ref(false) +const windowCostLimit = ref(null) +const windowCostStickyReserve = ref(null) +const sessionLimitEnabled = ref(false) +const maxSessions = ref(null) +const sessionIdleTimeout = ref(null) + // Computed: current preset mappings based on platform const presetMappings = computed(() => getPresetMappingsByPlatform(props.account?.platform || 'anthropic')) const tempUnschedPresets = computed(() => [ @@ -807,6 +950,7 @@ const form = reactive({ proxy_id: null as number | null, concurrency: 1, priority: 1, + rate_multiplier: 1, status: 'active' as 'active' | 'inactive', group_ids: [] as number[], expires_at: null as number | null @@ -834,6 +978,7 @@ watch( form.proxy_id = newAccount.proxy_id form.concurrency = newAccount.concurrency form.priority = newAccount.priority + form.rate_multiplier = newAccount.rate_multiplier ?? 1 form.status = newAccount.status as 'active' | 'inactive' form.group_ids = newAccount.group_ids || [] form.expires_at = newAccount.expires_at ?? 
null @@ -847,6 +992,9 @@ watch( const extra = newAccount.extra as Record | undefined mixedScheduling.value = extra?.mixed_scheduling === true + // Load quota control settings (Anthropic OAuth/SetupToken only) + loadQuotaControlSettings(newAccount) + loadTempUnschedRules(credentials) // Initialize API Key fields for apikey type @@ -936,6 +1084,16 @@ const addPresetMapping = (from: string, to: string) => { const toggleErrorCode = (code: number) => { const index = selectedErrorCodes.value.indexOf(code) if (index === -1) { + // Adding code - check for 429/529 warning + if (code === 429) { + if (!confirm(t('admin.accounts.customErrorCodes429Warning'))) { + return + } + } else if (code === 529) { + if (!confirm(t('admin.accounts.customErrorCodes529Warning'))) { + return + } + } selectedErrorCodes.value.push(code) } else { selectedErrorCodes.value.splice(index, 1) @@ -953,6 +1111,16 @@ const addCustomErrorCode = () => { appStore.showInfo(t('admin.accounts.errorCodeExists')) return } + // Check for 429/529 warning + if (code === 429) { + if (!confirm(t('admin.accounts.customErrorCodes429Warning'))) { + return + } + } else if (code === 529) { + if (!confirm(t('admin.accounts.customErrorCodes529Warning'))) { + return + } + } selectedErrorCodes.value.push(code) customErrorCodeInput.value = null } @@ -1060,6 +1228,35 @@ function loadTempUnschedRules(credentials?: Record) { }) } +// Load quota control settings from account (Anthropic OAuth/SetupToken only) +function loadQuotaControlSettings(account: Account) { + // Reset all quota control state first + windowCostEnabled.value = false + windowCostLimit.value = null + windowCostStickyReserve.value = null + sessionLimitEnabled.value = false + maxSessions.value = null + sessionIdleTimeout.value = null + + // Only applies to Anthropic OAuth/SetupToken accounts + if (account.platform !== 'anthropic' || (account.type !== 'oauth' && account.type !== 'setup-token')) { + return + } + + // Load from extra field (via backend DTO fields) + 
if (account.window_cost_limit != null && account.window_cost_limit > 0) { + windowCostEnabled.value = true + windowCostLimit.value = account.window_cost_limit + windowCostStickyReserve.value = account.window_cost_sticky_reserve ?? 10 + } + + if (account.max_sessions != null && account.max_sessions > 0) { + sessionLimitEnabled.value = true + maxSessions.value = account.max_sessions + sessionIdleTimeout.value = account.session_idle_timeout_minutes ?? 5 + } +} + function formatTempUnschedKeywords(value: unknown) { if (Array.isArray(value)) { return value @@ -1187,6 +1384,32 @@ const handleSubmit = async () => { updatePayload.extra = newExtra } + // For Anthropic OAuth/SetupToken accounts, handle quota control settings in extra + if (props.account.platform === 'anthropic' && (props.account.type === 'oauth' || props.account.type === 'setup-token')) { + const currentExtra = (props.account.extra as Record) || {} + const newExtra: Record = { ...currentExtra } + + // Window cost limit settings + if (windowCostEnabled.value && windowCostLimit.value != null && windowCostLimit.value > 0) { + newExtra.window_cost_limit = windowCostLimit.value + newExtra.window_cost_sticky_reserve = windowCostStickyReserve.value ?? 10 + } else { + delete newExtra.window_cost_limit + delete newExtra.window_cost_sticky_reserve + } + + // Session limit settings + if (sessionLimitEnabled.value && maxSessions.value != null && maxSessions.value > 0) { + newExtra.max_sessions = maxSessions.value + newExtra.session_idle_timeout_minutes = sessionIdleTimeout.value ?? 
5 + } else { + delete newExtra.max_sessions + delete newExtra.session_idle_timeout_minutes + } + + updatePayload.extra = newExtra + } + await adminAPI.accounts.update(props.account.id, updatePayload) appStore.showSuccess(t('admin.accounts.accountUpdated')) emit('updated') diff --git a/frontend/src/components/account/UsageProgressBar.vue b/frontend/src/components/account/UsageProgressBar.vue index 1b3561ef..93844295 100644 --- a/frontend/src/components/account/UsageProgressBar.vue +++ b/frontend/src/components/account/UsageProgressBar.vue @@ -15,7 +15,13 @@ {{ formatTokens }} - ${{ formatCost }} + A ${{ formatAccountCost }} + + U ${{ formatUserCost }} +
@@ -149,8 +155,13 @@ const formatTokens = computed(() => { return t.toString() }) -const formatCost = computed(() => { +const formatAccountCost = computed(() => { if (!props.windowStats) return '0.00' return props.windowStats.cost.toFixed(2) }) + +const formatUserCost = computed(() => { + if (!props.windowStats || props.windowStats.user_cost == null) return '0.00' + return props.windowStats.user_cost.toFixed(2) +}) diff --git a/frontend/src/components/admin/account/AccountStatsModal.vue b/frontend/src/components/admin/account/AccountStatsModal.vue index 138f5811..72a71d36 100644 --- a/frontend/src/components/admin/account/AccountStatsModal.vue +++ b/frontend/src/components/admin/account/AccountStatsModal.vue @@ -61,11 +61,12 @@

{{ t('admin.accounts.stats.accumulatedCost') }} - ({{ t('admin.accounts.stats.standardCost') }}: ${{ + + ({{ t('usage.userBilled') }}: ${{ formatCost(stats.summary.total_user_cost) }} · + {{ t('admin.accounts.stats.standardCost') }}: ${{ formatCost(stats.summary.total_standard_cost) - }}) + }}) +

@@ -108,12 +109,15 @@

${{ formatCost(stats.summary.avg_daily_cost) }}

-

+

{{ t('admin.accounts.stats.basedOnActualDays', { days: stats.summary.actual_days_used }) }} + + ({{ t('usage.userBilled') }}: ${{ formatCost(stats.summary.avg_daily_user_cost) }}) +

@@ -164,13 +168,17 @@
- {{ - t('admin.accounts.stats.cost') - }} + {{ t('usage.accountBilled') }} ${{ formatCost(stats.summary.today?.cost || 0) }}
+
+ {{ t('usage.userBilled') }} + ${{ formatCost(stats.summary.today?.user_cost || 0) }} +
{{ t('admin.accounts.stats.requests') @@ -210,13 +218,17 @@ }}
- {{ - t('admin.accounts.stats.cost') - }} + {{ t('usage.accountBilled') }} ${{ formatCost(stats.summary.highest_cost_day?.cost || 0) }}
+
+ {{ t('usage.userBilled') }} + ${{ formatCost(stats.summary.highest_cost_day?.user_cost || 0) }} +
{{ t('admin.accounts.stats.requests') @@ -260,13 +272,17 @@ }}
- {{ - t('admin.accounts.stats.cost') - }} + {{ t('usage.accountBilled') }} ${{ formatCost(stats.summary.highest_request_day?.cost || 0) }}
+
+ {{ t('usage.userBilled') }} + ${{ formatCost(stats.summary.highest_request_day?.user_cost || 0) }} +
@@ -485,14 +501,24 @@ const trendChartData = computed(() => { labels: stats.value.history.map((h) => h.label), datasets: [ { - label: t('admin.accounts.stats.cost') + ' (USD)', - data: stats.value.history.map((h) => h.cost), + label: t('usage.accountBilled') + ' (USD)', + data: stats.value.history.map((h) => h.actual_cost), borderColor: '#3b82f6', backgroundColor: 'rgba(59, 130, 246, 0.1)', fill: true, tension: 0.3, yAxisID: 'y' }, + { + label: t('usage.userBilled') + ' (USD)', + data: stats.value.history.map((h) => h.user_cost), + borderColor: '#10b981', + backgroundColor: 'rgba(16, 185, 129, 0.08)', + fill: false, + tension: 0.3, + borderDash: [5, 5], + yAxisID: 'y' + }, { label: t('admin.accounts.stats.requests'), data: stats.value.history.map((h) => h.requests), @@ -570,7 +596,7 @@ const lineChartOptions = computed(() => ({ }, title: { display: true, - text: t('admin.accounts.stats.cost') + ' (USD)', + text: t('usage.accountBilled') + ' (USD)', color: '#3b82f6', font: { size: 11 diff --git a/frontend/src/components/admin/usage/UsageFilters.vue b/frontend/src/components/admin/usage/UsageFilters.vue index 924f5fb6..0926d83c 100644 --- a/frontend/src/components/admin/usage/UsageFilters.vue +++ b/frontend/src/components/admin/usage/UsageFilters.vue @@ -127,12 +127,6 @@ - -
@@ -227,12 +221,6 @@ const streamTypeOptions = ref([ { value: false, label: t('usage.sync') } ]) -const billingTypeOptions = ref([ - { value: null, label: t('admin.usage.allBillingTypes') }, - { value: 1, label: t('usage.subscription') }, - { value: 0, label: t('usage.balance') } -]) - const emitChange = () => emit('change') const updateStartDate = (value: string) => { diff --git a/frontend/src/components/admin/usage/UsageStatsCards.vue b/frontend/src/components/admin/usage/UsageStatsCards.vue index 16ce6619..cd962a09 100644 --- a/frontend/src/components/admin/usage/UsageStatsCards.vue +++ b/frontend/src/components/admin/usage/UsageStatsCards.vue @@ -27,9 +27,18 @@

{{ t('usage.totalCost') }}

-

${{ (stats?.total_actual_cost || 0).toFixed(4) }}

-

- {{ t('usage.standardCost') }}: ${{ (stats?.total_cost || 0).toFixed(4) }} +

+ ${{ ((stats?.total_account_cost ?? stats?.total_actual_cost) || 0).toFixed(4) }} +

+

+ {{ t('usage.userBilled') }}: + ${{ (stats?.total_actual_cost || 0).toFixed(4) }} + · {{ t('usage.standardCost') }}: + ${{ (stats?.total_cost || 0).toFixed(4) }} +

+

+ {{ t('usage.standardCost') }}: + ${{ (stats?.total_cost || 0).toFixed(4) }}

diff --git a/frontend/src/components/admin/usage/UsageTable.vue b/frontend/src/components/admin/usage/UsageTable.vue index c53b8c90..d2260c59 100644 --- a/frontend/src/components/admin/usage/UsageTable.vue +++ b/frontend/src/components/admin/usage/UsageTable.vue @@ -81,27 +81,26 @@ - - + + @@ -203,14 +207,24 @@ {{ t('usage.rate') }} {{ (tooltipData?.rate_multiplier || 1).toFixed(2) }}x +
+ {{ t('usage.accountMultiplier') }} + {{ (tooltipData?.account_rate_multiplier ?? 1).toFixed(2) }}x +
{{ t('usage.original') }} ${{ tooltipData?.total_cost?.toFixed(6) || '0.000000' }}
-
- {{ t('usage.billed') }} +
+ {{ t('usage.userBilled') }} ${{ tooltipData?.actual_cost?.toFixed(6) || '0.000000' }}
+
+ {{ t('usage.accountBilled') }} + + ${{ (((tooltipData?.total_cost || 0) * (tooltipData?.account_rate_multiplier ?? 1)) || 0).toFixed(6) }} + +
@@ -249,11 +263,11 @@ const cols = computed(() => [ { key: 'stream', label: t('usage.type'), sortable: false }, { key: 'tokens', label: t('usage.tokens'), sortable: false }, { key: 'cost', label: t('usage.cost'), sortable: false }, - { key: 'billing_type', label: t('usage.billingType'), sortable: false }, { key: 'first_token', label: t('usage.firstToken'), sortable: false }, { key: 'duration', label: t('usage.duration'), sortable: false }, { key: 'created_at', label: t('usage.time'), sortable: true }, - { key: 'user_agent', label: t('usage.userAgent'), sortable: false } + { key: 'user_agent', label: t('usage.userAgent'), sortable: false }, + { key: 'ip_address', label: t('admin.usage.ipAddress'), sortable: false } ]) const formatCacheTokens = (tokens: number): string => { diff --git a/frontend/src/components/admin/user/UserBalanceModal.vue b/frontend/src/components/admin/user/UserBalanceModal.vue index 1918577a..c669c2a5 100644 --- a/frontend/src/components/admin/user/UserBalanceModal.vue +++ b/frontend/src/components/admin/user/UserBalanceModal.vue @@ -3,14 +3,17 @@
{{ user.email.charAt(0).toUpperCase() }}
-

{{ user.email }}

{{ t('admin.users.currentBalance') }}: ${{ user.balance.toFixed(2) }}

+

{{ user.email }}

{{ t('admin.users.currentBalance') }}: ${{ formatBalance(user.balance) }}

-
$
+
+
$
+ +
-
{{ t('admin.users.newBalance') }}:${{ calculateNewBalance().toFixed(2) }}
+
{{ t('admin.users.newBalance') }}:${{ formatBalance(calculateNewBalance()) }}